Diffstat (limited to 'pym')
-rw-r--r--  pym/_emerge/AbstractDepPriority.py | 5
-rw-r--r--  pym/_emerge/AbstractEbuildProcess.py | 58
-rw-r--r--  pym/_emerge/AbstractPollTask.py | 2
-rw-r--r--  pym/_emerge/AsynchronousLock.py | 66
-rw-r--r--  pym/_emerge/AsynchronousTask.py | 14
-rw-r--r--  pym/_emerge/Binpkg.py | 7
-rw-r--r--  pym/_emerge/BinpkgExtractorAsync.py | 15
-rw-r--r--  pym/_emerge/BinpkgFetcher.py | 18
-rw-r--r--  pym/_emerge/BinpkgVerifier.py | 143
-rw-r--r--  pym/_emerge/BlockerCache.py | 10
-rw-r--r--  pym/_emerge/BlockerDB.py | 12
-rw-r--r--  pym/_emerge/CompositeTask.py | 4
-rw-r--r--  pym/_emerge/DepPriority.py | 29
-rw-r--r--  pym/_emerge/DepPrioritySatisfiedRange.py | 24
-rw-r--r--  pym/_emerge/DependencyArg.py | 10
-rw-r--r--  pym/_emerge/EbuildBuild.py | 36
-rw-r--r--  pym/_emerge/EbuildBuildDir.py | 11
-rw-r--r--  pym/_emerge/EbuildExecuter.py | 13
-rw-r--r--  pym/_emerge/EbuildFetcher.py | 68
-rw-r--r--  pym/_emerge/EbuildMetadataPhase.py | 66
-rw-r--r--  pym/_emerge/EbuildPhase.py | 63
-rw-r--r--  pym/_emerge/EbuildProcess.py | 12
-rw-r--r--  pym/_emerge/EbuildSpawnProcess.py | 10
-rw-r--r--  pym/_emerge/FakeVartree.py | 123
-rw-r--r--  pym/_emerge/FifoIpcDaemon.py | 43
-rw-r--r--  pym/_emerge/JobStatusDisplay.py | 44
-rw-r--r--  pym/_emerge/MergeListItem.py | 18
-rw-r--r--  pym/_emerge/MetadataRegen.py | 93
-rw-r--r--  pym/_emerge/MiscFunctionsProcess.py | 7
-rw-r--r--  pym/_emerge/Package.py | 317
-rw-r--r--  pym/_emerge/PackageMerge.py | 7
-rw-r--r--  pym/_emerge/PackageUninstall.py | 6
-rw-r--r--  pym/_emerge/PackageVirtualDbapi.py | 4
-rw-r--r--  pym/_emerge/PipeReader.py | 37
-rw-r--r--  pym/_emerge/PollScheduler.py | 129
-rw-r--r--  pym/_emerge/QueueScheduler.py | 105
-rw-r--r--  pym/_emerge/RootConfig.py | 13
-rw-r--r--  pym/_emerge/Scheduler.py | 240
-rw-r--r--  pym/_emerge/SpawnProcess.py | 269
-rw-r--r--  pym/_emerge/SubProcess.py | 30
-rw-r--r--  pym/_emerge/Task.py | 9
-rw-r--r--  pym/_emerge/TaskScheduler.py | 26
-rw-r--r--  pym/_emerge/UnmergeDepPriority.py | 27
-rw-r--r--  pym/_emerge/UseFlagDisplay.py | 10
-rw-r--r--  pym/_emerge/actions.py | 1704
-rw-r--r--  pym/_emerge/chk_updated_cfg_files.py | 42
-rw-r--r--  pym/_emerge/clear_caches.py | 4
-rw-r--r--  pym/_emerge/countdown.py | 18
-rw-r--r--  pym/_emerge/create_depgraph_params.py | 23
-rw-r--r--  pym/_emerge/create_world_atom.py | 25
-rw-r--r--  pym/_emerge/depgraph.py | 2451
-rw-r--r--  pym/_emerge/emergelog.py | 12
-rw-r--r--  pym/_emerge/getloadavg.py | 5
-rw-r--r--  pym/_emerge/help.py | 10
-rw-r--r--  pym/_emerge/is_valid_package_atom.py | 7
-rw-r--r--  pym/_emerge/main.py | 1297
-rw-r--r--  pym/_emerge/post_emerge.py | 165
-rw-r--r--  pym/_emerge/resolver/backtracking.py | 38
-rw-r--r--  pym/_emerge/resolver/circular_dependency.py | 24
-rw-r--r--  pym/_emerge/resolver/output.py | 537
-rw-r--r--  pym/_emerge/resolver/output_helpers.py | 95
-rw-r--r--  pym/_emerge/resolver/package_tracker.py | 301
-rw-r--r--  pym/_emerge/resolver/slot_collision.py | 230
-rw-r--r--  pym/_emerge/search.py | 4
-rw-r--r--  pym/_emerge/stdout_spinner.py | 13
-rw-r--r--  pym/_emerge/unmerge.py | 5
-rw-r--r--  pym/portage/__init__.py | 211
-rw-r--r--  pym/portage/_emirrordist/Config.py | 132
-rw-r--r--  pym/portage/_emirrordist/DeletionIterator.py | 83
-rw-r--r--  pym/portage/_emirrordist/DeletionTask.py | 129
-rw-r--r--  pym/portage/_emirrordist/FetchIterator.py | 147
-rw-r--r--  pym/portage/_emirrordist/FetchTask.py | 629
-rw-r--r--  pym/portage/_emirrordist/MirrorDistTask.py | 219
-rw-r--r--  pym/portage/_emirrordist/__init__.py | 2
-rw-r--r--  pym/portage/_emirrordist/main.py | 463
-rw-r--r--  pym/portage/_global_updates.py | 238
-rw-r--r--  pym/portage/_legacy_globals.py | 3
-rw-r--r--  pym/portage/_selinux.py | 55
-rw-r--r--  pym/portage/_sets/__init__.py | 30
-rw-r--r--  pym/portage/_sets/base.py | 7
-rw-r--r--  pym/portage/_sets/dbapi.py | 111
-rw-r--r--  pym/portage/_sets/files.py | 10
-rw-r--r--  pym/portage/_sets/libs.py | 17
-rw-r--r--  pym/portage/_sets/security.py | 4
-rw-r--r--  pym/portage/cache/ebuild_xattr.py | 2
-rw-r--r--  pym/portage/cache/flat_hash.py | 32
-rw-r--r--  pym/portage/cache/flat_list.py | 134
-rw-r--r--  pym/portage/cache/fs_template.py | 6
-rw-r--r--  pym/portage/cache/mappings.py | 6
-rw-r--r--  pym/portage/cache/metadata.py | 6
-rw-r--r--  pym/portage/cache/sqlite.py | 41
-rw-r--r--  pym/portage/cache/template.py | 14
-rw-r--r--  pym/portage/checksum.py | 100
-rw-r--r--  pym/portage/const.py | 198
-rw-r--r--  pym/portage/cvstree.py | 274
-rw-r--r--  pym/portage/data.py | 76
-rw-r--r--  pym/portage/dbapi/_MergeProcess.py | 214
-rw-r--r--  pym/portage/dbapi/_SyncfsProcess.py | 53
-rw-r--r--  pym/portage/dbapi/__init__.py | 110
-rw-r--r--  pym/portage/dbapi/_expand_new_virt.py | 12
-rw-r--r--  pym/portage/dbapi/_similar_name_search.py | 57
-rw-r--r--  pym/portage/dbapi/bintree.py | 338
-rw-r--r--  pym/portage/dbapi/cpv_expand.py | 4
-rw-r--r--  pym/portage/dbapi/dep_expand.py | 6
-rw-r--r--  pym/portage/dbapi/porttree.py | 144
-rw-r--r--  pym/portage/dbapi/vartree.py | 604
-rw-r--r--  pym/portage/dbapi/virtual.py | 7
-rw-r--r--  pym/portage/debug.py | 10
-rw-r--r--  pym/portage/dep/__init__.py | 309
-rw-r--r--  pym/portage/dep/_slot_operator.py (renamed from pym/portage/dep/_slot_abi.py) | 53
-rw-r--r--  pym/portage/dep/dep_check.py | 113
-rw-r--r--  pym/portage/dispatch_conf.py | 326
-rw-r--r--  pym/portage/eapi.py | 64
-rw-r--r--  pym/portage/eclass_cache.py | 26
-rw-r--r--  pym/portage/elog/__init__.py | 3
-rw-r--r--  pym/portage/elog/mod_echo.py | 3
-rw-r--r--  pym/portage/elog/mod_save.py | 24
-rw-r--r--  pym/portage/elog/mod_save_summary.py | 40
-rw-r--r--  pym/portage/elog/mod_syslog.py | 13
-rw-r--r--  pym/portage/emaint/__init__.py | 4
-rw-r--r--  pym/portage/emaint/defaults.py | 11
-rw-r--r--  pym/portage/emaint/main.py | 157
-rw-r--r--  pym/portage/emaint/module.py | 8
-rw-r--r--  pym/portage/emaint/modules/__init__.py | 4
-rw-r--r--  pym/portage/emaint/modules/binhost/__init__.py | 8
-rw-r--r--  pym/portage/emaint/modules/binhost/binhost.py | 12
-rw-r--r--  pym/portage/emaint/modules/config/__init__.py | 8
-rw-r--r--  pym/portage/emaint/modules/config/config.py | 66
-rw-r--r--  pym/portage/emaint/modules/logs/__init__.py | 22
-rw-r--r--  pym/portage/emaint/modules/logs/logs.py | 17
-rw-r--r--  pym/portage/emaint/modules/move/__init__.py | 9
-rw-r--r--  pym/portage/emaint/modules/move/move.py | 42
-rw-r--r--  pym/portage/emaint/modules/resume/__init__.py | 6
-rw-r--r--  pym/portage/emaint/modules/world/__init__.py | 8
-rw-r--r--  pym/portage/env/loaders.py | 26
-rw-r--r--  pym/portage/exception.py | 54
-rw-r--r--  pym/portage/getbinpkg.py | 255
-rw-r--r--  pym/portage/glsa.py | 313
-rw-r--r--  pym/portage/localization.py | 17
-rw-r--r--  pym/portage/locks.py | 104
-rw-r--r--  pym/portage/mail.py | 7
-rw-r--r--  pym/portage/manifest.py | 114
-rw-r--r--  pym/portage/news.py | 10
-rw-r--r--  pym/portage/output.py | 43
-rw-r--r--  pym/portage/package/ebuild/_config/KeywordsManager.py | 56
-rw-r--r--  pym/portage/package/ebuild/_config/LocationsManager.py | 135
-rw-r--r--  pym/portage/package/ebuild/_config/MaskManager.py | 33
-rw-r--r--  pym/portage/package/ebuild/_config/UseManager.py | 290
-rw-r--r--  pym/portage/package/ebuild/_config/special_env_vars.py | 56
-rw-r--r--  pym/portage/package/ebuild/_config/unpack_dependencies.py | 38
-rw-r--r--  pym/portage/package/ebuild/_ipc/QueryCommand.py | 91
-rw-r--r--  pym/portage/package/ebuild/_metadata_invalid.py (renamed from pym/portage/package/ebuild/_eapi_invalid.py) | 13
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py | 43
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py | 93
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py | 186
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/__init__.py | 2
-rw-r--r--  pym/portage/package/ebuild/_spawn_nofetch.py | 23
-rw-r--r--  pym/portage/package/ebuild/config.py | 610
-rw-r--r--  pym/portage/package/ebuild/deprecated_profile_check.py | 63
-rw-r--r--  pym/portage/package/ebuild/digestcheck.py | 15
-rw-r--r--  pym/portage/package/ebuild/digestgen.py | 107
-rw-r--r--  pym/portage/package/ebuild/doebuild.py | 546
-rw-r--r--  pym/portage/package/ebuild/fetch.py | 84
-rw-r--r--  pym/portage/package/ebuild/getmaskingreason.py | 30
-rw-r--r--  pym/portage/package/ebuild/getmaskingstatus.py | 32
-rw-r--r--  pym/portage/package/ebuild/prepare_build_dirs.py | 8
-rw-r--r--  pym/portage/process.py | 333
-rw-r--r--  pym/portage/proxy/lazyimport.py | 5
-rw-r--r--  pym/portage/proxy/objectproxy.py | 9
-rw-r--r--  pym/portage/repository/config.py | 552
-rw-r--r--  pym/portage/tests/__init__.py | 93
-rw-r--r--  pym/portage/tests/bin/setup_env.py | 54
-rw-r--r--  pym/portage/tests/dbapi/test_fakedbapi.py | 10
-rw-r--r--  pym/portage/tests/dbapi/test_portdb_cache.py | 183
-rw-r--r--  pym/portage/tests/dep/testAtom.py | 267
-rw-r--r--  pym/portage/tests/dep/testCheckRequiredUse.py | 192
-rw-r--r--  pym/portage/tests/dep/testStandalone.py | 26
-rw-r--r--  pym/portage/tests/dep/test_best_match_to_list.py | 44
-rw-r--r--  pym/portage/tests/dep/test_dep_getcpv.py | 16
-rw-r--r--  pym/portage/tests/dep/test_dep_getrepo.py | 6
-rw-r--r--  pym/portage/tests/dep/test_dep_getslot.py | 10
-rw-r--r--  pym/portage/tests/dep/test_dep_getusedeps.py | 12
-rw-r--r--  pym/portage/tests/dep/test_get_operator.py | 24
-rw-r--r--  pym/portage/tests/dep/test_get_required_use_flags.py | 4
-rw-r--r--  pym/portage/tests/dep/test_isjustname.py | 14
-rw-r--r--  pym/portage/tests/dep/test_isvalidatom.py | 13
-rw-r--r--  pym/portage/tests/dep/test_match_from_list.py | 136
-rw-r--r--  pym/portage/tests/dep/test_paren_reduce.py | 61
-rw-r--r--  pym/portage/tests/dep/test_use_reduce.py | 519
-rw-r--r--  pym/portage/tests/ebuild/test_config.py | 27
-rw-r--r--  pym/portage/tests/ebuild/test_doebuild_fd_pipes.py | 137
-rw-r--r--  pym/portage/tests/ebuild/test_doebuild_spawn.py | 46
-rw-r--r--  pym/portage/tests/ebuild/test_ipc_daemon.py | 78
-rw-r--r--  pym/portage/tests/ebuild/test_spawn.py | 15
-rw-r--r--  pym/portage/tests/emerge/test_emerge_slot_abi.py | 30
-rw-r--r--  pym/portage/tests/emerge/test_simple.py | 116
-rw-r--r--  pym/portage/tests/env/config/test_PackageKeywordsFile.py | 8
-rw-r--r--  pym/portage/tests/env/config/test_PackageUseFile.py | 6
-rw-r--r--  pym/portage/tests/env/config/test_PortageModulesFile.py | 11
-rw-r--r--  pym/portage/tests/glsa/__init__.py | 2
-rw-r--r--  pym/portage/tests/glsa/__test__ | 0
-rw-r--r--  pym/portage/tests/glsa/test_security_set.py | 144
-rw-r--r--  pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py | 4
-rw-r--r--  pym/portage/tests/lint/test_bash_syntax.py | 26
-rw-r--r--  pym/portage/tests/lint/test_compile_modules.py | 32
-rw-r--r--  pym/portage/tests/lint/test_import_modules.py | 2
-rw-r--r--  pym/portage/tests/locks/test_asynchronous_lock.py | 10
-rw-r--r--  pym/portage/tests/process/test_PopenProcess.py | 85
-rw-r--r--  pym/portage/tests/process/test_PopenProcessBlockingIO.py | 63
-rw-r--r--  pym/portage/tests/process/test_poll.py | 35
-rw-r--r--  pym/portage/tests/repoman/test_echangelog.py | 6
-rw-r--r--  pym/portage/tests/repoman/test_simple.py | 83
-rw-r--r--  pym/portage/tests/resolver/ResolverPlayground.py | 390
-rw-r--r--  pym/portage/tests/resolver/test_autounmask.py | 304
-rw-r--r--  pym/portage/tests/resolver/test_autounmask_multilib_use.py | 85
-rw-r--r--  pym/portage/tests/resolver/test_backtracking.py | 48
-rw-r--r--  pym/portage/tests/resolver/test_blocker.py | 48
-rw-r--r--  pym/portage/tests/resolver/test_complete_graph.py | 4
-rw-r--r--  pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py | 74
-rw-r--r--  pym/portage/tests/resolver/test_depclean.py | 100
-rw-r--r--  pym/portage/tests/resolver/test_depclean_order.py | 57
-rw-r--r--  pym/portage/tests/resolver/test_depclean_slot_unavailable.py | 78
-rw-r--r--  pym/portage/tests/resolver/test_features_test_use.py | 68
-rw-r--r--  pym/portage/tests/resolver/test_merge_order.py | 35
-rw-r--r--  pym/portage/tests/resolver/test_multirepo.py | 88
-rw-r--r--  pym/portage/tests/resolver/test_onlydeps.py | 34
-rw-r--r--  pym/portage/tests/resolver/test_or_choices.py | 134
-rw-r--r--  pym/portage/tests/resolver/test_package_tracker.py | 261
-rw-r--r--  pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py | 59
-rw-r--r--  pym/portage/tests/resolver/test_slot_abi.py | 111
-rw-r--r--  pym/portage/tests/resolver/test_slot_abi_downgrade.py | 8
-rw-r--r--  pym/portage/tests/resolver/test_slot_change_without_revbump.py | 69
-rw-r--r--  pym/portage/tests/resolver/test_slot_collisions.py | 106
-rw-r--r--  pym/portage/tests/resolver/test_slot_conflict_mask_update.py | 41
-rw-r--r--  pym/portage/tests/resolver/test_slot_conflict_rebuild.py | 408
-rw-r--r--  pym/portage/tests/resolver/test_slot_conflict_update.py | 98
-rw-r--r--  pym/portage/tests/resolver/test_slot_operator_autounmask.py | 120
-rw-r--r--  pym/portage/tests/resolver/test_slot_operator_unsatisfied.py | 70
-rw-r--r--  pym/portage/tests/resolver/test_slot_operator_unsolved.py | 88
-rw-r--r--  pym/portage/tests/resolver/test_targetroot.py | 85
-rw-r--r--  pym/portage/tests/resolver/test_unpack_dependencies.py | 65
-rw-r--r--  pym/portage/tests/resolver/test_use_aliases.py | 131
-rw-r--r--  pym/portage/tests/resolver/test_useflags.py | 78
-rwxr-xr-x  pym/portage/tests/runTests | 19
-rw-r--r--  pym/portage/tests/unicode/test_string_format.py | 52
-rw-r--r--  pym/portage/tests/update/test_move_ent.py | 6
-rw-r--r--  pym/portage/tests/update/test_move_slot_ent.py | 6
-rw-r--r--  pym/portage/tests/update/test_update_dbentry.py | 101
-rw-r--r--  pym/portage/tests/util/test_getconfig.py | 31
-rw-r--r--  pym/portage/tests/util/test_stackDictList.py | 12
-rw-r--r--  pym/portage/tests/util/test_stackDicts.py | 41
-rw-r--r--  pym/portage/tests/util/test_stackLists.py | 18
-rw-r--r--  pym/portage/tests/util/test_uniqueArray.py | 14
-rw-r--r--  pym/portage/tests/util/test_varExpand.py | 80
-rw-r--r--  pym/portage/tests/util/test_whirlpool.py | 4
-rw-r--r--  pym/portage/tests/versions/test_cpv_sort_key.py | 7
-rw-r--r--  pym/portage/tests/versions/test_vercmp.py | 38
-rw-r--r--  pym/portage/update.py | 137
-rw-r--r--  pym/portage/util/ExtractKernelVersion.py | 6
-rw-r--r--  pym/portage/util/SlotObject.py | 1
-rw-r--r--  pym/portage/util/_ShelveUnicodeWrapper.py | 45
-rw-r--r--  pym/portage/util/__init__.py | 394
-rw-r--r--  pym/portage/util/_argparse.py | 42
-rw-r--r--  pym/portage/util/_async/AsyncScheduler.py | 102
-rw-r--r--  pym/portage/util/_async/FileCopier.py | 17
-rw-r--r--  pym/portage/util/_async/FileDigester.py | 73
-rw-r--r--  pym/portage/util/_async/ForkProcess.py | 65
-rw-r--r--  pym/portage/util/_async/PipeLogger.py | 163
-rw-r--r--  pym/portage/util/_async/PipeReaderBlockingIO.py | 91
-rw-r--r--  pym/portage/util/_async/PopenProcess.py | 33
-rw-r--r--  pym/portage/util/_async/SchedulerInterface.py | 79
-rw-r--r--  pym/portage/util/_async/TaskScheduler.py | 20
-rw-r--r--  pym/portage/util/_async/__init__.py | 2
-rw-r--r--  pym/portage/util/_async/run_main_scheduler.py | 41
-rw-r--r--  pym/portage/util/_ctypes.py | 47
-rw-r--r--  pym/portage/util/_desktop_entry.py | 85
-rw-r--r--  pym/portage/util/_dyn_libs/LinkageMapELF.py | 24
-rw-r--r--  pym/portage/util/_dyn_libs/PreservedLibsRegistry.py | 3
-rw-r--r--  pym/portage/util/_dyn_libs/display_preserved_libs.py | 98
-rw-r--r--  pym/portage/util/_eventloop/EventLoop.py | 364
-rw-r--r--  pym/portage/util/_eventloop/PollSelectAdapter.py | 2
-rw-r--r--  pym/portage/util/_get_vm_info.py | 80
-rw-r--r--  pym/portage/util/_info_files.py | 138
-rw-r--r--  pym/portage/util/_path.py | 27
-rw-r--r--  pym/portage/util/_urlopen.py | 102
-rw-r--r--  pym/portage/util/digraph.py | 46
-rw-r--r--  pym/portage/util/env_update.py | 78
-rw-r--r--  pym/portage/util/lafilefixer.py | 10
-rw-r--r--  pym/portage/util/listdir.py | 128
-rw-r--r--  pym/portage/util/movefile.py | 220
-rw-r--r--  pym/portage/util/whirlpool.py | 2
-rw-r--r--  pym/portage/util/writeable_check.py | 79
-rw-r--r--  pym/portage/versions.py | 85
-rw-r--r--  pym/portage/xml/metadata.py | 15
-rw-r--r--  pym/portage/xpak.py | 8
-rw-r--r--  pym/repoman/checks.py | 235
-rw-r--r--  pym/repoman/errors.py | 6
-rw-r--r--  pym/repoman/herdbase.py | 11
-rw-r--r--  pym/repoman/utilities.py | 146
299 files changed, 20988 insertions, 8739 deletions
diff --git a/pym/_emerge/AbstractDepPriority.py b/pym/_emerge/AbstractDepPriority.py
index 94f26efc5..1fcd04345 100644
--- a/pym/_emerge/AbstractDepPriority.py
+++ b/pym/_emerge/AbstractDepPriority.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
from portage.util.SlotObject import SlotObject
class AbstractDepPriority(SlotObject):
- __slots__ = ("buildtime", "runtime", "runtime_post")
+ __slots__ = ("buildtime", "buildtime_slot_op",
+ "runtime", "runtime_post", "runtime_slot_op")
def __lt__(self, other):
return self.__int__() < other
diff --git a/pym/_emerge/AbstractEbuildProcess.py b/pym/_emerge/AbstractEbuildProcess.py
index c7b8f83ca..31127f474 100644
--- a/pym/_emerge/AbstractEbuildProcess.py
+++ b/pym/_emerge/AbstractEbuildProcess.py
@@ -1,8 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import platform
import stat
+import subprocess
import textwrap
from _emerge.SpawnProcess import SpawnProcess
from _emerge.EbuildBuildDir import EbuildBuildDir
@@ -20,8 +22,10 @@ class AbstractEbuildProcess(SpawnProcess):
__slots__ = ('phase', 'settings',) + \
('_build_dir', '_ipc_daemon', '_exit_command', '_exit_timeout_id')
+
_phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
_phases_interactive_whitelist = ('config',)
+ _phases_without_cgroup = ('preinst', 'postinst', 'prerm', 'postrm', 'config')
# Number of milliseconds to allow natural exit of the ebuild
# process after it has called the exit command via IPC. It
@@ -52,13 +56,48 @@ class AbstractEbuildProcess(SpawnProcess):
if need_builddir and \
not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
msg = _("The ebuild phase '%s' has been aborted "
- "since PORTAGE_BUILDIR does not exist: '%s'") % \
+ "since PORTAGE_BUILDDIR does not exist: '%s'") % \
(self.phase, self.settings['PORTAGE_BUILDDIR'])
self._eerror(textwrap.wrap(msg, 72))
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
+ # Check if the cgroup hierarchy is in place. If it's not, mount it.
+ if (os.geteuid() == 0 and platform.system() == 'Linux'
+ and 'cgroup' in self.settings.features
+ and self.phase not in self._phases_without_cgroup):
+ cgroup_root = '/sys/fs/cgroup'
+ cgroup_portage = os.path.join(cgroup_root, 'portage')
+ cgroup_path = os.path.join(cgroup_portage,
+ '%s:%s' % (self.settings["CATEGORY"],
+ self.settings["PF"]))
+ try:
+ # cgroup tmpfs
+ if not os.path.ismount(cgroup_root):
+ # we expect /sys/fs to be there already
+ if not os.path.isdir(cgroup_root):
+ os.mkdir(cgroup_root, 0o755)
+ subprocess.check_call(['mount', '-t', 'tmpfs',
+ '-o', 'rw,nosuid,nodev,noexec,mode=0755',
+ 'tmpfs', cgroup_root])
+
+ # portage subsystem
+ if not os.path.ismount(cgroup_portage):
+ if not os.path.isdir(cgroup_portage):
+ os.mkdir(cgroup_portage, 0o755)
+ subprocess.check_call(['mount', '-t', 'cgroup',
+ '-o', 'rw,nosuid,nodev,noexec,none,name=portage',
+ 'tmpfs', cgroup_portage])
+
+ # the ebuild cgroup
+ if not os.path.isdir(cgroup_path):
+ os.mkdir(cgroup_path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ self.cgroup = cgroup_path
+
if self.background:
# Automatically prevent color codes from showing up in logs,
# since we're not displaying to a terminal anyway.
@@ -67,7 +106,7 @@ class AbstractEbuildProcess(SpawnProcess):
if self._enable_ipc_daemon:
self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
if self.phase not in self._phases_without_builddir:
- if 'PORTAGE_BUILDIR_LOCKED' not in self.settings:
+ if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings:
self._build_dir = EbuildBuildDir(
scheduler=self.scheduler, settings=self.settings)
self._build_dir.lock()
@@ -143,9 +182,14 @@ class AbstractEbuildProcess(SpawnProcess):
self._exit_command.reply_hook = self._exit_command_callback
query_command = QueryCommand(self.settings, self.phase)
commands = {
- 'best_version' : query_command,
- 'exit' : self._exit_command,
- 'has_version' : query_command,
+ 'available_eclasses' : query_command,
+ 'best_version' : query_command,
+ 'eclass_path' : query_command,
+ 'exit' : self._exit_command,
+ 'has_version' : query_command,
+ 'license_path' : query_command,
+ 'master_repositories' : query_command,
+ 'repository_path' : query_command,
}
input_fifo, output_fifo = self._init_ipc_fifos()
self._ipc_daemon = EbuildIpcDaemon(commands=commands,
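
The cgroup setup above follows a check-then-mount pattern: probe each mount point with os.path.ismount, create it if missing, and mount on top. A minimal standalone sketch of that pattern (not part of the commit; paths as in the hunk, error handling reduced to a None return):

    import os
    import subprocess

    def ensure_cgroup(category, pf, root='/sys/fs/cgroup'):
        """Mount the portage cgroup hierarchy if absent; return the
        ebuild's cgroup path, or None if setup fails."""
        portage_dir = os.path.join(root, 'portage')
        cgroup_path = os.path.join(portage_dir, '%s:%s' % (category, pf))
        try:
            if not os.path.ismount(root):
                if not os.path.isdir(root):
                    os.mkdir(root, 0o755)
                subprocess.check_call(['mount', '-t', 'tmpfs',
                    '-o', 'rw,nosuid,nodev,noexec,mode=0755',
                    'tmpfs', root])
            if not os.path.ismount(portage_dir):
                if not os.path.isdir(portage_dir):
                    os.mkdir(portage_dir, 0o755)
                subprocess.check_call(['mount', '-t', 'cgroup',
                    '-o', 'rw,nosuid,nodev,noexec,none,name=portage',
                    'tmpfs', portage_dir])
            if not os.path.isdir(cgroup_path):
                os.mkdir(cgroup_path)
        except (subprocess.CalledProcessError, OSError):
            return None
        return cgroup_path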
diff --git a/pym/_emerge/AbstractPollTask.py b/pym/_emerge/AbstractPollTask.py
index 2c8470925..3f6dd6cef 100644
--- a/pym/_emerge/AbstractPollTask.py
+++ b/pym/_emerge/AbstractPollTask.py
@@ -151,4 +151,4 @@ class AbstractPollTask(AsynchronousTask):
while self._registered and not timeout_cb.timed_out:
self.scheduler.iteration()
finally:
- self.scheduler.unregister(timeout_cb.timeout_id)
+ self.scheduler.source_remove(timeout_cb.timeout_id)
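
This hunk, like several below, renames the internal EventLoop methods to their GLib counterparts: register becomes io_add_watch, unregister becomes source_remove, and idle_add is introduced. A usage sketch, assuming the module path and the method names exactly as they appear in these hunks:

    import os
    from portage.util._eventloop.EventLoop import EventLoop

    loop = EventLoop()
    r, w = os.pipe()

    def on_ready(fd, event):
        os.read(fd, 1)
        return True  # returning True keeps the watch installed

    source_id = loop.io_add_watch(r, loop.IO_IN, on_ready)
    os.write(w, b'\0')
    loop.iteration()               # dispatches on_ready
    loop.source_remove(source_id)  # detaches the fd watch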
diff --git a/pym/_emerge/AsynchronousLock.py b/pym/_emerge/AsynchronousLock.py
index 587aa4650..c0b9b26dc 100644
--- a/pym/_emerge/AsynchronousLock.py
+++ b/pym/_emerge/AsynchronousLock.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import dummy_threading
@@ -49,7 +49,7 @@ class AsynchronousLock(AsynchronousTask):
pass
else:
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
if self._force_process or \
@@ -105,44 +105,27 @@ class _LockThread(AbstractPollTask):
"""
__slots__ = ('path',) + \
- ('_files', '_force_dummy', '_lock_obj',
- '_thread', '_reg_id',)
+ ('_force_dummy', '_lock_obj', '_thread',)
def _start(self):
- pr, pw = os.pipe()
- self._files = {}
- self._files['pipe_read'] = pr
- self._files['pipe_write'] = pw
- for f in self._files.values():
- fcntl.fcntl(f, fcntl.F_SETFL,
- fcntl.fcntl(f, fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(self._files['pipe_read'],
- self.scheduler.IO_IN, self._output_handler)
self._registered = True
threading_mod = threading
if self._force_dummy:
threading_mod = dummy_threading
self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.daemon = True
self._thread.start()
def _run_lock(self):
self._lock_obj = lockfile(self.path, wantnewlockfile=True)
- os.write(self._files['pipe_write'], b'\0')
-
- def _output_handler(self, f, event):
- buf = None
- if event & self.scheduler.IO_IN:
- try:
- buf = os.read(self._files['pipe_read'], self._bufsize)
- except OSError as e:
- if e.errno not in (errno.EAGAIN,):
- raise
- if buf:
- self._unregister()
- self.returncode = os.EX_OK
- self.wait()
+ # Thread-safe callback to EventLoop
+ self.scheduler.idle_add(self._run_lock_cb)
- return True
+ def _run_lock_cb(self):
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+ return False
def _cancel(self):
# There's currently no way to force thread termination.
@@ -163,15 +146,6 @@ class _LockThread(AbstractPollTask):
self._thread.join()
self._thread = None
- if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
- self._reg_id = None
-
- if self._files is not None:
- for f in self._files.values():
- os.close(f)
- self._files = None
-
class _LockProcess(AbstractPollTask):
"""
This uses the portage.locks module to acquire a lock asynchronously,
@@ -190,16 +164,28 @@ class _LockProcess(AbstractPollTask):
self._files = {}
self._files['pipe_in'] = in_pr
self._files['pipe_out'] = out_pw
+
fcntl.fcntl(in_pr, fcntl.F_SETFL,
fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(in_pr,
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(in_pr, fcntl.F_SETFD,
+ fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(in_pr,
self.scheduler.IO_IN, self._output_handler)
self._registered = True
self._proc = SpawnProcess(
args=[portage._python_interpreter,
os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
- fd_pipes={0:out_pr, 1:in_pw, 2:sys.stderr.fileno()},
+ fd_pipes={0:out_pr, 1:in_pw, 2:sys.__stderr__.fileno()},
scheduler=self.scheduler)
self._proc.addExitListener(self._proc_exit)
self._proc.start()
@@ -273,7 +259,7 @@ class _LockProcess(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
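
The pipe here also gains a close-on-exec guard so the lock-helper's fd does not leak into other spawned children. A reusable sketch of that guard (helper name is not from the commit):

    import fcntl
    import sys

    def set_cloexec(fd):
        """Mark fd close-on-exec. Python >= 3.4 creates pipe fds with
        FD_CLOEXEC already set (PEP 446), so this only matters on older
        interpreters, and only where fcntl defines the flag."""
        if sys.hexversion >= 0x3040000:
            return
        if hasattr(fcntl, 'FD_CLOEXEC'):
            fcntl.fcntl(fd, fcntl.F_SETFD,
                fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)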
diff --git a/pym/_emerge/AsynchronousTask.py b/pym/_emerge/AsynchronousTask.py
index 7a193ce7d..da58261db 100644
--- a/pym/_emerge/AsynchronousTask.py
+++ b/pym/_emerge/AsynchronousTask.py
@@ -60,6 +60,20 @@ class AsynchronousTask(SlotObject):
def _wait(self):
return self.returncode
+ def _async_wait(self):
+ """
+ For cases where _start exits synchronously, this method is a
+ convenient way to trigger an asynchronous call to self.wait()
+ (in order to notify exit listeners), avoiding excessive event
+ loop recursion (or stack overflow) that synchronous calling of
+ exit listeners can cause. This method is thread-safe.
+ """
+ self.scheduler.idle_add(self._async_wait_cb)
+
+ def _async_wait_cb(self):
+ self.wait()
+ return False
+
def cancel(self):
"""
Cancel the task, but do not wait for exit status. If asynchronous exit
diff --git a/pym/_emerge/Binpkg.py b/pym/_emerge/Binpkg.py
index ea8a1ad13..a740efdb9 100644
--- a/pym/_emerge/Binpkg.py
+++ b/pym/_emerge/Binpkg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildPhase import EbuildPhase
@@ -298,6 +298,7 @@ class Binpkg(CompositeTask):
extractor = BinpkgExtractorAsync(background=self.background,
env=self.settings.environ(),
+ features=self.settings.features,
image_dir=self._image_dir,
pkg=self.pkg, pkg_path=self._pkg_path,
logfile=self.settings.get("PORTAGE_LOG_FILE"),
@@ -328,11 +329,13 @@ class Binpkg(CompositeTask):
self.wait()
return
+ env = self.settings.environ()
+ env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
chpathtool = SpawnProcess(
args=[portage._python_interpreter,
os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
- background=self.background, env=self.settings.environ(),
+ background=self.background, env=env,
scheduler=self.scheduler,
logfile=self.settings.get('PORTAGE_LOG_FILE'))
self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
diff --git a/pym/_emerge/BinpkgExtractorAsync.py b/pym/_emerge/BinpkgExtractorAsync.py
index f25cbf933..be74c2fb7 100644
--- a/pym/_emerge/BinpkgExtractorAsync.py
+++ b/pym/_emerge/BinpkgExtractorAsync.py
@@ -1,23 +1,31 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SpawnProcess import SpawnProcess
import portage
import signal
+import subprocess
class BinpkgExtractorAsync(SpawnProcess):
- __slots__ = ("image_dir", "pkg", "pkg_path")
+ __slots__ = ("features", "image_dir", "pkg", "pkg_path")
_shell_binary = portage.const.BASH_BINARY
def _start(self):
+ tar_options = ""
+ if "xattr" in self.features:
+ process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = process.communicate()[0]
+ if b"--xattrs" in output:
+ tar_options = "--xattrs"
+
# Add -q to bzip2 opts, in order to avoid "trailing garbage after
# EOF ignored" warning messages due to xpak trailer.
# SIGPIPE handling (128 + SIGPIPE) should be compatible with
# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
self.args = [self._shell_binary, "-c",
- ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp -C %s -f - ; " + \
+ ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp %s -C %s -f - ; " + \
"p=(${PIPESTATUS[@]}) ; " + \
"if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
"echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
@@ -25,6 +33,7 @@ class BinpkgExtractorAsync(SpawnProcess):
"echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
"exit 0 ;") % \
(portage._shell_quote(self.pkg_path),
+ tar_options,
portage._shell_quote(self.image_dir))]
SpawnProcess._start(self)
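
Support for tar's --xattrs flag is feature-detected by grepping the help text rather than parsing a version string. The probe, isolated (function name is not from the commit):

    import subprocess

    def tar_supports_xattrs():
        """True if the system tar advertises --xattrs in its help output."""
        proc = subprocess.Popen(["tar", "--help"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = proc.communicate()[0]
        return b"--xattrs" in output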
diff --git a/pym/_emerge/BinpkgFetcher.py b/pym/_emerge/BinpkgFetcher.py
index f415e2ec7..543881ee6 100644
--- a/pym/_emerge/BinpkgFetcher.py
+++ b/pym/_emerge/BinpkgFetcher.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AsynchronousLock import AsynchronousLock
@@ -63,7 +63,7 @@ class BinpkgFetcher(SpawnProcess):
if pretend:
portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
protocol = urllib_parse_urlparse(uri)[0]
@@ -80,6 +80,12 @@ class BinpkgFetcher(SpawnProcess):
"FILE" : os.path.basename(pkg_path)
}
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = settings[k]
+ except KeyError:
+ pass
+
fetch_env = dict(settings.items())
fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
for x in portage.util.shlex_split(fcmd)]
@@ -91,9 +97,9 @@ class BinpkgFetcher(SpawnProcess):
# Redirect all output to stdout since some fetchers like
# wget pollute stderr (if portage detects a problem then it
# can send it's own message to stderr).
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stdout.fileno())
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stdout__.fileno())
self.args = fetch_args
self.env = fetch_env
@@ -104,7 +110,7 @@ class BinpkgFetcher(SpawnProcess):
def _pipe(self, fd_pipes):
"""When appropriate, use a pty so that fetcher progress bars,
like wget has, will work properly."""
- if self.background or not sys.stdout.isatty():
+ if self.background or not sys.__stdout__.isatty():
# When the output only goes to a log file,
# there's no point in creating a pty.
return os.pipe()
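
Several hunks in this commit swap sys.stdout for sys.__stdout__ where a real file descriptor is needed: sys.stdout may be rebound to a file-like object with no usable fileno() (portage itself does this when capturing output), while sys.__stdout__ keeps the interpreter's original stream. A small illustration of the fallback (helper name is not from the commit):

    import sys

    def stdout_fileno():
        """Prefer the current stream, but fall back to the original one
        when sys.stdout has been replaced by an fd-less object."""
        try:
            return sys.stdout.fileno()
        except (AttributeError, ValueError):
            return sys.__stdout__.fileno()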
diff --git a/pym/_emerge/BinpkgVerifier.py b/pym/_emerge/BinpkgVerifier.py
index 0052967f6..2c6979265 100644
--- a/pym/_emerge/BinpkgVerifier.py
+++ b/pym/_emerge/BinpkgVerifier.py
@@ -1,75 +1,120 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.AsynchronousTask import AsynchronousTask
-from portage.util import writemsg
+import errno
import io
import sys
+
+from _emerge.CompositeTask import CompositeTask
import portage
from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.output import EOutput
+from portage.util._async.FileDigester import FileDigester
from portage.package.ebuild.fetch import _checksum_failure_temp_file
-class BinpkgVerifier(AsynchronousTask):
- __slots__ = ("logfile", "pkg", "scheduler")
+class BinpkgVerifier(CompositeTask):
+ __slots__ = ("logfile", "pkg", "_digests", "_pkg_path")
def _start(self):
- """
- Note: Unlike a normal AsynchronousTask.start() method,
- this one does all work is synchronously. The returncode
- attribute will be set before it returns.
- """
-
- pkg = self.pkg
- root_config = pkg.root_config
- bintree = root_config.trees["bintree"]
- rval = os.EX_OK
+
+ bintree = self.pkg.root_config.trees["bintree"]
+ digests = bintree._get_digests(self.pkg)
+ if "size" not in digests:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ digests = _filter_unaccelarated_hashes(digests)
+ hash_filter = _hash_filter(
+ bintree.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+
+ self._digests = digests
+ self._pkg_path = bintree.getname(self.pkg.cpv)
+
+ try:
+ size = os.stat(self._pkg_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ self.scheduler.output(("!!! Fetching Binary failed "
+ "for '%s'\n") % self.pkg.cpv, log_path=self.logfile,
+ background=self.background)
+ self.returncode = 1
+ self._async_wait()
+ return
+ else:
+ if size != digests["size"]:
+ self._digest_exception("size", size, digests["size"])
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ self._start_task(FileDigester(file_path=self._pkg_path,
+ hash_names=(k for k in digests if k != "size"),
+ background=self.background, logfile=self.logfile,
+ scheduler=self.scheduler),
+ self._digester_exit)
+
+ def _digester_exit(self, digester):
+
+ if self._default_exit(digester) != os.EX_OK:
+ self.wait()
+ return
+
+ for hash_name in digester.hash_names:
+ if digester.digests[hash_name] != self._digests[hash_name]:
+ self._digest_exception(hash_name,
+ digester.digests[hash_name], self._digests[hash_name])
+ self.returncode = 1
+ self.wait()
+ return
+
+ if self.pkg.root_config.settings.get("PORTAGE_QUIET") != "1":
+ self._display_success()
+
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _display_success(self):
stdout_orig = sys.stdout
stderr_orig = sys.stderr
global_havecolor = portage.output.havecolor
out = io.StringIO()
- file_exists = True
try:
sys.stdout = out
sys.stderr = out
if portage.output.havecolor:
portage.output.havecolor = not self.background
- try:
- bintree.digestCheck(pkg)
- except portage.exception.FileNotFound:
- writemsg("!!! Fetching Binary failed " + \
- "for '%s'\n" % pkg.cpv, noiselevel=-1)
- rval = 1
- file_exists = False
- except portage.exception.DigestException as e:
- writemsg("\n!!! Digest verification failed:\n",
- noiselevel=-1)
- writemsg("!!! %s\n" % e.value[0],
- noiselevel=-1)
- writemsg("!!! Reason: %s\n" % e.value[1],
- noiselevel=-1)
- writemsg("!!! Got: %s\n" % e.value[2],
- noiselevel=-1)
- writemsg("!!! Expected: %s\n" % e.value[3],
- noiselevel=-1)
- rval = 1
- if rval == os.EX_OK:
- pass
- elif file_exists:
- pkg_path = bintree.getname(pkg.cpv)
- head, tail = os.path.split(pkg_path)
- temp_filename = _checksum_failure_temp_file(head, tail)
- writemsg("File renamed to '%s'\n" % (temp_filename,),
- noiselevel=-1)
+
+ eout = EOutput()
+ eout.ebegin("%s %s ;-)" % (os.path.basename(self._pkg_path),
+ " ".join(sorted(self._digests))))
+ eout.eend(0)
+
finally:
sys.stdout = stdout_orig
sys.stderr = stderr_orig
portage.output.havecolor = global_havecolor
- msg = out.getvalue()
- if msg:
- self.scheduler.output(msg, log_path=self.logfile,
- background=self.background)
+ self.scheduler.output(out.getvalue(), log_path=self.logfile,
+ background=self.background)
- self.returncode = rval
- self.wait()
+ def _digest_exception(self, name, value, expected):
+
+ head, tail = os.path.split(self._pkg_path)
+ temp_filename = _checksum_failure_temp_file(head, tail)
+ self.scheduler.output((
+ "\n!!! Digest verification failed:\n"
+ "!!! %s\n"
+ "!!! Reason: Failed on %s verification\n"
+ "!!! Got: %s\n"
+ "!!! Expected: %s\n"
+ "File renamed to '%s'\n") %
+ (self._pkg_path, name, value, expected, temp_filename),
+ log_path=self.logfile,
+ background=self.background)
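
The rewrite checks the cheap size digest first and only then hands the remaining hashes to an asynchronous FileDigester. The same flow done synchronously with hashlib (a sketch; only hash names hashlib recognizes work here, while portage-specific hashes such as WHIRLPOOL go through portage.checksum instead):

    import hashlib
    import os

    def verify(pkg_path, digests):
        """digests: {'size': int, 'SHA1': hexdigest, ...}; returns the
        name of the first failing check, or None on success."""
        if os.stat(pkg_path).st_size != digests["size"]:
            return "size"
        hashers = {name: hashlib.new(name.lower())
            for name in digests if name != "size"}
        with open(pkg_path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                for h in hashers.values():
                    h.update(chunk)
        for name, h in hashers.items():
            if h.hexdigest() != digests[name]:
                return name
        return None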
diff --git a/pym/_emerge/BlockerCache.py b/pym/_emerge/BlockerCache.py
index fce81f83a..53342d6d6 100644
--- a/pym/_emerge/BlockerCache.py
+++ b/pym/_emerge/BlockerCache.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -62,7 +62,9 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
self._cache_data = mypickle.load()
f.close()
del f
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
if isinstance(e, EnvironmentError) and \
getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
pass
@@ -126,9 +128,9 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
self._modified.clear()
def flush(self):
- """If the current user has permission and the internal blocker cache
+ """If the current user has permission and the internal blocker cache has
been updated, save it to disk and mark it unmodified. This is called
- by emerge after it has proccessed blockers for all installed packages.
+ by emerge after it has processed blockers for all installed packages.
Currently, the cache is only written if the user has superuser
privileges (since that's required to obtain a lock), but all users
have read access and benefit from faster blocker lookups (as long as
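
The load path now catches any Exception while explicitly re-raising SystemExit and KeyboardInterrupt, so a corrupt pickle of any kind falls back to a fresh cache without swallowing interpreter-control exceptions. The idiom, isolated (a sketch, not the commit's code):

    import pickle

    def load_blocker_cache(path):
        try:
            with open(path, "rb") as f:
                return pickle.load(f)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            # missing, unreadable, or corrupt cache: start fresh
            return None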
diff --git a/pym/_emerge/BlockerDB.py b/pym/_emerge/BlockerDB.py
index 459affdb0..8bb8f5fda 100644
--- a/pym/_emerge/BlockerDB.py
+++ b/pym/_emerge/BlockerDB.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -9,6 +9,7 @@ from portage import digraph
from portage._sets.base import InternalPackageSet
from _emerge.BlockerCache import BlockerCache
+from _emerge.Package import Package
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
if sys.hexversion >= 0x3000000:
@@ -38,7 +39,7 @@ class BlockerDB(object):
"""
blocker_cache = BlockerCache(None,
self._vartree.dbapi)
- dep_keys = ["RDEPEND", "PDEPEND"]
+ dep_keys = Package._runtime_keys
settings = self._vartree.settings
stale_cache = set(blocker_cache)
fake_vartree = self._fake_vartree
@@ -50,7 +51,7 @@ class BlockerDB(object):
stale_cache.discard(inst_pkg.cpv)
cached_blockers = blocker_cache.get(inst_pkg.cpv)
if cached_blockers is not None and \
- cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+ cached_blockers.counter != inst_pkg.counter:
cached_blockers = None
if cached_blockers is not None:
blocker_atoms = cached_blockers.atoms
@@ -71,9 +72,8 @@ class BlockerDB(object):
blocker_atoms = [atom for atom in atoms \
if atom.startswith("!")]
blocker_atoms.sort()
- counter = long(inst_pkg.metadata["COUNTER"])
blocker_cache[inst_pkg.cpv] = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(inst_pkg.counter, blocker_atoms)
for cpv in stale_cache:
del blocker_cache[cpv]
blocker_cache.flush()
@@ -92,7 +92,7 @@ class BlockerDB(object):
blocking_pkgs.update(blocker_parents.parent_nodes(atom))
# Check for blockers in the other direction.
- depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+ depstr = " ".join(new_pkg._metadata[k] for k in dep_keys)
success, atoms = portage.dep_check(depstr,
vardb, settings, myuse=new_pkg.use.enabled,
trees=dep_check_trees, myroot=new_pkg.root)
diff --git a/pym/_emerge/CompositeTask.py b/pym/_emerge/CompositeTask.py
index 3e434780b..40cf8596b 100644
--- a/pym/_emerge/CompositeTask.py
+++ b/pym/_emerge/CompositeTask.py
@@ -142,6 +142,10 @@ class CompositeTask(AsynchronousTask):
a task.
"""
+ try:
+ task.scheduler = self.scheduler
+ except AttributeError:
+ pass
task.addExitListener(exit_handler)
self._current_task = task
task.start()
diff --git a/pym/_emerge/DepPriority.py b/pym/_emerge/DepPriority.py
index 3c2256a8e..34fdb481c 100644
--- a/pym/_emerge/DepPriority.py
+++ b/pym/_emerge/DepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
@@ -16,31 +16,38 @@ class DepPriority(AbstractDepPriority):
Attributes Hardness
- buildtime 0
- runtime -1
- runtime_post -2
- optional -3
- (none of the above) -4
+ buildtime_slot_op 0
+ buildtime -1
+ runtime -2
+ runtime_post -3
+ optional -4
+ (none of the above) -5
"""
if self.optional:
- return -3
- if self.buildtime:
+ return -4
+ if self.buildtime_slot_op:
return 0
- if self.runtime:
+ if self.buildtime:
return -1
- if self.runtime_post:
+ if self.runtime:
return -2
- return -4
+ if self.runtime_post:
+ return -3
+ return -5
def __str__(self):
if self.ignored:
return "ignored"
if self.optional:
return "optional"
+ if self.buildtime_slot_op:
+ return "buildtime_slot_op"
if self.buildtime:
return "buildtime"
+ if self.runtime_slot_op:
+ return "runtime_slot_op"
if self.runtime:
return "runtime"
if self.runtime_post:
diff --git a/pym/_emerge/DepPrioritySatisfiedRange.py b/pym/_emerge/DepPrioritySatisfiedRange.py
index edb29df96..391f5409b 100644
--- a/pym/_emerge/DepPrioritySatisfiedRange.py
+++ b/pym/_emerge/DepPrioritySatisfiedRange.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.DepPriority import DepPriority
@@ -7,17 +7,18 @@ class DepPrioritySatisfiedRange(object):
DepPriority Index Category
not satisfied and buildtime HARD
- not satisfied and runtime 6 MEDIUM
- not satisfied and runtime_post 5 MEDIUM_SOFT
+ not satisfied and runtime 7 MEDIUM
+ not satisfied and runtime_post 6 MEDIUM_SOFT
+ satisfied and buildtime_slot_op 5 SOFT
satisfied and buildtime 4 SOFT
satisfied and runtime 3 SOFT
satisfied and runtime_post 2 SOFT
optional 1 SOFT
(none of the above) 0 NONE
"""
- MEDIUM = 6
- MEDIUM_SOFT = 5
- SOFT = 4
+ MEDIUM = 7
+ MEDIUM_SOFT = 6
+ SOFT = 5
NONE = 0
@classmethod
@@ -50,6 +51,16 @@ class DepPrioritySatisfiedRange(object):
def _ignore_satisfied_buildtime(cls, priority):
if priority.__class__ is not DepPriority:
return False
+ if priority.optional:
+ return True
+ if priority.buildtime_slot_op:
+ return False
+ return bool(priority.satisfied)
+
+ @classmethod
+ def _ignore_satisfied_buildtime_slot_op(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
return bool(priority.optional or \
priority.satisfied)
@@ -80,6 +91,7 @@ DepPrioritySatisfiedRange.ignore_priority = (
DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
DepPrioritySatisfiedRange._ignore_satisfied_runtime,
DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime_slot_op,
DepPrioritySatisfiedRange._ignore_runtime_post,
DepPrioritySatisfiedRange._ignore_runtime
)
diff --git a/pym/_emerge/DependencyArg.py b/pym/_emerge/DependencyArg.py
index 80134c804..29a0072c4 100644
--- a/pym/_emerge/DependencyArg.py
+++ b/pym/_emerge/DependencyArg.py
@@ -1,9 +1,11 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
-from portage import _encodings, _unicode_encode, _unicode_decode
+from portage import _encodings, _unicode_encode
class DependencyArg(object):
@@ -31,10 +33,10 @@ class DependencyArg(object):
return hash((self.arg, self.root_config.root))
def __str__(self):
- # Force unicode format string for python-2.x safety,
+ # Use unicode_literals format string for python-2.x safety,
# ensuring that self.arg.__unicode__() is used
# when necessary.
- return _unicode_decode("%s") % (self.arg,)
+ return "%s" % (self.arg,)
if sys.hexversion < 0x3000000:
diff --git a/pym/_emerge/EbuildBuild.py b/pym/_emerge/EbuildBuild.py
index 784a3e298..e13b1cf39 100644
--- a/pym/_emerge/EbuildBuild.py
+++ b/pym/_emerge/EbuildBuild.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildExecuter import EbuildExecuter
@@ -10,11 +10,14 @@ from _emerge.EbuildMerge import EbuildMerge
from _emerge.EbuildFetchonly import EbuildFetchonly
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.TaskSequence import TaskSequence
+
from portage.util import writemsg
import portage
from portage import os
from portage.output import colorize
from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import _check_temp_dir
from portage.package.ebuild._spawn_nofetch import spawn_nofetch
@@ -35,7 +38,7 @@ class EbuildBuild(CompositeTask):
if rval != os.EX_OK:
self.returncode = rval
self._current_task = None
- self.wait()
+ self._async_wait()
return
root_config = pkg.root_config
@@ -60,7 +63,7 @@ class EbuildBuild(CompositeTask):
if not self._check_manifest():
self.returncode = 1
self._current_task = None
- self.wait()
+ self._async_wait()
return
prefetcher = self.prefetcher
@@ -91,7 +94,8 @@ class EbuildBuild(CompositeTask):
success = True
settings = self.settings
- if 'strict' in settings.features:
+ if 'strict' in settings.features and \
+ 'digest' not in settings.features:
settings['O'] = os.path.dirname(self._ebuild_path)
quiet_setting = settings.get('PORTAGE_QUIET')
settings['PORTAGE_QUIET'] = '1'
@@ -160,6 +164,10 @@ class EbuildBuild(CompositeTask):
if self.returncode != os.EX_OK:
portdb = self.pkg.root_config.trees[self._tree].dbapi
spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
+ elif 'digest' in self.settings.features:
+ if not digestgen(mysettings=self.settings,
+ myportdb=self.pkg.root_config.trees[self._tree].dbapi):
+ self.returncode = 1
self.wait()
def _pre_clean_exit(self, pre_clean_phase):
@@ -260,8 +268,8 @@ class EbuildBuild(CompositeTask):
# to be displayed for problematic packages even though they do
# not set RESTRICT=fetch (bug #336499).
- if 'fetch' not in self.pkg.metadata.restrict and \
- 'nofetch' not in self.pkg.metadata.defined_phases:
+ if 'fetch' not in self.pkg.restrict and \
+ 'nofetch' not in self.pkg.defined_phases:
self._unlock_builddir()
self.wait()
return
@@ -300,10 +308,20 @@ class EbuildBuild(CompositeTask):
self.scheduler.output(msg,
log_path=self.settings.get("PORTAGE_LOG_FILE"))
- packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
- scheduler=self.scheduler, settings=self.settings)
+ binpkg_tasks = TaskSequence()
+ requested_binpkg_formats = self.settings.get("PORTAGE_BINPKG_FORMAT", "tar").split()
+ for pkg_fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if pkg_fmt in requested_binpkg_formats:
+ if pkg_fmt == "rpm":
+ binpkg_tasks.add(EbuildPhase(background=self.background,
+ phase="rpm", scheduler=self.scheduler,
+ settings=self.settings))
+ else:
+ binpkg_tasks.add(EbuildBinpkg(background=self.background,
+ pkg=self.pkg, scheduler=self.scheduler,
+ settings=self.settings))
- self._start_task(packager, self._buildpkg_exit)
+ self._start_task(binpkg_tasks, self._buildpkg_exit)
def _buildpkg_exit(self, packager):
"""
diff --git a/pym/_emerge/EbuildBuildDir.py b/pym/_emerge/EbuildBuildDir.py
index 9773bd790..58905c2f6 100644
--- a/pym/_emerge/EbuildBuildDir.py
+++ b/pym/_emerge/EbuildBuildDir.py
@@ -7,7 +7,6 @@ import portage
from portage import os
from portage.exception import PortageException
from portage.util.SlotObject import SlotObject
-import errno
class EbuildBuildDir(SlotObject):
@@ -60,7 +59,7 @@ class EbuildBuildDir(SlotObject):
builddir_lock.wait()
self._assert_lock(builddir_lock)
self._lock_obj = builddir_lock
- self.settings['PORTAGE_BUILDIR_LOCKED'] = '1'
+ self.settings['PORTAGE_BUILDDIR_LOCKED'] = '1'
finally:
self.locked = self._lock_obj is not None
catdir_lock.unlock()
@@ -92,16 +91,14 @@ class EbuildBuildDir(SlotObject):
self._lock_obj.unlock()
self._lock_obj = None
self.locked = False
- self.settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+ self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
catdir_lock = AsynchronousLock(path=self._catdir, scheduler=self.scheduler)
catdir_lock.start()
if catdir_lock.wait() == os.EX_OK:
try:
os.rmdir(self._catdir)
- except OSError as e:
- if e.errno not in (errno.ENOENT,
- errno.ENOTEMPTY, errno.EEXIST, errno.EPERM):
- raise
+ except OSError:
+ pass
finally:
catdir_lock.unlock()
diff --git a/pym/_emerge/EbuildExecuter.py b/pym/_emerge/EbuildExecuter.py
index fd663a41d..5587d4eb0 100644
--- a/pym/_emerge/EbuildExecuter.py
+++ b/pym/_emerge/EbuildExecuter.py
@@ -16,16 +16,7 @@ class EbuildExecuter(CompositeTask):
_phases = ("prepare", "configure", "compile", "test", "install")
- _live_eclasses = frozenset([
- "bzr",
- "cvs",
- "darcs",
- "git",
- "git-2",
- "mercurial",
- "subversion",
- "tla",
- ])
+ _live_eclasses = portage.const.LIVE_ECLASSES
def _start(self):
pkg = self.pkg
@@ -83,7 +74,7 @@ class EbuildExecuter(CompositeTask):
pkg = self.pkg
phases = self._phases
- eapi = pkg.metadata["EAPI"]
+ eapi = pkg.eapi
if not eapi_has_src_prepare_and_src_configure(eapi):
# skip src_prepare and src_configure
phases = phases[2:]
diff --git a/pym/_emerge/EbuildFetcher.py b/pym/_emerge/EbuildFetcher.py
index c0a7fddaa..d98d00736 100644
--- a/pym/_emerge/EbuildFetcher.py
+++ b/pym/_emerge/EbuildFetcher.py
@@ -1,23 +1,22 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import traceback
-
-from _emerge.SpawnProcess import SpawnProcess
import copy
import io
-import signal
import sys
+
import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import _unicode_decode
+from portage.checksum import _hash_filter
from portage.elog.messages import eerror
from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._async.ForkProcess import ForkProcess
from portage.util._pty import _create_pty_or_pipe
-class EbuildFetcher(SpawnProcess):
+class EbuildFetcher(ForkProcess):
__slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
"pkg", "prefetch") + \
@@ -57,6 +56,9 @@ class EbuildFetcher(SpawnProcess):
if st.st_size != expected_size:
return False
+ hash_filter = _hash_filter(settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
stdout_orig = sys.stdout
stderr_orig = sys.stderr
global_havecolor = portage.output.havecolor
@@ -78,7 +80,7 @@ class EbuildFetcher(SpawnProcess):
break
continue
ok, st = _check_distfile(os.path.join(distdir, filename),
- mydigests, eout, show_errors=False)
+ mydigests, eout, show_errors=False, hash_filter=hash_filter)
if not ok:
success = False
break
@@ -115,13 +117,13 @@ class EbuildFetcher(SpawnProcess):
msg_lines.append(msg)
self._eerror(msg_lines)
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
if not uri_map:
# Nothing to fetch.
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
settings = self.config_pool.allocate()
@@ -133,7 +135,7 @@ class EbuildFetcher(SpawnProcess):
self._prefetch_size_ok(uri_map, settings, ebuild_path):
self.config_pool.deallocate(settings)
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
nocolor = settings.get("NOCOLOR")
@@ -148,7 +150,7 @@ class EbuildFetcher(SpawnProcess):
settings["NOCOLOR"] = nocolor
self._settings = settings
- SpawnProcess._start(self)
+ ForkProcess._start(self)
# Free settings now since it's no longer needed in
# this process (the subprocess has a private copy).
@@ -156,48 +158,20 @@ class EbuildFetcher(SpawnProcess):
settings = None
self._settings = None
- def _spawn(self, args, fd_pipes=None, **kwargs):
- """
- Fork a subprocess, apply local settings, and call fetch().
- """
-
- pid = os.fork()
- if pid != 0:
- if not isinstance(pid, int):
- raise AssertionError(
- "fork returned non-integer: %s" % (repr(pid),))
- portage.process.spawned_pids.append(pid)
- return [pid]
-
- portage.locks._close_fds()
- # Disable close_fds since we don't exec (see _setup_pipes docstring).
- portage.process._setup_pipes(fd_pipes, close_fds=False)
-
- # Use default signal handlers in order to avoid problems
- # killing subprocesses as reported in bug #353239.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
+ def _run(self):
# Force consistent color output, in case we are capturing fetch
# output through a normal pipe due to unavailability of ptys.
portage.output.havecolor = self._settings.get('NOCOLOR') \
not in ('yes', 'true')
rval = 1
- allow_missing = self._get_manifest().allow_missing
- try:
- if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
- digests=copy.deepcopy(self._get_digests()),
- allow_missing_digests=allow_missing):
- rval = os.EX_OK
- except SystemExit:
- raise
- except:
- traceback.print_exc()
- finally:
- # Call os._exit() from finally block, in order to suppress any
- # finally blocks from earlier in the call stack. See bug #345289.
- os._exit(rval)
+ allow_missing = self._get_manifest().allow_missing or \
+ 'digest' in self._settings.features
+ if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+ digests=copy.deepcopy(self._get_digests()),
+ allow_missing_digests=allow_missing):
+ rval = os.EX_OK
+ return rval
def _get_ebuild_path(self):
if self.ebuild_path is not None:
@@ -297,7 +271,7 @@ class EbuildFetcher(SpawnProcess):
self.scheduler.output(msg, log_path=self.logfile)
def _set_returncode(self, wait_retval):
- SpawnProcess._set_returncode(self, wait_retval)
+ ForkProcess._set_returncode(self, wait_retval)
# Collect elog messages that might have been
# created by the pkg_nofetch phase.
# Skip elog messages for prefetch, in order to avoid duplicates.
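
EbuildFetcher's hand-rolled _spawn is dropped in favor of subclassing ForkProcess, which owns the fork/os._exit boilerplate and calls back into _run() for the real work. The pattern being centralized, in miniature (a sketch, not the ForkProcess implementation):

    import os

    def run_in_fork(func):
        """Run func() in a child process; return its integer result,
        recovered from the child's exit status."""
        pid = os.fork()
        if pid == 0:
            rval = 1
            try:
                rval = func()
            finally:
                # _exit from a finally block suppresses any outer finally
                # blocks in the child (cf. bug #345289 in the removed code)
                os._exit(rval)
        _, status = os.waitpid(pid, 0)
        return os.WEXITSTATUS(status)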
diff --git a/pym/_emerge/EbuildMetadataPhase.py b/pym/_emerge/EbuildMetadataPhase.py
index c2d3747f7..bbb1ca9dc 100644
--- a/pym/_emerge/EbuildMetadataPhase.py
+++ b/pym/_emerge/EbuildMetadataPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SubProcess import SubProcess
@@ -6,12 +6,14 @@ import sys
from portage.cache.mappings import slot_dict_class
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.package.ebuild._eapi_invalid:eapi_invalid',
+ 'portage.package.ebuild._metadata_invalid:eapi_invalid',
)
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
+from portage.dep import extract_unpack_dependencies
+from portage.eapi import eapi_has_automatic_unpack_dependencies
import errno
import fcntl
@@ -25,12 +27,11 @@ class EbuildMetadataPhase(SubProcess):
"""
__slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
- "metadata", "portdb", "repo_path", "settings") + \
+ "metadata", "portdb", "repo_path", "settings", "write_auxdb") + \
("_eapi", "_eapi_lineno", "_raw_metadata",)
_file_names = ("ebuild",)
_files_dict = slot_dict_class(_file_names, prefix="")
- _metadata_fd = 9
def _start(self):
ebuild_path = self.ebuild_hash.location
@@ -49,14 +50,14 @@ class EbuildMetadataPhase(SubProcess):
# An empty EAPI setting is invalid.
self._eapi_invalid(None)
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
if not self.eapi_supported:
self.metadata = {"EAPI": parsed_eapi}
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
settings = self.settings
@@ -74,28 +75,41 @@ class EbuildMetadataPhase(SubProcess):
null_input = open('/dev/null', 'rb')
fd_pipes.setdefault(0, null_input.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
# flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
self._files = self._files_dict()
files = self._files
master_fd, slave_fd = os.pipe()
+
fcntl.fcntl(master_fd, fcntl.F_SETFL,
fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
- fd_pipes[self._metadata_fd] = slave_fd
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(master_fd, fcntl.F_SETFD,
+ fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ fd_pipes[slave_fd] = slave_fd
+ settings["PORTAGE_PIPE_FD"] = str(slave_fd)
self._raw_metadata = []
files.ebuild = master_fd
- self._reg_id = self.scheduler.register(files.ebuild,
+ self._reg_id = self.scheduler.io_add_watch(files.ebuild,
self._registered_events, self._output_handler)
self._registered = True
@@ -103,6 +117,7 @@ class EbuildMetadataPhase(SubProcess):
settings=settings, debug=debug,
mydbapi=self.portdb, tree="porttree",
fd_pipes=fd_pipes, returnpid=True)
+ settings.pop("PORTAGE_PIPE_FD", None)
os.close(slave_fd)
null_input.close()
@@ -111,11 +126,10 @@ class EbuildMetadataPhase(SubProcess):
# doebuild failed before spawning
self._unregister()
self._set_returncode((self.pid, retval << 8))
- self.wait()
+ self._async_wait()
return
self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
def _output_handler(self, fd, event):
@@ -141,8 +155,7 @@ class EbuildMetadataPhase(SubProcess):
def _set_returncode(self, wait_retval):
SubProcess._set_returncode(self, wait_retval)
# self._raw_metadata is None when _start returns
- # early due to an unsupported EAPI detected with
- # FEATURES=parse-eapi-ebuild-head
+ # early due to an unsupported EAPI
if self.returncode == os.EX_OK and \
self._raw_metadata is not None:
metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
@@ -163,8 +176,7 @@ class EbuildMetadataPhase(SubProcess):
if (not metadata["EAPI"] or self.eapi_supported) and \
metadata["EAPI"] != parsed_eapi:
self._eapi_invalid(metadata)
- if 'parse-eapi-ebuild-head' in self.settings.features:
- metadata_valid = False
+ metadata_valid = False
if metadata_valid:
# Since we're supposed to be able to efficiently obtain the
@@ -181,8 +193,18 @@ class EbuildMetadataPhase(SubProcess):
metadata["_eclasses_"] = {}
metadata.pop("INHERITED", None)
- self.portdb._write_cache(self.cpv,
- self.repo_path, metadata, self.ebuild_hash)
+ if eapi_has_automatic_unpack_dependencies(metadata["EAPI"]):
+ repo = self.portdb.repositories.get_name_for_location(self.repo_path)
+ unpackers = self.settings.unpack_dependencies.get(repo, {}).get(metadata["EAPI"], {})
+ unpack_dependencies = extract_unpack_dependencies(metadata["SRC_URI"], unpackers)
+ if unpack_dependencies:
+ metadata["DEPEND"] += (" " if metadata["DEPEND"] else "") + unpack_dependencies
+
+ # If called by egencache, this cache write is
+ # undesirable when metadata-transfer is disabled.
+ if self.write_auxdb is not False:
+ self.portdb._write_cache(self.cpv,
+ self.repo_path, metadata, self.ebuild_hash)
else:
metadata = {"EAPI": metadata["EAPI"]}
self.metadata = metadata
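
[note] EbuildMetadataPhase now allocates the metadata pipe dynamically instead of hard-coding fd 9: the parent's read end is made non-blocking and close-on-exec, and the write end's number is advertised to the ebuild environment through PORTAGE_PIPE_FD. A distilled sketch of that setup (env stands in for the settings mapping; this is not the literal portage code):

    import fcntl
    import os
    import sys

    def make_metadata_pipe(env):
        master_fd, slave_fd = os.pipe()
        # Parent reads without blocking while draining POLLIN events.
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        # FD_CLOEXEC is enabled by default in Python >= 3.4 (PEP 446).
        if sys.hexversion < 0x3040000 and hasattr(fcntl, 'FD_CLOEXEC'):
            fcntl.fcntl(master_fd, fcntl.F_SETFD,
                fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
        # The child locates the write end by number instead of fd 9.
        env["PORTAGE_PIPE_FD"] = str(slave_fd)
        return master_fd, slave_fd
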
diff --git a/pym/_emerge/EbuildPhase.py b/pym/_emerge/EbuildPhase.py
index fe44abcbd..b1f7c21df 100644
--- a/pym/_emerge/EbuildPhase.py
+++ b/pym/_emerge/EbuildPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import gzip
@@ -11,6 +11,7 @@ from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.EbuildProcess import EbuildProcess
from _emerge.CompositeTask import CompositeTask
+from portage.package.ebuild.prepare_build_dirs import _prepare_workdir
from portage.util import writemsg
try:
@@ -38,7 +39,7 @@ from portage import _unicode_encode
class EbuildPhase(CompositeTask):
- __slots__ = ("actionmap", "phase", "settings") + \
+ __slots__ = ("actionmap", "fd_pipes", "phase", "settings") + \
("_ebuild_lock",)
# FEATURES displayed prior to setup phase
@@ -156,8 +157,7 @@ class EbuildPhase(CompositeTask):
return
self._start_ebuild()
- def _start_ebuild(self):
-
+ def _get_log_path(self):
# Don't open the log file during the clean phase since the
# open file can result in an nfs lock on $T/build.log which
# prevents the clean phase from removing $T.
@@ -165,17 +165,21 @@ class EbuildPhase(CompositeTask):
if self.phase not in ("clean", "cleanrm") and \
self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return logfile
+
+ def _start_ebuild(self):
- fd_pipes = None
- if not self.background and self.phase == 'nofetch':
- # All the pkg_nofetch output goes to stderr since
- # it's considered to be an error message.
- fd_pipes = {1 : sys.stderr.fileno()}
+ fd_pipes = self.fd_pipes
+ if fd_pipes is None:
+ if not self.background and self.phase == 'nofetch':
+ # All the pkg_nofetch output goes to stderr since
+ # it's considered to be an error message.
+ fd_pipes = {1 : sys.__stderr__.fileno()}
ebuild_process = EbuildProcess(actionmap=self.actionmap,
- background=self.background, fd_pipes=fd_pipes, logfile=logfile,
- phase=self.phase, scheduler=self.scheduler,
- settings=self.settings)
+ background=self.background, fd_pipes=fd_pipes,
+ logfile=self._get_log_path(), phase=self.phase,
+ scheduler=self.scheduler, settings=self.settings)
self._start_task(ebuild_process, self._ebuild_exit)
@@ -189,16 +193,21 @@ class EbuildPhase(CompositeTask):
if self._default_exit(ebuild_process) != os.EX_OK:
if self.phase == "test" and \
"test-fail-continue" in self.settings.features:
- pass
+ # mark test phase as complete (bug #452030)
+ try:
+ open(_unicode_encode(os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], ".tested"),
+ encoding=_encodings['fs'], errors='strict'),
+ 'wb').close()
+ except OSError:
+ pass
else:
fail = True
if not fail:
self.returncode = None
- logfile = None
- if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
- logfile = self.settings.get("PORTAGE_LOG_FILE")
+ logfile = self._get_log_path()
if self.phase == "install":
out = io.StringIO()
@@ -213,7 +222,14 @@ class EbuildPhase(CompositeTask):
settings = self.settings
_post_phase_userpriv_perms(settings)
- if self.phase == "install":
+ if self.phase == "unpack":
+ # Bump WORKDIR timestamp, in case tar gave it a timestamp
+ # that will interfere with distfiles / WORKDIR timestamp
+ # comparisons as reported in bug #332217. Also, fix
+ # ownership since tar can change that too.
+ os.utime(settings["WORKDIR"], None)
+ _prepare_workdir(settings)
+ elif self.phase == "install":
out = io.StringIO()
_post_src_install_write_metadata(settings)
_post_src_install_uid_fix(settings, out)
@@ -235,8 +251,9 @@ class EbuildPhase(CompositeTask):
fd, logfile = tempfile.mkstemp()
os.close(fd)
post_phase = MiscFunctionsProcess(background=self.background,
- commands=post_phase_cmds, logfile=logfile, phase=self.phase,
- scheduler=self.scheduler, settings=settings)
+ commands=post_phase_cmds, fd_pipes=self.fd_pipes,
+ logfile=logfile, phase=self.phase, scheduler=self.scheduler,
+ settings=settings)
self._start_task(post_phase, self._post_phase_exit)
return
@@ -311,8 +328,9 @@ class EbuildPhase(CompositeTask):
self.returncode = None
phase = 'die_hooks'
die_hooks = MiscFunctionsProcess(background=self.background,
- commands=[phase], phase=phase,
- scheduler=self.scheduler, settings=self.settings)
+ commands=[phase], phase=phase, logfile=self._get_log_path(),
+ fd_pipes=self.fd_pipes, scheduler=self.scheduler,
+ settings=self.settings)
self._start_task(die_hooks, self._die_hooks_exit)
def _die_hooks_exit(self, die_hooks):
@@ -331,7 +349,8 @@ class EbuildPhase(CompositeTask):
portage.elog.elog_process(self.settings.mycpv, self.settings)
phase = "clean"
clean_phase = EbuildPhase(background=self.background,
- phase=phase, scheduler=self.scheduler, settings=self.settings)
+ fd_pipes=self.fd_pipes, phase=phase, scheduler=self.scheduler,
+ settings=self.settings)
self._start_task(clean_phase, self._fail_clean_exit)
return
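
[note] The test-fail-continue branch now drops an empty .tested marker into PORTAGE_BUILDDIR so a resumed build does not re-run the test phase (bug #452030). The marker pattern, reduced to a sketch:

    import os

    def mark_phase_complete(builddir, marker=".tested"):
        # Best effort: an unwritable build dir must not fail the phase.
        try:
            open(os.path.join(builddir, marker), 'wb').close()
        except OSError:
            pass
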
diff --git a/pym/_emerge/EbuildProcess.py b/pym/_emerge/EbuildProcess.py
index ce97aff0f..333ad7bd0 100644
--- a/pym/_emerge/EbuildProcess.py
+++ b/pym/_emerge/EbuildProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -17,5 +17,11 @@ class EbuildProcess(AbstractEbuildProcess):
if actionmap is None:
actionmap = _spawn_actionmap(self.settings)
- return _doebuild_spawn(self.phase, self.settings,
- actionmap=actionmap, **kwargs)
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ try:
+ return _doebuild_spawn(self.phase, self.settings,
+ actionmap=actionmap, **kwargs)
+ finally:
+ self.settings.pop("PORTAGE_PIPE_FD", None)
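
[note] The try/finally above guarantees PORTAGE_PIPE_FD is scrubbed from the settings even when _doebuild_spawn raises, so the value cannot leak into a later phase. The same idiom as a generic, hypothetical helper:

    def with_temp_key(mapping, key, value, func, *args, **kwargs):
        # Set a key only for the duration of one call.
        mapping[key] = value
        try:
            return func(*args, **kwargs)
        finally:
            mapping.pop(key, None)
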
diff --git a/pym/_emerge/EbuildSpawnProcess.py b/pym/_emerge/EbuildSpawnProcess.py
index e1f682a66..26d26fc77 100644
--- a/pym/_emerge/EbuildSpawnProcess.py
+++ b/pym/_emerge/EbuildSpawnProcess.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -13,4 +13,10 @@ class EbuildSpawnProcess(AbstractEbuildProcess):
__slots__ = ('fakeroot_state', 'spawn_func')
def _spawn(self, args, **kwargs):
- return self.spawn_func(args, env=self.settings.environ(), **kwargs)
+
+ env = self.settings.environ()
+
+ if self._dummy_pipe_fd is not None:
+ env["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ return self.spawn_func(args, env=env, **kwargs)
diff --git a/pym/_emerge/FakeVartree.py b/pym/_emerge/FakeVartree.py
index ce15f5a36..14be50c7f 100644
--- a/pym/_emerge/FakeVartree.py
+++ b/pym/_emerge/FakeVartree.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
import warnings
@@ -10,11 +12,11 @@ from _emerge.Package import Package
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from portage.const import VDB_PATH
from portage.dbapi.vartree import vartree
-from portage.dep._slot_abi import find_built_slot_abi_atoms
+from portage.dep._slot_operator import find_built_slot_operator_atoms
from portage.eapi import _get_eapi_attrs
-from portage.exception import InvalidDependString
-from portage.repository.config import _gen_valid_repo
+from portage.exception import InvalidData, InvalidDependString
from portage.update import grab_updates, parse_updates, update_dbentries
+from portage.versions import _pkg_str
if sys.hexversion >= 0x3000000:
long = int
@@ -33,6 +35,9 @@ class FakeVardbapi(PackageVirtualDbapi):
path = os.path.join(path, filename)
return path
+class _DynamicDepsNotApplicable(Exception):
+ pass
+
class FakeVartree(vartree):
"""This is implements an in-memory copy of a vartree instance that provides
all the interfaces required for use by the depgraph. The vardb is locked
@@ -45,10 +50,10 @@ class FakeVartree(vartree):
is not a matching ebuild in the tree). Instances of this class are not
populated until the sync() method is called."""
def __init__(self, root_config, pkg_cache=None, pkg_root_config=None,
- dynamic_deps=True, ignore_built_slot_abi_deps=False):
+ dynamic_deps=True, ignore_built_slot_operator_deps=False):
self._root_config = root_config
self._dynamic_deps = dynamic_deps
- self._ignore_built_slot_abi_deps = ignore_built_slot_abi_deps
+ self._ignore_built_slot_operator_deps = ignore_built_slot_operator_deps
if pkg_root_config is None:
pkg_root_config = self._root_config
self._pkg_root_config = pkg_root_config
@@ -75,7 +80,7 @@ class FakeVartree(vartree):
self.dbapi.aux_get = self._aux_get_wrapper
self.dbapi.match = self._match_wrapper
self._aux_get_history = set()
- self._portdb_keys = ["EAPI", "KEYWORDS", "DEPEND", "RDEPEND", "PDEPEND"]
+ self._portdb_keys = Package._dep_keys + ("EAPI", "KEYWORDS")
self._portdb = portdb
self._global_updates = None
@@ -102,29 +107,30 @@ class FakeVartree(vartree):
self._aux_get_wrapper(cpv, [])
return matches
- def _aux_get_wrapper(self, pkg, wants, myrepo=None):
- if pkg in self._aux_get_history:
- return self._aux_get(pkg, wants)
- self._aux_get_history.add(pkg)
- # We need to check the EAPI, and this also raises
- # a KeyError to the caller if appropriate.
- pkg_obj = self.dbapi._cpv_map[pkg]
- installed_eapi = pkg_obj.metadata['EAPI']
- repo = pkg_obj.metadata['repository']
- eapi_attrs = _get_eapi_attrs(installed_eapi)
- built_slot_abi_atoms = None
-
- if eapi_attrs.slot_abi and not self._ignore_built_slot_abi_deps:
- try:
- built_slot_abi_atoms = find_built_slot_abi_atoms(pkg_obj)
- except InvalidDependString:
- pass
+ def _aux_get_wrapper(self, cpv, wants, myrepo=None):
+ if cpv in self._aux_get_history:
+ return self._aux_get(cpv, wants)
+ self._aux_get_history.add(cpv)
+
+ # This raises a KeyError to the caller if appropriate.
+ pkg = self.dbapi._cpv_map[cpv]
try:
- # Use the live ebuild metadata if possible.
- repo = _gen_valid_repo(repo)
live_metadata = dict(zip(self._portdb_keys,
- self._portdb.aux_get(pkg, self._portdb_keys, myrepo=repo)))
+ self._portdb.aux_get(cpv, self._portdb_keys,
+ myrepo=pkg.repo)))
+ except (KeyError, portage.exception.PortageException):
+ live_metadata = None
+
+ self._apply_dynamic_deps(pkg, live_metadata)
+
+ return self._aux_get(cpv, wants)
+
+ def _apply_dynamic_deps(self, pkg, live_metadata):
+
+ try:
+ if live_metadata is None:
+ raise _DynamicDepsNotApplicable()
# Use the metadata from the installed instance if the EAPI
# of either instance is unsupported, since if the installed
# instance has an unsupported or corrupt EAPI then we don't
@@ -134,26 +140,46 @@ class FakeVartree(vartree):
# order to respect dep updates without revision bump or EAPI
# bump, as in bug #368725.
if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
- portage.eapi_is_supported(installed_eapi)):
- raise KeyError(pkg)
+ portage.eapi_is_supported(pkg.eapi)):
+ raise _DynamicDepsNotApplicable()
- # preserve built SLOT/ABI := operator deps
- if built_slot_abi_atoms:
+ # preserve built slot/sub-slot := operator deps
+ built_slot_operator_atoms = None
+ if not self._ignore_built_slot_operator_deps and \
+ _get_eapi_attrs(pkg.eapi).slot_operator:
+ try:
+ built_slot_operator_atoms = \
+ find_built_slot_operator_atoms(pkg)
+ except InvalidDependString:
+ pass
+
+ if built_slot_operator_atoms:
live_eapi_attrs = _get_eapi_attrs(live_metadata["EAPI"])
- if not live_eapi_attrs.slot_abi:
- raise KeyError(pkg)
- for k, v in built_slot_abi_atoms.items():
+ if not live_eapi_attrs.slot_operator:
+ raise _DynamicDepsNotApplicable()
+ for k, v in built_slot_operator_atoms.items():
live_metadata[k] += (" " +
" ".join(_unicode(atom) for atom in v))
- self.dbapi.aux_update(pkg, live_metadata)
- except (KeyError, portage.exception.PortageException):
+ self.dbapi.aux_update(pkg.cpv, live_metadata)
+ except _DynamicDepsNotApplicable:
if self._global_updates is None:
self._global_updates = \
grab_global_updates(self._portdb)
+
+ # Bypass _aux_get_wrapper, since calling that
+ # here would trigger infinite recursion.
+ aux_keys = Package._dep_keys + self.dbapi._pkg_str_aux_keys
+ aux_dict = dict(zip(aux_keys, self._aux_get(pkg.cpv, aux_keys)))
perform_global_updates(
- pkg, self.dbapi, self._global_updates)
- return self._aux_get(pkg, wants)
+ pkg.cpv, aux_dict, self.dbapi, self._global_updates)
+
+ def dynamic_deps_preload(self, pkg, metadata):
+ if metadata is not None:
+ metadata = dict((k, metadata.get(k, ''))
+ for k in self._portdb_keys)
+ self._apply_dynamic_deps(pkg, metadata)
+ self._aux_get_history.add(pkg.cpv)
def cpv_discard(self, pkg):
"""
@@ -251,12 +277,6 @@ class FakeVartree(vartree):
root_config=self._pkg_root_config,
type_name="installed")
- try:
- mycounter = long(pkg.metadata["COUNTER"])
- except ValueError:
- mycounter = 0
- pkg.metadata["COUNTER"] = str(mycounter)
-
self._pkg_cache[pkg] = pkg
return pkg
@@ -285,13 +305,14 @@ def grab_global_updates(portdb):
return retupdates
-def perform_global_updates(mycpv, mydb, myupdates):
- aux_keys = ["DEPEND", "EAPI", "RDEPEND", "PDEPEND", 'repository']
- aux_dict = dict(zip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
- eapi = aux_dict.pop('EAPI')
- repository = aux_dict.pop('repository')
+def perform_global_updates(mycpv, aux_dict, mydb, myupdates):
+ try:
+ pkg = _pkg_str(mycpv, metadata=aux_dict, settings=mydb.settings)
+ except InvalidData:
+ return
+ aux_dict = dict((k, aux_dict[k]) for k in Package._dep_keys)
try:
- mycommands = myupdates[repository]
+ mycommands = myupdates[pkg.repo]
except KeyError:
try:
mycommands = myupdates['DEFAULT']
@@ -301,6 +322,6 @@ def perform_global_updates(mycpv, mydb, myupdates):
if not mycommands:
return
- updates = update_dbentries(mycommands, aux_dict, eapi=eapi)
+ updates = update_dbentries(mycommands, aux_dict, parent=pkg)
if updates:
mydb.aux_update(mycpv, updates)
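
[note] The dynamic-deps rewrite stops overloading KeyError for control flow; a private _DynamicDepsNotApplicable exception now marks the "fall back to installed metadata" path, so genuine KeyErrors from the dbapi are no longer swallowed. The shape of the logic, distilled (supported stands in for portage.eapi_is_supported):

    class NotApplicable(Exception):
        pass

    def choose_metadata(installed, live, supported):
        try:
            if live is None:
                raise NotApplicable()
            # Respect dep updates only when both EAPIs are usable.
            if not (supported(live["EAPI"]) and
                    supported(installed["EAPI"])):
                raise NotApplicable()
            return live
        except NotApplicable:
            return installed
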
diff --git a/pym/_emerge/FifoIpcDaemon.py b/pym/_emerge/FifoIpcDaemon.py
index fcc4ab4b9..7468de5e2 100644
--- a/pym/_emerge/FifoIpcDaemon.py
+++ b/pym/_emerge/FifoIpcDaemon.py
@@ -1,6 +1,14 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import sys
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
from portage import os
from _emerge.AbstractPollTask import AbstractPollTask
from portage.cache.mappings import slot_dict_class
@@ -21,7 +29,18 @@ class FifoIpcDaemon(AbstractPollTask):
self._files.pipe_in = \
os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
self._files.pipe_in,
self._registered_events, self._input_handler)
@@ -32,11 +51,23 @@ class FifoIpcDaemon(AbstractPollTask):
Re-open the input stream, in order to suppress
POLLHUP events (bug #339976).
"""
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
os.close(self._files.pipe_in)
self._files.pipe_in = \
os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
self._files.pipe_in,
self._registered_events, self._input_handler)
@@ -47,6 +78,8 @@ class FifoIpcDaemon(AbstractPollTask):
if self.returncode is None:
self.returncode = 1
self._unregister()
+ # notify exit listeners
+ self.wait()
def _wait(self):
if self.returncode is not None:
@@ -67,7 +100,7 @@ class FifoIpcDaemon(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
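
[note] The close-on-exec guard above now appears verbatim in several files (FifoIpcDaemon twice, EbuildMetadataPhase, PipeReader). A hypothetical shared helper, shown only to make the repeated pattern explicit:

    import sys

    try:
        import fcntl
    except ImportError:
        fcntl = None  # http://bugs.jython.org/issue1074

    def set_cloexec(fd):
        # FD_CLOEXEC is the default in Python >= 3.4, so only older
        # interpreters need the explicit flag.
        if sys.hexversion < 0x3040000 and fcntl is not None \
                and hasattr(fcntl, 'FD_CLOEXEC'):
            fcntl.fcntl(fd, fcntl.F_SETFD,
                fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
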
diff --git a/pym/_emerge/JobStatusDisplay.py b/pym/_emerge/JobStatusDisplay.py
index 5b9b2216f..9f6f09be0 100644
--- a/pym/_emerge/JobStatusDisplay.py
+++ b/pym/_emerge/JobStatusDisplay.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import formatter
import io
import sys
@@ -9,7 +11,6 @@ import time
import portage
from portage import os
from portage import _encodings
-from portage import _unicode_decode
from portage import _unicode_encode
from portage.output import xtermTitle
@@ -121,7 +122,8 @@ class JobStatusDisplay(object):
term_codes = {}
for k, capname in self._termcap_name_map.items():
- code = tigetstr(capname)
+ # Use _native_string for PyPy compat (bug #470258).
+ code = tigetstr(portage._native_string(capname))
if code is None:
code = self._default_term_codes[capname]
term_codes[k] = code
@@ -233,10 +235,10 @@ class JobStatusDisplay(object):
def _display_status(self):
# Don't use len(self._completed_tasks) here since that also
# can include uninstall tasks.
- curval_str = str(self.curval)
- maxval_str = str(self.maxval)
- running_str = str(self.running)
- failed_str = str(self.failed)
+ curval_str = "%s" % (self.curval,)
+ maxval_str = "%s" % (self.maxval,)
+ running_str = "%s" % (self.running,)
+ failed_str = "%s" % (self.failed,)
load_avg_str = self._load_avg_str()
color_output = io.StringIO()
@@ -248,36 +250,36 @@ class JobStatusDisplay(object):
f = formatter.AbstractFormatter(style_writer)
number_style = "INFORM"
- f.add_literal_data(_unicode_decode("Jobs: "))
+ f.add_literal_data("Jobs: ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(curval_str))
+ f.add_literal_data(curval_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" of "))
+ f.add_literal_data(" of ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(maxval_str))
+ f.add_literal_data(maxval_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" complete"))
+ f.add_literal_data(" complete")
if self.running:
- f.add_literal_data(_unicode_decode(", "))
+ f.add_literal_data(", ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(running_str))
+ f.add_literal_data(running_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" running"))
+ f.add_literal_data(" running")
if self.failed:
- f.add_literal_data(_unicode_decode(", "))
+ f.add_literal_data(", ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(failed_str))
+ f.add_literal_data(failed_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" failed"))
+ f.add_literal_data(" failed")
padding = self._jobs_column_width - len(plain_output.getvalue())
if padding > 0:
- f.add_literal_data(padding * _unicode_decode(" "))
+ f.add_literal_data(padding * " ")
- f.add_literal_data(_unicode_decode("Load avg: "))
- f.add_literal_data(_unicode_decode(load_avg_str))
+ f.add_literal_data("Load avg: ")
+ f.add_literal_data(load_avg_str)
# Truncate to fit width, to avoid making the terminal scroll if the
# line overflows (happens when the load average is large).
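
[note] With unicode_literals in effect, the terminfo capability names become unicode strings, which tigetstr() rejects on some interpreters (bug #470258); converting to the platform's native str type first sidesteps that. A minimal illustration of such a conversion (portage's actual _native_string may differ):

    import sys

    def native_string(s, encoding='ascii'):
        # Python 3: native str is unicode; decode bytes if needed.
        if sys.hexversion >= 0x3000000:
            return s.decode(encoding) if isinstance(s, bytes) else s
        # Python 2: native str is bytes; encode unicode if needed.
        return s.encode(encoding) if isinstance(s, unicode) else s
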
diff --git a/pym/_emerge/MergeListItem.py b/pym/_emerge/MergeListItem.py
index 8086c689a..938f8014a 100644
--- a/pym/_emerge/MergeListItem.py
+++ b/pym/_emerge/MergeListItem.py
@@ -1,7 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
+from portage.dep import _repo_separator
from portage.output import colorize
from _emerge.AsynchronousTask import AsynchronousTask
@@ -32,7 +33,7 @@ class MergeListItem(CompositeTask):
if pkg.installed:
# uninstall, executed by self.merge()
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
args_set = self.args_set
@@ -47,7 +48,9 @@ class MergeListItem(CompositeTask):
action_desc = "Emerging"
preposition = "for"
+ pkg_color = "PKG_MERGE"
if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
action_desc += " binary"
if build_opts.fetchonly:
@@ -57,16 +60,7 @@ class MergeListItem(CompositeTask):
(action_desc,
colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
- colorize("GOOD", pkg.cpv))
-
- portdb = pkg.root_config.trees["porttree"].dbapi
- portdir_repo_name = portdb.getRepositoryName(portdb.porttree_root)
- if portdir_repo_name:
- pkg_repo_name = pkg.repo
- if pkg_repo_name != portdir_repo_name:
- if pkg_repo_name == pkg.UNKNOWN_REPO:
- pkg_repo_name = "unknown repo"
- msg += " from %s" % pkg_repo_name
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
diff --git a/pym/_emerge/MetadataRegen.py b/pym/_emerge/MetadataRegen.py
index e82015fd1..d92b6a06e 100644
--- a/pym/_emerge/MetadataRegen.py
+++ b/pym/_emerge/MetadataRegen.py
@@ -1,18 +1,20 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from portage.dep import _repo_separator
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
-from _emerge.PollScheduler import PollScheduler
+from portage.cache.cache_errors import CacheError
+from portage.util._async.AsyncScheduler import AsyncScheduler
-class MetadataRegen(PollScheduler):
+class MetadataRegen(AsyncScheduler):
def __init__(self, portdb, cp_iter=None, consumer=None,
- max_jobs=None, max_load=None):
- PollScheduler.__init__(self, main=True)
+ write_auxdb=True, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
self._portdb = portdb
+ self._write_auxdb = write_auxdb
self._global_cleanse = False
if cp_iter is None:
cp_iter = self._iter_every_cp()
@@ -22,34 +24,21 @@ class MetadataRegen(PollScheduler):
self._cp_iter = cp_iter
self._consumer = consumer
- if max_jobs is None:
- max_jobs = 1
-
- self._max_jobs = max_jobs
- self._max_load = max_load
-
self._valid_pkgs = set()
self._cp_set = set()
self._process_iter = self._iter_metadata_processes()
- self.returncode = os.EX_OK
- self._error_count = 0
self._running_tasks = set()
- self._remaining_tasks = True
- def _terminate_tasks(self):
- for task in list(self._running_tasks):
- task.cancel()
+ def _next_task(self):
+ return next(self._process_iter)
def _iter_every_cp(self):
- portage.writemsg_stdout("Listing available packages...\n")
- every_cp = self._portdb.cp_all()
- portage.writemsg_stdout("Regenerating cache entries...\n")
- every_cp.sort(reverse=True)
- try:
- while not self._terminated_tasks:
- yield every_cp.pop()
- except IndexError:
- pass
+ # List categories individually, in order to start yielding more
+ # quickly and to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
def _iter_metadata_processes(self):
portdb = self._portdb
@@ -57,8 +46,9 @@ class MetadataRegen(PollScheduler):
cp_set = self._cp_set
consumer = self._consumer
+ portage.writemsg_stdout("Regenerating cache entries...\n")
for cp in self._cp_iter:
- if self._terminated_tasks:
+ if self._terminated.is_set():
break
cp_set.add(cp)
portage.writemsg_stdout("Processing %s\n" % cp)
@@ -68,7 +58,7 @@ class MetadataRegen(PollScheduler):
repo = portdb.repositories.get_repo_for_location(mytree)
cpv_list = portdb.cp_list(cp, mytree=[repo.location])
for cpv in cpv_list:
- if self._terminated_tasks:
+ if self._terminated.is_set():
break
valid_pkgs.add(cpv)
ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
@@ -84,22 +74,21 @@ class MetadataRegen(PollScheduler):
yield EbuildMetadataPhase(cpv=cpv,
ebuild_hash=ebuild_hash,
portdb=portdb, repo_path=repo_path,
- settings=portdb.doebuild_settings)
+ settings=portdb.doebuild_settings,
+ write_auxdb=self._write_auxdb)
- def _keep_scheduling(self):
- return self._remaining_tasks and not self._terminated_tasks
+ def _wait(self):
- def run(self):
+ AsyncScheduler._wait(self)
portdb = self._portdb
- from portage.cache.cache_errors import CacheError
dead_nodes = {}
- self._main_loop()
-
+ self._termination_check()
if self._terminated_tasks:
- self.returncode = 1
- return
+ portdb.flush_cache()
+ self.returncode = self._cancelled_returncode
+ return self.returncode
if self._global_cleanse:
for mytree in portdb.porttrees:
@@ -142,29 +131,12 @@ class MetadataRegen(PollScheduler):
except (KeyError, CacheError):
pass
- def _schedule_tasks(self):
- if self._terminated_tasks:
- return
-
- while self._can_add_job():
- try:
- metadata_process = next(self._process_iter)
- except StopIteration:
- self._remaining_tasks = False
- return
-
- self._jobs += 1
- self._running_tasks.add(metadata_process)
- metadata_process.scheduler = self.sched_iface
- metadata_process.addExitListener(self._metadata_exit)
- metadata_process.start()
-
- def _metadata_exit(self, metadata_process):
- self._jobs -= 1
- self._running_tasks.discard(metadata_process)
+ portdb.flush_cache()
+ return self.returncode
+
+ def _task_exit(self, metadata_process):
+
if metadata_process.returncode != os.EX_OK:
- self.returncode = 1
- self._error_count += 1
self._valid_pkgs.discard(metadata_process.cpv)
if not self._terminated_tasks:
portage.writemsg("Error processing %s, continuing...\n" % \
@@ -179,5 +151,4 @@ class MetadataRegen(PollScheduler):
metadata_process.ebuild_hash,
metadata_process.eapi_supported)
- self._schedule()
-
+ AsyncScheduler._task_exit(self, metadata_process)
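
[note] Listing one category at a time makes the regeneration start producing work sooner and shortens the window during which a termination signal goes unnoticed. The new generator, reduced to its essentials (assuming the cp_all(categories=...) interface used above):

    def iter_every_cp(portdb):
        # Yield package atoms category by category instead of
        # materializing the full sorted list up front.
        for category in sorted(portdb.categories):
            for cp in portdb.cp_all(categories=(category,)):
                yield cp
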
diff --git a/pym/_emerge/MiscFunctionsProcess.py b/pym/_emerge/MiscFunctionsProcess.py
index afa44fb2a..bada79d86 100644
--- a/pym/_emerge/MiscFunctionsProcess.py
+++ b/pym/_emerge/MiscFunctionsProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -29,6 +29,10 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
AbstractEbuildProcess._start(self)
def _spawn(self, args, **kwargs):
+
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
# Temporarily unset EBUILD_PHASE so that bashrc code doesn't
# think this is a real phase.
phase_backup = self.settings.pop("EBUILD_PHASE", None)
@@ -37,3 +41,4 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
finally:
if phase_backup is not None:
self.settings["EBUILD_PHASE"] = phase_backup
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/pym/_emerge/Package.py b/pym/_emerge/Package.py
index 14d069449..a09f73c59 100644
--- a/pym/_emerge/Package.py
+++ b/pym/_emerge/Package.py
@@ -1,8 +1,12 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
from itertools import chain
+import warnings
+
import portage
from portage import _encodings, _unicode_decode, _unicode_encode
from portage.cache.mappings import slot_dict_class
@@ -10,67 +14,82 @@ from portage.const import EBUILD_PHASES
from portage.dep import Atom, check_required_use, use_reduce, \
paren_enclose, _slot_separator, _repo_separator
from portage.versions import _pkg_str, _unknown_repo
-from portage.eapi import _get_eapi_attrs
+from portage.eapi import _get_eapi_attrs, eapi_has_use_aliases
from portage.exception import InvalidDependString
+from portage.localization import _
from _emerge.Task import Task
if sys.hexversion >= 0x3000000:
basestring = str
long = int
+ _unicode = str
+else:
+ _unicode = unicode
class Package(Task):
__hash__ = Task.__hash__
__slots__ = ("built", "cpv", "depth",
- "installed", "metadata", "onlydeps", "operation",
+ "installed", "onlydeps", "operation",
"root_config", "type_name",
"category", "counter", "cp", "cpv_split",
"inherited", "iuse", "mtime",
- "pf", "root", "slot", "slot_abi", "slot_atom", "version") + \
- ("_invalid", "_raw_metadata", "_masks", "_use",
+ "pf", "root", "slot", "sub_slot", "slot_atom", "version") + \
+ ("_invalid", "_masks", "_metadata", "_raw_metadata", "_use",
"_validated_atoms", "_visible")
metadata_keys = [
"BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
- "INHERITED", "IUSE", "KEYWORDS",
+ "HDEPEND", "INHERITED", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE",
"_mtime_", "DEFINED_PHASES", "REQUIRED_USE"]
- _dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
+ _dep_keys = ('DEPEND', 'HDEPEND', 'PDEPEND', 'RDEPEND')
+ _buildtime_keys = ('DEPEND', 'HDEPEND')
+ _runtime_keys = ('PDEPEND', 'RDEPEND')
_use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
UNKNOWN_REPO = _unknown_repo
def __init__(self, **kwargs):
+ metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
Task.__init__(self, **kwargs)
# the SlotObject constructor assigns self.root_config from keyword args
# and is an instance of the '_emerge.RootConfig.RootConfig' class
self.root = self.root_config.root
- self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
- self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
+ self._raw_metadata = metadata
+ self._metadata = _PackageMetadataWrapper(self, metadata)
if not self.built:
- self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
- eapi_attrs = _get_eapi_attrs(self.metadata["EAPI"])
- self.cpv = _pkg_str(self.cpv, slot=self.metadata["SLOT"],
- repo=self.metadata.get('repository', ''),
- eapi=self.metadata["EAPI"])
+ self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+ eapi_attrs = _get_eapi_attrs(self.eapi)
+ self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
+ settings=self.root_config.settings)
if hasattr(self.cpv, 'slot_invalid'):
self._invalid_metadata('SLOT.invalid',
- "SLOT: invalid value: '%s'" % self.metadata["SLOT"])
+ "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
+ self.cpv_split = self.cpv.cpv_split
+ self.category, self.pf = portage.catsplit(self.cpv)
self.cp = self.cpv.cp
+ self.version = self.cpv.version
self.slot = self.cpv.slot
- self.slot_abi = self.cpv.slot_abi
+ self.sub_slot = self.cpv.sub_slot
+ self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
# sync metadata with validated repo (may be UNKNOWN_REPO)
- self.metadata['repository'] = self.cpv.repo
+ self._metadata['repository'] = self.cpv.repo
+
+ if eapi_attrs.iuse_effective:
+ implicit_match = self.root_config.settings._iuse_effective_match
+ else:
+ implicit_match = self.root_config.settings._iuse_implicit_match
+ usealiases = self.root_config.settings._use_manager.getUseAliases(self)
+ self.iuse = self._iuse(self, self._metadata["IUSE"].split(), implicit_match,
+ usealiases, self.eapi)
+
if (self.iuse.enabled or self.iuse.disabled) and \
not eapi_attrs.iuse_defaults:
if not self.installed:
self._invalid_metadata('EAPI.incompatible',
"IUSE contains defaults, but EAPI doesn't allow them")
- self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
- self.category, self.pf = portage.catsplit(self.cpv)
- self.cpv_split = self.cpv.cpv_split
- self.version = self.cpv.version
if self.inherited is None:
self.inherited = frozenset()
@@ -87,6 +106,37 @@ class Package(Task):
type_name=self.type_name)
self._hash_value = hash(self._hash_key)
+ @property
+ def eapi(self):
+ return self._metadata["EAPI"]
+
+ @property
+ def build_time(self):
+ if not self.built:
+ raise AttributeError('build_time')
+ try:
+ return long(self._metadata['BUILD_TIME'])
+ except (KeyError, ValueError):
+ return 0
+
+ @property
+ def defined_phases(self):
+ return self._metadata.defined_phases
+
+ @property
+ def properties(self):
+ return self._metadata.properties
+
+ @property
+ def restrict(self):
+ return self._metadata.restrict
+
+ @property
+ def metadata(self):
+ warnings.warn("_emerge.Package.Package.metadata is deprecated",
+ DeprecationWarning, stacklevel=3)
+ return self._metadata
+
# These are calculated on-demand, so that they are calculated
# after FakeVartree applies its metadata tweaks.
@property
@@ -120,6 +170,10 @@ class Package(Task):
self._validate_deps()
return self._validated_atoms
+ @property
+ def stable(self):
+ return self.cpv.stable
+
@classmethod
def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
operation=None, repo_name=None, root_config=None,
@@ -154,15 +208,15 @@ class Package(Task):
# So overwrite the repo_key with type_name.
repo_key = type_name
- return (type_name, root, cpv, operation, repo_key)
+ return (type_name, root, _unicode(cpv), operation, repo_key)
def _validate_deps(self):
"""
Validate deps. This does not trigger USE calculation since that
is expensive for ebuilds and therefore we want to avoid doing
- in unnecessarily (like for masked packages).
+ it unnecessarily (like for masked packages).
"""
- eapi = self.metadata['EAPI']
+ eapi = self.eapi
dep_eapi = eapi
dep_valid_flag = self.iuse.is_valid_flag
if self.installed:
@@ -175,31 +229,42 @@ class Package(Task):
validated_atoms = []
for k in self._dep_keys:
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if not v:
continue
try:
- validated_atoms.extend(use_reduce(v, eapi=dep_eapi,
+ atoms = use_reduce(v, eapi=dep_eapi,
matchall=True, is_valid_flag=dep_valid_flag,
- token_class=Atom, flat=True))
+ token_class=Atom, flat=True)
except InvalidDependString as e:
self._metadata_exception(k, e)
+ else:
+ validated_atoms.extend(atoms)
+ if not self.built:
+ for atom in atoms:
+ if not isinstance(atom, Atom):
+ continue
+ if atom.slot_operator_built:
+ e = InvalidDependString(
+ _("Improper context for slot-operator "
+ "\"built\" atom syntax: %s") %
+ (atom.unevaluated_atom,))
+ self._metadata_exception(k, e)
self._validated_atoms = tuple(set(atom for atom in
validated_atoms if isinstance(atom, Atom)))
k = 'PROVIDE'
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if v:
try:
use_reduce(v, eapi=dep_eapi, matchall=True,
is_valid_flag=dep_valid_flag, token_class=Atom)
except InvalidDependString as e:
- self._invalid_metadata("PROVIDE.syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata("PROVIDE.syntax", "%s: %s" % (k, e))
for k in self._use_conditional_misc_keys:
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if not v:
continue
try:
@@ -209,24 +274,20 @@ class Package(Task):
self._metadata_exception(k, e)
k = 'REQUIRED_USE'
- v = self.metadata.get(k)
- if v:
+ v = self._metadata.get(k)
+ if v and not self.built:
if not _get_eapi_attrs(eapi).required_use:
self._invalid_metadata('EAPI.incompatible',
"REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
else:
try:
check_required_use(v, (),
- self.iuse.is_valid_flag)
+ self.iuse.is_valid_flag, eapi=eapi)
except InvalidDependString as e:
- # Force unicode format string for python-2.x safety,
- # ensuring that PortageException.__unicode__() is used
- # when necessary.
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
k = 'SRC_URI'
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if v:
try:
use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
@@ -248,36 +309,45 @@ class Package(Task):
if self.invalid is not False:
masks['invalid'] = self.invalid
- if not settings._accept_chost(self.cpv, self.metadata):
- masks['CHOST'] = self.metadata['CHOST']
+ if not settings._accept_chost(self.cpv, self._metadata):
+ masks['CHOST'] = self._metadata['CHOST']
- eapi = self.metadata["EAPI"]
+ eapi = self.eapi
if not portage.eapi_is_supported(eapi):
masks['EAPI.unsupported'] = eapi
if portage._eapi_is_deprecated(eapi):
masks['EAPI.deprecated'] = eapi
missing_keywords = settings._getMissingKeywords(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_keywords:
masks['KEYWORDS'] = missing_keywords
try:
missing_properties = settings._getMissingProperties(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_properties:
masks['PROPERTIES'] = missing_properties
except InvalidDependString:
# already recorded as 'invalid'
pass
- mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
+ try:
+ missing_restricts = settings._getMissingRestrict(
+ self.cpv, self._metadata)
+ if missing_restricts:
+ masks['RESTRICT'] = missing_restricts
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
if mask_atom is not None:
masks['package.mask'] = mask_atom
try:
missing_licenses = settings._getMissingLicenses(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_licenses:
masks['LICENSE'] = missing_licenses
except InvalidDependString:
@@ -303,7 +373,8 @@ class Package(Task):
'CHOST' in masks or \
'EAPI.deprecated' in masks or \
'KEYWORDS' in masks or \
- 'PROPERTIES' in masks):
+ 'PROPERTIES' in masks or \
+ 'RESTRICT' in masks):
return False
if 'package.mask' in masks or \
@@ -316,7 +387,7 @@ class Package(Task):
"""returns None, 'missing', or 'unstable'."""
missing = self.root_config.settings._getRawMissingKeywords(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if not missing:
return None
@@ -337,17 +408,22 @@ class Package(Task):
"""returns a bool if the cpv is in the list of
expanded pmaskdict[cp] available ebuilds"""
pmask = self.root_config.settings._getRawMaskAtom(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
return pmask is not None
def _metadata_exception(self, k, e):
+ if k.endswith('DEPEND'):
+ qacat = 'dependency.syntax'
+ else:
+ qacat = k + ".syntax"
+
# For unicode safety with python-2.x we need to avoid
# using the string format operator with a non-unicode
# format string, since that will result in the
# PortageException.__str__() method being invoked,
# followed by unsafe decoding that may result in a
- # UnicodeDecodeError. Therefore, use _unicode_decode()
+ # UnicodeDecodeError. Therefore, use unicode_literals
# to ensure that format strings are unicode, so that
# PortageException.__unicode__() is used when necessary
# in python-2.x.
@@ -359,19 +435,17 @@ class Package(Task):
continue
categorized_error = True
self._invalid_metadata(error.category,
- _unicode_decode("%s: %s") % (k, error))
+ "%s: %s" % (k, error))
if not categorized_error:
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata(qacat, "%s: %s" % (k, e))
else:
# For installed packages, show the path of the file
# containing the invalid metadata, since the user may
# want to fix the deps by hand.
vardb = self.root_config.trees['vartree'].dbapi
path = vardb.getpath(self.cpv, filename=k)
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s in '%s'") % (k, e, path))
+ self._invalid_metadata(qacat, "%s: %s in '%s'" % (k, e, path))
def _invalid_metadata(self, msg_type, msg):
if self._invalid is None:
@@ -394,7 +468,8 @@ class Package(Task):
cpv_color = "PKG_NOMERGE"
s = "(%s, %s" \
- % (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo) , self.type_name)
+ % (portage.output.colorize(cpv_color, self.cpv + _slot_separator + \
+ self.slot + "/" + self.sub_slot + _repo_separator + self.repo), self.type_name)
if self.type_name == "installed":
if self.root_config.settings['ROOT'] != "/":
@@ -425,13 +500,16 @@ class Package(Task):
# Share identical frozenset instances when available.
_frozensets = {}
- def __init__(self, pkg, use_str):
+ def __init__(self, pkg, enabled_flags):
self._pkg = pkg
self._expand = None
self._expand_hidden = None
self._force = None
self._mask = None
- self.enabled = frozenset(use_str.split())
+ if eapi_has_use_aliases(pkg.eapi):
+ for enabled_flag in enabled_flags:
+ enabled_flags.extend(pkg.iuse.alias_mapping.get(enabled_flag, []))
+ self.enabled = frozenset(enabled_flags)
if pkg.built:
# Use IUSE to validate USE settings for built packages,
# in case the package manager that built this package
@@ -481,7 +559,7 @@ class Package(Task):
@property
def repo(self):
- return self.metadata['repository']
+ return self._metadata['repository']
@property
def repo_priority(self):
@@ -493,7 +571,7 @@ class Package(Task):
@property
def use(self):
if self._use is None:
- self.metadata._init_use()
+ self._init_use()
return self._use
def _get_pkgsettings(self):
@@ -502,28 +580,81 @@ class Package(Task):
pkgsettings.setcpv(self)
return pkgsettings
+ def _init_use(self):
+ if self.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use_str = self._metadata['USE']
+ is_valid_flag = self.iuse.is_valid_flag
+ enabled_flags = [x for x in use_str.split() if is_valid_flag(x)]
+ use_str = " ".join(enabled_flags)
+ self._use = self._use_class(
+ self, enabled_flags)
+ else:
+ try:
+ use_str = _PackageMetadataWrapperBase.__getitem__(
+ self._metadata, 'USE')
+ except KeyError:
+ use_str = None
+ calculated_use = False
+ if not use_str:
+ use_str = self._get_pkgsettings()["PORTAGE_USE"]
+ calculated_use = True
+ self._use = self._use_class(
+ self, use_str.split())
+ # Initialize these now, since USE access has just triggered
+ # setcpv, and we want to cache the result of the force/mask
+ # calculations that were done.
+ if calculated_use:
+ self._use._init_force_mask()
+
+ _PackageMetadataWrapperBase.__setitem__(
+ self._metadata, 'USE', use_str)
+
+ return use_str
+
class _iuse(object):
- __slots__ = ("__weakref__", "all", "enabled", "disabled",
- "tokens") + ("_iuse_implicit_match",)
+ __slots__ = ("__weakref__", "_iuse_implicit_match", "_pkg", "alias_mapping",
+ "all", "all_aliases", "enabled", "disabled", "tokens")
- def __init__(self, tokens, iuse_implicit_match):
+ def __init__(self, pkg, tokens, iuse_implicit_match, aliases, eapi):
+ self._pkg = pkg
self.tokens = tuple(tokens)
self._iuse_implicit_match = iuse_implicit_match
enabled = []
disabled = []
other = []
+ enabled_aliases = []
+ disabled_aliases = []
+ other_aliases = []
+ aliases_supported = eapi_has_use_aliases(eapi)
+ self.alias_mapping = {}
for x in tokens:
prefix = x[:1]
if prefix == "+":
enabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ enabled_aliases.extend(self.alias_mapping[x[1:]])
elif prefix == "-":
disabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ disabled_aliases.extend(self.alias_mapping[x[1:]])
else:
other.append(x)
- self.enabled = frozenset(enabled)
- self.disabled = frozenset(disabled)
+ if aliases_supported:
+ self.alias_mapping[x] = aliases.get(x, [])
+ other_aliases.extend(self.alias_mapping[x])
+ self.enabled = frozenset(chain(enabled, enabled_aliases))
+ self.disabled = frozenset(chain(disabled, disabled_aliases))
self.all = frozenset(chain(enabled, disabled, other))
+ self.all_aliases = frozenset(chain(enabled_aliases, disabled_aliases, other_aliases))
def is_valid_flag(self, flags):
"""
@@ -534,7 +665,7 @@ class Package(Task):
flags = [flags]
for flag in flags:
- if not flag in self.all and \
+ if not flag in self.all and not flag in self.all_aliases and \
not self._iuse_implicit_match(flag):
return False
return True
@@ -547,11 +678,28 @@ class Package(Task):
flags = [flags]
missing_iuse = []
for flag in flags:
- if not flag in self.all and \
+ if not flag in self.all and not flag in self.all_aliases and \
not self._iuse_implicit_match(flag):
missing_iuse.append(flag)
return missing_iuse
+ def get_real_flag(self, flag):
+ """
+ Returns the flag's name within the scope of this package
+ (accounting for aliases), or None if the flag is unknown.
+ """
+ if flag in self.all:
+ return flag
+ elif flag in self.all_aliases:
+ for k, v in self.alias_mapping.items():
+ if flag in v:
+ return k
+
+ if self._iuse_implicit_match(flag):
+ return flag
+
+ return None
+
def __len__(self):
return 4
@@ -604,7 +752,7 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
__slots__ = ("_pkg",)
_wrapped_keys = frozenset(
- ["COUNTER", "INHERITED", "IUSE", "USE", "_mtime_"])
+ ["COUNTER", "INHERITED", "USE", "_mtime_"])
_use_conditional_keys = frozenset(
['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])
@@ -617,31 +765,6 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
self.update(metadata)
- def _init_use(self):
- if self._pkg.built:
- use_str = self['USE']
- self._pkg._use = self._pkg._use_class(
- self._pkg, use_str)
- else:
- try:
- use_str = _PackageMetadataWrapperBase.__getitem__(self, 'USE')
- except KeyError:
- use_str = None
- calculated_use = False
- if not use_str:
- use_str = self._pkg._get_pkgsettings()["PORTAGE_USE"]
- calculated_use = True
- _PackageMetadataWrapperBase.__setitem__(self, 'USE', use_str)
- self._pkg._use = self._pkg._use_class(
- self._pkg, use_str)
- # Initialize these now, since USE access has just triggered
- # setcpv, and we want to cache the result of the force/mask
- # calculations that were done.
- if calculated_use:
- self._pkg._use._init_force_mask()
-
- return use_str
-
def __getitem__(self, k):
v = _PackageMetadataWrapperBase.__getitem__(self, k)
if k in self._use_conditional_keys:
@@ -659,7 +782,7 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
elif k == 'USE' and not self._pkg.built:
if not v:
# This is lazy because it's expensive.
- v = self._init_use()
+ v = self._pkg._init_use()
return v
@@ -673,10 +796,6 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
v = frozenset(v.split())
self._pkg.inherited = v
- def _set_iuse(self, k, v):
- self._pkg.iuse = self._pkg._iuse(
- v.split(), self._pkg.root_config.settings._iuse_implicit_match)
-
def _set_counter(self, k, v):
if isinstance(v, basestring):
try:
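
[note] get_real_flag() resolves a USE flag through the alias table built in the _iuse constructor, mapping an alias back to its canonical IUSE name. The lookup, distilled (alias_mapping maps real flag -> list of aliases, as above):

    def get_real_flag(flag, all_flags, alias_mapping, implicit_match):
        if flag in all_flags:
            return flag
        # The flag may be an EAPI 5 style alias of a real flag.
        for real_flag, aliases in alias_mapping.items():
            if flag in aliases:
                return real_flag
        # Implicit IUSE members count as known under their own name.
        return flag if implicit_match(flag) else None
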
diff --git a/pym/_emerge/PackageMerge.py b/pym/_emerge/PackageMerge.py
index eed34e99b..ef298ca48 100644
--- a/pym/_emerge/PackageMerge.py
+++ b/pym/_emerge/PackageMerge.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.CompositeTask import CompositeTask
@@ -11,6 +11,9 @@ class PackageMerge(CompositeTask):
self.scheduler = self.merge.scheduler
pkg = self.merge.pkg
pkg_count = self.merge.pkg_count
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
if pkg.installed:
action_desc = "Uninstalling"
@@ -26,7 +29,7 @@ class PackageMerge(CompositeTask):
msg = "%s %s%s" % \
(action_desc,
counter_str,
- colorize("GOOD", pkg.cpv))
+ colorize(pkg_color, pkg.cpv))
if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
diff --git a/pym/_emerge/PackageUninstall.py b/pym/_emerge/PackageUninstall.py
index eb6a947a5..16c2f749b 100644
--- a/pym/_emerge/PackageUninstall.py
+++ b/pym/_emerge/PackageUninstall.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -33,7 +33,7 @@ class PackageUninstall(CompositeTask):
# Apparently the package got uninstalled
# already, so we can safely return early.
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
self.settings.setcpv(self.pkg)
@@ -67,7 +67,7 @@ class PackageUninstall(CompositeTask):
if retval != os.EX_OK:
self._builddir_lock.unlock()
self.returncode = retval
- self.wait()
+ self._async_wait()
return
self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
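
[note] The recurring wait() -> _async_wait() substitution in these hunks defers exit notification to the next event-loop iteration instead of firing it synchronously inside _start(), so listeners registered after start() returns still get called. A schematic of the idea, assuming an idle_add()-style scheduler hook (not portage's exact AsynchronousTask code):

    class Task(object):
        def __init__(self, scheduler):
            self.scheduler = scheduler
            self.returncode = None
            self._exit_listeners = []

        def addExitListener(self, callback):
            self._exit_listeners.append(callback)

        def wait(self):
            for callback in self._exit_listeners:
                callback(self)
            return self.returncode

        def _async_wait(self):
            # Notify listeners from the event loop, not the caller's frame.
            self.scheduler.idle_add(self._async_wait_cb)

        def _async_wait_cb(self):
            self.wait()
            return False  # one-shot idle callback
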
diff --git a/pym/_emerge/PackageVirtualDbapi.py b/pym/_emerge/PackageVirtualDbapi.py
index 0f7be44b1..56a5576e3 100644
--- a/pym/_emerge/PackageVirtualDbapi.py
+++ b/pym/_emerge/PackageVirtualDbapi.py
@@ -140,10 +140,10 @@ class PackageVirtualDbapi(dbapi):
self._clear_cache()
def aux_get(self, cpv, wants, myrepo=None):
- metadata = self._cpv_map[cpv].metadata
+ metadata = self._cpv_map[cpv]._metadata
return [metadata.get(x, "") for x in wants]
def aux_update(self, cpv, values):
- self._cpv_map[cpv].metadata.update(values)
+ self._cpv_map[cpv]._metadata.update(values)
self._clear_cache()
diff --git a/pym/_emerge/PipeReader.py b/pym/_emerge/PipeReader.py
index 90febdf44..a8392c329 100644
--- a/pym/_emerge/PipeReader.py
+++ b/pym/_emerge/PipeReader.py
@@ -1,9 +1,11 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import fcntl
+import sys
+
from portage import os
from _emerge.AbstractPollTask import AbstractPollTask
-import fcntl
class PipeReader(AbstractPollTask):
@@ -27,18 +29,28 @@ class PipeReader(AbstractPollTask):
output_handler = self._output_handler
for f in self.input_files.values():
- fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_ids.add(self.scheduler.register(f.fileno(),
+ fd = isinstance(f, int) and f or f.fileno()
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_ids.add(self.scheduler.io_add_watch(fd,
self._registered_events, output_handler))
self._registered = True
- def isAlive(self):
- return self._registered
-
def _cancel(self):
+ self._unregister()
if self.returncode is None:
- self.returncode = 1
+ self.returncode = self._cancelled_returncode
def _wait(self):
if self.returncode is not None:
@@ -102,11 +114,14 @@ class PipeReader(AbstractPollTask):
if self._reg_ids is not None:
for reg_id in self._reg_ids:
- self.scheduler.unregister(reg_id)
+ self.scheduler.source_remove(reg_id)
self._reg_ids = None
if self.input_files is not None:
for f in self.input_files.values():
- f.close()
+ if isinstance(f, int):
+ os.close(f)
+ else:
+ f.close()
self.input_files = None
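
[note] PipeReader now accepts raw integer descriptors alongside file objects, so a caller can pass one end of os.pipe() directly. The normalization and cleanup pattern, distilled:

    import os

    def as_fd(f):
        # Accept either a file object or a bare descriptor.
        return f if isinstance(f, int) else f.fileno()

    def close_input(f):
        # Bare descriptors have no close() method of their own.
        if isinstance(f, int):
            os.close(f)
        else:
            f.close()
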
diff --git a/pym/_emerge/PollScheduler.py b/pym/_emerge/PollScheduler.py
index 5103e31d6..b118ac157 100644
--- a/pym/_emerge/PollScheduler.py
+++ b/pym/_emerge/PollScheduler.py
@@ -1,18 +1,13 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import gzip
-import errno
-
try:
import threading
except ImportError:
import dummy_threading as threading
-from portage import _encodings
-from portage import _unicode_encode
-from portage.util import writemsg_level
-from portage.util.SlotObject import SlotObject
+import portage
+from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.EventLoop import EventLoop
from portage.util._eventloop.global_event_loop import global_event_loop
@@ -20,14 +15,10 @@ from _emerge.getloadavg import getloadavg
class PollScheduler(object):
- class _sched_iface_class(SlotObject):
- __slots__ = ("IO_ERR", "IO_HUP", "IO_IN", "IO_NVAL", "IO_OUT",
- "IO_PRI", "child_watch_add",
- "idle_add", "io_add_watch", "iteration",
- "output", "register", "run",
- "source_remove", "timeout_add", "unregister")
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = None
- def __init__(self, main=False):
+ def __init__(self, main=False, event_loop=None):
"""
@param main: If True then use global_event_loop(), otherwise use
a local EventLoop instance (default is False, for safe use in
@@ -38,29 +29,20 @@ class PollScheduler(object):
self._terminated_tasks = False
self._max_jobs = 1
self._max_load = None
- self._jobs = 0
self._scheduling = False
self._background = False
- if main:
+ if event_loop is not None:
+ self._event_loop = event_loop
+ elif main:
self._event_loop = global_event_loop()
else:
- self._event_loop = EventLoop(main=False)
- self.sched_iface = self._sched_iface_class(
- IO_ERR=self._event_loop.IO_ERR,
- IO_HUP=self._event_loop.IO_HUP,
- IO_IN=self._event_loop.IO_IN,
- IO_NVAL=self._event_loop.IO_NVAL,
- IO_OUT=self._event_loop.IO_OUT,
- IO_PRI=self._event_loop.IO_PRI,
- child_watch_add=self._event_loop.child_watch_add,
- idle_add=self._event_loop.idle_add,
- io_add_watch=self._event_loop.io_add_watch,
- iteration=self._event_loop.iteration,
- output=self._task_output,
- register=self._event_loop.io_add_watch,
- source_remove=self._event_loop.source_remove,
- timeout_add=self._event_loop.timeout_add,
- unregister=self._event_loop.source_remove)
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ self._sched_iface = SchedulerInterface(self._event_loop,
+ is_background=self._is_background)
+
+ def _is_background(self):
+ return self._background
def terminate(self):
"""
@@ -135,48 +117,23 @@ class PollScheduler(object):
Calls _schedule_tasks() and automatically returns early from
any recursive calls to this method that the _schedule_tasks()
call might trigger. This makes _schedule() safe to call from
- inside exit listeners.
+ inside exit listeners. This method always returns True, so that
+ it may be scheduled continuously via EventLoop.timeout_add().
"""
if self._scheduling:
- return False
+ return True
self._scheduling = True
try:
self._schedule_tasks()
finally:
self._scheduling = False
-
- def _main_loop(self):
- term_check_id = self.sched_iface.idle_add(self._termination_check)
- try:
- # Populate initial event sources. Unless we're scheduling
- # based on load average, we only need to do this once
- # here, since it can be called during the loop from within
- # event handlers.
- self._schedule()
- max_load = self._max_load
-
- # Loop while there are jobs to be scheduled.
- while self._keep_scheduling():
- self.sched_iface.iteration()
-
- if max_load is not None:
- # We have to schedule periodically, in case the load
- # average has changed since the last call.
- self._schedule()
-
- # Clean shutdown of previously scheduled jobs. In the
- # case of termination, this allows for basic cleanup
- # such as flushing of buffered output to logs.
- while self._is_work_scheduled():
- self.sched_iface.iteration()
- finally:
- self.sched_iface.source_remove(term_check_id)
+ return True
def _is_work_scheduled(self):
return bool(self._running_job_count())
def _running_job_count(self):
- return self._jobs
+ raise NotImplementedError(self)
def _can_add_job(self):
if self._terminated_tasks:
@@ -201,47 +158,3 @@ class PollScheduler(object):
return False
return True
-
- def _task_output(self, msg, log_path=None, background=None,
- level=0, noiselevel=-1):
- """
- Output msg to stdout if not self._background. If log_path
- is not None then append msg to the log (appends with
- compression if the filename extension of log_path
- corresponds to a supported compression type).
- """
-
- if background is None:
- # If the task does not have a local background value
- # (like for parallel-fetch), then use the global value.
- background = self._background
-
- msg_shown = False
- if not background:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- msg_shown = True
-
- if log_path is not None:
- try:
- f = open(_unicode_encode(log_path,
- encoding=_encodings['fs'], errors='strict'),
- mode='ab')
- f_real = f
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
- raise
- if not msg_shown:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- else:
-
- if log_path.endswith('.gz'):
- # NOTE: The empty filename argument prevents us from
- # triggering a bug in python3 which causes GzipFile
- # to raise AttributeError if fileobj.name is bytes
- # instead of unicode.
- f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
-
- f.write(_unicode_encode(msg))
- f.close()
- if f_real is not f:
- f_real.close()
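
The revised _schedule() contract above leans on EventLoop's GLib-style
timeout semantics: a callback registered with timeout_add() stays
scheduled for as long as it returns True and is removed once it returns
False (which is why the new _schedule_once() helper further down returns
False). A minimal sketch of that contract, assuming a portage checkout
on the import path:

    from portage.util._eventloop.EventLoop import EventLoop

    loop = EventLoop(main=False)
    calls = []

    def periodic():
        calls.append(None)
        # True keeps the timeout registered; returning False would
        # remove it, which is the behavior _schedule() relies on.
        return len(calls) < 3

    loop.timeout_add(10, periodic)  # interval in milliseconds
    while len(calls) < 3:
        loop.iteration()
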
diff --git a/pym/_emerge/QueueScheduler.py b/pym/_emerge/QueueScheduler.py
deleted file mode 100644
index 206087c7a..000000000
--- a/pym/_emerge/QueueScheduler.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from _emerge.PollScheduler import PollScheduler
-
-class QueueScheduler(PollScheduler):
-
- """
- Add instances of SequentialTaskQueue and then call run(). The
- run() method returns when no tasks remain.
- """
-
- def __init__(self, main=True, max_jobs=None, max_load=None):
- PollScheduler.__init__(self, main=main)
-
- if max_jobs is None:
- max_jobs = 1
-
- self._max_jobs = max_jobs
- self._max_load = max_load
-
- self._queues = []
- self._schedule_listeners = []
-
- def add(self, q):
- self._queues.append(q)
-
- def remove(self, q):
- self._queues.remove(q)
-
- def clear(self):
- for q in self._queues:
- q.clear()
-
- def run(self, timeout=None):
-
- timeout_callback = None
- if timeout is not None:
- def timeout_callback():
- timeout_callback.timed_out = True
- return False
- timeout_callback.timed_out = False
- timeout_callback.timeout_id = self.sched_iface.timeout_add(
- timeout, timeout_callback)
-
- term_check_id = self.sched_iface.idle_add(self._termination_check)
- try:
- while not (timeout_callback is not None and
- timeout_callback.timed_out):
- # We don't have any callbacks to trigger _schedule(),
- # so we have to call it explicitly here.
- self._schedule()
- if self._keep_scheduling():
- self.sched_iface.iteration()
- else:
- break
-
- while self._is_work_scheduled() and \
- not (timeout_callback is not None and
- timeout_callback.timed_out):
- self.sched_iface.iteration()
- finally:
- self.sched_iface.source_remove(term_check_id)
- if timeout_callback is not None:
- self.sched_iface.unregister(timeout_callback.timeout_id)
-
- def _schedule_tasks(self):
- """
- @rtype: bool
- @return: True if there may be remaining tasks to schedule,
- False otherwise.
- """
- if self._terminated_tasks:
- return
-
- while self._can_add_job():
- n = self._max_jobs - self._running_job_count()
- if n < 1:
- break
-
- if not self._start_next_job(n):
- return
-
- def _keep_scheduling(self):
- return not self._terminated_tasks and any(self._queues)
-
- def _running_job_count(self):
- job_count = 0
- for q in self._queues:
- job_count += len(q.running_tasks)
- self._jobs = job_count
- return job_count
-
- def _start_next_job(self, n=1):
- started_count = 0
- for q in self._queues:
- initial_job_count = len(q.running_tasks)
- q.schedule()
- final_job_count = len(q.running_tasks)
- if final_job_count > initial_job_count:
- started_count += (final_job_count - initial_job_count)
- if started_count >= n:
- break
- return started_count
-
diff --git a/pym/_emerge/RootConfig.py b/pym/_emerge/RootConfig.py
index bb0d7682a..3648d01d7 100644
--- a/pym/_emerge/RootConfig.py
+++ b/pym/_emerge/RootConfig.py
@@ -1,10 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
class RootConfig(object):
"""This is used internally by depgraph to track information about a
particular $ROOT."""
- __slots__ = ("root", "setconfig", "sets", "settings", "trees")
+ __slots__ = ("mtimedb", "root", "setconfig", "sets", "settings", "trees")
pkg_tree_map = {
"ebuild" : "porttree",
@@ -31,4 +31,11 @@ class RootConfig(object):
Shallow copy all attributes from another instance.
"""
for k in self.__slots__:
- setattr(self, k, getattr(other, k))
+ try:
+ setattr(self, k, getattr(other, k))
+ except AttributeError:
+ # mtimedb is currently not a required attribute
+ try:
+ delattr(self, k)
+ except AttributeError:
+ pass
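
The reworked update() above makes mtimedb effectively optional: each
slot defined on the source instance is copied, and a slot the source
lacks is deleted from the destination too, so no stale value lingers.
A stand-in class (hypothetical, for illustration only) demonstrates the
copy-or-delete behavior:

    class _SlotCopy(object):
        # Mirrors the copy-or-delete pattern of RootConfig.update().
        __slots__ = ("mtimedb", "root")

        def update(self, other):
            for k in self.__slots__:
                try:
                    setattr(self, k, getattr(other, k))
                except AttributeError:
                    # The source lacks this slot (mtimedb is
                    # optional), so remove it here as well.
                    try:
                        delattr(self, k)
                    except AttributeError:
                        pass

    src = _SlotCopy()
    src.root = "/"
    dst = _SlotCopy()
    dst.root = "/old"
    dst.mtimedb = {}
    dst.update(src)
    assert dst.root == "/" and not hasattr(dst, "mtimedb")
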
diff --git a/pym/_emerge/Scheduler.py b/pym/_emerge/Scheduler.py
index 0b72a4cfc..dd268f708 100644
--- a/pym/_emerge/Scheduler.py
+++ b/pym/_emerge/Scheduler.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
from collections import deque
import gc
@@ -18,7 +18,7 @@ import zlib
import portage
from portage import os
from portage import _encodings
-from portage import _unicode_decode, _unicode_encode
+from portage import _unicode_encode
from portage.cache.mappings import slot_dict_class
from portage.elog.messages import eerror
from portage.localization import _
@@ -28,6 +28,8 @@ from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ensure_dirs, writemsg, writemsg_level
from portage.util.SlotObject import SlotObject
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import (_check_temp_dir,
@@ -50,6 +52,7 @@ from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
from _emerge.FakeVartree import FakeVartree
+from _emerge.getloadavg import getloadavg
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
from _emerge.JobStatusDisplay import JobStatusDisplay
@@ -64,6 +67,9 @@ if sys.hexversion >= 0x3000000:
class Scheduler(PollScheduler):
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = 30000
+
# max time between display status updates (milliseconds)
_max_display_latency = 3000
@@ -79,7 +85,7 @@ class Scheduler(PollScheduler):
_opts_no_self_update = frozenset(["--buildpkgonly",
"--fetchonly", "--fetch-all-uri", "--pretend"])
- class _iface_class(PollScheduler._sched_iface_class):
+ class _iface_class(SchedulerInterface):
__slots__ = ("fetch",
"scheduleSetup", "scheduleUnpack")
@@ -135,8 +141,7 @@ class Scheduler(PollScheduler):
portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
- spinner, mergelist=None, favorites=None, graph_config=None,
- uninstall_only=False):
+ spinner, mergelist=None, favorites=None, graph_config=None):
PollScheduler.__init__(self, main=True)
if mergelist is not None:
@@ -152,7 +157,6 @@ class Scheduler(PollScheduler):
self._spinner = spinner
self._mtimedb = mtimedb
self._favorites = favorites
- self._uninstall_only = uninstall_only
self._args_set = InternalPackageSet(favorites, allow_repo=True)
self._build_opts = self._build_opts_class()
@@ -161,6 +165,8 @@ class Scheduler(PollScheduler):
self._build_opts.buildpkg_exclude = InternalPackageSet( \
initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
allow_wildcard=True, allow_repo=True)
+ if "mirror" in self.settings.features:
+ self._build_opts.fetch_all_uri = True
self._binpkg_opts = self._binpkg_opts_class()
for k in self._binpkg_opts.__slots__:
@@ -217,14 +223,15 @@ class Scheduler(PollScheduler):
fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
schedule=self._schedule_fetch)
self._sched_iface = self._iface_class(
+ self._event_loop,
+ is_background=self._is_background,
fetch=fetch_iface,
scheduleSetup=self._schedule_setup,
- scheduleUnpack=self._schedule_unpack,
- **dict((k, getattr(self.sched_iface, k))
- for k in self.sched_iface.__slots__))
+ scheduleUnpack=self._schedule_unpack)
self._prefetchers = weakref.WeakValueDictionary()
self._pkg_queue = []
+ self._jobs = 0
self._running_tasks = {}
self._completed_tasks = set()
@@ -243,10 +250,15 @@ class Scheduler(PollScheduler):
# The load average takes some time to respond when new
# jobs are added, so we need to limit the rate of adding
# new jobs.
- self._job_delay_max = 10
- self._job_delay_factor = 1.0
- self._job_delay_exp = 1.5
+ self._job_delay_max = 5
self._previous_job_start_time = None
+ self._job_delay_timeout_id = None
+
+ # The load average takes some time to respond after a
+ # SIGSTOP/SIGCONT cycle, so delay scheduling for some
+ # time after SIGCONT is received.
+ self._sigcont_delay = 5
+ self._sigcont_time = None
# This is used to memoize the _choose_pkg() result when
# no packages can be chosen until one of the existing
@@ -300,15 +312,10 @@ class Scheduler(PollScheduler):
if not portage.dep.match_from_list(
portage.const.PORTAGE_PACKAGE_ATOM, [x]):
continue
- if self._running_portage is None or \
- self._running_portage.cpv != x.cpv or \
- '9999' in x.cpv or \
- 'git' in x.inherited or \
- 'git-2' in x.inherited:
- rval = _check_temp_dir(self.settings)
- if rval != os.EX_OK:
- return rval
- _prepare_self_update(self.settings)
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
break
return os.EX_OK
@@ -328,15 +335,13 @@ class Scheduler(PollScheduler):
self._set_graph_config(graph_config)
self._blocker_db = {}
dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
- ignore_built_slot_abi_deps = self.myopts.get(
- "--ignore-built-slot-abi-deps", "n") == "y"
+ ignore_built_slot_operator_deps = self.myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
for root in self.trees:
- if self._uninstall_only:
- continue
if graph_config is None:
fake_vartree = FakeVartree(self.trees[root]["root_config"],
pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
- ignore_built_slot_abi_deps=ignore_built_slot_abi_deps)
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
fake_vartree.sync()
else:
fake_vartree = graph_config.trees[root]['vartree']
@@ -413,7 +418,7 @@ class Scheduler(PollScheduler):
if not (isinstance(task, Package) and \
task.operation == "merge"):
continue
- if 'interactive' in task.metadata.properties:
+ if 'interactive' in task.properties:
interactive_tasks.append(task)
return interactive_tasks
@@ -658,10 +663,11 @@ class Scheduler(PollScheduler):
if value and value.strip():
continue
msg = _("%(var)s is not set... "
- "Are you missing the '%(configroot)setc/make.profile' symlink? "
+ "Are you missing the '%(configroot)s%(profile_path)s' symlink? "
"Is the symlink correct? "
"Is your portage tree complete?") % \
- {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
+ {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"],
+ "profile_path": portage.const.PROFILE_PATH}
out = portage.output.EOutput()
for line in textwrap.wrap(msg, 70):
@@ -721,7 +727,6 @@ class Scheduler(PollScheduler):
return
if self._parallel_fetch:
- self._status_msg("Starting parallel fetch")
prefetchers = self._prefetchers
@@ -753,7 +758,8 @@ class Scheduler(PollScheduler):
prefetcher = EbuildFetcher(background=True,
config_pool=self._ConfigPool(pkg.root,
self._allocate_config, self._deallocate_config),
- fetchonly=1, logfile=self._fetch_log,
+ fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
+ logfile=self._fetch_log,
pkg=pkg, prefetch=True, scheduler=self._sched_iface)
elif pkg.type_name == "binary" and \
@@ -774,10 +780,10 @@ class Scheduler(PollScheduler):
failures = 0
- # Use a local PollScheduler instance here, since we don't
+ # Use a local EventLoop instance here, since we don't
# want tasks here to trigger the usual Scheduler callbacks
# that handle job scheduling and status display.
- sched_iface = PollScheduler().sched_iface
+ sched_iface = SchedulerInterface(EventLoop(main=False))
for x in self._mergelist:
if not isinstance(x, Package):
@@ -786,10 +792,10 @@ class Scheduler(PollScheduler):
if x.operation == "uninstall":
continue
- if x.metadata["EAPI"] in ("0", "1", "2", "3"):
+ if x.eapi in ("0", "1", "2", "3"):
continue
- if "pretend" not in x.metadata.defined_phases:
+ if "pretend" not in x.defined_phases:
continue
out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
@@ -808,7 +814,7 @@ class Scheduler(PollScheduler):
build_dir_path = os.path.join(
os.path.realpath(settings["PORTAGE_TMPDIR"]),
"portage", x.category, x.pf)
- existing_buildir = os.path.isdir(build_dir_path)
+ existing_builddir = os.path.isdir(build_dir_path)
settings["PORTAGE_BUILDDIR"] = build_dir_path
build_dir = EbuildBuildDir(scheduler=sched_iface,
settings=settings)
@@ -819,7 +825,7 @@ class Scheduler(PollScheduler):
# Clean up the existing build dir, in case pkg_pretend
# checks for available space (bug #390711).
- if existing_buildir:
+ if existing_builddir:
if x.built:
tree = "bintree"
infloc = os.path.join(build_dir_path, "build-info")
@@ -908,13 +914,18 @@ class Scheduler(PollScheduler):
failures += 1
portage.elog.elog_process(x.cpv, settings)
finally:
- if current_task is not None and current_task.isAlive():
- current_task.cancel()
- current_task.wait()
- clean_phase = EbuildPhase(background=False,
- phase='clean', scheduler=sched_iface, settings=settings)
- clean_phase.start()
- clean_phase.wait()
+
+ if current_task is not None:
+ if current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ if current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface,
+ settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+
build_dir.unlock()
if failures:
@@ -1004,6 +1015,8 @@ class Scheduler(PollScheduler):
earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+ earlier_sigcont_handler = \
+ signal.signal(signal.SIGCONT, self._sigcont_handler)
try:
rval = self._merge()
@@ -1017,6 +1030,10 @@ class Scheduler(PollScheduler):
signal.signal(signal.SIGTERM, earlier_sigterm_handler)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ if earlier_sigcont_handler is not None:
+ signal.signal(signal.SIGCONT, earlier_sigcont_handler)
+ else:
+ signal.signal(signal.SIGCONT, signal.SIG_DFL)
if received_signal:
sys.exit(received_signal[0])
@@ -1063,7 +1080,8 @@ class Scheduler(PollScheduler):
printer = portage.output.EOutput()
background = self._background
failure_log_shown = False
- if background and len(self._failed_pkgs_all) == 1:
+ if background and len(self._failed_pkgs_all) == 1 and \
+ self.myopts.get('--quiet-fail', 'n') != 'y':
# If only one package failed then just show its
# whole log for easy viewing.
failed_pkg = self._failed_pkgs_all[-1]
@@ -1142,9 +1160,9 @@ class Scheduler(PollScheduler):
printer.eerror(line)
printer.eerror("")
for failed_pkg in self._failed_pkgs_all:
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that Package.__unicode__() is called in python2.
- msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
+ msg = " %s" % (failed_pkg.pkg,)
log_path = self._locate_failure_log(failed_pkg)
if log_path is not None:
msg += ", Log file:"
@@ -1341,6 +1359,38 @@ class Scheduler(PollScheduler):
blocker_db = self._blocker_db[pkg.root]
blocker_db.discardBlocker(pkg)
+ def _main_loop(self):
+ term_check_id = self._event_loop.idle_add(self._termination_check)
+ loadavg_check_id = None
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ loadavg_check_id = self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)
+
+ try:
+ # Populate initial event sources. Unless we're scheduling
+ # based on load average, we only need to do this once
+ # here, since it can be called during the loop from within
+ # event handlers.
+ self._schedule()
+
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self._event_loop.iteration()
+
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self._event_loop.iteration()
+ finally:
+ self._event_loop.source_remove(term_check_id)
+ if loadavg_check_id is not None:
+ self._event_loop.source_remove(loadavg_check_id)
+
def _merge(self):
if self._opts_no_background.intersection(self.myopts):
@@ -1351,8 +1401,10 @@ class Scheduler(PollScheduler):
failed_pkgs = self._failed_pkgs
portage.locks._quiet = self._background
portage.elog.add_listener(self._elog_listener)
- display_timeout_id = self.sched_iface.timeout_add(
- self._max_display_latency, self._status_display.display)
+ display_timeout_id = None
+ if self._status_display._isatty and not self._status_display.quiet:
+ display_timeout_id = self._event_loop.timeout_add(
+ self._max_display_latency, self._status_display.display)
rval = os.EX_OK
try:
@@ -1361,7 +1413,8 @@ class Scheduler(PollScheduler):
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog.remove_listener(self._elog_listener)
- self.sched_iface.source_remove(display_timeout_id)
+ if display_timeout_id is not None:
+ self._event_loop.source_remove(display_timeout_id)
if failed_pkgs:
rval = failed_pkgs[-1].returncode
@@ -1493,12 +1546,15 @@ class Scheduler(PollScheduler):
self._config_pool[settings['EROOT']].append(settings)
def _keep_scheduling(self):
- return bool(not self._terminated_tasks and self._pkg_queue and \
+ return bool(not self._terminated.is_set() and self._pkg_queue and \
not (self._failed_pkgs and not self._build_opts.fetchonly))
def _is_work_scheduled(self):
return bool(self._running_tasks)
+ def _running_job_count(self):
+ return self._jobs
+
def _schedule_tasks(self):
while True:
@@ -1539,6 +1595,9 @@ class Scheduler(PollScheduler):
not self._task_queues.merge)):
break
+ def _sigcont_handler(self, signum, frame):
+ self._sigcont_time = time.time()
+
def _job_delay(self):
"""
@rtype: bool
@@ -1549,14 +1608,53 @@ class Scheduler(PollScheduler):
current_time = time.time()
- delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
+ if self._sigcont_time is not None:
+
+ elapsed_seconds = current_time - self._sigcont_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and \
+ elapsed_seconds < self._sigcont_delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (self._sigcont_delay - elapsed_seconds),
+ self._schedule_once)
+ return True
+
+ # Only set this to None after the delay has expired,
+ # since this method may be called again before the
+ # delay has expired.
+ self._sigcont_time = None
+
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ delay = self._job_delay_max * avg1 / self._max_load
if delay > self._job_delay_max:
delay = self._job_delay_max
- if (current_time - self._previous_job_start_time) < delay:
+ elapsed_seconds = current_time - self._previous_job_start_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and elapsed_seconds < delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (delay - elapsed_seconds), self._schedule_once)
return True
return False
+ def _schedule_once(self):
+ self._schedule()
+ return False
+
def _schedule_tasks_imp(self):
"""
@rtype: bool
@@ -1738,7 +1836,7 @@ class Scheduler(PollScheduler):
# scope
e = exc
mydepgraph = e.depgraph
- dropped_tasks = set()
+ dropped_tasks = {}
if e is not None:
def unsatisfied_resume_dep_msg():
@@ -1775,11 +1873,7 @@ class Scheduler(PollScheduler):
return False
if success and self._show_list():
- mylist = mydepgraph.altlist()
- if mylist:
- if "--tree" in self.myopts:
- mylist.reverse()
- mydepgraph.display(mylist, favorites=self._favorites)
+ mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
if not success:
self._post_mod_echo_msgs.append(mydepgraph.display_problems)
@@ -1788,7 +1882,7 @@ class Scheduler(PollScheduler):
self._init_graph(mydepgraph.schedulerGraph())
msg_width = 75
- for task in dropped_tasks:
+ for task, atoms in dropped_tasks.items():
if not (isinstance(task, Package) and task.operation == "merge"):
continue
pkg = task
@@ -1796,7 +1890,10 @@ class Scheduler(PollScheduler):
" %s" % (pkg.cpv,)
if pkg.root_config.settings["ROOT"] != "/":
msg += " for %s" % (pkg.root,)
- msg += " dropped due to unsatisfied dependency."
+ if not atoms:
+ msg += " dropped because it is masked or unavailable"
+ else:
+ msg += " dropped because it requires %s" % ", ".join(atoms)
for line in textwrap.wrap(msg, msg_width):
eerror(line, phase="other", key=pkg.cpv)
settings = self.pkgsettings[pkg.root]
@@ -1841,11 +1938,21 @@ class Scheduler(PollScheduler):
root_config = pkg.root_config
world_set = root_config.sets["selected"]
world_locked = False
- if hasattr(world_set, "lock"):
- world_set.lock()
- world_locked = True
+ atom = None
+
+ if pkg.operation != "uninstall":
+ # Do this before acquiring the lock, since it queries the
+ # portdbapi which can call the global event loop, triggering
+ # a concurrent call to this method or something else that
+ # needs an exclusive (non-reentrant) lock on the world file.
+ atom = create_world_atom(pkg, args_set, root_config)
try:
+
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
if hasattr(world_set, "load"):
world_set.load() # maybe it's changed on disk
@@ -1857,8 +1964,7 @@ class Scheduler(PollScheduler):
for s in pkg.root_config.setconfig.active:
world_set.remove(SETPREFIX+s)
else:
- atom = create_world_atom(pkg, args_set, root_config)
- if atom:
+ if atom is not None:
if hasattr(world_set, "add"):
self._status_msg(('Recording %s in "world" ' + \
'favorites file...') % atom)
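
The rewritten _job_delay() above drops the old exponential formula in
favor of one driven by the 1-minute load average, delay =
_job_delay_max * avg1 / _max_load, capped at _job_delay_max (now 5
seconds), and it arms a one-shot timeout for the remaining time instead
of polling; the same timeout mechanism also absorbs the post-SIGCONT
settling period. A worked sketch of the arithmetic, with hypothetical
load figures:

    def job_delay_seconds(avg1, max_load, job_delay_max=5):
        # Mirrors Scheduler._job_delay(): the delay grows linearly
        # with the 1-minute load average, capped at _job_delay_max.
        return min(job_delay_max, job_delay_max * avg1 / max_load)

    # With --load-average 4:
    print(job_delay_seconds(2.0, 4))  # 2.5 seconds between job starts
    print(job_delay_seconds(8.0, 4))  # 5 seconds (capped)
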
diff --git a/pym/_emerge/SpawnProcess.py b/pym/_emerge/SpawnProcess.py
index 9fbc96472..15d3dc5cf 100644
--- a/pym/_emerge/SpawnProcess.py
+++ b/pym/_emerge/SpawnProcess.py
@@ -1,17 +1,23 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 2008-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.SubProcess import SubProcess
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+import errno
+import logging
+import signal
import sys
-from portage.cache.mappings import slot_dict_class
+
+from _emerge.SubProcess import SubProcess
import portage
-from portage import _encodings
-from portage import _unicode_encode
from portage import os
from portage.const import BASH_BINARY
-import fcntl
-import errno
-import gzip
+from portage.util import writemsg_level
+from portage.util._async.PipeLogger import PipeLogger
class SpawnProcess(SubProcess):
@@ -23,31 +29,27 @@ class SpawnProcess(SubProcess):
_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
"uid", "gid", "groups", "umask", "logfile",
- "path_lookup", "pre_exec")
+ "path_lookup", "pre_exec", "close_fds", "cgroup",
+ "unshare_ipc", "unshare_net")
__slots__ = ("args",) + \
- _spawn_kwarg_names + ("_log_file_real", "_selinux_type",)
-
- _file_names = ("log", "process", "stdout")
- _files_dict = slot_dict_class(_file_names, prefix="")
+ _spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)
def _start(self):
if self.fd_pipes is None:
self.fd_pipes = {}
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
fd_pipes = self.fd_pipes
- self._files = self._files_dict()
- files = self._files
-
master_fd, slave_fd = self._pipe(fd_pipes)
- fcntl.fcntl(master_fd, fcntl.F_SETFL,
- fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
- files.process = master_fd
- logfile = None
- if self._can_log(slave_fd):
- logfile = self.logfile
+ can_log = self._can_log(slave_fd)
+ if can_log:
+ log_file_path = self.logfile
+ else:
+ log_file_path = None
null_input = None
if not self.background or 0 in fd_pipes:
@@ -62,48 +64,34 @@ class SpawnProcess(SubProcess):
null_input = os.open('/dev/null', os.O_RDWR)
fd_pipes[0] = null_input
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
# flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
- if logfile is not None:
+ fd_pipes_orig = fd_pipes.copy()
- fd_pipes_orig = fd_pipes.copy()
+ if log_file_path is not None or self.background:
fd_pipes[1] = slave_fd
fd_pipes[2] = slave_fd
- files.log = open(_unicode_encode(logfile,
- encoding=_encodings['fs'], errors='strict'), mode='ab')
- if logfile.endswith('.gz'):
- self._log_file_real = files.log
- files.log = gzip.GzipFile(filename='', mode='ab',
- fileobj=files.log)
-
- portage.util.apply_secpass_permissions(logfile,
- uid=portage.portage_uid, gid=portage.portage_gid,
- mode=0o660)
-
- if not self.background:
- files.stdout = os.dup(fd_pipes_orig[1])
-
- output_handler = self._output_handler
-
else:
-
- # Create a dummy pipe so the scheduler can monitor
- # the process from inside a poll() loop.
- fd_pipes[self._dummy_pipe_fd] = slave_fd
- if self.background:
- fd_pipes[1] = slave_fd
- fd_pipes[2] = slave_fd
- output_handler = self._dummy_handler
+ # Create a dummy pipe that PipeLogger uses to efficiently
+ # monitor for process exit by listening for the EOF event.
+ # Re-use of the allocated fd number for the key in fd_pipes
+ # guarantees that the keys will not collide for similarly
+ # allocated pipes which are used by callers such as
+ # FileDigester and MergeProcess. See the _setup_pipes
+ # docstring for more benefits of this allocation approach.
+ self._dummy_pipe_fd = slave_fd
+ fd_pipes[slave_fd] = slave_fd
kwargs = {}
for k in self._spawn_kwarg_names:
@@ -115,10 +103,6 @@ class SpawnProcess(SubProcess):
kwargs["returnpid"] = True
kwargs.pop("logfile", None)
- self._reg_id = self.scheduler.register(files.process,
- self._registered_events, output_handler)
- self._registered = True
-
retval = self._spawn(self.args, **kwargs)
os.close(slave_fd)
@@ -129,11 +113,32 @@ class SpawnProcess(SubProcess):
# spawn failed
self._unregister()
self._set_returncode((self.pid, retval))
- self.wait()
+ self._async_wait()
return
self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
+
+ stdout_fd = None
+ if can_log and not self.background:
+ stdout_fd = os.dup(fd_pipes_orig[1])
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._pipe_logger = PipeLogger(background=self.background,
+ scheduler=self.scheduler, input_fd=master_fd,
+ log_file_path=log_file_path,
+ stdout_fd=stdout_fd)
+ self._pipe_logger.addExitListener(self._pipe_logger_exit)
+ self._pipe_logger.start()
+ self._registered = True
def _can_log(self, slave_fd):
return True
@@ -157,92 +162,56 @@ class SpawnProcess(SubProcess):
return spawn_func(args, **kwargs)
- def _output_handler(self, fd, event):
-
- files = self._files
- while True:
- buf = self._read_buf(fd, event)
-
- if buf is None:
- # not a POLLIN event, EAGAIN, etc...
- break
-
- if not buf:
- # EOF
- self._unregister()
- self.wait()
- break
-
- else:
- if not self.background:
- write_successful = False
- failures = 0
- while True:
- try:
- if not write_successful:
- os.write(files.stdout, buf)
- write_successful = True
- break
- except OSError as e:
- if e.errno != errno.EAGAIN:
- raise
- del e
- failures += 1
- if failures > 50:
- # Avoid a potentially infinite loop. In
- # most cases, the failure count is zero
- # and it's unlikely to exceed 1.
- raise
-
- # This means that a subprocess has put an inherited
- # stdio file descriptor (typically stdin) into
- # O_NONBLOCK mode. This is not acceptable (see bug
- # #264435), so revert it. We need to use a loop
- # here since there's a race condition due to
- # parallel processes being able to change the
- # flags on the inherited file descriptor.
- # TODO: When possible, avoid having child processes
- # inherit stdio file descriptors from portage
- # (maybe it can't be avoided with
- # PROPERTIES=interactive).
- fcntl.fcntl(files.stdout, fcntl.F_SETFL,
- fcntl.fcntl(files.stdout,
- fcntl.F_GETFL) ^ os.O_NONBLOCK)
-
- files.log.write(buf)
- files.log.flush()
-
- self._unregister_if_appropriate(event)
-
- return True
-
- def _dummy_handler(self, fd, event):
- """
- This method is mainly interested in detecting EOF, since
- the only purpose of the pipe is to allow the scheduler to
- monitor the process from inside a poll() loop.
- """
-
- while True:
- buf = self._read_buf(fd, event)
-
- if buf is None:
- # not a POLLIN event, EAGAIN, etc...
- break
-
- if not buf:
- # EOF
- self._unregister()
- self.wait()
- break
-
- self._unregister_if_appropriate(event)
-
- return True
-
- def _unregister(self):
- super(SpawnProcess, self)._unregister()
- if self._log_file_real is not None:
- # Avoid "ResourceWarning: unclosed file" since python 3.2.
- self._log_file_real.close()
- self._log_file_real = None
+ def _pipe_logger_exit(self, pipe_logger):
+ self._pipe_logger = None
+ self._unregister()
+ self.wait()
+
+ def _waitpid_loop(self):
+ SubProcess._waitpid_loop(self)
+
+ pipe_logger = self._pipe_logger
+ if pipe_logger is not None:
+ self._pipe_logger = None
+ pipe_logger.removeExitListener(self._pipe_logger_exit)
+ pipe_logger.cancel()
+ pipe_logger.wait()
+
+ def _set_returncode(self, wait_retval):
+ SubProcess._set_returncode(self, wait_retval)
+
+ if self.cgroup:
+ def get_pids(cgroup):
+ try:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'r') as f:
+ return [int(p) for p in f.read().split()]
+ except OSError:
+ # cgroup removed already?
+ return []
+
+ def kill_all(pids, sig):
+ for p in pids:
+ try:
+ os.kill(p, sig)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (p,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+
+ # step 1: kill all orphans
+ pids = get_pids(self.cgroup)
+ if pids:
+ kill_all(pids, signal.SIGKILL)
+
+ # step 2: remove the cgroup
+ try:
+ os.rmdir(self.cgroup)
+ except OSError:
+ # it may already be removed, or busy;
+ # there is nothing more we can do about it
+ pass
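
All of the removed output plumbing now lives in PipeLogger: it tees the
master end of the pipe into the log file and/or the duplicated stdout,
and announces EOF through its exit listener, at which point
_pipe_logger_exit() unregisters the process and waits on it. A minimal
sketch of the same wiring, using the constructor arguments shown above
(the log path is hypothetical):

    import os

    from portage.util._async.PipeLogger import PipeLogger
    from portage.util._async.SchedulerInterface import SchedulerInterface
    from portage.util._eventloop.EventLoop import EventLoop

    sched_iface = SchedulerInterface(EventLoop(main=False))
    master_fd, slave_fd = os.pipe()

    logger = PipeLogger(background=False, scheduler=sched_iface,
        input_fd=master_fd, log_file_path="/tmp/example.log",
        stdout_fd=os.dup(1))
    logger.addExitListener(lambda task: None)  # _pipe_logger_exit's role
    logger.start()

    os.write(slave_fd, b"hello\n")
    os.close(slave_fd)  # EOF ends the logger, like process exit would
    logger.wait()
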
diff --git a/pym/_emerge/SubProcess.py b/pym/_emerge/SubProcess.py
index 76b313fc2..13d938297 100644
--- a/pym/_emerge/SubProcess.py
+++ b/pym/_emerge/SubProcess.py
@@ -1,7 +1,10 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import logging
+
from portage import os
+from portage.util import writemsg_level
from _emerge.AbstractPollTask import AbstractPollTask
import signal
import errno
@@ -9,12 +12,7 @@ import errno
class SubProcess(AbstractPollTask):
__slots__ = ("pid",) + \
- ("_files", "_reg_id")
-
- # A file descriptor is required for the scheduler to monitor changes from
- # inside a poll() loop. When logging is not enabled, create a pipe just to
- # serve this purpose alone.
- _dummy_pipe_fd = 9
+ ("_dummy_pipe_fd", "_files", "_reg_id")
# This is how much time we allow for waitpid to succeed after
# we've sent a kill signal to our subprocess.
@@ -50,7 +48,13 @@ class SubProcess(AbstractPollTask):
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
- if e.errno != errno.ESRCH:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
raise
def isAlive(self):
@@ -69,7 +73,13 @@ class SubProcess(AbstractPollTask):
try:
os.kill(self.pid, signal.SIGKILL)
except OSError as e:
- if e.errno != errno.ESRCH:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
raise
del e
self._wait_loop(timeout=self._cancel_timeout)
@@ -116,7 +126,7 @@ class SubProcess(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
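
The two hunks above add identical EPERM handling to the SIGTERM path in
_cancel() and to the SIGKILL retry; a shared helper along these lines
(hypothetical, not part of the patch) would express the logic once,
with callers passing e.g. signal.SIGTERM:

    import errno
    import logging
    import os

    from portage.util import writemsg_level

    def _try_kill(pid, sig):
        # Same semantics as both hunks: EPERM is reported (hardened
        # kernels, bug #358211), ESRCH is ignored, anything else
        # propagates.
        try:
            os.kill(pid, sig)
        except OSError as e:
            if e.errno == errno.EPERM:
                writemsg_level(
                    "!!! kill: (%i) - Operation not permitted\n" %
                    (pid,), level=logging.ERROR, noiselevel=-1)
            elif e.errno != errno.ESRCH:
                raise
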
diff --git a/pym/_emerge/Task.py b/pym/_emerge/Task.py
index 40f5066c0..250d45802 100644
--- a/pym/_emerge/Task.py
+++ b/pym/_emerge/Task.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.util.SlotObject import SlotObject
@@ -41,3 +41,10 @@ class Task(SlotObject):
strings.
"""
return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
+
+ def __repr__(self):
+ if self._hash_key is None:
+ # triggered by python-trace
+ return SlotObject.__repr__(self)
+ return "<%s (%s)>" % (self.__class__.__name__,
+ ", ".join(("'%s'" % x for x in self._hash_key)))
diff --git a/pym/_emerge/TaskScheduler.py b/pym/_emerge/TaskScheduler.py
deleted file mode 100644
index 583bfe323..000000000
--- a/pym/_emerge/TaskScheduler.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from _emerge.QueueScheduler import QueueScheduler
-from _emerge.SequentialTaskQueue import SequentialTaskQueue
-
-class TaskScheduler(object):
-
- """
- A simple way to handle scheduling of AsynchronousTask instances. Simply
- add tasks and call run(). The run() method returns when no tasks remain.
- """
-
- def __init__(self, main=True, max_jobs=None, max_load=None):
- self._queue = SequentialTaskQueue(max_jobs=max_jobs)
- self._scheduler = QueueScheduler(main=main,
- max_jobs=max_jobs, max_load=max_load)
- self.sched_iface = self._scheduler.sched_iface
- self.run = self._scheduler.run
- self.clear = self._scheduler.clear
- self.wait = self._queue.wait
- self._scheduler.add(self._queue)
-
- def add(self, task):
- self._queue.add(task)
-
diff --git a/pym/_emerge/UnmergeDepPriority.py b/pym/_emerge/UnmergeDepPriority.py
index 43166006f..ec44a67a1 100644
--- a/pym/_emerge/UnmergeDepPriority.py
+++ b/pym/_emerge/UnmergeDepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
@@ -7,15 +7,16 @@ class UnmergeDepPriority(AbstractDepPriority):
"""
Combination of properties Priority Category
- runtime 0 HARD
- runtime_post -1 HARD
- buildtime -2 SOFT
- (none of the above) -2 SOFT
+ runtime_slot_op 0 HARD
+ runtime -1 HARD
+ runtime_post -2 HARD
+ buildtime -3 SOFT
+ (none of the above) -3 SOFT
"""
MAX = 0
- SOFT = -2
- MIN = -2
+ SOFT = -3
+ MIN = -3
def __init__(self, **kwargs):
AbstractDepPriority.__init__(self, **kwargs)
@@ -23,17 +24,21 @@ class UnmergeDepPriority(AbstractDepPriority):
self.optional = True
def __int__(self):
- if self.runtime:
+ if self.runtime_slot_op:
return 0
- if self.runtime_post:
+ if self.runtime:
return -1
- if self.buildtime:
+ if self.runtime_post:
return -2
- return -2
+ if self.buildtime:
+ return -3
+ return -3
def __str__(self):
if self.ignored:
return "ignored"
+ if self.runtime_slot_op:
+ return "hard slot op"
myvalue = self.__int__()
if myvalue > self.SOFT:
return "hard"
diff --git a/pym/_emerge/UseFlagDisplay.py b/pym/_emerge/UseFlagDisplay.py
index 3daca19e1..f46047454 100644
--- a/pym/_emerge/UseFlagDisplay.py
+++ b/pym/_emerge/UseFlagDisplay.py
@@ -1,10 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
from itertools import chain
import sys
-from portage import _encodings, _unicode_decode, _unicode_encode
+from portage import _encodings, _unicode_encode
from portage.output import red
from portage.util import cmp_sort_key
from portage.output import blue
@@ -114,9 +116,9 @@ def pkg_use_display(pkg, opts, modified_use=None):
flags.sort(key=UseFlagDisplay.sort_combined)
else:
flags.sort(key=UseFlagDisplay.sort_separated)
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that UseFlagDisplay.__unicode__() is called in python2.
flag_displays.append('%s="%s"' % (varname,
- ' '.join(_unicode_decode("%s") % (f,) for f in flags)))
+ ' '.join("%s" % (f,) for f in flags)))
return ' '.join(flag_displays)
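
The swap from _unicode_decode("%s") to a plain literal works because
unicode_literals makes every literal in the module unicode, and in
python2 a unicode format string resolves its arguments through
__unicode__() while a byte string would use __str__(); python3 only has
__str__(). A self-contained illustration:

    from __future__ import unicode_literals

    class Flag(object):
        def __str__(self):
            return "via __str__"
        def __unicode__(self):
            return "via __unicode__"

    # python2 prints "via __unicode__" because "%s" is unicode here;
    # python3 has no __unicode__ protocol and prints "via __str__".
    print("%s" % (Flag(),))
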
diff --git a/pym/_emerge/actions.py b/pym/_emerge/actions.py
index 9a023a84a..2a1354b6b 100644
--- a/pym/_emerge/actions.py
+++ b/pym/_emerge/actions.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import errno
import logging
@@ -18,27 +18,35 @@ import sys
import tempfile
import textwrap
import time
+import warnings
from itertools import chain
import portage
portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi._similar_name_search:similar_name_search',
+ 'portage.debug',
'portage.news:count_unread_news,display_news_notifications',
+ 'portage.util._get_vm_info:get_vm_info',
+ '_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
+ '_emerge.help:help@emerge_help',
+ '_emerge.post_emerge:display_news_notification,post_emerge',
+ '_emerge.stdout_spinner:stdout_spinner',
)
from portage.localization import _
from portage import os
from portage import shutil
-from portage import eapi_is_supported, _unicode_decode
+from portage import eapi_is_supported, _encodings, _unicode_decode
from portage.cache.cache_errors import CacheError
-from portage.const import GLOBAL_CONFIG_PATH
-from portage.const import _ENABLE_DYN_LINK_MAP
+from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
+from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi._expand_new_virt import expand_new_virt
from portage.dep import Atom
from portage.eclass_cache import hashed_path
-from portage.exception import InvalidAtom, InvalidData
+from portage.exception import InvalidAtom, InvalidData, ParseError
from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
- red, yellow
+ red, xtermTitle, xtermTitleReset, yellow
good = create_color_func("GOOD")
bad = create_color_func("BAD")
warn = create_color_func("WARN")
@@ -46,9 +54,13 @@ from portage.package.ebuild._ipc.QueryCommand import QueryCommand
from portage.package.ebuild.doebuild import _check_temp_dir
from portage._sets import load_default_config, SETPREFIX
from portage._sets.base import InternalPackageSet
-from portage.util import cmp_sort_key, writemsg, \
+from portage.util import cmp_sort_key, writemsg, varexpand, \
writemsg_level, writemsg_stdout
from portage.util.digraph import digraph
+from portage.util.SlotObject import SlotObject
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
from portage._global_updates import _global_updates
from _emerge.clear_caches import clear_caches
@@ -277,8 +289,14 @@ def action_build(settings, trees, mtimedb,
"dropped due to\n" + \
"!!! masking or unsatisfied dependencies:\n\n",
noiselevel=-1)
- for task in dropped_tasks:
- portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
+ for task, atoms in dropped_tasks.items():
+ if not atoms:
+ writemsg(" %s is masked or unavailable\n" %
+ (task,), noiselevel=-1)
+ else:
+ writemsg(" %s requires %s\n" %
+ (task, ", ".join(atoms)), noiselevel=-1)
+
portage.writemsg("\n", noiselevel=-1)
del dropped_tasks
else:
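
dropped_tasks is now a mapping from each dropped package to the
unsatisfied atoms behind the drop, where an empty value means the
package itself was masked or unavailable. The shape the loop above
consumes, with hypothetical data:

    # Hypothetical contents, matching the new dropped_tasks shape:
    dropped_tasks = {
        "sys-apps/foo-1.0": ["dev-libs/bar:2"],
        "sys-apps/baz-0.5": [],
    }
    for task, atoms in dropped_tasks.items():
        if not atoms:
            print(" %s is masked or unavailable" % (task,))
        else:
            print(" %s requires %s" % (task, ", ".join(atoms)))
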
@@ -309,6 +327,7 @@ def action_build(settings, trees, mtimedb,
mydepgraph.display_problems()
return 1
+ mergecount = None
if "--pretend" not in myopts and \
("--ask" in myopts or "--tree" in myopts or \
"--verbose" in myopts) and \
@@ -320,7 +339,7 @@ def action_build(settings, trees, mtimedb,
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=tree),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -329,7 +348,7 @@ def action_build(settings, trees, mtimedb,
prompt="Would you like to resume merging these packages?"
else:
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=("--tree" in myopts)),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -340,6 +359,7 @@ def action_build(settings, trees, mtimedb,
if isinstance(x, Package) and x.operation == "merge":
mergecount += 1
+ prompt = None
if mergecount==0:
sets = trees[settings['EROOT']]['root_config'].sets
world_candidates = None
@@ -352,14 +372,11 @@ def action_build(settings, trees, mtimedb,
world_candidates = [x for x in favorites \
if not (x.startswith(SETPREFIX) and \
not sets[x[1:]].world_candidate)]
+
if "selective" in myparams and \
not oneshot and world_candidates:
- print()
- for x in world_candidates:
- print(" %s %s" % (good("*"), x))
- prompt="Would you like to add these packages to your world favorites?"
- elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
- prompt="Nothing to merge; would you like to auto-clean packages?"
+ # Prompt later, inside saveNomergeFavorites.
+ prompt = None
else:
print()
print("Nothing to merge; quitting.")
@@ -370,13 +387,15 @@ def action_build(settings, trees, mtimedb,
else:
prompt="Would you like to merge these packages?"
print()
- if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
+ if prompt is not None and "--ask" in myopts and \
+ userquery(prompt, enter_invalid) == "No":
print()
print("Quitting.")
print()
return 128 + signal.SIGINT
# Don't ask again (e.g. when auto-cleaning packages after merge)
- myopts.pop("--ask", None)
+ if mergecount != 0:
+ myopts.pop("--ask", None)
if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
if ("--resume" in myopts):
@@ -386,7 +405,7 @@ def action_build(settings, trees, mtimedb,
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=tree),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -394,39 +413,14 @@ def action_build(settings, trees, mtimedb,
return retval
else:
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=("--tree" in myopts)),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
if retval != os.EX_OK:
return retval
- if "--buildpkgonly" in myopts:
- graph_copy = mydepgraph._dynamic_config.digraph.copy()
- removed_nodes = set()
- for node in graph_copy:
- if not isinstance(node, Package) or \
- node.operation == "nomerge":
- removed_nodes.add(node)
- graph_copy.difference_update(removed_nodes)
- if not graph_copy.hasallzeros(ignore_priority = \
- DepPrioritySatisfiedRange.ignore_medium):
- print("\n!!! --buildpkgonly requires all dependencies to be merged.")
- print("!!! You have to merge the dependencies before you can build this package.\n")
- return 1
+
else:
- if "--buildpkgonly" in myopts:
- graph_copy = mydepgraph._dynamic_config.digraph.copy()
- removed_nodes = set()
- for node in graph_copy:
- if not isinstance(node, Package) or \
- node.operation == "nomerge":
- removed_nodes.add(node)
- graph_copy.difference_update(removed_nodes)
- if not graph_copy.hasallzeros(ignore_priority = \
- DepPrioritySatisfiedRange.ignore_medium):
- print("\n!!! --buildpkgonly requires all dependencies to be merged.")
- print("!!! Cannot merge requested packages. Merge deps and try again.\n")
- return 1
if not mergelist_shown:
# If we haven't already shown the merge list above, at
@@ -446,25 +440,29 @@ def action_build(settings, trees, mtimedb,
mydepgraph.saveNomergeFavorites()
- mergetask = Scheduler(settings, trees, mtimedb, myopts,
- spinner, favorites=favorites,
- graph_config=mydepgraph.schedulerGraph())
-
- del mydepgraph
- clear_caches(trees)
-
- retval = mergetask.merge()
-
- if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
- if "yes" == settings.get("AUTOCLEAN"):
- portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
- unmerge(trees[settings['EROOT']]['root_config'],
- myopts, "clean", [],
- ldpath_mtimes, autoclean=1)
- else:
- portage.writemsg_stdout(colorize("WARN", "WARNING:")
- + " AUTOCLEAN is disabled. This can cause serious"
- + " problems due to overlapping packages.\n")
+ if mergecount == 0:
+ retval = os.EX_OK
+ else:
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and \
+ not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings['EROOT']]['root_config'],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
return retval
@@ -544,7 +542,8 @@ def action_depclean(settings, trees, ldpath_mtimes,
# specific packages.
msg = []
- if not _ENABLE_DYN_LINK_MAP:
+ if "preserve-libs" not in settings.features and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) == "n":
msg.append("Depclean may break link level dependencies. Thus, it is\n")
msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
@@ -610,11 +609,17 @@ def action_depclean(settings, trees, ldpath_mtimes,
if not cleanlist and "--quiet" in myopts:
return rval
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
print("Packages installed: " + str(len(vardb.cpv_all())))
- print("Packages in world: " + \
- str(len(root_config.sets["selected"].getAtoms())))
- print("Packages in system: " + \
- str(len(root_config.sets["system"].getAtoms())))
+ print("Packages in world: %d" % len(set_atoms["selected"]))
+ print("Packages in system: %d" % len(set_atoms["system"]))
print("Required packages: "+str(req_pkg_count))
if "--pretend" in myopts:
print("Number to remove: "+str(len(cleanlist)))
@@ -647,13 +652,21 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_sets[protected_set_name] = protected_set
system_set = psets["system"]
- if not system_set or not selected_set:
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ if not set_atoms["system"] or not set_atoms["selected"]:
- if not system_set:
+ if not set_atoms["system"]:
writemsg_level("!!! You have no system list.\n",
level=logging.ERROR, noiselevel=-1)
- if not selected_set:
+ if not set_atoms["selected"]:
writemsg_level("!!! You have no world file.\n",
level=logging.WARNING, noiselevel=-1)
@@ -697,7 +710,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
@@ -751,7 +764,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
@@ -769,7 +782,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_sets['__excluded__'].add("=" + pkg.cpv)
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
required_sets['__excluded__'].add("=" + pkg.cpv)
@@ -805,7 +818,12 @@ def calc_depclean(settings, trees, ldpath_mtimes,
msg.append("the following required packages not being installed:")
msg.append("")
for atom, parent in unresolvable:
- msg.append(" %s pulled in by:" % (atom,))
+ if atom != atom.unevaluated_atom and \
+ vardb.match(_unicode(atom)):
+ msg.append(" %s (%s) pulled in by:" %
+ (atom.unevaluated_atom, atom))
+ else:
+ msg.append(" %s pulled in by:" % (atom,))
msg.append(" %s" % (parent,))
msg.append("")
msg.extend(textwrap.wrap(
@@ -848,15 +866,27 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_pkgs_total += 1
def show_parents(child_node):
- parent_nodes = graph.parent_nodes(child_node)
- if not parent_nodes:
+ parent_atoms = \
+ resolver._dynamic_config._parent_atoms.get(child_node, [])
+
+ # Never display the special internal protected_set.
+ parent_atoms = [parent_atom for parent_atom in parent_atoms
+ if not (isinstance(parent_atom[0], SetArg) and
+ parent_atom[0].name == protected_set_name)]
+
+ if not parent_atoms:
# With --prune, the highest version can be pulled in without any
# real parent since all installed packages are pulled in. In that
# case there's nothing to show here.
return
+ parent_atom_dict = {}
+ for parent, atom in parent_atoms:
+ parent_atom_dict.setdefault(parent, []).append(atom)
+
parent_strs = []
- for node in parent_nodes:
- parent_strs.append(str(getattr(node, "cpv", node)))
+ for parent, atoms in parent_atom_dict.items():
+ parent_strs.append("%s requires %s" %
+ (getattr(parent, "cpv", parent), ", ".join(atoms)))
parent_strs.sort()
msg = []
msg.append(" %s pulled in by:\n" % (child_node.cpv,))
@@ -881,12 +911,6 @@ def calc_depclean(settings, trees, ldpath_mtimes,
graph.debug_print()
writemsg("\n", noiselevel=-1)
- # Never display the special internal protected_set.
- for node in graph:
- if isinstance(node, SetArg) and node.name == protected_set_name:
- graph.remove(node)
- break
-
pkgs_to_remove = []
if action == "depclean":
@@ -939,10 +963,19 @@ def calc_depclean(settings, trees, ldpath_mtimes,
cleanlist = create_cleanlist()
clean_set = set(cleanlist)
- if cleanlist and \
- real_vardb._linkmap is not None and \
- myopts.get("--depclean-lib-check") != "n" and \
- "preserve-libs" not in settings.features:
+ depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
+ preserve_libs = "preserve-libs" in settings.features
+ preserve_libs_restrict = False
+
+ if depclean_lib_check and preserve_libs:
+ for pkg in cleanlist:
+ if "preserve-libs" in pkg.restrict:
+ preserve_libs_restrict = True
+ break
+
+ if depclean_lib_check and \
+ (preserve_libs_restrict or not preserve_libs):
# Check if any of these packages are the sole providers of libraries
# with consumers that have not been selected for removal. If so, these
@@ -955,6 +988,13 @@ def calc_depclean(settings, trees, ldpath_mtimes,
writemsg_level(">>> Checking for lib consumers...\n")
for pkg in cleanlist:
+
+ if preserve_libs and "preserve-libs" not in pkg.restrict:
+ # Any needed libraries will be preserved
+ # when this package is unmerged, so there's
+ # no need to account for it here.
+ continue
+
pkg_dblink = real_vardb._dblink(pkg.cpv)
consumers = {}
@@ -1109,7 +1149,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
"installed", root_config, installed=True)
if not resolver._add_pkg(pkg,
Dependency(parent=consumer_pkg,
- priority=UnmergeDepPriority(runtime=True),
+ priority=UnmergeDepPriority(runtime=True,
+ runtime_slot_op=True),
root=pkg.root)):
resolver.display_problems()
return 1, [], False, 0
@@ -1146,30 +1187,30 @@ def calc_depclean(settings, trees, ldpath_mtimes,
graph = digraph()
del cleanlist[:]
- dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
runtime = UnmergeDepPriority(runtime=True)
runtime_post = UnmergeDepPriority(runtime_post=True)
buildtime = UnmergeDepPriority(buildtime=True)
priority_map = {
"RDEPEND": runtime,
"PDEPEND": runtime_post,
+ "HDEPEND": buildtime,
"DEPEND": buildtime,
}
for node in clean_set:
graph.add(node, None)
- for dep_type in dep_keys:
- depstr = node.metadata[dep_type]
+ for dep_type in Package._dep_keys:
+ depstr = node._metadata[dep_type]
if not depstr:
continue
priority = priority_map[dep_type]
if debug:
- writemsg_level(_unicode_decode("\nParent: %s\n") \
+ writemsg_level("\nParent: %s\n"
% (node,), noiselevel=-1, level=logging.DEBUG)
- writemsg_level(_unicode_decode( "Depstring: %s\n") \
+ writemsg_level( "Depstring: %s\n"
% (depstr,), noiselevel=-1, level=logging.DEBUG)
- writemsg_level(_unicode_decode( "Priority: %s\n") \
+ writemsg_level( "Priority: %s\n"
% (priority,), noiselevel=-1, level=logging.DEBUG)
try:
@@ -1183,7 +1224,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
if debug:
writemsg_level("Candidates: [%s]\n" % \
- ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
+ ', '.join("'%s'" % (x,) for x in atoms),
noiselevel=-1, level=logging.DEBUG)
for atom in atoms:
@@ -1197,7 +1238,15 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
for child_node in matches:
if child_node in clean_set:
- graph.add(child_node, node, priority=priority)
+
+ mypriority = priority.copy()
+ if atom.slot_operator_built:
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
+ graph.add(child_node, node, priority=mypriority)
if debug:
writemsg_level("\nunmerge digraph:\n\n",
@@ -1277,11 +1326,8 @@ def action_deselect(settings, trees, opts, atoms):
allow_repo=True, allow_wildcard=True))
for cpv in vardb.match(atom):
- slot, = vardb.aux_get(cpv, ["SLOT"])
- if not slot:
- slot = "0"
- expanded_atoms.add(Atom("%s:%s" % \
- (portage.cpv_getkey(cpv), slot)))
+ pkg = vardb._pkg_str(cpv, None)
+ expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))
discard_atoms = set()
for atom in world_set:
@@ -1352,10 +1398,90 @@ class _info_pkgs_ver(object):
def action_info(settings, trees, myopts, myfiles):
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ eroot = settings['EROOT']
+ vardb = trees[eroot]["vartree"].dbapi
+ portdb = trees[eroot]['porttree'].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ for x in myfiles:
+ any_match = False
+ cp_exists = bool(vardb.match(x.cp))
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ any_match = True
+
+ if any_match:
+ continue
+
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ # Use match instead of cp_list, to account for old-style virtuals.
+ if not cp_exists and db.match(x.cp):
+ cp_exists = True
+ # Search for masked packages too.
+ if not cp_exists and hasattr(db, "xmatch") and \
+ db.xmatch("match-all", x.cp):
+ cp_exists = True
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ if not cp_exists:
+ xinfo = '"%s"' % x.unevaluated_atom
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if settings["ROOT"] != "/":
+ xinfo = "%s for %s" % (xinfo, eroot)
+ writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
+ colorize("INFORM", xinfo), noiselevel=-1)
+
+ if myopts.get("--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ dbs = [vardb]
+ #if "--usepkgonly" not in myopts:
+ dbs.append(portdb)
+ if "--usepkg" in myopts:
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, x)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+
+ return 1
+
output_buffer = []
append = output_buffer.append
root_config = trees[settings['EROOT']]['root_config']
- running_eroot = trees._running_eroot
+ chost = settings.get("CHOST")
append(getportageversion(settings["PORTDIR"], None,
settings.profile_path, settings["CHOST"],
@@ -1369,6 +1495,18 @@ def action_info(settings, trees, myopts, myfiles):
append(header_width * "=")
append("System uname: %s" % (platform.platform(aliased=1),))
+ vm_info = get_vm_info()
+ if "ram.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] / 1024)
+ if "ram.free" in vm_info:
+ line += ",%10d free" % (vm_info["ram.free"] / 1024,)
+ append(line)
+ if "swap.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] / 1024)
+ if "swap.free" in vm_info:
+ line += ",%10d free" % (vm_info["swap.free"] / 1024,)
+ append(line)
+
lastSync = portage.grabfile(os.path.join(
settings["PORTDIR"], "metadata", "timestamp.chk"))
if lastSync:
@@ -1377,6 +1515,23 @@ def action_info(settings, trees, myopts, myfiles):
lastSync = "Unknown"
append("Timestamp of tree: %s" % (lastSync,))
+ ld_names = []
+ if chost:
+ ld_names.append(chost + "-ld")
+ ld_names.append("ld")
+ for name in ld_names:
+ try:
+ proc = subprocess.Popen([name, "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0]).splitlines()
+ # communicate() already reaps the process, so a single wait()
+ # call suffices to collect the cached exit status.
+ if proc.wait() == os.EX_OK and output:
+ append("ld %s" % (output[0]))
+ break
+
try:
proc = subprocess.Popen(["distcc", "--version"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -1413,7 +1568,6 @@ def action_info(settings, trees, myopts, myfiles):
"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
atoms = []
- vardb = trees[running_eroot]['vartree'].dbapi
for x in myvars:
try:
x = Atom(x)
@@ -1426,7 +1580,6 @@ def action_info(settings, trees, myopts, myfiles):
myvars = sorted(set(atoms))
- portdb = trees[running_eroot]['porttree'].dbapi
main_repo = portdb.getRepositoryName(portdb.porttree_root)
cp_map = {}
cp_max_len = 0
@@ -1493,7 +1646,7 @@ def action_info(settings, trees, myopts, myfiles):
'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
'PORTAGE_BZIP2_COMMAND',
'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
- 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
'EMERGE_DEFAULT_OPTS']
myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
@@ -1539,40 +1692,7 @@ def action_info(settings, trees, myopts, myfiles):
append("")
writemsg_stdout("\n".join(output_buffer),
noiselevel=-1)
-
- # See if we can find any packages installed matching the strings
- # passed on the command line
- mypkgs = []
- eroot = settings['EROOT']
- vardb = trees[eroot]["vartree"].dbapi
- portdb = trees[eroot]['porttree'].dbapi
- bindb = trees[eroot]["bintree"].dbapi
- for x in myfiles:
- match_found = False
- installed_match = vardb.match(x)
- for installed in installed_match:
- mypkgs.append((installed, "installed"))
- match_found = True
-
- if match_found:
- continue
-
- for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
- if pkg_type == "binary" and "--usepkg" not in myopts:
- continue
-
- matches = db.match(x)
- matches.reverse()
- for match in matches:
- if pkg_type == "binary":
- if db.bintree.isremote(match):
- continue
- auxkeys = ["EAPI", "DEFINED_PHASES"]
- metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
- if metadata["EAPI"] not in ("0", "1", "2", "3") and \
- "info" in metadata["DEFINED_PHASES"].split():
- mypkgs.append((match, pkg_type))
- break
+ del output_buffer[:]
# If some packages were found...
if mypkgs:
@@ -1586,11 +1706,15 @@ def action_info(settings, trees, myopts, myfiles):
# Loop through each package
# Only print settings if they differ from global settings
header_title = "Package Settings"
- print(header_width * "=")
- print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
- print(header_width * "=")
- from portage.output import EOutput
- out = EOutput()
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ out = portage.output.EOutput()
for mypkg in mypkgs:
cpv = mypkg[0]
pkg_type = mypkg[1]
@@ -1608,28 +1732,32 @@ def action_info(settings, trees, myopts, myfiles):
root_config=root_config, type_name=pkg_type)
if pkg_type == "installed":
- print("\n%s was built with the following:" % \
+ append("\n%s was built with the following:" % \
colorize("INFORM", str(pkg.cpv)))
elif pkg_type == "ebuild":
- print("\n%s would be build with the following:" % \
+ append("\n%s would be build with the following:" % \
colorize("INFORM", str(pkg.cpv)))
elif pkg_type == "binary":
- print("\n%s (non-installed binary) was built with the following:" % \
+ append("\n%s (non-installed binary) was built with the following:" % \
colorize("INFORM", str(pkg.cpv)))
- writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
- noiselevel=-1)
+ append('%s' % pkg_use_display(pkg, myopts))
if pkg_type == "installed":
for myvar in mydesiredvars:
if metadata[myvar].split() != settings.get(myvar, '').split():
- print("%s=\"%s\"" % (myvar, metadata[myvar]))
- print()
+ append("%s=\"%s\"" % (myvar, metadata[myvar]))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
if metadata['DEFINED_PHASES']:
if 'info' not in metadata['DEFINED_PHASES'].split():
continue
- print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
+ writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
+ % pkg.cpv, noiselevel=-1)
if pkg_type == "installed":
ebuildpath = vardb.findname(pkg.cpv)
@@ -1856,6 +1984,7 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
print()
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+ portdb.flush_cache()
sys.stdout.flush()
os.umask(old_umask)
@@ -1865,35 +1994,12 @@ def action_regen(settings, portdb, max_jobs, max_load):
#regenerate cache entries
sys.stdout.flush()
- regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
- received_signal = []
-
- def emergeexitsig(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
- {"signal":signum})
- regen.terminate()
- received_signal.append(128 + signum)
-
- earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
- earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
+ regen = MetadataRegen(portdb, max_jobs=max_jobs,
+ max_load=max_load, main=True)
- try:
- regen.run()
- finally:
- # Restore previous handlers
- if earlier_sigint_handler is not None:
- signal.signal(signal.SIGINT, earlier_sigint_handler)
- else:
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- if earlier_sigterm_handler is not None:
- signal.signal(signal.SIGTERM, earlier_sigterm_handler)
- else:
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- if received_signal:
- sys.exit(received_signal[0])
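+ # run_main_scheduler() installs its own SIGINT/SIGTERM handlers and
+ # returns the terminating signal number, or None on normal completion.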
+ signum = run_main_scheduler(regen)
+ if signum is not None:
+ sys.exit(128 + signum)
portage.writemsg_stdout("done!\n")
return regen.returncode
@@ -1914,37 +2020,110 @@ def action_search(root_config, myopts, myfiles, spinner):
sys.exit(1)
searchinstance.output()
-def action_sync(settings, trees, mtimedb, myopts, myaction):
+def action_sync(emerge_config, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, opts=DeprecationWarning,
+ action=DeprecationWarning):
+
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_sync() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=action, args=[], trees=trees, opts=opts)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ emergelog(xterm_titles, " === sync")
+
+ selected_repos = []
+ unknown_repo_names = []
+ missing_sync_type = []
+ if emerge_config.args:
+ for repo_name in emerge_config.args:
+ try:
+ repo = emerge_config.target_config.settings.repositories[repo_name]
+ except KeyError:
+ unknown_repo_names.append(repo_name)
+ else:
+ selected_repos.append(repo)
+ if repo.sync_type is None:
+ missing_sync_type.append(repo)
+
+ if unknown_repo_names:
+ writemsg_level("!!! %s\n" % _("Unknown repo(s): %s") %
+ " ".join(unknown_repo_names),
+ level=logging.ERROR, noiselevel=-1)
+
+ if missing_sync_type:
+ writemsg_level("!!! %s\n" %
+ _("Missing sync-type for repo(s): %s") %
+ " ".join(repo.name for repo in missing_sync_type),
+ level=logging.ERROR, noiselevel=-1)
+
+ if unknown_repo_names or missing_sync_type:
+ return 1
+
+ else:
+ selected_repos.extend(emerge_config.target_config.settings.repositories)
+
+ for repo in selected_repos:
+ if repo.sync_type is not None:
+ returncode = _sync_repo(emerge_config, repo)
+ if returncode != os.EX_OK:
+ return returncode
+
+ # Reload the whole config from scratch.
+ portage._sync_mode = False
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ if emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ mybestpv = emerge_config.target_config.trees['porttree'].dbapi.xmatch(
+ "bestmatch-visible", portage.const.PORTAGE_PACKAGE_ATOM)
+ mypvs = portage.best(
+ emerge_config.target_config.trees['vartree'].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM))
+
+ chk_updated_cfg_files(emerge_config.target_config.root,
+ portage.util.shlex_split(
+ emerge_config.target_config.settings.get("CONFIG_PROTECT", "")))
+
+ if mybestpv != mypvs and "--quiet" not in emerge_config.opts:
+ print()
+ print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ print(warn(" * ")+"that you update portage now, before any other packages are updated.")
+ print()
+ print(warn(" * ")+"To update portage, run 'emerge --oneshot portage' now.")
+ print()
+
+ display_news_notification(emerge_config.target_config, emerge_config.opts)
+ return os.EX_OK
+
+def _sync_repo(emerge_config, repo):
+ settings, trees, mtimedb = emerge_config
+ myopts = emerge_config.opts
enter_invalid = '--ask-enter-invalid' in myopts
xterm_titles = "notitles" not in settings.features
- emergelog(xterm_titles, " === sync")
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- myportdir = portdb.porttree_root
- if not myportdir:
- myportdir = settings.get('PORTDIR', '')
- if myportdir and myportdir.strip():
- myportdir = os.path.realpath(myportdir)
- else:
- myportdir = None
+ msg = ">>> Synchronization of repository '%s' located in '%s'..." % (repo.name, repo.location)
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
out = portage.output.EOutput()
- global_config_path = GLOBAL_CONFIG_PATH
- if settings['EPREFIX']:
- global_config_path = os.path.join(settings['EPREFIX'],
- GLOBAL_CONFIG_PATH.lstrip(os.sep))
- if not myportdir:
- sys.stderr.write("!!! PORTDIR is undefined. " + \
- "Is %s/make.globals missing?\n" % global_config_path)
- sys.exit(1)
- if myportdir[-1]=="/":
- myportdir=myportdir[:-1]
try:
- st = os.stat(myportdir)
+ st = os.stat(repo.location)
except OSError:
st = None
if st is None:
- print(">>>",myportdir,"not found, creating it.")
- portage.util.ensure_dirs(myportdir, mode=0o755)
- st = os.stat(myportdir)
+ print(">>> '%s' not found, creating it." % repo.location)
+ portage.util.ensure_dirs(repo.location, mode=0o755)
+ st = os.stat(repo.location)
usersync_uid = None
spawn_kwargs = {}
@@ -1977,59 +2156,51 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if rval != os.EX_OK:
return rval
- syncuri = settings.get("SYNC", "").strip()
- if not syncuri:
- writemsg_level("!!! SYNC is undefined. " + \
- "Is %s/make.globals missing?\n" % global_config_path,
- noiselevel=-1, level=logging.ERROR)
- return 1
+ syncuri = repo.sync_uri
- vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
- vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
+ vcs_dirs = frozenset(VCS_DIRS)
+ vcs_dirs = vcs_dirs.intersection(os.listdir(repo.location))
os.umask(0o022)
dosyncuri = syncuri
updatecache_flg = False
- git = False
- if myaction == "metadata":
- print("skipping sync")
- updatecache_flg = True
- elif ".git" in vcs_dirs:
+ if repo.sync_type == "git":
# Update existing git repository, and ignore the syncuri. We are
# going to trust the user and assume that the user is in the branch
# that he/she wants updated. We'll let the user manage branches with
# git directly.
if portage.process.find_binary("git") is None:
msg = ["Command not found: git",
- "Type \"emerge dev-util/git\" to enable git support."]
+ "Type \"emerge %s\" to enable git support." % portage.const.GIT_PACKAGE_ATOM]
for l in msg:
writemsg_level("!!! %s\n" % l,
level=logging.ERROR, noiselevel=-1)
return 1
- msg = ">>> Starting git pull in %s..." % myportdir
+ msg = ">>> Starting git pull in %s..." % repo.location
emergelog(xterm_titles, msg )
writemsg_level(msg + "\n")
exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
- (portage._shell_quote(myportdir),), **spawn_kwargs)
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
if exitcode != os.EX_OK:
- msg = "!!! git pull error in %s." % myportdir
+ msg = "!!! git pull error in %s." % repo.location
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
return exitcode
- msg = ">>> Git pull in %s successful" % myportdir
+ msg = ">>> Git pull in %s successful" % repo.location
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n")
- git = True
- elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
+ elif repo.sync_type == "rsync":
for vcs_dir in vcs_dirs:
writemsg_level(("!!! %s appears to be under revision " + \
"control (contains %s).\n!!! Aborting rsync sync.\n") % \
- (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
+ (repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
return 1
- if not os.path.exists("/usr/bin/rsync"):
+ rsync_binary = portage.process.find_binary("rsync")
+ if rsync_binary is None:
print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
- print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
- sys.exit(1)
+ print("!!! Type \"emerge %s\" to enable rsync support." % portage.const.RSYNC_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
mytimeout=180
rsync_opts = []
@@ -2041,6 +2212,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
"--safe-links", # Ignore links outside of tree
"--perms", # Preserve permissions
"--times", # Preserive mod times
+ "--omit-dir-times",
"--compress", # Compress the data transmitted
"--force", # Force deletion on non-empty dirs
"--whole-file", # Don't do block transfers, only entire files
@@ -2103,14 +2275,14 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
# Real local timestamp file.
servertimestampfile = os.path.join(
- myportdir, "metadata", "timestamp.chk")
+ repo.location, "metadata", "timestamp.chk")
content = portage.util.grabfile(servertimestampfile)
mytimestamp = 0
if content:
try:
mytimestamp = time.mktime(time.strptime(content[0],
- "%a, %d %b %Y %H:%M:%S +0000"))
+ TIMESTAMP_FORMAT))
except (OverflowError, ValueError):
pass
del content
@@ -2134,9 +2306,12 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
syncuri, maxsplit=4)[1:5]
except ValueError:
- writemsg_level("!!! SYNC is invalid: %s\n" % syncuri,
+ writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
noiselevel=-1, level=logging.ERROR)
return 1
+
+ ssh_opts = settings.get("PORTAGE_SSH_OPTS")
+
if port is None:
port=""
if user_name is None:
@@ -2252,7 +2427,10 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if mytimestamp != 0 and "--quiet" not in myopts:
print(">>> Checking server timestamp ...")
- rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
+ rsynccommand = [rsync_binary] + rsync_opts + extra_rsync_opts
+
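+ # Pass PORTAGE_SSH_OPTS through to the ssh transport via rsync's --rsh.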
+ if proto == 'ssh' and ssh_opts:
+ rsynccommand.append("--rsh=ssh " + ssh_opts)
if "--debug" in myopts:
print(rsynccommand)
@@ -2298,7 +2476,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
rsync_initial_timeout)
mypids.extend(portage.process.spawn(
- mycommand, returnpid=True, **spawn_kwargs))
+ mycommand, returnpid=True,
+ **portage._native_kwargs(spawn_kwargs)))
exitcode = os.waitpid(mypids[0], 0)[1]
if usersync_uid is not None:
portage.util.apply_permissions(tmpservertimestampfile,
@@ -2328,12 +2507,11 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
exitcode = (exitcode & 0xff) << 8
else:
exitcode = exitcode >> 8
- if mypids:
- portage.process.spawned_pids.remove(mypids[0])
+
if content:
try:
servertimestamp = time.mktime(time.strptime(
- content[0], "%a, %d %b %Y %H:%M:%S +0000"))
+ content[0], TIMESTAMP_FORMAT))
except (OverflowError, ValueError):
pass
del mycommand, mypids, content
@@ -2349,7 +2527,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
print(">>> In order to force sync, remove '%s'." % servertimestampfile)
print(">>>")
print()
- sys.exit(0)
+ return os.EX_OK
elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
emergelog(xterm_titles,
">>> Server out of date: %s" % dosyncuri)
@@ -2363,8 +2541,33 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
exitcode = SERVER_OUT_OF_DATE
elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# actual sync
- mycommand = rsynccommand + [dosyncuri+"/", myportdir]
- exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
+ mycommand = rsynccommand + [dosyncuri+"/", repo.location]
+ exitcode = None
+ try:
+ exitcode = portage.process.spawn(mycommand,
+ **portage._native_kwargs(spawn_kwargs))
+ finally:
+ if exitcode is None:
+ # interrupted
+ exitcode = 128 + signal.SIGINT
+
+ # 0 Success
+ # 1 Syntax or usage error
+ # 2 Protocol incompatibility
+ # 5 Error starting client-server protocol
+ # 35 Timeout waiting for daemon connection
+ if exitcode not in (0, 1, 2, 5, 35):
+ # If the exit code is not among those listed above,
+ # then we may have a partial/inconsistent sync
+ # state, so our previously read timestamp as well
+ # as the corresponding file can no longer be
+ # trusted.
+ mytimestamp = 0
+ try:
+ os.unlink(servertimestampfile)
+ except OSError:
+ pass
+
if exitcode in [0,1,3,4,11,14,20,21]:
break
elif exitcode in [1,3,4,11,14,20,21]:
@@ -2390,23 +2593,23 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if (exitcode==0):
emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
elif exitcode == SERVER_OUT_OF_DATE:
- sys.exit(1)
+ return 1
elif exitcode == EXCEEDED_MAX_RETRIES:
sys.stderr.write(
">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
- sys.exit(1)
+ return 1
elif (exitcode>0):
msg = []
if exitcode==1:
msg.append("Rsync has reported that there is a syntax error. Please ensure")
- msg.append("that your SYNC statement is proper.")
- msg.append("SYNC=" + settings["SYNC"])
+ msg.append("that sync-uri attribute for repository '%s' is proper." % repo.name)
+ msg.append("sync-uri: '%s'" % repo.sync_uri)
elif exitcode==11:
msg.append("Rsync has reported that there is a File IO error. Normally")
msg.append("this means your disk is full, but can be caused by corruption")
- msg.append("on the filesystem that contains PORTDIR. Please investigate")
+ msg.append("on the filesystem that contains repository '%s'. Please investigate" % repo.name)
msg.append("and try again after the problem has been fixed.")
- msg.append("PORTDIR=" + settings["PORTDIR"])
+ msg.append("Location of repository: '%s'" % repo.location)
elif exitcode==20:
msg.append("Rsync was killed before it finished.")
else:
@@ -2417,115 +2620,76 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
msg.append("(and possibly your system's filesystem) configuration.")
for line in msg:
out.eerror(line)
- sys.exit(exitcode)
- elif syncuri[:6]=="cvs://":
+ return exitcode
+ elif repo.sync_type == "cvs":
if not os.path.exists("/usr/bin/cvs"):
print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
- print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
- sys.exit(1)
- cvsroot=syncuri[6:]
- cvsdir=os.path.dirname(myportdir)
- if not os.path.exists(myportdir+"/CVS"):
+ print("!!! Type \"emerge %s\" to enable CVS support." % portage.const.CVS_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
+ cvs_root = syncuri
+ if cvs_root.startswith("cvs://"):
+ cvs_root = cvs_root[6:]
+ if not os.path.exists(os.path.join(repo.location, "CVS")):
#initial checkout
print(">>> Starting initial cvs checkout with "+syncuri+"...")
- if os.path.exists(cvsdir+"/gentoo-x86"):
- print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
- sys.exit(1)
try:
- os.rmdir(myportdir)
+ os.rmdir(repo.location)
except OSError as e:
if e.errno != errno.ENOENT:
sys.stderr.write(
- "!!! existing '%s' directory; exiting.\n" % myportdir)
- sys.exit(1)
+ "!!! existing '%s' directory; exiting.\n" % repo.location)
+ return 1
del e
if portage.process.spawn_bash(
- "cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
- (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
- **spawn_kwargs) != os.EX_OK:
+ "cd %s; exec cvs -z0 -d %s co -P -d %s %s" %
+ (portage._shell_quote(os.path.dirname(repo.location)), portage._shell_quote(cvs_root),
+ portage._shell_quote(os.path.basename(repo.location)), portage._shell_quote(repo.sync_cvs_repo)),
+ **portage._native_kwargs(spawn_kwargs)) != os.EX_OK:
print("!!! cvs checkout error; exiting.")
- sys.exit(1)
- os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
+ return 1
else:
#cvs update
print(">>> Starting cvs update with "+syncuri+"...")
retval = portage.process.spawn_bash(
"cd %s; exec cvs -z0 -q update -dP" % \
- (portage._shell_quote(myportdir),), **spawn_kwargs)
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
if retval != os.EX_OK:
writemsg_level("!!! cvs update error; exiting.\n",
noiselevel=-1, level=logging.ERROR)
- sys.exit(retval)
+ return retval
dosyncuri = syncuri
- else:
- writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
- noiselevel=-1, level=logging.ERROR)
- return 1
# Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- adjust_configs(myopts, trees)
- root_config = trees[settings['EROOT']]['root_config']
+ settings, trees, mtimedb = load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
portdb = trees[settings['EROOT']]['porttree'].dbapi
- if git:
+ if repo.sync_type == "git":
# NOTE: Do this after reloading the config, in case
# it did not exist prior to sync, so that the config
# and portdb properly account for its existence.
- exitcode = git_sync_timestamps(portdb, myportdir)
+ exitcode = git_sync_timestamps(portdb, repo.location)
if exitcode == os.EX_OK:
updatecache_flg = True
- if updatecache_flg and \
- myaction != "metadata" and \
- "metadata-transfer" not in settings.features:
+ if updatecache_flg and "metadata-transfer" not in settings.features:
updatecache_flg = False
if updatecache_flg and \
- os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
+ os.path.exists(os.path.join(repo.location, 'metadata', 'cache')):
- # Only update cache for myportdir since that's
+ # Only update cache for repo.location since that's
# the only one that's been synced here.
- action_metadata(settings, portdb, myopts, porttrees=[myportdir])
-
- if myopts.get('--package-moves') != 'n' and \
- _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
- mtimedb.commit()
- # Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- adjust_configs(myopts, trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- root_config = trees[settings['EROOT']]['root_config']
-
- mybestpv = portdb.xmatch("bestmatch-visible",
- portage.const.PORTAGE_PACKAGE_ATOM)
- mypvs = portage.best(
- trees[settings['EROOT']]['vartree'].dbapi.match(
- portage.const.PORTAGE_PACKAGE_ATOM))
-
- chk_updated_cfg_files(settings["EROOT"],
- portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
-
- if myaction != "metadata":
- postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
- portage.USER_CONFIG_PATH, "bin", "post_sync")
- if os.access(postsync, os.X_OK):
- retval = portage.process.spawn(
- [postsync, dosyncuri], env=settings.environ())
- if retval != os.EX_OK:
- writemsg_level(
- " %s spawn failed of %s\n" % (bad("*"), postsync,),
- level=logging.ERROR, noiselevel=-1)
+ action_metadata(settings, portdb, myopts, porttrees=[repo.location])
- if(mybestpv != mypvs) and not "--quiet" in myopts:
- print()
- print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
- print(warn(" * ")+"that you update portage now, before any other packages are updated.")
- print()
- print(warn(" * ")+"To update portage, run 'emerge portage' now.")
- print()
+ postsync = os.path.join(settings["PORTAGE_CONFIGROOT"], portage.USER_CONFIG_PATH, "bin", "post_sync")
+ if os.access(postsync, os.X_OK):
+ retval = portage.process.spawn([postsync, dosyncuri], env=settings.environ())
+ if retval != os.EX_OK:
+ writemsg_level(" %s spawn failed of %s\n" % (bad("*"), postsync,),
+ level=logging.ERROR, noiselevel=-1)
- display_news_notification(root_config, myopts)
return os.EX_OK
def action_uninstall(settings, trees, ldpath_mtimes,
@@ -2647,13 +2811,8 @@ def action_uninstall(settings, trees, ldpath_mtimes,
if owners:
for cpv in owners:
- slot = vardb.aux_get(cpv, ['SLOT'])[0]
- if not slot:
- # portage now masks packages with missing slot, but it's
- # possible that one was installed by an older version
- atom = portage.cpv_getkey(cpv)
- else:
- atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
+ pkg = vardb._pkg_str(cpv, None)
+ atom = '%s:%s' % (pkg.cp, pkg.slot)
valid_atoms.append(portage.dep.Atom(atom))
else:
writemsg_level(("!!! '%s' is not claimed " + \
@@ -2677,20 +2836,20 @@ def action_uninstall(settings, trees, ldpath_mtimes,
if action == 'deselect':
return action_deselect(settings, trees, opts, valid_atoms)
- # Create a Scheduler for calls to unmerge(), in order to cause
- # redirection of ebuild phase output to logs as required for
- # options such as --quiet.
- sched = Scheduler(settings, trees, None, opts,
- spinner, uninstall_only=True)
- sched._background = sched._background_mode()
- sched._status_display.quiet = True
-
- if sched._background:
- sched.settings.unlock()
- sched.settings["PORTAGE_BACKGROUND"] = "1"
- sched.settings.backup_changes("PORTAGE_BACKGROUND")
- sched.settings.lock()
- sched.pkgsettings[eroot] = portage.config(clone=sched.settings)
+ # Use the same logic as the Scheduler class to trigger redirection
+ # of ebuild pkg_prerm/postrm phase output to logs as appropriate
+ # for options such as --jobs, --quiet and --quiet-build.
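+ # Note: opts["--jobs"] is True when --jobs is given without an
+ # argument (unlimited jobs), which also implies background mode.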
+ max_jobs = opts.get("--jobs", 1)
+ background = (max_jobs is True or max_jobs > 1 or
+ "--quiet" in opts or opts.get("--quiet-build") == "y")
+ sched_iface = SchedulerInterface(global_event_loop(),
+ is_background=lambda: background)
+
+ if background:
+ settings.unlock()
+ settings["PORTAGE_BACKGROUND"] = "1"
+ settings.backup_changes("PORTAGE_BACKGROUND")
+ settings.lock()
if action in ('clean', 'unmerge') or \
(action == 'prune' and "--nodeps" in opts):
@@ -2698,10 +2857,11 @@ def action_uninstall(settings, trees, ldpath_mtimes,
ordered = action == 'unmerge'
rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
valid_atoms, ldpath_mtimes, ordered=ordered,
- scheduler=sched._sched_iface)
+ scheduler=sched_iface)
else:
rval = action_depclean(settings, trees, ldpath_mtimes,
- opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
+ opts, action, valid_atoms, spinner,
+ scheduler=sched_iface)
return rval
@@ -2807,6 +2967,10 @@ def adjust_config(myopts, settings):
settings["NOCOLOR"] = "true"
settings.backup_changes("NOCOLOR")
+ if "--pkg-format" in myopts:
+ settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
+ settings.backup_changes("PORTAGE_BINPKG_FORMAT")
+
def display_missing_pkg_set(root_config, set_name):
msg = []
@@ -3030,61 +3194,53 @@ def git_sync_timestamps(portdb, portdir):
return os.EX_OK
-def load_emerge_config(trees=None):
+class _emerge_config(SlotObject):
+
+ __slots__ = ('action', 'args', 'opts',
+ 'running_config', 'target_config', 'trees')
+
+ # Support unpack as tuple, for load_emerge_config backward compatibility.
+ def __iter__(self):
+ yield self.target_config.settings
+ yield self.trees
+ yield self.target_config.mtimedb
+
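+ # Materialize the tuple so that both indexing and slicing work.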
+ def __getitem__(self, index):
+ return list(self)[index]
+
+ def __len__(self):
+ return 3
+
+def load_emerge_config(emerge_config=None, **kargs):
+
+ if emerge_config is None:
+ emerge_config = _emerge_config(**kargs)
+
kwargs = {}
- for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
+ ("eprefix", "EPREFIX")):
v = os.environ.get(envvar, None)
if v and v.strip():
kwargs[k] = v
- trees = portage.create_trees(trees=trees, **kwargs)
+ emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
+ **portage._native_kwargs(kwargs))
- for root_trees in trees.values():
+ for root_trees in emerge_config.trees.values():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
- settings = trees[trees._target_eroot]['vartree'].settings
- mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
- mtimedb = portage.MtimeDB(mtimedbfile)
- QueryCommand._db = trees
- return settings, trees, mtimedb
-
-def chk_updated_cfg_files(eroot, config_protect):
- target_root = eroot
- result = list(
- portage.util.find_updated_config_files(target_root, config_protect))
-
- for x in result:
- writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
- level=logging.INFO, noiselevel=-1)
- if not x[1]: # it's a protected file
- writemsg_level( _("config file '%s' needs updating.\n") % x[0],
- level=logging.INFO, noiselevel=-1)
- else: # it's a protected dir
- if len(x[1]) == 1:
- head, tail = os.path.split(x[1][0])
- tail = tail[len("._cfg0000_"):]
- fpath = os.path.join(head, tail)
- writemsg_level(_("config file '%s' needs updating.\n") % fpath,
- level=logging.INFO, noiselevel=-1)
- else:
- writemsg_level( _("%d config files in '%s' need updating.\n") % \
- (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
-
- if result:
- print(" "+yellow("*")+ " See the "+colorize("INFORM", _("CONFIGURATION FILES"))\
- + " " + _("section of the") + " " + bold("emerge"))
- print(" "+yellow("*")+ " " + _("man page to learn how to update config files."))
-
+ target_eroot = emerge_config.trees._target_eroot
+ emerge_config.target_config = \
+ emerge_config.trees[target_eroot]['root_config']
+ emerge_config.target_config.mtimedb = portage.MtimeDB(
+ os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
+ emerge_config.running_config = emerge_config.trees[
+ emerge_config.trees._running_eroot]['root_config']
+ QueryCommand._db = emerge_config.trees
-def display_news_notification(root_config, myopts):
- if "news" not in root_config.settings.features:
- return
- portdb = root_config.trees["porttree"].dbapi
- vardb = root_config.trees["vartree"].dbapi
- news_counts = count_unread_news(portdb, vardb)
- display_news_notifications(news_counts)
+ return emerge_config
def getgccversion(chost):
"""
@@ -3140,3 +3296,771 @@ def getgccversion(chost):
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
+
+# Warn about features that may confuse users and
+# lead them to report invalid bugs.
+_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
+
+def validate_ebuild_environment(trees):
+ features_warn = set()
+ for myroot in trees:
+ settings = trees[myroot]["vartree"].settings
+ settings.validate()
+ features_warn.update(
+ _emerge_features_warn.intersection(settings.features))
+
+ if features_warn:
+ msg = "WARNING: The FEATURES variable contains one " + \
+ "or more values that should be disabled under " + \
+ "normal circumstances: %s" % " ".join(features_warn)
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 65):
+ out.ewarn(line)
+
+def check_procfs():
+ procfs_path = '/proc'
+ if platform.system() not in ("Linux",) or \
+ os.path.ismount(procfs_path):
+ return os.EX_OK
+ msg = "It seems that %s is not mounted. You have been warned." % procfs_path
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+def config_protect_check(trees):
+ for root, root_trees in trees.items():
+ settings = root_trees["root_config"].settings
+ if not settings.get("CONFIG_PROTECT"):
+ msg = "!!! CONFIG_PROTECT is empty"
+ if settings["ROOT"] != "/":
+ msg += " for '%s'" % root
+ msg += "\n"
+ writemsg_level(msg, level=logging.WARN, noiselevel=-1)
+
+def apply_priorities(settings):
+ ionice(settings)
+ nice(settings)
+
+def nice(settings):
+ try:
+ os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
+ except (OSError, ValueError) as e:
+ out = portage.output.EOutput()
+ out.eerror("Failed to change nice value to '%s'" % \
+ settings["PORTAGE_NICENESS"])
+ out.eerror("%s\n" % str(e))
+
+def ionice(settings):
+
+ ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
+ if ionice_cmd:
+ ionice_cmd = portage.util.shlex_split(ionice_cmd)
+ if not ionice_cmd:
+ return
+
+ variables = {"PID" : str(os.getpid())}
+ cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ # The OS kernel probably doesn't support ionice,
+ # so return silently.
+ return
+
+ if rval != os.EX_OK:
+ out = portage.output.EOutput()
+ out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
+ out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
+def setconfig_fallback(root_config):
+ setconfig = root_config.setconfig
+ setconfig._create_default_config()
+ setconfig._parse(update=True)
+ root_config.sets = setconfig.getSets()
+
+def get_missing_sets(root_config):
+ # emerge requires existence of "world", "selected", and "system"
+ missing_sets = []
+
+ for s in ("selected", "system", "world",):
+ if s not in root_config.sets:
+ missing_sets.append(s)
+
+ return missing_sets
+
+def missing_sets_warning(root_config, missing_sets):
+ if len(missing_sets) > 2:
+ missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
+ missing_sets_str += ', and "%s"' % missing_sets[-1]
+ elif len(missing_sets) == 2:
+ missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
+ else:
+ missing_sets_str = '"%s"' % missing_sets[-1]
+ msg = ["emerge: incomplete set configuration, " + \
+ "missing set(s): %s" % missing_sets_str]
+ if root_config.sets:
+ msg.append(" sets defined: %s" % ", ".join(root_config.sets))
+ global_config_path = portage.const.GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ msg.append(" This usually means that '%s'" % \
+ (os.path.join(global_config_path, "sets/portage.conf"),))
+ msg.append(" is missing or corrupt.")
+ msg.append(" Falling back to default world and system set configuration!!!")
+ for line in msg:
+ writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
+def ensure_required_sets(trees):
+ warning_shown = False
+ for root_trees in trees.values():
+ missing_sets = get_missing_sets(root_trees["root_config"])
+ if missing_sets and not warning_shown:
+ warning_shown = True
+ missing_sets_warning(root_trees["root_config"], missing_sets)
+ if missing_sets:
+ setconfig_fallback(root_trees["root_config"])
+
+def expand_set_arguments(myfiles, myaction, root_config):
+ retval = os.EX_OK
+ setconfig = root_config.setconfig
+
+ sets = setconfig.getSets()
+
+ # In order to know exactly which atoms/sets should be added to the
+ # world file, the depgraph performs set expansion later. It will get
+ # confused about where the atoms came from if it's not allowed to
+ # expand them itself.
+ do_not_expand = myaction is None
+ newargs = []
+ for a in myfiles:
+ if a in ("system", "world"):
+ newargs.append(SETPREFIX+a)
+ else:
+ newargs.append(a)
+ myfiles = newargs
+ del newargs
+ newargs = []
+
+ # separators for set arguments
+ ARG_START = "{"
+ ARG_END = "}"
+
+ for i in range(0, len(myfiles)):
+ if myfiles[i].startswith(SETPREFIX):
+ start = 0
+ end = 0
+ x = myfiles[i][len(SETPREFIX):]
+ newset = ""
+ while x:
+ start = x.find(ARG_START)
+ end = x.find(ARG_END)
+ if start > 0 and start < end:
+ namepart = x[:start]
+ argpart = x[start+1:end]
+
+ # TODO: implement proper quoting
+ args = argpart.split(",")
+ options = {}
+ for a in args:
+ if "=" in a:
+ k, v = a.split("=", 1)
+ options[k] = v
+ else:
+ options[a] = "True"
+ setconfig.update(namepart, options)
+ newset += (x[:start-len(namepart)]+namepart)
+ x = x[end+len(ARG_END):]
+ else:
+ newset += x
+ x = ""
+ myfiles[i] = SETPREFIX+newset
+
+ sets = setconfig.getSets()
+
+ # display errors that occurred while loading the SetConfig instance
+ for e in setconfig.errors:
+ print(colorize("BAD", "Error during set creation: %s" % e))
+
+ unmerge_actions = ("unmerge", "prune", "clean", "depclean")
+
+ for a in myfiles:
+ if a.startswith(SETPREFIX):
+ s = a[len(SETPREFIX):]
+ if s not in sets:
+ display_missing_pkg_set(root_config, s)
+ return (None, 1)
+ if s == "installed":
+ msg = ("The @installed set is deprecated and will soon be "
+ "removed. Please refer to bug #387059 for details.")
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 50):
+ out.ewarn(line)
+ setconfig.active.append(s)
+
+ if do_not_expand:
+ # Loading sets can be slow, so skip it here, in order
+ # to allow the depgraph to indicate progress with the
+ # spinner while sets are loading (bug #461412).
+ newargs.append(a)
+ continue
+
+ try:
+ set_atoms = setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(("emerge: the given set '%s' " + \
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ return (None, 1)
+ if myaction in unmerge_actions and \
+ not sets[s].supportsOperation("unmerge"):
+ writemsg_level("emerge: the given set '%s' does " % s + \
+ "not support unmerge operations\n",
+ level=logging.ERROR, noiselevel=-1)
+ retval = 1
+ elif not set_atoms:
+ writemsg_level("emerge: '%s' is an empty set\n" % s,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ newargs.extend(set_atoms)
+ for error_msg in sets[s].errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ newargs.append(a)
+ return (newargs, retval)
+
+def repo_name_check(trees):
+ missing_repo_names = set()
+ for root_trees in trees.values():
+ porttree = root_trees.get("porttree")
+ if porttree:
+ portdb = porttree.dbapi
+ missing_repo_names.update(portdb.getMissingRepoNames())
+
+ # Skip warnings about missing repo_name entries for
+ # /usr/local/portage (see bug #248603).
+ try:
+ missing_repo_names.remove('/usr/local/portage')
+ except KeyError:
+ pass
+
+ if missing_repo_names:
+ msg = []
+ msg.append("WARNING: One or more repositories " + \
+ "have missing repo_name entries:")
+ msg.append("")
+ for p in missing_repo_names:
+ msg.append("\t%s/profiles/repo_name" % (p,))
+ msg.append("")
+ msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
+ "should be a plain text file containing a unique " + \
+ "name for the repository on the first line.", 70))
+ msg.append("\n")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(missing_repo_names)
+
+def repo_name_duplicate_check(trees):
+ ignored_repos = {}
+ for root, root_trees in trees.items():
+ if 'porttree' in root_trees:
+ portdb = root_trees['porttree'].dbapi
+ if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
+ for repo_name, paths in portdb.getIgnoredRepos():
+ k = (root, repo_name, portdb.getRepositoryPath(repo_name))
+ ignored_repos.setdefault(k, []).extend(paths)
+
+ if ignored_repos:
+ msg = []
+ msg.append('WARNING: One or more repositories ' + \
+ 'have been ignored due to duplicate')
+ msg.append(' profiles/repo_name entries:')
+ msg.append('')
+ for k in sorted(ignored_repos):
+ msg.append(' %s overrides' % ", ".join(k))
+ for path in ignored_repos[k]:
+ msg.append(' %s' % (path,))
+ msg.append('')
+ msg.extend(' ' + x for x in textwrap.wrap(
+ "All profiles/repo_name entries must be unique in order " + \
+ "to avoid having duplicates ignored. " + \
+ "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
+ "/etc/portage/make.conf if you would like to disable this warning."))
+ msg.append("\n")
+ writemsg_level(''.join('%s\n' % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(ignored_repos)
+
+def run_action(emerge_config):
+
+ # skip global updates prior to sync, since it's called after sync
+ if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
+ emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ if xterm_titles:
+ xtermTitle("emerge")
+
+ if "--digest" in emerge_config.opts:
+ os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
+ # Reload the whole config from scratch so that the portdbapi internal
+ # config is updated with new FEATURES.
+ load_emerge_config(emerge_config=emerge_config)
+
+ # NOTE: adjust_configs() can map options to FEATURES, so any relevant
+ # options adjustments should be made prior to calling adjust_configs().
+ if "--buildpkgonly" in emerge_config.opts:
+ emerge_config.opts["--buildpkg"] = True
+
+ if "getbinpkg" in emerge_config.target_config.settings.features:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkgonly"] = True
+
+ if "--getbinpkg" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--usepkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--buildpkgonly" in emerge_config.opts:
+ # --buildpkgonly will not merge anything, so
+ # it cancels all binary package options.
+ for opt in ("--getbinpkg", "--getbinpkgonly",
+ "--usepkg", "--usepkgonly"):
+ emerge_config.opts.pop(opt, None)
+
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+ apply_priorities(emerge_config.target_config.settings)
+
+ for fmt in emerge_config.target_config.settings["PORTAGE_BINPKG_FORMAT"].split():
+ if not fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if "--pkg-format" in emerge_config.opts:
+ problematic="--pkg-format"
+ else:
+ problematic="PORTAGE_BINPKG_FORMAT"
+
+ writemsg_level(("emerge: %s is not set correctly. Format " + \
+ "'%s' is not supported.\n") % (problematic, fmt),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if emerge_config.action == 'version':
+ writemsg_stdout(getportageversion(
+ emerge_config.target_config.settings["PORTDIR"],
+ None,
+ emerge_config.target_config.settings.profile_path,
+ emerge_config.target_config.settings["CHOST"],
+ emerge_config.target_config.trees['vartree'].dbapi) + '\n',
+ noiselevel=-1)
+ return 0
+ elif emerge_config.action == 'help':
+ emerge_help()
+ return 0
+
+ spinner = stdout_spinner()
+ if "candy" in emerge_config.target_config.settings.features:
+ spinner.update = spinner.update_scroll
+
+ if "--quiet" not in emerge_config.opts:
+ portage.deprecated_profile_check(
+ settings=emerge_config.target_config.settings)
+ repo_name_check(emerge_config.trees)
+ repo_name_duplicate_check(emerge_config.trees)
+ config_protect_check(emerge_config.trees)
+ check_procfs()
+
+ for mytrees in emerge_config.trees.values():
+ mydb = mytrees["porttree"].dbapi
+ # Freeze the portdbapi for performance (memoize all xmatch results).
+ mydb.freeze()
+
+ if emerge_config.action in ('search', None) and \
+ "--usepkg" in emerge_config.opts:
+ # Populate the bintree with current --getbinpkg setting.
+ # This needs to happen before expand_set_arguments(), in case
+ # any sets use the bintree.
+ try:
+ mytrees["bintree"].populate(
+ getbinpkgs="--getbinpkg" in emerge_config.opts)
+ except ParseError as e:
+ writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
+ % e, noiselevel=-1)
+ return 1
+
+ del mytrees, mydb
+
+ for x in emerge_config.args:
+ if x.endswith((".ebuild", ".tbz2")) and \
+ os.path.exists(os.path.abspath(x)):
+ print(colorize("BAD", "\n*** emerging by path is broken "
+ "and may not always work!!!\n"))
+ break
+
+ if emerge_config.action == "list-sets":
+ writemsg_stdout("".join("%s\n" % s for s in
+ sorted(emerge_config.target_config.sets)))
+ return os.EX_OK
+ elif emerge_config.action == "check-news":
+ news_counts = count_unread_news(
+ emerge_config.target_config.trees["porttree"].dbapi,
+ emerge_config.target_config.trees["vartree"].dbapi)
+ if any(news_counts.values()):
+ display_news_notifications(news_counts)
+ elif "--quiet" not in emerge_config.opts:
+ print("", colorize("GOOD", "*"), "No news items were found.")
+ return os.EX_OK
+
+ ensure_required_sets(emerge_config.trees)
+
+ if emerge_config.action is None and \
+ "--resume" in emerge_config.opts and emerge_config.args:
+ writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
+ " ".join(emerge_config.args), noiselevel=-1)
+ return 1
+
+ # only expand sets for actions taking package arguments
+ oldargs = emerge_config.args[:]
+ if emerge_config.action in ("clean", "config", "depclean",
+ "info", "prune", "unmerge", None):
+ newargs, retval = expand_set_arguments(
+ emerge_config.args, emerge_config.action,
+ emerge_config.target_config)
+ if retval != os.EX_OK:
+ return retval
+
+ # Need to handle empty sets specially, otherwise emerge will react
+ # with the help message for empty argument lists
+ if oldargs and not newargs:
+ print("emerge: no targets left after set expansion")
+ return 0
+
+ emerge_config.args = newargs
+
+ if "--tree" in emerge_config.opts and \
+ "--columns" in emerge_config.opts:
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+ return 1
+
+ if '--emptytree' in emerge_config.opts and \
+ '--noreplace' in emerge_config.opts:
+ writemsg_level("emerge: can't specify both of " + \
+ "\"--emptytree\" and \"--noreplace\".\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--quiet" in emerge_config.opts):
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = -1
+
+ if "--fetch-all-uri" in emerge_config.opts:
+ emerge_config.opts["--fetchonly"] = True
+
+ if "--skipfirst" in emerge_config.opts and \
+ "--resume" not in emerge_config.opts:
+ emerge_config.opts["--resume"] = True
+
+ # Allow -p to remove --ask
+ if "--pretend" in emerge_config.opts:
+ emerge_config.opts.pop("--ask", None)
+
+ # forbid --ask when not in a terminal
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+ if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+ noiselevel=-1)
+ return 1
+
+ if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = 0
+ if "python-trace" in emerge_config.target_config.settings.features:
+ portage.debug.set_trace(True)
+
+ if not ("--quiet" in emerge_config.opts):
+ if '--nospinner' in emerge_config.opts or \
+ emerge_config.target_config.settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ spinner.update = spinner.update_basic
+
+ if "--debug" in emerge_config.opts:
+ print("myaction", emerge_config.action)
+ print("myopts", emerge_config.opts)
+
+ if not emerge_config.action and not emerge_config.args and \
+ "--resume" not in emerge_config.opts:
+ emerge_help()
+ return 1
+
+ pretend = "--pretend" in emerge_config.opts
+ fetchonly = "--fetchonly" in emerge_config.opts or \
+ "--fetch-all-uri" in emerge_config.opts
+ buildpkgonly = "--buildpkgonly" in emerge_config.opts
+
+ # check if root user is the current user for the actions where emerge needs this
+ if portage.data.secpass < 2:
+ # We've already allowed "--version" and "--help" above.
+ if "--pretend" not in emerge_config.opts and \
+ emerge_config.action not in ("search", "info"):
+ need_superuser = emerge_config.action in ('clean', 'depclean',
+ 'deselect', 'prune', 'unmerge') or not \
+ (fetchonly or \
+ (buildpkgonly and portage.data.secpass >= 1) or \
+ emerge_config.action in ("metadata", "regen", "sync"))
+ if portage.data.secpass < 1 or \
+ need_superuser:
+ if need_superuser:
+ access_desc = "superuser"
+ else:
+ access_desc = "portage group"
+ # Always show portage_group_warning() when only portage group
+ # access is required but the user is not in the portage group.
+ if "--ask" in emerge_config.opts:
+ writemsg_stdout("This action requires %s access...\n" % \
+ (access_desc,), noiselevel=-1)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ if userquery("Would you like to add --pretend to options?",
+ "--ask-enter-invalid" in emerge_config.opts) == "No":
+ return 128 + signal.SIGINT
+ emerge_config.opts["--pretend"] = True
+ emerge_config.opts.pop("--ask")
+ else:
+ sys.stderr.write(("emerge: %s access is required\n") \
+ % access_desc)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ return 1
+
+ # Disable emergelog for everything except build or unmerge operations.
+ # This helps minimize parallel emerge.log entries that can confuse log
+ # parsers like genlop.
+ disable_emergelog = False
+ for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
+ if x in emerge_config.opts:
+ disable_emergelog = True
+ break
+ if disable_emergelog:
+ pass
+ elif emerge_config.action in ("search", "info"):
+ disable_emergelog = True
+ elif portage.data.secpass < 1:
+ disable_emergelog = True
+
+ import _emerge.emergelog
+ _emerge.emergelog._disable = disable_emergelog
+
+ if not disable_emergelog:
+ emerge_log_dir = \
+ emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
+ if emerge_log_dir:
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(emerge_log_dir)
+ except portage.exception.PortageException as e:
+ writemsg_level("!!! Error creating directory for " + \
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+ (emerge_log_dir, e),
+ noiselevel=-1, level=logging.ERROR)
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+ else:
+ _emerge.emergelog._emerge_log_dir = emerge_log_dir
+ else:
+ _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
+ portage.const.EPREFIX.lstrip(os.sep), "var", "log")
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+
+ if not "--pretend" in emerge_config.opts:
+ time_fmt = "%b %d, %Y %H:%M:%S"
+ if sys.hexversion < 0x3000000:
+ time_fmt = portage._unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %b may contain non-ascii chars.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ emergelog(xterm_titles, "Started emerge on: %s" % time_str)
+ myelogstr=""
+ if emerge_config.opts:
+ opt_list = []
+ for opt, arg in emerge_config.opts.items():
+ if arg is True:
+ opt_list.append(opt)
+ elif isinstance(arg, list):
+ # arguments like --exclude that use 'append' action
+ for x in arg:
+ opt_list.append("%s=%s" % (opt, x))
+ else:
+ opt_list.append("%s=%s" % (opt, arg))
+ myelogstr=" ".join(opt_list)
+ if emerge_config.action:
+ myelogstr += " --" + emerge_config.action
+ if oldargs:
+ myelogstr += " " + " ".join(oldargs)
+ emergelog(xterm_titles, " *** emerge " + myelogstr)
+
+ oldargs = None
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg(
+ "\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGTERM, emergeexitsig)
+
+ def emergeexit():
+ """This gets out final log message in before we quit."""
+ if "--pretend" not in emerge_config.opts:
+ emergelog(xterm_titles, " *** terminating.")
+ if xterm_titles:
+ xtermTitleReset()
+ portage.atexit_register(emergeexit)
+
+ if emerge_config.action in ("config", "metadata", "regen", "sync"):
+ if "--pretend" in emerge_config.opts:
+ sys.stderr.write(("emerge: The '%s' action does " + \
+ "not support '--pretend'.\n") % emerge_config.action)
+ return 1
+
+ if "sync" == emerge_config.action:
+ return action_sync(emerge_config)
+ elif "metadata" == emerge_config.action:
+ action_metadata(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts)
+ elif emerge_config.action=="regen":
+ validate_ebuild_environment(emerge_config.trees)
+ return action_regen(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts.get("--jobs"),
+ emerge_config.opts.get("--load-average"))
+ # HELP action
+ elif "config" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_config(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, emerge_config.args)
+
+ # SEARCH action
+ elif "search" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_search(emerge_config.target_config,
+ emerge_config.opts, emerge_config.args, spinner)
+
+ elif emerge_config.action in \
+ ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
+ validate_ebuild_environment(emerge_config.trees)
+ rval = action_uninstall(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ if not (emerge_config.action == 'deselect' or
+ buildpkgonly or fetchonly or pretend):
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, rval)
+ return rval
+
+ elif emerge_config.action == 'info':
+
+ # Ensure atoms are valid before calling unmerge().
+ vardb = emerge_config.target_config.trees['vartree'].dbapi
+ portdb = emerge_config.target_config.trees['porttree'].dbapi
+ bindb = emerge_config.target_config.trees['bintree'].dbapi
+ valid_atoms = []
+ for x in emerge_config.args:
+ if is_valid_package_atom(x, allow_repo=True):
+ try:
+					# Look at the installed packages first; if there is no
+					# match, look at the ebuilds, since EAPI 4 allows running
+					# pkg_info on non-installed packages.
+ valid_atom = dep_expand(x, mydb=vardb)
+ if valid_atom.cp.split("/")[0] == "null":
+ valid_atom = dep_expand(x, mydb=portdb)
+
+ if valid_atom.cp.split("/")[0] == "null" and \
+ "--usepkg" in emerge_config.opts:
+ valid_atom = dep_expand(x, mydb=bindb)
+
+ valid_atoms.append(valid_atom)
+
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ continue
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return action_info(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, valid_atoms)
+
+ # "update", "system", or just process files:
+ else:
+ validate_ebuild_environment(emerge_config.trees)
+
+ for x in emerge_config.args:
+ if x.startswith(SETPREFIX) or \
+ is_valid_package_atom(x, allow_repo=True):
+ continue
+ if x[:1] == os.sep:
+ continue
+ try:
+ os.lstat(x)
+ continue
+ except OSError:
+ pass
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" not in emerge_config.opts:
+ display_news_notification(
+ emerge_config.target_config, emerge_config.opts)
+ retval = action_build(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb,
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, retval)
+
+ return retval
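
On Python 2 strftime() consumes and returns bytes, and %b can expand to non-ASCII month names under some locales, which is what the encode/decode pair above guards against. A minimal standalone sketch of the same pattern, with portage's _unicode_encode/_unicode_decode helpers approximated by plain encode/decode calls (illustration only):

import sys
import time

time_fmt = "%b %d, %Y %H:%M:%S"
if sys.hexversion < 0x3000000:
    # Python 2 strftime() wants bytes, so encode the format first.
    time_fmt = time_fmt.encode("utf_8")
time_str = time.strftime(time_fmt, time.localtime(time.time()))
if isinstance(time_str, bytes):
    # errors='replace' ensures a non-ASCII %b expansion cannot
    # raise UnicodeDecodeError further down the line.
    time_str = time_str.decode("utf_8", "replace")
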
diff --git a/pym/_emerge/chk_updated_cfg_files.py b/pym/_emerge/chk_updated_cfg_files.py
new file mode 100644
index 000000000..9f2ab6f3e
--- /dev/null
+++ b/pym/_emerge/chk_updated_cfg_files.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage import os
+from portage.localization import _
+from portage.output import bold, colorize, yellow
+from portage.util import writemsg_level
+
+def chk_updated_cfg_files(eroot, config_protect):
+ target_root = eroot
+ result = list(
+ portage.util.find_updated_config_files(target_root, config_protect))
+
+ for x in result:
+ writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
+ level=logging.INFO, noiselevel=-1)
+ if not x[1]: # it's a protected file
+ writemsg_level( _("config file '%s' needs updating.\n") % x[0],
+ level=logging.INFO, noiselevel=-1)
+ else: # it's a protected dir
+ if len(x[1]) == 1:
+ head, tail = os.path.split(x[1][0])
+ tail = tail[len("._cfg0000_"):]
+ fpath = os.path.join(head, tail)
+ writemsg_level(_("config file '%s' needs updating.\n") % fpath,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ writemsg_level(
+ _("%d config files in '%s' need updating.\n") % \
+ (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+ if result:
+ print(" " + yellow("*") + " See the " +
+ colorize("INFORM", _("CONFIGURATION FILES")) +
+ " " + _("section of the") + " " + bold("emerge"))
+ print(" " + yellow("*") + " " +
+ _("man page to learn how to update config files."))
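
The single-file branch above relies on the CONFIG_PROTECT naming scheme, where a pending update is written as ._cfg####_<name> next to the protected file. A small sketch of the same path transformation (the file name is invented):

import os.path

def updated_config_target(cfg_path):
    # Strip the 10-character '._cfgXXXX_' prefix from the basename,
    # e.g. '/etc/._cfg0000_make.conf' -> '/etc/make.conf'.
    head, tail = os.path.split(cfg_path)
    return os.path.join(head, tail[len("._cfg0000_"):])

print(updated_config_target("/etc/._cfg0000_make.conf"))
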
diff --git a/pym/_emerge/clear_caches.py b/pym/_emerge/clear_caches.py
index 7b7c5eced..513df626f 100644
--- a/pym/_emerge/clear_caches.py
+++ b/pym/_emerge/clear_caches.py
@@ -1,8 +1,7 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import gc
-from portage.util.listdir import dircache
def clear_caches(trees):
for d in trees.values():
@@ -15,5 +14,4 @@ def clear_caches(trees):
pass
else:
d["vartree"].dbapi._linkmap._clear_cache()
- dircache.clear()
gc.collect()
diff --git a/pym/_emerge/countdown.py b/pym/_emerge/countdown.py
index 5abdc8a96..62e3c8dea 100644
--- a/pym/_emerge/countdown.py
+++ b/pym/_emerge/countdown.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -8,15 +8,15 @@ import time
from portage.output import colorize
-def countdown(secs=5, doing="Starting"):
+
+def countdown(secs=5, doing='Starting'):
if secs:
- print(">>> Waiting",secs,"seconds before starting...")
- print(">>> (Control-C to abort)...\n"+doing+" in: ", end=' ')
- ticks=list(range(secs))
- ticks.reverse()
- for sec in ticks:
- sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
+ print(
+ '>>> Waiting %s seconds before starting...\n'
+ '>>> (Control-C to abort)...\n'
+ '%s in:' % (secs, doing), end='')
+ for sec in range(secs, 0, -1):
+ sys.stdout.write(colorize('UNMERGE_WARN', ' %i' % sec))
sys.stdout.flush()
time.sleep(1)
print()
-
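
For reference, a typical call to the rewritten helper looks like this (the argument values are arbitrary):

from _emerge.countdown import countdown

# Prints the two '>>>' header lines, then '5 4 3 2 1' at one
# number per second, before returning.
countdown(secs=5, doing='Unmerging')
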
diff --git a/pym/_emerge/create_depgraph_params.py b/pym/_emerge/create_depgraph_params.py
index 2838e93c3..225b792b6 100644
--- a/pym/_emerge/create_depgraph_params.py
+++ b/pym/_emerge/create_depgraph_params.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -15,11 +15,11 @@ def create_depgraph_params(myopts, myaction):
# complete: completely account for all known dependencies
# remove: build graph for use in removing packages
# rebuilt_binaries: replace installed packages with rebuilt binaries
- # rebuild_if_new_slot_abi: rebuild or reinstall packages when
- # SLOT/ABI := operator dependencies can be satisfied by a newer
- # SLOT/ABI, so that older packages slots will become eligible for
+ # rebuild_if_new_slot: rebuild or reinstall packages when
+ # slot/sub-slot := operator dependencies can be satisfied by a newer
+	# slot/sub-slot, so that older package slots will become eligible for
# removal by the --depclean action as soon as possible
- # ignore_built_slot_abi_deps: ignore the SLOT/ABI := operator parts
+ # ignore_built_slot_operator_deps: ignore the slot/sub-slot := operator parts
 	# of dependencies that have been recorded when packages were built
myparams = {"recurse" : True}
@@ -27,9 +27,9 @@ def create_depgraph_params(myopts, myaction):
if bdeps is not None:
myparams["bdeps"] = bdeps
- ignore_built_slot_abi_deps = myopts.get("--ignore-built-slot-abi-deps")
- if ignore_built_slot_abi_deps is not None:
- myparams["ignore_built_slot_abi_deps"] = ignore_built_slot_abi_deps
+ ignore_built_slot_operator_deps = myopts.get("--ignore-built-slot-operator-deps")
+ if ignore_built_slot_operator_deps is not None:
+ myparams["ignore_built_slot_operator_deps"] = ignore_built_slot_operator_deps
dynamic_deps = myopts.get("--dynamic-deps")
if dynamic_deps is not None:
@@ -41,11 +41,12 @@ def create_depgraph_params(myopts, myaction):
myparams["selective"] = True
return myparams
- rebuild_if_new_slot_abi = myopts.get('--rebuild-if-new-slot-abi')
- if rebuild_if_new_slot_abi is not None:
- myparams['rebuild_if_new_slot_abi'] = rebuild_if_new_slot_abi
+ rebuild_if_new_slot = myopts.get('--rebuild-if-new-slot')
+ if rebuild_if_new_slot is not None:
+ myparams['rebuild_if_new_slot'] = rebuild_if_new_slot
if "--update" in myopts or \
+ "--newrepo" in myopts or \
"--newuse" in myopts or \
"--reinstall" in myopts or \
"--noreplace" in myopts or \
diff --git a/pym/_emerge/create_world_atom.py b/pym/_emerge/create_world_atom.py
index 35fb7c4bd..ac994cc04 100644
--- a/pym/_emerge/create_world_atom.py
+++ b/pym/_emerge/create_world_atom.py
@@ -1,7 +1,15 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import sys
+
from portage.dep import _repo_separator
+from portage.exception import InvalidData
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
def create_world_atom(pkg, args_set, root_config):
"""Create a new atom for the world file if one does not exist. If the
@@ -35,16 +43,15 @@ def create_world_atom(pkg, args_set, root_config):
for cpv in portdb.match(cp):
for repo in repos:
try:
- available_slots.add(portdb.aux_get(cpv, ["SLOT"],
- myrepo=repo)[0])
- except KeyError:
+ available_slots.add(portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
pass
slotted = len(available_slots) > 1 or \
(len(available_slots) == 1 and "0" not in available_slots)
if not slotted:
# check the vdb in case this is multislot
- available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
+ available_slots = set(vardb._pkg_str(cpv, None).slot \
for cpv in vardb.match(cp))
slotted = len(available_slots) > 1 or \
(len(available_slots) == 1 and "0" not in available_slots)
@@ -83,14 +90,14 @@ def create_world_atom(pkg, args_set, root_config):
matched_slots = set()
if mydb is vardb:
for cpv in matches:
- matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+ matched_slots.add(mydb._pkg_str(cpv, None).slot)
else:
for cpv in matches:
for repo in repos:
try:
- matched_slots.add(portdb.aux_get(cpv, ["SLOT"],
- myrepo=repo)[0])
- except KeyError:
+ matched_slots.add(
+ portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
pass
if len(matched_slots) == 1:
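
The multislot test used twice above reduces to a small predicate; a standalone sketch (the slot values are illustrative):

def is_slotted(available_slots):
    # Slotted when more than one SLOT is available, or when the
    # single available SLOT is something other than "0".
    return len(available_slots) > 1 or \
        (len(available_slots) == 1 and "0" not in available_slots)

print(is_slotted(set(["0"])))        # False
print(is_slotted(set(["2", "3"])))   # True
print(is_slotted(set(["1.5"])))      # True
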
diff --git a/pym/_emerge/depgraph.py b/pym/_emerge/depgraph.py
index 0f3bc9389..abb70a769 100644
--- a/pym/_emerge/depgraph.py
+++ b/pym/_emerge/depgraph.py
@@ -1,34 +1,38 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
-import difflib
+import collections
import errno
import io
import logging
import stat
import sys
import textwrap
+import warnings
from collections import deque
from itertools import chain
import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
-from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
check_required_use, human_readable_required_use, match_from_list, \
_repo_separator
-from portage.dep._slot_abi import ignore_built_slot_abi_deps
-from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
-from portage.exception import (InvalidAtom, InvalidDependString,
+from portage.dep._slot_operator import ignore_built_slot_operator_deps
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
+ _get_eapi_attrs
+from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
PackageNotFound, PortageException)
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
+from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
@@ -38,13 +42,16 @@ from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
-from portage.util.listdir import _ignorecvs_dirs
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from portage.versions import catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
+from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
@@ -52,6 +59,7 @@ from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
@@ -68,9 +76,10 @@ from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.userquery import userquery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
-from _emerge.resolver.output import Display
+from _emerge.resolver.output import Display, format_unmatched_atom
if sys.hexversion >= 0x3000000:
basestring = str
@@ -115,8 +124,8 @@ class _frozen_depgraph_config(object):
self._pkg_cache = {}
self._highest_license_masked = {}
dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
- ignore_built_slot_abi_deps = myopts.get(
- "--ignore-built-slot-abi-deps", "n") == "y"
+ ignore_built_slot_operator_deps = myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
@@ -132,7 +141,7 @@ class _frozen_depgraph_config(object):
pkg_cache=self._pkg_cache,
pkg_root_config=self.roots[myroot],
dynamic_deps=dynamic_deps,
- ignore_built_slot_abi_deps=ignore_built_slot_abi_deps)
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings)
@@ -267,13 +276,12 @@ class _rebuild_config(object):
return True
elif (parent.installed and
root_slot not in self.reinstall_list):
- inst_build_time = parent.metadata.get("BUILD_TIME")
try:
bin_build_time, = bindb.aux_get(parent.cpv,
["BUILD_TIME"])
except KeyError:
continue
- if bin_build_time != inst_build_time:
+ if bin_build_time != _unicode(parent.build_time):
# 2) Remote binary package is valid, and local package
# is not up to date. Force reinstall.
reinstall = True
@@ -335,11 +343,8 @@ class _dynamic_depgraph_config(object):
self.myparams = myparams.copy()
self._vdb_loaded = False
self._allow_backtracking = allow_backtracking
- # Maps slot atom to package for each Package added to the graph.
- self._slot_pkg_map = {}
# Maps nodes to the reasons they were selected for reinstallation.
self._reinstall_nodes = {}
- self.mydbapi = {}
# Contains a filtered view of preferred packages that are selected
# from available repositories.
self._filtered_trees = {}
@@ -374,14 +379,6 @@ class _dynamic_depgraph_config(object):
 		# This is used to check whether we have accounted for blockers
# relevant to a package.
self._traversed_pkg_deps = set()
- # This should be ordered such that the backtracker will
- # attempt to solve conflicts which occurred earlier first,
- # since an earlier conflict can be the cause of a conflict
- # which occurs later.
- self._slot_collision_info = OrderedDict()
- # Slot collision nodes are not allowed to block other packages since
- # blocker validation is only able to account for one package per slot.
- self._slot_collision_nodes = set()
self._parent_atoms = {}
self._slot_conflict_handler = None
self._circular_dependency_handler = None
@@ -412,28 +409,31 @@ class _dynamic_depgraph_config(object):
self._needed_license_changes = backtrack_parameters.needed_license_changes
self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
- self._slot_abi_replace_installed = backtrack_parameters.slot_abi_replace_installed
+ self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
+ self._prune_rebuilds = backtrack_parameters.prune_rebuilds
self._need_restart = False
# For conditions that always require user intervention, such as
# unsatisfied REQUIRED_USE (currently has no autounmask support).
self._skip_restart = False
self._backtrack_infos = {}
+ self._buildpkgonly_deps_unsatisfied = False
self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
self._success_without_autounmask = False
self._traverse_ignored_deps = False
self._complete_mode = False
- self._slot_abi_deps = {}
+ self._slot_operator_deps = {}
+ self._package_tracker = PackageTracker()
+ # Track missed updates caused by solved conflicts.
+ self._conflict_missed_update = collections.defaultdict(dict)
for myroot in depgraph._frozen_config.trees:
self.sets[myroot] = _depgraph_sets()
- self._slot_pkg_map[myroot] = {}
vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
# This dbapi instance will model the state that the vdb will
# have after new packages have been installed.
- fakedb = PackageVirtualDbapi(vardb.settings)
+ fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)
- self.mydbapi[myroot] = fakedb
def graph_tree():
pass
graph_tree.dbapi = fakedb
@@ -446,6 +446,7 @@ class _dynamic_depgraph_config(object):
self._graph_trees[myroot]["vartree"] = graph_tree
self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
self._graph_trees[myroot]["graph"] = self.digraph
+ self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
def filtered_tree():
pass
filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
@@ -472,6 +473,7 @@ class _dynamic_depgraph_config(object):
self._filtered_trees[myroot]["graph"] = self.digraph
self._filtered_trees[myroot]["vartree"] = \
depgraph._frozen_config.trees[myroot]["vartree"]
+ self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
dbs = []
# (db, pkg_type, built, installed, db_keys)
@@ -502,8 +504,6 @@ class depgraph(object):
pkg_tree_map = RootConfig.pkg_tree_map
- _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
-
def __init__(self, settings, trees, myopts, myparams, spinner,
frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
if frozen_config is None:
@@ -517,6 +517,9 @@ class depgraph(object):
self._select_atoms = self._select_atoms_highest_available
self._select_package = self._select_pkg_highest_available
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+
def _load_vdb(self):
"""
Load installed package metadata if appropriate. This used to be called
@@ -535,10 +538,6 @@ class depgraph(object):
preload_installed_pkgs = \
"--nodeps" not in self._frozen_config.myopts
- if self._frozen_config.myopts.get("--root-deps") is not None and \
- myroot != self._frozen_config.target_root:
- continue
-
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
@@ -552,24 +551,157 @@ class depgraph(object):
if preload_installed_pkgs:
vardb = fake_vartree.dbapi
- fakedb = self._dynamic_config._graph_trees[
- myroot]["vartree"].dbapi
- for pkg in vardb:
- self._spinner_update()
- if dynamic_deps:
- # This causes FakeVartree to update the
- # Package instance dependencies via
- # PackageVirtualDbapi.aux_update()
- vardb.aux_get(pkg.cpv, [])
- fakedb.cpv_inject(pkg)
+ if not dynamic_deps:
+ for pkg in vardb:
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ else:
+ max_jobs = self._frozen_config.myopts.get("--jobs")
+ max_load = self._frozen_config.myopts.get("--load-average")
+ scheduler = TaskScheduler(
+ self._dynamic_deps_preload(fake_vartree),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ event_loop=fake_vartree._portdb._event_loop)
+ scheduler.start()
+ scheduler.wait()
self._dynamic_config._vdb_loaded = True
+ def _dynamic_deps_preload(self, fake_vartree):
+ portdb = fake_vartree._portdb
+ for pkg in fake_vartree.dbapi:
+ self._spinner_update()
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ ebuild_path, repo_path = \
+ portdb.findname2(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ fake_vartree.dynamic_deps_preload(pkg, None)
+ continue
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ pkg.cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ fake_vartree.dynamic_deps_preload(pkg, metadata)
+ else:
+ proc = EbuildMetadataPhase(cpv=pkg.cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+ proc.addExitListener(
+ self._dynamic_deps_proc_exit(pkg, fake_vartree))
+ yield proc
+
+ class _dynamic_deps_proc_exit(object):
+
+ __slots__ = ('_pkg', '_fake_vartree')
+
+ def __init__(self, pkg, fake_vartree):
+ self._pkg = pkg
+ self._fake_vartree = fake_vartree
+
+ def __call__(self, proc):
+ metadata = None
+ if proc.returncode == os.EX_OK:
+ metadata = proc.metadata
+ self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
+
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
+ def _compute_abi_rebuild_info(self):
+ """
+ Fill self._forced_rebuilds with packages that cause rebuilds.
+ """
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ # Get all atoms that might have caused a forced rebuild.
+ atoms = {}
+ for s in self._dynamic_config._initial_arg_list:
+ if s.force_reinstall:
+ root = s.root_config.root
+ atoms.setdefault(root, set()).update(s.pset)
+
+ if debug:
+ writemsg_level("forced reinstall atoms:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in atoms:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for atom in atoms[root]:
+ writemsg_level(" atom: %s\n" % atom,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Go through all slot operator deps and check if one of these deps
+ # has a parent that is matched by one of the atoms from above.
+ forced_rebuilds = {}
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ rebuild_atoms = atoms.get(root, set())
+
+ for dep in deps:
+ if getattr(dep.parent, "installed", False) or dep.child.installed or \
+ dep.parent.slot_atom not in rebuild_atoms:
+ continue
+
+ # Make sure the child's slot/subslot has changed. If it hasn't,
+ # then another child has forced this rebuild.
+ installed_pkg = self._select_pkg_from_installed(root, dep.child.slot_atom)[0]
+ if installed_pkg and installed_pkg.slot == dep.child.slot and \
+ installed_pkg.sub_slot == dep.child.sub_slot:
+ continue
+
+ # The child has forced a rebuild of the parent
+ forced_rebuilds.setdefault(root, {}).setdefault(dep.child, set()).add(dep.parent)
+
+ if debug:
+ writemsg_level("slot operator dependencies:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ writemsg_level(" (%s, %s)\n" % \
+ (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
+ for dep in deps:
+ writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)
+
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+
+ writemsg_level("forced rebuilds:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in forced_rebuilds:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for child in forced_rebuilds[root]:
+ writemsg_level(" child: %s\n" % child,
+ level=logging.DEBUG, noiselevel=-1)
+ for parent in forced_rebuilds[root][child]:
+ writemsg_level(" parent: %s\n" % parent,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ self._forced_rebuilds = forced_rebuilds
+
+ def _show_abi_rebuild_info(self):
+
+ if not self._forced_rebuilds:
+ return
+
+ writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)
+
+ for root in self._forced_rebuilds:
+ for child in self._forced_rebuilds[root]:
+ writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
+ for parent in self._forced_rebuilds[root][child]:
+ writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
+
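
The mapping consumed here nests root -> child -> set(parents); a toy version of the same display loop (the package names are invented, echoing a typical sub-slot bump):

forced_rebuilds = {
    "/": {
        "dev-libs/icu-51.2": set(["app-text/poppler-0.22.5"]),
    },
}
for root in forced_rebuilds:
    for child in forced_rebuilds[root]:
        print("  %s causes rebuilds for:" % (child,))
        for parent in forced_rebuilds[root][child]:
            print("    %s" % (parent,))
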
def _show_ignored_binaries(self):
"""
Show binaries that have been ignored because their USE didn't
@@ -583,26 +715,23 @@ class depgraph(object):
for pkg in list(self._dynamic_config.ignored_binaries):
- selected_pkg = self._dynamic_config.mydbapi[pkg.root
- ].match_pkgs(pkg.slot_atom)
+ selected_pkg = list()
- if not selected_pkg:
- continue
+ for selected_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
- selected_pkg = selected_pkg[-1]
- if selected_pkg > pkg:
- self._dynamic_config.ignored_binaries.pop(pkg)
- continue
+ if selected_pkg > pkg:
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
- if selected_pkg.installed and \
- selected_pkg.cpv == pkg.cpv and \
- selected_pkg.metadata.get('BUILD_TIME') == \
- pkg.metadata.get('BUILD_TIME'):
- # We don't care about ignored binaries when an
- # identical installed instance is selected to
- # fill the slot.
- self._dynamic_config.ignored_binaries.pop(pkg)
- continue
+ if selected_pkg.installed and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.build_time == pkg.build_time:
+ # We don't care about ignored binaries when an
+ # identical installed instance is selected to
+ # fill the slot.
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
if not self._dynamic_config.ignored_binaries:
return
@@ -613,11 +742,17 @@ class depgraph(object):
"due to non matching USE:\n\n", noiselevel=-1)
for pkg, flags in self._dynamic_config.ignored_binaries.items():
- writemsg(" =%s" % pkg.cpv, noiselevel=-1)
+ flag_display = []
+ for flag in sorted(flags):
+ if flag not in pkg.use.enabled:
+ flag = "-" + flag
+ flag_display.append(flag)
+ flag_display = " ".join(flag_display)
+ # The user can paste this line into package.use
+ writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
- writemsg(" for %s" % (pkg.root,), noiselevel=-1)
- writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
- noiselevel=-1)
+ writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
msg = [
"",
@@ -631,31 +766,44 @@ class depgraph(object):
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
- def _show_missed_update(self):
+ def _get_missed_updates(self):
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
- self._dynamic_config._runtime_pkg_mask.items():
+ chain(self._dynamic_config._runtime_pkg_mask.items(),
+ self._dynamic_config._conflict_missed_update.items()):
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
continue
- chosen_pkg = self._dynamic_config.mydbapi[pkg.root
- ].match_pkgs(pkg.slot_atom)
- if not chosen_pkg or chosen_pkg[-1] >= pkg:
- continue
- k = (pkg.root, pkg.slot_atom)
- if k in missed_updates:
- other_pkg, mask_type, parent_atoms = missed_updates[k]
- if other_pkg > pkg:
- continue
- for mask_type, parent_atoms in mask_reasons.items():
- if not parent_atoms:
- continue
- missed_updates[k] = (pkg, mask_type, parent_atoms)
- break
+ missed_update = True
+ any_selected = False
+ for chosen_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
+ any_selected = True
+ if chosen_pkg > pkg or (not chosen_pkg.installed and \
+ chosen_pkg.version == pkg.version):
+ missed_update = False
+ break
+ if any_selected and missed_update:
+ k = (pkg.root, pkg.slot_atom)
+ if k in missed_updates:
+ other_pkg, mask_type, parent_atoms = missed_updates[k]
+ if other_pkg > pkg:
+ continue
+ for mask_type, parent_atoms in mask_reasons.items():
+ if not parent_atoms:
+ continue
+ missed_updates[k] = (pkg, mask_type, parent_atoms)
+ break
+
+ return missed_updates
+
+ def _show_missed_update(self):
+
+ missed_updates = self._get_missed_updates()
if not missed_updates:
return
@@ -726,7 +874,7 @@ class depgraph(object):
self._show_merge_list()
msg = []
- msg.append("\nWARNING: One or more updates have been " + \
+ msg.append("\nWARNING: One or more updates/rebuilds have been " + \
"skipped due to a dependency conflict:\n\n")
indent = " "
@@ -736,22 +884,29 @@ class depgraph(object):
msg.append(" for %s" % (pkg.root,))
msg.append("\n\n")
- for parent, atom in parent_atoms:
- msg.append(indent)
- msg.append(str(pkg))
+ msg.append(indent)
+ msg.append(str(pkg))
+ msg.append(" conflicts with\n")
- msg.append(" conflicts with\n")
- msg.append(2*indent)
+ for parent, atom in parent_atoms:
if isinstance(parent,
(PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
+ msg.append(2*indent)
msg.append(str(parent))
+ msg.append("\n")
else:
# Display the specific atom from SetArg or
# Package types.
- msg.append("%s required by %s" % (atom, parent))
- msg.append("\n")
+ atom, marker = format_unmatched_atom(
+ pkg, atom, self._pkg_use_enabled)
+
+ msg.append(2*indent)
+ msg.append("%s required by %s\n" % (atom, parent))
+ msg.append(2*indent)
+ msg.append(marker)
+ msg.append("\n")
msg.append("\n")
writemsg("".join(msg), noiselevel=-1)
@@ -764,7 +919,7 @@ class depgraph(object):
cases.
"""
- if not self._dynamic_config._slot_collision_info:
+ if not any(self._dynamic_config._package_tracker.slot_conflicts()):
return
self._show_merge_list()
@@ -774,7 +929,7 @@ class depgraph(object):
conflict = handler.get_conflict()
writemsg(conflict, noiselevel=-1)
-
+
explanation = handler.get_explanation()
if explanation:
writemsg(explanation, noiselevel=-1)
@@ -813,6 +968,239 @@ class depgraph(object):
writemsg(line + '\n', noiselevel=-1)
writemsg('\n', noiselevel=-1)
+ def _solve_non_slot_operator_slot_conflicts(self):
+ """
+		This function solves slot conflicts which can be solved
+		by simply choosing one of the conflicting packages and
+		removing all the other ones. It is also able to solve
+		somewhat more complex cases where several conflicts can
+		only be solved simultaneously.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+
+ # List all conflicts. Ignore those that involve slot operator rebuilds
+ # as the logic there needs special slot conflict behavior which isn't
+ # provided by this function.
+ conflicts = []
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ slot_key = conflict.root, conflict.atom
+ if slot_key not in self._dynamic_config._slot_operator_replace_installed:
+ conflicts.append(conflict)
+
+ if not conflicts:
+ return
+
+ # Get a set of all conflicting packages.
+ conflict_pkgs = set()
+ for conflict in conflicts:
+ conflict_pkgs.update(conflict)
+
+ # Get the list of other packages which are only
+ # required by conflict packages.
+ indirect_conflict_candidates = set()
+ for pkg in conflict_pkgs:
+ indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
+ indirect_conflict_candidates.difference_update(conflict_pkgs)
+
+ indirect_conflict_pkgs = set()
+ while indirect_conflict_candidates:
+ pkg = indirect_conflict_candidates.pop()
+
+ only_conflict_parents = True
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
+ only_conflict_parents = False
+ break
+ if not only_conflict_parents:
+ continue
+
+ indirect_conflict_pkgs.add(pkg)
+ for child in self._dynamic_config.digraph.child_nodes(pkg):
+ if child in conflict_pkgs or child in indirect_conflict_pkgs:
+ continue
+ indirect_conflict_candidates.add(child)
+
+ # Create a graph containing the conflict packages
+ # and a special 'non_conflict_node' that represents
+ # all non-conflict packages.
+ conflict_graph = digraph()
+
+ non_conflict_node = "(non-conflict package)"
+ conflict_graph.add(non_conflict_node, None)
+
+ for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
+ conflict_graph.add(pkg, None)
+
+ # Add parent->child edges for each conflict package.
+		# Parents that aren't conflict packages are represented
+ # by 'non_conflict_node'.
+ # If several conflicting packages are matched, but not all,
+ # add a tuple with the matched packages to the graph.
+ class or_tuple(tuple):
+ """
+ Helper class for debug printing.
+ """
+ def __str__(self):
+ return "(%s)" % ",".join(str(pkg) for pkg in self)
+
+ for conflict in conflicts:
+ all_parent_atoms = set()
+ for pkg in conflict:
+ all_parent_atoms.update(
+ self._dynamic_config._parent_atoms.get(pkg, []))
+
+ for parent, atom in all_parent_atoms:
+ is_arg_parent = isinstance(parent, AtomArg)
+
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+
+ matched = []
+ for pkg in conflict:
+ if atom_set.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)) and \
+ not (is_arg_parent and pkg.installed):
+ matched.append(pkg)
+ if len(matched) == len(conflict):
+ # All packages match.
+ continue
+ elif len(matched) == 1:
+ conflict_graph.add(matched[0], parent)
+ else:
+				# More than one package matched, but not all.
+ conflict_graph.add(or_tuple(matched), parent)
+
+ for pkg in indirect_conflict_pkgs:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+ conflict_graph.add(pkg, parent)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict graph:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ conflict_graph.debug_print()
+
+ # Now select required packages. Collect them in the
+ # 'forced' set.
+ forced = set([non_conflict_node])
+ unexplored = set([non_conflict_node])
+ # or_tuples get special handling. We first explore
+ # all packages in the hope of having forced one of
+ # the packages in the tuple. This way we don't have
+ # to choose one.
+ unexplored_tuples = set()
+
+ while unexplored:
+ # Handle all unexplored packages.
+ while unexplored:
+ node = unexplored.pop()
+ for child in conflict_graph.child_nodes(node):
+ if child in forced:
+ continue
+ forced.add(child)
+ if isinstance(child, Package):
+ unexplored.add(child)
+ else:
+ unexplored_tuples.add(child)
+
+			# Now handle unexplored or_tuples. Go back to handling
+			# plain packages as soon as we have had to choose one.
+ while unexplored_tuples:
+ nodes = unexplored_tuples.pop()
+ if any(node in forced for node in nodes):
+ # At least one of the packages in the
+ # tuple is already forced, which means the
+ # dependency represented by this tuple
+ # is satisfied.
+ continue
+
+				# We now have to choose one of the packages in the tuple.
+				# In theory one could solve more conflicts if we were
+				# able to try different choices here, but that has lots
+				# of other problems. For now choose the package that was
+				# pulled first, as this should be the most desirable choice
+				# (otherwise it wouldn't have been the first one).
+ forced.add(nodes[0])
+ unexplored.add(nodes[0])
+ break
+
+ # Remove 'non_conflict_node' and or_tuples from 'forced'.
+ forced = set(pkg for pkg in forced if isinstance(pkg, Package))
+ non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict solution:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ for conflict in conflicts:
+ writemsg_level(
+ " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
+ level=logging.DEBUG, noiselevel=-1)
+ for pkg in conflict:
+ if pkg in forced:
+ writemsg_level(
+ " keep: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ writemsg_level(
+ " remove: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ broken_packages = set()
+ for pkg in non_forced:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if isinstance(parent, Package) and parent not in non_forced:
+ # Non-forcing set args are expected to be a parent of all
+ # packages in the conflict.
+ broken_packages.add(parent)
+ self._remove_pkg(pkg)
+
+		# Process the dependencies of chosen conflict packages
+ # again to properly account for blockers.
+ broken_packages.update(forced)
+
+ # Filter out broken packages which have been removed during
+ # recursive removal in self._remove_pkg.
+		broken_packages = list(pkg for pkg in broken_packages \
+			if self._dynamic_config._package_tracker.contains(pkg, installed=False))
+
+ self._dynamic_config._dep_stack.extend(broken_packages)
+
+ if broken_packages:
+ # Process dependencies. This cannot fail because we just ensured that
+ # the remaining packages satisfy all dependencies.
+ self._create_graph()
+
+ # Record missed updates.
+ for conflict in conflicts:
+ if not any(pkg in non_forced for pkg in conflict):
+ continue
+ for pkg in conflict:
+ if pkg not in non_forced:
+ continue
+
+ for other in conflict:
+ if other is pkg:
+ continue
+
+ for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ self._dynamic_config._conflict_missed_update[pkg].setdefault(
+ "slot conflict", set())
+ self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
+ (parent, atom))
+
+
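
The forced-set computation above is essentially a reachability walk from the non-conflict node, with or_tuples deferred until a choice becomes unavoidable. A self-contained sketch over a plain adjacency dict instead of portage's digraph (the graph shape and names are made up):

children = {
    "non-conflict": ["A1", ("B1", "B2")],  # the tuple plays or_tuple
    "A1": ["B1"],
    "A2": [],   # conflicts with A1 in the same slot; never forced
    "B1": [],
    "B2": [],
}

forced = set(["non-conflict"])
unexplored = set(["non-conflict"])
unexplored_tuples = set()

while unexplored or unexplored_tuples:
    while unexplored:
        node = unexplored.pop()
        for child in children.get(node, []):
            if child in forced:
                continue
            forced.add(child)
            if isinstance(child, tuple):
                unexplored_tuples.add(child)
            else:
                unexplored.add(child)
    # Only pick a tuple member once no plain package is left to force.
    while unexplored_tuples:
        nodes = unexplored_tuples.pop()
        if any(n in forced for n in nodes):
            continue  # this or-dependency is already satisfied
        forced.add(nodes[0])   # prefer the first-pulled package
        unexplored.add(nodes[0])
        break

print(sorted(n for n in forced if not isinstance(n, tuple)))
# ['A1', 'B1', 'non-conflict']; A2 and B2 would be removed.
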
def _process_slot_conflicts(self):
"""
If there are any slot conflicts and backtracking is enabled,
@@ -820,16 +1208,21 @@ class depgraph(object):
is called, so that all relevant reverse dependencies are
available for use in backtracking decisions.
"""
- for (slot_atom, root), slot_nodes in \
- self._dynamic_config._slot_collision_info.items():
- self._process_slot_conflict(root, slot_atom, slot_nodes)
- def _process_slot_conflict(self, root, slot_atom, slot_nodes):
+ self._solve_non_slot_operator_slot_conflicts()
+
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ self._process_slot_conflict(conflict)
+
+ def _process_slot_conflict(self, conflict):
"""
Process slot conflict data to identify specific atoms which
lead to conflict. These atoms only match a subset of the
packages that have been pulled into a given slot.
"""
+ root = conflict.root
+ slot_atom = conflict.atom
+ slot_nodes = conflict.pkgs
debug = "--debug" in self._frozen_config.myopts
@@ -897,21 +1290,13 @@ class depgraph(object):
all_parents, conflict_pkgs):
debug = "--debug" in self._frozen_config.myopts
- existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ root, slot_atom, installed=False))
+ # In order to avoid a missed update, first mask lower versions
+ # that conflict with higher versions (the backtracker visits
+ # these in reverse order).
+ conflict_pkgs.sort(reverse=True)
backtrack_data = []
- # The ordering of backtrack_data can make
- # a difference here, because both mask actions may lead
- # to valid, but different, solutions and the one with
- # 'existing_node' masked is usually the better one. Because
- # of that, we choose an order such that
- # the backtracker will first explore the choice with
- # existing_node masked. The backtracker reverses the
- # order, so the order it uses is the reverse of the
- # order shown here. See bug #339606.
- if existing_node in conflict_pkgs and \
- existing_node is not conflict_pkgs[-1]:
- conflict_pkgs.remove(existing_node)
- conflict_pkgs.append(existing_node)
for to_be_masked in conflict_pkgs:
# For missed update messages, find out which
# atoms matched to_be_selected that did not
@@ -922,19 +1307,6 @@ class depgraph(object):
if parent_atom not in parent_atoms)
backtrack_data.append((to_be_masked, conflict_atoms))
- if len(backtrack_data) > 1:
- # NOTE: Generally, we prefer to mask the higher
- # version since this solves common cases in which a
- # lower version is needed so that all dependencies
- # will be satisfied (bug #337178). However, if
- # existing_node happens to be installed then we
- # mask that since this is a common case that is
- # triggered when --update is not enabled.
- if existing_node.installed:
- pass
- elif any(pkg > existing_node for pkg in conflict_pkgs):
- backtrack_data.reverse()
-
to_be_masked = backtrack_data[-1][0]
self._dynamic_config._backtrack_infos.setdefault(
@@ -956,7 +1328,7 @@ class depgraph(object):
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
"""
- If one or more conflict atoms have a SLOT/ABI dep that can be resolved
+ If one or more conflict atoms have a slot/sub-slot dep that can be resolved
by rebuilding the parent package, then schedule the rebuild via
backtracking, and return True. Otherwise, return False.
"""
@@ -964,7 +1336,7 @@ class depgraph(object):
found_update = False
for parent_atom, conflict_pkgs in conflict_atoms.items():
parent, atom = parent_atom
- if atom.slot_abi_op != "=" or not parent.built:
+ if atom.slot_operator != "=" or not parent.built:
continue
if pkg not in conflict_pkgs:
@@ -977,13 +1349,96 @@ class depgraph(object):
dep = Dependency(atom=atom, child=other_pkg,
parent=parent, root=pkg.root)
- if self._slot_abi_update_probe(dep):
- self._slot_abi_update_backtrack(dep)
+ new_dep = \
+ self._slot_operator_update_probe_slot_conflict(dep)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_dep=new_dep)
found_update = True
return found_update
- def _slot_abi_update_backtrack(self, dep, new_child_slot=None):
+ def _slot_change_probe(self, dep):
+ """
+		@rtype: Package or None
+		@return: the corresponding unbuilt ebuild instance if dep.child
+			should be rebuilt due to a change in sub-slot (without
+			revbump, as in bug #456208), otherwise None.
+ """
+ if not (isinstance(dep.parent, Package) and \
+ not dep.parent.built and dep.child.built):
+ return None
+
+ root_config = self._frozen_config.roots[dep.root]
+ matches = []
+ try:
+ matches.append(self._pkg(dep.child.cpv, "ebuild",
+ root_config, myrepo=dep.child.repo))
+ except PackageNotFound:
+ pass
+
+ for unbuilt_child in chain(matches,
+ self._iter_match_pkgs(root_config, "ebuild",
+ Atom("=%s" % (dep.child.cpv,)))):
+ if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ unbuilt_child,
+ modified_use=self._pkg_use_enabled(unbuilt_child)):
+ continue
+ if not self._pkg_visibility_check(unbuilt_child):
+ continue
+ break
+ else:
+ return None
+
+ if unbuilt_child.slot == dep.child.slot and \
+ unbuilt_child.sub_slot == dep.child.sub_slot:
+ return None
+
+ return unbuilt_child
+
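
_slot_change_probe triggers on exactly the bug #456208 pattern: the installed child and the matching ebuild share a version but differ in sub-slot. A toy comparison with plain dicts standing in for Package instances (the values are invented):

installed_child = {"slot": "0", "sub_slot": "1"}  # built as SLOT="0/1"
unbuilt_child = {"slot": "0", "sub_slot": "2"}    # ebuild now SLOT="0/2"

if installed_child["slot"] == unbuilt_child["slot"] and \
        installed_child["sub_slot"] == unbuilt_child["sub_slot"]:
    probe_result = None           # nothing changed; no rebuild needed
else:
    probe_result = unbuilt_child  # rebuild against the new sub-slot
print(probe_result)
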
+ def _slot_change_backtrack(self, dep, new_child_slot):
+ child = dep.child
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot/sub-slot change:")
+ msg.append(" child package: %s" % child)
+ msg.append(" child slot: %s/%s" %
+ (child.slot, child.sub_slot))
+ msg.append(" new child: %s" % new_child_slot)
+ msg.append(" new child slot: %s/%s" %
+ (new_child_slot.slot, new_child_slot.sub_slot))
+ msg.append(" parent package: %s" % dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not child.installed:
+ masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if child.installed:
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ reinstalls.add((child.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
+ new_dep=None):
if new_child_slot is None:
child = dep.child
else:
@@ -997,6 +1452,8 @@ class depgraph(object):
if new_child_slot is not None:
msg.append(" new child slot package: %s" % new_child_slot)
msg.append(" parent package: %s" % dep.parent)
+ if new_dep is not None:
+ msg.append(" new parent pkg: %s" % new_dep.parent)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level("\n".join(msg),
@@ -1008,28 +1465,54 @@ class depgraph(object):
abi_masks = {}
if new_child_slot is None:
if not child.installed:
- abi_masks.setdefault(child, {})["slot_abi_mask_built"] = None
+ abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
if not dep.parent.installed:
- abi_masks.setdefault(dep.parent, {})["slot_abi_mask_built"] = None
+ abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
if abi_masks:
- config.setdefault("slot_abi_mask_built", {}).update(abi_masks)
+ config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
# trigger replacement of installed packages if necessary
abi_reinstalls = set()
if dep.parent.installed:
- abi_reinstalls.add((dep.parent.root, dep.parent.slot_atom))
+ if new_dep is not None:
+ replacement_atom = new_dep.parent.slot_atom
+ else:
+ replacement_atom = self._replace_installed_atom(dep.parent)
+ if replacement_atom is not None:
+ abi_reinstalls.add((dep.parent.root, replacement_atom))
if new_child_slot is None and child.installed:
- abi_reinstalls.add((child.root, child.slot_atom))
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ abi_reinstalls.add((child.root, replacement_atom))
if abi_reinstalls:
- config.setdefault("slot_abi_replace_installed",
+ config.setdefault("slot_operator_replace_installed",
set()).update(abi_reinstalls)
self._dynamic_config._need_restart = True
- def _slot_abi_update_probe(self, dep, new_child_slot=False):
+ def _slot_operator_update_probe_slot_conflict(self, dep):
+ new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
+
+ if new_dep is not None:
+ return new_dep
+
+ if self._dynamic_config._autounmask is True:
+
+ for autounmask_level in self._autounmask_levels():
+
+ new_dep = self._slot_operator_update_probe(dep,
+ slot_conflict=True, autounmask_level=autounmask_level)
+
+ if new_dep is not None:
+ return new_dep
+
+ return None
+
+ def _slot_operator_update_probe(self, dep, new_child_slot=False,
+ slot_conflict=False, autounmask_level=None):
"""
- SLOT/ABI := operators tend to prevent updates from getting pulled in,
- since installed packages pull in packages with the SLOT/ABI that they
+ slot/sub-slot := operators tend to prevent updates from getting pulled in,
+ since installed packages pull in packages with the slot/sub-slot that they
were built against. Detect this case so that we can schedule rebuilds
and reinstalls when appropriate.
NOTE: This function only searches for updates that involve upgrades
@@ -1048,20 +1531,70 @@ class depgraph(object):
return None
debug = "--debug" in self._frozen_config.myopts
+ selective = "selective" in self._dynamic_config.myparams
want_downgrade = None
+ def check_reverse_dependencies(existing_pkg, candidate_pkg):
+ """
+ Check if candidate_pkg satisfies all of existing_pkg's non-
+ slot operator parents.
+ """
+ for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
+ if atom.slot_operator == "=" and parent.built:
+ continue
+
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if not atom_set.findAtomForPackage(candidate_pkg,
+ modified_use=self._pkg_use_enabled(candidate_pkg)):
+ return False
+ return True
+
+
for replacement_parent in self._iter_similar_available(dep.parent,
- dep.parent.slot_atom):
+ dep.parent.slot_atom, autounmask_level=autounmask_level):
- for atom in replacement_parent.validated_atoms:
- if not atom.slot_abi_op == "=" or \
- atom.blocker or \
+ if not check_reverse_dependencies(dep.parent, replacement_parent):
+ continue
+
+ selected_atoms = None
+
+ atoms = set()
+ invalid_metadata = False
+ for dep_key in ("DEPEND", "HDEPEND", "RDEPEND", "PDEPEND"):
+ dep_string = replacement_parent._metadata[dep_key]
+ if not dep_string:
+ continue
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(replacement_parent),
+ is_valid_flag=replacement_parent.iuse.is_valid_flag,
+ flat=True, token_class=Atom,
+ eapi=replacement_parent.eapi)
+ except portage.exception.InvalidDependString:
+ invalid_metadata = True
+ break
+
+ atoms.update(token for token in dep_string if isinstance(token, Atom))
+
+ if invalid_metadata:
+ continue
+
+			# List of lists of (child, atom) pairs, one list per atom.
+ replacement_candidates = []
+ # Set of all packages all atoms can agree on.
+ all_candidate_pkgs = None
+
+ for atom in atoms:
+ if atom.blocker or \
atom.cp != dep.atom.cp:
continue
# Discard USE deps, we're only searching for an approximate
# pattern, and dealing with USE states is too complex for
# this purpose.
+ unevaluated_atom = atom.unevaluated_atom
atom = atom.without_use
if replacement_parent.built and \
@@ -1071,11 +1604,13 @@ class depgraph(object):
# parent and search for another.
break
+ candidate_pkg_atoms = []
+ candidate_pkgs = []
for pkg in self._iter_similar_available(
dep.child, atom):
if pkg.slot == dep.child.slot and \
- pkg.slot_abi == dep.child.slot_abi:
- # If SLOT/ABI is identical, then there's
+ pkg.sub_slot == dep.child.sub_slot:
+ # If slot/sub-slot is identical, then there's
# no point in updating.
continue
if new_child_slot:
@@ -1093,39 +1628,192 @@ class depgraph(object):
want_downgrade = self._downgrade_probe(dep.child)
# be careful not to trigger a rebuild when
# the only version available with a
- # different slot_abi is an older version
+ # different slot_operator is an older version
if not want_downgrade:
continue
+ insignificant = False
+ if not slot_conflict and \
+ selective and \
+ dep.parent.installed and \
+ dep.child.installed and \
+ dep.parent >= replacement_parent and \
+ dep.child.cpv == pkg.cpv:
+						# This can happen if the child's sub-slot changed
+ # without a revision bump. The sub-slot change is
+ # considered insignificant until one of its parent
+ # packages needs to be rebuilt (which may trigger a
+ # slot conflict).
+ insignificant = True
+
+ if not insignificant:
+ # Evaluate USE conditionals and || deps, in order
+ # to see if this atom is really desirable, since
+ # otherwise we may trigger an undesirable rebuild
+ # as in bug #460304.
+ if selected_atoms is None:
+ selected_atoms = self._select_atoms_probe(
+ dep.child.root, replacement_parent)
+ if unevaluated_atom not in selected_atoms:
+ continue
+
+ if not insignificant and \
+ check_reverse_dependencies(dep.child, pkg):
+
+ candidate_pkg_atoms.append((pkg, unevaluated_atom))
+ candidate_pkgs.append(pkg)
+ replacement_candidates.append(candidate_pkg_atoms)
+ if all_candidate_pkgs is None:
+ all_candidate_pkgs = set(candidate_pkgs)
+ else:
+ all_candidate_pkgs.intersection_update(candidate_pkgs)
+
+ if not all_candidate_pkgs:
+ # If the atoms that connect parent and child can't agree on
+ # any replacement child, we can't do anything.
+ continue
+
+ # Now select one of the pkgs as replacement. This is as easy as
+ # selecting the highest version.
+ # The more complicated part is to choose an atom for the
+			# new Dependency object. Choose the one which ranked the selected
+			# package highest.
+ selected = None
+ for candidate_pkg_atoms in replacement_candidates:
+ for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
+ if pkg not in all_candidate_pkgs:
+ continue
+ if selected is None or \
+ selected[0] < pkg or \
+ (selected[0] is pkg and i < selected[2]):
+ selected = (pkg, atom, i)
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % selected[0])
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return Dependency(parent=replacement_parent,
+ child=selected[0], atom=selected[1])
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % None)
+ msg.append(" new parent package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return None
+
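
The final selection above picks the highest candidate version first and, on a tie, the atom that ranked the selected package earliest in its candidate list. A reduced sketch with integers standing in for Package objects (the real code compares Package identity with 'is'):

replacement_candidates = [
    [(2, "atom-a"), (3, "atom-a")],  # candidates seen via atom-a
    [(3, "atom-b"), (2, "atom-b")],  # candidates seen via atom-b
]
all_candidate_pkgs = set([2, 3])     # pkgs every atom agreed on

selected = None
for candidate_pkg_atoms in replacement_candidates:
    for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
        if pkg not in all_candidate_pkgs:
            continue
        if selected is None or selected[0] < pkg or \
                (selected[0] == pkg and i < selected[2]):
            selected = (pkg, atom, i)

print(selected)  # (3, 'atom-b', 0): highest version wins, then the
                 # atom that listed it earliest (index 0).
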
+ def _slot_operator_unsatisfied_probe(self, dep):
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return False
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom):
+
+ for atom in replacement_parent.validated_atoms:
+ if not atom.slot_operator == "=" or \
+ atom.blocker or \
+ atom.cp != dep.atom.cp:
+ continue
+
+ # Discard USE deps, we're only searching for an approximate
+ # pattern, and dealing with USE states is too complex for
+ # this purpose.
+ atom = atom.without_use
+
+ pkg, existing_node = self._select_package(dep.root, atom,
+ onlydeps=dep.onlydeps)
+
+ if pkg is not None:
+
if debug:
msg = []
msg.append("")
msg.append("")
- msg.append("slot_abi_update_probe:")
- msg.append(" existing child package: %s" % dep.child)
+ msg.append("slot_operator_unsatisfied_probe:")
msg.append(" existing parent package: %s" % dep.parent)
- msg.append(" new child package: %s" % pkg)
+ msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % replacement_parent)
+ msg.append(" new child package: %s" % pkg)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
- return pkg
+ return True
if debug:
msg = []
msg.append("")
msg.append("")
- msg.append("slot_abi_update_probe:")
- msg.append(" existing child package: %s" % dep.child)
+ msg.append("slot_operator_unsatisfied_probe:")
msg.append(" existing parent package: %s" % dep.parent)
- msg.append(" new child package: %s" % None)
+ msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % None)
+ msg.append(" new child package: %s" % None)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
- return None
+ return False
+
+ def _slot_operator_unsatisfied_backtrack(self, dep):
+
+ parent = dep.parent
+
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied "
+ "built slot-operator dep:")
+ msg.append(" parent package: %s" % parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not parent.installed:
+ masks.setdefault(parent, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if parent.installed:
+ replacement_atom = self._replace_installed_atom(parent)
+ if replacement_atom is not None:
+ reinstalls.add((parent.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
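
All three backtracking helpers above funnel into the same 'config' sub-dictionary of _backtrack_infos; schematically it ends up shaped like this (the package and root values are invented):

backtrack_infos = {
    "config": {
        # Built packages to mask on the next backtracking run; each
        # value mirrors the masks.setdefault(pkg, {}) entries above.
        "slot_operator_mask_built": {
            "app-misc/parent-1.0": {"slot_operator_mask_built": None},
        },
        # (root, slot_atom) pairs whose installed instances should
        # be replaced rather than reused to satisfy the graph.
        "slot_operator_replace_installed": set([
            ("/", "app-misc/parent:0"),
        ]),
    },
}
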
def _downgrade_probe(self, pkg):
"""
@@ -1142,7 +1830,19 @@ class depgraph(object):
return available_pkg is not None
- def _iter_similar_available(self, graph_pkg, atom):
+ def _select_atoms_probe(self, root, pkg):
+ selected_atoms = []
+ use = self._pkg_use_enabled(pkg)
+ for k in pkg._dep_keys:
+ v = pkg._metadata.get(k)
+ if not v:
+ continue
+ selected_atoms.extend(self._select_atoms(
+ root, v, myuse=use, parent=pkg)[pkg])
+ return frozenset(x.unevaluated_atom for
+ x in selected_atoms)
+
+ def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
"""
Given a package that's in the graph, do a rough check to
see if a similar package is available to install. The given
@@ -1166,49 +1866,91 @@ class depgraph(object):
if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
continue
- if not self._pkg_visibility_check(pkg):
- continue
if pkg.built:
if self._equiv_binary_installed(pkg):
continue
if not (not use_ebuild_visibility and
(usepkgonly or useoldpkg_atoms.findAtomForPackage(
pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
- not self._equiv_ebuild_visible(pkg):
+ not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
continue
+ if not self._pkg_visibility_check(pkg,
+ autounmask_level=autounmask_level):
+ continue
yield pkg
- def _slot_abi_trigger_reinstalls(self):
+ def _replace_installed_atom(self, inst_pkg):
+ """
+ Given an installed package, generate an atom suitable for
+ slot_operator_replace_installed backtracking info. The replacement
+ SLOT may differ from the installed SLOT, so first search by cpv.
"""
- Search for packages with slot-abi deps on older slots, and schedule
+ built_pkgs = []
+ for pkg in self._iter_similar_available(inst_pkg,
+ Atom("=%s" % inst_pkg.cpv)):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ if built_pkgs:
+ best_version = None
+ for pkg in built_pkgs:
+ if best_version is None or pkg > best_version:
+ best_version = pkg
+ return best_version.slot_atom
+
+ return None
+
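
The preference order implemented above, restated as a self-contained
sketch; a "package" is mocked as a (slot_atom, built, installed,
version) tuple, and integer comparison stands in for Portage's package
ordering:

# Prefer an ebuild's SLOT; otherwise fall back to the highest built,
# not-yet-installed instance, as in _replace_installed_atom above.
def replace_installed_atom(candidates):
    built_pkgs = []
    for slot_atom, built, installed, version in candidates:
        if not built:
            # an ebuild is available: its SLOT is authoritative
            return slot_atom
        elif not installed:
            # remember built candidates while searching for an ebuild
            built_pkgs.append((version, slot_atom))
    if built_pkgs:
        return max(built_pkgs)[1]
    return None

print(replace_installed_atom([
    ("dev-libs/foo:0/1", True, False, 1),
    ("dev-libs/foo:0/2", True, False, 2),
]))  # -> dev-libs/foo:0/2
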
+ def _slot_operator_trigger_reinstalls(self):
+ """
+ Search for packages with slot-operator deps on older slots, and schedule
rebuilds if they can link to a newer slot that's in the graph.
"""
- rebuild_if_new_slot_abi = self._dynamic_config.myparams.get(
- "rebuild_if_new_slot_abi", "y") == "y"
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
- for slot_key, slot_info in self._dynamic_config._slot_abi_deps.items():
+ for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
for dep in slot_info:
- if not (dep.child.built and dep.parent and
+
+ atom = dep.atom
+ if atom.slot_operator is None:
+ continue
+
+ if not atom.slot_operator_built:
+ new_child_slot = self._slot_change_probe(dep)
+ if new_child_slot is not None:
+ self._slot_change_backtrack(dep, new_child_slot)
+ continue
+
+ if not (dep.parent and
isinstance(dep.parent, Package) and dep.parent.built):
continue
# Check for slot update first, since we don't want to
# trigger reinstall of the child package when a newer
# slot will be used instead.
- if rebuild_if_new_slot_abi:
- new_child = self._slot_abi_update_probe(dep,
+ if rebuild_if_new_slot:
+ new_dep = self._slot_operator_update_probe(dep,
new_child_slot=True)
- if new_child:
- self._slot_abi_update_backtrack(dep,
- new_child_slot=new_child)
- break
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_child_slot=new_dep.child)
if dep.want_update:
- if self._slot_abi_update_probe(dep):
- self._slot_abi_update_backtrack(dep)
- break
+ if self._slot_operator_update_probe(dep):
+ self._slot_operator_update_backtrack(dep)
def _reinstall_for_flags(self, pkg, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
@@ -1222,18 +1964,22 @@ class depgraph(object):
in ("y", "auto"))
newuse = "--newuse" in self._frozen_config.myopts
changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+ feature_flags = _get_feature_flags(
+ _get_eapi_attrs(pkg.eapi))
if newuse or (binpkg_respect_use and not changed_use):
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
elif changed_use or binpkg_respect_use:
- flags = orig_iuse.intersection(orig_use).symmetric_difference(
- cur_iuse.intersection(cur_use))
+ flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
return None
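
A worked example of the --newuse set arithmetic above; the variable
names mirror the function's parameters, and the flag values are made up:

orig_iuse = {"ssl", "ipv6", "doc"}   # IUSE when the binary was built
cur_iuse  = {"ssl", "ipv6", "curl"}  # IUSE of the current ebuild
orig_use  = {"ssl"}                  # flags enabled at build time
cur_use   = {"ssl", "ipv6"}          # flags enabled now
forced_flags = {"doc"}               # use.force/use.mask
feature_flags = frozenset()          # EAPI feature flags, if any

# flags added to or dropped from IUSE, minus forced ones
flags = set(orig_iuse.symmetric_difference(cur_iuse)
    .difference(forced_flags))                            # {'curl'}
# flags whose effective (IUSE-intersected) state changed
flags.update(orig_iuse.intersection(orig_use)
    .symmetric_difference(cur_iuse.intersection(cur_use)))  # adds 'ipv6'
flags.difference_update(feature_flags)
print(sorted(flags))  # ['curl', 'ipv6'] -> reinstall is triggered
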
@@ -1319,11 +2065,16 @@ class depgraph(object):
buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
nodeps = "--nodeps" in self._frozen_config.myopts
if dep.blocker:
+
+ # Slot collision nodes are not allowed to block other packages since
+ # blocker validation is only able to account for one package per slot.
+ is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \
+ self._dynamic_config._package_tracker.slot_conflicts())
if not buildpkgonly and \
not nodeps and \
not dep.collapsed_priority.ignored and \
not dep.collapsed_priority.optional and \
- dep.parent not in self._dynamic_config._slot_collision_nodes:
+ not is_slot_conflict_parent:
if dep.parent.onlydeps:
# It's safe to ignore blockers if the
# parent is an --onlydeps node.
@@ -1331,7 +2082,7 @@ class depgraph(object):
# The blocker applies to the root where
# the parent is or will be installed.
blocker = Blocker(atom=dep.atom,
- eapi=dep.parent.metadata["EAPI"],
+ eapi=dep.parent.eapi,
priority=dep.priority, root=dep.parent.root)
self._dynamic_config._blocker_parents.add(blocker, dep.parent)
return 1
@@ -1343,8 +2094,8 @@ class depgraph(object):
# The caller has selected a specific package
# via self._minimize_packages().
dep_pkg = dep.child
- existing_node = self._dynamic_config._slot_pkg_map[
- dep.root].get(dep_pkg.slot_atom)
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ dep.root, dep_pkg.slot_atom, installed=False), None)
if not dep_pkg:
if (dep.collapsed_priority.optional or
@@ -1368,9 +2119,17 @@ class depgraph(object):
(dep.parent,
self._dynamic_config._runtime_pkg_mask[
dep.parent]), noiselevel=-1)
- elif not self.need_restart():
+ elif dep.atom.slot_operator_built and \
+ self._slot_operator_unsatisfied_probe(dep):
+ self._slot_operator_unsatisfied_backtrack(dep)
+ return 1
+ else:
# Do not backtrack if only USE have to be changed in
- # order to satisfy the dependency.
+ # order to satisfy the dependency. Note that when
+ # want_restart_for_use_change sets the need_restart
+ # flag, it causes _select_pkg_highest_available to
+ # return None, and eventually we come through here
+ # and skip the "missing dependency" backtracking path.
dep_pkg, existing_node = \
self._select_package(dep.root, dep.atom.without_use,
onlydeps=dep.onlydeps)
@@ -1401,7 +2160,9 @@ class depgraph(object):
return 1
def _check_slot_conflict(self, pkg, atom):
- existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom, installed=False), None)
+
matches = None
if existing_node:
matches = pkg.cpv == existing_node.cpv
@@ -1477,12 +2238,13 @@ class depgraph(object):
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
- if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
+ if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
required_use_is_sat = check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag)
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi)
if not required_use_is_sat:
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
@@ -1505,30 +2267,29 @@ class depgraph(object):
existing_node, existing_node_matches = \
self._check_slot_conflict(pkg, dep.atom)
- slot_collision = False
if existing_node:
if existing_node_matches:
# The existing node can be reused.
- if arg_atoms:
- for parent_atom in arg_atoms:
- parent, atom = parent_atom
- self._dynamic_config.digraph.add(existing_node, parent,
- priority=priority)
- self._add_parent_atom(existing_node, parent_atom)
- # If a direct circular dependency is not an unsatisfied
- # buildtime dependency then drop it here since otherwise
- # it can skew the merge order calculation in an unwanted
- # way.
- if existing_node != myparent or \
- (priority.buildtime and not priority.satisfied):
- self._dynamic_config.digraph.addnode(existing_node, myparent,
- priority=priority)
- if dep.atom is not None and dep.parent is not None:
- self._add_parent_atom(existing_node,
- (dep.parent, dep.atom))
- return 1
+ if pkg != existing_node:
+ pkg = existing_node
+ previously_added = True
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before
+ # it was selected
+ raise
+
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Re-used Child:".ljust(15),
+ pkg, pkg_use_display(pkg,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+
else:
- self._add_slot_conflict(pkg)
if debug:
writemsg_level(
"%s%s %s\n" % ("Slot Conflict:".ljust(15),
@@ -1537,23 +2298,8 @@ class depgraph(object):
modified_use=self._pkg_use_enabled(existing_node))),
level=logging.DEBUG, noiselevel=-1)
- slot_collision = True
-
- if slot_collision:
- # Now add this node to the graph so that self.display()
- # can show use flags and --tree portage.output. This node is
- # only being partially added to the graph. It must not be
- # allowed to interfere with the other nodes that have been
- # added. Do not overwrite data for existing nodes in
- # self._dynamic_config.mydbapi since that data will be used for blocker
- # validation.
- # Even though the graph is now invalid, continue to process
- # dependencies so that things like --fetchonly can still
- # function despite collisions.
- pass
- elif not previously_added:
- self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
- self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
+ if not previously_added:
+ self._dynamic_config._package_tracker.add_pkg(pkg)
self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
self._dynamic_config._highest_pkg_cache.clear()
self._check_masks(pkg)
@@ -1563,11 +2309,11 @@ class depgraph(object):
# doesn't already. Any pre-existing providers will be preferred
# over this one.
try:
- pkgsettings.setinst(pkg.cpv, pkg.metadata)
+ pkgsettings.setinst(pkg.cpv, pkg._metadata)
# For consistency, also update the global virtuals.
settings = self._frozen_config.roots[pkg.root].settings
settings.unlock()
- settings.setinst(pkg.cpv, pkg.metadata)
+ settings.setinst(pkg.cpv, pkg._metadata)
settings.lock()
except portage.exception.InvalidDependString:
if not pkg.installed:
@@ -1577,12 +2323,19 @@ class depgraph(object):
if arg_atoms:
self._dynamic_config._set_nodes.add(pkg)
- # Do this even when addme is False (--onlydeps) so that the
+ # Do this even for onlydeps, so that the
# parent/child relationship is always known in case
# self._show_slot_collision_notice() needs to be called later.
- self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
- if dep.atom is not None and dep.parent is not None:
- self._add_parent_atom(pkg, (dep.parent, dep.atom))
+ # If a direct circular dependency is not an unsatisfied
+ # buildtime dependency then drop it here since otherwise
+ # it can skew the merge order calculation in an unwanted
+ # way.
+ if pkg != dep.parent or \
+ (priority.buildtime and not priority.satisfied):
+ self._dynamic_config.digraph.add(pkg,
+ dep.parent, priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
if arg_atoms:
for parent_atom in arg_atoms:
@@ -1612,9 +2365,9 @@ class depgraph(object):
not (deep is not True and depth > deep))
dep.child = pkg
- if (not pkg.onlydeps and pkg.built and
- dep.atom and dep.atom.slot_abi_built):
- self._add_slot_abi_dep(dep)
+ if (not pkg.onlydeps and
+ dep.atom and dep.atom.slot_operator is not None):
+ self._add_slot_operator_dep(dep)
recurse = deep is True or depth + 1 <= deep
dep_stack = self._dynamic_config._dep_stack
@@ -1629,6 +2382,64 @@ class depgraph(object):
dep_stack.append(pkg)
return 1
+
+ def _remove_pkg(self, pkg):
+ """
+ Remove a package and all its then parentless digraph
+		Remove a package, and any of its digraph children that are
+		thereby left parentless, from all depgraph data structures.
+ debug = "--debug" in self._frozen_config.myopts
+ if debug:
+ writemsg_level(
+ "Removing package: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ try:
+ children = [child for child in self._dynamic_config.digraph.child_nodes(pkg) \
+ if child is not pkg]
+ self._dynamic_config.digraph.remove(pkg)
+ except KeyError:
+ children = []
+
+ self._dynamic_config._package_tracker.discard_pkg(pkg)
+
+ self._dynamic_config._parent_atoms.pop(pkg, None)
+ self._dynamic_config._set_nodes.discard(pkg)
+
+ for child in children:
+ try:
+ self._dynamic_config._parent_atoms[child] = set((parent, atom) \
+ for (parent, atom) in self._dynamic_config._parent_atoms[child] \
+ if parent is not pkg)
+ except KeyError:
+ pass
+
+ # Remove slot operator dependencies.
+ slot_key = (pkg.root, pkg.slot_atom)
+ if slot_key in self._dynamic_config._slot_operator_deps:
+ self._dynamic_config._slot_operator_deps[slot_key] = \
+ [dep for dep in self._dynamic_config._slot_operator_deps[slot_key] \
+ if dep.child is not pkg]
+ if not self._dynamic_config._slot_operator_deps[slot_key]:
+ del self._dynamic_config._slot_operator_deps[slot_key]
+
+ # Remove blockers.
+ self._dynamic_config._blocker_parents.discard(pkg)
+ self._dynamic_config._irrelevant_blockers.discard(pkg)
+ self._dynamic_config._unsolvable_blockers.discard(pkg)
+ self._dynamic_config._blocked_pkgs.discard(pkg)
+ self._dynamic_config._blocked_world_pkgs.pop(pkg, None)
+
+ for child in children:
+ if child in self._dynamic_config.digraph and \
+ not self._dynamic_config.digraph.parent_nodes(child):
+ self._remove_pkg(child)
+
+ # Clear caches.
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+
+
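
A self-contained sketch of the recursive orphan pruning performed by
_remove_pkg, using a plain adjacency dict and a parent-set dict instead
of the digraph class; node names are illustrative:

def remove_pkg(graph, parents, pkg):
    children = graph.pop(pkg, [])
    for child_list in graph.values():
        if pkg in child_list:
            child_list.remove(pkg)
    for child in children:
        parents[child].discard(pkg)
        if child in graph and not parents[child]:
            # the child is now parentless: prune it too
            remove_pkg(graph, parents, child)

graph = {"app": ["libA", "libB"], "libA": [], "libB": [],
    "other": ["libB"]}
parents = {"app": set(), "other": set(),
    "libA": {"app"}, "libB": {"app", "other"}}
remove_pkg(graph, parents, "app")
print(sorted(graph))  # ['libB', 'other'] -- libA pruned, libB kept
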
def _check_masks(self, pkg):
slot_key = (pkg.root, pkg.slot_atom)
@@ -1647,33 +2458,23 @@ class depgraph(object):
self._dynamic_config._parent_atoms[pkg] = parent_atoms
parent_atoms.add(parent_atom)
- def _add_slot_abi_dep(self, dep):
+ def _add_slot_operator_dep(self, dep):
slot_key = (dep.root, dep.child.slot_atom)
- slot_info = self._dynamic_config._slot_abi_deps.get(slot_key)
+ slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
if slot_info is None:
slot_info = []
- self._dynamic_config._slot_abi_deps[slot_key] = slot_info
+ self._dynamic_config._slot_operator_deps[slot_key] = slot_info
slot_info.append(dep)
- def _add_slot_conflict(self, pkg):
- self._dynamic_config._slot_collision_nodes.add(pkg)
- slot_key = (pkg.slot_atom, pkg.root)
- slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
- if slot_nodes is None:
- slot_nodes = set()
- slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
- self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
- slot_nodes.add(pkg)
-
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
myroot = pkg.root
- metadata = pkg.metadata
+ metadata = pkg._metadata
removal_action = "remove" in self._dynamic_config.myparams
+ eapi_attrs = _get_eapi_attrs(pkg.eapi)
edepend={}
- depkeys = ["DEPEND","RDEPEND","PDEPEND"]
- for k in depkeys:
+ for k in Package._dep_keys:
edepend[k] = metadata[k]
if not pkg.built and \
@@ -1700,31 +2501,44 @@ class depgraph(object):
# Removal actions never traverse ignored buildtime
# dependencies, so it's safe to discard them early.
edepend["DEPEND"] = ""
+ edepend["HDEPEND"] = ""
ignore_build_time_deps = True
+ ignore_depend_deps = ignore_build_time_deps
+ ignore_hdepend_deps = ignore_build_time_deps
+
if removal_action:
depend_root = myroot
else:
- depend_root = self._frozen_config._running_root.root
- root_deps = self._frozen_config.myopts.get("--root-deps")
- if root_deps is not None:
- if root_deps is True:
- depend_root = myroot
- elif root_deps == "rdeps":
- ignore_build_time_deps = True
+ if eapi_attrs.hdepend:
+ depend_root = myroot
+ else:
+ depend_root = self._frozen_config._running_root.root
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_depend_deps = True
# If rebuild mode is not enabled, it's safe to discard ignored
# build-time dependencies. If you want these deps to be traversed
# in "complete" mode then you need to specify --with-bdeps=y.
- if ignore_build_time_deps and \
- not self._rebuild.rebuild:
- edepend["DEPEND"] = ""
+ if not self._rebuild.rebuild:
+ if ignore_depend_deps:
+ edepend["DEPEND"] = ""
+ if ignore_hdepend_deps:
+ edepend["HDEPEND"] = ""
deps = (
(depend_root, edepend["DEPEND"],
self._priority(buildtime=True,
- optional=(pkg.built or ignore_build_time_deps),
- ignored=ignore_build_time_deps)),
+ optional=(pkg.built or ignore_depend_deps),
+ ignored=ignore_depend_deps)),
+ (self._frozen_config._running_root.root, edepend["HDEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_hdepend_deps),
+ ignored=ignore_hdepend_deps)),
(myroot, edepend["RDEPEND"],
self._priority(runtime=True)),
(myroot, edepend["PDEPEND"],
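
In EAPI "5-hdepend" terms, DEPEND follows the target root while HDEPEND
always targets the build host; a compact, hedged restatement of the root
selection above (assuming no removal action and no --root-deps=rdeps):

def build_dep_roots(myroot, running_root, eapi_hdepend, root_deps=None):
    if eapi_hdepend:
        depend_root = myroot        # DEPEND follows the target root
    else:
        depend_root = running_root  # legacy: DEPEND builds on the host
        if root_deps is True:
            depend_root = myroot    # --root-deps override
    return {"DEPEND": depend_root, "HDEPEND": running_root}

print(build_dep_roots("/target", "/", eapi_hdepend=True))
# {'DEPEND': '/target', 'HDEPEND': '/'}
print(build_dep_roots("/target", "/", eapi_hdepend=False))
# {'DEPEND': '/', 'HDEPEND': '/'}
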
@@ -1749,7 +2563,7 @@ class depgraph(object):
uselist=self._pkg_use_enabled(pkg),
is_valid_flag=pkg.iuse.is_valid_flag,
opconvert=True, token_class=Atom,
- eapi=pkg.metadata['EAPI'])
+ eapi=pkg.eapi)
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
@@ -1763,7 +2577,7 @@ class depgraph(object):
dep_string = portage.dep.use_reduce(dep_string,
uselist=self._pkg_use_enabled(pkg),
opconvert=True, token_class=Atom,
- eapi=pkg.metadata['EAPI'])
+ eapi=pkg.eapi)
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
del e
@@ -1806,6 +2620,37 @@ class depgraph(object):
finally:
self._dynamic_config._autounmask = _autounmask_backup
+ def _ignore_dependency(self, atom, pkg, child, dep, mypriority, recurse_satisfied):
+ """
+ In some cases, dep_check will return deps that shouldn't
+ be processed any further, so they are identified and
+ discarded here. Try to discard as few as possible since
+ discarded dependencies reduce the amount of information
+ available for optimization of merge order.
+	Don't ignore a dependency when pkg has a slot-operator dependency
+	on the child and the child's slot/sub_slot has changed.
+ """
+ if not mypriority.satisfied:
+ return False
+ slot_operator_rebuild = False
+ if atom.slot_operator == '=' and \
+ (pkg.root, pkg.slot_atom) in self._dynamic_config._slot_operator_replace_installed and \
+ mypriority.satisfied is not child and \
+ mypriority.satisfied.installed and \
+ child and \
+ not child.installed and \
+ (child.slot != mypriority.satisfied.slot or child.sub_slot != mypriority.satisfied.sub_slot):
+ slot_operator_rebuild = True
+
+ return not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ not any(self._dynamic_config._package_tracker.match(
+ dep.child.root, dep.child.slot_atom, installed=False)) and \
+ not slot_operator_rebuild
+
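
A reduced sketch of the decision above, with the relevant Package and
Dependency attributes flattened into booleans (names illustrative):

def ignore_dependency(satisfied, blocker, recurse_satisfied,
        satisfied_visible, child_installed, child_in_graph,
        slot_operator_rebuild):
    if not satisfied:
        return False  # unsatisfied deps are always processed
    return (not blocker
        and not recurse_satisfied
        and satisfied_visible
        and not child_installed
        and not child_in_graph
        and not slot_operator_rebuild)

# A satisfied, visible dep whose child isn't pulled in yet is ignored.
print(ignore_dependency(True, False, False, True, False, False, False))
# -> True
# A pending slot-operator rebuild forces the dep to be kept.
print(ignore_dependency(True, False, False, True, False, False, True))
# -> False
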
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
dep_string, allow_unsatisfied):
depth = pkg.depth + 1
@@ -1864,6 +2709,13 @@ class depgraph(object):
mypriority = dep_priority.copy()
if not atom.blocker:
+
+ if atom.slot_operator == "=":
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
inst_pkgs = [inst_pkg for inst_pkg in
reversed(vardb.match_pkgs(atom))
if not reinstall_atoms.findAtomForPackage(inst_pkg,
@@ -1883,19 +2735,12 @@ class depgraph(object):
priority=mypriority, root=dep_root)
# In some cases, dep_check will return deps that shouldn't
- # be proccessed any further, so they are identified and
+ # be processed any further, so they are identified and
# discarded here. Try to discard as few as possible since
# discarded dependencies reduce the amount of information
# available for optimization of merge order.
ignored = False
- if not atom.blocker and \
- not recurse_satisfied and \
- mypriority.satisfied and \
- mypriority.satisfied.visible and \
- dep.child is not None and \
- not dep.child.installed and \
- self._dynamic_config._slot_pkg_map[dep.child.root].get(
- dep.child.slot_atom) is None:
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
myarg = None
try:
myarg = next(self._iter_atoms_for_pkg(dep.child), None)
@@ -1998,14 +2843,7 @@ class depgraph(object):
collapsed_parent=pkg, collapsed_priority=dep_priority)
ignored = False
- if not atom.blocker and \
- not recurse_satisfied and \
- mypriority.satisfied and \
- mypriority.satisfied.visible and \
- dep.child is not None and \
- not dep.child.installed and \
- self._dynamic_config._slot_pkg_map[dep.child.root].get(
- dep.child.slot_atom) is None:
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
myarg = None
try:
myarg = next(self._iter_atoms_for_pkg(dep.child), None)
@@ -2053,7 +2891,7 @@ class depgraph(object):
yield (atom, None)
continue
dep_pkg, existing_node = self._select_package(
- root_config.root, atom)
+ root_config.root, atom, parent=parent)
if dep_pkg is None:
yield (atom, None)
continue
@@ -2105,12 +2943,12 @@ class depgraph(object):
# Yield ~, =*, < and <= atoms first, since those are more likely to
# cause slot conflicts, and we want those atoms to be displayed
# in the resulting slot conflict message (see bug #291142).
- # Give similar treatment to SLOT/ABI atoms.
+ # Give similar treatment to slot/sub-slot atoms.
conflict_atoms = []
normal_atoms = []
abi_atoms = []
for atom in cp_atoms:
- if atom.slot_abi_built:
+ if atom.slot_operator_built:
abi_atoms.append(atom)
continue
conflict = False
@@ -2135,7 +2973,7 @@ class depgraph(object):
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
"""
Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
- Yields non-disjunctive deps. Raises InvalidDependString when
+ Yields non-disjunctive deps. Raises InvalidDependString when
necessary.
"""
for x in dep_struct:
@@ -2242,9 +3080,24 @@ class depgraph(object):
continue
yield arg, atom
- def select_files(self, myfiles):
+ def select_files(self, args):
+ # Use the global event loop for spinner progress
+ # indication during file owner lookups (bug #461412).
+ spinner_id = None
+ try:
+ spinner = self._frozen_config.spinner
+ if spinner is not None and \
+ spinner.update is not spinner.update_quiet:
+ spinner_id = self._event_loop.idle_add(
+ self._frozen_config.spinner.update)
+ return self._select_files(args)
+ finally:
+ if spinner_id is not None:
+ self._event_loop.source_remove(spinner_id)
+
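
The register/work/always-unregister pattern used above, sketched against
a minimal stand-in for portage's global event loop (only the
idle_add/source_remove names come from the code):

class MiniLoop:
    def __init__(self):
        self._sources = {}
        self._next_id = 1
    def idle_add(self, callback):
        source_id = self._next_id
        self._next_id += 1
        self._sources[source_id] = callback
        return source_id
    def source_remove(self, source_id):
        return self._sources.pop(source_id, None) is not None

loop = MiniLoop()
def spinner_update():
    print("spin")
source_id = loop.idle_add(spinner_update)
try:
    pass  # long-running file owner lookups would run here
finally:
    # unregister even if the lookup raises
    assert loop.source_remove(source_id)
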
+ def _select_files(self, myfiles):
"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
- self._dynamic_config._initial_arg_list and call self._resolve to create the
+ self._dynamic_config._initial_arg_list and call self._resolve to create the
appropriate depgraph and return a favorite list."""
self._load_vdb()
debug = "--debug" in self._frozen_config.myopts
@@ -2277,8 +3130,18 @@ class depgraph(object):
writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
return 0, myfavorites
mytbz2=portage.xpak.tbz2(x)
- mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
- if os.path.realpath(x) != \
+ mykey = None
+ cat = mytbz2.getfile("CATEGORY")
+ if cat is not None:
+ cat = _unicode_decode(cat.strip(),
+ encoding=_encodings['repo.content'])
+ mykey = cat + "/" + os.path.basename(x)[:-5]
+
+ if mykey is None:
+ writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ elif os.path.realpath(x) != \
os.path.realpath(bindb.bintree.getname(mykey)):
writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
self._dynamic_config._skip_restart = True
@@ -2293,15 +3156,16 @@ class depgraph(object):
pkgdir = os.path.dirname(ebuild_path)
tree_root = os.path.dirname(os.path.dirname(pkgdir))
cp = pkgdir[len(tree_root)+1:]
- e = portage.exception.PackageNotFound(
- ("%s is not in a valid portage tree " + \
- "hierarchy or does not exist") % x)
+ error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
+ "hierarchy or does not exist\n") % x
if not portage.isvalidatom(cp):
- raise e
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
cat = portage.catsplit(cp)[0]
mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
if not portage.isvalidatom("="+mykey):
- raise e
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
ebuild_path = portdb.findname(mykey)
if ebuild_path:
if ebuild_path != os.path.join(os.path.realpath(tree_root),
@@ -2317,8 +3181,8 @@ class depgraph(object):
countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
"Continuing...")
else:
- raise portage.exception.PackageNotFound(
- "%s is not in a valid portage tree hierarchy or does not exist" % x)
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
pkg = self._pkg(mykey, "ebuild", root_config,
onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
@@ -2351,6 +3215,30 @@ class depgraph(object):
raise portage.exception.PackageSetNotFound(s)
if s in depgraph_sets.sets:
continue
+
+ try:
+ set_atoms = root_config.setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level("\n\n", level=logging.ERROR,
+ noiselevel=-1)
+ for pset in list(depgraph_sets.sets.values()) + [sets[s]]:
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
+ writemsg_level(("emerge: the given set '%s' "
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR,
+ noiselevel=-1)
+ return False, myfavorites
+
pset = sets[s]
depgraph_sets.sets[s] = pset
args.append(SetArg(arg=x, pset=pset,
@@ -2370,7 +3258,7 @@ class depgraph(object):
# came from, if any.
# 2) It takes away freedom from the resolver to choose other
# possible expansions when necessary.
- if "/" in x:
+ if "/" in x.split(":")[0]:
args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
root_config=root_config))
continue
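
The revised test treats x as a full category/package atom only when the
slash occurs before any slot separator; a quick illustration with
made-up argument strings:

for x in ("sys-apps/portage", "sys-apps/portage:0", "portage:0/2.2"):
    print(x, "/" in x.split(":")[0])
# sys-apps/portage True
# sys-apps/portage:0 True
# portage:0/2.2 False  <- the old test ("/" in x) wrongly matched this
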
@@ -2471,13 +3359,8 @@ class depgraph(object):
return 0, []
for cpv in owners:
- slot = vardb.aux_get(cpv, ["SLOT"])[0]
- if not slot:
- # portage now masks packages with missing slot, but it's
- # possible that one was installed by an older version
- atom = Atom(portage.cpv_getkey(cpv))
- else:
- atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
+ pkg = vardb._pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
args.append(AtomArg(arg=atom, atom=atom,
root_config=root_config))
@@ -2542,7 +3425,7 @@ class depgraph(object):
# Order needs to be preserved since a feature of --nodeps
# is to allow the user to force a specific merge order.
self._dynamic_config._initial_arg_list = args[:]
-
+
return self._resolve(myfavorites)
def _gen_reinstall_sets(self):
@@ -2552,8 +3435,8 @@ class depgraph(object):
atom_list.append((root, '__auto_rebuild__', atom))
for root, atom in self._rebuild.reinstall_list:
atom_list.append((root, '__auto_reinstall__', atom))
- for root, atom in self._dynamic_config._slot_abi_replace_installed:
- atom_list.append((root, '__auto_slot_abi_replace_installed__', atom))
+ for root, atom in self._dynamic_config._slot_operator_replace_installed:
+ atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
set_dict = {}
for root, set_name, atom in atom_list:
@@ -2572,8 +3455,8 @@ class depgraph(object):
root_config=self._frozen_config.roots[root])
def _resolve(self, myfavorites):
- """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
- call self._creategraph to process theier deps and return
+ """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
+		call self._creategraph to process their deps and return
a favorite list."""
debug = "--debug" in self._frozen_config.myopts
onlydeps = "--onlydeps" in self._frozen_config.myopts
@@ -2624,6 +3507,16 @@ class depgraph(object):
if pprovided_match:
continue
+ excluded = False
+ for any_match in self._iter_match_pkgs_any(
+ self._frozen_config.roots[myroot], atom):
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ any_match, modified_use=self._pkg_use_enabled(any_match)):
+ excluded = True
+ break
+ if excluded:
+ continue
+
if not (isinstance(arg, SetArg) and \
arg.name in ("selected", "system", "world")):
self._dynamic_config._unsatisfied_deps_for_display.append(
@@ -2692,7 +3585,8 @@ class depgraph(object):
except self._unknown_internal_error:
return False, myfavorites
- if (self._dynamic_config._slot_collision_info and
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if (have_slot_conflict and
not self._accept_blocker_conflicts()) or \
(self._dynamic_config._allow_backtracking and
"slot conflict" in self._dynamic_config._backtrack_infos):
@@ -2707,11 +3601,47 @@ class depgraph(object):
return False, myfavorites
if "config" in self._dynamic_config._backtrack_infos and \
- ("slot_abi_mask_built" in self._dynamic_config._backtrack_infos["config"] or
- "slot_abi_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
+ ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
+ "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
self.need_restart():
return False, myfavorites
+ if not self._dynamic_config._prune_rebuilds and \
+ self._dynamic_config._slot_operator_replace_installed and \
+ self._get_missed_updates():
+ # When there are missed updates, we might have triggered
+ # some unnecessary rebuilds (see bug #439688). So, prune
+ # all the rebuilds and backtrack with the problematic
+ # updates masked. The next backtrack run should pull in
+ # any rebuilds that are really needed, and this
+ # prune_rebuilds path should never be entered more than
+ # once in a series of backtracking nodes (in order to
+ # avoid a backtracking loop).
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["prune_rebuilds"] = True
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ if self.need_restart():
+ # want_restart_for_use_change triggers this
+ return False, myfavorites
+
+ if "--fetchonly" not in self._frozen_config.myopts and \
+ "--buildpkgonly" in self._frozen_config.myopts:
+ graph_copy = self._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ self._dynamic_config._buildpkgonly_deps_unsatisfied = True
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+
# Any failures except those due to autounmask *alone* should return
# before this point, since the success_without_autounmask flag that's
# set below is reserved for cases where there are *zero* other
@@ -2773,8 +3703,8 @@ class depgraph(object):
if refs is None:
refs = []
atom_arg_map[atom_key] = refs
- if arg not in refs:
- refs.append(arg)
+ if arg not in refs:
+ refs.append(arg)
for root in self._dynamic_config.sets:
depgraph_sets = self._dynamic_config.sets[root]
@@ -2804,14 +3734,15 @@ class depgraph(object):
slots = set()
for cpv in vardb.match(atom):
# don't mix new virtuals with old virtuals
- if portage.cpv_getkey(cpv) == highest_pkg.cp:
- slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
+ pkg = vardb._pkg_str(cpv, None)
+ if pkg.cp == highest_pkg.cp:
+ slots.add(pkg.slot)
- slots.add(highest_pkg.metadata["SLOT"])
+ slots.add(highest_pkg.slot)
if len(slots) == 1:
return []
greedy_pkgs = []
- slots.remove(highest_pkg.metadata["SLOT"])
+ slots.remove(highest_pkg.slot)
while slots:
slot = slots.pop()
slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
@@ -2825,9 +3756,9 @@ class depgraph(object):
return [pkg.slot_atom for pkg in greedy_pkgs]
blockers = {}
- blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
+ blocker_dep_keys = Package._dep_keys
for pkg in greedy_pkgs + [highest_pkg]:
- dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
+ dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
try:
selected_atoms = self._select_atoms(
pkg.root, dep_str, self._pkg_use_enabled(pkg),
@@ -2879,7 +3810,8 @@ class depgraph(object):
not been scheduled for replacement.
"""
kwargs["trees"] = self._dynamic_config._graph_trees
- return self._select_atoms_highest_available(*pargs, **kwargs)
+ return self._select_atoms_highest_available(*pargs,
+ **portage._native_kwargs(kwargs))
def _select_atoms_highest_available(self, root, depstring,
myuse=None, parent=None, strict=True, trees=None, priority=None):
@@ -2890,7 +3822,7 @@ class depgraph(object):
eapi = None
is_valid_flag = None
if parent is not None:
- eapi = parent.metadata['EAPI']
+ eapi = parent.eapi
if not parent.installed:
is_valid_flag = parent.iuse.is_valid_flag
depstring = portage.dep.use_reduce(depstring,
@@ -2898,9 +3830,9 @@ class depgraph(object):
is_valid_flag=is_valid_flag, eapi=eapi)
if (self._dynamic_config.myparams.get(
- "ignore_built_slot_abi_deps", "n") == "y" and
+ "ignore_built_slot_operator_deps", "n") == "y" and
parent and parent.built):
- ignore_built_slot_abi_deps(depstring)
+ ignore_built_slot_operator_deps(depstring)
pkgsettings = self._frozen_config.pkgsettings[root]
if trees is None:
@@ -3005,35 +3937,37 @@ class depgraph(object):
def _expand_virt_from_graph(self, root, atom):
if not isinstance(atom, Atom):
atom = Atom(atom)
- graphdb = self._dynamic_config.mydbapi[root]
- match = graphdb.match_pkgs(atom)
- if not match:
- yield atom
- return
- pkg = match[-1]
- if not pkg.cpv.startswith("virtual/"):
- yield atom
- return
- try:
- rdepend = self._select_atoms_from_graph(
- pkg.root, pkg.metadata.get("RDEPEND", ""),
- myuse=self._pkg_use_enabled(pkg),
- parent=pkg, strict=False)
- except InvalidDependString as e:
- writemsg_level("!!! Invalid RDEPEND in " + \
- "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
- (pkg.root, pkg.cpv, e),
- noiselevel=-1, level=logging.ERROR)
+
+ if not atom.cp.startswith("virtual/"):
yield atom
return
- for atoms in rdepend.values():
- for atom in atoms:
- if hasattr(atom, "_orig_atom"):
- # Ignore virtual atoms since we're only
- # interested in expanding the real atoms.
- continue
- yield atom
+ any_match = False
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ try:
+ rdepend = self._select_atoms_from_graph(
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, strict=False)
+ except InvalidDependString as e:
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ continue
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if hasattr(atom, "_orig_atom"):
+ # Ignore virtual atoms since we're only
+ # interested in expanding the real atoms.
+ continue
+ yield atom
+
+ any_match = True
+
+ if not any_match:
+ yield atom
def _virt_deps_visible(self, pkg, ignore_use=False):
"""
@@ -3044,7 +3978,7 @@ class depgraph(object):
"""
try:
rdepend = self._select_atoms(
- pkg.root, pkg.metadata.get("RDEPEND", ""),
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
myuse=self._pkg_use_enabled(pkg),
parent=pkg, priority=self._priority(runtime=True))
except InvalidDependString as e:
@@ -3083,19 +4017,29 @@ class depgraph(object):
child = None
all_parents = self._dynamic_config._parent_atoms
graph = self._dynamic_config.digraph
+ verbose_main_repo_display = "--verbose-main-repo-display" in \
+ self._frozen_config.myopts
+
+ def format_pkg(pkg):
+ pkg_name = "%s" % (pkg.cpv,)
+ if verbose_main_repo_display or pkg.repo != \
+ pkg.root_config.settings.repositories.mainRepo().name:
+ pkg_name += _repo_separator + pkg.repo
+ return pkg_name
if target_atom is not None and isinstance(node, Package):
affecting_use = set()
- for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
+ for dep_str in Package._dep_keys:
try:
affecting_use.update(extract_affecting_use(
- node.metadata[dep_str], target_atom,
- eapi=node.metadata["EAPI"]))
+ node._metadata[dep_str], target_atom,
+ eapi=node.eapi))
except InvalidDependString:
if not node.installed:
raise
affecting_use.difference_update(node.use.mask, node.use.force)
- pkg_name = _unicode_decode("%s") % (node.cpv,)
+ pkg_name = format_pkg(node)
+
if affecting_use:
usedep = []
for flag in affecting_use:
@@ -3150,7 +4094,7 @@ class depgraph(object):
node_type = "set"
else:
node_type = "argument"
- dep_chain.append((_unicode_decode("%s") % (node,), node_type))
+ dep_chain.append(("%s" % (node,), node_type))
elif node is not start_node:
for ppkg, patom in all_parents[child]:
@@ -3167,23 +4111,23 @@ class depgraph(object):
if priorities is None:
# This edge comes from _parent_atoms and was not added to
# the graph, and _parent_atoms does not contain priorities.
- dep_strings.add(node.metadata["DEPEND"])
- dep_strings.add(node.metadata["RDEPEND"])
- dep_strings.add(node.metadata["PDEPEND"])
+ for k in Package._dep_keys:
+ dep_strings.add(node._metadata[k])
else:
for priority in priorities:
if priority.buildtime:
- dep_strings.add(node.metadata["DEPEND"])
+ for k in Package._buildtime_keys:
+ dep_strings.add(node._metadata[k])
if priority.runtime:
- dep_strings.add(node.metadata["RDEPEND"])
+ dep_strings.add(node._metadata["RDEPEND"])
if priority.runtime_post:
- dep_strings.add(node.metadata["PDEPEND"])
+ dep_strings.add(node._metadata["PDEPEND"])
affecting_use = set()
for dep_str in dep_strings:
try:
affecting_use.update(extract_affecting_use(
- dep_str, atom, eapi=node.metadata["EAPI"]))
+ dep_str, atom, eapi=node.eapi))
except InvalidDependString:
if not node.installed:
raise
@@ -3192,7 +4136,7 @@ class depgraph(object):
affecting_use.difference_update(node.use.mask, \
node.use.force)
- pkg_name = _unicode_decode("%s") % (node.cpv,)
+ pkg_name = format_pkg(node)
if affecting_use:
usedep = []
for flag in affecting_use:
@@ -3244,8 +4188,7 @@ class depgraph(object):
if self._dynamic_config.digraph.parent_nodes(parent_arg):
selected_parent = parent_arg
else:
- dep_chain.append(
- (_unicode_decode("%s") % (parent_arg,), "argument"))
+ dep_chain.append(("%s" % (parent_arg,), "argument"))
selected_parent = None
node = selected_parent
@@ -3260,7 +4203,7 @@ class depgraph(object):
else:
display_list.append("required by %s" % node)
- msg = "#" + ", ".join(display_list) + "\n"
+ msg = "# " + "\n# ".join(display_list) + "\n"
return msg
@@ -3281,7 +4224,7 @@ class depgraph(object):
if arg:
xinfo='"%s"' % arg
if isinstance(myparent, AtomArg):
- xinfo = _unicode_decode('"%s"') % (myparent,)
+ xinfo = '"%s"' % (myparent,)
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
if root != self._frozen_config._running_root.root:
@@ -3326,9 +4269,9 @@ class depgraph(object):
repo = metadata.get('repository')
pkg = self._pkg(cpv, pkg_type, root_config,
installed=installed, myrepo=repo)
- # pkg.metadata contains calculated USE for ebuilds,
+ # pkg._metadata contains calculated USE for ebuilds,
# required later for getMissingLicenses.
- metadata = pkg.metadata
+ metadata = pkg._metadata
if pkg.invalid:
# Avoid doing any operations with packages that
# have invalid metadata. It would be unsafe at
@@ -3367,12 +4310,13 @@ class depgraph(object):
raise
if not mreasons and \
not pkg.built and \
- pkg.metadata.get("REQUIRED_USE") and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
+ pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
if not check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag):
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi):
required_use_unsatisfied.append(pkg)
continue
root_slot = (pkg.root, pkg.slot_atom)
@@ -3422,7 +4366,7 @@ class depgraph(object):
continue
missing_use_adjustable.add(pkg)
- required_use = pkg.metadata.get("REQUIRED_USE")
+ required_use = pkg._metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(pkg)
@@ -3431,8 +4375,10 @@ class depgraph(object):
new_use.add(flag)
for flag in need_disable:
new_use.discard(flag)
- if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
+ and not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
@@ -3470,7 +4416,7 @@ class depgraph(object):
if any(x in untouchable_flags for x in involved_flags):
continue
- required_use = myparent.metadata.get("REQUIRED_USE")
+ required_use = myparent._metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(myparent)
@@ -3480,8 +4426,12 @@ class depgraph(object):
new_use.discard(flag)
else:
new_use.add(flag)
- if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi) and \
+ not check_required_use(required_use, new_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (myparent.cpv, \
human_readable_required_use(required_use))
@@ -3568,14 +4518,15 @@ class depgraph(object):
writemsg("\n The following REQUIRED_USE flag constraints " + \
"are unsatisfied:\n", noiselevel=-1)
reduced_noise = check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag).tounicode()
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi).tounicode()
writemsg(" %s\n" % \
human_readable_required_use(reduced_noise),
noiselevel=-1)
normalized_required_use = \
- " ".join(pkg.metadata["REQUIRED_USE"].split())
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
writemsg("\n The above constraints " + \
"are a subset of the following complete expression:\n",
@@ -3620,57 +4571,17 @@ class depgraph(object):
not cp_exists and \
self._frozen_config.myopts.get(
"--misspell-suggestions", "y") != "n":
- cp = myparent.atom.cp.lower()
- cat, pkg = portage.catsplit(cp)
- if cat == "null":
- cat = None
writemsg("\nemerge: searching for similar names..."
, noiselevel=-1)
- all_cp = set()
- all_cp.update(vardb.cp_all())
+ dbs = [vardb]
if "--usepkgonly" not in self._frozen_config.myopts:
- all_cp.update(portdb.cp_all())
+ dbs.append(portdb)
if "--usepkg" in self._frozen_config.myopts:
- all_cp.update(bindb.cp_all())
- # discard dir containing no ebuilds
- all_cp.discard(cp)
-
- orig_cp_map = {}
- for cp_orig in all_cp:
- orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
- all_cp = set(orig_cp_map)
-
- if cat:
- matches = difflib.get_close_matches(cp, all_cp)
- else:
- pkg_to_cp = {}
- for other_cp in list(all_cp):
- other_pkg = portage.catsplit(other_cp)[1]
- if other_pkg == pkg:
- # Check for non-identical package that
- # differs only by upper/lower case.
- identical = True
- for cp_orig in orig_cp_map[other_cp]:
- if portage.catsplit(cp_orig)[1] != \
- portage.catsplit(atom.cp)[1]:
- identical = False
- break
- if identical:
- # discard dir containing no ebuilds
- all_cp.discard(other_cp)
- continue
- pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
- pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
- matches = []
- for pkg_match in pkg_matches:
- matches.extend(pkg_to_cp[pkg_match])
+ dbs.append(bindb)
- matches_orig_case = []
- for cp in matches:
- matches_orig_case.extend(orig_cp_map[cp])
- matches = matches_orig_case
+ matches = similar_name_search(dbs, atom)
if len(matches) == 1:
writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
@@ -3691,8 +4602,7 @@ class depgraph(object):
dep_chain = self._get_dep_chain(myparent, atom)
for node, node_type in dep_chain:
msg.append('(dependency required by "%s" [%s])' % \
- (colorize('INFORM', _unicode_decode("%s") % \
- (node)), node_type))
+ (colorize('INFORM', "%s" % (node)), node_type))
if msg:
writemsg("\n".join(msg), noiselevel=-1)
@@ -3770,7 +4680,8 @@ class depgraph(object):
# the newly built package still won't have the expected slot.
# Therefore, assume that such SLOT dependencies are already
# satisfied rather than forcing a rebuild.
- if not matched_something and installed and atom.slot is not None:
+ if not matched_something and installed and \
+ atom.slot is not None and not atom.slot_operator_built:
if "remove" in self._dynamic_config.myparams:
# We need to search the portdbapi, which is not in our
@@ -3794,11 +4705,11 @@ class depgraph(object):
for other_db, other_type, other_built, \
other_installed, other_keys in dbs:
try:
- if atom.slot == \
- other_db.aux_get(cpv, ["SLOT"])[0]:
+ if portage.dep._match_slot(atom,
+ other_db._pkg_str(_unicode(cpv), None)):
slot_available = True
break
- except KeyError:
+ except (KeyError, InvalidData):
pass
if not slot_available:
continue
@@ -3810,12 +4721,12 @@ class depgraph(object):
yield inst_pkg
return
- def _select_pkg_highest_available(self, root, atom, onlydeps=False):
+ def _select_pkg_highest_available(self, root, atom, onlydeps=False, parent=None):
cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
if ret is not None:
return ret
- ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+ ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps, parent=parent)
self._dynamic_config._highest_pkg_cache[cache_key] = ret
pkg, existing = ret
if pkg is not None:
@@ -3847,6 +4758,36 @@ class depgraph(object):
return not arg
+ def _want_update_pkg(self, parent, pkg):
+
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return False
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ depth = parent.depth or 0
+ depth += 1
+
+ if arg_atoms:
+ for arg, atom in arg_atoms:
+ if arg.reset_depth:
+ depth = 0
+ break
+
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ update = "--update" in self._frozen_config.myopts
+
+ return (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not (deep is not True and depth > deep))
+
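
The depth test above follows the usual --deep semantics, where deep is
either True (unbounded) or an integer bound; a tiny sketch with
illustrative values:

def within_deep(depth, deep):
    return not (deep is not True and depth > deep)

print(within_deep(3, True))  # True  -- --deep with no bound
print(within_deep(1, 0))     # False -- default: direct deps only
print(within_deep(2, 2))     # True  -- --deep=2
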
def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
try:
pkg_eb = self._pkg(
@@ -3867,7 +4808,7 @@ class depgraph(object):
return True
def _equiv_binary_installed(self, pkg):
- build_time = pkg.metadata.get('BUILD_TIME')
+ build_time = pkg.build_time
if not build_time:
return False
@@ -3877,7 +4818,7 @@ class depgraph(object):
except PackageNotFound:
return False
- return build_time == inst_pkg.metadata.get('BUILD_TIME')
+ return build_time == inst_pkg.build_time
class _AutounmaskLevel(object):
__slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
@@ -3898,8 +4839,9 @@ class depgraph(object):
1. USE + license
2. USE + ~arch + license
3. USE + ~arch + license + missing keywords
- 4. USE + ~arch + license + masks
- 5. USE + ~arch + license + missing keywords + masks
+ 4. USE + license + masks
+ 5. USE + ~arch + license + masks
+ 6. USE + ~arch + license + missing keywords + masks
Some thoughts:
* Do least invasive changes first.
@@ -3919,15 +4861,25 @@ class depgraph(object):
autounmask_level.allow_license_changes = True
yield autounmask_level
- for only_use_changes in (False,):
+ autounmask_level.allow_unstable_keywords = True
+ yield autounmask_level
- autounmask_level.allow_unstable_keywords = (not only_use_changes)
- autounmask_level.allow_license_changes = (not only_use_changes)
+ if not autounmask_keep_masks:
- for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
+ autounmask_level.allow_missing_keywords = True
+ yield autounmask_level
- if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
- break
+ # 4. USE + license + masks
+ # Try to respect keywords while discarding
+ # package.mask (see bug #463394).
+ autounmask_level.allow_unstable_keywords = False
+ autounmask_level.allow_missing_keywords = False
+ autounmask_level.allow_unmasks = True
+ yield autounmask_level
+
+ autounmask_level.allow_unstable_keywords = True
+
+ for missing_keyword, unmask in ((False, True), (True, True)):
autounmask_level.allow_missing_keywords = missing_keyword
autounmask_level.allow_unmasks = unmask
@@ -3935,33 +4887,42 @@ class depgraph(object):
yield autounmask_level
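
A simplified, standalone restatement of the level progression documented
in the docstring above; the real generator mutates and re-yields a
single _AutounmaskLevel object, while this sketch yields fresh dicts:

def autounmask_levels(autounmask_keep_masks=False):
    # 1. USE + license
    yield dict(unstable=False, missing_kw=False, unmask=False)
    # 2. USE + ~arch + license
    yield dict(unstable=True, missing_kw=False, unmask=False)
    if not autounmask_keep_masks:
        # 3. USE + ~arch + license + missing keywords
        yield dict(unstable=True, missing_kw=True, unmask=False)
        # 4. USE + license + masks (respect keywords, bug #463394)
        yield dict(unstable=False, missing_kw=False, unmask=True)
        # 5. USE + ~arch + license + masks
        yield dict(unstable=True, missing_kw=False, unmask=True)
        # 6. USE + ~arch + license + missing keywords + masks
        yield dict(unstable=True, missing_kw=True, unmask=True)

print(len(list(autounmask_levels())))      # 6
print(len(list(autounmask_levels(True))))  # 2
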
- def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
- pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+ def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False, parent=None):
+ pkg, existing = self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps, parent=parent)
default_selection = (pkg, existing)
- def reset_pkg(pkg):
+ if self._dynamic_config._autounmask is True:
if pkg is not None and \
pkg.installed and \
not self._want_installed_pkg(pkg):
pkg = None
- if self._dynamic_config._autounmask is True:
- reset_pkg(pkg)
+ # Temporarily reset _need_restart state, in order to
+ # avoid interference as reported in bug #459832.
+ earlier_need_restart = self._dynamic_config._need_restart
+ self._dynamic_config._need_restart = False
+ try:
+ for autounmask_level in self._autounmask_levels():
+ if pkg is not None:
+ break
- for autounmask_level in self._autounmask_levels():
- if pkg is not None:
- break
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ autounmask_level=autounmask_level, parent=parent)
- pkg, existing = \
- self._wrapped_select_pkg_highest_available_imp(
- root, atom, onlydeps=onlydeps,
- autounmask_level=autounmask_level)
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
- reset_pkg(pkg)
-
- if self._dynamic_config._need_restart:
- return None, None
+ if self._dynamic_config._need_restart:
+ return None, None
+ finally:
+ if earlier_need_restart:
+ self._dynamic_config._need_restart = True
if pkg is None:
# This ensures that we can fall back to an installed package
@@ -4091,25 +5052,29 @@ class depgraph(object):
new_changes = {}
for flag, state in target_use.items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
if state:
- if flag not in old_use:
- if new_changes.get(flag) == False:
+ if real_flag not in old_use:
+ if new_changes.get(real_flag) == False:
return old_use
- new_changes[flag] = True
+ new_changes[real_flag] = True
new_use.add(flag)
else:
- if flag in old_use:
- if new_changes.get(flag) == True:
+ if real_flag in old_use:
+ if new_changes.get(real_flag) == True:
return old_use
- new_changes[flag] = False
+ new_changes[real_flag] = False
new_use.update(old_use.difference(target_use))
def want_restart_for_use_change(pkg, new_use):
if pkg not in self._dynamic_config.digraph.nodes:
return False
- for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
- dep = pkg.metadata[key]
+ for key in Package._dep_keys + ("LICENSE",):
+ dep = pkg._metadata[key]
old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
@@ -4132,9 +5097,11 @@ class depgraph(object):
if new_changes != old_changes:
#Don't do the change if it violates REQUIRED_USE.
- required_use = pkg.metadata.get("REQUIRED_USE")
- if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ if required_use and check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
+ not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
return old_use
if any(x in pkg.use.mask for x in new_changes) or \
@@ -4150,14 +5117,13 @@ class depgraph(object):
self._dynamic_config._need_restart = True
return new_use
- def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None, parent=None):
root_config = self._frozen_config.roots[root]
pkgsettings = self._frozen_config.pkgsettings[root]
dbs = self._dynamic_config._filtered_trees[root]["dbs"]
vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
# List of acceptable packages, ordered by type preference.
matched_packages = []
- matched_pkgs_ignore_use = []
highest_version = None
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
@@ -4209,7 +5175,7 @@ class depgraph(object):
# Ignore USE deps for the initial match since we want to
# ensure that updates aren't missed solely due to the user's
# USE configuration.
- for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
onlydeps=onlydeps):
if pkg.cp != atom_cp and have_new_virt:
# pull in a new-style virtual instead
@@ -4295,8 +5261,8 @@ class depgraph(object):
for selected_pkg in matched_packages:
if selected_pkg.type_name == "binary" and \
selected_pkg.cpv == pkg.cpv and \
- selected_pkg.metadata.get('BUILD_TIME') == \
- pkg.metadata.get('BUILD_TIME'):
+ selected_pkg.build_time == \
+ pkg.build_time:
identical_binary = True
break
@@ -4339,7 +5305,6 @@ class depgraph(object):
if atom.use:
- matched_pkgs_ignore_use.append(pkg)
if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
target_use = {}
for flag in atom.use.enabled:
@@ -4352,8 +5317,11 @@ class depgraph(object):
use_match = True
can_adjust_use = not pkg.built
- missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
- missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
+ is_valid_flag = pkg.iuse.is_valid_flag
+ missing_enabled = frozenset(x for x in
+ atom.use.missing_enabled if not is_valid_flag(x))
+ missing_disabled = frozenset(x for x in
+ atom.use.missing_disabled if not is_valid_flag(x))
if atom.use.enabled:
if any(x in atom.use.enabled for x in missing_disabled):
@@ -4406,7 +5374,9 @@ class depgraph(object):
# will always end with a break statement below
# this point.
if find_existing_node:
- e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ e_pkg = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
if not e_pkg:
break
@@ -4427,50 +5397,56 @@ class depgraph(object):
break
# Compare built package to current config and
# reject the built package if necessary.
- if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
- ("--newuse" in self._frozen_config.myopts or \
- "--reinstall" in self._frozen_config.myopts or \
- (not installed and self._dynamic_config.myparams.get(
- "binpkg_respect_use") in ("y", "auto"))):
- iuses = pkg.iuse.all
- old_use = self._pkg_use_enabled(pkg)
- if myeb:
- pkgsettings.setcpv(myeb)
- else:
- pkgsettings.setcpv(pkg)
- now_use = pkgsettings["PORTAGE_USE"].split()
- forced_flags = set()
- forced_flags.update(pkgsettings.useforce)
- forced_flags.update(pkgsettings.usemask)
- cur_iuse = iuses
- if myeb and not usepkgonly and not useoldpkg:
- cur_iuse = myeb.iuse.all
- reinstall_for_flags = self._reinstall_for_flags(pkg,
- forced_flags, old_use, iuses, now_use, cur_iuse)
- if reinstall_for_flags:
- if not pkg.installed:
- self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
+ reinstall_use = ("--newuse" in self._frozen_config.myopts or \
+ "--reinstall" in self._frozen_config.myopts)
+ respect_use = self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "auto")
+ if built and not useoldpkg and \
+ (not installed or matched_packages) and \
+ not (installed and
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg))):
+ if myeb and "--newrepo" in self._frozen_config.myopts and myeb.repo != pkg.repo:
break
+ elif reinstall_use or (not installed and respect_use):
+ iuses = pkg.iuse.all
+ old_use = self._pkg_use_enabled(pkg)
+ if myeb:
+ pkgsettings.setcpv(myeb)
+ else:
+ pkgsettings.setcpv(pkg)
+ now_use = pkgsettings["PORTAGE_USE"].split()
+ forced_flags = set()
+ forced_flags.update(pkgsettings.useforce)
+ forced_flags.update(pkgsettings.usemask)
+ cur_iuse = iuses
+ if myeb and not usepkgonly and not useoldpkg:
+ cur_iuse = myeb.iuse.all
+ reinstall_for_flags = self._reinstall_for_flags(pkg,
+ forced_flags, old_use, iuses, now_use, cur_iuse)
+ if reinstall_for_flags:
+ if not pkg.installed:
+ self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
+ break
# Compare current config to installed package
# and do not reinstall if possible.
- if not installed and not useoldpkg and \
- ("--newuse" in self._frozen_config.myopts or \
- "--reinstall" in self._frozen_config.myopts) and \
- cpv in vardb.match(atom):
- forced_flags = set()
- forced_flags.update(pkg.use.force)
- forced_flags.update(pkg.use.mask)
+ if not installed and not useoldpkg and cpv in vardb.match(atom):
inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
- old_use = inst_pkg.use.enabled
- old_iuse = inst_pkg.iuse.all
- cur_use = self._pkg_use_enabled(pkg)
- cur_iuse = pkg.iuse.all
- reinstall_for_flags = \
- self._reinstall_for_flags(pkg,
- forced_flags, old_use, old_iuse,
- cur_use, cur_iuse)
- if reinstall_for_flags:
+ if "--newrepo" in self._frozen_config.myopts and pkg.repo != inst_pkg.repo:
reinstall = True
+ elif reinstall_use:
+ forced_flags = set()
+ forced_flags.update(pkg.use.force)
+ forced_flags.update(pkg.use.mask)
+ old_use = inst_pkg.use.enabled
+ old_iuse = inst_pkg.iuse.all
+ cur_use = self._pkg_use_enabled(pkg)
+ cur_iuse = pkg.iuse.all
+ reinstall_for_flags = \
+ self._reinstall_for_flags(pkg,
+ forced_flags, old_use, old_iuse,
+ cur_use, cur_iuse)
+ if reinstall_for_flags:
+ reinstall = True
if reinstall_atoms.findAtomForPackage(pkg, \
modified_use=self._pkg_use_enabled(pkg)):
reinstall = True
@@ -4512,6 +5488,26 @@ class depgraph(object):
return existing_node, existing_node
if len(matched_packages) > 1:
+ if parent is not None and \
+ (parent.root, parent.slot_atom) in self._dynamic_config._slot_operator_replace_installed:
+ # We're forcing a rebuild of the parent because we missed some
+ # update because of a slot operator dep.
+ if atom.slot_operator == "=" and atom.sub_slot is None:
+ # This one is a slot operator dep. Exclude the installed packages if a newer non-installed
+ # pkg exists.
+ highest_installed = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ if highest_installed is None or pkg.version > highest_installed.version:
+ highest_installed = pkg
+
+ if highest_installed:
+ non_installed = [pkg for pkg in matched_packages \
+ if not pkg.installed and pkg.version > highest_installed.version]
+
+ if non_installed:
+ matched_packages = non_installed
+
if rebuilt_binaries:
inst_pkg = None
built_pkg = None
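
The matched_packages pruning added a few lines up reads cleanly in isolation: when a slot-operator rebuild of the parent is pending, installed candidates are dropped whenever a strictly newer non-installed candidate exists. A minimal standalone sketch, using a hypothetical Pkg namedtuple in place of portage's Package class:

    from collections import namedtuple

    # Hypothetical stand-in for _emerge.Package; only the two fields the
    # filter consults are modeled.
    Pkg = namedtuple("Pkg", "version installed")

    def prefer_newer_uninstalled(matched_packages):
        # Find the highest installed candidate, if any.
        highest_installed = None
        for pkg in matched_packages:
            if pkg.installed and (highest_installed is None
                    or pkg.version > highest_installed.version):
                highest_installed = pkg
        if highest_installed is None:
            return matched_packages
        # Keep only non-installed candidates that are strictly newer.
        non_installed = [pkg for pkg in matched_packages
            if not pkg.installed and pkg.version > highest_installed.version]
        return non_installed or matched_packages

    print(prefer_newer_uninstalled([Pkg(1, True), Pkg(2, False)]))
    # -> [Pkg(version=2, installed=False)]
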
@@ -4529,15 +5525,8 @@ class depgraph(object):
# non-empty, in order to avoid cases similar to
# bug #306659 where BUILD_TIME fields are missing
# in local and/or remote Packages file.
- try:
- built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
- except (KeyError, ValueError):
- built_timestamp = 0
-
- try:
- installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
- except (KeyError, ValueError):
- installed_timestamp = 0
+ built_timestamp = built_pkg.build_time
+ installed_timestamp = inst_pkg.build_time
if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
pass
@@ -4584,7 +5573,7 @@ class depgraph(object):
# ordered by type preference ("ebuild" type is the last resort)
return matched_packages[-1], existing_node
- def _select_pkg_from_graph(self, root, atom, onlydeps=False):
+ def _select_pkg_from_graph(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that have already been added to the graph or
those that are installed and have not been scheduled for
@@ -4594,11 +5583,18 @@ class depgraph(object):
matches = graph_db.match_pkgs(atom)
if not matches:
return None, None
- pkg = matches[-1] # highest match
- in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
- return pkg, in_graph
- def _select_pkg_from_installed(self, root, atom, onlydeps=False):
+ # There may be multiple matches, and they may
+ # conflict with each other, so choose the highest
+ # version that has already been added to the graph.
+ for pkg in reversed(matches):
+ if pkg in self._dynamic_config.digraph:
+ return pkg, pkg
+
+ # Fall back to installed packages
+ return self._select_pkg_from_installed(root, atom, onlydeps=onlydeps, parent=parent)
+
+ def _select_pkg_from_installed(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that are installed.
"""
@@ -4621,8 +5617,18 @@ class depgraph(object):
unmasked = [pkg for pkg in matches if not pkg.masks]
if unmasked:
matches = unmasked
+ if len(matches) > 1:
+ # Now account for packages for which existing
+ # ebuilds are masked or unavailable (bug #445506).
+ unmasked = [pkg for pkg in matches if
+ self._equiv_ebuild_visible(pkg)]
+ if unmasked:
+ matches = unmasked
+
pkg = matches[-1] # highest match
- in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ in_graph = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
return pkg, in_graph
def _complete_graph(self, required_sets=None):
@@ -4649,9 +5655,9 @@ class depgraph(object):
"complete_if_new_use", "y") == "y"
complete_if_new_ver = self._dynamic_config.myparams.get(
"complete_if_new_ver", "y") == "y"
- rebuild_if_new_slot_abi = self._dynamic_config.myparams.get(
- "rebuild_if_new_slot_abi", "y") == "y"
- complete_if_new_slot = rebuild_if_new_slot_abi
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+ complete_if_new_slot = rebuild_if_new_slot
if "complete" not in self._dynamic_config.myparams and \
(complete_if_new_use or
@@ -4670,10 +5676,16 @@ class depgraph(object):
inst_pkg = vardb.match_pkgs(node.slot_atom)
if inst_pkg and inst_pkg[0].cp == node.cp:
inst_pkg = inst_pkg[0]
- if complete_if_new_ver and \
- (inst_pkg < node or node < inst_pkg):
- version_change = True
- break
+ if complete_if_new_ver:
+ if inst_pkg < node or node < inst_pkg:
+ version_change = True
+ break
+ elif not (inst_pkg.slot == node.slot and
+ inst_pkg.sub_slot == node.sub_slot):
+ # slot/sub-slot change without revbump gets
+ # similar treatment to a version change
+ version_change = True
+ break
# Intersect enabled USE with IUSE, in order to
# ignore forced USE from implicit IUSE flags, since
@@ -4689,7 +5701,8 @@ class depgraph(object):
if complete_if_new_slot:
cp_list = vardb.match_pkgs(Atom(node.cp))
if (cp_list and cp_list[0].cp == node.cp and
- not any(node.slot == pkg.slot for pkg in cp_list)):
+ not any(node.slot == pkg.slot and
+ node.sub_slot == pkg.sub_slot for pkg in cp_list)):
version_change = True
break
@@ -4795,7 +5808,7 @@ class depgraph(object):
return 0
return 1
- def _pkg(self, cpv, type_name, root_config, installed=False,
+ def _pkg(self, cpv, type_name, root_config, installed=False,
onlydeps=False, myrepo = None):
"""
Get a package instance from the cache, or create a new
@@ -4813,10 +5826,14 @@ class depgraph(object):
installed=installed, onlydeps=onlydeps))
if pkg is None and onlydeps and not installed:
# Maybe it already got pulled in as a "merge" node.
- pkg = self._dynamic_config.mydbapi[root_config.root].get(
- Package._gen_hash_key(cpv=cpv, type_name=type_name,
- repo_name=myrepo, root_config=root_config,
- installed=installed, onlydeps=False))
+ for candidate in self._dynamic_config._package_tracker.match(
+ root_config.root, Atom("="+cpv)):
+ if candidate.type_name == type_name and \
+ candidate.repo_name == myrepo and \
+ candidate.root_config is root_config and \
+ candidate.installed == installed and \
+ not candidate.onlydeps:
+ pkg = candidate
if pkg is None:
tree_type = self.pkg_tree_map[type_name]
@@ -4866,7 +5883,7 @@ class depgraph(object):
# For installed packages, always ignore blockers from DEPEND since
# only runtime dependencies should be relevant for packages that
# are already built.
- dep_keys = ["RDEPEND", "PDEPEND"]
+ dep_keys = Package._runtime_keys
for myroot in self._frozen_config.trees:
if self._frozen_config.myopts.get("--root-deps") is not None and \
@@ -4876,7 +5893,8 @@ class depgraph(object):
vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[myroot]
root_config = self._frozen_config.roots[myroot]
- final_db = self._dynamic_config.mydbapi[myroot]
+ final_db = PackageTrackerDbapiWrapper(
+ myroot, self._dynamic_config._package_tracker)
blocker_cache = BlockerCache(myroot, vardb)
stale_cache = set(blocker_cache)
@@ -4893,7 +5911,7 @@ class depgraph(object):
# the merge process or by --depclean. Always warn about
# packages masked by license, since the user likely wants
# to adjust ACCEPT_LICENSE.
- if pkg in final_db:
+ if pkg in self._dynamic_config._package_tracker:
if not self._pkg_visibility_check(pkg,
trust_graph=False) and \
(pkg_in_graph or 'LICENSE' in pkg.masks):
@@ -4928,7 +5946,7 @@ class depgraph(object):
self._spinner_update()
blocker_data = blocker_cache.get(cpv)
if blocker_data is not None and \
- blocker_data.counter != long(pkg.metadata["COUNTER"]):
+ blocker_data.counter != pkg.counter:
blocker_data = None
# If blocker data from the graph is available, use
@@ -4945,9 +5963,8 @@ class depgraph(object):
blockers is not None:
# Re-use the blockers from the graph.
blocker_atoms = sorted(blockers)
- counter = long(pkg.metadata["COUNTER"])
blocker_data = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
blocker_cache[pkg.cpv] = blocker_data
continue
@@ -4972,13 +5989,14 @@ class depgraph(object):
# matches (this can happen if an atom lacks a
# category).
show_invalid_depstring_notice(
- pkg, depstr, _unicode_decode("%s") % (e,))
+ pkg, depstr, "%s" % (e,))
del e
raise
if not success:
- replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
- if replacement_pkg and \
- replacement_pkg[0].operation == "merge":
+ replacement_pkgs = self._dynamic_config._package_tracker.match(
+ myroot, pkg.slot_atom)
+ if any(replacement_pkg.operation == "merge" for \
+ replacement_pkg in replacement_pkgs):
# This package is being replaced anyway, so
# ignore invalid dependencies so as not to
# annoy the user too much (otherwise they'd be
@@ -4989,22 +6007,20 @@ class depgraph(object):
blocker_atoms = [myatom for myatom in atoms \
if myatom.blocker]
blocker_atoms.sort()
- counter = long(pkg.metadata["COUNTER"])
blocker_cache[cpv] = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
if blocker_atoms:
try:
for atom in blocker_atoms:
blocker = Blocker(atom=atom,
- eapi=pkg.metadata["EAPI"],
+ eapi=pkg.eapi,
priority=self._priority(runtime=True),
root=myroot)
self._dynamic_config._blocker_parents.add(blocker, pkg)
except portage.exception.InvalidAtom as e:
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
show_invalid_depstring_notice(
- pkg, depstr,
- _unicode_decode("Invalid Atom: %s") % (e,))
+ pkg, depstr, "Invalid Atom: %s" % (e,))
return False
for cpv in stale_cache:
del blocker_cache[cpv]
@@ -5025,8 +6041,7 @@ class depgraph(object):
virtuals = root_config.settings.getvirtuals()
myroot = blocker.root
initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
- final_db = self._dynamic_config.mydbapi[myroot]
-
+
provider_virtual = False
if blocker.cp in virtuals and \
not self._have_new_virt(blocker.root, blocker.cp):
@@ -5053,7 +6068,7 @@ class depgraph(object):
blocked_final = set()
for atom in atoms:
- for pkg in final_db.match_pkgs(atom):
+ for pkg in self._dynamic_config._package_tracker.match(myroot, atom):
if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
blocked_final.add(pkg)
@@ -5137,7 +6152,7 @@ class depgraph(object):
for inst_pkg, inst_task in depends_on_order:
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
- metadata=inst_pkg.metadata,
+ metadata=inst_pkg._metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
@@ -5203,7 +6218,12 @@ class depgraph(object):
mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
- def altlist(self, reversed=False):
+ def altlist(self, reversed=DeprecationWarning):
+
+ if reversed is not DeprecationWarning:
+ warnings.warn("The reversed parameter of "
+ "_emerge.depgraph.depgraph.altlist() is deprecated",
+ DeprecationWarning, stacklevel=2)
while self._dynamic_config._serialized_tasks_cache is None:
self._resolve_conflicts()
@@ -5213,9 +6233,13 @@ class depgraph(object):
except self._serialize_tasks_retry:
pass
- retlist = self._dynamic_config._serialized_tasks_cache[:]
- if reversed:
+ retlist = self._dynamic_config._serialized_tasks_cache
+ if reversed is not DeprecationWarning and reversed:
+ # TODO: remove the "reversed" parameter (builtin name collision)
+ retlist = list(retlist)
retlist.reverse()
+ retlist = tuple(retlist)
+
return retlist
def _implicit_libc_deps(self, mergelist, graph):
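
The altlist() change above is an instance of the sentinel-default pattern: the DeprecationWarning class itself marks "argument not passed", so even an explicit reversed=False from an old caller is detected and warned about. A generic, self-contained sketch:

    import warnings

    def altlist(reversed=DeprecationWarning):
        # The class object is the sentinel; any caller-supplied value,
        # including False, differs from it and triggers the warning.
        if reversed is not DeprecationWarning:
            warnings.warn("the 'reversed' parameter is deprecated",
                DeprecationWarning, stacklevel=2)
        else:
            reversed = False
        tasks = ("task-a", "task-b", "task-c")
        return tasks[::-1] if reversed else tasks

Returning a tuple, as the new code does, also makes the cached task list immune to in-place mutation by callers.
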
@@ -5226,19 +6250,15 @@ class depgraph(object):
libc_pkgs = {}
implicit_libc_roots = (self._frozen_config._running_root.root,)
for root in implicit_libc_roots:
- graphdb = self._dynamic_config.mydbapi[root]
vardb = self._frozen_config.trees[root]["vartree"].dbapi
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- libc_pkgs.setdefault(pkg.root, set()).add(pkg)
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.setdefault(pkg.root, set()).add(pkg)
if not libc_pkgs:
return
@@ -5326,7 +6346,7 @@ class depgraph(object):
if "complete" not in self._dynamic_config.myparams and \
self._dynamic_config._allow_backtracking and \
- self._dynamic_config._slot_collision_nodes and \
+ any(self._dynamic_config._package_tracker.slot_conflicts()) and \
not self._accept_blocker_conflicts():
self._dynamic_config.myparams["complete"] = True
@@ -5335,10 +6355,13 @@ class depgraph(object):
self._process_slot_conflicts()
- self._slot_abi_trigger_reinstalls()
+ if self._dynamic_config._allow_backtracking:
+ self._slot_operator_trigger_reinstalls()
if not self._validate_blockers():
- self._dynamic_config._skip_restart = True
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
raise self._unknown_internal_error()
def _serialize_tasks(self):
@@ -5436,8 +6459,8 @@ class depgraph(object):
initial_atoms=[PORTAGE_PACKAGE_ATOM])
running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
PORTAGE_PACKAGE_ATOM)
- replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
- PORTAGE_PACKAGE_ATOM)
+ replacement_portage = list(self._dynamic_config._package_tracker.match(
+ running_root, Atom(PORTAGE_PACKAGE_ATOM)))
if running_portage:
running_portage = running_portage[0]
@@ -5455,7 +6478,7 @@ class depgraph(object):
if running_portage is not None:
try:
portage_rdepend = self._select_atoms_highest_available(
- running_root, running_portage.metadata["RDEPEND"],
+ running_root, running_portage._metadata["RDEPEND"],
myuse=self._pkg_use_enabled(running_portage),
parent=running_portage, strict=False)
except portage.exception.InvalidDependString as e:
@@ -5474,18 +6497,15 @@ class depgraph(object):
for root in implicit_libc_roots:
libc_pkgs = set()
vardb = self._frozen_config.trees[root]["vartree"].dbapi
- graphdb = self._dynamic_config.mydbapi[root]
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- libc_pkgs.add(pkg)
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.add(pkg)
if libc_pkgs:
# If there's also an os-headers upgrade, we need to
@@ -5494,13 +6514,11 @@ class depgraph(object):
portage.const.OS_HEADERS_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- asap_nodes.append(pkg)
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ asap_nodes.append(pkg)
asap_nodes.extend(libc_pkgs)
@@ -5803,8 +6821,7 @@ class depgraph(object):
other_version = None
for pkg in vardb.match_pkgs(atom):
if pkg.cpv == task.cpv and \
- pkg.metadata["COUNTER"] == \
- task.metadata["COUNTER"]:
+ pkg.counter == task.counter:
continue
other_version = pkg
break
@@ -5843,13 +6860,12 @@ class depgraph(object):
# For packages in the world set, go ahead and uninstall
# when necessary, as long as the atom will be satisfied
# in the final state.
- graph_db = self._dynamic_config.mydbapi[task.root]
skip = False
try:
for atom in root_config.sets[
"selected"].iterAtomsForPackage(task):
satisfied = False
- for pkg in graph_db.match_pkgs(atom):
+ for pkg in self._dynamic_config._package_tracker.match(task.root, atom):
if pkg == inst_pkg:
continue
satisfied = True
@@ -5931,12 +6947,11 @@ class depgraph(object):
# node unnecessary (due to occupying the same SLOT),
# and we want to avoid executing a separate uninstall
# task in that case.
- slot_node = self._dynamic_config.mydbapi[uninst_task.root
- ].match_pkgs(uninst_task.slot_atom)
- if slot_node and \
- slot_node[0].operation == "merge":
- mygraph.add(slot_node[0], uninst_task,
- priority=BlockerDepPriority.instance)
+ for slot_node in self._dynamic_config._package_tracker.match(
+ uninst_task.root, uninst_task.slot_atom):
+ if slot_node.operation == "merge":
+ mygraph.add(slot_node, uninst_task,
+ priority=BlockerDepPriority.instance)
# Reset the state variables for leaf node selection and
# continue trying to select leaf nodes.
@@ -6011,7 +7026,7 @@ class depgraph(object):
inst_pkg = inst_pkg[0]
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
- metadata=inst_pkg.metadata,
+ metadata=inst_pkg._metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
@@ -6083,17 +7098,22 @@ class depgraph(object):
for blocker in unsolvable_blockers:
retlist.append(blocker)
+ retlist = tuple(retlist)
+
if unsolvable_blockers and \
not self._accept_blocker_conflicts():
self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
- self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
- self._dynamic_config._skip_restart = True
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
raise self._unknown_internal_error()
- if self._dynamic_config._slot_collision_info and \
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict and \
not self._accept_blocker_conflicts():
- self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
raise self._unknown_internal_error()
@@ -6147,13 +7167,8 @@ class depgraph(object):
def _show_merge_list(self):
if self._dynamic_config._serialized_tasks_cache is not None and \
not (self._dynamic_config._displayed_list is not None and \
- (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
- self._dynamic_config._displayed_list == \
- list(reversed(self._dynamic_config._serialized_tasks_cache)))):
- display_list = self._dynamic_config._serialized_tasks_cache[:]
- if "--tree" in self._frozen_config.myopts:
- display_list.reverse()
- self.display(display_list)
+ self._dynamic_config._displayed_list is self._dynamic_config._serialized_tasks_cache):
+ self.display(self._dynamic_config._serialized_tasks_cache)
def _show_unsatisfied_blockers(self, blockers):
self._show_merge_list()
@@ -6175,6 +7190,18 @@ class depgraph(object):
for blocker in blockers:
for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
self._dynamic_config._blocker_parents.parent_nodes(blocker)):
+
+ is_slot_conflict_pkg = False
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ if conflict.root == pkg.root and conflict.atom == pkg.slot_atom:
+ is_slot_conflict_pkg = True
+ break
+ if is_slot_conflict_pkg:
+ # The slot conflict display has better noise reduction
+ # than the unsatisfied blockers display, so skip
+ # unsatisfied blockers display for packages involved
+ # directly in slot conflicts (see bug #385391).
+ continue
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if not parent_atoms:
atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
@@ -6232,7 +7259,14 @@ class depgraph(object):
else:
# Display the specific atom from SetArg or
# Package types.
- msg.append("%s required by %s" % (atom, parent))
+ if atom != atom.unevaluated_atom:
+ # Show the unevaluated atom, since it can reveal
+ # issues with conditional use-flags missing
+ # from IUSE.
+ msg.append("%s (%s) required by %s" %
+ (atom.unevaluated_atom, atom, parent))
+ else:
+ msg.append("%s required by %s" % (atom, parent))
msg.append("\n")
msg.append("\n")
@@ -6248,6 +7282,10 @@ class depgraph(object):
# redundantly displaying this exact same merge list
# again via _show_merge_list().
self._dynamic_config._displayed_list = mylist
+
+ if "--tree" in self._frozen_config.myopts:
+ mylist = tuple(reversed(mylist))
+
display = Display()
return display(self, mylist, favorites, verbosity)
@@ -6320,7 +7358,7 @@ class depgraph(object):
if is_latest:
unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
elif is_latest_in_slot:
- unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
else:
@@ -6343,7 +7381,7 @@ class depgraph(object):
keyword = reason.unmask_hint.value
comment, filename = portage.getmaskingreason(
- pkg.cpv, metadata=pkg.metadata,
+ pkg.cpv, metadata=pkg._metadata,
settings=pkgsettings,
portdb=pkg.root_config.trees["porttree"].dbapi,
return_location=True)
@@ -6360,7 +7398,7 @@ class depgraph(object):
if is_latest:
p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
elif is_latest_in_slot:
- p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
else:
@@ -6385,7 +7423,7 @@ class depgraph(object):
if is_latest:
use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
elif is_latest_in_slot:
- use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
+ use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
else:
use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
@@ -6402,7 +7440,7 @@ class depgraph(object):
if is_latest:
license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
elif is_latest_in_slot:
- license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
+ license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
else:
license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
@@ -6442,7 +7480,7 @@ class depgraph(object):
if stat.S_ISREG(st.st_mode):
last_file_path = p
elif stat.S_ISDIR(st.st_mode):
- if os.path.basename(p) in _ignorecvs_dirs:
+ if os.path.basename(p) in VCS_DIRS:
continue
try:
contents = os.listdir(p)
@@ -6511,24 +7549,25 @@ class depgraph(object):
if len(roots) > 1:
writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+ def _writemsg(reason, file):
+ writemsg(('\nThe following %s are necessary to proceed:\n'
+ ' (see "%s" in the portage(5) man page for more details)\n')
+ % (colorize('BAD', reason), file), noiselevel=-1)
+
if root in unstable_keyword_msg:
- writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('keyword changes', 'package.accept_keywords')
writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
if root in p_mask_change_msg:
- writemsg("\nThe following " + colorize("BAD", "mask changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('mask changes', 'package.unmask')
writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
if root in use_changes_msg:
- writemsg("\nThe following " + colorize("BAD", "USE changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('USE changes', 'package.use')
writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
if root in license_msg:
- writemsg("\nThe following " + colorize("BAD", "license changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('license changes', 'package.license')
writemsg(format_msg(license_msg[root]), noiselevel=-1)
protect_obj = {}
@@ -6542,11 +7581,12 @@ class depgraph(object):
def write_changes(root, changes, file_to_write_to):
file_contents = None
try:
- file_contents = io.open(
+ with io.open(
_unicode_encode(file_to_write_to,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'],
- errors='replace').readlines()
+ errors='replace') as f:
+ file_contents = f.readlines()
except IOError as e:
if e.errno == errno.ENOENT:
file_contents = []
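
The rewrite of write_changes() above swaps a bare readlines() call for a with statement, so the descriptor is closed even when reading fails. The same pattern in a self-contained form (path and encoding choices are illustrative):

    import errno
    import io

    def read_config_lines(path):
        try:
            # The context manager closes the file even if readlines()
            # raises, which the old one-liner did not guarantee.
            with io.open(path, mode='r', encoding='utf_8',
                    errors='replace') as f:
                return f.readlines()
        except IOError as e:
            if e.errno == errno.ENOENT:
                return []  # a missing file means "no existing entries"
            raise
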
@@ -6612,10 +7652,16 @@ class depgraph(object):
noiselevel=-1)
writemsg("".join(problems), noiselevel=-1)
elif write_to_file and roots:
- writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
+ writemsg("\nAutounmask changes successfully written.\n",
noiselevel=-1)
+ for root in roots:
+ chk_updated_cfg_files(root,
+ [os.path.join(os.sep, USER_CONFIG_PATH)])
elif not pretend and not autounmask_write and roots:
- writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
+ "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
+ "paying special attention to mask or keyword changes that may expose\n"
+ "experimental or unstable packages.\n",
noiselevel=-1)
@@ -6632,21 +7678,35 @@ class depgraph(object):
self._show_circular_deps(
self._dynamic_config._circular_deps_for_display)
- # The slot conflict display has better noise reduction than
- # the unsatisfied blockers display, so skip unsatisfied blockers
- # display if there are slot conflicts (see bug #385391).
- if self._dynamic_config._slot_collision_info:
+ unresolved_conflicts = False
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict:
+ unresolved_conflicts = True
self._show_slot_collision_notice()
- elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ unresolved_conflicts = True
self._show_unsatisfied_blockers(
self._dynamic_config._unsatisfied_blockers_for_display)
- else:
+
+ # Only show missed updates if there are no unresolved conflicts,
+ # since they may be irrelevant after the conflicts are solved.
+ if not unresolved_conflicts:
self._show_missed_update()
+ if self._frozen_config.myopts.get("--verbose-slot-rebuilds", 'y') != 'n':
+ self._compute_abi_rebuild_info()
+ self._show_abi_rebuild_info()
+
self._show_ignored_binaries()
self._display_autounmask()
+ for depgraph_sets in self._dynamic_config.sets.values():
+ for pset in depgraph_sets.sets.values():
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
# TODO: Add generic support for "set problem" handlers so that
# the below warnings aren't special cases for world only.
@@ -6722,7 +7782,7 @@ class depgraph(object):
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
masked_packages.append((root_config, pkgsettings,
- pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
if masked_packages:
writemsg("\n" + colorize("BAD", "!!!") + \
" The following updates are masked by LICENSE changes:\n",
@@ -6737,7 +7797,7 @@ class depgraph(object):
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
masked_packages.append((root_config, pkgsettings,
- pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
if masked_packages:
writemsg("\n" + colorize("BAD", "!!!") + \
" The following installed packages are masked:\n",
@@ -6747,7 +7807,15 @@ class depgraph(object):
writemsg("\n", noiselevel=-1)
for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
- self._show_unsatisfied_dep(*pargs, **kwargs)
+ self._show_unsatisfied_dep(*pargs,
+ **portage._native_kwargs(kwargs))
+
+ if self._dynamic_config._buildpkgonly_deps_unsatisfied:
+ self._show_merge_list()
+ writemsg("\n!!! --buildpkgonly requires all "
+ "dependencies to be merged.\n", noiselevel=-1)
+ writemsg("!!! Cannot merge requested packages. "
+ "Merge deps and try again.\n\n", noiselevel=-1)
def saveNomergeFavorites(self):
"""Find atoms in favorites that are not in the mergelist and add them
@@ -6808,16 +7876,31 @@ class depgraph(object):
all_added.append(SETPREFIX + k)
all_added.extend(added_favorites)
all_added.sort()
- for a in all_added:
- if a.startswith(SETPREFIX):
- filename = "world_sets"
- else:
- filename = "world"
- writemsg_stdout(
- ">>> Recording %s in \"%s\" favorites file...\n" %
- (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
if all_added:
- world_set.update(all_added)
+ skip = False
+ if "--ask" in self._frozen_config.myopts:
+ writemsg_stdout("\n", noiselevel=-1)
+ for a in all_added:
+ writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
+ noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+ prompt = "Would you like to add these packages to your world " \
+ "favorites?"
+ enter_invalid = '--ask-enter-invalid' in \
+ self._frozen_config.myopts
+ if userquery(prompt, enter_invalid) == "No":
+ skip = True
+
+ if not skip:
+ for a in all_added:
+ if a.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+ writemsg_stdout(
+ ">>> Recording %s in \"%s\" favorites file...\n" %
+ (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
+ world_set.update(all_added)
if world_locked:
world_set.unlock()
@@ -6844,7 +7927,6 @@ class depgraph(object):
else:
args = []
- fakedb = self._dynamic_config.mydbapi
serialized_tasks = []
masked_tasks = []
for x in mergelist:
@@ -6902,7 +7984,7 @@ class depgraph(object):
self._dynamic_config._unsatisfied_deps_for_display.append(
((pkg.root, "="+pkg.cpv), {"myparent":None}))
- fakedb[myroot].cpv_inject(pkg)
+ self._dynamic_config._package_tracker.add_pkg(pkg)
serialized_tasks.append(pkg)
self._spinner_update()
@@ -7092,14 +8174,15 @@ class depgraph(object):
try:
for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
self._show_unsatisfied_dep(
- *pargs, check_autounmask_breakage=True, **kwargs)
+ *pargs, check_autounmask_breakage=True,
+ **portage._native_kwargs(kwargs))
except self._autounmask_breakage:
return True
return False
def get_backtrack_infos(self):
return self._dynamic_config._backtrack_infos
-
+
class _dep_check_composite_db(dbapi):
"""
@@ -7214,8 +8297,9 @@ class _dep_check_composite_db(dbapi):
elif not self._depgraph._equiv_ebuild_visible(pkg):
return False
- in_graph = self._depgraph._dynamic_config._slot_pkg_map[
- self._root].get(pkg.slot_atom)
+ in_graph = next(self._depgraph._dynamic_config._package_tracker.match(
+ self._root, pkg.slot_atom, installed=False), None)
+
if in_graph is None:
# Mask choices for packages which are not the highest visible
# version within their slot (since they usually trigger slot
@@ -7234,7 +8318,7 @@ class _dep_check_composite_db(dbapi):
return True
def aux_get(self, cpv, wants):
- metadata = self._cpv_pkg_map[cpv].metadata
+ metadata = self._cpv_pkg_map[cpv]._metadata
return [metadata.get(x, "") for x in wants]
def match_pkgs(self, atom):
@@ -7308,14 +8392,14 @@ def _spinner_stop(spinner):
portage.writemsg_stdout("... done!\n")
-def backtrack_depgraph(settings, trees, myopts, myparams,
+def backtrack_depgraph(settings, trees, myopts, myparams,
myaction, myfiles, spinner):
"""
Raises PackageSetNotFound if myfiles contains a missing package set.
"""
_spinner_start(spinner, myopts)
try:
- return _backtrack_depgraph(settings, trees, myopts, myparams,
+ return _backtrack_depgraph(settings, trees, myopts, myparams,
myaction, myfiles, spinner)
finally:
_spinner_stop(spinner)
@@ -7412,7 +8496,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
skip_masked = True
skip_unsatisfied = True
mergelist = mtimedb["resume"]["mergelist"]
- dropped_tasks = set()
+ dropped_tasks = {}
frozen_config = _frozen_depgraph_config(settings, trees,
myopts, spinner)
while True:
@@ -7426,12 +8510,21 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
raise
graph = mydepgraph._dynamic_config.digraph
- unsatisfied_parents = dict((dep.parent, dep.parent) \
- for dep in e.value)
+ unsatisfied_parents = {}
traversed_nodes = set()
- unsatisfied_stack = list(unsatisfied_parents)
+ unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
while unsatisfied_stack:
- pkg = unsatisfied_stack.pop()
+ pkg, atom = unsatisfied_stack.pop()
+ if atom is not None and \
+ mydepgraph._select_pkg_from_installed(
+ pkg.root, atom)[0] is not None:
+ continue
+ atoms = unsatisfied_parents.get(pkg)
+ if atoms is None:
+ atoms = []
+ unsatisfied_parents[pkg] = atoms
+ if atom is not None:
+ atoms.append(atom)
if pkg in traversed_nodes:
continue
traversed_nodes.add(pkg)
@@ -7440,7 +8533,8 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# package scheduled for merge, removing this
# package may cause the parent package's
# dependency to become unsatisfied.
- for parent_node in graph.parent_nodes(pkg):
+ for parent_node, atom in \
+ mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
if not isinstance(parent_node, Package) \
or parent_node.operation not in ("merge", "nomerge"):
continue
@@ -7448,8 +8542,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# ensure that a package with an unsatisfied dependency
# won't get pulled in, even indirectly via a soft
# dependency.
- unsatisfied_parents[parent_node] = parent_node
- unsatisfied_stack.append(parent_node)
+ unsatisfied_stack.append((parent_node, atom))
unsatisfied_tuples = frozenset(tuple(parent_node)
for parent_node in unsatisfied_parents
@@ -7470,8 +8563,8 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# Exclude installed packages that have been removed from the graph due
# to failure to build/install runtime dependencies after the dependent
# package has already been installed.
- dropped_tasks.update(pkg for pkg in \
- unsatisfied_parents if pkg.operation != "nomerge")
+ dropped_tasks.update((pkg, atoms) for pkg, atoms in \
+ unsatisfied_parents.items() if pkg.operation != "nomerge")
del e, graph, traversed_nodes, \
unsatisfied_parents, unsatisfied_stack
@@ -7557,9 +8650,11 @@ def show_masked_packages(masked_packages):
shown_comments.add(comment)
portdb = root_config.trees["porttree"].dbapi
for l in missing_licenses:
- l_path = portdb.findLicensePath(l)
if l in shown_licenses:
continue
+ l_path = portdb.findLicensePath(l)
+ if l_path is None:
+ continue
msg = ("A copy of the '%s' license" + \
" is located at '%s'.\n\n") % (l, l_path)
writemsg(msg, noiselevel=-1)
@@ -7586,9 +8681,9 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
if not pkg.installed:
- if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
+ if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
- pkg.metadata["CHOST"]))
+ pkg._metadata["CHOST"]))
if pkg.invalid:
for msgs in pkg.invalid.values():
@@ -7596,7 +8691,7 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
mreasons.append(
_MaskReason("invalid", "invalid: %s" % (msg,)))
- if not pkg.metadata["SLOT"]:
+ if not pkg._metadata["SLOT"]:
mreasons.append(
_MaskReason("invalid", "SLOT: undefined"))
diff --git a/pym/_emerge/emergelog.py b/pym/_emerge/emergelog.py
index b1b093f52..aea94f74e 100644
--- a/pym/_emerge/emergelog.py
+++ b/pym/_emerge/emergelog.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import unicode_literals
import io
import sys
@@ -20,10 +20,6 @@ from portage.output import xtermTitle
_disable = True
_emerge_log_dir = '/var/log'
-# Coerce to unicode, in order to prevent TypeError when writing
-# raw bytes to TextIOWrapper with python2.
-_log_fmt = _unicode_decode("%.0f: %s\n")
-
def emergelog(xterm_titles, mystr, short_msg=None):
if _disable:
@@ -51,10 +47,10 @@ def emergelog(xterm_titles, mystr, short_msg=None):
mode=0o660)
mylock = portage.locks.lockfile(file_path)
try:
- mylogfile.write(_log_fmt % (time.time(), mystr))
+ mylogfile.write("%.0f: %s\n" % (time.time(), mystr))
mylogfile.close()
finally:
portage.locks.unlockfile(mylock)
except (IOError,OSError,portage.exception.PortageException) as e:
if secpass >= 1:
- print("emergelog():",e, file=sys.stderr)
+ portage.util.writemsg("emergelog(): %s\n" % (e,), noiselevel=-1)
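
For reference, the locked write that emergelog() performs can be approximated without portage.locks; a simplified sketch using plain fcntl instead (the log path is illustrative):

    import fcntl
    import time

    def append_log_line(path, mystr):
        with open(path, 'a') as logfile:
            # Exclusive lock so concurrent emerge processes do not
            # interleave partial lines.
            fcntl.lockf(logfile, fcntl.LOCK_EX)
            try:
                logfile.write("%.0f: %s\n" % (time.time(), mystr))
                logfile.flush()
            finally:
                fcntl.lockf(logfile, fcntl.LOCK_UN)
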
diff --git a/pym/_emerge/getloadavg.py b/pym/_emerge/getloadavg.py
index e9babf13e..6a2794fb1 100644
--- a/pym/_emerge/getloadavg.py
+++ b/pym/_emerge/getloadavg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
@@ -11,7 +11,8 @@ if getloadavg is None:
Raises OSError if the load average was unobtainable.
"""
try:
- loadavg_str = open('/proc/loadavg').readline()
+ with open('/proc/loadavg') as f:
+ loadavg_str = f.readline()
except IOError:
# getloadavg() is only supposed to raise OSError, so convert
raise OSError('unknown')
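
When os.getloadavg() is unavailable, the fallback above reads /proc/loadavg, whose first three fields are the 1-, 5- and 15-minute averages (for example "0.42 0.35 0.30 1/123 4567"). A sketch of the complete fallback, Linux-only by construction:

    def getloadavg():
        try:
            with open('/proc/loadavg') as f:
                loadavg_str = f.readline()
        except IOError:
            # getloadavg() is only supposed to raise OSError.
            raise OSError('unknown')
        return tuple(float(x) for x in loadavg_str.split()[:3])
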
diff --git a/pym/_emerge/help.py b/pym/_emerge/help.py
index a1dbb37cc..8e241a85c 100644
--- a/pym/_emerge/help.py
+++ b/pym/_emerge/help.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -9,15 +9,15 @@ def help():
print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
print(bold("Usage:"))
print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
- print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >")
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("@system")+" | "+turquoise("@world")+" >")
print(" "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
print(" "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
- print(" "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("--verbose")+" ] ")
- print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvV")+"]")
+ print(" "+turquoise("emerge")+" "+turquoise("--help"))
+ print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvVw")+"]")
print(" [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]")
print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
- print(" [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
+ print(" [ "+green("--newrepo")+" ] [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ] [ "+ green("--quiet-build")+" [ " + turquoise("y") + " | "+ turquoise("n")+" ] ]")
print(" [ "+green("--reinstall ")+turquoise("changed-use")+" ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]")
print(bold("Actions:")+" [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+" ]")
diff --git a/pym/_emerge/is_valid_package_atom.py b/pym/_emerge/is_valid_package_atom.py
index 7cb2a5bb1..112afc1ec 100644
--- a/pym/_emerge/is_valid_package_atom.py
+++ b/pym/_emerge/is_valid_package_atom.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import re
from portage.dep import isvalidatom
def insert_category_into_atom(atom, category):
- alphanum = re.search(r'\w', atom)
+ # Handle '*' character for "extended syntax" wildcard support.
+ alphanum = re.search(r'[\*\w]', atom, re.UNICODE)
if alphanum:
ret = atom[:alphanum.start()] + "%s/" % category + \
atom[alphanum.start():]
@@ -14,7 +15,7 @@ def insert_category_into_atom(atom, category):
return ret
def is_valid_package_atom(x, allow_repo=False):
- if "/" not in x:
+ if "/" not in x.split(":")[0]:
x2 = insert_category_into_atom(x, 'cat')
if x2 != None:
x = x2
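
The x.split(":")[0] guard above keeps a slot specifier from hiding a missing category: "foo:2" has no "/" before the colon and gets a category inserted, while "dev-libs/foo:2" is left alone. A small illustration using the module's own insert_category_into_atom (atom names are examples):

    for x in ("foo", "foo:2", "=foo-1.0", "dev-libs/foo:2"):
        if "/" not in x.split(":")[0]:
            # e.g. "foo:2" -> "cat/foo:2", "=foo-1.0" -> "=cat/foo-1.0"
            print(insert_category_into_atom(x, "cat"))
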
diff --git a/pym/_emerge/main.py b/pym/_emerge/main.py
index f19994c46..cfe133264 100644
--- a/pym/_emerge/main.py
+++ b/pym/_emerge/main.py
@@ -1,53 +1,24 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
-import logging
-import signal
-import stat
-import subprocess
-import sys
-import textwrap
import platform
+import sys
+
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.news:count_unread_news,display_news_notifications',
- 'portage.emaint.modules.logs.logs:CleanLogs',
+ 'logging',
+ 'portage.dep:Atom',
+ 'portage.util:writemsg_level',
+ 'textwrap',
+ '_emerge.actions:load_emerge_config,run_action,' + \
+ 'validate_ebuild_environment',
+ '_emerge.help:help@emerge_help',
+ '_emerge.is_valid_package_atom:insert_category_into_atom'
)
from portage import os
-from portage import _encodings
-from portage import _unicode_decode
-import _emerge.help
-import portage.xpak, errno, re, time
-from portage.output import colorize, xtermTitle, xtermTitleReset
-from portage.output import create_color_func
-good = create_color_func("GOOD")
-bad = create_color_func("BAD")
-
-from portage.const import _ENABLE_DYN_LINK_MAP
-import portage.elog
-import portage.util
-import portage.locks
-import portage.exception
-from portage.data import secpass
-from portage.dbapi.dep_expand import dep_expand
-from portage.util import normalize_path as normpath
-from portage.util import (shlex_split, varexpand,
- writemsg_level, writemsg_stdout)
-from portage._sets import SETPREFIX
-from portage._global_updates import _global_updates
-
-from _emerge.actions import action_config, action_sync, action_metadata, \
- action_regen, action_search, action_uninstall, action_info, action_build, \
- adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \
- display_news_notification, getportageversion, load_emerge_config
-import _emerge
-from _emerge.emergelog import emergelog
-from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
-from _emerge.is_valid_package_atom import is_valid_package_atom
-from _emerge.stdout_spinner import stdout_spinner
-from _emerge.userquery import userquery
+from portage.util._argparse import ArgumentParser
if sys.hexversion >= 0x3000000:
long = int
@@ -61,9 +32,11 @@ options=[
"--debug",
"--digest",
"--emptytree",
+"--verbose-conflicts",
"--fetchonly", "--fetch-all-uri",
"--ignore-default-opts",
"--noconfmem",
+"--newrepo",
"--newuse",
"--nodeps", "--noreplace",
"--nospinner", "--oneshot",
@@ -76,7 +49,6 @@ options=[
"--tree",
"--unordered-display",
"--update",
-"--verbose",
"--verbose-main-repo-display",
]
@@ -97,7 +69,7 @@ shortmapping={
"s":"--search", "S":"--searchdesc",
"t":"--tree",
"u":"--update",
-"v":"--verbose", "V":"--version"
+"V":"--version"
}
COWSAY_MOO = """
@@ -109,331 +81,12 @@ COWSAY_MOO = """
-----------------------
\ ^__^
\ (oo)\_______
- (__)\ )\/\
+ (__)\ )\/\\
||----w |
|| ||
"""
-def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
-
- if os.path.exists("/usr/bin/install-info"):
- out = portage.output.EOutput()
- regen_infodirs=[]
- for z in infodirs:
- if z=='':
- continue
- inforoot=normpath(root+z)
- if os.path.isdir(inforoot) and \
- not [x for x in os.listdir(inforoot) \
- if x.startswith('.keepinfodir')]:
- infomtime = os.stat(inforoot)[stat.ST_MTIME]
- if inforoot not in prev_mtimes or \
- prev_mtimes[inforoot] != infomtime:
- regen_infodirs.append(inforoot)
-
- if not regen_infodirs:
- portage.writemsg_stdout("\n")
- if portage.util.noiselimit >= 0:
- out.einfo("GNU info directory index is up-to-date.")
- else:
- portage.writemsg_stdout("\n")
- if portage.util.noiselimit >= 0:
- out.einfo("Regenerating GNU info directory index...")
-
- dir_extensions = ("", ".gz", ".bz2")
- icount=0
- badcount=0
- errmsg = ""
- for inforoot in regen_infodirs:
- if inforoot=='':
- continue
-
- if not os.path.isdir(inforoot) or \
- not os.access(inforoot, os.W_OK):
- continue
-
- file_list = os.listdir(inforoot)
- file_list.sort()
- dir_file = os.path.join(inforoot, "dir")
- moved_old_dir = False
- processed_count = 0
- for x in file_list:
- if x.startswith(".") or \
- os.path.isdir(os.path.join(inforoot, x)):
- continue
- if x.startswith("dir"):
- skip = False
- for ext in dir_extensions:
- if x == "dir" + ext or \
- x == "dir" + ext + ".old":
- skip = True
- break
- if skip:
- continue
- if processed_count == 0:
- for ext in dir_extensions:
- try:
- os.rename(dir_file + ext, dir_file + ext + ".old")
- moved_old_dir = True
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- processed_count += 1
- try:
- proc = subprocess.Popen(
- ['/usr/bin/install-info',
- '--dir-file=%s' % os.path.join(inforoot, "dir"),
- os.path.join(inforoot, x)],
- env=dict(os.environ, LANG="C", LANGUAGE="C"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- except OSError:
- myso = None
- else:
- myso = _unicode_decode(
- proc.communicate()[0]).rstrip("\n")
- proc.wait()
- existsstr="already exists, for file `"
- if myso:
- if re.search(existsstr,myso):
- # Already exists... Don't increment the count for this.
- pass
- elif myso[:44]=="install-info: warning: no info dir entry in ":
- # This info file doesn't contain a DIR-header: install-info produces this
- # (harmless) warning (the --quiet switch doesn't seem to work).
- # Don't increment the count for this.
- pass
- else:
- badcount=badcount+1
- errmsg += myso + "\n"
- icount=icount+1
-
- if moved_old_dir and not os.path.exists(dir_file):
- # We didn't generate a new dir file, so put the old file
- # back where it was originally found.
- for ext in dir_extensions:
- try:
- os.rename(dir_file + ext + ".old", dir_file + ext)
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- # Clean dir.old cruft so that they don't prevent
- # unmerge of otherwise empty directories.
- for ext in dir_extensions:
- try:
- os.unlink(dir_file + ext + ".old")
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- #update mtime so we can potentially avoid regenerating.
- prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
-
- if badcount:
- out.eerror("Processed %d info files; %d errors." % \
- (icount, badcount))
- writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
- else:
- if icount > 0 and portage.util.noiselimit >= 0:
- out.einfo("Processed %d info files." % (icount,))
-
-def display_preserved_libs(vardbapi, myopts):
- MAX_DISPLAY = 3
-
- if vardbapi._linkmap is None or \
- vardbapi._plib_registry is None:
- # preserve-libs is entirely disabled
- return
-
- # Explicitly load and prune the PreservedLibsRegistry in order
- # to ensure that we do not display stale data.
- vardbapi._plib_registry.load()
-
- if vardbapi._plib_registry.hasEntries():
- if "--quiet" in myopts:
- print()
- print(colorize("WARN", "!!!") + " existing preserved libs found")
- return
- else:
- print()
- print(colorize("WARN", "!!!") + " existing preserved libs:")
-
- plibdata = vardbapi._plib_registry.getPreservedLibs()
- linkmap = vardbapi._linkmap
- consumer_map = {}
- owners = {}
-
- try:
- linkmap.rebuild()
- except portage.exception.CommandNotFound as e:
- writemsg_level("!!! Command Not Found: %s\n" % (e,),
- level=logging.ERROR, noiselevel=-1)
- del e
- else:
- search_for_owners = set()
- for cpv in plibdata:
- internal_plib_keys = set(linkmap._obj_key(f) \
- for f in plibdata[cpv])
- for f in plibdata[cpv]:
- if f in consumer_map:
- continue
- consumers = []
- for c in linkmap.findConsumers(f):
- # Filter out any consumers that are also preserved libs
- # belonging to the same package as the provider.
- if linkmap._obj_key(c) not in internal_plib_keys:
- consumers.append(c)
- consumers.sort()
- consumer_map[f] = consumers
- search_for_owners.update(consumers[:MAX_DISPLAY+1])
-
- owners = {}
- for f in search_for_owners:
- owner_set = set()
- for owner in linkmap.getOwners(f):
- owner_dblink = vardbapi._dblink(owner)
- if owner_dblink.exists():
- owner_set.add(owner_dblink)
- if owner_set:
- owners[f] = owner_set
-
- for cpv in plibdata:
- print(colorize("WARN", ">>>") + " package: %s" % cpv)
- samefile_map = {}
- for f in plibdata[cpv]:
- obj_key = linkmap._obj_key(f)
- alt_paths = samefile_map.get(obj_key)
- if alt_paths is None:
- alt_paths = set()
- samefile_map[obj_key] = alt_paths
- alt_paths.add(f)
-
- for alt_paths in samefile_map.values():
- alt_paths = sorted(alt_paths)
- for p in alt_paths:
- print(colorize("WARN", " * ") + " - %s" % (p,))
- f = alt_paths[0]
- consumers = consumer_map.get(f, [])
- for c in consumers[:MAX_DISPLAY]:
- print(colorize("WARN", " * ") + " used by %s (%s)" % \
- (c, ", ".join(x.mycpv for x in owners.get(c, []))))
- if len(consumers) == MAX_DISPLAY + 1:
- print(colorize("WARN", " * ") + " used by %s (%s)" % \
- (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
- for x in owners.get(consumers[MAX_DISPLAY], []))))
- elif len(consumers) > MAX_DISPLAY:
- print(colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY))
- print("Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries")
-
-def post_emerge(myaction, myopts, myfiles,
- target_root, trees, mtimedb, retval):
- """
- Misc. things to run at the end of a merge session.
-
- Update Info Files
- Update Config Files
- Update News Items
- Commit mtimeDB
- Display preserved libs warnings
-
- @param myaction: The action returned from parse_opts()
- @type myaction: String
- @param myopts: emerge options
- @type myopts: dict
- @param myfiles: emerge arguments
- @type myfiles: list
- @param target_root: The target EROOT for myaction
- @type target_root: String
- @param trees: A dictionary mapping each ROOT to it's package databases
- @type trees: dict
- @param mtimedb: The mtimeDB to store data needed across merge invocations
- @type mtimedb: MtimeDB class instance
- @param retval: Emerge's return value
- @type retval: Int
- """
-
- root_config = trees[target_root]["root_config"]
- vardbapi = trees[target_root]['vartree'].dbapi
- settings = vardbapi.settings
- info_mtimes = mtimedb["info"]
-
- # Load the most current variables from ${ROOT}/etc/profile.env
- settings.unlock()
- settings.reload()
- settings.regenerate()
- settings.lock()
-
- config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
- infodirs = settings.get("INFOPATH","").split(":") + \
- settings.get("INFODIR","").split(":")
-
- os.chdir("/")
-
- if retval == os.EX_OK:
- exit_msg = " *** exiting successfully."
- else:
- exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
- emergelog("notitles" not in settings.features, exit_msg)
-
- _flush_elog_mod_echo()
-
- if not vardbapi._pkgs_changed:
- # GLEP 42 says to display news *after* an emerge --pretend
- if "--pretend" in myopts:
- display_news_notification(root_config, myopts)
- # If vdb state has not changed then there's nothing else to do.
- return
-
- vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
- portage.util.ensure_dirs(vdb_path)
- vdb_lock = None
- if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
- vardbapi.lock()
- vdb_lock = True
-
- if vdb_lock:
- try:
- if "noinfo" not in settings.features:
- chk_updated_info_files(target_root,
- infodirs, info_mtimes, retval)
- mtimedb.commit()
- finally:
- if vdb_lock:
- vardbapi.unlock()
-
- display_preserved_libs(vardbapi, myopts)
- chk_updated_cfg_files(settings['EROOT'], config_protect)
-
- display_news_notification(root_config, myopts)
-
- postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
- portage.USER_CONFIG_PATH, "bin", "post_emerge")
- if os.access(postemerge, os.X_OK):
- hook_retval = portage.process.spawn(
- [postemerge], env=settings.environ())
- if hook_retval != os.EX_OK:
- writemsg_level(
- " %s spawn failed of %s\n" % (bad("*"), postemerge,),
- level=logging.ERROR, noiselevel=-1)
-
- clean_logs(settings)
-
- if "--quiet" not in myopts and \
- myaction is None and "@world" in myfiles:
- show_depclean_suggestion()
-
-def show_depclean_suggestion():
- out = portage.output.EOutput()
- msg = "After world updates, it is important to remove " + \
- "obsolete packages with emerge --depclean. Refer " + \
- "to `man emerge` for more information."
- for line in textwrap.wrap(msg, 72):
- out.ewarn(line)
-
def multiple_actions(action1, action2):
sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
@@ -455,6 +108,16 @@ def insert_optional_args(args):
return False
valid_integers = valid_integers()
+
+ class valid_floats(object):
+ def __contains__(self, s):
+ try:
+ return float(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_floats = valid_floats()
+
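
The two validator classes above lean on Python's membership protocol: defining __contains__ lets insert_optional_args test a candidate token with a plain `in` check. A minimal, self-contained sketch of the same idiom (the names here are illustrative, not Portage's):

    class ValidFloats(object):
        def __contains__(self, s):
            # Membership means "s parses as a non-negative float".
            try:
                return float(s) >= 0
            except (ValueError, OverflowError):
                return False

    valid_floats = ValidFloats()
    print("2.5" in valid_floats)  # True
    print("-1" in valid_floats)   # False
    print("abc" in valid_floats)  # False
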
y_or_n = ('y', 'n',)
new_args = []
@@ -468,6 +131,7 @@ def insert_optional_args(args):
'--buildpkg' : y_or_n,
'--complete-graph' : y_or_n,
'--deep' : valid_integers,
+ '--depclean-lib-check' : y_or_n,
'--deselect' : y_or_n,
'--binpkg-respect-use' : y_or_n,
'--fail-clean' : y_or_n,
@@ -475,10 +139,12 @@ def insert_optional_args(args):
'--getbinpkgonly' : y_or_n,
'--jobs' : valid_integers,
'--keep-going' : y_or_n,
+ '--load-average' : valid_floats,
'--package-moves' : y_or_n,
'--quiet' : y_or_n,
'--quiet-build' : y_or_n,
- '--rebuild-if-new-slot-abi': y_or_n,
+ '--quiet-fail' : y_or_n,
+ '--rebuild-if-new-slot': y_or_n,
'--rebuild-if-new-rev' : y_or_n,
'--rebuild-if-new-ver' : y_or_n,
'--rebuild-if-unbuilt' : y_or_n,
@@ -489,11 +155,10 @@ def insert_optional_args(args):
"--use-ebuild-visibility": y_or_n,
'--usepkg' : y_or_n,
'--usepkgonly' : y_or_n,
+ '--verbose' : y_or_n,
+ '--verbose-slot-rebuilds': y_or_n,
}
- if _ENABLE_DYN_LINK_MAP:
- default_arg_opts['--depclean-lib-check'] = y_or_n
-
short_arg_opts = {
'D' : valid_integers,
'j' : valid_integers,
@@ -509,6 +174,8 @@ def insert_optional_args(args):
'k' : y_or_n,
'K' : y_or_n,
'q' : y_or_n,
+ 'v' : y_or_n,
+ 'w' : y_or_n,
}
arg_stack = args[:]
@@ -597,14 +264,17 @@ def _find_bad_atoms(atoms, less_strict=False):
"""
bad_atoms = []
for x in ' '.join(atoms).split():
+ atom = x
+ if "/" not in x.split(":")[0]:
+ x_cat = insert_category_into_atom(x, 'dummy-category')
+ if x_cat is not None:
+ atom = x_cat
+
bad_atom = False
try:
- atom = portage.dep.Atom(x, allow_wildcard=True, allow_repo=less_strict)
+ atom = Atom(atom, allow_wildcard=True, allow_repo=less_strict)
except portage.exception.InvalidAtom:
- try:
- atom = portage.dep.Atom("*/"+x, allow_wildcard=True, allow_repo=less_strict)
- except portage.exception.InvalidAtom:
- bad_atom = True
+ bad_atom = True
if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
bad_atoms.append(x)
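
The rewritten loop above grafts a dummy category onto category-less input so the Atom parser sees a well-formed cat/pkg string. A rough standalone sketch of the idea (simplified: the real insert_category_into_atom also has to place the category after any leading version operator such as >=):

    def with_dummy_category(token):
        # If the part before any ":" slot suffix has no "/", prepend a
        # placeholder category so atom parsing has something to match.
        if "/" not in token.split(":")[0]:
            return "dummy-category/" + token
        return token

    print(with_dummy_category("vim"))              # dummy-category/vim
    print(with_dummy_category("app-editors/vim"))  # app-editors/vim
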
@@ -632,31 +302,26 @@ def parse_opts(tmpcmdline, silent=False):
"--ask": {
"shortopt" : "-a",
"help" : "prompt before performing any actions",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask": {
"help" : "automatically unmask packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-unrestricted-atoms": {
"help" : "write autounmask changes with >= atoms if possible",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-keep-masks": {
"help" : "don't add package.unmask entries",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-write": {
"help" : "write changes made by --autounmask to disk",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -665,6 +330,11 @@ def parse_opts(tmpcmdline, silent=False):
"action":"store"
},
+ "--accept-restrict": {
+ "help":"temporarily override ACCEPT_RESTRICT",
+ "action":"store"
+ },
+
"--backtrack": {
"help" : "Specifies how many times to backtrack if dependency " + \
@@ -676,7 +346,6 @@ def parse_opts(tmpcmdline, silent=False):
"--buildpkg": {
"shortopt" : "-b",
"help" : "build binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -694,25 +363,21 @@ def parse_opts(tmpcmdline, silent=False):
},
"--color": {
"help":"enable or disable color output",
- "type":"choice",
"choices":("y", "n")
},
"--complete-graph": {
"help" : "completely account for all known dependencies",
- "type" : "choice",
"choices" : true_y_or_n
},
"--complete-graph-if-new-use": {
"help" : "trigger --complete-graph behavior if USE or IUSE will change for an installed package",
- "type" : "choice",
"choices" : y_or_n
},
"--complete-graph-if-new-ver": {
"help" : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
- "type" : "choice",
"choices" : y_or_n
},
@@ -728,15 +393,18 @@ def parse_opts(tmpcmdline, silent=False):
"action" : "store"
},
+ "--depclean-lib-check": {
+ "help" : "check for consumers of libraries before removing them",
+ "choices" : true_y_or_n
+ },
+
"--deselect": {
"help" : "remove atoms/sets from the world file",
- "type" : "choice",
"choices" : true_y_or_n
},
"--dynamic-deps": {
"help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
- "type": "choice",
"choices": y_or_n
},
@@ -750,17 +418,15 @@ def parse_opts(tmpcmdline, silent=False):
"--fail-clean": {
"help" : "clean temp files after build failure",
- "type" : "choice",
"choices" : true_y_or_n
},
- "--ignore-built-slot-abi-deps": {
- "help": "Ignore the SLOT/ABI := operator parts of dependencies that have "
+ "--ignore-built-slot-operator-deps": {
+ "help": "Ignore the slot/sub-slot := operator parts of dependencies that have "
"been recorded when packages where built. This option is intended "
"only for debugging purposes, and it only affects built packages "
- "that specify SLOT/ABI := operator dependencies using the "
+ "that specify slot/sub-slot := operator dependencies using the "
"experimental \"4-slot-abi\" EAPI.",
- "type": "choice",
"choices": y_or_n
},
@@ -776,7 +442,6 @@ def parse_opts(tmpcmdline, silent=False):
"--keep-going": {
"help" : "continue as much as possible after an error",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -791,18 +456,15 @@ def parse_opts(tmpcmdline, silent=False):
"--misspell-suggestions": {
"help" : "enable package name misspell suggestions",
- "type" : "choice",
"choices" : ("y", "n")
},
"--with-bdeps": {
"help":"include unnecessary build time dependencies",
- "type":"choice",
"choices":("y", "n")
},
"--reinstall": {
"help":"specify conditions to trigger package reinstallation",
- "type":"choice",
"choices":["changed-use"]
},
@@ -817,21 +479,18 @@ def parse_opts(tmpcmdline, silent=False):
"--binpkg-respect-use": {
"help" : "discard binary packages if their use flags \
don't match the current configuration",
- "type" : "choice",
"choices" : true_y_or_n
},
"--getbinpkg": {
"shortopt" : "-g",
"help" : "fetch binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--getbinpkgonly": {
"shortopt" : "-G",
"help" : "fetch binary packages only",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -860,29 +519,40 @@ def parse_opts(tmpcmdline, silent=False):
"--package-moves": {
"help" : "perform package moves when necessary",
- "type" : "choice",
"choices" : true_y_or_n
},
+ "--prefix": {
+ "help" : "specify the installation prefix",
+ "action" : "store"
+ },
+
+ "--pkg-format": {
+ "help" : "format of result binary package",
+ "action" : "store",
+ },
+
"--quiet": {
"shortopt" : "-q",
"help" : "reduced or condensed output",
- "type" : "choice",
"choices" : true_y_or_n
},
"--quiet-build": {
"help" : "redirect build output to logs",
- "type" : "choice",
"choices" : true_y_or_n,
},
- "--rebuild-if-new-slot-abi": {
- "help" : ("Automatically rebuild or reinstall packages when SLOT/ABI := "
+ "--quiet-fail": {
+ "help" : "suppresses display of the build log on stdout",
+ "choices" : true_y_or_n,
+ },
+
+ "--rebuild-if-new-slot": {
+ "help" : ("Automatically rebuild or reinstall packages when slot/sub-slot := "
"operator dependencies can be satisfied by a newer slot, so that "
"older packages slots will become eligible for removal by the "
"--depclean action as soon as possible."),
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -891,7 +561,6 @@ def parse_opts(tmpcmdline, silent=False):
"used at both build-time and run-time are built, " + \
"if the dependency is not already installed with the " + \
"same version and revision.",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -900,24 +569,21 @@ def parse_opts(tmpcmdline, silent=False):
"used at both build-time and run-time are built, " + \
"if the dependency is not already installed with the " + \
"same version. Revision numbers are ignored.",
- "type" : "choice",
"choices" : true_y_or_n
},
"--rebuild-if-unbuilt": {
"help" : "Rebuild packages when dependencies that are " + \
"used at both build-time and run-time are built.",
- "type" : "choice",
"choices" : true_y_or_n
},
"--rebuilt-binaries": {
"help" : "replace installed packages with binary " + \
"packages that have been rebuilt",
- "type" : "choice",
"choices" : true_y_or_n
},
-
+
"--rebuilt-binaries-timestamp": {
"help" : "use only binaries that are newer than this " + \
"timestamp for --rebuilt-binaries",
@@ -931,26 +597,23 @@ def parse_opts(tmpcmdline, silent=False):
"--root-deps": {
"help" : "modify interpretation of depedencies",
- "type" : "choice",
"choices" :("True", "rdeps")
},
"--select": {
+ "shortopt" : "-w",
"help" : "add specified packages to the world set " + \
"(inverse of --oneshot)",
- "type" : "choice",
"choices" : true_y_or_n
},
"--selective": {
"help" : "identical to --noreplace",
- "type" : "choice",
"choices" : true_y_or_n
},
"--use-ebuild-visibility": {
"help" : "use unbuilt ebuild metadata for visibility checks on built packages",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -964,42 +627,39 @@ def parse_opts(tmpcmdline, silent=False):
"--usepkg": {
"shortopt" : "-k",
"help" : "use binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--usepkgonly": {
"shortopt" : "-K",
"help" : "use only binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
+ "--verbose": {
+ "shortopt" : "-v",
+ "help" : "verbose output",
+ "choices" : true_y_or_n
+ },
+ "--verbose-slot-rebuilds": {
+ "help" : "verbose slot rebuild output",
+ "choices" : true_y_or_n
+ },
}
- if _ENABLE_DYN_LINK_MAP:
- argument_options["--depclean-lib-check"] = {
- "help" : "check for consumers of libraries before removing them",
- "type" : "choice",
- "choices" : true_y_or_n
- }
-
- from optparse import OptionParser
- parser = OptionParser()
- if parser.has_option("--help"):
- parser.remove_option("--help")
+ parser = ArgumentParser(add_help=False)
for action_opt in actions:
- parser.add_option("--" + action_opt, action="store_true",
+ parser.add_argument("--" + action_opt, action="store_true",
dest=action_opt.replace("-", "_"), default=False)
for myopt in options:
- parser.add_option(myopt, action="store_true",
+ parser.add_argument(myopt, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
for shortopt, longopt in shortmapping.items():
- parser.add_option("-" + shortopt, action="store_true",
+ parser.add_argument("-" + shortopt, action="store_true",
dest=longopt.lstrip("--").replace("-", "_"), default=False)
for myalias, myopt in longopt_aliases.items():
- parser.add_option(myalias, action="store_true",
+ parser.add_argument(myalias, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
for myopt, kwargs in argument_options.items():
@@ -1007,12 +667,12 @@ def parse_opts(tmpcmdline, silent=False):
args = [myopt]
if shortopt is not None:
args.append(shortopt)
- parser.add_option(dest=myopt.lstrip("--").replace("-", "_"),
+ parser.add_argument(dest=myopt.lstrip("--").replace("-", "_"),
*args, **kwargs)
tmpcmdline = insert_optional_args(tmpcmdline)
- myoptions, myargs = parser.parse_args(args=tmpcmdline)
+ myoptions, myargs = parser.parse_known_args(args=tmpcmdline)
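
The move from optparse's parse_args() to argparse's parse_known_args() is what lets non-option tokens (package atoms, @sets) pass through instead of triggering a parse error; the leftovers come back as a second list. A standard-library sketch:

    from argparse import ArgumentParser

    parser = ArgumentParser(add_help=False)
    parser.add_argument("--ask", choices=("True", "y", "n"))
    opts, remaining = parser.parse_known_args(
        ["--ask", "y", "app-editors/vim", "@world"])
    print(opts.ask)    # 'y'
    print(remaining)   # ['app-editors/vim', '@world']
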
if myoptions.ask in true_y:
myoptions.ask = True
@@ -1058,9 +718,8 @@ def parse_opts(tmpcmdline, silent=False):
else:
myoptions.complete_graph = None
- if _ENABLE_DYN_LINK_MAP:
- if myoptions.depclean_lib_check in true_y:
- myoptions.depclean_lib_check = True
+ if myoptions.depclean_lib_check in true_y:
+ myoptions.depclean_lib_check = True
if myoptions.exclude:
bad_atoms = _find_bad_atoms(myoptions.exclude)
@@ -1127,8 +786,11 @@ def parse_opts(tmpcmdline, silent=False):
if myoptions.quiet_build in true_y:
myoptions.quiet_build = 'y'
- if myoptions.rebuild_if_new_slot_abi in true_y:
- myoptions.rebuild_if_new_slot_abi = 'y'
+ if myoptions.quiet_fail in true_y:
+ myoptions.quiet_fail = 'y'
+
+ if myoptions.rebuild_if_new_slot in true_y:
+ myoptions.rebuild_if_new_slot = 'y'
if myoptions.rebuild_if_new_ver in true_y:
myoptions.rebuild_if_new_ver = True
@@ -1215,6 +877,9 @@ def parse_opts(tmpcmdline, silent=False):
myoptions.jobs = jobs
+ if myoptions.load_average == "True":
+ myoptions.load_average = None
+
if myoptions.load_average:
try:
load_average = float(myoptions.load_average)
@@ -1228,7 +893,7 @@ def parse_opts(tmpcmdline, silent=False):
(myoptions.load_average,))
myoptions.load_average = load_average
-
+
if myoptions.rebuilt_binaries_timestamp:
try:
rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
@@ -1259,6 +924,11 @@ def parse_opts(tmpcmdline, silent=False):
else:
myoptions.usepkgonly = None
+ if myoptions.verbose in true_y:
+ myoptions.verbose = True
+ else:
+ myoptions.verbose = None
+
for myopt in options:
v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
if v:
@@ -1283,309 +953,10 @@ def parse_opts(tmpcmdline, silent=False):
if myaction is None and myoptions.deselect is True:
myaction = 'deselect'
- if myargs and isinstance(myargs[0], bytes):
- for i in range(len(myargs)):
- myargs[i] = portage._unicode_decode(myargs[i])
-
myfiles += myargs
return myaction, myopts, myfiles
-# Warn about features that may confuse users and
-# lead them to report invalid bugs.
-_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
-
-def validate_ebuild_environment(trees):
- features_warn = set()
- for myroot in trees:
- settings = trees[myroot]["vartree"].settings
- settings.validate()
- features_warn.update(
- _emerge_features_warn.intersection(settings.features))
-
- if features_warn:
- msg = "WARNING: The FEATURES variable contains one " + \
- "or more values that should be disabled under " + \
- "normal circumstances: %s" % " ".join(features_warn)
- out = portage.output.EOutput()
- for line in textwrap.wrap(msg, 65):
- out.ewarn(line)
-
-def apply_priorities(settings):
- ionice(settings)
- nice(settings)
-
-def nice(settings):
- try:
- os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
- except (OSError, ValueError) as e:
- out = portage.output.EOutput()
- out.eerror("Failed to change nice value to '%s'" % \
- settings["PORTAGE_NICENESS"])
- out.eerror("%s\n" % str(e))
-
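
The nice() helper above (being moved out of this module) wraps os.nice(), which takes a relative increment and raises OSError when an unprivileged process tries to lower its niceness; a hedged standalone sketch:

    import os

    def apply_niceness(value):
        # os.nice() adds the increment to the current niceness and
        # returns the new value; EPERM surfaces as OSError.
        try:
            return os.nice(int(value))
        except (OSError, ValueError) as e:
            print("Failed to change nice value to '%s': %s" % (value, e))
            return None

    apply_niceness("3")
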
-def ionice(settings):
-
- ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
- if ionice_cmd:
- ionice_cmd = portage.util.shlex_split(ionice_cmd)
- if not ionice_cmd:
- return
-
- variables = {"PID" : str(os.getpid())}
- cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
-
- try:
- rval = portage.process.spawn(cmd, env=os.environ)
- except portage.exception.CommandNotFound:
- # The OS kernel probably doesn't support ionice,
- # so return silently.
- return
-
- if rval != os.EX_OK:
- out = portage.output.EOutput()
- out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
- out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
-
-def clean_logs(settings):
-
- if "clean-logs" not in settings.features:
- return
-
- logdir = settings.get("PORT_LOGDIR")
- if logdir is None or not os.path.isdir(logdir):
- return
-
- options = {
- 'eerror': portage.output.EOutput().eerror,
- # uncomment next line to output a succeeded message
- #'einfo': portage.output.EOutput().einfo
- }
- cleanlogs = CleanLogs()
- cleanlogs.clean(settings=settings, options=options)
-
-def setconfig_fallback(root_config):
- setconfig = root_config.setconfig
- setconfig._create_default_config()
- setconfig._parse(update=True)
- root_config.sets = setconfig.getSets()
-
-def get_missing_sets(root_config):
- # emerge requires existence of "world", "selected", and "system"
- missing_sets = []
-
- for s in ("selected", "system", "world",):
- if s not in root_config.sets:
- missing_sets.append(s)
-
- return missing_sets
-
-def missing_sets_warning(root_config, missing_sets):
- if len(missing_sets) > 2:
- missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
- missing_sets_str += ', and "%s"' % missing_sets[-1]
- elif len(missing_sets) == 2:
- missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
- else:
- missing_sets_str = '"%s"' % missing_sets[-1]
- msg = ["emerge: incomplete set configuration, " + \
- "missing set(s): %s" % missing_sets_str]
- if root_config.sets:
- msg.append(" sets defined: %s" % ", ".join(root_config.sets))
- global_config_path = portage.const.GLOBAL_CONFIG_PATH
- if root_config.settings['EPREFIX']:
- global_config_path = os.path.join(root_config.settings['EPREFIX'],
- portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
- msg.append(" This usually means that '%s'" % \
- (os.path.join(global_config_path, "sets/portage.conf"),))
- msg.append(" is missing or corrupt.")
- msg.append(" Falling back to default world and system set configuration!!!")
- for line in msg:
- writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
-
-def ensure_required_sets(trees):
- warning_shown = False
- for root_trees in trees.values():
- missing_sets = get_missing_sets(root_trees["root_config"])
- if missing_sets and not warning_shown:
- warning_shown = True
- missing_sets_warning(root_trees["root_config"], missing_sets)
- if missing_sets:
- setconfig_fallback(root_trees["root_config"])
-
-def expand_set_arguments(myfiles, myaction, root_config):
- retval = os.EX_OK
- setconfig = root_config.setconfig
-
- sets = setconfig.getSets()
-
- # In order to know exactly which atoms/sets should be added to the
- # world file, the depgraph performs set expansion later. It will get
- # confused about where the atoms came from if it's not allowed to
- # expand them itself.
- do_not_expand = (None, )
- newargs = []
- for a in myfiles:
- if a in ("system", "world"):
- newargs.append(SETPREFIX+a)
- else:
- newargs.append(a)
- myfiles = newargs
- del newargs
- newargs = []
-
- # separators for set arguments
- ARG_START = "{"
- ARG_END = "}"
-
- for i in range(0, len(myfiles)):
- if myfiles[i].startswith(SETPREFIX):
- start = 0
- end = 0
- x = myfiles[i][len(SETPREFIX):]
- newset = ""
- while x:
- start = x.find(ARG_START)
- end = x.find(ARG_END)
- if start > 0 and start < end:
- namepart = x[:start]
- argpart = x[start+1:end]
-
- # TODO: implement proper quoting
- args = argpart.split(",")
- options = {}
- for a in args:
- if "=" in a:
- k, v = a.split("=", 1)
- options[k] = v
- else:
- options[a] = "True"
- setconfig.update(namepart, options)
- newset += (x[:start-len(namepart)]+namepart)
- x = x[end+len(ARG_END):]
- else:
- newset += x
- x = ""
- myfiles[i] = SETPREFIX+newset
-
- sets = setconfig.getSets()
-
- # display errors that occurred while loading the SetConfig instance
- for e in setconfig.errors:
- print(colorize("BAD", "Error during set creation: %s" % e))
-
- unmerge_actions = ("unmerge", "prune", "clean", "depclean")
-
- for a in myfiles:
- if a.startswith(SETPREFIX):
- s = a[len(SETPREFIX):]
- if s not in sets:
- display_missing_pkg_set(root_config, s)
- return (None, 1)
- setconfig.active.append(s)
- try:
- set_atoms = setconfig.getSetAtoms(s)
- except portage.exception.PackageSetNotFound as e:
- writemsg_level(("emerge: the given set '%s' " + \
- "contains a non-existent set named '%s'.\n") % \
- (s, e), level=logging.ERROR, noiselevel=-1)
- if s in ('world', 'selected') and \
- SETPREFIX + e.value in sets['selected']:
- writemsg_level(("Use `emerge --deselect %s%s` to "
- "remove this set from world_sets.\n") %
- (SETPREFIX, e,), level=logging.ERROR,
- noiselevel=-1)
- return (None, 1)
- if myaction in unmerge_actions and \
- not sets[s].supportsOperation("unmerge"):
- sys.stderr.write("emerge: the given set '%s' does " % s + \
- "not support unmerge operations\n")
- retval = 1
- elif not set_atoms:
- print("emerge: '%s' is an empty set" % s)
- elif myaction not in do_not_expand:
- newargs.extend(set_atoms)
- else:
- newargs.append(SETPREFIX+s)
- for e in sets[s].errors:
- print(e)
- else:
- newargs.append(a)
- return (newargs, retval)
-
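
The brace-scanning loop in expand_set_arguments() above turns an argument like @installed{jobs=2,quiet} into a set name plus an options dict (quoting is not handled, per the TODO). A simplified sketch of that parse:

    def parse_set_arg(token, prefix="@"):
        x = token[len(prefix):]
        start, end = x.find("{"), x.find("}")
        if 0 < start < end:
            name, argpart = x[:start], x[start + 1:end]
            options = {}
            for a in argpart.split(","):
                if "=" in a:
                    k, v = a.split("=", 1)
                    options[k] = v
                else:
                    options[a] = "True"
            return name, options
        return x, {}

    print(parse_set_arg("@installed{jobs=2,quiet}"))
    # ('installed', {'jobs': '2', 'quiet': 'True'})
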
-def repo_name_check(trees):
- missing_repo_names = set()
- for root_trees in trees.values():
- porttree = root_trees.get("porttree")
- if porttree:
- portdb = porttree.dbapi
- missing_repo_names.update(portdb.getMissingRepoNames())
- if portdb.porttree_root in missing_repo_names and \
- not os.path.exists(os.path.join(
- portdb.porttree_root, "profiles")):
- # This is normal if $PORTDIR happens to be empty,
- # so don't warn about it.
- missing_repo_names.remove(portdb.porttree_root)
-
- if missing_repo_names:
- msg = []
- msg.append("WARNING: One or more repositories " + \
- "have missing repo_name entries:")
- msg.append("")
- for p in missing_repo_names:
- msg.append("\t%s/profiles/repo_name" % (p,))
- msg.append("")
- msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
- "should be a plain text file containing a unique " + \
- "name for the repository on the first line.", 70))
- msg.append("\n")
- writemsg_level("".join("%s\n" % l for l in msg),
- level=logging.WARNING, noiselevel=-1)
-
- return bool(missing_repo_names)
-
-def repo_name_duplicate_check(trees):
- ignored_repos = {}
- for root, root_trees in trees.items():
- if 'porttree' in root_trees:
- portdb = root_trees['porttree'].dbapi
- if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
- for repo_name, paths in portdb.getIgnoredRepos():
- k = (root, repo_name, portdb.getRepositoryPath(repo_name))
- ignored_repos.setdefault(k, []).extend(paths)
-
- if ignored_repos:
- msg = []
- msg.append('WARNING: One or more repositories ' + \
- 'have been ignored due to duplicate')
- msg.append(' profiles/repo_name entries:')
- msg.append('')
- for k in sorted(ignored_repos):
- msg.append(' %s overrides' % ", ".join(k))
- for path in ignored_repos[k]:
- msg.append(' %s' % (path,))
- msg.append('')
- msg.extend(' ' + x for x in textwrap.wrap(
- "All profiles/repo_name entries must be unique in order " + \
- "to avoid having duplicates ignored. " + \
- "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
- "/etc/make.conf if you would like to disable this warning."))
- msg.append("\n")
- writemsg_level(''.join('%s\n' % l for l in msg),
- level=logging.WARNING, noiselevel=-1)
-
- return bool(ignored_repos)
-
-def config_protect_check(trees):
- for root, root_trees in trees.items():
- settings = root_trees["root_config"].settings
- if not settings.get("CONFIG_PROTECT"):
- msg = "!!! CONFIG_PROTECT is empty"
- if settings["ROOT"] != "/":
- msg += " for '%s'" % root
- msg += "\n"
- writemsg_level(msg, level=logging.WARN, noiselevel=-1)
-
def profile_check(trees, myaction):
if myaction in ("help", "info", "search", "sync", "version"):
return os.EX_OK
@@ -1603,16 +974,6 @@ def profile_check(trees, myaction):
return 1
return os.EX_OK
-def check_procfs():
- procfs_path = '/proc'
- if platform.system() not in ("Linux",) or \
- os.path.ismount(procfs_path):
- return os.EX_OK
- msg = "It seems that %s is not mounted. You have been warned." % procfs_path
- writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
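
check_procfs() above only warns; the underlying test is just os.path.ismount(). A minimal equivalent for reference:

    import os
    import platform

    def procfs_mounted():
        # Only meaningful on Linux; elsewhere report success.
        return platform.system() != "Linux" or os.path.ismount("/proc")

    print(procfs_mounted())
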
def emerge_main(args=None):
"""
@param args: command arguments (default: sys.argv[1:])
@@ -1621,11 +982,12 @@ def emerge_main(args=None):
if args is None:
args = sys.argv[1:]
- portage._disable_legacy_globals()
- portage.dep._internal_warnings = True
+ args = portage._decode_argv(args)
+
# Disable color until we're sure that it should be enabled (after
# EMERGE_DEFAULT_OPTS has been parsed).
portage.output.havecolor = 0
+
# This first pass is just for options that need to be known as early as
# possible, such as --config-root. They will be parsed again later,
# together with EMERGE_DEFAULT_OPTS (which may vary depending on the
@@ -1637,428 +999,45 @@ def emerge_main(args=None):
os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
if "--root" in myopts:
os.environ["ROOT"] = myopts["--root"]
+ if "--prefix" in myopts:
+ os.environ["EPREFIX"] = myopts["--prefix"]
if "--accept-properties" in myopts:
os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+ if "--accept-restrict" in myopts:
+ os.environ["ACCEPT_RESTRICT"] = myopts["--accept-restrict"]
+
+ # optimize --help (no need to load config / EMERGE_DEFAULT_OPTS)
+ if myaction == "help":
+ emerge_help()
+ return os.EX_OK
+ elif myaction == "moo":
+ print(COWSAY_MOO % platform.system())
+ return os.EX_OK
# Portage needs to ensure a sane umask for the files it creates.
os.umask(0o22)
- settings, trees, mtimedb = load_emerge_config()
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- rval = profile_check(trees, myaction)
+ if myaction == "sync":
+ portage._sync_mode = True
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, opts=myopts)
+ rval = profile_check(emerge_config.trees, emerge_config.action)
if rval != os.EX_OK:
return rval
tmpcmdline = []
if "--ignore-default-opts" not in myopts:
- tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
+ tmpcmdline.extend(portage.util.shlex_split(
+ emerge_config.target_config.settings.get(
+ "EMERGE_DEFAULT_OPTS", "")))
tmpcmdline.extend(args)
- myaction, myopts, myfiles = parse_opts(tmpcmdline)
-
- # skip global updates prior to sync, since it's called after sync
- if myaction not in ('help', 'info', 'sync', 'version') and \
- myopts.get('--package-moves') != 'n' and \
- _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
- mtimedb.commit()
- # Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
-
- xterm_titles = "notitles" not in settings.features
- if xterm_titles:
- xtermTitle("emerge")
-
- if "--digest" in myopts:
- os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
- # Reload the whole config from scratch so that the portdbapi internal
- # config is updated with new FEATURES.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
-
- # NOTE: adjust_configs() can map options to FEATURES, so any relevant
- # options adjustments should be made prior to calling adjust_configs().
- if "--buildpkgonly" in myopts:
- myopts["--buildpkg"] = True
-
- adjust_configs(myopts, trees)
- apply_priorities(settings)
-
- if myaction == 'version':
- writemsg_stdout(getportageversion(
- settings["PORTDIR"], None,
- settings.profile_path, settings["CHOST"],
- trees[settings['EROOT']]['vartree'].dbapi) + '\n', noiselevel=-1)
- return 0
- elif myaction == 'help':
- _emerge.help.help()
- return 0
-
- spinner = stdout_spinner()
- if "candy" in settings.features:
- spinner.update = spinner.update_scroll
-
- if "--quiet" not in myopts:
- portage.deprecated_profile_check(settings=settings)
- if portage.const._ENABLE_REPO_NAME_WARN:
- # Bug #248603 - Disable warnings about missing
- # repo_name entries for stable branch.
- repo_name_check(trees)
- repo_name_duplicate_check(trees)
- config_protect_check(trees)
- check_procfs()
-
- if "getbinpkg" in settings.features:
- myopts["--getbinpkg"] = True
-
- if "--getbinpkgonly" in myopts:
- myopts["--getbinpkg"] = True
-
- if "--getbinpkgonly" in myopts:
- myopts["--usepkgonly"] = True
-
- if "--getbinpkg" in myopts:
- myopts["--usepkg"] = True
-
- if "--usepkgonly" in myopts:
- myopts["--usepkg"] = True
-
- if "--buildpkgonly" in myopts:
- # --buildpkgonly will not merge anything, so
- # it cancels all binary package options.
- for opt in ("--getbinpkg", "--getbinpkgonly",
- "--usepkg", "--usepkgonly"):
- myopts.pop(opt, None)
-
- for mytrees in trees.values():
- mydb = mytrees["porttree"].dbapi
- # Freeze the portdbapi for performance (memoize all xmatch results).
- mydb.freeze()
-
- if myaction in ('search', None) and \
- "--usepkg" in myopts:
- # Populate the bintree with current --getbinpkg setting.
- # This needs to happen before expand_set_arguments(), in case
- # any sets use the bintree.
- mytrees["bintree"].populate(
- getbinpkgs="--getbinpkg" in myopts)
-
- del mytrees, mydb
-
- if "moo" in myfiles:
- print(COWSAY_MOO % platform.system())
- msg = ("The above `emerge moo` display is deprecated. "
- "Please use `emerge --moo` instead.")
- for line in textwrap.wrap(msg, 50):
- print(" %s %s" % (colorize("WARN", "*"), line))
-
- for x in myfiles:
- ext = os.path.splitext(x)[1]
- if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
- print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
- break
-
- root_config = trees[settings['EROOT']]['root_config']
- if myaction == "moo":
- print(COWSAY_MOO % platform.system())
- return os.EX_OK
- elif myaction == "list-sets":
- writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
- return os.EX_OK
- elif myaction == "check-news":
- news_counts = count_unread_news(
- root_config.trees["porttree"].dbapi,
- root_config.trees["vartree"].dbapi)
- if any(news_counts.values()):
- display_news_notifications(news_counts)
- elif "--quiet" not in myopts:
- print("", colorize("GOOD", "*"), "No news items were found.")
- return os.EX_OK
-
- ensure_required_sets(trees)
-
- # only expand sets for actions taking package arguments
- oldargs = myfiles[:]
- if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
- myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
- if retval != os.EX_OK:
- return retval
-
- # Need to handle empty sets specially, otherwise emerge will react
- # with the help message for empty argument lists
- if oldargs and not myfiles:
- print("emerge: no targets left after set expansion")
- return 0
-
- if ("--tree" in myopts) and ("--columns" in myopts):
- print("emerge: can't specify both of \"--tree\" and \"--columns\".")
- return 1
-
- if '--emptytree' in myopts and '--noreplace' in myopts:
- writemsg_level("emerge: can't specify both of " + \
- "\"--emptytree\" and \"--noreplace\".\n",
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- if ("--quiet" in myopts):
- spinner.update = spinner.update_quiet
- portage.util.noiselimit = -1
-
- if "--fetch-all-uri" in myopts:
- myopts["--fetchonly"] = True
-
- if "--skipfirst" in myopts and "--resume" not in myopts:
- myopts["--resume"] = True
+ emerge_config.action, emerge_config.opts, emerge_config.args = \
+ parse_opts(tmpcmdline)
- # Allow -p to remove --ask
- if "--pretend" in myopts:
- myopts.pop("--ask", None)
-
- # forbid --ask when not in a terminal
- # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
- if ("--ask" in myopts) and (not sys.stdin.isatty()):
- portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
- noiselevel=-1)
- return 1
-
- if settings.get("PORTAGE_DEBUG", "") == "1":
- spinner.update = spinner.update_quiet
- portage.util.noiselimit = 0
- if "python-trace" in settings.features:
- import portage.debug as portage_debug
- portage_debug.set_trace(True)
-
- if not ("--quiet" in myopts):
- if '--nospinner' in myopts or \
- settings.get('TERM') == 'dumb' or \
- not sys.stdout.isatty():
- spinner.update = spinner.update_basic
-
- if "--debug" in myopts:
- print("myaction", myaction)
- print("myopts", myopts)
-
- if not myaction and not myfiles and "--resume" not in myopts:
- _emerge.help.help()
- return 1
-
- pretend = "--pretend" in myopts
- fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
- buildpkgonly = "--buildpkgonly" in myopts
-
- # check if root user is the current user for the actions where emerge needs this
- if portage.secpass < 2:
- # We've already allowed "--version" and "--help" above.
- if "--pretend" not in myopts and myaction not in ("search","info"):
- need_superuser = myaction in ('clean', 'depclean', 'deselect',
- 'prune', 'unmerge') or not \
- (fetchonly or \
- (buildpkgonly and portage.data.secpass >= 1) or \
- myaction in ("metadata", "regen", "sync"))
- if portage.secpass < 1 or \
- need_superuser:
- if need_superuser:
- access_desc = "superuser"
- else:
- access_desc = "portage group"
- # Always show portage_group_warning() when only portage group
- # access is required but the user is not in the portage group.
- from portage.data import portage_group_warning
- if "--ask" in myopts:
- writemsg_stdout("This action requires %s access...\n" % \
- (access_desc,), noiselevel=-1)
- if portage.secpass < 1 and not need_superuser:
- portage_group_warning()
- if userquery("Would you like to add --pretend to options?",
- "--ask-enter-invalid" in myopts) == "No":
- return 128 + signal.SIGINT
- myopts["--pretend"] = True
- del myopts["--ask"]
- else:
- sys.stderr.write(("emerge: %s access is required\n") \
- % access_desc)
- if portage.secpass < 1 and not need_superuser:
- portage_group_warning()
- return 1
-
- # Disable emergelog for everything except build or unmerge operations.
- # This helps minimize parallel emerge.log entries that can confuse log
- # parsers like genlop.
- disable_emergelog = False
- for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
- if x in myopts:
- disable_emergelog = True
- break
- if disable_emergelog:
- pass
- elif myaction in ("search", "info"):
- disable_emergelog = True
- elif portage.data.secpass < 1:
- disable_emergelog = True
-
- _emerge.emergelog._disable = disable_emergelog
-
- if not disable_emergelog:
- if 'EMERGE_LOG_DIR' in settings:
- try:
- # At least the parent needs to exist for the lock file.
- portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
- except portage.exception.PortageException as e:
- writemsg_level("!!! Error creating directory for " + \
- "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
- (settings['EMERGE_LOG_DIR'], e),
- noiselevel=-1, level=logging.ERROR)
- portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
- else:
- _emerge.emergelog._emerge_log_dir = settings["EMERGE_LOG_DIR"]
- else:
- _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
- settings["EPREFIX"].lstrip(os.sep), "var", "log")
- portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
-
- if not "--pretend" in myopts:
- emergelog(xterm_titles, "Started emerge on: "+\
- _unicode_decode(
- time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
- encoding=_encodings['content'], errors='replace'))
- myelogstr=""
- if myopts:
- opt_list = []
- for opt, arg in myopts.items():
- if arg is True:
- opt_list.append(opt)
- elif isinstance(arg, list):
- # arguments like --exclude that use 'append' action
- for x in arg:
- opt_list.append("%s=%s" % (opt, x))
- else:
- opt_list.append("%s=%s" % (opt, arg))
- myelogstr=" ".join(opt_list)
- if myaction:
- myelogstr += " --" + myaction
- if myfiles:
- myelogstr += " " + " ".join(oldargs)
- emergelog(xterm_titles, " *** emerge " + myelogstr)
- del oldargs
-
- def emergeexitsig(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
- sys.exit(128 + signum)
- signal.signal(signal.SIGINT, emergeexitsig)
- signal.signal(signal.SIGTERM, emergeexitsig)
-
- def emergeexit():
- """This gets out final log message in before we quit."""
- if "--pretend" not in myopts:
- emergelog(xterm_titles, " *** terminating.")
- if xterm_titles:
- xtermTitleReset()
- portage.atexit_register(emergeexit)
-
- if myaction in ("config", "metadata", "regen", "sync"):
- if "--pretend" in myopts:
- sys.stderr.write(("emerge: The '%s' action does " + \
- "not support '--pretend'.\n") % myaction)
- return 1
-
- if "sync" == myaction:
- return action_sync(settings, trees, mtimedb, myopts, myaction)
- elif "metadata" == myaction:
- action_metadata(settings, portdb, myopts)
- elif myaction=="regen":
- validate_ebuild_environment(trees)
- return action_regen(settings, portdb, myopts.get("--jobs"),
- myopts.get("--load-average"))
- # HELP action
- elif "config"==myaction:
- validate_ebuild_environment(trees)
- action_config(settings, trees, myopts, myfiles)
-
- # SEARCH action
- elif "search"==myaction:
- validate_ebuild_environment(trees)
- action_search(trees[settings['EROOT']]['root_config'],
- myopts, myfiles, spinner)
-
- elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
- validate_ebuild_environment(trees)
- rval = action_uninstall(settings, trees, mtimedb["ldpath"],
- myopts, myaction, myfiles, spinner)
- if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
- post_emerge(myaction, myopts, myfiles, settings['EROOT'],
- trees, mtimedb, rval)
- return rval
-
- elif myaction == 'info':
-
- # Ensure atoms are valid before calling unmerge().
- vardb = trees[settings['EROOT']]['vartree'].dbapi
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- bindb = trees[settings['EROOT']]["bintree"].dbapi
- valid_atoms = []
- for x in myfiles:
- if is_valid_package_atom(x, allow_repo=True):
- try:
- #look at the installed files first, if there is no match
- #look at the ebuilds, since EAPI 4 allows running pkg_info
- #on non-installed packages
- valid_atom = dep_expand(x, mydb=vardb, settings=settings)
- if valid_atom.cp.split("/")[0] == "null":
- valid_atom = dep_expand(x, mydb=portdb, settings=settings)
- if valid_atom.cp.split("/")[0] == "null" and "--usepkg" in myopts:
- valid_atom = dep_expand(x, mydb=bindb, settings=settings)
- valid_atoms.append(valid_atom)
- except portage.exception.AmbiguousPackageName as e:
- msg = "The short ebuild name \"" + x + \
- "\" is ambiguous. Please specify " + \
- "one of the following " + \
- "fully-qualified ebuild names instead:"
- for line in textwrap.wrap(msg, 70):
- writemsg_level("!!! %s\n" % (line,),
- level=logging.ERROR, noiselevel=-1)
- for i in e.args[0]:
- writemsg_level(" %s\n" % colorize("INFORM", i),
- level=logging.ERROR, noiselevel=-1)
- writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
- return 1
- continue
- msg = []
- msg.append("'%s' is not a valid package atom." % (x,))
- msg.append("Please check ebuild(5) for full details.")
- writemsg_level("".join("!!! %s\n" % line for line in msg),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- return action_info(settings, trees, myopts, valid_atoms)
-
- # "update", "system", or just process files:
- else:
- validate_ebuild_environment(trees)
-
- for x in myfiles:
- if x.startswith(SETPREFIX) or \
- is_valid_package_atom(x, allow_repo=True):
- continue
- if x[:1] == os.sep:
- continue
- try:
- os.lstat(x)
+ try:
+ return run_action(emerge_config)
+ finally:
+ # Call destructors for our portdbapi instances.
+ for x in emerge_config.trees.values():
+ if "porttree" in x.lazy_items:
continue
- except OSError:
- pass
- msg = []
- msg.append("'%s' is not a valid package atom." % (x,))
- msg.append("Please check ebuild(5) for full details.")
- writemsg_level("".join("!!! %s\n" % line for line in msg),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- # GLEP 42 says to display news *after* an emerge --pretend
- if "--pretend" not in myopts:
- display_news_notification(root_config, myopts)
- retval = action_build(settings, trees, mtimedb,
- myopts, myaction, myfiles, spinner)
- post_emerge(myaction, myopts, myfiles, settings['EROOT'],
- trees, mtimedb, retval)
-
- return retval
+ x["porttree"].dbapi.close_caches()
diff --git a/pym/_emerge/post_emerge.py b/pym/_emerge/post_emerge.py
new file mode 100644
index 000000000..d5f1ba5fa
--- /dev/null
+++ b/pym/_emerge/post_emerge.py
@@ -0,0 +1,165 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.modules.logs.logs import CleanLogs
+from portage.news import count_unread_news, display_news_notifications
+from portage.output import colorize
+from portage.util._dyn_libs.display_preserved_libs import \
+ display_preserved_libs
+from portage.util._info_files import chk_updated_info_files
+
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from .emergelog import emergelog
+from ._flush_elog_mod_echo import _flush_elog_mod_echo
+
+def clean_logs(settings):
+
+ if "clean-logs" not in settings.features:
+ return
+
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ cleanlogs = CleanLogs()
+ errors = cleanlogs.clean(settings=settings)
+ if errors:
+ out = portage.output.EOutput()
+ for msg in errors:
+ out.eerror(msg)
+
+def display_news_notification(root_config, myopts):
+ if "news" not in root_config.settings.features:
+ return
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ news_counts = count_unread_news(portdb, vardb)
+ display_news_notifications(news_counts)
+
+def show_depclean_suggestion():
+ out = portage.output.EOutput()
+ msg = "After world updates, it is important to remove " + \
+ "obsolete packages with emerge --depclean. Refer " + \
+ "to `man emerge` for more information."
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+def post_emerge(myaction, myopts, myfiles,
+ target_root, trees, mtimedb, retval):
+ """
+ Misc. things to run at the end of a merge session.
+
+ Update Info Files
+ Update Config Files
+ Update News Items
+ Commit mtimeDB
+ Display preserved libs warnings
+
+ @param myaction: The action returned from parse_opts()
+ @type myaction: String
+ @param myopts: emerge options
+ @type myopts: dict
+ @param myfiles: emerge arguments
+ @type myfiles: list
+ @param target_root: The target EROOT for myaction
+ @type target_root: String
+ @param trees: A dictionary mapping each ROOT to its package databases
+ @type trees: dict
+ @param mtimedb: The mtimeDB to store data needed across merge invocations
+ @type mtimedb: MtimeDB class instance
+ @param retval: Emerge's return value
+ @type retval: Int
+ """
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]['vartree'].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" in myopts:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+ if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ # Explicitly load and prune the PreservedLibsRegistry in order
+ # to ensure that we do not display stale data.
+ vardbapi._plib_registry.load()
+
+ if vardbapi._plib_registry.hasEntries():
+ if "--quiet" in myopts:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs found")
+ else:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs:")
+ display_preserved_libs(vardbapi)
+ print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
+ " to rebuild packages using these libraries")
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ portage.util.writemsg_level(
+ " %s spawn failed of %s\n" %
+ (colorize("BAD", "*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ clean_logs(settings)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
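
The vdb_lock block in post_emerge() above follows the usual lock/try/finally shape so the vardb lock is released even if the info-file check or mtimedb.commit() raises; a condensed sketch of that pattern (names mirror the code above):

    def commit_under_lock(vardbapi, mtimedb, writable):
        locked = False
        if writable:
            vardbapi.lock()
            locked = True
        try:
            mtimedb.commit()
        finally:
            if locked:
                vardbapi.unlock()
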
diff --git a/pym/_emerge/resolver/backtracking.py b/pym/_emerge/resolver/backtracking.py
index d8f49c679..c29b9d42a 100644
--- a/pym/_emerge/resolver/backtracking.py
+++ b/pym/_emerge/resolver/backtracking.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
@@ -7,8 +7,8 @@ class BacktrackParameter(object):
__slots__ = (
"needed_unstable_keywords", "runtime_pkg_mask", "needed_use_config_changes", "needed_license_changes",
- "rebuild_list", "reinstall_list", "needed_p_mask_changes",
- "slot_abi_replace_installed"
+ "prune_rebuilds", "rebuild_list", "reinstall_list", "needed_p_mask_changes",
+ "slot_operator_mask_built", "slot_operator_replace_installed"
)
def __init__(self):
@@ -19,7 +19,9 @@ class BacktrackParameter(object):
self.needed_license_changes = {}
self.rebuild_list = set()
self.reinstall_list = set()
- self.slot_abi_replace_installed = set()
+ self.slot_operator_replace_installed = set()
+ self.slot_operator_mask_built = set()
+ self.prune_rebuilds = False
def __deepcopy__(self, memo=None):
if memo is None:
@@ -35,7 +37,9 @@ class BacktrackParameter(object):
result.needed_license_changes = copy.copy(self.needed_license_changes)
result.rebuild_list = copy.copy(self.rebuild_list)
result.reinstall_list = copy.copy(self.reinstall_list)
- result.slot_abi_replace_installed = copy.copy(self.slot_abi_replace_installed)
+ result.slot_operator_replace_installed = copy.copy(self.slot_operator_replace_installed)
+ result.slot_operator_mask_built = self.slot_operator_mask_built.copy()
+ result.prune_rebuilds = self.prune_rebuilds
# runtime_pkg_mask contains nested dicts that must also be copied
result.runtime_pkg_mask = {}
@@ -52,7 +56,9 @@ class BacktrackParameter(object):
self.needed_license_changes == other.needed_license_changes and \
self.rebuild_list == other.rebuild_list and \
self.reinstall_list == other.reinstall_list and \
- self.slot_abi_replace_installed == other.slot_abi_replace_installed
+ self.slot_operator_replace_installed == other.slot_operator_replace_installed and \
+ self.slot_operator_mask_built == other.slot_operator_mask_built and \
+ self.prune_rebuilds == other.prune_rebuilds
class _BacktrackNode(object):
@@ -125,7 +131,7 @@ class Backtracker(object):
for pkg, mask_info in runtime_pkg_mask.items():
if "missing dependency" in mask_info or \
- "slot_abi_mask_built" in mask_info:
+ "slot_operator_mask_built" in mask_info:
continue
entry_is_valid = False
@@ -192,16 +198,28 @@ class Backtracker(object):
para.needed_use_config_changes[pkg] = (new_use, new_changes)
elif change == "slot_conflict_abi":
new_node.terminal = False
- elif change == "slot_abi_mask_built":
+ elif change == "slot_operator_mask_built":
+ para.slot_operator_mask_built.update(data)
for pkg, mask_reasons in data.items():
para.runtime_pkg_mask.setdefault(pkg,
{}).update(mask_reasons)
- elif change == "slot_abi_replace_installed":
- para.slot_abi_replace_installed.update(data)
+ elif change == "slot_operator_replace_installed":
+ para.slot_operator_replace_installed.update(data)
elif change == "rebuild_list":
para.rebuild_list.update(data)
elif change == "reinstall_list":
para.reinstall_list.update(data)
+ elif change == "prune_rebuilds":
+ para.prune_rebuilds = True
+ para.slot_operator_replace_installed.clear()
+ for pkg in para.slot_operator_mask_built:
+ runtime_masks = para.runtime_pkg_mask.get(pkg)
+ if runtime_masks is None:
+ continue
+ runtime_masks.pop("slot_operator_mask_built", None)
+ if not runtime_masks:
+ para.runtime_pkg_mask.pop(pkg)
+ para.slot_operator_mask_built.clear()
self._add(new_node, explore=explore)
self._current_node = new_node
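
BacktrackParameter.__deepcopy__ above copies each container explicitly; the standard protocol also registers the result in memo before copying members, which is how the copy module breaks reference cycles. A minimal illustration (standalone, not Portage's class):

    import copy

    class Params(object):
        def __init__(self):
            self.rebuild_list = set()
            self.prune_rebuilds = False

        def __deepcopy__(self, memo=None):
            if memo is None:
                memo = {}
            result = Params()
            memo[id(self)] = result  # register before copying members
            result.rebuild_list = copy.copy(self.rebuild_list)
            result.prune_rebuilds = self.prune_rebuilds
            return result

    a = Params()
    a.rebuild_list.add("cat/pkg")
    b = copy.deepcopy(a)
    print(b.rebuild_list is a.rebuild_list)  # False
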
diff --git a/pym/_emerge/resolver/circular_dependency.py b/pym/_emerge/resolver/circular_dependency.py
index aca81face..b7106714a 100644
--- a/pym/_emerge/resolver/circular_dependency.py
+++ b/pym/_emerge/resolver/circular_dependency.py
@@ -1,7 +1,7 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
from itertools import chain, product
import logging
@@ -11,6 +11,7 @@ from portage.exception import InvalidDependString
from portage.output import colorize
from portage.util import writemsg_level
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.Package import Package
class circular_dependency_handler(object):
@@ -61,8 +62,7 @@ class circular_dependency_handler(object):
node = nodes[0]
display_order.append(node)
tempgraph.remove(node)
- display_order.reverse()
- return display_order
+ return tuple(display_order)
def _prepare_circular_dep_message(self):
"""
@@ -113,9 +113,10 @@ class circular_dependency_handler(object):
parent_atoms = self.all_parent_atoms.get(pkg)
if priorities[-1].buildtime:
- dep = parent.metadata["DEPEND"]
+ dep = " ".join(parent._metadata[k]
+ for k in Package._buildtime_keys)
elif priorities[-1].runtime:
- dep = parent.metadata["RDEPEND"]
+ dep = parent._metadata["RDEPEND"]
for ppkg, atom in parent_atoms:
if ppkg == parent:
@@ -125,7 +126,7 @@ class circular_dependency_handler(object):
try:
affecting_use = extract_affecting_use(dep, parent_atom,
- eapi=parent.metadata["EAPI"])
+ eapi=parent.eapi)
except InvalidDependString:
if not parent.installed:
raise
@@ -144,7 +145,8 @@ class circular_dependency_handler(object):
#If any of the flags we're going to touch is in REQUIRED_USE, add all
#other flags in REQUIRED_USE to affecting_use, to not lose any solution.
required_use_flags = get_required_use_flags(
- parent.metadata.get("REQUIRED_USE", ""))
+ parent._metadata.get("REQUIRED_USE", ""),
+ eapi=parent.eapi)
if affecting_use.intersection(required_use_flags):
# TODO: Find out exactly which REQUIRED_USE flags are
@@ -186,9 +188,11 @@ class circular_dependency_handler(object):
parent_atom not in reduced_dep:
#We found an assignment that removes the atom from 'dep'.
#Make sure it doesn't conflict with REQUIRED_USE.
- required_use = parent.metadata.get("REQUIRED_USE", "")
+ required_use = parent._metadata.get("REQUIRED_USE", "")
- if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
+ if check_required_use(required_use, current_use,
+ parent.iuse.is_valid_flag,
+ eapi=parent.eapi):
use = self.depgraph._pkg_use_enabled(parent)
solution = set()
for flag, state in zip(affecting_use, use_state):
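
The circular-dependency hunks above thread eapi= through get_required_use_flags() and check_required_use() because REQUIRED_USE syntax varies by EAPI (e.g. ?? at-most-one-of groups are EAPI 5+). A hedged usage sketch, assuming a Portage environment where portage.dep.check_required_use has this signature:

    from portage.dep import check_required_use

    # "^^ ( ... )" requires exactly one of the listed flags.
    result = check_required_use(
        "^^ ( gtk qt5 )",    # REQUIRED_USE string
        ["gtk"],             # enabled USE flags
        lambda flag: True,   # is_valid_flag predicate
        eapi="5")
    print(bool(result))  # True: exactly one of gtk/qt5 enabled
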
diff --git a/pym/_emerge/resolver/output.py b/pym/_emerge/resolver/output.py
index 61cfe9e98..5f550be0d 100644
--- a/pym/_emerge/resolver/output.py
+++ b/pym/_emerge/resolver/output.py
@@ -1,26 +1,31 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Resolver output display operation.
"""
+from __future__ import unicode_literals
+
__all__ = (
- "Display",
+ "Display", "format_unmatched_atom",
)
import sys
+import portage
from portage import os
-from portage import _unicode_decode
from portage.dbapi.dep_expand import dep_expand
-from portage.dep import cpvequal, _repo_separator
+from portage.dep import cpvequal, _repo_separator, _slot_separator
+from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidDependString, SignatureException
+from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild._spawn_nofetch import spawn_nofetch
from portage.output import ( blue, colorize, create_color_func,
- darkblue, darkgreen, green, nc_len, red, teal, turquoise, yellow )
+ darkblue, darkgreen, green, nc_len, teal)
bad = create_color_func("BAD")
+from portage._sets.base import InternalPackageSet
from portage.util import writemsg_stdout
-from portage.versions import best, catpkgsplit
+from portage.versions import best, cpv_getversion
from _emerge.Blocker import Blocker
from _emerge.create_world_atom import create_world_atom
@@ -30,7 +35,9 @@ from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
if sys.hexversion >= 0x3000000:
basestring = str
-
+ _unicode = str
+else:
+ _unicode = unicode
class Display(object):
"""Formats and outputs the depgrah supplied it for merge/re-merge, etc.
@@ -54,11 +61,6 @@ class Display(object):
self.oldlp = None
self.myfetchlist = None
self.indent = ''
- self.is_new = True
- self.cur_use = None
- self.cur_iuse = None
- self.old_use = ''
- self.old_iuse = ''
self.use_expand = None
self.use_expand_hidden = None
self.pkgsettings = None
@@ -68,93 +70,54 @@ class Display(object):
self.blocker_style = None
- def _blockers(self, pkg, fetch_symbol):
- """Processes pkg for blockers and adds colorized strings to
+ def _blockers(self, blocker):
+ """Adds colorized strings to
self.print_msg and self.blockers
- @param pkg: _emerge.Package.Package instance
- @param fetch_symbol: string
+ @param blocker: _emerge.Blocker.Blocker instance
@rtype: bool
Modifies class globals: self.blocker_style, self.resolved,
self.print_msg
"""
- if pkg.satisfied:
+ if blocker.satisfied:
self.blocker_style = "PKG_BLOCKER_SATISFIED"
- addl = "%s %s " % (colorize(self.blocker_style, "b"),
- fetch_symbol)
+ addl = "%s " % (colorize(self.blocker_style, "b"),)
else:
self.blocker_style = "PKG_BLOCKER"
- addl = "%s %s " % (colorize(self.blocker_style, "B"),
- fetch_symbol)
+ addl = "%s " % (colorize(self.blocker_style, "B"),)
addl += self.empty_space_in_brackets()
self.resolved = dep_expand(
- str(pkg.atom).lstrip("!"), mydb=self.vardb,
+ _unicode(blocker.atom).lstrip("!"), mydb=self.vardb,
settings=self.pkgsettings
)
if self.conf.columns and self.conf.quiet:
- addl += " " + colorize(self.blocker_style, str(self.resolved))
+ addl += " " + colorize(self.blocker_style, _unicode(self.resolved))
else:
addl = "[%s %s] %s%s" % \
(colorize(self.blocker_style, "blocks"),
addl, self.indent,
- colorize(self.blocker_style, str(self.resolved))
+ colorize(self.blocker_style, _unicode(self.resolved))
)
- block_parents = self.conf.blocker_parents.parent_nodes(pkg)
- block_parents = set([pnode[2] for pnode in block_parents])
+ block_parents = self.conf.blocker_parents.parent_nodes(blocker)
+ block_parents = set(_unicode(pnode.cpv) for pnode in block_parents)
block_parents = ", ".join(block_parents)
- if self.resolved != pkg[2]:
+ if blocker.atom.blocker.overlap.forbid:
+ blocking_desc = "hard blocking"
+ else:
+ blocking_desc = "blocking"
+ if self.resolved != blocker.atom:
addl += colorize(self.blocker_style,
- " (\"%s\" is blocking %s)") % \
- (str(pkg.atom).lstrip("!"), block_parents)
+ " (\"%s\" is %s %s)" %
+ (_unicode(blocker.atom).lstrip("!"),
+ blocking_desc, block_parents))
else:
addl += colorize(self.blocker_style,
- " (is blocking %s)") % block_parents
- if isinstance(pkg, Blocker) and pkg.satisfied:
- if self.conf.columns:
- return True
- self.print_msg.append(addl)
+ " (is %s %s)" % (blocking_desc, block_parents))
+ if blocker.satisfied:
+ if not self.conf.columns:
+ self.print_msg.append(addl)
else:
self.blockers.append(addl)
- return False
-
-
- def _display_use(self, pkg, myoldbest, myinslotlist):
- """ USE flag display
-
- @param pkg: _emerge.Package.Package instance
- @param myoldbest: list of installed versions
- @param myinslotlist: list of installed slots
- Modifies class globals: self.forced_flags, self.cur_iuse,
- self.old_iuse, self.old_use, self.use_expand
- """
-
- self.forced_flags = set()
- self.forced_flags.update(pkg.use.force)
- self.forced_flags.update(pkg.use.mask)
-
- self.cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
- if flag in pkg.iuse.all]
- self.cur_iuse = sorted(pkg.iuse.all)
-
- if myoldbest and myinslotlist:
- previous_cpv = myoldbest[0].cpv
- else:
- previous_cpv = pkg.cpv
- if self.vardb.cpv_exists(previous_cpv):
- previous_pkg = self.vardb.match_pkgs('=' + previous_cpv)[0]
- self.old_iuse = sorted(previous_pkg.iuse.all)
- self.old_use = previous_pkg.use.enabled
- self.is_new = False
- else:
- self.old_iuse = []
- self.old_use = []
- self.is_new = True
-
- self.old_use = [flag for flag in self.old_use if flag in self.old_iuse]
-
- self.use_expand = pkg.use.expand
- self.use_expand_hidden = pkg.use.expand_hidden
- return
def include_mask_str(self):
return self.conf.verbosity > 1
@@ -219,13 +182,40 @@ class Display(object):
return ret
- def recheck_hidden(self, pkg):
- """ Prevent USE_EXPAND_HIDDEN flags from being hidden if they
- are the only thing that triggered reinstallation.
+ def _display_use(self, pkg, pkg_info):
+ """ USE flag display
@param pkg: _emerge.Package.Package instance
- Modifies self.use_expand_hidden, self.use_expand, self.verboseadd
+ @param pkg_info: PkgInfo instance
+ Modifies self.use_expand_hidden, self.use_expand, self.verboseadd,
+ self.forced_flags
"""
+
+ self.forced_flags = set()
+ self.forced_flags.update(pkg.use.force)
+ self.forced_flags.update(pkg.use.mask)
+
+ cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
+ if flag in pkg.iuse.all]
+ cur_iuse = sorted(pkg.iuse.all)
+
+ if pkg_info.previous_pkg is not None:
+ previous_pkg = pkg_info.previous_pkg
+ old_iuse = sorted(previous_pkg.iuse.all)
+ old_use = previous_pkg.use.enabled
+ is_new = False
+ else:
+ old_iuse = []
+ old_use = []
+ is_new = True
+
+ old_use = [flag for flag in old_use if flag in old_iuse]
+
+ self.use_expand = pkg.use.expand
+ self.use_expand_hidden = pkg.use.expand_hidden
+
+ # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+ # are the only thing that triggered reinstallation.
reinst_flags_map = {}
reinstall_for_flags = self.conf.reinstall_nodes.get(pkg)
reinst_expand_map = None
@@ -246,13 +236,14 @@ class Display(object):
reinst_expand_map)
cur_iuse_map, iuse_forced = \
- self.map_to_use_expand(self.cur_iuse, forced_flags=True)
- cur_use_map = self.map_to_use_expand(self.cur_use)
- old_iuse_map = self.map_to_use_expand(self.old_iuse)
- old_use_map = self.map_to_use_expand(self.old_use)
+ self.map_to_use_expand(cur_iuse, forced_flags=True)
+ cur_use_map = self.map_to_use_expand(cur_use)
+ old_iuse_map = self.map_to_use_expand(old_iuse)
+ old_use_map = self.map_to_use_expand(old_use)
use_expand = sorted(self.use_expand)
use_expand.insert(0, "USE")
+ feature_flags = _get_feature_flags(_get_eapi_attrs(pkg.eapi))
for key in use_expand:
if key in self.use_expand_hidden:
@@ -260,7 +251,7 @@ class Display(object):
self.verboseadd += _create_use_string(self.conf, key.upper(),
cur_iuse_map[key], iuse_forced[key],
cur_use_map[key], old_iuse_map[key],
- old_use_map[key], self.is_new,
+ old_use_map[key], is_new, feature_flags,
reinst_flags_map.get(key))
return
@@ -318,13 +309,14 @@ class Display(object):
kwargs["myrepo"] = pkg.repo
myfilesdict = None
try:
- myfilesdict = db.getfetchsizes(pkg.cpv, **kwargs)
+ myfilesdict = db.getfetchsizes(pkg.cpv,
+ **portage._native_kwargs(kwargs))
except InvalidDependString as e:
# FIXME: validate SRC_URI earlier
depstr, = db.aux_get(pkg.cpv,
["SRC_URI"], myrepo=pkg.repo)
show_invalid_depstring_notice(
- pkg, depstr, str(e))
+ pkg, depstr, _unicode(e))
raise
except SignatureException:
# missing/invalid binary package SIZE signature
@@ -343,15 +335,13 @@ class Display(object):
if self.quiet_repo_display:
# overlay verbose
# assign index for a previous version in the same slot
- slot_matches = self.vardb.match(pkg.slot_atom)
- if slot_matches:
- repo_name_prev = self.vardb.aux_get(slot_matches[0],
- ["repository"])[0]
+ if pkg_info.previous_pkg is not None:
+ repo_name_prev = pkg_info.previous_pkg.repo
else:
repo_name_prev = None
# now use the data to generate output
- if pkg.installed or not slot_matches:
+ if pkg.installed or pkg_info.previous_pkg is None:
self.repoadd = self.conf.repo_display.repoStr(
pkg_info.repo_path_real)
else:
@@ -370,58 +360,86 @@ class Display(object):
repoadd_set.add(self.repoadd)
- def convert_myoldbest(self, pkg, myoldbest):
+ def convert_myoldbest(self, pkg, pkg_info):
"""converts and colorizes a version list to a string
@param pkg: _emerge.Package.Package instance
- @param myoldbest: list
+ @param pkg_info: PkgInfo instance
@rtype string.
"""
+ myoldbest = pkg_info.oldbest_list
# Convert myoldbest from a list to a string.
myoldbest_str = ""
if myoldbest:
versions = []
for pos, old_pkg in enumerate(myoldbest):
- key = catpkgsplit(old_pkg.cpv)[2] + "-" + catpkgsplit(old_pkg.cpv)[3]
+ key = old_pkg.version
if key[-3:] == "-r0":
key = key[:-3]
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
- key += _repo_separator + old_pkg.repo
+ if self.conf.verbosity == 3:
+ if pkg_info.attr_display.new_slot:
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in myoldbest + [pkg]):
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot or \
+ old_pkg.slot == pkg.slot and old_pkg.sub_slot != pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
+ key += _repo_separator + old_pkg.repo
versions.append(key)
myoldbest_str = blue("["+", ".join(versions)+"]")
return myoldbest_str
+ def _append_slot(self, pkg_str, pkg, pkg_info):
+ """Potentially appends slot and subslot to package string.
- def set_interactive(self, pkg, ordered, addl):
- """Increments counters.interactive if the pkg is to
- be merged and it's metadata has interactive set True
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: PkgInfo instance
+ @rtype string
+ """
+ if pkg_info.attr_display.new_slot:
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot:
+ pkg_str += "/" + pkg_info.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in pkg_info.oldbest_list + [pkg]):
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot or \
+ any(x.slot == pkg_info.slot and x.sub_slot != pkg_info.sub_slot for x in pkg_info.oldbest_list):
+ pkg_str += "/" + pkg_info.sub_slot
+ return pkg_str
+
+ def _append_repository(self, pkg_str, pkg, pkg_info):
+ """Potentially appends repository to package string.
+ @param pkg_str: string
@param pkg: _emerge.Package.Package instance
- @param ordered: boolean
- @param addl: already defined string to add to
+ @param pkg_info: PkgInfo instance
+ @rtype string
"""
- if 'interactive' in pkg.metadata.properties and \
- pkg.operation == 'merge':
- addl = colorize("WARN", "I") + addl[1:]
- if ordered:
- self.counters.interactive += 1
- return addl
-
- def _set_non_root_columns(self, addl, pkg_info, pkg):
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ pkg_str += _repo_separator + pkg.repo
+ return pkg_str
+
+ def _set_non_root_columns(self, pkg, pkg_info):
"""sets the indent level and formats the output
- @param addl: already defined string to add to
- @param pkg_info: dictionary
@param pkg: _emerge.Package.Package instance
+ @param pkg_info: PkgInfo instance
@rtype string
"""
ver_str = pkg_info.ver
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- ver_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
if self.conf.quiet:
- myprint = addl + " " + self.indent + \
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
myprint = myprint+darkblue(" "+ver_str)+" "
myprint = myprint+pkg_info.oldbest
@@ -434,7 +452,8 @@ class Display(object):
self.indent, self.pkgprint(pkg.cp, pkg_info))
else:
myprint = "[%s %s] %s%s" % \
- (self.pkgprint(pkg.type_name, pkg_info), addl,
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
@@ -446,21 +465,20 @@ class Display(object):
return myprint
- def _set_root_columns(self, addl, pkg_info, pkg):
+ def _set_root_columns(self, pkg, pkg_info):
"""sets the indent level and formats the output
- @param addl: already defined string to add to
- @param pkg_info: dictionary
@param pkg: _emerge.Package.Package instance
+ @param pkg_info: PkgInfo instance
@rtype string
Modifies self.verboseadd
"""
ver_str = pkg_info.ver
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- ver_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
if self.conf.quiet:
- myprint = addl + " " + self.indent + \
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
myprint = myprint+" "+green(ver_str)+" "
myprint = myprint+pkg_info.oldbest
@@ -473,7 +491,8 @@ class Display(object):
addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
else:
myprint = "[%s %s] %s%s" % \
- (self.pkgprint(pkg.type_name, pkg_info), addl,
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
@@ -484,18 +503,17 @@ class Display(object):
return myprint
- def _set_no_columns(self, pkg, pkg_info, addl):
+ def _set_no_columns(self, pkg, pkg_info):
"""prints pkg info without column indentation.
@param pkg: _emerge.Package.Package instance
@param pkg_info: dictionary
- @param addl: the current text to add for the next line to output
@rtype the updated addl
"""
pkg_str = pkg.cpv
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- pkg_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] %s%s %s" % \
@@ -506,46 +524,10 @@ class Display(object):
else:
myprint = "[%s %s] %s%s %s" % \
(self.pkgprint(pkg.type_name, pkg_info),
- addl, self.indent,
+ pkg_info.attr_display, self.indent,
self.pkgprint(pkg_str, pkg_info), pkg_info.oldbest)
return myprint
-
- def _insert_slot(self, pkg, pkg_info, myinslotlist):
- """Adds slot info to the message
-
- @return addl: formatted slot info
- @return myoldbest: installed version list
- Modifies self.counters.downgrades, self.counters.upgrades
- """
- addl = " " + pkg_info.fetch_symbol
- if not cpvequal(pkg.cpv,
- best([pkg.cpv] + [x.cpv for x in myinslotlist])):
- # Downgrade in slot
- addl += turquoise("U")+blue("D")
- if pkg_info.ordered:
- self.counters.downgrades += 1
- else:
- # Update in slot
- addl += turquoise("U") + " "
- if pkg_info.ordered:
- self.counters.upgrades += 1
- return addl
-
-
- def _new_slot(self, pkg, pkg_info):
- """New slot, mark it new.
-
- @return addl: formatted slot info
- @return myoldbest: installed version list
- Modifies self.counters.newslot
- """
- addl = " " + green("NS") + pkg_info.fetch_symbol + " "
- if pkg_info.ordered:
- self.counters.newslot += 1
- return addl
-
-
def print_messages(self, show_repos):
"""Performs the actual output printing of the pre-formatted
messages
@@ -581,9 +563,9 @@ class Display(object):
"""
writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
if show_repos:
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that RepoDisplay.__unicode__() is called in python2.
- writemsg_stdout(_unicode_decode("%s") % (self.conf.repo_display,),
+ writemsg_stdout("%s" % (self.conf.repo_display,),
noiselevel=-1)
return
@@ -635,15 +617,18 @@ class Display(object):
self.counters.restrict_fetch_satisfied
"""
pkg_info = PkgInfo()
+ pkg_info.cp = pkg.cp
+ pkg_info.ver = self.get_ver_str(pkg)
+ pkg_info.slot = pkg.slot
+ pkg_info.sub_slot = pkg.sub_slot
+ pkg_info.repo_name = pkg.repo
pkg_info.ordered = ordered
- pkg_info.fetch_symbol = " "
pkg_info.operation = pkg.operation
pkg_info.merge = ordered and pkg_info.operation == "merge"
if not pkg_info.merge and pkg_info.operation == "merge":
pkg_info.operation = "nomerge"
pkg_info.built = pkg.type_name != "ebuild"
pkg_info.ebuild_path = None
- pkg_info.repo_name = pkg.repo
if ordered:
if pkg_info.merge:
if pkg.type_name == "binary":
@@ -659,22 +644,30 @@ class Display(object):
pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
os.path.dirname(pkg_info.ebuild_path)))
else:
- pkg_info.repo_path_real = \
- self.portdb.getRepositoryPath(pkg.metadata["repository"])
+ pkg_info.repo_path_real = self.portdb.getRepositoryPath(pkg.repo)
pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
if not pkg.built and pkg.operation == 'merge' and \
- 'fetch' in pkg.metadata.restrict:
+ 'fetch' in pkg.restrict:
if pkg_info.ordered:
self.counters.restrict_fetch += 1
+ pkg_info.attr_display.fetch_restrict = True
if not self.portdb.getfetchsizes(pkg.cpv,
useflags=pkg_info.use, myrepo=pkg.repo):
- pkg_info.fetch_symbol = green("f")
+ pkg_info.attr_display.fetch_restrict_satisfied = True
if pkg_info.ordered:
self.counters.restrict_fetch_satisfied += 1
else:
- pkg_info.fetch_symbol = red("F")
if pkg_info.ebuild_path is not None:
self.restrict_fetch_list[pkg] = pkg_info
+
+ if self.vardb.cpv_exists(pkg.cpv):
+ # Do a cpv match first, in case the SLOT has changed.
+ pkg_info.previous_pkg = self.vardb.match_pkgs('=' + pkg.cpv)[0]
+ else:
+ slot_matches = self.vardb.match_pkgs(pkg.slot_atom)
+ if slot_matches:
+ pkg_info.previous_pkg = slot_matches[0]
+
return pkg_info
@@ -685,15 +678,14 @@ class Display(object):
@param pkg_info: dictionay
Modifies self.changelogs
"""
- inst_matches = self.vardb.match(pkg.slot_atom)
- if inst_matches:
+ if pkg_info.previous_pkg is not None:
ebuild_path_cl = pkg_info.ebuild_path
if ebuild_path_cl is None:
# binary package
ebuild_path_cl = self.portdb.findname(pkg.cpv, myrepo=pkg.repo)
if ebuild_path_cl is not None:
self.changelogs.extend(_calc_changelog(
- ebuild_path_cl, inst_matches[0], pkg.cpv))
+ ebuild_path_cl, pkg_info.previous_pkg, pkg.cpv))
return
@@ -733,12 +725,10 @@ class Display(object):
@param pkg: _emerge.Package.Package instance
@rtype string
"""
- ver_str = list(catpkgsplit(pkg.cpv)[2:])
- if ver_str[1] == "r0":
- ver_str[1] = ""
- else:
- ver_str[1] = "-" + ver_str[1]
- return ver_str[0]+ver_str[1]
+ ver_str = pkg.cpv.version
+ if ver_str.endswith("-r0"):
+ ver_str = ver_str[:-3]
+ return ver_str
def _get_installed_best(self, pkg, pkg_info):
@@ -757,9 +747,10 @@ class Display(object):
myinslotlist = None
installed_versions = self.vardb.match_pkgs(pkg.cp)
if self.vardb.cpv_exists(pkg.cpv):
- addl = " "+yellow("R")+pkg_info.fetch_symbol+" "
- installed_version = self.vardb.match_pkgs(pkg.cpv)[0]
- if not self.quiet_repo_display and installed_version.repo != pkg.repo:
+ pkg_info.attr_display.replace = True
+ installed_version = pkg_info.previous_pkg
+ if installed_version.slot != pkg.slot or installed_version.sub_slot != pkg.sub_slot or \
+ not self.quiet_repo_display and installed_version.repo != pkg.repo:
myoldbest = [installed_version]
if pkg_info.ordered:
if pkg_info.merge:
@@ -775,17 +766,31 @@ class Display(object):
myinslotlist = None
if myinslotlist:
myoldbest = myinslotlist[:]
- addl = self._insert_slot(pkg, pkg_info, myinslotlist)
+ if not cpvequal(pkg.cpv,
+ best([pkg.cpv] + [x.cpv for x in myinslotlist])):
+ # Downgrade in slot
+ pkg_info.attr_display.new_version = True
+ pkg_info.attr_display.downgrade = True
+ if pkg_info.ordered:
+ self.counters.downgrades += 1
+ else:
+ # Update in slot
+ pkg_info.attr_display.new_version = True
+ if pkg_info.ordered:
+ self.counters.upgrades += 1
else:
myoldbest = installed_versions
- addl = self._new_slot(pkg, pkg_info)
+ pkg_info.attr_display.new = True
+ pkg_info.attr_display.new_slot = True
+ if pkg_info.ordered:
+ self.counters.newslot += 1
if self.conf.changelog:
self.do_changelog(pkg, pkg_info)
else:
- addl = " " + green("N") + " " + pkg_info.fetch_symbol + " "
+ pkg_info.attr_display.new = True
if pkg_info.ordered:
self.counters.new += 1
- return addl, myoldbest, myinslotlist
+ return myoldbest, myinslotlist
def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
@@ -806,7 +811,7 @@ class Display(object):
# files to fetch list - avoids counting a same file twice
# in size display (verbose mode)
self.myfetchlist = set()
-
+
self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
if self.quiet_repo_display:
# Use this set to detect when all the "repoadd" strings are "[0]"
@@ -824,47 +829,52 @@ class Display(object):
self.indent = " " * depth
if isinstance(pkg, Blocker):
- if self._blockers(pkg, fetch_symbol=" "):
- continue
+ self._blockers(pkg)
else:
pkg_info = self.set_pkg_info(pkg, ordered)
- addl, pkg_info.oldbest_list, myinslotlist = \
+ pkg_info.oldbest_list, myinslotlist = \
self._get_installed_best(pkg, pkg_info)
+ if ordered and pkg_info.merge and \
+ not pkg_info.attr_display.new:
+ for arg, atom in depgraph._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ pkg_info.attr_display.force_reinstall = True
+ break
+
self.verboseadd = ""
if self.quiet_repo_display:
self.repoadd = None
- self._display_use(pkg, pkg_info.oldbest_list, myinslotlist)
- self.recheck_hidden(pkg)
+ self._display_use(pkg, pkg_info)
if self.conf.verbosity == 3:
if self.quiet_repo_display:
self.verbose_size(pkg, repoadd_set, pkg_info)
else:
self.verbose_size(pkg, None, pkg_info)
- pkg_info.cp = pkg.cp
- pkg_info.ver = self.get_ver_str(pkg)
-
self.oldlp = self.conf.columnwidth - 30
self.newlp = self.oldlp - 30
- pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info.oldbest_list)
+ pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info)
pkg_info.system, pkg_info.world = \
self.check_system_world(pkg)
- addl = self.set_interactive(pkg, pkg_info.ordered, addl)
+ if 'interactive' in pkg.properties and \
+ pkg.operation == 'merge':
+ pkg_info.attr_display.interactive = True
+ if ordered:
+ self.counters.interactive += 1
if self.include_mask_str():
- addl += self.gen_mask_str(pkg)
+ pkg_info.attr_display.mask = self.gen_mask_str(pkg)
if pkg.root_config.settings["ROOT"] != "/":
if pkg_info.oldbest:
pkg_info.oldbest += " "
if self.conf.columns:
- myprint = self._set_non_root_columns(
- addl, pkg_info, pkg)
+ myprint = self._set_non_root_columns(pkg, pkg_info)
else:
pkg_str = pkg.cpv
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- pkg_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] " % (
@@ -873,17 +883,16 @@ class Display(object):
)
else:
myprint = "[%s %s] " % (
- self.pkgprint(pkg.type_name, pkg_info), addl)
+ self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display)
myprint += self.indent + \
self.pkgprint(pkg_str, pkg_info) + " " + \
pkg_info.oldbest + darkgreen("to " + pkg.root)
else:
if self.conf.columns:
- myprint = self._set_root_columns(
- addl, pkg_info, pkg)
+ myprint = self._set_root_columns(pkg, pkg_info)
else:
- myprint = self._set_no_columns(
- pkg, pkg_info, addl)
+ myprint = self._set_no_columns(pkg, pkg_info)
if self.conf.columns and pkg.operation == "uninstall":
continue
@@ -908,3 +917,105 @@ class Display(object):
self.print_changelog()
return os.EX_OK
+
+
+def format_unmatched_atom(pkg, atom, pkg_use_enabled):
+ """
+ Returns two strings. The first string contains the
+ 'atom' with parts of the atom colored, which 'pkg'
+ doesn't match. The second string has the same number
+ of characters as the first one, but consists of only
+ white space or ^. The ^ characters have the same position
+ as the colored parts of the first string.
+ """
+ # Things to check:
+ # 1. Version
+ # 2. cp
+ # 3. slot/sub_slot
+ # 4. repository
+ # 5. USE
+
+ highlight = set()
+
+ def perform_coloring():
+ atom_str = ""
+ marker_str = ""
+ for ii, x in enumerate(atom):
+ if ii in highlight:
+ atom_str += colorize("BAD", x)
+ marker_str += "^"
+ else:
+ atom_str += x
+ marker_str += " "
+ return atom_str, marker_str
+
+ if atom.cp != pkg.cp:
+ # Highlight the cp part only.
+ ii = atom.find(atom.cp)
+ highlight.update(range(ii, ii + len(atom.cp)))
+ return perform_coloring()
+
+ version_atom = atom.without_repo.without_slot.without_use
+ version_atom_set = InternalPackageSet(initial_atoms=(version_atom,))
+ highlight_version = not bool(version_atom_set.findAtomForPackage(pkg,
+ modified_use=pkg_use_enabled(pkg)))
+
+ highlight_slot = False
+ if (atom.slot and atom.slot != pkg.slot) or \
+ (atom.sub_slot and atom.sub_slot != pkg.sub_slot):
+ highlight_slot = True
+
+ if highlight_version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ if op is not None:
+ highlight.update(range(len(op)))
+
+ if ver is not None:
+ start = atom.rfind(ver)
+ end = start + len(ver)
+ highlight.update(range(start, end))
+
+ if highlight_slot:
+ slot_str = ":" + atom.slot
+ if atom.sub_slot:
+ slot_str += "/" + atom.sub_slot
+ if atom.slot_operator:
+ slot_str += atom.slot_operator
+ start = atom.find(slot_str)
+ end = start + len(slot_str)
+ highlight.update(range(start, end))
+
+ highlight_use = set()
+ if atom.use:
+ use_atom = "%s[%s]" % (atom.cp, str(atom.use))
+ use_atom_set = InternalPackageSet(initial_atoms=(use_atom,))
+ if not use_atom_set.findAtomForPackage(pkg, \
+ modified_use=pkg_use_enabled(pkg)):
+ missing_iuse = pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ highlight_use = set(missing_iuse)
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(
+ pkg_use_enabled(pkg), pkg.iuse.is_valid_flag)
+ if violated_atom.use is not None:
+ highlight_use = set(violated_atom.use.enabled.union(
+ violated_atom.use.disabled))
+
+ if highlight_use:
+ ii = atom.find("[") + 1
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in highlight_use:
+ highlight.update(range(ii, ii + len(token)))
+ ii += len(token) + 1
+
+ return perform_coloring()
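
The two strings returned by format_unmatched_atom() are meant to be printed on
consecutive lines, so that the caret row underlines exactly the spans that were
colorized. A minimal self-contained sketch of the same marker technique,
independent of portage's Atom objects (the sample atom and positions are made
up for illustration):

    def underline(text, bad_positions):
        # Second line of equal width: '^' under each flagged character,
        # a space everywhere else.
        return "".join("^" if i in bad_positions else " "
            for i in range(len(text)))

    atom = ">=dev-lang/python-2.7:2.7"
    # Pretend the operator and the slot part failed to match.
    bad = set(range(2)) | set(range(len(atom) - 4, len(atom)))
    print(atom)
    print(underline(atom, bad))
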
diff --git a/pym/_emerge/resolver/output_helpers.py b/pym/_emerge/resolver/output_helpers.py
index e751dd8e4..58b26945a 100644
--- a/pym/_emerge/resolver/output_helpers.py
+++ b/pym/_emerge/resolver/output_helpers.py
@@ -1,9 +1,12 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Contains private support functions for the Display class
in output.py
"""
+
+from __future__ import unicode_literals
+
__all__ = (
)
@@ -15,9 +18,10 @@ from portage import os
from portage import _encodings, _unicode_encode
from portage._sets.base import InternalPackageSet
from portage.output import (blue, bold, colorize, create_color_func,
- green, red, teal, yellow)
+ green, red, teal, turquoise, yellow)
bad = create_color_func("BAD")
from portage.util import shlex_split, writemsg
+from portage.util.SlotObject import SlotObject
from portage.versions import catpkgsplit
from _emerge.Blocker import Blocker
@@ -223,7 +227,7 @@ class _DisplayConfig(object):
self.reinstall_nodes = dynamic_config._reinstall_nodes
self.digraph = dynamic_config.digraph
self.blocker_uninstalls = dynamic_config._blocker_uninstalls
- self.slot_pkg_map = dynamic_config._slot_pkg_map
+ self.package_tracker = dynamic_config._package_tracker
self.set_nodes = dynamic_config._set_nodes
self.pkg_use_enabled = depgraph._pkg_use_enabled
@@ -245,10 +249,9 @@ def _format_size(mysize):
mystr=mystr[:mycount]+","+mystr[mycount:]
return mystr+" kB"
-
def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
old_iuse, old_use,
- is_new, reinst_flags):
+ is_new, feature_flags, reinst_flags):
if not conf.print_use_string:
return ""
@@ -266,6 +269,7 @@ def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
any_iuse = cur_iuse.union(old_iuse)
any_iuse = list(any_iuse)
any_iuse.sort()
+
for flag in any_iuse:
flag_str = None
isEnabled = False
@@ -299,7 +303,9 @@ def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
elif flag in old_use:
flag_str = green("-" + flag) + "*"
if flag_str:
- if flag in iuse_forced:
+ if flag in feature_flags:
+ flag_str = "{" + flag_str + "}"
+ elif flag in iuse_forced:
flag_str = "(" + flag_str + ")"
if isEnabled:
enabled.append(flag_str)
@@ -364,8 +370,9 @@ def _tree_display(conf, mylist):
# If the uninstall task did not need to be executed because
# of an upgrade, display Blocker -> Upgrade edges since the
# corresponding Blocker -> Uninstall edges will not be shown.
- upgrade_node = \
- conf.slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
+ upgrade_node = next(conf.package_tracker.match(
+ uninstall.root, uninstall.slot_atom), None)
+
if upgrade_node is not None and \
uninstall not in executed_uninstalls:
for blocker in uninstall_parents:
@@ -611,9 +618,10 @@ class PkgInfo(object):
information about the pkg being printed.
"""
- __slots__ = ("built", "cp", "ebuild_path", "fetch_symbol", "merge",
- "oldbest", "oldbest_list", "operation", "ordered",
- "repo_name", "repo_path_real", "system", "use", "ver", "world")
+ __slots__ = ("attr_display", "built", "cp",
+ "ebuild_path", "fetch_symbol", "merge",
+ "oldbest", "oldbest_list", "operation", "ordered", "previous_pkg",
+ "repo_name", "repo_path_real", "slot", "sub_slot", "system", "use", "ver", "world")
def __init__(self):
@@ -626,9 +634,74 @@ class PkgInfo(object):
self.oldbest_list = []
self.operation = ''
self.ordered = False
+ self.previous_pkg = None
self.repo_path_real = ''
self.repo_name = ''
+ self.slot = ''
+ self.sub_slot = ''
self.system = False
self.use = ''
self.ver = ''
self.world = False
+ self.attr_display = PkgAttrDisplay()
+
+class PkgAttrDisplay(SlotObject):
+
+ __slots__ = ("downgrade", "fetch_restrict", "fetch_restrict_satisfied",
+ "force_reinstall",
+ "interactive", "mask", "new", "new_slot", "new_version", "replace")
+
+ def __str__(self):
+ output = []
+
+ if self.interactive:
+ output.append(colorize("WARN", "I"))
+ else:
+ output.append(" ")
+
+ if self.new or self.force_reinstall:
+ if self.force_reinstall:
+ output.append(red("r"))
+ else:
+ output.append(green("N"))
+ else:
+ output.append(" ")
+
+ if self.new_slot or self.replace:
+ if self.replace:
+ output.append(yellow("R"))
+ else:
+ output.append(green("S"))
+ else:
+ output.append(" ")
+
+ if self.fetch_restrict or self.fetch_restrict_satisfied:
+ if self.fetch_restrict_satisfied:
+ output.append(green("f"))
+ else:
+ output.append(red("F"))
+ else:
+ output.append(" ")
+
+ if self.new_version:
+ output.append(turquoise("U"))
+ else:
+ output.append(" ")
+
+ if self.downgrade:
+ output.append(blue("D"))
+ else:
+ output.append(" ")
+
+ if self.mask is not None:
+ output.append(self.mask)
+
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
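
PkgAttrDisplay replaces the ad-hoc `addl` strings with one boolean attribute
per status letter. A rough self-contained stand-in showing how those booleans
map to the fixed-width letter columns (colors omitted; this mirrors the
__str__() above but is not portage code):

    class AttrDisplay(object):
        # One letter (or a space) per column, in the same order as
        # PkgAttrDisplay.__str__: I, N/r, S/R, F/f, U, D.
        def __init__(self, **flags):
            self.f = flags

        def __str__(self):
            get = self.f.get
            return "".join([
                "I" if get("interactive") else " ",
                "r" if get("force_reinstall") else
                    ("N" if get("new") else " "),
                "R" if get("replace") else
                    ("S" if get("new_slot") else " "),
                "f" if get("fetch_restrict_satisfied") else
                    ("F" if get("fetch_restrict") else " "),
                "U" if get("new_version") else " ",
                "D" if get("downgrade") else " ",
            ])

    print("[ebuild %s]" % AttrDisplay(replace=True, new_version=True))
    # -> [ebuild   R U ]
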
diff --git a/pym/_emerge/resolver/package_tracker.py b/pym/_emerge/resolver/package_tracker.py
new file mode 100644
index 000000000..5982750a0
--- /dev/null
+++ b/pym/_emerge/resolver/package_tracker.py
@@ -0,0 +1,301 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import collections
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,match_from_list',
+ 'portage.util:cmp_sort_key',
+ 'portage.versions:vercmp',
+)
+
+_PackageConflict = collections.namedtuple("_PackageConflict", ["root", "pkgs", "atom", "description"])
+
+class PackageConflict(_PackageConflict):
+ """
+ Class to track the reason for a conflict and the conflicting packages.
+ """
+ def __iter__(self):
+ return iter(self.pkgs)
+
+ def __contains__(self, pkg):
+ return pkg in self.pkgs
+
+ def __len__(self):
+ return len(self.pkgs)
+
+
+class PackageTracker(object):
+ """
+ This class tracks packages which are currently
+ installed and packages which have been pulled into
+ the dependency graph.
+
+ It automatically tracks conflicts between packages.
+
+ Possible conflicts:
+ 1) Packages that share the same SLOT.
+ 2) Packages with the same cpv.
+ Not yet implemented:
+ 3) Packages that block each other.
+ """
+
+ def __init__(self):
+ # Mapping from package keys to set of packages.
+ self._cp_pkg_map = collections.defaultdict(list)
+ self._cp_vdb_pkg_map = collections.defaultdict(list)
+ # List of package keys that may contain conflicts.
+ # The insertion order must be preserved.
+ self._multi_pkgs = []
+
+ # Cache for result of conflicts().
+ self._conflicts_cache = None
+
+ # Records for each pulled package which installed package
+ # are replaced.
+ self._replacing = collections.defaultdict(list)
+ # Records which pulled packages replace this package.
+ self._replaced_by = collections.defaultdict(list)
+
+ self._match_cache = collections.defaultdict(dict)
+
+ def add_pkg(self, pkg):
+ """
+ Add a new package to the tracker. Records conflicts as necessary.
+ """
+ cp_key = pkg.root, pkg.cp
+
+ if any(other is pkg for other in self._cp_pkg_map[cp_key]):
+ return
+
+ self._cp_pkg_map[cp_key].append(pkg)
+
+ if len(self._cp_pkg_map[cp_key]) > 1:
+ self._conflicts_cache = None
+ if len(self._cp_pkg_map[cp_key]) == 2:
+ self._multi_pkgs.append(cp_key)
+
+ self._replacing[pkg] = []
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def add_installed_pkg(self, installed):
+ """
+ Add an installed package during vdb load. These packages
+ are returned by match() only as long as no package added
+ via add_pkg() replaces them.
+ """
+ cp_key = installed.root, installed.cp
+ if any(other is installed for other in self._cp_vdb_pkg_map[cp_key]):
+ return
+
+ self._cp_vdb_pkg_map[cp_key].append(installed)
+
+ for pkg in self._cp_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def remove_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Raises KeyError if it isn't present.
+ """
+ cp_key = pkg.root, pkg.cp
+ try:
+ self._cp_pkg_map.get(cp_key, []).remove(pkg)
+ except ValueError:
+ raise KeyError(pkg)
+
+ if self._cp_pkg_map[cp_key]:
+ self._conflicts_cache = None
+
+ if not self._cp_pkg_map[cp_key]:
+ del self._cp_pkg_map[cp_key]
+ elif len(self._cp_pkg_map[cp_key]) == 1:
+ self._multi_pkgs = [other_cp_key for other_cp_key in self._multi_pkgs \
+ if other_cp_key != cp_key]
+
+ for installed in self._replacing[pkg]:
+ self._replaced_by[installed].remove(pkg)
+ if not self._replaced_by[installed]:
+ del self._replaced_by[installed]
+ del self._replacing[pkg]
+
+ self._match_cache.pop(cp_key, None)
+
+ def discard_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Does not raise KeyError if it is not present.
+ """
+ try:
+ self.remove_pkg(pkg)
+ except KeyError:
+ pass
+
+ def match(self, root, atom, installed=True):
+ """
+ Iterates over the packages matching 'atom'.
+ If 'installed' is True, installed non-replaced
+ packages may also be returned.
+ """
+ cp_key = root, atom.cp
+ cache_key = root, atom, installed
+ try:
+ return iter(self._match_cache.get(cp_key, {})[cache_key])
+ except KeyError:
+ pass
+
+ candidates = self._cp_pkg_map.get(cp_key, [])[:]
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed not in self._replaced_by:
+ candidates.append(installed)
+
+ ret = match_from_list(atom, candidates)
+ ret.sort(key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ self._match_cache[cp_key][cache_key] = ret
+
+ return iter(ret)
+
+ def conflicts(self):
+ """
+ Iterates over the currently existing conflicts.
+ """
+ if self._conflicts_cache is None:
+ self._conflicts_cache = []
+
+ for cp_key in self._multi_pkgs:
+
+ # Categorize packages according to cpv and slot.
+ slot_map = collections.defaultdict(list)
+ cpv_map = collections.defaultdict(list)
+ for pkg in self._cp_pkg_map[cp_key]:
+ slot_key = pkg.root, pkg.slot_atom
+ cpv_key = pkg.root, pkg.cpv
+ slot_map[slot_key].append(pkg)
+ cpv_map[cpv_key].append(pkg)
+
+ # Slot conflicts.
+ for slot_key in slot_map:
+ slot_pkgs = slot_map[slot_key]
+ if len(slot_pkgs) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "slot conflict",
+ root = slot_key[0],
+ atom = slot_key[1],
+ pkgs = tuple(slot_pkgs),
+ ))
+
+ # CPV conflicts.
+ for cpv_key in cpv_map:
+ cpv_pkgs = cpv_map[cpv_key]
+ if len(cpv_pkgs) > 1:
+ # Make sure this cpv conflict is not a slot conflict at the same time.
+ # Ignore it if it is.
+ slots = set(pkg.slot for pkg in cpv_pkgs)
+ if len(slots) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "cpv conflict",
+ root = cpv_key[0],
+ atom = cpv_key[1],
+ pkgs = tuple(cpv_pkgs),
+ ))
+
+ return iter(self._conflicts_cache)
+
+ def slot_conflicts(self):
+ """
+ Iterates over present slot conflicts.
+ This is only intended for consumers that haven't been
+ updated to deal with other kinds of conflicts.
+ This function should be removed once all consumers are updated.
+ """
+ return (conflict for conflict in self.conflicts() \
+ if conflict.description == "slot conflict")
+
+ def all_pkgs(self, root):
+ """
+ Iterates over all packages for the given root
+ present in the tracker, including the installed
+ packages.
+ """
+ for cp_key in self._cp_pkg_map:
+ if cp_key[0] == root:
+ for pkg in self._cp_pkg_map[cp_key]:
+ yield pkg
+
+ for cp_key in self._cp_vdb_pkg_map:
+ if cp_key[0] == root:
+ for installed in self._cp_vdb_pkg_map[cp_key]:
+ if installed not in self._replaced_by:
+ yield installed
+
+ def contains(self, pkg, installed=True):
+ """
+ Checks if the package is in the tracker.
+ If 'installed' is True, returns True for
+ non-replaced installed packages.
+ """
+ cp_key = pkg.root, pkg.cp
+ for other in self._cp_pkg_map.get(cp_key, []):
+ if other is pkg:
+ return True
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed is pkg and \
+ installed not in self._replaced_by:
+ return True
+
+ return False
+
+ def __contains__(self, pkg):
+ """
+ Checks if the package is in the tracker.
+ Returns True for non-replaced installed packages.
+ """
+ return self.contains(pkg, installed=True)
+
+
+class PackageTrackerDbapiWrapper(object):
+ """
+ A wrapper class that provides parts of the legacy
+ dbapi interface. Remove it once all consumers have
+ died.
+ """
+ def __init__(self, root, package_tracker):
+ self._root = root
+ self._package_tracker = package_tracker
+
+ def cpv_inject(self, pkg):
+ self._package_tracker.add_pkg(pkg)
+
+ def match_pkgs(self, atom):
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+ ret = sorted(self._package_tracker.match(self._root, atom),
+ key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ return ret
+
+ def __iter__(self):
+ return self._package_tracker.all_pkgs(self._root)
+
+ def match(self, atom, use_cache=None):
+ return self.match_pkgs(atom)
+
+ def cp_list(self, cp):
+ return self.match_pkgs(cp)
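
A hypothetical usage sketch of the new tracker, with a namedtuple standing in
for _emerge.Package.Package (only the attributes that add_pkg() and
conflicts() actually touch are stubbed; this assumes the module is imported
from within a portage source tree):

    import collections

    Pkg = collections.namedtuple("Pkg", "root cp cpv slot slot_atom")

    tracker = PackageTracker()
    tracker.add_pkg(Pkg("/", "dev-lang/python", "dev-lang/python-2.7.5",
        "2.7", "dev-lang/python:2.7"))
    tracker.add_pkg(Pkg("/", "dev-lang/python", "dev-lang/python-2.7.6",
        "2.7", "dev-lang/python:2.7"))

    for conflict in tracker.conflicts():
        # Two distinct cpvs pulled into one slot -> one "slot conflict".
        print(conflict.description, conflict.atom, len(conflict))
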
diff --git a/pym/_emerge/resolver/slot_collision.py b/pym/_emerge/resolver/slot_collision.py
index 783a6483d..baeab080a 100644
--- a/pym/_emerge/resolver/slot_collision.py
+++ b/pym/_emerge/resolver/slot_collision.py
@@ -1,10 +1,11 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import sys
+from portage import _encodings, _unicode_encode
from _emerge.AtomArg import AtomArg
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
@@ -88,10 +89,11 @@ class slot_conflict_handler(object):
self.debug = "--debug" in self.myopts
if self.debug:
writemsg("Starting slot conflict handler\n", noiselevel=-1)
- #slot_collision_info is a dict mapping (slot atom, root) to set
- #of packages. The packages in the set all belong to the same
- #slot.
- self.slot_collision_info = depgraph._dynamic_config._slot_collision_info
+
+ # List of tuples, where each tuple represents a slot conflict.
+ self.all_conflicts = []
+ for conflict in depgraph._dynamic_config._package_tracker.slot_conflicts():
+ self.all_conflicts.append((conflict.root, conflict.atom, conflict.pkgs))
#A dict mapping packages to pairs of parent package
#and parent atom
@@ -108,8 +110,7 @@ class slot_conflict_handler(object):
all_conflict_atoms_by_slotatom = []
#fill conflict_pkgs, all_conflict_atoms_by_slotatom
- for (atom, root), pkgs \
- in self.slot_collision_info.items():
+ for root, atom, pkgs in self.all_conflicts:
conflict_pkgs.append(list(pkgs))
all_conflict_atoms_by_slotatom.append(set())
@@ -150,7 +151,7 @@ class slot_conflict_handler(object):
if self.debug:
writemsg("\nNew configuration:\n", noiselevel=-1)
for pkg in config:
- writemsg(" " + str(pkg) + "\n", noiselevel=-1)
+ writemsg(" %s\n" % (pkg,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
new_solutions = self._check_configuration(config, all_conflict_atoms_by_slotatom, conflict_nodes)
@@ -225,10 +226,14 @@ class slot_conflict_handler(object):
new_change = {}
for pkg in solution:
for flag, state in solution[pkg].items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
if state == "enabled" and flag not in _pkg_use_enabled(pkg):
- new_change.setdefault(pkg, {})[flag] = True
+ new_change.setdefault(pkg, {})[real_flag] = True
elif state == "disabled" and flag in _pkg_use_enabled(pkg):
- new_change.setdefault(pkg, {})[flag] = False
+ new_change.setdefault(pkg, {})[real_flag] = False
return new_change
def _prepare_conflict_msg_and_check_for_specificity(self):
@@ -236,6 +241,7 @@ class slot_conflict_handler(object):
Print all slot conflicts in a human readable way.
"""
_pkg_use_enabled = self.depgraph._pkg_use_enabled
+ verboseconflicts = "--verbose-conflicts" in self.myopts
msg = self.conflict_msg
indent = " "
msg.append("\n!!! Multiple package instances within a single " + \
@@ -243,16 +249,15 @@ class slot_conflict_handler(object):
msg.append("!!! into the dependency graph, resulting" + \
" in a slot conflict:\n\n")
- for (slot_atom, root), pkgs \
- in self.slot_collision_info.items():
- msg.append(str(slot_atom))
+ for root, slot_atom, pkgs in self.all_conflicts:
+ msg.append("%s" % (slot_atom,))
if root != self.depgraph._frozen_config._running_root.root:
msg.append(" for %s" % (root,))
msg.append("\n\n")
for pkg in pkgs:
msg.append(indent)
- msg.append(str(pkg))
+ msg.append("%s" % (pkg,))
parent_atoms = self.all_parents.get(pkg)
if parent_atoms:
#Create a list of collision reasons and map them to sets
@@ -268,12 +273,14 @@ class slot_conflict_handler(object):
for ppkg, atom in parent_atoms:
atom_set = InternalPackageSet(initial_atoms=(atom,))
atom_without_use_set = InternalPackageSet(initial_atoms=(atom.without_use,))
+ atom_without_use_and_slot_set = InternalPackageSet(initial_atoms=(
+ atom.without_use.without_slot,))
for other_pkg in pkgs:
if other_pkg == pkg:
continue
- if not atom_without_use_set.findAtomForPackage(other_pkg, \
+ if not atom_without_use_and_slot_set.findAtomForPackage(other_pkg, \
modified_use=_pkg_use_enabled(other_pkg)):
if atom.operator is not None:
# The version range does not match.
@@ -290,9 +297,11 @@ class slot_conflict_handler(object):
atoms.add((ppkg, atom, other_pkg))
num_all_specific_atoms += 1
collision_reasons[key] = atoms
- else:
- # The slot_abi does not match.
- key = ("sub-slot", atom.slot_abi)
+
+ elif not atom_without_use_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ # The slot and/or sub_slot does not match.
+ key = ("slot", (atom.slot, atom.sub_slot, atom.slot_operator))
atoms = collision_reasons.get(key, set())
atoms.add((ppkg, atom, other_pkg))
num_all_specific_atoms += 1
@@ -312,11 +321,36 @@ class slot_conflict_handler(object):
#Use conditionals not met.
violated_atom = atom.violated_conditionals(_pkg_use_enabled(other_pkg), \
other_pkg.iuse.is_valid_flag)
+ if violated_atom.use is None:
+ # Something like bug #453400 caused the
+ # above findAtomForPackage call to
+ # return None unexpectedly.
+ msg = ("\n\n!!! BUG: Detected "
+ "USE dep match inconsistency:\n"
+ "\tppkg: %s\n"
+ "\tviolated_atom: %s\n"
+ "\tatom: %s unevaluated: %s\n"
+ "\tother_pkg: %s IUSE: %s USE: %s\n" %
+ (ppkg,
+ violated_atom,
+ atom,
+ atom.unevaluated_atom,
+ other_pkg,
+ sorted(other_pkg.iuse.all),
+ sorted(_pkg_use_enabled(other_pkg))))
+ writemsg(msg, noiselevel=-2)
+ raise AssertionError(
+ 'BUG: USE dep match inconsistency')
for flag in violated_atom.use.enabled.union(violated_atom.use.disabled):
atoms = collision_reasons.get(("use", flag), set())
atoms.add((ppkg, atom, other_pkg))
collision_reasons[("use", flag)] = atoms
num_all_specific_atoms += 1
+ elif isinstance(ppkg, AtomArg) and other_pkg.installed:
+ parent_atoms = collision_reasons.get(("AtomArg", None), set())
+ parent_atoms.add((ppkg, atom))
+ collision_reasons[("AtomArg", None)] = parent_atoms
+ num_all_specific_atoms += 1
msg.append(" pulled in by\n")
@@ -342,10 +376,16 @@ class slot_conflict_handler(object):
best_matches[atom.cp] = (ppkg, atom)
else:
best_matches[atom.cp] = (ppkg, atom)
- selected_for_display.update(best_matches.values())
- elif type == "sub-slot":
+ if verboseconflicts:
+ selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ selected_for_display.update(
+ best_matches.values())
+ elif type == "slot":
for ppkg, atom, other_pkg in parents:
selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ break
elif type == "use":
#Prefer atoms with unconditional use deps over, because it's
#not possible to change them on the parent, which means there
@@ -387,21 +427,50 @@ class slot_conflict_handler(object):
# If the list is long, people can simply
# use a pager.
selected_for_display.add((ppkg, atom))
+ elif type == "AtomArg":
+ for ppkg, atom in parents:
+ selected_for_display.add((ppkg, atom))
- def highlight_violations(atom, version, use=[]):
+ def highlight_violations(atom, version, use, slot_violated):
"""Colorize parts of an atom"""
- atom_str = str(atom)
+ atom_str = "%s" % (atom,)
+ colored_idx = set()
if version:
op = atom.operator
ver = None
if atom.cp != atom.cpv:
ver = cpv_getversion(atom.cpv)
slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
if op == "=*":
op = "="
ver += "*"
+ slot_str = ""
+ if slot:
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ # Compute colored_idx before adding the color codes
+ # as these change the indices of the letters.
+ if op is not None:
+ colored_idx.update(range(len(op)))
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ colored_idx.update(range(start, end))
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+
+
if op is not None:
atom_str = atom_str.replace(op, colorize("BAD", op), 1)
@@ -411,25 +480,48 @@ class slot_conflict_handler(object):
atom_str = atom_str[:start] + \
colorize("BAD", ver) + \
atom_str[end:]
+
+ if slot_str:
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
+
+ elif slot_violated:
+ slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
+
+ slot_str = ""
if slot:
- atom_str = atom_str.replace(":" + slot, colorize("BAD", ":" + slot))
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
if use and atom.use.tokens:
use_part_start = atom_str.find("[")
use_part_end = atom_str.find("]")
new_tokens = []
+ # Compute start index in non-colored atom.
+ ii = str(atom).find("[") + 1
for token in atom.use.tokens:
if token.lstrip("-!").rstrip("=?") in use:
new_tokens.append(colorize("BAD", token))
+ colored_idx.update(range(ii, ii + len(token)))
else:
new_tokens.append(token)
+ ii += 1 + len(token)
atom_str = atom_str[:use_part_start] \
+ "[%s]" % (",".join(new_tokens),) + \
atom_str[use_part_end+1:]
- return atom_str
+ return atom_str, colored_idx
# Show unconditional use deps first, since those
# are more problematic than the conditional kind.
@@ -440,37 +532,49 @@ class slot_conflict_handler(object):
ordered_list.append(parent_atom)
for parent_atom in ordered_list:
parent, atom = parent_atom
- msg.append(2*indent)
- if isinstance(parent,
- (PackageArg, AtomArg)):
- # For PackageArg and AtomArg types, it's
+ if isinstance(parent, PackageArg):
+ # For PackageArg it's
# redundant to display the atom attribute.
- msg.append(str(parent))
+ msg.append("%s\n" % (parent,))
+ elif isinstance(parent, AtomArg):
+ msg.append(2*indent)
+ msg.append("%s (Argument)\n" % (atom,))
else:
# Display the specific atom from SetArg or
# Package types.
version_violated = False
- sub_slot_violated = False
+ slot_violated = False
use = []
for (type, sub_type), parents in collision_reasons.items():
for x in parents:
if parent == x[0] and atom == x[1]:
if type == "version":
version_violated = True
- elif type == "sub-slot":
- sub_slot_violated = True
+ elif type == "slot":
+ slot_violated = True
elif type == "use":
use.append(sub_type)
break
- atom_str = highlight_violations(atom.unevaluated_atom, version_violated, use)
+ atom_str, colored_idx = highlight_violations(atom.unevaluated_atom,
+ version_violated, use, slot_violated)
- if version_violated or sub_slot_violated:
+ if version_violated or slot_violated:
self.is_a_version_conflict = True
- msg.append("%s required by %s" % (atom_str, parent))
- msg.append("\n")
-
+ cur_line = "%s required by %s\n" % (atom_str, parent)
+ marker_line = ""
+ for ii in range(len(cur_line)):
+ if ii in colored_idx:
+ marker_line += "^"
+ else:
+ marker_line += " "
+ marker_line += "\n"
+ msg.append(2*indent)
+ msg.append(cur_line)
+ msg.append(2*indent)
+ msg.append(marker_line)
+
if not selected_for_display:
msg.append(2*indent)
msg.append("(no parents that aren't satisfied by other packages in this slot)\n")
@@ -490,7 +594,6 @@ class slot_conflict_handler(object):
def get_explanation(self):
msg = ""
- _pkg_use_enabled = self.depgraph._pkg_use_enabled
if self.is_a_version_conflict:
return None
@@ -506,13 +609,13 @@ class slot_conflict_handler(object):
return None
if len(solutions)==1:
- if len(self.slot_collision_info)==1:
+ if len(self.all_conflicts) == 1:
msg += "It might be possible to solve this slot collision\n"
else:
msg += "It might be possible to solve these slot collisions\n"
msg += "by applying all of the following changes:\n"
else:
- if len(self.slot_collision_info)==1:
+ if len(self.all_conflicts) == 1:
msg += "It might be possible to solve this slot collision\n"
else:
msg += "It might be possible to solve these slot collisions\n"
@@ -553,8 +656,7 @@ class slot_conflict_handler(object):
if not pkg.installed:
continue
- for (atom, root), pkgs \
- in self.slot_collision_info.items():
+ for root, atom, pkgs in self.all_conflicts:
if pkg not in pkgs:
continue
for other_pkg in pkgs:
@@ -563,7 +665,9 @@ class slot_conflict_handler(object):
if pkg.iuse.all.symmetric_difference(other_pkg.iuse.all) \
or _pkg_use_enabled(pkg).symmetric_difference(_pkg_use_enabled(other_pkg)):
if self.debug:
- writemsg(str(pkg) + " has pending USE changes. Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s has pending USE changes. "
+ "Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
#A list of dicts. Keeps one dict per slot conflict. [ { flag1: "enabled" }, { flag2: "disabled" } ]
@@ -586,16 +690,18 @@ class slot_conflict_handler(object):
if not i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
#Version range does not match.
if self.debug:
- writemsg(str(pkg) + " does not satify all version requirements." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s does not satify all version "
+ "requirements. Rejecting configuration.\n") %
+ (pkg,), noiselevel=-1)
return False
if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required):
#Missing IUSE.
#FIXME: This needs to support use dep defaults.
if self.debug:
- writemsg(str(pkg) + " misses needed flags from IUSE." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s misses needed flags from IUSE."
+ " Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
if not isinstance(ppkg, Package) or ppkg.installed:
@@ -620,8 +726,9 @@ class slot_conflict_handler(object):
#We can't change USE of an installed package (only of an ebuild, but that is already
#part of the conflict, isn't it?
if self.debug:
- writemsg(str(pkg) + ": installed package would need USE changes." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s: installed package would need USE"
+ " changes. Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
#Compute the required USE changes. A flag can be forced to "enabled" or "disabled",
@@ -675,7 +782,7 @@ class slot_conflict_handler(object):
if self.debug:
writemsg("All involved flags:\n", noiselevel=-1)
for id, involved_flags in enumerate(all_involved_flags):
- writemsg(" " + str(config[id]) + "\n", noiselevel=-1)
+ writemsg(" %s\n" % (config[id],), noiselevel=-1)
for flag, state in involved_flags.items():
writemsg(" " + flag + ": " + state + "\n", noiselevel=-1)
@@ -758,7 +865,7 @@ class slot_conflict_handler(object):
inner_first = False
else:
msg += ", "
- msg += flag + ": " + str(state)
+ msg += flag + ": %s" % (state,)
msg += "}"
msg += "]\n"
writemsg(msg, noiselevel=-1)
@@ -862,8 +969,9 @@ class slot_conflict_handler(object):
#We managed to create a new problem with our changes.
is_valid_solution = False
if self.debug:
- writemsg("new conflict introduced: " + str(pkg) + \
- " does not match " + new_atom + " from " + str(ppkg) + "\n", noiselevel=-1)
+ writemsg(("new conflict introduced: %s"
+ " does not match %s from %s\n") %
+ (pkg, new_atom, ppkg), noiselevel=-1)
break
if not is_valid_solution:
@@ -871,7 +979,7 @@ class slot_conflict_handler(object):
#Make sure the changes don't violate REQUIRED_USE
for pkg in required_changes:
- required_use = pkg.metadata.get("REQUIRED_USE")
+ required_use = pkg._metadata.get("REQUIRED_USE")
if not required_use:
continue
@@ -950,8 +1058,16 @@ class _solution_candidate_generator(object):
else:
return self.value == other.value
def __str__(self):
- return str(self.value)
-
+ return "%s" % (self.value,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
def __init__(self, all_involved_flags):
#A copy of all_involved_flags with all "cond" values
#replaced by a _value_helper object.
diff --git a/pym/_emerge/search.py b/pym/_emerge/search.py
index 5abc8a00c..bd74fb7b1 100644
--- a/pym/_emerge/search.py
+++ b/pym/_emerge/search.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -69,7 +69,7 @@ class search(object):
return db.aux_get(*args, **kwargs)
except KeyError:
pass
- raise
+ raise KeyError(args[0])
def _findname(self, *args, **kwargs):
for db in self._dbs:
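
Under Python 3 the caught KeyError is cleared as soon as the except suite
exits, so the old trailing bare `raise` could fail with "No active exception
to re-raise"; the replacement raises a fresh KeyError that also names the
requested cpv. Self-contained sketch of the fallback pattern (MemDB is a
made-up stand-in for the real dbapi objects):

    class MemDB(object):
        def __init__(self, data):
            self.data = data

        def aux_get(self, cpv, keys):
            # KeyError propagates when cpv is unknown to this db.
            return [self.data[cpv][k] for k in keys]

    def aux_get_first(dbs, cpv, keys):
        for db in dbs:
            try:
                return db.aux_get(cpv, keys)
            except KeyError:
                pass
        raise KeyError(cpv)

    db = MemDB({"app-misc/foo-1": {"SRC_URI": "foo-1.tar.gz"}})
    print(aux_get_first([db], "app-misc/foo-1", ["SRC_URI"]))
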
diff --git a/pym/_emerge/stdout_spinner.py b/pym/_emerge/stdout_spinner.py
index 5ad31f001..670686adf 100644
--- a/pym/_emerge/stdout_spinner.py
+++ b/pym/_emerge/stdout_spinner.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import platform
@@ -53,17 +53,18 @@ class stdout_spinner(object):
def update_basic(self):
self.spinpos = (self.spinpos + 1) % 500
if self._return_early():
- return
+ return True
if (self.spinpos % 100) == 0:
if self.spinpos == 0:
sys.stdout.write(". ")
else:
sys.stdout.write(".")
sys.stdout.flush()
+ return True
def update_scroll(self):
if self._return_early():
- return
+ return True
if(self.spinpos >= len(self.scroll_sequence)):
sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
@@ -71,13 +72,15 @@ class stdout_spinner(object):
sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
sys.stdout.flush()
self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
+ return True
def update_twirl(self):
self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
if self._return_early():
- return
+ return True
sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
sys.stdout.flush()
+ return True
def update_quiet(self):
- return
+ return True
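
The update_* methods now all return True, presumably so they can double as
event-loop timeout callbacks, where a true return value keeps the callback
scheduled, glib-style (that reading is an assumption; the diff itself does not
say). A self-contained sketch of that calling convention:

    import time

    def run_periodic(callback, interval_ms, max_calls):
        # glib-style: keep rescheduling while the callback returns True.
        for _ in range(max_calls):
            if not callback():
                break
            time.sleep(interval_ms / 1000.0)

    ticks = []
    run_periodic(lambda: ticks.append(1) or True, 10, 3)
    print(len(ticks))  # 3
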
diff --git a/pym/_emerge/unmerge.py b/pym/_emerge/unmerge.py
index b46b89cb8..b04f8f376 100644
--- a/pym/_emerge/unmerge.py
+++ b/pym/_emerge/unmerge.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -457,9 +457,6 @@ def _unmerge_display(root_config, myopts, unmerge_action,
writemsg_level(colorize("WARN","!!! Unmerging it may " + \
"be damaging to your system.\n\n"),
level=logging.WARNING, noiselevel=-1)
- if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
- countdown(int(settings["EMERGE_WARNING_DELAY"]),
- colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
if not quiet:
writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
else:
diff --git a/pym/portage/__init__.py b/pym/portage/__init__.py
index 46bdc961c..fdbc4a8c2 100644
--- a/pym/portage/__init__.py
+++ b/pym/portage/__init__.py
@@ -1,8 +1,9 @@
-# portage.py -- core Portage functionality
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-VERSION="HEAD"
+from __future__ import unicode_literals
+
+VERSION = "HEAD"
# ===========================================================================
# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
@@ -16,14 +17,6 @@ try:
errno.ESTALE = -1
import re
import types
-
- # Try the commands module first, since this allows us to eliminate
- # the subprocess module from the baseline imports under python2.
- try:
- from commands import getstatusoutput as subprocess_getstatusoutput
- except ImportError:
- from subprocess import getstatusoutput as subprocess_getstatusoutput
-
import platform
# Temporarily delete these imports, to ensure that only the
@@ -41,7 +34,7 @@ except ImportError as e:
sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
- sys.stderr.write(" "+str(e)+"\n\n");
+ sys.stderr.write(" "+str(e)+"\n\n")
raise
try:
@@ -70,6 +63,7 @@ try:
'match_from_list,match_to_list',
'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
'portage.eclass_cache',
+ 'portage.elog',
'portage.exception',
'portage.getbinpkg',
'portage.locks',
@@ -114,6 +108,7 @@ try:
'cpv_getkey@getCPFromCPV,endversion_keys,' + \
'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
'portage.xpak',
+ 'subprocess',
'time',
)
@@ -145,6 +140,7 @@ except ImportError as e:
raise
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
@@ -178,6 +174,15 @@ _encodings = {
}
if sys.hexversion >= 0x3000000:
+
+ def _decode_argv(argv):
+ # With Python 3, the surrogateescape encoding error handler makes it
+ # possible to access the original argv bytes, which can be useful
+ # if their actual encoding does not match the filesystem encoding.
+ fs_encoding = sys.getfilesystemencoding()
+ return [_unicode_decode(x.encode(fs_encoding, 'surrogateescape'))
+ for x in argv]
+
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, str):
s = s.encode(encoding, errors)
@@ -187,7 +192,13 @@ if sys.hexversion >= 0x3000000:
if isinstance(s, bytes):
s = str(s, encoding=encoding, errors=errors)
return s
+
+ _native_string = _unicode_decode
else:
+
+ def _decode_argv(argv):
+ return [_unicode_decode(x) for x in argv]
+
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, unicode):
s = s.encode(encoding, errors)
@@ -198,6 +209,17 @@ else:
s = unicode(s, encoding=encoding, errors=errors)
return s
+ _native_string = _unicode_encode
+
+if sys.hexversion >= 0x20605f0:
+ def _native_kwargs(kwargs):
+ return kwargs
+else:
+ # Avoid "TypeError: keywords must be strings" issue triggered
+ # by unicode_literals: http://bugs.python.org/issue4978
+ def _native_kwargs(kwargs):
+ return dict((_native_string(k), v) for k, v in kwargs.iteritems())
+
class _unicode_func_wrapper(object):
"""
Wraps a function, converts arguments from unicode to bytes,
@@ -215,7 +237,7 @@ class _unicode_func_wrapper(object):
self._func = func
self._encoding = encoding
- def __call__(self, *args, **kwargs):
+ def _process_args(self, args, kwargs):
encoding = self._encoding
wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
@@ -227,6 +249,13 @@ class _unicode_func_wrapper(object):
else:
wrapped_kwargs = {}
+ return (wrapped_args, wrapped_kwargs)
+
+ def __call__(self, *args, **kwargs):
+
+ encoding = self._encoding
+ wrapped_args, wrapped_kwargs = self._process_args(args, kwargs)
+
rval = self._func(*wrapped_args, **wrapped_kwargs)
# Don't use isinstance() since we don't want to convert subclasses
@@ -294,12 +323,17 @@ class _unicode_module_wrapper(object):
import os as _os
_os_overrides = {
id(_os.fdopen) : _os.fdopen,
- id(_os.mkfifo) : _os.mkfifo,
id(_os.popen) : _os.popen,
id(_os.read) : _os.read,
id(_os.system) : _os.system,
}
+
+try:
+ _os_overrides[id(_os.mkfifo)] = _os.mkfifo
+except AttributeError:
+ pass # Jython
+
if hasattr(_os, 'statvfs'):
_os_overrides[id(_os.statvfs)] = _os.statvfs
@@ -334,6 +368,25 @@ except (ImportError, OSError) as e:
_python_interpreter = os.path.realpath(sys.executable)
_bin_path = PORTAGE_BIN_PATH
_pym_path = PORTAGE_PYM_PATH
+_not_installed = os.path.isfile(os.path.join(PORTAGE_BASE_PATH, ".portage_not_installed"))
+
+# API consumers included in portage should set this to True.
+_internal_caller = False
+
+_sync_mode = False
+
+def _get_stdin():
+ """
+ Buggy code in python's multiprocessing/process.py closes sys.stdin
+ and reassigns it to open(os.devnull), but fails to update the
+ corresponding __stdin__ reference. So, detect that case and handle
+ it appropriately.
+ """
+ if not sys.__stdin__.closed:
+ return sys.__stdin__
+ return sys.stdin
+
+_shell_quote_re = re.compile(r"[\s><=*\\\"'$`]")
def _shell_quote(s):
"""
@@ -341,6 +394,8 @@ def _shell_quote(s):
escape any backslashes, double-quotes, dollar signs, or
backquotes in the string.
"""
+ if _shell_quote_re.search(s) is None:
+ return s
for letter in "\\\"$`":
if letter in s:
s = s.replace(letter, "\\" + letter)
@@ -354,8 +409,27 @@ if platform.system() in ('FreeBSD',):
@classmethod
def chflags(cls, path, flags, opts=""):
- cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
- status, output = subprocess_getstatusoutput(cmd)
+ cmd = ['chflags']
+ if opts:
+ cmd.append(opts)
+ cmd.append('%o' % (flags,))
+ cmd.append(path)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = process.find_binary(cmd[0])
+ if fullname is None:
+ raise exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ encoding = _encodings['fs']
+ cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = proc.communicate()[0]
+ status = proc.wait()
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
return
# Try to generate an ENOENT error if appropriate.
@@ -368,6 +442,7 @@ if platform.system() in ('FreeBSD',):
raise portage.exception.CommandNotFound('chflags')
# Now we're not sure exactly why it failed or what
# the real errno was, so just report EPERM.
+ output = _unicode_decode(output, encoding=encoding)
e = OSError(errno.EPERM, output)
e.errno = errno.EPERM
e.filename = path
@@ -396,20 +471,29 @@ def getcwd():
getcwd()
def abssymlink(symlink, target=None):
- "This reads symlinks, resolving the relative symlinks, and returning the absolute."
+ """
+ This reads symlinks, resolving the relative symlinks,
+ and returning the absolute.
+ @param symlink: path of symlink (must be absolute)
+ @param target: the target of the symlink (as returned
+ by readlink)
+ @rtype: str
+ @return: the absolute path of the symlink target
+ """
if target is not None:
mylink = target
else:
mylink = os.readlink(symlink)
if mylink[0] != '/':
- mydir=os.path.dirname(symlink)
- mylink=mydir+"/"+mylink
+ mydir = os.path.dirname(symlink)
+ mylink = mydir + "/" + mylink
return os.path.normpath(mylink)
_doebuild_manifest_exempt_depend = 0
-_testing_eapis = frozenset(["4-python", "4-slot-abi"])
-_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1"])
+_testing_eapis = frozenset(["4-python", "4-slot-abi", "5-progress", "5-hdepend"])
+_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1", "5_pre1", "5_pre2"])
+_supported_eapis = frozenset([str(x) for x in range(portage.const.EAPI + 1)] + list(_testing_eapis) + list(_deprecated_eapis))
def _eapi_is_deprecated(eapi):
return eapi in _deprecated_eapis
@@ -466,13 +550,13 @@ auxdbkeys = (
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
'PDEPEND', 'PROVIDE', 'EAPI',
- 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
+ 'PROPERTIES', 'DEFINED_PHASES', 'HDEPEND', 'UNUSED_04',
'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
)
-auxdbkeylen=len(auxdbkeys)
+auxdbkeylen = len(auxdbkeys)
def portageexit():
- close_portdbapi_caches()
+ pass
class _trees_dict(dict):
__slots__ = ('_running_eroot', '_target_eroot',)
@@ -483,13 +567,6 @@ class _trees_dict(dict):
def create_trees(config_root=None, target_root=None, trees=None, env=None,
eprefix=None):
- if trees is not None:
- # clean up any existing portdbapi instances
- for myroot in trees:
- portdb = trees[myroot]["porttree"].dbapi
- portdb.close_caches()
- portdbapi.portdbapi_instances.remove(portdb)
- del trees[myroot]["porttree"], myroot, portdb
if trees is None:
trees = _trees_dict()
@@ -507,7 +584,7 @@ def create_trees(config_root=None, target_root=None, trees=None, env=None,
trees._target_eroot = settings['EROOT']
myroots = [(settings['EROOT'], settings)]
- if settings["ROOT"] == "/":
+ if settings["ROOT"] == "/" and settings["EPREFIX"] == const.EPREFIX:
trees._running_eroot = trees._target_eroot
else:
@@ -515,15 +592,15 @@ def create_trees(config_root=None, target_root=None, trees=None, env=None,
# environment to apply to the config that's associated
# with ROOT != "/", so pass a nearly empty dict for the env parameter.
clean_env = {}
- for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
- 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+ for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_REPOSITORIES', 'PORTAGE_USERNAME',
+ 'PYTHONPATH', 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
'ftp_proxy', 'http_proxy', 'no_proxy',
'__PORTAGE_TEST_HARDLINK_LOCKS'):
v = settings.get(k)
if v is not None:
clean_env[k] = v
settings = config(config_root=None, target_root="/",
- env=clean_env, eprefix=eprefix)
+ env=clean_env, eprefix=None)
settings.lock()
trees._running_eroot = settings['EROOT']
myroots.append((settings['EROOT'], settings))
@@ -547,11 +624,17 @@ if VERSION == 'HEAD':
if VERSION is not self:
return VERSION
if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
- status, output = subprocess_getstatusoutput((
- "cd %s ; git describe --tags || exit $? ; " + \
+ encoding = _encodings['fs']
+ cmd = [BASH_BINARY, "-c", ("cd %s ; git describe --tags || exit $? ; " + \
"if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
"then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
- "exit 0") % _shell_quote(PORTAGE_BASE_PATH))
+ "exit 0") % _shell_quote(PORTAGE_BASE_PATH)]
+ cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
output_lines = output.splitlines()
if output_lines:
@@ -561,7 +644,7 @@ if VERSION == 'HEAD':
patchlevel = False
if len(version_split) > 1:
patchlevel = True
- VERSION = "%s_p%s" %(VERSION, version_split[1])
+ VERSION = "%s_p%s" % (VERSION, version_split[1])
if len(output_lines) > 1 and output_lines[1] == 'modified':
head_timestamp = None
if len(output_lines) > 3:
@@ -580,34 +663,17 @@ if VERSION == 'HEAD':
return VERSION
VERSION = _LazyVersion()
-if "_legacy_globals_constructed" in globals():
- # The module has been reloaded, so perform any relevant cleanup
- # and prevent memory leaks.
- if "db" in _legacy_globals_constructed:
- try:
- db
- except NameError:
- pass
- else:
- if isinstance(db, dict) and db:
- for _x in db.values():
- try:
- if "porttree" in _x.lazy_items:
- continue
- except (AttributeError, TypeError):
- continue
- try:
- _x = _x["porttree"].dbapi
- except (AttributeError, KeyError):
- continue
- if not isinstance(_x, portdbapi):
- continue
- _x.close_caches()
- try:
- portdbapi.portdbapi_instances.remove(_x)
- except ValueError:
- pass
- del _x
+_legacy_global_var_names = ("archlist", "db", "features",
+ "groups", "mtimedb", "mtimedbfile", "pkglines",
+ "portdb", "profiledir", "root", "selinux_enabled",
+ "settings", "thirdpartymirrors")
+
+def _reset_legacy_globals():
+
+ global _legacy_globals_constructed
+ _legacy_globals_constructed = set()
+ for k in _legacy_global_var_names:
+ globals()[k] = _LegacyGlobalProxy(k)
class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
@@ -622,16 +688,7 @@ class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
from portage._legacy_globals import _get_legacy_global
return _get_legacy_global(name)
-_legacy_global_var_names = ("archlist", "db", "features",
- "groups", "mtimedb", "mtimedbfile", "pkglines",
- "portdb", "profiledir", "root", "selinux_enabled",
- "settings", "thirdpartymirrors")
-
-for k in _legacy_global_var_names:
- globals()[k] = _LegacyGlobalProxy(k)
-del k
-
-_legacy_globals_constructed = set()
+_reset_legacy_globals()
def _disable_legacy_globals():
"""
diff --git a/pym/portage/_emirrordist/Config.py b/pym/portage/_emirrordist/Config.py
new file mode 100644
index 000000000..db4bfebd4
--- /dev/null
+++ b/pym/portage/_emirrordist/Config.py
@@ -0,0 +1,132 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+import io
+import logging
+import shelve
+import sys
+import time
+
+import portage
+from portage import os
+from portage.util import grabdict, grablines
+from portage.util._ShelveUnicodeWrapper import ShelveUnicodeWrapper
+
+class Config(object):
+ def __init__(self, options, portdb, event_loop):
+ self.options = options
+ self.portdb = portdb
+ self.event_loop = event_loop
+ self.added_byte_count = 0
+ self.added_file_count = 0
+ self.scheduled_deletion_count = 0
+ self.delete_count = 0
+ self.file_owners = {}
+ self.file_failures = {}
+ self.start_time = time.time()
+ self._open_files = []
+
+ self.log_success = self._open_log('success', options.success_log, 'a')
+ self.log_failure = self._open_log('failure', options.failure_log, 'a')
+
+ self.distfiles = None
+ if options.distfiles is not None:
+ self.distfiles = options.distfiles
+
+ self.mirrors = copy.copy(portdb.settings.thirdpartymirrors())
+
+ if options.mirror_overrides is not None:
+ self.mirrors.update(grabdict(options.mirror_overrides))
+
+ if options.mirror_skip is not None:
+ for x in options.mirror_skip.split(","):
+ self.mirrors[x] = []
+
+ self.whitelist = None
+ if options.whitelist_from is not None:
+ self.whitelist = set()
+ for filename in options.whitelist_from:
+ for line in grablines(filename):
+ line = line.strip()
+ if line and not line.startswith("#"):
+ self.whitelist.add(line)
+
+ self.restrict_mirror_exemptions = None
+ if options.restrict_mirror_exemptions is not None:
+ self.restrict_mirror_exemptions = frozenset(
+ options.restrict_mirror_exemptions.split(","))
+
+ self.recycle_db = None
+ if options.recycle_db is not None:
+ self.recycle_db = self._open_shelve(
+ options.recycle_db, 'recycle')
+
+ self.distfiles_db = None
+ if options.distfiles_db is not None:
+ self.distfiles_db = self._open_shelve(
+ options.distfiles_db, 'distfiles')
+
+ self.deletion_db = None
+ if options.deletion_db is not None:
+ self.deletion_db = self._open_shelve(
+ options.deletion_db, 'deletion')
+
+ def _open_log(self, log_desc, log_path, mode):
+
+ if log_path is None or self.options.dry_run:
+ log_func = logging.info
+ line_format = "%s: %%s" % log_desc
+ if log_path is not None:
+ logging.warn(("dry-run: %s log "
+ "redirected to logging.info") % log_desc)
+ else:
+ self._open_files.append(io.open(log_path, mode=mode,
+ encoding='utf_8'))
+ line_format = "%s\n"
+ log_func = self._open_files[-1].write
+
+ return self._LogFormatter(line_format, log_func)
+
+ class _LogFormatter(object):
+
+ __slots__ = ('_line_format', '_log_func')
+
+ def __init__(self, line_format, log_func):
+ self._line_format = line_format
+ self._log_func = log_func
+
+ def __call__(self, msg):
+ self._log_func(self._line_format % (msg,))
+
+ def _open_shelve(self, db_file, db_desc):
+ if self.options.dry_run:
+ open_flag = "r"
+ else:
+ open_flag = "c"
+
+ if self.options.dry_run and not os.path.exists(db_file):
+ db = {}
+ else:
+ db = shelve.open(db_file, flag=open_flag)
+ if sys.hexversion < 0x3000000:
+ db = ShelveUnicodeWrapper(db)
+
+ if self.options.dry_run:
+ logging.warn("dry-run: %s db opened in readonly mode" % db_desc)
+ if not isinstance(db, dict):
+ volatile_db = dict((k, db[k]) for k in db)
+ db.close()
+ db = volatile_db
+ else:
+ self._open_files.append(db)
+
+ return db
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ while self._open_files:
+ self._open_files.pop().close()
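The dry-run branch of _open_shelve above amounts to snapshotting a read-only shelve into a plain dict so that subsequent writes stay in memory. A condensed sketch of just that decision (illustrative helper, not part of the module):

    import shelve

    def open_db(path, dry_run):
        db = shelve.open(path, flag="r" if dry_run else "c")
        if dry_run:
            volatile = dict((k, db[k]) for k in db)   # snapshot the contents
            db.close()
            return volatile    # writes now affect only this process
        return db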
diff --git a/pym/portage/_emirrordist/DeletionIterator.py b/pym/portage/_emirrordist/DeletionIterator.py
new file mode 100644
index 000000000..dff52c042
--- /dev/null
+++ b/pym/portage/_emirrordist/DeletionIterator.py
@@ -0,0 +1,83 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import stat
+
+from portage import os
+from .DeletionTask import DeletionTask
+
+class DeletionIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+
+ def __iter__(self):
+ distdir = self._config.options.distfiles
+ file_owners = self._config.file_owners
+ whitelist = self._config.whitelist
+ distfiles_local = self._config.options.distfiles_local
+ deletion_db = self._config.deletion_db
+ deletion_delay = self._config.options.deletion_delay
+ start_time = self._config.start_time
+		distfiles_set = set(os.listdir(distdir))
+ for filename in distfiles_set:
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError as e:
+ logging.error("stat failed on '%s' in distfiles: %s\n" %
+ (filename, e))
+ continue
+ if not stat.S_ISREG(st.st_mode):
+ continue
+ elif filename in file_owners:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif whitelist is not None and filename in whitelist:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif distfiles_local is not None and \
+ os.path.exists(os.path.join(distfiles_local, filename)):
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ self._config.scheduled_deletion_count += 1
+
+ if deletion_db is None or deletion_delay is None:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ else:
+ deletion_entry = deletion_db.get(filename)
+
+ if deletion_entry is None:
+ logging.debug("add '%s' to deletion db" % filename)
+ deletion_db[filename] = start_time
+
+ elif deletion_entry + deletion_delay <= start_time:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ if deletion_db is not None:
+ for filename in list(deletion_db):
+ if filename not in distfiles_set:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug("drop '%s' from deletion db" %
+ filename)
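The scheduling rule implemented by the iterator above is that an unreferenced file is deleted only after it has remained unreferenced for deletion_delay seconds across runs, with the deletion db recording when the clock started. A condensed sketch of that rule (plain unix timestamps, illustrative names):

    def should_delete(deletion_db, filename, now, delay):
        first_seen = deletion_db.get(filename)
        if first_seen is None:
            deletion_db[filename] = now    # start the clock on first sighting
            return False
        return first_seen + delay <= now

    db = {}
    assert not should_delete(db, "foo.tar.gz", now=1000, delay=100)
    assert not should_delete(db, "foo.tar.gz", now=1050, delay=100)
    assert should_delete(db, "foo.tar.gz", now=1100, delay=100)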
diff --git a/pym/portage/_emirrordist/DeletionTask.py b/pym/portage/_emirrordist/DeletionTask.py
new file mode 100644
index 000000000..7d10957fa
--- /dev/null
+++ b/pym/portage/_emirrordist/DeletionTask.py
@@ -0,0 +1,129 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from _emerge.CompositeTask import CompositeTask
+
+class DeletionTask(CompositeTask):
+
+ __slots__ = ('distfile', 'config')
+
+ def _start(self):
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ if self.config.options.recycle_dir is not None:
+ recycle_path = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+ if self.config.options.dry_run:
+ logging.info(("dry-run: move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ else:
+ logging.debug(("move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ try:
+ os.rename(distfile_path, recycle_path)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ logging.error(("rename %s from distfiles to "
+ "recycle failed: %s") % (self.distfile, e))
+ else:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_task(
+ FileCopier(src_path=distfile_path,
+ dest_path=recycle_path,
+ background=False),
+ self._recycle_copier_exit)
+ return
+
+ success = True
+
+ if self.config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "distfiles") % self.distfile)
+ else:
+ logging.debug(("delete '%s' from "
+ "distfiles") % self.distfile)
+ try:
+ os.unlink(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._async_wait()
+
+ def _recycle_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ success = True
+ if copier.returncode == os.EX_OK:
+
+ try:
+ os.unlink(copier.src_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ else:
+ logging.error(("%s copy from distfiles "
+ "to recycle failed: %s") % (self.distfile, e))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._current_task = None
+ self.wait()
+
+ def _success(self):
+
+ cpv = "unknown"
+ if self.config.distfiles_db is not None:
+ cpv = self.config.distfiles_db.get(self.distfile, cpv)
+
+ self.config.delete_count += 1
+ self.config.log_success("%s\t%s\tremoved" % (cpv, self.distfile))
+
+ if self.config.distfiles_db is not None:
+ try:
+ del self.config.distfiles_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "distfiles db") % self.distfile)
+
+ if self.config.deletion_db is not None:
+ try:
+ del self.config.deletion_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "deletion db") % self.distfile)
diff --git a/pym/portage/_emirrordist/FetchIterator.py b/pym/portage/_emirrordist/FetchIterator.py
new file mode 100644
index 000000000..16a0b04c9
--- /dev/null
+++ b/pym/portage/_emirrordist/FetchIterator.py
@@ -0,0 +1,147 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.dep import use_reduce
+from portage.exception import PortageException
+from .FetchTask import FetchTask
+
+class FetchIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+ self._log_failure = config.log_failure
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding quicker,
+ # and in order to reduce latency in case of a signal interrupt.
+ cp_all = self._config.portdb.cp_all
+ for category in sorted(self._config.portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def __iter__(self):
+
+ portdb = self._config.portdb
+ get_repo_for_location = portdb.repositories.get_repo_for_location
+ file_owners = self._config.file_owners
+ file_failures = self._config.file_failures
+ restrict_mirror_exemptions = self._config.restrict_mirror_exemptions
+
+ hash_filter = _hash_filter(
+ portdb.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+
+ for cp in self._iter_every_cp():
+
+ for tree in portdb.porttrees:
+
+ # Reset state so the Manifest is pulled once
+ # for this cp / tree combination.
+ digests = None
+ repo_config = get_repo_for_location(tree)
+
+ for cpv in portdb.cp_list(cp, mytree=tree):
+
+ try:
+ restrict, = portdb.aux_get(cpv, ("RESTRICT",),
+ mytree=tree)
+ except (KeyError, PortageException) as e:
+ self._log_failure("%s\t\taux_get exception %s" %
+ (cpv, e))
+ continue
+
+ # Here we use matchnone=True to ignore conditional parts
+ # of RESTRICT since they don't apply unconditionally.
+ # Assume such conditionals only apply on the client side.
+ try:
+ restrict = frozenset(use_reduce(restrict,
+ flat=True, matchnone=True))
+ except PortageException as e:
+ self._log_failure("%s\t\tuse_reduce exception %s" %
+ (cpv, e))
+ continue
+
+ if "fetch" in restrict:
+ continue
+
+ try:
+ uri_map = portdb.getFetchMap(cpv)
+ except PortageException as e:
+ self._log_failure("%s\t\tgetFetchMap exception %s" %
+ (cpv, e))
+ continue
+
+ if not uri_map:
+ continue
+
+ if "mirror" in restrict:
+ skip = False
+ if restrict_mirror_exemptions is not None:
+ new_uri_map = {}
+ for filename, uri_tuple in uri_map.items():
+ for uri in uri_tuple:
+ if uri[:9] == "mirror://":
+ i = uri.find("/", 9)
+ if i != -1 and uri[9:i].strip("/") in \
+ restrict_mirror_exemptions:
+ new_uri_map[filename] = uri_tuple
+ break
+ if new_uri_map:
+ uri_map = new_uri_map
+ else:
+ skip = True
+ else:
+ skip = True
+
+ if skip:
+ continue
+
+ # Parse Manifest for this cp if we haven't yet.
+ if digests is None:
+ try:
+ digests = repo_config.load_manifest(
+ os.path.join(repo_config.location, cp)
+ ).getTypeDigests("DIST")
+ except (EnvironmentError, PortageException) as e:
+ for filename in uri_map:
+ self._log_failure(
+ "%s\t%s\tManifest exception %s" %
+ (cpv, filename, e))
+ file_failures[filename] = cpv
+ continue
+
+ if not digests:
+ for filename in uri_map:
+ self._log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ file_failures[filename] = cpv
+ continue
+
+ for filename, uri_tuple in uri_map.items():
+ file_digests = digests.get(filename)
+ if file_digests is None:
+ self._log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ file_failures[filename] = cpv
+ continue
+ if filename in file_owners:
+ continue
+ file_owners[filename] = cpv
+
+ file_digests = \
+ _filter_unaccelarated_hashes(file_digests)
+ if hash_filter is not None:
+ file_digests = _apply_hash_filter(
+ file_digests, hash_filter)
+
+ yield FetchTask(cpv=cpv,
+ background=True,
+ digests=file_digests,
+ distfile=filename,
+ restrict=restrict,
+ uri_tuple=uri_tuple,
+ config=self._config)
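Both the RESTRICT=mirror exemption check above and FetchTask's _mirror_iterator apply the same parsing rule: the mirror name is the path component between "mirror://" and the next slash. A small sketch of that rule in isolation:

    def mirror_name(uri):
        if uri[:9] != "mirror://":
            return None
        i = uri.find("/", 9)
        return uri[9:i].strip("/") if i != -1 else None

    assert mirror_name("mirror://gnu/bash/bash-4.2.tar.gz") == "gnu"
    assert mirror_name("http://example.org/f.tar.gz") is None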
diff --git a/pym/portage/_emirrordist/FetchTask.py b/pym/portage/_emirrordist/FetchTask.py
new file mode 100644
index 000000000..66c41c1a2
--- /dev/null
+++ b/pym/portage/_emirrordist/FetchTask.py
@@ -0,0 +1,629 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+import errno
+import logging
+import random
+import stat
+import subprocess
+import sys
+
+import portage
+from portage import _encodings, _unicode_encode
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from portage.util._async.FileDigester import FileDigester
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+
+default_hash_name = portage.const.MANIFEST2_REQUIRED_HASH
+
+# Use --no-check-certificate since Manifest digests should provide
+# enough security, and certificates can be self-signed or otherwise untrusted.
+default_fetchcommand = "wget -c -v -t 1 --passive-ftp --no-check-certificate --timeout=60 -O \"${DISTDIR}/${FILE}\" \"${URI}\""
+
+class FetchTask(CompositeTask):
+
+ __slots__ = ('distfile', 'digests', 'config', 'cpv',
+ 'restrict', 'uri_tuple', '_current_mirror',
+ '_current_stat', '_fetch_tmp_dir_info', '_fetch_tmp_file',
+ '_fs_mirror_stack', '_mirror_stack',
+ '_previously_added',
+ '_primaryuri_stack', '_log_path', '_tried_uris')
+
+ def _start(self):
+
+ if self.config.options.fetch_log_dir is not None and \
+ not self.config.options.dry_run:
+ self._log_path = os.path.join(
+ self.config.options.fetch_log_dir,
+ self.distfile + '.log')
+
+ self._previously_added = True
+ if self.config.distfiles_db is not None and \
+ self.distfile not in self.config.distfiles_db:
+ self._previously_added = False
+ self.config.distfiles_db[self.distfile] = self.cpv
+
+ if not self._have_needed_digests():
+ msg = "incomplete digests: %s" % " ".join(self.digests)
+ self.scheduler.output(msg, background=self.background,
+ log_path=self._log_path)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, "distfiles", e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+
+ if not size_ok:
+ if self.config.options.dry_run:
+ if st is not None:
+ logging.info(("dry-run: delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ # Do the unlink in order to ensure that the path is clear,
+ # even if stat raised ENOENT, since a broken symlink can
+ # trigger ENOENT.
+ if self._unlink_file(distfile_path, "distfiles"):
+ if st is not None:
+ logging.debug(("delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, "unlink failed in distfiles"))
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ if size_ok:
+ if self.config.options.verify_existing_digest:
+ self._start_task(
+ FileDigester(file_path=distfile_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path), self._distfiles_digester_exit)
+ return
+
+ self._success()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_fetch()
+
+ def _success(self):
+ if not self._previously_added:
+ size = self.digests["size"]
+ self.config.added_byte_count += size
+ self.config.added_file_count += 1
+ self.config.log_success("%s\t%s\tadded %i bytes" %
+ (self.cpv, self.distfile, size))
+
+ if self._log_path is not None:
+ if not self.config.options.dry_run:
+ try:
+ os.unlink(self._log_path)
+ except OSError:
+ pass
+
+ if self.config.options.recycle_dir is not None:
+
+ recycle_file = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+
+ if self.config.options.dry_run:
+ if os.path.exists(recycle_file):
+ logging.info("dry-run: delete '%s' from recycle" %
+ (self.distfile,))
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError:
+ pass
+ else:
+ logging.debug("delete '%s' from recycle" %
+ (self.distfile,))
+
+ def _distfiles_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if self._default_exit(digester) != os.EX_OK:
+ # IOError reading file in our main distfiles directory? This
+ # is a bad situation which normally does not occur, so
+			# skip this file and report it, in order to draw the
+			# administrator's attention.
+ msg = "%s distfiles digester failed unexpectedly" % \
+ (self.distfile,)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.wait()
+ return
+
+ wrong_digest = self._find_bad_digest(digester.digests)
+ if wrong_digest is None:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._start_fetch()
+
+ _mirror_info = collections.namedtuple('_mirror_info',
+ 'name location')
+
+ def _start_fetch(self):
+
+ self._previously_added = False
+ self._fs_mirror_stack = []
+ if self.config.options.distfiles_local is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'distfiles-local', self.config.options.distfiles_local))
+ if self.config.options.recycle_dir is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'recycle', self.config.options.recycle_dir))
+
+ self._primaryuri_stack = []
+ self._mirror_stack = []
+ for uri in reversed(self.uri_tuple):
+ if uri.startswith('mirror://'):
+ self._mirror_stack.append(
+ self._mirror_iterator(uri, self.config.mirrors))
+ else:
+ self._primaryuri_stack.append(uri)
+
+ self._tried_uris = set()
+ self._try_next_mirror()
+
+ @staticmethod
+ def _mirror_iterator(uri, mirrors_dict):
+
+ slash_index = uri.find("/", 9)
+ if slash_index != -1:
+ mirror_name = uri[9:slash_index].strip("/")
+ mirrors = mirrors_dict.get(mirror_name)
+ if not mirrors:
+ return
+ mirrors = list(mirrors)
+ while mirrors:
+ mirror = mirrors.pop(random.randint(0, len(mirrors) - 1))
+ yield mirror.rstrip("/") + "/" + uri[slash_index+1:]
+
+ def _try_next_mirror(self):
+ if self._fs_mirror_stack:
+ self._fetch_fs(self._fs_mirror_stack.pop())
+ return
+ else:
+ uri = self._next_uri()
+ if uri is not None:
+ self._tried_uris.add(uri)
+ self._fetch_uri(uri)
+ return
+
+ if self._tried_uris:
+ msg = "all uris failed"
+ else:
+ msg = "no fetchable uris"
+
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _next_uri(self):
+ remaining_tries = self.config.options.tries - len(self._tried_uris)
+ if remaining_tries > 0:
+
+ if remaining_tries <= self.config.options.tries / 2:
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._mirror_stack:
+ uri = next(self._mirror_stack[-1], None)
+ if uri is None:
+ self._mirror_stack.pop()
+ else:
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ return None
+
+ def _fetch_fs(self, mirror_info):
+ file_path = os.path.join(mirror_info.location, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, mirror_info.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+ self._current_stat = st
+
+ if size_ok:
+ self._current_mirror = mirror_info
+ self._start_task(
+ FileDigester(file_path=file_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fs_mirror_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fs_mirror_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s %s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, current_mirror.name, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ elif self.config.options.dry_run:
+ # Report success without actually touching any files
+ if self._same_device(current_mirror.location,
+ self.config.options.distfiles):
+ logging.info(("dry-run: hardlink '%s' from %s "
+ "to distfiles") % (self.distfile, current_mirror.name))
+ else:
+ logging.info("dry-run: copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ src = os.path.join(current_mirror.location, self.distfile)
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ if self._hardlink_atomic(src, dest,
+ "%s to %s" % (current_mirror.name, "distfiles")):
+ logging.debug("hardlink '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ self._start_task(
+ FileCopier(src_path=src, dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fs_mirror_copier_exit)
+ return
+
+ self._try_next_mirror()
+
+ def _fs_mirror_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if copier.returncode != os.EX_OK:
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+
+ logging.debug("copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+
+ # Apply the timestamp from the source file, but
+ # just rely on umask for permissions.
+ try:
+ if sys.hexversion >= 0x3030000:
+ os.utime(copier.dest_path,
+ ns=(self._current_stat.st_mtime_ns,
+ self._current_stat.st_mtime_ns))
+ else:
+ os.utime(copier.dest_path,
+ (self._current_stat[stat.ST_MTIME],
+ self._current_stat[stat.ST_MTIME]))
+ except OSError as e:
+ msg = "%s %s utime failed unexpectedly: %s" % \
+ (self.distfile, current_mirror.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_uri(self, uri):
+
+ if self.config.options.dry_run:
+ # Simply report success.
+ logging.info("dry-run: fetch '%s' from '%s'" %
+ (self.distfile, uri))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ if self.config.options.temp_dir:
+ self._fetch_tmp_dir_info = 'temp-dir'
+ distdir = self.config.options.temp_dir
+ else:
+ self._fetch_tmp_dir_info = 'distfiles'
+ distdir = self.config.options.distfiles
+
+ tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()
+
+ variables = {
+ "DISTDIR": distdir,
+ "URI": uri,
+ "FILE": tmp_basename
+ }
+
+ self._fetch_tmp_file = os.path.join(distdir, tmp_basename)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ args = portage.util.shlex_split(default_fetchcommand)
+ args = [portage.util.varexpand(x, mydict=variables)
+ for x in args]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in args]
+
+ null_fd = os.open(os.devnull, os.O_RDONLY)
+ fetcher = PopenProcess(background=self.background,
+ proc=subprocess.Popen(args, stdin=null_fd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=self.scheduler)
+ os.close(null_fd)
+
+ fetcher.pipe_reader = PipeLogger(background=self.background,
+ input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
+ scheduler=self.scheduler)
+
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ self._assert_current(fetcher)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if os.path.exists(self._fetch_tmp_file):
+ self._start_task(
+ FileDigester(file_path=self._fetch_tmp_file,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fetch_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fetch_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+ else:
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ try:
+ os.rename(self._fetch_tmp_file, dest)
+ except OSError:
+ self._start_task(
+ FileCopier(src_path=self._fetch_tmp_file,
+ dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fetch_copier_exit)
+ return
+ else:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_copier_exit(self, copier):
+
+ self._assert_current(copier)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if copier.returncode == os.EX_OK:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ else:
+ # out of space?
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = 1
+ self.wait()
+
+ def _unlink_file(self, file_path, dir_info):
+ try:
+ os.unlink(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "unlink '%s' failed in %s: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ return True
+
+ def _have_needed_digests(self):
+ return "size" in self.digests and \
+ self._select_hash() is not None
+
+ def _select_hash(self):
+ if default_hash_name in self.digests:
+ return default_hash_name
+ else:
+ for hash_name in self.digests:
+ if hash_name != "size" and \
+ hash_name in portage.checksum.hashfunc_map:
+ return hash_name
+
+ return None
+
+ def _find_bad_digest(self, digests):
+ for hash_name, hash_value in digests.items():
+ if self.digests[hash_name] != hash_value:
+ return hash_name
+ return None
+
+ @staticmethod
+ def _same_device(path1, path2):
+ try:
+ st1 = os.stat(path1)
+ st2 = os.stat(path2)
+ except OSError:
+ return False
+ else:
+ return st1.st_dev == st2.st_dev
+
+ def _hardlink_atomic(self, src, dest, dir_info):
+
+ head, tail = os.path.split(dest)
+ hardlink_tmp = os.path.join(head, ".%s._mirrordist_hardlink_.%s" % \
+ (tail, os.getpid()))
+
+ try:
+ try:
+ os.link(src, hardlink_tmp)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ msg = "hardlink %s from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+
+ try:
+ os.rename(hardlink_tmp, dest)
+ except OSError as e:
+ msg = "hardlink rename '%s' from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ finally:
+ try:
+ os.unlink(hardlink_tmp)
+ except OSError:
+ pass
+
+ return True
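_hardlink_atomic above uses the link-then-rename idiom: create the hardlink under a temporary name and rename it over the destination, so other processes never observe a partially materialized path, while EXDEV tells the caller to copy instead. A minimal sketch of the same idiom (temporary naming is illustrative):

    import errno, os

    def hardlink_atomic(src, dest):
        tmp = os.path.join(os.path.dirname(dest),
            ".%s.__tmp__.%s" % (os.path.basename(dest), os.getpid()))
        try:
            try:
                os.link(src, tmp)
            except OSError as e:
                if e.errno == errno.EXDEV:
                    return False       # different device: caller should copy
                raise
            os.rename(tmp, dest)       # atomic replacement of dest
            return True
        finally:
            try:
                os.unlink(tmp)         # no-op after a successful rename
            except OSError:
                pass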
diff --git a/pym/portage/_emirrordist/MirrorDistTask.py b/pym/portage/_emirrordist/MirrorDistTask.py
new file mode 100644
index 000000000..571caa52d
--- /dev/null
+++ b/pym/portage/_emirrordist/MirrorDistTask.py
@@ -0,0 +1,219 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import sys
+import time
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.util._async.TaskScheduler import TaskScheduler
+from _emerge.CompositeTask import CompositeTask
+from .FetchIterator import FetchIterator
+from .DeletionIterator import DeletionIterator
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class MirrorDistTask(CompositeTask):
+
+ __slots__ = ('_config', '_terminated', '_term_check_id')
+
+ def __init__(self, config):
+ CompositeTask.__init__(self, scheduler=config.event_loop)
+ self._config = config
+ self._terminated = threading.Event()
+
+ def _start(self):
+ self._term_check_id = self.scheduler.idle_add(self._termination_check)
+ fetch = TaskScheduler(iter(FetchIterator(self._config)),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(fetch, self._fetch_exit)
+
+ def _fetch_exit(self, fetch):
+
+ self._assert_current(fetch)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if self._config.options.delete:
+ deletion = TaskScheduler(iter(DeletionIterator(self._config)),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(deletion, self._deletion_exit)
+ return
+
+ self._post_deletion()
+
+ def _deletion_exit(self, deletion):
+
+ self._assert_current(deletion)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ self._post_deletion()
+
+ def _post_deletion(self):
+
+ if self._config.options.recycle_db is not None:
+ self._update_recycle_db()
+
+ if self._config.options.scheduled_deletion_log is not None:
+ self._scheduled_deletion_log()
+
+ self._summary()
+
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+
+ def _update_recycle_db(self):
+
+ start_time = self._config.start_time
+ recycle_dir = self._config.options.recycle_dir
+ recycle_db = self._config.recycle_db
+ r_deletion_delay = self._config.options.recycle_deletion_delay
+
+		# Use a dict to optimize access.
+ recycle_db_cache = dict(recycle_db.items())
+
+ for filename in os.listdir(recycle_dir):
+
+ recycle_file = os.path.join(recycle_dir, filename)
+
+ try:
+ st = os.stat(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("stat failed for '%s' in "
+ "recycle: %s") % (filename, e))
+ continue
+
+ value = recycle_db_cache.pop(filename, None)
+ if value is None:
+ logging.debug(("add '%s' to "
+ "recycle db") % filename)
+ recycle_db[filename] = (st.st_size, start_time)
+ else:
+ r_size, r_time = value
+ if long(r_size) != st.st_size:
+ recycle_db[filename] = (st.st_size, start_time)
+ elif r_time + r_deletion_delay < start_time:
+ if self._config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "recycle") % filename)
+ logging.info(("drop '%s' from "
+ "recycle db") % filename)
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("delete '%s' from "
+ "recycle failed: %s") % (filename, e))
+ else:
+ logging.debug(("delete '%s' from "
+ "recycle") % filename)
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "recycle db") % filename)
+
+ # Existing files were popped from recycle_db_cache,
+ # so any remaining entries are for files that no
+ # longer exist.
+ for filename in recycle_db_cache:
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop non-existent '%s' from "
+ "recycle db") % filename)
+
+ def _scheduled_deletion_log(self):
+
+ start_time = self._config.start_time
+ dry_run = self._config.options.dry_run
+ deletion_delay = self._config.options.deletion_delay
+ distfiles_db = self._config.distfiles_db
+
+ date_map = {}
+ for filename, timestamp in self._config.deletion_db.items():
+ date = timestamp + deletion_delay
+ if date < start_time:
+ date = start_time
+ date = time.strftime("%Y-%m-%d", time.gmtime(date))
+ date_files = date_map.get(date)
+ if date_files is None:
+ date_files = []
+ date_map[date] = date_files
+ date_files.append(filename)
+
+ if dry_run:
+ logging.warn(("dry-run: scheduled-deletions log "
+ "will be summarized via logging.info"))
+
+ lines = []
+ for date in sorted(date_map):
+ date_files = date_map[date]
+ if dry_run:
+ logging.info(("dry-run: scheduled deletions for %s: %s files") %
+ (date, len(date_files)))
+ lines.append("%s\n" % date)
+ for filename in date_files:
+ cpv = "unknown"
+ if distfiles_db is not None:
+ cpv = distfiles_db.get(filename, cpv)
+ lines.append("\t%s\t%s\n" % (filename, cpv))
+
+ if not dry_run:
+ portage.util.write_atomic(
+ self._config.options.scheduled_deletion_log,
+ "".join(lines))
+
+ def _summary(self):
+ elapsed_time = time.time() - self._config.start_time
+ fail_count = len(self._config.file_failures)
+ delete_count = self._config.delete_count
+ scheduled_deletion_count = self._config.scheduled_deletion_count - delete_count
+ added_file_count = self._config.added_file_count
+ added_byte_count = self._config.added_byte_count
+
+ logging.info("finished in %i seconds" % elapsed_time)
+ logging.info("failed to fetch %i files" % fail_count)
+ logging.info("deleted %i files" % delete_count)
+ logging.info("deletion of %i files scheduled" %
+ scheduled_deletion_count)
+ logging.info("added %i files" % added_file_count)
+ logging.info("added %i bytes total" % added_byte_count)
+
+ def terminate(self):
+ self._terminated.set()
+
+ def _termination_check(self):
+ if self._terminated.is_set():
+ self.cancel()
+ self.wait()
+ return True
+
+ def _wait(self):
+ CompositeTask._wait(self)
+ if self._term_check_id is not None:
+ self.scheduler.source_remove(self._term_check_id)
+ self._term_check_id = None
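The terminate()/_termination_check() pair above implements cooperative shutdown: a signal handler merely sets a threading.Event, and an idle callback polled by the event loop notices the flag and cancels the task. A minimal sketch of the pattern (signal wiring and names are illustrative):

    import signal, threading

    terminated = threading.Event()

    def handle_term(signum, frame):
        terminated.set()           # async-signal-safe: only sets a flag

    def termination_check():
        if terminated.is_set():
            print("cancelling tasks")
        return True                # truthy return keeps the idle callback alive

    signal.signal(signal.SIGTERM, handle_term)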
diff --git a/pym/portage/_emirrordist/__init__.py b/pym/portage/_emirrordist/__init__.py
new file mode 100644
index 000000000..6cde9320b
--- /dev/null
+++ b/pym/portage/_emirrordist/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/_emirrordist/main.py b/pym/portage/_emirrordist/main.py
new file mode 100644
index 000000000..ce92c2aea
--- /dev/null
+++ b/pym/portage/_emirrordist/main.py
@@ -0,0 +1,463 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import sys
+
+import portage
+from portage import os
+from portage.util import normalize_path, writemsg_level, _recursive_file_list
+from portage.util._argparse import ArgumentParser
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from .Config import Config
+from .MirrorDistTask import MirrorDistTask
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+seconds_per_day = 24 * 60 * 60
+
+common_options = (
+ {
+ "longopt" : "--dry-run",
+ "help" : "perform a trial run with no changes made (usually combined "
+ "with --verbose)",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--verbose",
+ "shortopt" : "-v",
+ "help" : "display extra information on stderr "
+ "(multiple occurences increase verbosity)",
+ "action" : "count",
+ "default" : 0,
+ },
+ {
+ "longopt" : "--ignore-default-opts",
+ "help" : "do not use the EMIRRORDIST_DEFAULT_OPTS environment variable",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles",
+ "help" : "distfiles directory to use (required)",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--jobs",
+ "shortopt" : "-j",
+ "help" : "number of concurrent jobs to run",
+ "type" : int
+ },
+ {
+ "longopt" : "--load-average",
+ "shortopt" : "-l",
+ "help" : "load average limit for spawning of new concurrent jobs",
+ "metavar" : "LOAD",
+ "type" : float
+ },
+ {
+ "longopt" : "--tries",
+ "help" : "maximum number of tries per file, 0 means unlimited (default is 10)",
+ "default" : 10,
+ "type" : int
+ },
+ {
+ "longopt" : "--repo",
+ "help" : "name of repo to operate on"
+ },
+ {
+ "longopt" : "--config-root",
+ "help" : "location of portage config files",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--portdir",
+ "help" : "override the PORTDIR variable (deprecated in favor of --repositories-configuration)",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--portdir-overlay",
+ "help" : "override the PORTDIR_OVERLAY variable (deprecated in favor of --repositories-configuration)"
+ },
+ {
+ "longopt" : "--repositories-configuration",
+ "help" : "override configuration of repositories (in format of repos.conf)"
+ },
+ {
+ "longopt" : "--strict-manifests",
+ "help" : "manually override \"strict\" FEATURES setting",
+ "choices" : ("y", "n"),
+ "metavar" : "<y|n>",
+ },
+ {
+ "longopt" : "--failure-log",
+ "help" : "log file for fetch failures, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--success-log",
+ "help" : "log file for fetch successes, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--scheduled-deletion-log",
+ "help" : "log file for scheduled deletions, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--delete",
+ "help" : "enable deletion of unused distfiles",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--deletion-db",
+ "help" : "database file used to track lifetime of files "
+ "scheduled for delayed deletion",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--deletion-delay",
+ "help" : "delay time for deletion, measured in seconds",
+ "metavar" : "SECONDS"
+ },
+ {
+ "longopt" : "--temp-dir",
+ "help" : "temporary directory for downloads",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--mirror-overrides",
+ "help" : "file holding a list of mirror overrides",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--mirror-skip",
+ "help" : "comma delimited list of mirror targets to skip "
+ "when fetching"
+ },
+ {
+ "longopt" : "--restrict-mirror-exemptions",
+ "help" : "comma delimited list of mirror targets for which to "
+ "ignore RESTRICT=\"mirror\""
+ },
+ {
+ "longopt" : "--verify-existing-digest",
+ "help" : "use digest as a verification of whether existing "
+ "distfiles are valid",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles-local",
+ "help" : "distfiles-local directory to use",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--distfiles-db",
+ "help" : "database file used to track which ebuilds a "
+ "distfile belongs to",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-dir",
+ "help" : "directory for extended retention of files that "
+ "are removed from distdir with the --delete option",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--recycle-db",
+ "help" : "database file used to track lifetime of files "
+ "in recycle dir",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-deletion-delay",
+ "help" : "delay time for deletion of unused files from "
+ "recycle dir, measured in seconds (defaults to "
+ "the equivalent of 60 days)",
+ "default" : 60 * seconds_per_day,
+ "metavar" : "SECONDS",
+ "type" : int
+ },
+ {
+ "longopt" : "--fetch-log-dir",
+ "help" : "directory for individual fetch logs",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--whitelist-from",
+ "help" : "specifies a file containing a list of files to "
+ "whitelist, one per line, # prefixed lines ignored",
+ "action" : "append",
+ "metavar" : "FILE"
+ },
+)
+
+def parse_args(args):
+ description = "emirrordist - a fetch tool for mirroring " \
+ "of package distfiles"
+ usage = "emirrordist [options] <action>"
+ parser = ArgumentParser(description=description, usage=usage)
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("--version",
+ action="store_true",
+ help="display portage version and exit")
+ actions.add_argument("--mirror",
+ action="store_true",
+ help="mirror distfiles for the selected repository")
+
+ common = parser.add_argument_group('Common options')
+ for opt_info in common_options:
+ opt_pargs = [opt_info["longopt"]]
+ if opt_info.get("shortopt"):
+ opt_pargs.append(opt_info["shortopt"])
+ opt_kwargs = {"help" : opt_info["help"]}
+ for k in ("action", "choices", "default", "metavar", "type"):
+ if k in opt_info:
+ opt_kwargs[k] = opt_info[k]
+ common.add_argument(*opt_pargs, **opt_kwargs)
+
+ options, args = parser.parse_known_args(args)
+
+ return (parser, options, args)
+
+def emirrordist_main(args):
+
+ # The calling environment is ignored, so the program is
+ # completely controlled by commandline arguments.
+ env = {}
+
+ if not sys.stdout.isatty():
+ portage.output.nocolor()
+ env['NOCOLOR'] = 'true'
+
+ parser, options, args = parse_args(args)
+
+ if options.version:
+ sys.stdout.write("Portage %s\n" % portage.VERSION)
+ return os.EX_OK
+
+ config_root = options.config_root
+
+ if options.portdir is not None:
+ writemsg_level("emirrordist: warning: --portdir option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+ if options.portdir_overlay is not None:
+ writemsg_level("emirrordist: warning: --portdir-overlay option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+
+ if options.repositories_configuration is not None:
+ env['PORTAGE_REPOSITORIES'] = options.repositories_configuration
+ elif options.portdir_overlay is not None:
+ env['PORTDIR_OVERLAY'] = options.portdir_overlay
+
+ if options.portdir is not None:
+ env['PORTDIR'] = options.portdir
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ default_opts = None
+ if not options.ignore_default_opts:
+ default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()
+
+ if default_opts:
+ parser, options, args = parse_args(default_opts + args)
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ if options.repo is None:
+ if len(settings.repositories.prepos) == 2:
+ for repo in settings.repositories:
+ if repo.name != "DEFAULT":
+ options.repo = repo.name
+ break
+
+ if options.repo is None:
+ parser.error("--repo option is required")
+
+ repo_path = settings.repositories.treemap.get(options.repo)
+ if repo_path is None:
+ parser.error("Unable to locate repository named '%s'" % (options.repo,))
+
+ if options.jobs is not None:
+ options.jobs = int(options.jobs)
+
+ if options.load_average is not None:
+ options.load_average = float(options.load_average)
+
+ if options.failure_log is not None:
+ options.failure_log = normalize_path(
+ os.path.abspath(options.failure_log))
+
+ parent_dir = os.path.dirname(options.failure_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--failure-log '%s' parent is not a "
+ "writable directory") % options.failure_log)
+
+ if options.success_log is not None:
+ options.success_log = normalize_path(
+ os.path.abspath(options.success_log))
+
+ parent_dir = os.path.dirname(options.success_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--success-log '%s' parent is not a "
+ "writable directory") % options.success_log)
+
+ if options.scheduled_deletion_log is not None:
+ options.scheduled_deletion_log = normalize_path(
+ os.path.abspath(options.scheduled_deletion_log))
+
+ parent_dir = os.path.dirname(options.scheduled_deletion_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--scheduled-deletion-log '%s' parent is not a "
+ "writable directory") % options.scheduled_deletion_log)
+
+ if options.deletion_db is None:
+ parser.error("--scheduled-deletion-log requires --deletion-db")
+
+ if options.deletion_delay is not None:
+ options.deletion_delay = long(options.deletion_delay)
+ if options.deletion_db is None:
+ parser.error("--deletion-delay requires --deletion-db")
+
+ if options.deletion_db is not None:
+ if options.deletion_delay is None:
+ parser.error("--deletion-db requires --deletion-delay")
+ options.deletion_db = normalize_path(
+ os.path.abspath(options.deletion_db))
+
+ if options.temp_dir is not None:
+ options.temp_dir = normalize_path(
+ os.path.abspath(options.temp_dir))
+
+ if not (os.path.isdir(options.temp_dir) and
+ os.access(options.temp_dir, os.W_OK|os.X_OK)):
+ parser.error(("--temp-dir '%s' is not a "
+ "writable directory") % options.temp_dir)
+
+ if options.distfiles is not None:
+ options.distfiles = normalize_path(
+ os.path.abspath(options.distfiles))
+
+ if not (os.path.isdir(options.distfiles) and
+ os.access(options.distfiles, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles '%s' is not a "
+ "writable directory") % options.distfiles)
+ else:
+ parser.error("missing required --distfiles parameter")
+
+ if options.mirror_overrides is not None:
+ options.mirror_overrides = normalize_path(
+ os.path.abspath(options.mirror_overrides))
+
+ if not (os.access(options.mirror_overrides, os.R_OK) and
+ os.path.isfile(options.mirror_overrides)):
+ parser.error(
+				"--mirror-overrides '%s' is not a readable file" %
+ options.mirror_overrides)
+
+ if options.distfiles_local is not None:
+ options.distfiles_local = normalize_path(
+ os.path.abspath(options.distfiles_local))
+
+ if not (os.path.isdir(options.distfiles_local) and
+ os.access(options.distfiles_local, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles-local '%s' is not a "
+ "writable directory") % options.distfiles_local)
+
+ if options.distfiles_db is not None:
+ options.distfiles_db = normalize_path(
+ os.path.abspath(options.distfiles_db))
+
+ if options.tries is not None:
+ options.tries = int(options.tries)
+
+ if options.recycle_dir is not None:
+ options.recycle_dir = normalize_path(
+ os.path.abspath(options.recycle_dir))
+ if not (os.path.isdir(options.recycle_dir) and
+ os.access(options.recycle_dir, os.W_OK|os.X_OK)):
+ parser.error(("--recycle-dir '%s' is not a "
+ "writable directory") % options.recycle_dir)
+
+ if options.recycle_db is not None:
+ if options.recycle_dir is None:
+ parser.error("--recycle-db requires "
+ "--recycle-dir to be specified")
+ options.recycle_db = normalize_path(
+ os.path.abspath(options.recycle_db))
+
+ if options.recycle_deletion_delay is not None:
+ options.recycle_deletion_delay = \
+ long(options.recycle_deletion_delay)
+
+ if options.fetch_log_dir is not None:
+ options.fetch_log_dir = normalize_path(
+ os.path.abspath(options.fetch_log_dir))
+
+ if not (os.path.isdir(options.fetch_log_dir) and
+ os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
+ parser.error(("--fetch-log-dir '%s' is not a "
+ "writable directory") % options.fetch_log_dir)
+
+ if options.whitelist_from:
+ normalized_paths = []
+ for x in options.whitelist_from:
+ path = normalize_path(os.path.abspath(x))
+ if not os.access(path, os.R_OK):
+ parser.error("--whitelist-from '%s' is not readable" % x)
+ if os.path.isfile(path):
+ normalized_paths.append(path)
+ elif os.path.isdir(path):
+				for filename in _recursive_file_list(path):
+					if not os.access(filename, os.R_OK):
+						parser.error("--whitelist-from '%s' directory contains unreadable file '%s'" % (x, filename))
+					normalized_paths.append(filename)
+ else:
+ parser.error("--whitelist-from '%s' is not a regular file or a directory" % x)
+ options.whitelist_from = normalized_paths
+
+ if options.strict_manifests is not None:
+ if options.strict_manifests == "y":
+ settings.features.add("strict")
+ else:
+ settings.features.discard("strict")
+
+ settings.lock()
+
+ portdb = portage.portdbapi(mysettings=settings)
+
+ # Limit ebuilds to the specified repo.
+ portdb.porttrees = [repo_path]
+
+ portage.util.initialize_logger()
+
+ if options.verbose > 0:
+ l = logging.getLogger()
+ l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)
+
+ with Config(options, portdb,
+ SchedulerInterface(global_event_loop())) as config:
+
+ if not options.mirror:
+ parser.error('No action specified')
+
+ returncode = os.EX_OK
+
+ if options.mirror:
+ signum = run_main_scheduler(MirrorDistTask(config))
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ return returncode
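
The EMIRRORDIST_DEFAULT_OPTS handling above parses the command line twice: once to locate the settings, then again with the configured defaults prepended, so that explicit arguments override them. A minimal argparse sketch of the same two-pass pattern (option names here are illustrative, not emirrordist's):

    import argparse

    def parse_args(args):
        parser = argparse.ArgumentParser()
        parser.add_argument("--jobs", type=int)
        parser.add_argument("--ignore-default-opts", action="store_true")
        options, remaining = parser.parse_known_args(args)
        return parser, options, remaining

    # First pass: parse the real command line.
    parser, options, args = parse_args(["--jobs", "4"])

    # Hypothetical stand-in for settings.get('EMIRRORDIST_DEFAULT_OPTS', '')
    default_opts = "--jobs 2".split()

    if not options.ignore_default_opts and default_opts:
        # Second pass: defaults are prepended, so explicit arguments
        # win because argparse keeps the last occurrence it sees.
        parser, options, args = parse_args(default_opts + ["--jobs", "4"])

    print(options.jobs)  # 4 -- the explicit value overrides the default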
diff --git a/pym/portage/_global_updates.py b/pym/portage/_global_updates.py
index c0f3df0b6..dde726836 100644
--- a/pym/portage/_global_updates.py
+++ b/pym/portage/_global_updates.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -46,12 +46,6 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
portdb = trees[root]["porttree"].dbapi
vardb = trees[root]["vartree"].dbapi
bindb = trees[root]["bintree"].dbapi
- if not os.access(bindb.bintree.pkgdir, os.W_OK):
- bindb = None
- else:
- # Call binarytree.populate(), since we want to make sure it's
- # only populated with local packages here (getbinpkgs=0).
- bindb.bintree.populate()
world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
world_list = grabfile(world_file)
@@ -92,14 +86,10 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
if not update_notice_printed:
update_notice_printed = True
writemsg_stdout("\n")
- if quiet:
- writemsg_stdout(colorize("GOOD",
- _("Performing Global Updates\n")))
- writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
- else:
- writemsg_stdout(colorize("GOOD",
- _("Performing Global Updates:\n")))
- writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ if not quiet:
writemsg_stdout(_(" %s='update pass' %s='binary update' "
"%s='/var/db update' %s='/var/db move'\n"
" %s='/var/db SLOT move' %s='binary move' "
@@ -120,63 +110,71 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
if myupd:
retupd = True
+ if retupd:
+ if os.access(bindb.bintree.pkgdir, os.W_OK):
+ # Call binarytree.populate(), since we want to make sure it's
+ # only populated with local packages here (getbinpkgs=0).
+ bindb.bintree.populate()
+ else:
+ bindb = None
+
master_repo = portdb.getRepositoryName(portdb.porttree_root)
if master_repo in repo_map:
repo_map['DEFAULT'] = repo_map[master_repo]
for repo_name, myupd in repo_map.items():
- if repo_name == 'DEFAULT':
- continue
- if not myupd:
- continue
-
- def repo_match(repository):
- return repository == repo_name or \
- (repo_name == master_repo and repository not in repo_map)
-
- def _world_repo_match(atoma, atomb):
- """
- Check whether to perform a world change from atoma to atomb.
- If best vardb match for atoma comes from the same repository
- as the update file, allow that. Additionally, if portdb still
- can find a match for old atom name, warn about that.
- """
- matches = vardb.match(atoma)
- if not matches:
- matches = vardb.match(atomb)
- if matches and \
- repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
- if portdb.match(atoma):
- world_warnings.add((atoma, atomb))
- return True
- else:
- return False
+ if repo_name == 'DEFAULT':
+ continue
+ if not myupd:
+ continue
- for update_cmd in myupd:
- for pos, atom in enumerate(world_list):
- new_atom = update_dbentry(update_cmd, atom)
- if atom != new_atom:
- if _world_repo_match(atom, new_atom):
- world_list[pos] = new_atom
- world_modified = True
-
- for update_cmd in myupd:
- if update_cmd[0] == "move":
- moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ def repo_match(repository):
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ def _world_repo_match(atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+ If best vardb match for atoma comes from the same repository
+ as the update file, allow that. Additionally, if portdb still
+ can find a match for old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if matches and \
+ repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
+ if portdb.match(atoma):
+ world_warnings.add((atoma, atomb))
+ return True
+ else:
+ return False
+
+ for update_cmd in myupd:
+ for pos, atom in enumerate(world_list):
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if _world_repo_match(atom, new_atom):
+ world_list[pos] = new_atom
+ world_modified = True
+
+ for update_cmd in myupd:
+ if update_cmd[0] == "move":
+ moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "@")
+ if bindb:
+ moves = bindb.move_ent(update_cmd, repo_match=repo_match)
if moves:
- writemsg_stdout(moves * "@")
- if bindb:
- moves = bindb.move_ent(update_cmd, repo_match=repo_match)
- if moves:
- writemsg_stdout(moves * "%")
- elif update_cmd[0] == "slotmove":
- moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ writemsg_stdout(moves * "%")
+ elif update_cmd[0] == "slotmove":
+ moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "s")
+ if bindb:
+ moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
if moves:
- writemsg_stdout(moves * "s")
- if bindb:
- moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
- if moves:
- writemsg_stdout(moves * "S")
+ writemsg_stdout(moves * "S")
if world_modified:
world_list.sort()
@@ -189,65 +187,65 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
if retupd:
- def _config_repo_match(repo_name, atoma, atomb):
- """
- Check whether to perform a world change from atoma to atomb.
- If best vardb match for atoma comes from the same repository
- as the update file, allow that. Additionally, if portdb still
- can find a match for old atom name, warn about that.
- """
- matches = vardb.match(atoma)
+ def _config_repo_match(repo_name, atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+ If best vardb match for atoma comes from the same repository
+ as the update file, allow that. Additionally, if portdb still
+ can find a match for old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
if not matches:
- matches = vardb.match(atomb)
- if not matches:
- return False
- repository = vardb.aux_get(best(matches), ['repository'])[0]
- return repository == repo_name or \
- (repo_name == master_repo and repository not in repo_map)
-
- update_config_files(root,
- shlex_split(mysettings.get("CONFIG_PROTECT", "")),
- shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
- repo_map, match_callback=_config_repo_match)
-
- # The above global updates proceed quickly, so they
- # are considered a single mtimedb transaction.
- if timestamps:
- # We do not update the mtime in the mtimedb
- # until after _all_ of the above updates have
- # been processed because the mtimedb will
- # automatically commit when killed by ctrl C.
- for mykey, mtime in timestamps.items():
- prev_mtimes[mykey] = mtime
-
- do_upgrade_packagesmessage = False
- # We gotta do the brute force updates for these now.
- if True:
- def onUpdate(maxval, curval):
+ return False
+ repository = vardb.aux_get(best(matches), ['repository'])[0]
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ update_config_files(root,
+ shlex_split(mysettings.get("CONFIG_PROTECT", "")),
+ shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
+ repo_map, match_callback=_config_repo_match)
+
+ # The above global updates proceed quickly, so they
+ # are considered a single mtimedb transaction.
+ if timestamps:
+ # We do not update the mtime in the mtimedb
+ # until after _all_ of the above updates have
+ # been processed because the mtimedb will
+ # automatically commit when killed by ctrl C.
+ for mykey, mtime in timestamps.items():
+ prev_mtimes[mykey] = mtime
+
+ do_upgrade_packagesmessage = False
+ # We gotta do the brute force updates for these now.
+ if True:
+ def onUpdate(_maxval, curval):
+ if curval > 0:
+ writemsg_stdout("#")
+ if quiet:
+ onUpdate = None
+ vardb.update_ents(repo_map, onUpdate=onUpdate)
+ if bindb:
+ def onUpdate(_maxval, curval):
if curval > 0:
- writemsg_stdout("#")
+ writemsg_stdout("*")
if quiet:
onUpdate = None
- vardb.update_ents(repo_map, onUpdate=onUpdate)
- if bindb:
- def onUpdate(maxval, curval):
- if curval > 0:
- writemsg_stdout("*")
- if quiet:
- onUpdate = None
- bindb.update_ents(repo_map, onUpdate=onUpdate)
- else:
- do_upgrade_packagesmessage = 1
-
- # Update progress above is indicated by characters written to stdout so
- # we print a couple new lines here to separate the progress output from
- # what follows.
- writemsg_stdout("\n\n")
-
- if do_upgrade_packagesmessage and bindb and \
- bindb.cpv_all():
- writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
- writemsg_stdout(bold(_("Note: This can take a very long time.")))
- writemsg_stdout("\n")
+ bindb.update_ents(repo_map, onUpdate=onUpdate)
+ else:
+ do_upgrade_packagesmessage = 1
+
+ # Update progress above is indicated by characters written to stdout so
+ # we print a couple new lines here to separate the progress output from
+ # what follows.
+ writemsg_stdout("\n\n")
+
+ if do_upgrade_packagesmessage and bindb and \
+ bindb.cpv_all():
+ writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+ writemsg_stdout(bold(_("Note: This can take a very long time.")))
+ writemsg_stdout("\n")
return retupd
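
The '#' and '*' progress characters written above come from per-database onUpdate callbacks, which are replaced with None in quiet mode. A reduced sketch of that callback pattern, with update_ents stubbed out (the real dbapi method walks database entries and its signature may differ):

    import sys

    def update_ents(repo_map, onUpdate=None):
        # Stand-in loop over entries; reports (maxval, curval) progress.
        total = len(repo_map)
        for curval, _entry in enumerate(repo_map, start=1):
            if onUpdate is not None:
                onUpdate(total, curval)

    def onUpdate(_maxval, curval):
        if curval > 0:
            sys.stdout.write("#")

    quiet = False
    if quiet:
        onUpdate = None
    update_ents({"a": 1, "b": 2}, onUpdate=onUpdate)
    sys.stdout.write("\n")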
diff --git a/pym/portage/_legacy_globals.py b/pym/portage/_legacy_globals.py
index abffa0e9a..bb9691a77 100644
--- a/pym/portage/_legacy_globals.py
+++ b/pym/portage/_legacy_globals.py
@@ -27,7 +27,8 @@ def _get_legacy_global(name):
os.umask(0o22)
kwargs = {}
- for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"),
+ ("target_root", "ROOT"), ("eprefix", "EPREFIX")):
kwargs[k] = os.environ.get(envvar)
portage._initializing_globals = True
diff --git a/pym/portage/_selinux.py b/pym/portage/_selinux.py
index 173714515..2a7194c5d 100644
--- a/pym/portage/_selinux.py
+++ b/pym/portage/_selinux.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Don't use the unicode-wrapped os and shutil modules here since
@@ -8,18 +8,18 @@ import shutil
import portage
from portage import _encodings
-from portage import _unicode_decode
-from portage import _unicode_encode
+from portage import _native_string, _unicode_decode
from portage.localization import _
portage.proxy.lazyimport.lazyimport(globals(),
'selinux')
def copyfile(src, dest):
- src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
- dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.lgetfilecon(src)
if rc < 0:
- src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
setfscreate(ctx)
@@ -39,12 +39,12 @@ def is_selinux_enabled():
return selinux.is_selinux_enabled()
def mkdir(target, refdir):
- target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
- refdir = _unicode_encode(refdir, encoding=_encodings['fs'], errors='strict')
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ refdir = _native_string(refdir, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.getfilecon(refdir)
if rc < 0:
- refdir = _unicode_decode(refdir, encoding=_encodings['fs'],
- errors='replace')
+ if sys.hexversion < 0x3000000:
+ refdir = _unicode_decode(refdir, encoding=_encodings['fs'], errors='replace')
raise OSError(
_("mkdir: Failed getting context of reference directory \"%s\".") \
% refdir)
@@ -56,16 +56,17 @@ def mkdir(target, refdir):
setfscreate()
def rename(src, dest):
- src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
- dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.lgetfilecon(src)
if rc < 0:
- src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
raise OSError(_("rename: Failed getting context of \"%s\".") % src)
setfscreate(ctx)
try:
- os.rename(src,dest)
+ os.rename(src, dest)
finally:
setfscreate()
@@ -75,10 +76,10 @@ def settype(newtype):
return ":".join(ret)
def setexec(ctx="\n"):
- ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
if selinux.setexeccon(ctx) < 0:
- ctx = _unicode_decode(ctx, encoding=_encodings['content'],
- errors='replace')
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
if selinux.security_getenforce() == 1:
raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
else:
@@ -87,11 +88,10 @@ def setexec(ctx="\n"):
noiselevel=-1)
def setfscreate(ctx="\n"):
- ctx = _unicode_encode(ctx,
- encoding=_encodings['content'], errors='strict')
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
if selinux.setfscreatecon(ctx) < 0:
- ctx = _unicode_decode(ctx,
- encoding=_encodings['content'], errors='replace')
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
raise OSError(
_("setfscreate: Failed setting fs create context \"%s\".") % ctx)
@@ -106,8 +106,7 @@ class spawn_wrapper(object):
def __init__(self, spawn_func, selinux_type):
self._spawn_func = spawn_func
- selinux_type = _unicode_encode(selinux_type,
- encoding=_encodings['content'], errors='strict')
+ selinux_type = _native_string(selinux_type, encoding=_encodings['content'], errors='strict')
self._con = settype(selinux_type)
def __call__(self, *args, **kwargs):
@@ -123,13 +122,13 @@ class spawn_wrapper(object):
return self._spawn_func(*args, **kwargs)
def symlink(target, link, reflnk):
- target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
- link = _unicode_encode(link, encoding=_encodings['fs'], errors='strict')
- reflnk = _unicode_encode(reflnk, encoding=_encodings['fs'], errors='strict')
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ link = _native_string(link, encoding=_encodings['fs'], errors='strict')
+ reflnk = _native_string(reflnk, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.lgetfilecon(reflnk)
if rc < 0:
- reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'],
- errors='replace')
+ if sys.hexversion < 0x3000000:
+ reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'], errors='replace')
raise OSError(
_("symlink: Failed getting context of reference symlink \"%s\".") \
% reflnk)
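
The _selinux changes swap _unicode_encode for _native_string so that the selinux bindings receive bytes on Python 2 but text on Python 3. A plausible shape for such a helper is sketched below; this is an assumption about its behavior, not portage's actual implementation:

    import sys

    def _native_string(s, encoding='utf-8', errors='strict'):
        # Coerce to the native str type of the running interpreter:
        # bytes on Python 2, text on Python 3.
        if sys.hexversion >= 0x3000000:
            if isinstance(s, bytes):
                return s.decode(encoding, errors)
        else:
            if isinstance(s, unicode):  # noqa: F821 (Python 2 branch only)
                return s.encode(encoding, errors)
        return s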
diff --git a/pym/portage/_sets/__init__.py b/pym/portage/_sets/__init__.py
index c3b590e92..75d1df7bf 100644
--- a/pym/portage/_sets/__init__.py
+++ b/pym/portage/_sets/__init__.py
@@ -17,6 +17,7 @@ try:
from configparser import SafeConfigParser
except ImportError:
from ConfigParser import SafeConfigParser, NoOptionError, ParsingError
+import portage
from portage import os
from portage import load_mod
from portage import _unicode_decode
@@ -124,6 +125,10 @@ class SetConfig(object):
parser.add_section("system")
parser.set("system", "class", "portage.sets.profiles.PackagesSystemSet")
+ parser.remove_section("security")
+ parser.add_section("security")
+ parser.set("security", "class", "portage.sets.security.NewAffectedSet")
+
parser.remove_section("usersets")
parser.add_section("usersets")
parser.set("usersets", "class", "portage.sets.files.StaticFileSet")
@@ -131,6 +136,27 @@ class SetConfig(object):
parser.set("usersets", "directory", "%(PORTAGE_CONFIGROOT)setc/portage/sets")
parser.set("usersets", "world-candidate", "true")
+ parser.remove_section("live-rebuild")
+ parser.add_section("live-rebuild")
+ parser.set("live-rebuild", "class", "portage.sets.dbapi.VariableSet")
+ parser.set("live-rebuild", "variable", "INHERITED")
+ parser.set("live-rebuild", "includes", " ".join(sorted(portage.const.LIVE_ECLASSES)))
+
+ parser.remove_section("module-rebuild")
+ parser.add_section("module-rebuild")
+ parser.set("module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("module-rebuild", "files", "/lib/modules")
+
+ parser.remove_section("preserved-rebuild")
+ parser.add_section("preserved-rebuild")
+ parser.set("preserved-rebuild", "class", "portage.sets.libs.PreservedLibraryConsumerSet")
+
+ parser.remove_section("x11-module-rebuild")
+ parser.add_section("x11-module-rebuild")
+ parser.set("x11-module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("x11-module-rebuild", "files", "/usr/lib/xorg/modules")
+ parser.set("x11-module-rebuild", "exclude-files", "/usr/bin/Xorg")
+
def update(self, setname, options):
parser = self._parser
self.errors = []
@@ -270,8 +296,8 @@ def load_default_config(settings, trees):
return SetConfig(None, settings, trees)
global_config_path = GLOBAL_CONFIG_PATH
- if settings['EPREFIX']:
- global_config_path = os.path.join(settings['EPREFIX'],
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
GLOBAL_CONFIG_PATH.lstrip(os.sep))
def _getfiles():
for path, dirs, files in os.walk(os.path.join(global_config_path, "sets")):
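
Each block of parser.set() calls above registers a built-in set exactly as if it had been declared in sets.conf. As a sketch, the module-rebuild stanza corresponds to:

    try:
        from configparser import SafeConfigParser  # Python 3
    except ImportError:
        from ConfigParser import SafeConfigParser  # Python 2

    parser = SafeConfigParser()
    parser.add_section("module-rebuild")
    parser.set("module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
    parser.set("module-rebuild", "files", "/lib/modules")

    # Equivalent sets.conf stanza:
    #
    #   [module-rebuild]
    #   class = portage.sets.dbapi.OwnerSet
    #   files = /lib/modules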
diff --git a/pym/portage/_sets/base.py b/pym/portage/_sets/base.py
index c8d3ae405..ee20d3671 100644
--- a/pym/portage/_sets/base.py
+++ b/pym/portage/_sets/base.py
@@ -1,4 +1,4 @@
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -7,6 +7,7 @@ from portage.exception import InvalidAtom
from portage.versions import cpv_getkey
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
OPERATIONS = ["merge", "unmerge"]
@@ -126,7 +127,7 @@ class PackageSet(object):
if modified_use is not None and modified_use is not pkg.use.enabled:
pkg = pkg.copy()
- pkg.metadata["USE"] = " ".join(modified_use)
+ pkg._metadata["USE"] = " ".join(modified_use)
# Atoms matched via PROVIDE must be temporarily transformed since
# match_from_list() only works correctly when atom.cp == pkg.cp.
@@ -156,7 +157,7 @@ class PackageSet(object):
for atom in atoms:
if match_from_list(atom, cpv_slot_list):
yield atom
- provides = pkg.metadata['PROVIDE']
+ provides = pkg._metadata['PROVIDE']
if not provides:
return
provides = provides.split()
diff --git a/pym/portage/_sets/dbapi.py b/pym/portage/_sets/dbapi.py
index 4982a9244..384fb3aa8 100644
--- a/pym/portage/_sets/dbapi.py
+++ b/pym/portage/_sets/dbapi.py
@@ -26,8 +26,7 @@ class EverythingSet(PackageSet):
def load(self):
myatoms = []
- db_keys = ["SLOT"]
- aux_get = self._db.aux_get
+ pkg_str = self._db._pkg_str
cp_list = self._db.cp_list
for cp in self._db.cp_all():
@@ -35,8 +34,8 @@ class EverythingSet(PackageSet):
# NOTE: Create SLOT atoms even when there is only one
# SLOT installed, in order to avoid the possibility
# of unwanted upgrades as reported in bug #338959.
- slot, = aux_get(cpv, db_keys)
- atom = Atom("%s:%s" % (cp, slot))
+ pkg = pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
if self._filter:
if self._filter(atom):
myatoms.append(atom)
@@ -68,20 +67,19 @@ class OwnerSet(PackageSet):
"""
rValue = set()
vardb = self._db
- aux_get = vardb.aux_get
- aux_keys = ["SLOT"]
+ pkg_str = vardb._pkg_str
if exclude_paths is None:
for link, p in vardb._owners.iter_owners(paths):
- slot, = aux_get(link.mycpv, aux_keys)
- rValue.add("%s:%s" % (link.mycpv.cp, slot))
+ pkg = pkg_str(link.mycpv, None)
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
else:
all_paths = set()
all_paths.update(paths)
all_paths.update(exclude_paths)
exclude_atoms = set()
for link, p in vardb._owners.iter_owners(all_paths):
- slot, = aux_get(link.mycpv, aux_keys)
- atom = "%s:%s" % (link.mycpv.cp, slot)
+ pkg = pkg_str(link.mycpv, None)
+ atom = "%s:%s" % (pkg.cp, pkg.slot)
rValue.add(atom)
if p in exclude_paths:
exclude_atoms.add(atom)
@@ -173,12 +171,11 @@ class DowngradeSet(PackageSet):
xmatch = self._portdb.xmatch
xmatch_level = "bestmatch-visible"
cp_list = self._vardb.cp_list
- aux_get = self._vardb.aux_get
- aux_keys = ["SLOT"]
+ pkg_str = self._vardb._pkg_str
for cp in self._vardb.cp_all():
for cpv in cp_list(cp):
- slot, = aux_get(cpv, aux_keys)
- slot_atom = "%s:%s" % (cp, slot)
+ pkg = pkg_str(cpv, None)
+ slot_atom = "%s:%s" % (pkg.cp, pkg.slot)
ebuild = xmatch(xmatch_level, slot_atom)
if not ebuild:
continue
@@ -326,6 +323,7 @@ class CategorySet(PackageSet):
class AgeSet(EverythingSet):
_operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
def __init__(self, vardb, mode="older", age=7):
super(AgeSet, self).__init__(vardb)
@@ -335,8 +333,12 @@ class AgeSet(EverythingSet):
def _filter(self, atom):
cpv = self._db.match(atom)[0]
- path = self._db.getpath(cpv, filename="COUNTER")
- age = (time.time() - os.stat(path).st_mtime) / (3600 * 24)
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ age = (time.time() - date) / (3600 * 24)
if ((self._mode == "older" and age <= self._age) \
or (self._mode == "newer" and age >= self._age)):
return False
@@ -355,6 +357,83 @@ class AgeSet(EverythingSet):
singleBuilder = classmethod(singleBuilder)
+class DateSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, date, mode="older"):
+ super(DateSet, self).__init__(vardb)
+ self._mode = mode
+ self._date = date
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ # Make sure inequality is _strict_ to exclude tested package
+ if ((self._mode == "older" and date < self._date) \
+ or (self._mode == "newer" and date > self._date)):
+ return True
+ else:
+ return False
+
+ def singleBuilder(cls, options, settings, trees):
+ vardbapi = trees["vartree"].dbapi
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+
+ formats = []
+ if options.get("package") is not None:
+ formats.append("package")
+ if options.get("filestamp") is not None:
+ formats.append("filestamp")
+ if options.get("seconds") is not None:
+ formats.append("seconds")
+ if options.get("date") is not None:
+ formats.append("date")
+
+ if not formats:
+ raise SetConfigError(_("none of these options specified: 'package', 'filestamp', 'seconds', 'date'"))
+ elif len(formats) > 1:
+ raise SetConfigError(_("no more than one of these options is allowed: 'package', 'filestamp', 'seconds', 'date'"))
+
+ format = formats[0]
+
+ if (format == "package"):
+ package = options.get("package")
+ try:
+ cpv = vardbapi.match(package)[0]
+ date, = vardbapi.aux_get(cpv, ('BUILD_TIME',))
+ date = int(date)
+ except (KeyError, ValueError):
+			raise SetConfigError(_("cannot determine build time of package %s") % package)
+ elif (format == "filestamp"):
+ filestamp = options.get("filestamp")
+ try:
+ date = int(os.stat(filestamp).st_mtime)
+ except (OSError, ValueError):
+ raise SetConfigError(_("cannot determine 'filestamp' of '%s'") % filestamp)
+ elif (format == "seconds"):
+ try:
+ date = int(options.get("seconds"))
+ except ValueError:
+ raise SetConfigError(_("option 'seconds' must be an integer"))
+ else:
+ dateopt = options.get("date")
+ try:
+ dateformat = options.get("dateformat", "%x %X")
+ date = int(time.mktime(time.strptime(dateopt, dateformat)))
+ except ValueError:
+ raise SetConfigError(_("'date=%s' does not match 'dateformat=%s'") % (dateopt, dateformat))
+ return DateSet(vardb=vardbapi, date=date, mode=mode)
+
+ singleBuilder = classmethod(singleBuilder)
+
class RebuiltBinaries(EverythingSet):
_operations = ('merge',)
_aux_keys = ('BUILD_TIME',)
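
DateSet's singleBuilder accepts exactly one of 'package', 'filestamp', 'seconds' or 'date'. For the 'date' format, the configured string is resolved to seconds since the epoch, the same unit BUILD_TIME is stored in; a standalone sketch with hypothetical option values:

    import time

    options = {"date": "2014-01-01 00:00:00",
               "dateformat": "%Y-%m-%d %H:%M:%S"}

    dateopt = options.get("date")
    dateformat = options.get("dateformat", "%x %X")
    # Convert the configured date string to epoch seconds so it can be
    # compared directly against each package's BUILD_TIME.
    date = int(time.mktime(time.strptime(dateopt, dateformat)))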
diff --git a/pym/portage/_sets/files.py b/pym/portage/_sets/files.py
index b891ea4f4..2fb64de87 100644
--- a/pym/portage/_sets/files.py
+++ b/pym/portage/_sets/files.py
@@ -1,4 +1,4 @@
-# Copyright 2007-2012 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -86,8 +86,8 @@ class StaticFileSet(EditablePackageSet):
for a in data:
matches = self.dbapi.match(a)
for cpv in matches:
- atoms.append("%s:%s" % (cpv_getkey(cpv),
- self.dbapi.aux_get(cpv, ["SLOT"])[0]))
+ pkg = self.dbapi._pkg_str(cpv, None)
+ atoms.append("%s:%s" % (pkg.cp, pkg.slot))
# In addition to any installed slots, also try to pull
# in the latest new slot that may be available.
atoms.append(a)
@@ -296,10 +296,14 @@ class WorldSelectedSet(EditablePackageSet):
ensure_dirs(os.path.dirname(self._filename), gid=portage_gid, mode=0o2750, mask=0o2)
def lock(self):
+ if self._lock is not None:
+ raise AssertionError("already locked")
self._ensure_dirs()
self._lock = lockfile(self._filename, wantnewlockfile=1)
def unlock(self):
+ if self._lock is None:
+ raise AssertionError("not locked")
unlockfile(self._lock)
self._lock = None
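
The new AssertionError guards make misuse of lock()/unlock() fail loudly instead of silently double-acquiring or double-releasing the world file lock. The guard pattern in isolation (lockfile/unlockfile replaced by placeholders):

    class LockGuard(object):
        # Illustrative stand-in for WorldSelectedSet's lock handling.
        def __init__(self):
            self._lock = None

        def lock(self):
            if self._lock is not None:
                raise AssertionError("already locked")
            self._lock = object()  # placeholder for lockfile(...)

        def unlock(self):
            if self._lock is None:
                raise AssertionError("not locked")
            self._lock = None      # placeholder for unlockfile(...)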
diff --git a/pym/portage/_sets/libs.py b/pym/portage/_sets/libs.py
index 6c5babc13..022e076f5 100644
--- a/pym/portage/_sets/libs.py
+++ b/pym/portage/_sets/libs.py
@@ -1,12 +1,12 @@
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
+from portage.exception import InvalidData
from portage.localization import _
from portage._sets.base import PackageSet
from portage._sets import get_boolean, SetConfigError
-from portage.versions import cpv_getkey
import portage
class LibraryConsumerSet(PackageSet):
@@ -22,14 +22,14 @@ class LibraryConsumerSet(PackageSet):
for p in paths:
for cpv in self.dbapi._linkmap.getOwners(p):
try:
- slot, = self.dbapi.aux_get(cpv, ["SLOT"])
- except KeyError:
+ pkg = self.dbapi._pkg_str(cpv, None)
+ except (KeyError, InvalidData):
# This is expected for preserved libraries
# of packages that have been uninstalled
# without replacement.
pass
else:
- rValue.add("%s:%s" % (cpv_getkey(cpv), slot))
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
return rValue
class LibraryFileConsumerSet(LibraryConsumerSet):
@@ -49,7 +49,8 @@ class LibraryFileConsumerSet(LibraryConsumerSet):
def load(self):
consumers = set()
for lib in self.files:
- consumers.update(self.dbapi._linkmap.findConsumers(lib))
+ consumers.update(
+ self.dbapi._linkmap.findConsumers(lib, greedy=False))
if not consumers:
return
@@ -77,10 +78,10 @@ class PreservedLibraryConsumerSet(LibraryConsumerSet):
for lib in libs:
if self.debug:
print(lib)
- for x in sorted(self.dbapi._linkmap.findConsumers(lib)):
+ for x in sorted(self.dbapi._linkmap.findConsumers(lib, greedy=False)):
print(" ", x)
print("-"*40)
- consumers.update(self.dbapi._linkmap.findConsumers(lib))
+ consumers.update(self.dbapi._linkmap.findConsumers(lib, greedy=False))
# Don't rebuild packages just because they contain preserved
# libs that happen to be consumers of other preserved libs.
for libs in plib_dict.values():
diff --git a/pym/portage/_sets/security.py b/pym/portage/_sets/security.py
index 7e856bc79..f8dbef2be 100644
--- a/pym/portage/_sets/security.py
+++ b/pym/portage/_sets/security.py
@@ -44,8 +44,8 @@ class SecuritySet(PackageSet):
mydict = {}
for atom in atomlist[:]:
cpv = self._portdbapi.xmatch("match-all", atom)[0]
- slot = self._portdbapi.aux_get(cpv, ["SLOT"])[0]
- cps = "%s:%s" % (cpv.cp, slot)
+ pkg = self._portdbapi._pkg_str(cpv, None)
+ cps = "%s:%s" % (pkg.cp, pkg.slot)
if not cps in mydict:
mydict[cps] = (atom, cpv)
else:
diff --git a/pym/portage/cache/ebuild_xattr.py b/pym/portage/cache/ebuild_xattr.py
index 0086e40a3..db6e177cf 100644
--- a/pym/portage/cache/ebuild_xattr.py
+++ b/pym/portage/cache/ebuild_xattr.py
@@ -1,4 +1,4 @@
-# -*- coding: UTF8 -*-
+# -*- coding: utf-8 -*-
# Copyright: 2009-2011 Gentoo Foundation
# Author(s): Petteri Räty (betelgeuse@gentoo.org)
# License: GPL2
diff --git a/pym/portage/cache/flat_hash.py b/pym/portage/cache/flat_hash.py
index 2eae9f634..53042965e 100644
--- a/pym/portage/cache/flat_hash.py
+++ b/pym/portage/cache/flat_hash.py
@@ -1,7 +1,9 @@
-# Copyright: 2005-2011 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
+from __future__ import unicode_literals
+
from portage.cache import fs_template
from portage.cache import cache_errors
import errno
@@ -11,16 +13,14 @@ import sys
import os as _os
from portage import os
from portage import _encodings
-from portage import _unicode_decode
from portage import _unicode_encode
+from portage.exception import InvalidData
+from portage.versions import _pkg_str
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
-# Coerce to unicode, in order to prevent TypeError when writing
-# raw bytes to TextIOWrapper with python2.
-_setitem_fmt = _unicode_decode("%s=%s\n")
-
class database(fs_template.FsBased):
autocommits = True
@@ -40,11 +40,10 @@ class database(fs_template.FsBased):
# Don't use os.path.join, for better performance.
fp = self.location + _os.sep + cpv
try:
- myf = io.open(_unicode_encode(fp,
+ with io.open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- try:
+ errors='replace') as myf:
lines = myf.read().split("\n")
if not lines[-1]:
lines.pop()
@@ -54,8 +53,6 @@ class database(fs_template.FsBased):
# that uses mtime mangling.
d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
return d
- finally:
- myf.close()
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise cache_errors.CacheCorruption(cpv, e)
@@ -94,7 +91,10 @@ class database(fs_template.FsBased):
v = values.get(k)
if not v:
continue
- myf.write(_setitem_fmt % (k, v))
+ # NOTE: This format string requires unicode_literals, so that
+ # k and v are coerced to unicode, in order to prevent TypeError
+ # when writing raw bytes to TextIOWrapper with Python 2.
+ myf.write("%s=%s\n" % (k, v))
finally:
myf.close()
self._ensure_access(fp)
@@ -135,8 +135,6 @@ class database(fs_template.FsBased):
del e
continue
for l in dir_list:
- if l.endswith(".cpickle"):
- continue
p = os.path.join(dir_path, l)
try:
st = os.lstat(p)
@@ -151,7 +149,11 @@ class database(fs_template.FsBased):
if depth < 1:
dirs.append((depth+1, p))
continue
- yield p[len_base+1:]
+
+ try:
+ yield _pkg_str(p[len_base+1:])
+ except InvalidData:
+ continue
class md5_database(database):
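
The flat_hash rewrite depends on the unicode_literals future import: once "%s=%s\n" is a unicode literal, the interpolated key and value are coerced to text as well, so Python 2's TextIOWrapper never receives raw bytes. A minimal demonstration using io.StringIO as a stand-in for the cache file:

    from __future__ import unicode_literals

    import io

    buf = io.StringIO()  # behaves like the TextIOWrapper used for cache files
    k, v = "SLOT", "0"
    # With unicode_literals this writes text, not bytes, on Python 2,
    # avoiding "TypeError: unicode argument expected".
    buf.write("%s=%s\n" % (k, v))
    print(buf.getvalue())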
diff --git a/pym/portage/cache/flat_list.py b/pym/portage/cache/flat_list.py
deleted file mode 100644
index 728830753..000000000
--- a/pym/portage/cache/flat_list.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2005-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.cache import fs_template
-from portage.cache import cache_errors
-from portage import os
-from portage import _encodings
-from portage import _unicode_decode
-from portage import _unicode_encode
-import errno
-import io
-import stat
-import sys
-
-if sys.hexversion >= 0x3000000:
- long = int
-
-# Coerce to unicode, in order to prevent TypeError when writing
-# raw bytes to TextIOWrapper with python2.
-_setitem_fmt = _unicode_decode("%s\n")
-
-# store the current key order *here*.
-class database(fs_template.FsBased):
-
- autocommits = True
-
- # do not screw with this ordering. _eclasses_ needs to be last
- auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
- 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
- 'KEYWORDS', 'IUSE', 'REQUIRED_USE',
- 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
-
- def __init__(self, *args, **config):
- super(database,self).__init__(*args, **config)
- self.location = os.path.join(self.location,
- self.label.lstrip(os.path.sep).rstrip(os.path.sep))
-
- if len(self._known_keys) > len(self.auxdbkey_order) + 2:
- raise Exception("less ordered keys then auxdbkeys")
- if not os.path.exists(self.location):
- self._ensure_dirs()
-
-
- def _getitem(self, cpv):
- d = {}
- try:
- myf = io.open(_unicode_encode(os.path.join(self.location, cpv),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- for k,v in zip(self.auxdbkey_order, myf):
- d[k] = v.rstrip("\n")
- except (OSError, IOError) as e:
- if errno.ENOENT == e.errno:
- raise KeyError(cpv)
- raise cache_errors.CacheCorruption(cpv, e)
-
- try:
- d["_mtime_"] = os.fstat(myf.fileno())[stat.ST_MTIME]
- except OSError as e:
- myf.close()
- raise cache_errors.CacheCorruption(cpv, e)
- myf.close()
- return d
-
-
- def _setitem(self, cpv, values):
- s = cpv.rfind("/")
- fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
- try:
- myf = io.open(_unicode_encode(fp,
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- except (OSError, IOError) as e:
- if errno.ENOENT == e.errno:
- try:
- self._ensure_dirs(cpv)
- myf = io.open(_unicode_encode(fp,
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- except (OSError, IOError) as e:
- raise cache_errors.CacheCorruption(cpv, e)
- else:
- raise cache_errors.CacheCorruption(cpv, e)
-
-
- for x in self.auxdbkey_order:
- myf.write(_setitem_fmt % (values.get(x, ""),))
-
- myf.close()
- self._ensure_access(fp, mtime=values["_mtime_"])
- #update written. now we move it.
- new_fp = os.path.join(self.location,cpv)
- try:
- os.rename(fp, new_fp)
- except (OSError, IOError) as e:
- os.remove(fp)
- raise cache_errors.CacheCorruption(cpv, e)
-
-
- def _delitem(self, cpv):
- try:
- os.remove(os.path.join(self.location,cpv))
- except OSError as e:
- if errno.ENOENT == e.errno:
- raise KeyError(cpv)
- else:
- raise cache_errors.CacheCorruption(cpv, e)
-
-
- def __contains__(self, cpv):
- return os.path.exists(os.path.join(self.location, cpv))
-
-
- def __iter__(self):
- """generator for walking the dir struct"""
- dirs = [self.location]
- len_base = len(self.location)
- while len(dirs):
- for l in os.listdir(dirs[0]):
- if l.endswith(".cpickle"):
- continue
- p = os.path.join(dirs[0],l)
- st = os.lstat(p)
- if stat.S_ISDIR(st.st_mode):
- dirs.append(p)
- continue
- yield p[len_base+1:]
- dirs.pop(0)
-
-
- def commit(self): pass
diff --git a/pym/portage/cache/fs_template.py b/pym/portage/cache/fs_template.py
index 8f0636ed0..de4fe4ba5 100644
--- a/pym/portage/cache/fs_template.py
+++ b/pym/portage/cache/fs_template.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
@@ -15,6 +15,7 @@ lazyimport(globals(),
del lazyimport
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
class FsBased(template.database):
@@ -25,7 +26,8 @@ class FsBased(template.database):
for x, y in (("gid", -1), ("perms", -1)):
if x in config:
- setattr(self, "_"+x, config[x])
+ # Since Python 3.4, chown requires int type (no proxies).
+ setattr(self, "_" + x, int(config[x]))
del config[x]
else:
setattr(self, "_"+x, y)
diff --git a/pym/portage/cache/mappings.py b/pym/portage/cache/mappings.py
index bc8ce9af8..cd39a6ea1 100644
--- a/pym/portage/cache/mappings.py
+++ b/pym/portage/cache/mappings.py
@@ -199,10 +199,10 @@ class OrderedDict(UserDict):
return iter(self._order)
def __setitem__(self, key, item):
- if key in self:
- self._order.remove(key)
+ new_key = key not in self
UserDict.__setitem__(self, key, item)
- self._order.append(key)
+ if new_key:
+ self._order.append(key)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
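
The OrderedDict.__setitem__ fix changes update semantics: reassigning an existing key now keeps its original position rather than moving it to the end, matching the stdlib collections.OrderedDict behavior shown below:

    # Before the fix (move-to-end on reassignment):
    #   d['a'] = 1; d['b'] = 2; d['a'] = 3  ->  order: ['b', 'a']
    # After the fix (stable insertion order):
    #   d['a'] = 1; d['b'] = 2; d['a'] = 3  ->  order: ['a', 'b']

    from collections import OrderedDict

    d = OrderedDict()
    d['a'] = 1
    d['b'] = 2
    d['a'] = 3
    assert list(d) == ['a', 'b']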
diff --git a/pym/portage/cache/metadata.py b/pym/portage/cache/metadata.py
index 9d2c3a5d7..0c588bde9 100644
--- a/pym/portage/cache/metadata.py
+++ b/pym/portage/cache/metadata.py
@@ -1,4 +1,4 @@
-# Copyright: 2005 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Author(s): Brian Harring (ferringb@gentoo.org)
# License: GPL2
@@ -16,6 +16,7 @@ from portage.cache.template import reconstruct_eclasses
from portage.cache.mappings import ProtectedDict
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
@@ -28,7 +29,8 @@ class database(flat_hash.database):
auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
- 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+ 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES',
+ 'DEFINED_PHASES', 'HDEPEND')
autocommits = True
serialize_eclasses = False
diff --git a/pym/portage/cache/sqlite.py b/pym/portage/cache/sqlite.py
index a6a3e066d..42a239922 100644
--- a/pym/portage/cache/sqlite.py
+++ b/pym/portage/cache/sqlite.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import re
import sys
from portage.cache import fs_template
@@ -11,6 +13,7 @@ from portage.util import writemsg
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class database(fs_template.FsBased):
@@ -21,7 +24,6 @@ class database(fs_template.FsBased):
# to calculate the number of pages requested, according to the following
# equation: cache_bytes = page_bytes * page_count
cache_bytes = 1024 * 1024 * 10
- _db_table = None
def __init__(self, *args, **config):
super(database, self).__init__(*args, **config)
@@ -29,6 +31,7 @@ class database(fs_template.FsBased):
self._allowed_keys = ["_mtime_", "_eclasses_"]
self._allowed_keys.extend(self._known_keys)
self._allowed_keys.sort()
+ self._allowed_keys_set = frozenset(self._allowed_keys)
self.location = os.path.join(self.location,
self.label.lstrip(os.path.sep).rstrip(os.path.sep))
@@ -38,8 +41,8 @@ class database(fs_template.FsBased):
config.setdefault("autocommit", self.autocommits)
config.setdefault("cache_bytes", self.cache_bytes)
config.setdefault("synchronous", self.synchronous)
- # Timeout for throwing a "database is locked" exception (pysqlite
- # default is 5.0 seconds).
+ # Set longer timeout for throwing a "database is locked" exception.
+ # Default timeout in sqlite3 module is 5.0 seconds.
config.setdefault("timeout", 15)
self._db_init_connection(config)
self._db_init_structures()
@@ -48,11 +51,8 @@ class database(fs_template.FsBased):
# sqlite3 is optional with >=python-2.5
try:
import sqlite3 as db_module
- except ImportError:
- try:
- from pysqlite2 import dbapi2 as db_module
- except ImportError as e:
- raise cache_errors.InitializationError(self.__class__, e)
+ except ImportError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
self._db_module = db_module
self._db_error = db_module.Error
@@ -63,7 +63,6 @@ class database(fs_template.FsBased):
# Avoid potential UnicodeEncodeError in python-2.x by
# only calling str() when it's absolutely necessary.
s = str(s)
- # This is equivalent to the _quote function from pysqlite 1.1.
return "'%s'" % s.replace("'", "''")
def _db_init_connection(self, config):
@@ -93,9 +92,6 @@ class database(fs_template.FsBased):
self._db_table["packages"]["table_name"] = mytable
self._db_table["packages"]["package_id"] = "internal_db_package_id"
self._db_table["packages"]["package_key"] = "portage_package_key"
- self._db_table["packages"]["internal_columns"] = \
- [self._db_table["packages"]["package_id"],
- self._db_table["packages"]["package_key"]]
create_statement = []
create_statement.append("CREATE TABLE")
create_statement.append(mytable)
@@ -110,9 +106,6 @@ class database(fs_template.FsBased):
create_statement.append(")")
self._db_table["packages"]["create"] = " ".join(create_statement)
- self._db_table["packages"]["columns"] = \
- self._db_table["packages"]["internal_columns"] + \
- self._allowed_keys
cursor = self._db_cursor
for k, v in self._db_table.items():
@@ -211,13 +204,17 @@ class database(fs_template.FsBased):
raise KeyError(cpv)
else:
raise cache_errors.CacheCorruption(cpv, "key is not unique")
+ result = result[0]
d = {}
- internal_columns = self._db_table["packages"]["internal_columns"]
- column_index = -1
- for k in self._db_table["packages"]["columns"]:
- column_index +=1
- if k not in internal_columns:
- d[k] = result[0][column_index]
+ allowed_keys_set = self._allowed_keys_set
+ for column_index, column_info in enumerate(cursor.description):
+ k = column_info[0]
+ if k in allowed_keys_set:
+ v = result[column_index]
+ if v is None:
+ # This happens after a new empty column has been added.
+ v = ""
+ d[k] = v
return d
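
Rather than relying on a stored column list, _getitem now derives column names from cursor.description, which stays correct after new columns are added to an existing cache database. The same lookup pattern against the stdlib sqlite3 API (the table layout here is a simplified stand-in):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cursor = conn.cursor()
    cursor.execute("CREATE TABLE packages "
        "(internal_db_package_id INTEGER, SLOT TEXT, EAPI TEXT)")
    cursor.execute("INSERT INTO packages VALUES (1, '0', NULL)")
    cursor.execute("SELECT * FROM packages")
    result = cursor.fetchall()[0]

    allowed_keys_set = frozenset(["SLOT", "EAPI"])
    d = {}
    for column_index, column_info in enumerate(cursor.description):
        k = column_info[0]
        if k in allowed_keys_set:
            v = result[column_index]
            if v is None:
                # NULL appears after a new empty column has been added.
                v = ""
            d[k] = v

    assert d == {"SLOT": "0", "EAPI": ""}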
diff --git a/pym/portage/cache/template.py b/pym/portage/cache/template.py
index cf1e8aebb..bc81b8642 100644
--- a/pym/portage/cache/template.py
+++ b/pym/portage/cache/template.py
@@ -1,6 +1,6 @@
-# Copyright: 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
-# License: GPL2
from portage.cache import cache_errors
from portage.cache.cache_errors import InvalidRestriction
@@ -10,6 +10,7 @@ import warnings
import operator
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
_unicode = str
basestring = str
long = int
@@ -164,7 +165,14 @@ class database(object):
def commit(self):
if not self.autocommits:
- raise NotImplementedError
+ raise NotImplementedError(self)
+
+ def __del__(self):
+ # This used to be handled by an atexit hook that called
+ # close_portdbapi_caches() for all portdbapi instances, but that was
+ # prone to memory leaks for API consumers that needed to create/destroy
+ # many portdbapi instances. So, instead we rely on __del__.
+ self.sync()
def __contains__(self, cpv):
"""This method should always be overridden. It is provided only for
diff --git a/pym/portage/checksum.py b/pym/portage/checksum.py
index daf4a0cbf..f24a90ffc 100644
--- a/pym/portage/checksum.py
+++ b/pym/portage/checksum.py
@@ -1,15 +1,16 @@
# checksum.py -- core Portage functionality
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
-from portage.const import PRELINK_BINARY,HASHING_BLOCKSIZE
+from portage.const import PRELINK_BINARY, HASHING_BLOCKSIZE
from portage.localization import _
from portage import os
from portage import _encodings
from portage import _unicode_encode
import errno
import stat
+import subprocess
import tempfile
#dict of all available hash functions
@@ -48,16 +49,15 @@ class _generate_hash_function(object):
@type filename: String
@return: The hash and size of the data
"""
- f = _open_file(filename)
- blocksize = HASHING_BLOCKSIZE
- data = f.read(blocksize)
- size = 0
- checksum = self._hashobject()
- while data:
- checksum.update(data)
- size = size + len(data)
+ with _open_file(filename) as f:
+ blocksize = HASHING_BLOCKSIZE
+ size = 0
+ checksum = self._hashobject()
data = f.read(blocksize)
- f.close()
+ while data:
+ checksum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
return (checksum.hexdigest(), size)
@@ -163,11 +163,16 @@ hashfunc_map["size"] = getsize
prelink_capable = False
if os.path.exists(PRELINK_BINARY):
- results = portage.subprocess_getstatusoutput(
- "%s --version > /dev/null 2>&1" % (PRELINK_BINARY,))
- if (results[0] >> 8) == 0:
- prelink_capable=1
- del results
+ cmd = [PRELINK_BINARY, "--version"]
+ cmd = [_unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ proc.communicate()
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ prelink_capable = 1
+ del cmd, proc, status
def is_prelinkable_elf(filename):
f = _open_file(filename)
@@ -217,6 +222,64 @@ def _filter_unaccelarated_hashes(digests):
return digests
+class _hash_filter(object):
+ """
+ Implements filtering for PORTAGE_CHECKSUM_FILTER.
+ """
+
+ __slots__ = ('transparent', '_tokens',)
+
+ def __init__(self, filter_str):
+ tokens = filter_str.upper().split()
+ if not tokens or tokens[-1] == "*":
+ del tokens[:]
+ self.transparent = not tokens
+ tokens.reverse()
+ self._tokens = tuple(tokens)
+
+ def __call__(self, hash_name):
+ if self.transparent:
+ return True
+ matches = ("*", hash_name)
+ for token in self._tokens:
+ if token in matches:
+ return True
+ elif token[:1] == "-":
+ if token[1:] in matches:
+ return False
+ return False
+
+def _apply_hash_filter(digests, hash_filter):
+ """
+ Return a new dict containing the filtered digests, or the same
+	dict if no changes are necessary. This will always preserve
+	at least one digest, in order to ensure that they are not all
+ discarded.
+ @param digests: dictionary of digests
+ @type digests: dict
+ @param hash_filter: A callable that takes a single hash name
+ argument, and returns True if the hash is to be used or
+ False otherwise
+ @type hash_filter: callable
+ """
+
+ verifiable_hash_types = set(digests).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ modified = False
+ if len(verifiable_hash_types) > 1:
+ for k in list(verifiable_hash_types):
+ if not hash_filter(k):
+ modified = True
+ verifiable_hash_types.remove(k)
+ if len(verifiable_hash_types) == 1:
+ break
+
+ if modified:
+ digests = dict((k, v) for (k, v) in digests.items()
+ if k == "size" or k in verifiable_hash_types)
+
+ return digests
+
def verify_all(filename, mydict, calc_prelink=0, strict=0):
"""
Verify all checksums against a file.
@@ -275,9 +338,10 @@ def verify_all(filename, mydict, calc_prelink=0, strict=0):
{"file" : filename, "type" : x})
else:
file_is_ok = False
- reason = (("Failed on %s verification" % x), myhash,mydict[x])
+ reason = (("Failed on %s verification" % x), myhash, mydict[x])
break
- return file_is_ok,reason
+
+ return file_is_ok, reason
def perform_checksum(filename, hashname="MD5", calc_prelink=0):
"""
diff --git a/pym/portage/const.py b/pym/portage/const.py
index ceef5c56b..1785bfff7 100644
--- a/pym/portage/const.py
+++ b/pym/portage/const.py
@@ -1,7 +1,9 @@
# portage: Constants
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import os
# ===========================================================================
@@ -27,8 +29,8 @@ import os
# The variables in this file are grouped by config_root, target_root.
# variables used with config_root (these need to be relative)
-MAKE_CONF_FILE = "etc/make.conf"
USER_CONFIG_PATH = "etc/portage"
+MAKE_CONF_FILE = USER_CONFIG_PATH + "/make.conf"
MODULES_FILE_PATH = USER_CONFIG_PATH + "/modules"
CUSTOM_PROFILE_PATH = USER_CONFIG_PATH + "/profile"
USER_VIRTUALS_FILE = USER_CONFIG_PATH + "/virtuals"
@@ -36,7 +38,7 @@ EBUILD_SH_ENV_FILE = USER_CONFIG_PATH + "/bashrc"
EBUILD_SH_ENV_DIR = USER_CONFIG_PATH + "/env"
CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH + "/mirrors"
COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
-PROFILE_PATH = "etc/make.profile"
+PROFILE_PATH = USER_CONFIG_PATH + "/make.profile"
MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults" # FIXME: not used
DEPRECATED_PROFILE_FILE = PROFILE_PATH + "/deprecated"
@@ -56,7 +58,10 @@ DEPCACHE_PATH = "/var/cache/edb/dep"
GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
# these variables are not used with target_root or config_root
-PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-3]))
+# NOTE: Use realpath(__file__) so that python module symlinks in site-packages
+# are followed back to the real location of the whole portage installation.
+PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(os.path.realpath(
+ __file__.rstrip("co")).split(os.sep)[:-3]))
PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
PORTAGE_PYM_PATH = PORTAGE_BASE_PATH + "/pym"
LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
@@ -75,40 +80,123 @@ REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
LIBC_PACKAGE_ATOM = "virtual/libc"
OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
+CVS_PACKAGE_ATOM = "dev-vcs/cvs"
+GIT_PACKAGE_ATOM = "dev-vcs/git"
+RSYNC_PACKAGE_ATOM = "net-misc/rsync"
-INCREMENTALS = ("USE", "USE_EXPAND", "USE_EXPAND_HIDDEN",
- "FEATURES", "ACCEPT_KEYWORDS",
- "CONFIG_PROTECT_MASK", "CONFIG_PROTECT",
- "PRELINK_PATH", "PRELINK_PATH_MASK",
- "PROFILE_ONLY_VARIABLES")
-EBUILD_PHASES = ("pretend", "setup", "unpack", "prepare", "configure",
- "compile", "test", "install",
- "package", "preinst", "postinst","prerm", "postrm",
- "nofetch", "config", "info", "other")
+INCREMENTALS = (
+ "ACCEPT_KEYWORDS",
+ "CONFIG_PROTECT",
+ "CONFIG_PROTECT_MASK",
+ "FEATURES",
+ "IUSE_IMPLICIT",
+ "PRELINK_PATH",
+ "PRELINK_PATH_MASK",
+ "PROFILE_ONLY_VARIABLES",
+ "USE",
+ "USE_EXPAND",
+ "USE_EXPAND_HIDDEN",
+ "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED",
+)
+EBUILD_PHASES = (
+ "pretend",
+ "setup",
+ "unpack",
+ "prepare",
+ "configure",
+ "compile",
+ "test",
+ "install",
+ "package",
+ "preinst",
+ "postinst",
+ "prerm",
+ "postrm",
+ "nofetch",
+ "config",
+ "info",
+ "other",
+)
SUPPORTED_FEATURES = frozenset([
- "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
- "ccache", "chflags", "clean-logs",
- "collision-protect", "compress-build-logs", "compressdebug",
- "config-protect-if-modified",
- "digest", "distcc", "distcc-pump", "distlocks",
- "downgrade-backup", "ebuild-locks", "fakeroot",
- "fail-clean", "force-mirror", "force-prefix", "getbinpkg",
- "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
- "metadata-transfer", "mirror", "multilib-strict", "news",
- "noauto", "noclean", "nodoc", "noinfo", "noman",
- "nostrip", "notitles", "parallel-fetch", "parallel-install",
- "parse-eapi-ebuild-head",
- "prelink-checksums",
- "protect-owned", "python-trace", "sandbox",
- "selinux", "sesandbox", "sfperms",
- "sign", "skiprocheck", "split-elog", "split-log", "splitdebug",
- "strict", "stricter", "suidctl", "test", "test-fail-continue",
- "unknown-features-filter", "unknown-features-warn",
- "unmerge-backup",
- "unmerge-logs", "unmerge-orphans", "userfetch", "userpriv",
- "usersandbox", "usersync", "webrsync-gpg", "xattr"])
-
-EAPI = 4
+ "assume-digests",
+ "binpkg-logs",
+ "buildpkg",
+ "buildsyspkg",
+ "candy",
+ "ccache",
+ "cgroup",
+ "chflags",
+ "clean-logs",
+ "collision-protect",
+ "compress-build-logs",
+ "compressdebug",
+ "compress-index",
+ "config-protect-if-modified",
+ "digest",
+ "distcc",
+ "distcc-pump",
+ "distlocks",
+ "downgrade-backup",
+ "ebuild-locks",
+ "fail-clean",
+ "fakeroot",
+ "fixlafiles",
+ "force-mirror",
+ "force-prefix",
+ "getbinpkg",
+ "installsources",
+ "ipc-sandbox",
+ "keeptemp",
+ "keepwork",
+ "lmirror",
+ "merge-sync",
+ "metadata-transfer",
+ "mirror",
+ "multilib-strict",
+ "network-sandbox",
+ "news",
+ "noauto",
+ "noclean",
+ "nodoc",
+ "noinfo",
+ "noman",
+ "nostrip",
+ "notitles",
+ "parallel-fetch",
+ "parallel-install",
+ "prelink-checksums",
+ "preserve-libs",
+ "protect-owned",
+ "python-trace",
+ "sandbox",
+ "selinux",
+ "sesandbox",
+ "sfperms",
+ "sign",
+ "skiprocheck",
+ "splitdebug",
+ "split-elog",
+ "split-log",
+ "strict",
+ "stricter",
+ "suidctl",
+ "test",
+ "test-fail-continue",
+ "unknown-features-filter",
+ "unknown-features-warn",
+ "unmerge-backup",
+ "unmerge-logs",
+ "unmerge-orphans",
+ "userfetch",
+ "userpriv",
+ "usersandbox",
+ "usersync",
+ "webrsync-gpg",
+ "xattr",
+])
+
+EAPI = 5
HASHING_BLOCKSIZE = 32768
MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160")
@@ -151,13 +239,35 @@ MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
# a config instance (since it's possible to construct a config instance with
# a different EPREFIX). Therefore, the EPREFIX constant should *NOT* be used
# in the definition of any other constants within this file.
-EPREFIX=""
+EPREFIX = ""
# pick up EPREFIX from the environment if set
if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
EPREFIX = os.environ["PORTAGE_OVERRIDE_EPREFIX"]
if EPREFIX:
EPREFIX = os.path.normpath(EPREFIX)
+ if EPREFIX == os.sep:
+ EPREFIX = ""
+
+VCS_DIRS = ("CVS", "RCS", "SCCS", ".bzr", ".git", ".hg", ".svn")
+
+# List of known live eclasses. Keep it in sync with cnf/sets/portage.conf
+LIVE_ECLASSES = frozenset([
+ "bzr",
+ "cvs",
+ "darcs",
+ "git",
+ "git-2",
+ "git-r3",
+ "mercurial",
+ "subversion",
+ "tla",
+])
+
+SUPPORTED_BINPKG_FORMATS = ("tar", "rpm")
+
+# Time formats used in various places like metadata.chk.
+TIMESTAMP_FORMAT = "%a, %d %b %Y %H:%M:%S +0000" # to be used with time.gmtime()
# ===========================================================================
# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
@@ -165,17 +275,5 @@ if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
# Private constants for use in conditional code in order to minimize the diff
# between branches.
-_ENABLE_DYN_LINK_MAP = True
-_ENABLE_PRESERVE_LIBS = True
-_ENABLE_REPO_NAME_WARN = True
+_DEPCLEAN_LIB_CHECK_DEFAULT = True
_ENABLE_SET_CONFIG = True
-_ENABLE_INHERIT_CHECK = True
-
-
-# The definitions above will differ between branches, so it's useful to have
-# common lines of diff context here in order to avoid merge conflicts.
-
-if _ENABLE_PRESERVE_LIBS:
- SUPPORTED_FEATURES = set(SUPPORTED_FEATURES)
- SUPPORTED_FEATURES.add("preserve-libs")
- SUPPORTED_FEATURES = frozenset(SUPPORTED_FEATURES)
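
The PORTAGE_BASE_PATH hunk above leans on str.rstrip("co") to map a compiled module path back to its source: rstrip() removes any trailing run of the characters "c" and "o", so both "const.pyc" and "const.pyo" reduce to "const.py" before realpath() resolves site-packages symlinks. A minimal sketch of the same computation, using a hypothetical install path:

    import os.path

    for name in ("const.py", "const.pyc", "const.pyo"):
        # Trailing "c"/"o" characters are stripped as a set, so all
        # three variants collapse to the same source filename.
        assert name.rstrip("co") == "const.py"

    # realpath() follows any symlink back to the real tree, then three
    # trailing components (portage/const.py plus the pym dir) are
    # dropped to reach the installation base. Path is illustrative only.
    module = "/usr/lib64/python2.7/site-packages/portage/const.pyc"
    base = os.path.join(os.sep, os.sep.join(
        os.path.realpath(module.rstrip("co")).split(os.sep)[:-3]))
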
diff --git a/pym/portage/cvstree.py b/pym/portage/cvstree.py
index 3680ae41f..4a3afae11 100644
--- a/pym/portage/cvstree.py
+++ b/pym/portage/cvstree.py
@@ -1,5 +1,5 @@
# cvstree.py -- cvs tree utilities
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -15,20 +15,20 @@ from portage import _encodings
from portage import _unicode_encode
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
# [D]/Name/Version/Date/Flags/Tags
def pathdata(entries, path):
- """(entries,path)
- Returns the data(dict) for a specific file/dir at the path specified."""
- mysplit=path.split("/")
- myentries=entries
- mytarget=mysplit[-1]
- mysplit=mysplit[:-1]
+ """Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit = path.split("/")
+ myentries = entries
+ mytarget = mysplit[-1]
+ mysplit = mysplit[:-1]
for mys in mysplit:
if mys in myentries["dirs"]:
- myentries=myentries["dirs"][mys]
+ myentries = myentries["dirs"][mys]
else:
return None
if mytarget in myentries["dirs"]:
@@ -39,18 +39,17 @@ def pathdata(entries, path):
return None
def fileat(entries, path):
- return pathdata(entries,path)
+ return pathdata(entries, path)
def isadded(entries, path):
- """(entries,path)
- Returns true if the path exists and is added to the cvs tree."""
- mytarget=pathdata(entries, path)
+ """Returns True if the path exists and is added to the cvs tree."""
+ mytarget = pathdata(entries, path)
if mytarget:
if "cvs" in mytarget["status"]:
return 1
- basedir=os.path.dirname(path)
- filename=os.path.basename(path)
+ basedir = os.path.dirname(path)
+ filename = os.path.basename(path)
try:
myfile = io.open(
@@ -59,234 +58,250 @@ def isadded(entries, path):
mode='r', encoding=_encodings['content'], errors='strict')
except IOError:
return 0
- mylines=myfile.readlines()
+ mylines = myfile.readlines()
myfile.close()
- rep=re.compile("^\/"+re.escape(filename)+"\/");
+ rep = re.compile("^\/%s\/" % re.escape(filename))
for x in mylines:
if rep.search(x):
return 1
return 0
-def findnew(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that have been added but
+def findnew(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that have been added but
have not yet been committed. Returns a list of paths, optionally prepended
- with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "cvs" in entries["files"][myfile]["status"]:
if "0" == entries["files"][myfile]["revision"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findnew(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
def findoption(entries, pattern, recursive=0, basedir=""):
- """(entries, pattern, recursive=0, basedir="")
- Iterate over paths of cvs entries for which the pattern.search() method
+ """Iterate over paths of cvs entries for which the pattern.search() method
finds a match. Returns a list of paths, optionally prepended with a
- basedir."""
+ basedir.
+ """
if not basedir.endswith("/"):
basedir += "/"
+
for myfile, mydata in entries["files"].items():
if "cvs" in mydata["status"]:
if pattern.search(mydata["flags"]):
- yield basedir+myfile
+ yield basedir + myfile
+
if recursive:
for mydir, mydata in entries["dirs"].items():
for x in findoption(mydata, pattern,
- recursive, basedir+mydir):
+ recursive, basedir + mydir):
yield x
-def findchanged(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that exist in the cvs tree
+def findchanged(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that exist in the cvs tree
and differ from the committed version. Returns a list of paths, optionally
- prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "cvs" in entries["files"][myfile]["status"]:
if "current" not in entries["files"][myfile]["status"]:
if "exists" in entries["files"][myfile]["status"]:
- if entries["files"][myfile]["revision"]!="0":
- mylist.append(basedir+myfile)
+ if entries["files"][myfile]["revision"] != "0":
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findchanged(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
-def findmissing(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are listed in the cvs
+def findmissing(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are listed in the cvs
tree but do not exist on the filesystem. Returns a list of paths,
- optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "cvs" in entries["files"][myfile]["status"]:
if "exists" not in entries["files"][myfile]["status"]:
if "removed" not in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findmissing(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
-def findunadded(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are in valid cvs
+def findunadded(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are in valid cvs
directories but are not part of the cvs tree. Returns a list of paths,
- optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
- #ignore what cvs ignores.
+ # Ignore what cvs ignores.
+ mylist = []
for myfile in entries["files"]:
if "cvs" not in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findunadded(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
-def findremoved(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are in flagged for cvs
- deletions. Returns a list of paths, optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+def findremoved(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are in flagged for cvs
+ deletions. Returns a list of paths, optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "removed" in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findremoved(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
def findall(entries, recursive=0, basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all new, changed, missing, and unadded
- entities. Returns a 4 element list of lists as returned from each find*()."""
-
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mynew = findnew(entries,recursive,basedir)
- mychanged = findchanged(entries,recursive,basedir)
- mymissing = findmissing(entries,recursive,basedir)
- myunadded = findunadded(entries,recursive,basedir)
- myremoved = findremoved(entries,recursive,basedir)
+ """Recurses the entries tree to find all new, changed, missing, and unadded
+ entities. Returns a 4 element list of lists as returned from each find*().
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+ mynew = findnew(entries, recursive, basedir)
+ mychanged = findchanged(entries, recursive, basedir)
+ mymissing = findmissing(entries, recursive, basedir)
+ myunadded = findunadded(entries, recursive, basedir)
+ myremoved = findremoved(entries, recursive, basedir)
return [mynew, mychanged, mymissing, myunadded, myremoved]
ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
def apply_cvsignore_filter(list):
- x=0
+ x = 0
while x < len(list):
if ignore_list.match(list[x].split("/")[-1]):
list.pop(x)
else:
- x+=1
+ x += 1
return list
-def getentries(mydir,recursive=0):
- """(basedir,recursive=0)
- Scans the given directory and returns a datadict of all the entries in
- the directory separated as a dirs dict and a files dict."""
- myfn=mydir+"/CVS/Entries"
+def getentries(mydir, recursive=0):
+ """Scans the given directory and returns a datadict of all the entries in
+ the directory separated as a dirs dict and a files dict.
+ """
+ myfn = mydir + "/CVS/Entries"
# entries=[dirs, files]
- entries={"dirs":{},"files":{}}
+ entries = {"dirs":{}, "files":{}}
if not os.path.exists(mydir):
return entries
try:
myfile = io.open(_unicode_encode(myfn,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='strict')
- mylines=myfile.readlines()
+ mylines = myfile.readlines()
myfile.close()
except SystemExit as e:
raise
except:
- mylines=[]
+ mylines = []
+
for line in mylines:
- if line and line[-1]=="\n":
- line=line[:-1]
+ if line and line[-1] == "\n":
+ line = line[:-1]
if not line:
continue
- if line=="D": # End of entries file
+ if line == "D": # End of entries file
break
- mysplit=line.split("/")
- if len(mysplit)!=6:
- print("Confused:",mysplit)
+ mysplit = line.split("/")
+ if len(mysplit) != 6:
+ print("Confused:", mysplit)
continue
- if mysplit[0]=="D":
- entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
- entries["dirs"][mysplit[1]]["status"]=["cvs"]
+ if mysplit[0] == "D":
+ entries["dirs"][mysplit[1]] = {"dirs":{}, "files":{}, "status":[]}
+ entries["dirs"][mysplit[1]]["status"] = ["cvs"]
if os.path.isdir(mydir+"/"+mysplit[1]):
- entries["dirs"][mysplit[1]]["status"]+=["exists"]
- entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+ entries["dirs"][mysplit[1]]["status"] += ["exists"]
+ entries["dirs"][mysplit[1]]["flags"] = mysplit[2:]
if recursive:
- rentries=getentries(mydir+"/"+mysplit[1],recursive)
- entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
- entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+ rentries = getentries(mydir + "/" + mysplit[1], recursive)
+ entries["dirs"][mysplit[1]]["dirs"] = rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"] = rentries["files"]
else:
# [D]/Name/revision/Date/Flags/Tags
- entries["files"][mysplit[1]]={}
- entries["files"][mysplit[1]]["revision"]=mysplit[2]
- entries["files"][mysplit[1]]["date"]=mysplit[3]
- entries["files"][mysplit[1]]["flags"]=mysplit[4]
- entries["files"][mysplit[1]]["tags"]=mysplit[5]
- entries["files"][mysplit[1]]["status"]=["cvs"]
- if entries["files"][mysplit[1]]["revision"][0]=="-":
- entries["files"][mysplit[1]]["status"]+=["removed"]
+ entries["files"][mysplit[1]] = {}
+ entries["files"][mysplit[1]]["revision"] = mysplit[2]
+ entries["files"][mysplit[1]]["date"] = mysplit[3]
+ entries["files"][mysplit[1]]["flags"] = mysplit[4]
+ entries["files"][mysplit[1]]["tags"] = mysplit[5]
+ entries["files"][mysplit[1]]["status"] = ["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0] == "-":
+ entries["files"][mysplit[1]]["status"] += ["removed"]
for file in os.listdir(mydir):
- if file=="CVS":
+ if file == "CVS":
continue
- if os.path.isdir(mydir+"/"+file):
+ if os.path.isdir(mydir + "/" + file):
if file not in entries["dirs"]:
if ignore_list.match(file) is not None:
continue
- entries["dirs"][file]={"dirs":{},"files":{}}
+ entries["dirs"][file] = {"dirs":{}, "files":{}}
# It's normal for a directory to be unlisted in Entries
# when checked out without -P (see bug #257660).
- rentries=getentries(mydir+"/"+file,recursive)
- entries["dirs"][file]["dirs"]=rentries["dirs"]
- entries["dirs"][file]["files"]=rentries["files"]
+ rentries = getentries(mydir + "/" + file, recursive)
+ entries["dirs"][file]["dirs"] = rentries["dirs"]
+ entries["dirs"][file]["files"] = rentries["files"]
if "status" in entries["dirs"][file]:
if "exists" not in entries["dirs"][file]["status"]:
- entries["dirs"][file]["status"]+=["exists"]
+ entries["dirs"][file]["status"] += ["exists"]
else:
- entries["dirs"][file]["status"]=["exists"]
- elif os.path.isfile(mydir+"/"+file):
+ entries["dirs"][file]["status"] = ["exists"]
+ elif os.path.isfile(mydir + "/" + file):
if file not in entries["files"]:
if ignore_list.match(file) is not None:
continue
- entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+ entries["files"][file] = {"revision":"", "date":"", "flags":"", "tags":""}
if "status" in entries["files"][file]:
if "exists" not in entries["files"][file]["status"]:
- entries["files"][file]["status"]+=["exists"]
+ entries["files"][file]["status"] += ["exists"]
else:
- entries["files"][file]["status"]=["exists"]
+ entries["files"][file]["status"] = ["exists"]
try:
- mystat=os.stat(mydir+"/"+file)
+ mystat = os.stat(mydir + "/" + file)
mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
if "status" not in entries["files"][file]:
- entries["files"][file]["status"]=[]
- if mytime==entries["files"][file]["date"]:
- entries["files"][file]["status"]+=["current"]
+ entries["files"][file]["status"] = []
+ if mytime == entries["files"][file]["date"]:
+ entries["files"][file]["status"] += ["current"]
except SystemExit as e:
raise
except Exception as e:
- print("failed to stat",file)
+ print("failed to stat", file)
print(e)
return
@@ -294,6 +309,7 @@ def getentries(mydir,recursive=0):
pass
else:
print()
- print("File of unknown type:",mydir+"/"+file)
+ print("File of unknown type:", mydir + "/" + file)
print()
+
return entries
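
getentries() above parses each line of a CVS/Entries file in the [D]/Name/revision/Date/Flags/Tags layout noted in the comments. A minimal sketch with a hypothetical Entries line, split exactly as the code does:

    # File entries have an empty leading field; directory entries
    # start with "D" instead.
    line = "/ChangeLog/1.5/Mon Apr  7 12:00:00 2014//"
    mysplit = line.split("/")
    assert len(mysplit) == 6
    name, revision, date, flags, tags = mysplit[1:]
    # revision "0" marks a newly added file (see findnew); a leading
    # "-" in revision marks a file scheduled for removal (findremoved).
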
diff --git a/pym/portage/data.py b/pym/portage/data.py
index c4d967a1b..54e3a8d65 100644
--- a/pym/portage/data.py
+++ b/pym/portage/data.py
@@ -1,17 +1,18 @@
# data.py -- Calculated/Discovered Data Values
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import os, pwd, grp, platform
+import os, pwd, grp, platform, sys
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.output:colorize',
'portage.util:writemsg',
+ 'subprocess'
)
from portage.localization import _
-ostype=platform.system()
+ostype = platform.system()
userland = None
if ostype == "DragonFly" or ostype.endswith("BSD"):
userland = "BSD"
@@ -22,10 +23,10 @@ lchown = getattr(os, "lchown", None)
if not lchown:
if ostype == "Darwin":
- def lchown(*pos_args, **key_args):
+ def lchown(*_args, **_kwargs):
pass
else:
- def lchown(*pargs, **kwargs):
+ def lchown(*_args, **_kwargs):
writemsg(colorize("BAD", "!!!") + _(
" It seems that os.lchown does not"
" exist. Please rebuild python.\n"), noiselevel=-1)
@@ -58,11 +59,10 @@ def portage_group_warning():
# If the "wheel" group does not exist then wheelgid falls back to 0.
# If the "portage" group does not exist then portage_uid falls back to wheelgid.
-uid=os.getuid()
-wheelgid=0
-
+uid = os.getuid()
+wheelgid = 0
try:
- wheelgid=grp.getgrnam("wheel")[2]
+ wheelgid = grp.getgrnam("wheel")[2]
except KeyError:
pass
@@ -85,19 +85,27 @@ def _get_global(k):
elif portage.const.EPREFIX:
secpass = 2
#Discover the uid and gid of the portage user/group
+ keyerror = False
try:
portage_uid = pwd.getpwnam(_get_global('_portage_username')).pw_uid
- _portage_grpname = _get_global('_portage_grpname')
- if platform.python_implementation() == 'PyPy':
- # Somehow this prevents "TypeError: expected string" errors
- # from grp.getgrnam() with PyPy 1.7
- _portage_grpname = str(_portage_grpname)
- portage_gid = grp.getgrnam(_portage_grpname).gr_gid
- if secpass < 1 and portage_gid in os.getgroups():
- secpass = 1
except KeyError:
+ keyerror = True
portage_uid = 0
+
+ try:
+ portage_gid = grp.getgrnam(_get_global('_portage_grpname')).gr_gid
+ except KeyError:
+ keyerror = True
portage_gid = 0
+
+ if secpass < 1 and portage_gid in os.getgroups():
+ secpass = 1
+
+ # Suppress this error message if both PORTAGE_GRPNAME and
+ # PORTAGE_USERNAME are set to "root", for things like
+ # Android (see bug #454060).
+ if keyerror and not (_get_global('_portage_username') == "root" and
+ _get_global('_portage_grpname') == "root"):
writemsg(colorize("BAD",
_("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
writemsg(_(
@@ -129,10 +137,28 @@ def _get_global(k):
# Get a list of group IDs for the portage user. Do not use
# grp.getgrall() since it is known to trigger spurious
# SIGPIPE problems with nss_ldap.
- mystatus, myoutput = \
- portage.subprocess_getstatusoutput("id -G %s" % _portage_username)
- if mystatus == os.EX_OK:
- for x in myoutput.split():
+ cmd = ["id", "-G", _portage_username]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ globals()[k] = v
+ _initialized_globals.add(k)
+ return v
+ cmd[0] = fullname
+
+ encoding = portage._encodings['content']
+ cmd = [portage._unicode_encode(x,
+ encoding=encoding, errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ myoutput = proc.communicate()[0]
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ for x in portage._unicode_decode(myoutput,
+ encoding=encoding, errors='strict').split():
try:
v.append(int(x))
except ValueError:
@@ -213,10 +239,18 @@ def _init(settings):
if '_portage_grpname' not in _initialized_globals and \
'_portage_username' not in _initialized_globals:
+ # Prevents "TypeError: expected string" errors
+ # from grp.getgrnam() with PyPy
+ native_string = platform.python_implementation() == 'PyPy'
+
v = settings.get('PORTAGE_GRPNAME', 'portage')
+ if native_string:
+ v = portage._native_string(v)
globals()['_portage_grpname'] = v
_initialized_globals.add('_portage_grpname')
v = settings.get('PORTAGE_USERNAME', 'portage')
+ if native_string:
+ v = portage._native_string(v)
globals()['_portage_username'] = v
_initialized_globals.add('_portage_username')
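
The data.py hunk above replaces portage.subprocess_getstatusoutput() with a direct subprocess.Popen call for "id -G". A standalone sketch of the same pattern, minus the encoding and Python 3.1 workarounds (the username is hypothetical):

    import subprocess

    proc = subprocess.Popen(["id", "-G", "portage"],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = proc.communicate()[0].decode()
    if proc.wait() == 0:
        # One numeric group ID per whitespace-separated token.
        gids = [int(x) for x in output.split() if x.isdigit()]
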
diff --git a/pym/portage/dbapi/_MergeProcess.py b/pym/portage/dbapi/_MergeProcess.py
index b5f6a0b0b..956dbb9e6 100644
--- a/pym/portage/dbapi/_MergeProcess.py
+++ b/pym/portage/dbapi/_MergeProcess.py
@@ -1,7 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import platform
import signal
import sys
import traceback
@@ -10,10 +11,11 @@ import errno
import fcntl
import portage
from portage import os, _unicode_decode
+from portage.util._ctypes import find_library
import portage.elog.messages
-from _emerge.SpawnProcess import SpawnProcess
+from portage.util._async.ForkProcess import ForkProcess
-class MergeProcess(SpawnProcess):
+class MergeProcess(ForkProcess):
"""
Merge packages in a subprocess, so the Scheduler can run in the main
thread while files are moved or copied asynchronously.
@@ -40,11 +42,20 @@ class MergeProcess(SpawnProcess):
settings.reset()
settings.setcpv(cpv, mydb=self.mydbapi)
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if platform.system() == "Linux" and \
+ "merge-sync" in settings.features:
+ find_library("c")
+
# Inherit stdin by default, so that the pdb SIGUSR1
# handler is usable for the subprocess.
if self.fd_pipes is None:
self.fd_pipes = {}
- self.fd_pipes.setdefault(0, sys.stdin.fileno())
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
+ self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
super(MergeProcess, self)._start()
@@ -90,7 +101,7 @@ class MergeProcess(SpawnProcess):
reporter(msg, phase=phase, key=key, out=out)
if event & self.scheduler.IO_HUP:
- self.scheduler.unregister(self._elog_reg_id)
+ self.scheduler.source_remove(self._elog_reg_id)
self._elog_reg_id = None
os.close(self._elog_reader_fd)
self._elog_reader_fd = None
@@ -101,12 +112,24 @@ class MergeProcess(SpawnProcess):
def _spawn(self, args, fd_pipes, **kwargs):
"""
Fork a subprocess, apply local settings, and call
- dblink.merge().
+ dblink.merge(). TODO: Share code with ForkProcess.
"""
elog_reader_fd, elog_writer_fd = os.pipe()
+
fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFD,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
blockers = None
if self.blockers is not None:
# Query blockers in the main process, since closing
@@ -116,10 +139,9 @@ class MergeProcess(SpawnProcess):
blockers = self.blockers()
mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
treetype=self.treetype, vartree=self.vartree,
- blockers=blockers, scheduler=self.scheduler,
- pipe=elog_writer_fd)
+ blockers=blockers, pipe=elog_writer_fd)
fd_pipes[elog_writer_fd] = elog_writer_fd
- self._elog_reg_id = self.scheduler.register(elog_reader_fd,
+ self._elog_reg_id = self.scheduler.io_add_watch(elog_reader_fd,
self._registered_events, self._elog_output_handler)
# If a concurrent emerge process tries to install a package
@@ -133,88 +155,100 @@ class MergeProcess(SpawnProcess):
if not self.unmerge:
counter = self.vartree.dbapi.counter_tick()
- pid = os.fork()
- if pid != 0:
- if not isinstance(pid, int):
- raise AssertionError(
- "fork returned non-integer: %s" % (repr(pid),))
-
- os.close(elog_writer_fd)
- self._elog_reader_fd = elog_reader_fd
- self._buf = ""
- self._elog_keys = set()
-
- # invalidate relevant vardbapi caches
- if self.vartree.dbapi._categories is not None:
- self.vartree.dbapi._categories = None
- self.vartree.dbapi._pkgs_changed = True
- self.vartree.dbapi._clear_pkg_cache(mylink)
-
- portage.process.spawned_pids.append(pid)
- return [pid]
-
- os.close(elog_reader_fd)
- portage.locks._close_fds()
- # Disable close_fds since we don't exec (see _setup_pipes docstring).
- portage.process._setup_pipes(fd_pipes, close_fds=False)
-
- # Use default signal handlers since the ones inherited
- # from the parent process are irrelevant here.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- portage.output.havecolor = self.settings.get('NOCOLOR') \
- not in ('yes', 'true')
-
- # In this subprocess we want mylink._display_merge() to use
- # stdout/stderr directly since they are pipes. This behavior
- # is triggered when mylink._scheduler is None.
- mylink._scheduler = None
-
- # Avoid wastful updates of the vdb cache.
- self.vartree.dbapi._flush_cache_enabled = False
-
- # In this subprocess we don't want PORTAGE_BACKGROUND to
- # suppress stdout/stderr output since they are pipes. We
- # also don't want to open PORTAGE_LOG_FILE, since it will
- # already be opened by the parent process, so we set the
- # "subprocess" value for use in conditional logging code
- # involving PORTAGE_LOG_FILE.
- if not self.unmerge:
- # unmerge phases have separate logs
- if self.settings.get("PORTAGE_BACKGROUND") == "1":
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
- else:
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
- self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
- self.settings["PORTAGE_BACKGROUND"] = "subprocess"
- self.settings.backup_changes("PORTAGE_BACKGROUND")
-
- rval = 1
+ parent_pid = os.getpid()
+ pid = None
try:
- if self.unmerge:
- if not mylink.exists():
- rval = os.EX_OK
- elif mylink.unmerge(
- ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
- mylink.lockdb()
- try:
- mylink.delete()
- finally:
- mylink.unlockdb()
- rval = os.EX_OK
- else:
- rval = mylink.merge(self.pkgloc, self.infloc,
- myebuild=self.myebuild, mydbapi=self.mydbapi,
- prev_mtimes=self.prev_mtimes, counter=counter)
- except SystemExit:
- raise
- except:
- traceback.print_exc()
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+
+ os.close(elog_writer_fd)
+ self._elog_reader_fd = elog_reader_fd
+ self._buf = ""
+ self._elog_keys = set()
+ # Discard messages which will be collected by the subprocess,
+ # in order to avoid duplicates (bug #446136).
+ portage.elog.messages.collect_messages(key=mylink.mycpv)
+
+ # invalidate relevant vardbapi caches
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ self.vartree.dbapi._pkgs_changed = True
+ self.vartree.dbapi._clear_pkg_cache(mylink)
+
+ return [pid]
+
+ os.close(elog_reader_fd)
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ portage.output.havecolor = self.settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ # Avoid wasteful updates of the vdb cache.
+ self.vartree.dbapi._flush_cache_enabled = False
+
+ # In this subprocess we don't want PORTAGE_BACKGROUND to
+ # suppress stdout/stderr output since they are pipes. We
+ # also don't want to open PORTAGE_LOG_FILE, since it will
+ # already be opened by the parent process, so we set the
+ # "subprocess" value for use in conditional logging code
+ # involving PORTAGE_LOG_FILE.
+ if not self.unmerge:
+ # unmerge phases have separate logs
+ if self.settings.get("PORTAGE_BACKGROUND") == "1":
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+ else:
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+ rval = 1
+ try:
+ if self.unmerge:
+ if not mylink.exists():
+ rval = os.EX_OK
+ elif mylink.unmerge(
+ ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ rval = os.EX_OK
+ else:
+ rval = mylink.merge(self.pkgloc, self.infloc,
+ myebuild=self.myebuild, mydbapi=self.mydbapi,
+ prev_mtimes=self.prev_mtimes, counter=counter)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
finally:
- # Call os._exit() from finally block, in order to suppress any
- # finally blocks from earlier in the call stack. See bug #345289.
- os._exit(rval)
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
def _unregister(self):
"""
@@ -231,7 +265,7 @@ class MergeProcess(SpawnProcess):
self._unlock_vdb()
if self._elog_reg_id is not None:
- self.scheduler.unregister(self._elog_reg_id)
+ self.scheduler.source_remove(self._elog_reg_id)
self._elog_reg_id = None
if self._elog_reader_fd is not None:
os.close(self._elog_reader_fd)
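
The rewritten _spawn() above sets up its try/finally before calling os.fork(), so a child that somehow escapes its own os._exit() can never unwind back through the parent's finally blocks (bug #345289). A minimal sketch of that pattern, assuming job() is a caller-supplied callable returning an int exit status:

    import os
    import sys

    def spawn(job):
        parent_pid = os.getpid()
        pid = None
        try:
            pid = os.fork()
            if pid != 0:
                return pid             # parent: hand the child pid back
            rval = 1
            try:
                rval = job()           # child does the real work
            except BaseException:
                sys.stderr.flush()     # os._exit() skips the stderr flush
            finally:
                os._exit(rval)
        finally:
            # Reached in the child only if fork() raised after the child
            # existed or the child skipped os._exit() above; never let it
            # run the parent's cleanup code.
            if pid == 0 or (pid is None and os.getpid() != parent_pid):
                os._exit(1)
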
diff --git a/pym/portage/dbapi/_SyncfsProcess.py b/pym/portage/dbapi/_SyncfsProcess.py
new file mode 100644
index 000000000..7518214ec
--- /dev/null
+++ b/pym/portage/dbapi/_SyncfsProcess.py
@@ -0,0 +1,53 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.util._ctypes import find_library, LoadLibrary
+from portage.util._async.ForkProcess import ForkProcess
+
+class SyncfsProcess(ForkProcess):
+ """
+ Isolate ctypes usage in a subprocess, in order to avoid
+ potential problems with stale cached libraries as
+ described in bug #448858, comment #14 (also see
+ http://bugs.python.org/issue14597).
+ """
+
+ __slots__ = ('paths',)
+
+ @staticmethod
+ def _get_syncfs():
+
+ filename = find_library("c")
+ if filename is not None:
+ library = LoadLibrary(filename)
+ if library is not None:
+ try:
+ return library.syncfs
+ except AttributeError:
+ pass
+
+ return None
+
+ def _run(self):
+
+ syncfs_failed = False
+ syncfs = self._get_syncfs()
+
+ if syncfs is not None:
+ for path in self.paths:
+ try:
+ fd = os.open(path, os.O_RDONLY)
+ except OSError:
+ pass
+ else:
+ try:
+ if syncfs(fd) != 0:
+ # Happens with PyPy (bug #446610)
+ syncfs_failed = True
+ finally:
+ os.close(fd)
+
+ if syncfs is None or syncfs_failed:
+ return 1
+ return os.EX_OK
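
SyncfsProcess._get_syncfs() above resolves syncfs(2) from libc through portage's ctypes wrappers in portage.util._ctypes. The same lookup with the stdlib ctypes module directly, as a Linux-only sketch rather than the portage API:

    import ctypes
    import ctypes.util
    import os

    libc_name = ctypes.util.find_library("c")
    syncfs = None
    if libc_name is not None:
        libc = ctypes.CDLL(libc_name, use_errno=True)
        # dlsym() lookup; syncfs is absent from non-Linux libc.
        syncfs = getattr(libc, "syncfs", None)

    if syncfs is not None:
        fd = os.open("/", os.O_RDONLY)
        try:
            if syncfs(fd) != 0:
                print("syncfs failed, errno=%d" % ctypes.get_errno())
        finally:
            os.close(fd)
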
diff --git a/pym/portage/dbapi/__init__.py b/pym/portage/dbapi/__init__.py
index b999fb5df..a20a1e84f 100644
--- a/pym/portage/dbapi/__init__.py
+++ b/pym/portage/dbapi/__init__.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["dbapi"]
import re
@@ -16,16 +18,18 @@ portage.proxy.lazyimport.lazyimport(globals(),
from portage import os
from portage import auxdbkeys
+from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidData
from portage.localization import _
+from _emerge.Package import Package
class dbapi(object):
- _category_re = re.compile(r'^\w[-.+\w]*$')
+ _category_re = re.compile(r'^\w[-.+\w]*$', re.UNICODE)
_categories = None
_use_mutable = False
_known_keys = frozenset(x for x in auxdbkeys
if not x.startswith("UNUSED_0"))
- _pkg_str_aux_keys = ("EAPI", "SLOT", "repository")
+ _pkg_str_aux_keys = ("EAPI", "KEYWORDS", "SLOT", "repository")
def __init__(self):
pass
@@ -153,8 +157,7 @@ class dbapi(object):
metadata = dict(zip(self._pkg_str_aux_keys,
self.aux_get(cpv, self._pkg_str_aux_keys, myrepo=repo)))
- return _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata["repository"], eapi=metadata["EAPI"])
+ return _pkg_str(cpv, metadata=metadata, settings=self.settings)
def _iter_match_repo(self, atom, cpv_iter):
for cpv in cpv_iter:
@@ -182,7 +185,7 @@ class dbapi(object):
2) Check enabled/disabled flag states.
"""
- aux_keys = ["IUSE", "SLOT", "USE", "repository"]
+ aux_keys = ["EAPI", "IUSE", "KEYWORDS", "SLOT", "USE", "repository"]
for cpv in cpv_iter:
try:
metadata = dict(zip(aux_keys,
@@ -190,17 +193,31 @@ class dbapi(object):
except KeyError:
continue
+ try:
+ cpv.slot
+ except AttributeError:
+ try:
+ cpv = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
+ except InvalidData:
+ continue
+
if not self._match_use(atom, cpv, metadata):
continue
yield cpv
- def _match_use(self, atom, cpv, metadata):
- iuse_implicit_match = self.settings._iuse_implicit_match
- iuse = frozenset(x.lstrip('+-') for x in metadata["IUSE"].split())
+ def _match_use(self, atom, pkg, metadata):
+ eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = self.settings._iuse_implicit_match
+ usealiases = self.settings._use_manager.getUseAliases(pkg)
+ iuse = Package._iuse(None, metadata["IUSE"].split(), iuse_implicit_match, usealiases, metadata["EAPI"])
for x in atom.unevaluated_atom.use.required:
- if x not in iuse and not iuse_implicit_match(x):
+ if iuse.get_real_flag(x) is None:
return False
if atom.use is None:
@@ -210,44 +227,54 @@ class dbapi(object):
# Use IUSE to validate USE settings for built packages,
# in case the package manager that built this package
# failed to do that for some reason (or in case of
- # data corruption).
- use = frozenset(x for x in metadata["USE"].split()
- if x in iuse or iuse_implicit_match(x))
- missing_enabled = atom.use.missing_enabled.difference(iuse)
- missing_disabled = atom.use.missing_disabled.difference(iuse)
-
- if atom.use.enabled:
- if any(x in atom.use.enabled for x in missing_disabled):
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use = frozenset(x for x in metadata["USE"].split() if iuse.get_real_flag(x) is not None)
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ enabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.enabled)
+ disabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.disabled)
+
+ if enabled:
+ if any(x in enabled for x in missing_disabled):
return False
- need_enabled = atom.use.enabled.difference(use)
+ need_enabled = enabled.difference(use)
if need_enabled:
if any(x not in missing_enabled for x in need_enabled):
return False
- if atom.use.disabled:
- if any(x in atom.use.disabled for x in missing_enabled):
+ if disabled:
+ if any(x in disabled for x in missing_enabled):
return False
- need_disabled = atom.use.disabled.intersection(use)
+ need_disabled = disabled.intersection(use)
if need_disabled:
if any(x not in missing_disabled for x in need_disabled):
return False
elif not self.settings.local_config:
# Check masked and forced flags for repoman.
- if hasattr(cpv, 'slot'):
- pkg = cpv
- else:
- pkg = _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata.get("repository"))
- usemask = self.settings._getUseMask(pkg)
+ usemask = self.settings._getUseMask(pkg,
+ stable=self.settings._parent_stable)
if any(x in usemask for x in atom.use.enabled):
return False
- useforce = self.settings._getUseForce(pkg)
+ useforce = self.settings._getUseForce(pkg,
+ stable=self.settings._parent_stable)
if any(x in useforce and x not in usemask
for x in atom.use.disabled):
return False
+ # Check unsatisfied use-default deps
+ if atom.use.enabled:
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.enabled for x in missing_disabled):
+ return False
+ if atom.use.disabled:
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.disabled for x in missing_enabled):
+ return False
+
return True
def invalidentry(self, mypath):
@@ -275,7 +302,8 @@ class dbapi(object):
maxval = len(cpv_all)
aux_get = self.aux_get
aux_update = self.aux_update
- meta_keys = ["DEPEND", "EAPI", "RDEPEND", "PDEPEND", "PROVIDE", 'repository']
+ update_keys = Package._dep_keys + ("PROVIDE",)
+ meta_keys = update_keys + self._pkg_str_aux_keys
repo_dict = None
if isinstance(updates, dict):
repo_dict = updates
@@ -284,14 +312,20 @@ class dbapi(object):
if onProgress:
onProgress(maxval, 0)
for i, cpv in enumerate(cpv_all):
- metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
- eapi = metadata.pop('EAPI')
- repo = metadata.pop('repository')
+ try:
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=self.settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in update_keys)
if repo_dict is None:
updates_list = updates
else:
try:
- updates_list = repo_dict[repo]
+ updates_list = repo_dict[pkg.repo]
except KeyError:
try:
updates_list = repo_dict['DEFAULT']
@@ -302,7 +336,7 @@ class dbapi(object):
continue
metadata_updates = \
- portage.update_dbentries(updates_list, metadata, eapi=eapi)
+ portage.update_dbentries(updates_list, metadata, parent=pkg)
if metadata_updates:
aux_update(cpv, metadata_updates)
if onUpdate:
@@ -343,9 +377,9 @@ class dbapi(object):
continue
moves += 1
if "/" not in newslot and \
- mycpv.slot_abi and \
- mycpv.slot_abi not in (mycpv.slot, newslot):
- newslot = "%s/%s" % (newslot, mycpv.slot_abi)
+ mycpv.sub_slot and \
+ mycpv.sub_slot not in (mycpv.slot, newslot):
+ newslot = "%s/%s" % (newslot, mycpv.sub_slot)
mydata = {"SLOT": newslot+"\n"}
self.aux_update(mycpv, mydata)
return moves
diff --git a/pym/portage/dbapi/_expand_new_virt.py b/pym/portage/dbapi/_expand_new_virt.py
index d379b4c1d..9aa603d11 100644
--- a/pym/portage/dbapi/_expand_new_virt.py
+++ b/pym/portage/dbapi/_expand_new_virt.py
@@ -1,8 +1,11 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import portage
from portage.dep import Atom, _get_useflag_re
+from portage.eapi import _get_eapi_attrs
def expand_new_virt(vardb, atom):
"""
@@ -44,6 +47,7 @@ def expand_new_virt(vardb, atom):
yield atom
continue
+ eapi_attrs = _get_eapi_attrs(eapi)
# Validate IUSE and USE, for early detection of vardb corruption.
useflag_re = _get_useflag_re(eapi)
valid_iuse = []
@@ -54,7 +58,11 @@ def expand_new_virt(vardb, atom):
valid_iuse.append(x)
valid_iuse = frozenset(valid_iuse)
- iuse_implicit_match = vardb.settings._iuse_implicit_match
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = vardb.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = vardb.settings._iuse_implicit_match
+
valid_use = []
for x in use.split():
if x in valid_iuse or iuse_implicit_match(x):
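
The expand_new_virt() hunk above validates IUSE and USE tokens against an EAPI-dependent flag regex obtained from portage.dep._get_useflag_re(). A rough standalone equivalent; the pattern below is an illustrative assumption, not the exact per-EAPI expression:

    import re

    useflag_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9+_@-]*$")

    iuse = "+acl -doc kernel_linux"
    valid_iuse = frozenset(x.lstrip("+-") for x in iuse.split()
        if useflag_re.match(x.lstrip("+-")))
    # frozenset(['acl', 'doc', 'kernel_linux'])
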
diff --git a/pym/portage/dbapi/_similar_name_search.py b/pym/portage/dbapi/_similar_name_search.py
new file mode 100644
index 000000000..b6e4a1fbe
--- /dev/null
+++ b/pym/portage/dbapi/_similar_name_search.py
@@ -0,0 +1,57 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import difflib
+
+from portage.versions import catsplit
+
+def similar_name_search(dbs, atom):
+
+ cp_lower = atom.cp.lower()
+ cat, pkg = catsplit(cp_lower)
+ if cat == "null":
+ cat = None
+
+ all_cp = set()
+ for db in dbs:
+ all_cp.update(db.cp_all())
+
+ # discard dir containing no ebuilds
+ all_cp.discard(atom.cp)
+
+ orig_cp_map = {}
+ for cp_orig in all_cp:
+ orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
+ all_cp = set(orig_cp_map)
+
+ if cat:
+ matches = difflib.get_close_matches(cp_lower, all_cp)
+ else:
+ pkg_to_cp = {}
+ for other_cp in list(all_cp):
+ other_pkg = catsplit(other_cp)[1]
+ if other_pkg == pkg:
+ # Check for non-identical package that
+ # differs only by upper/lower case.
+ identical = True
+ for cp_orig in orig_cp_map[other_cp]:
+ if catsplit(cp_orig)[1] != \
+ catsplit(atom.cp)[1]:
+ identical = False
+ break
+ if identical:
+ # discard dir containing no ebuilds
+ all_cp.discard(other_cp)
+ continue
+ pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
+
+ pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
+ matches = []
+ for pkg_match in pkg_matches:
+ matches.extend(pkg_to_cp[pkg_match])
+
+ matches_orig_case = []
+ for cp in matches:
+ matches_orig_case.extend(orig_cp_map[cp])
+
+ return matches_orig_case
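
similar_name_search() above ultimately delegates the fuzzy comparison to difflib.get_close_matches(). A minimal sketch with hypothetical package names:

    import difflib

    candidates = ["dev-lang/python", "dev-lang/perl", "app-shells/bash"]
    print(difflib.get_close_matches("dev-lang/pyhton", candidates))
    # ['dev-lang/python'] -- the default cutoff is 0.6 similarity
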
diff --git a/pym/portage/dbapi/bintree.py b/pym/portage/dbapi/bintree.py
index 9527b0766..229ce3b18 100644
--- a/pym/portage/dbapi/bintree.py
+++ b/pym/portage/dbapi/bintree.py
@@ -1,11 +1,14 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["bindbapi", "binarytree"]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_hash_filter',
'portage.dbapi.dep_expand:dep_expand',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
'portage.output:EOutput,colorize',
@@ -24,7 +27,7 @@ from portage.const import CACHE_PATH
from portage.dbapi.virtual import fakedbapi
from portage.dep import Atom, use_reduce, paren_enclose
from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
- PermissionDenied, PortageException
+ ParseError, PermissionDenied, PortageException
from portage.localization import _
from portage import _movefile
from portage import os
@@ -40,7 +43,9 @@ import subprocess
import sys
import tempfile
import textwrap
+import traceback
import warnings
+from gzip import GzipFile
from itertools import chain
try:
from urllib.parse import urlparse
@@ -48,12 +53,18 @@ except ImportError:
from urlparse import urlparse
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
_unicode = str
basestring = str
long = int
else:
_unicode = unicode
+class UseCachedCopyOfRemoteIndex(Exception):
+ # If the local copy is recent enough
+ # then fetching the remote index can be skipped.
+ pass
+
class bindbapi(fakedbapi):
_known_keys = frozenset(list(fakedbapi._known_keys) + \
["CHOST", "repository", "USE"])
@@ -65,9 +76,10 @@ class bindbapi(fakedbapi):
self.cpdict={}
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
- ["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+ ["BUILD_TIME", "CHOST", "DEPEND", "EAPI",
+ "HDEPEND", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
- "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
+ "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES"
])
self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
self._aux_cache = {}
@@ -130,15 +142,15 @@ class bindbapi(fakedbapi):
if myval:
mydata[x] = " ".join(myval.split())
- if not mydata.setdefault('EAPI', _unicode_decode('0')):
- mydata['EAPI'] = _unicode_decode('0')
+ if not mydata.setdefault('EAPI', '0'):
+ mydata['EAPI'] = '0'
if cache_me:
aux_cache = self._aux_cache_slot_dict()
for x in self._aux_cache_keys:
- aux_cache[x] = mydata.get(x, _unicode_decode(''))
+ aux_cache[x] = mydata.get(x, '')
self._aux_cache[mycpv] = aux_cache
- return [mydata.get(x, _unicode_decode('')) for x in wants]
+ return [mydata.get(x, '') for x in wants]
def aux_update(self, cpv, values):
if not self.bintree.populated:
@@ -250,7 +262,7 @@ def _pkgindex_cpv_map_latest_build(pkgindex):
class binarytree(object):
"this tree scans for a list of all packages available in PKGDIR"
- def __init__(self, _unused=None, pkgdir=None,
+ def __init__(self, _unused=DeprecationWarning, pkgdir=None,
virtual=DeprecationWarning, settings=None):
if pkgdir is None:
@@ -259,11 +271,11 @@ class binarytree(object):
if settings is None:
raise TypeError("settings parameter is required")
- if _unused is not None and _unused != settings['ROOT']:
- warnings.warn("The root parameter of the "
+ if _unused is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
"portage.dbapi.bintree.binarytree"
- " constructor is now unused. Use "
- "settings['ROOT'] instead.",
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
DeprecationWarning, stacklevel=2)
if virtual is not DeprecationWarning:
@@ -295,22 +307,26 @@ class binarytree(object):
self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
self._pkgindex_aux_keys = \
["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
- "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
- "PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
+ "HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
+ "PROVIDE", "RESTRICT", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
"BASE_URI"]
self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
self._pkgindex_use_evaluated_keys = \
- ("LICENSE", "RDEPEND", "DEPEND",
- "PDEPEND", "PROPERTIES", "PROVIDE")
+ ("DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
+ "PDEPEND", "PROPERTIES", "PROVIDE", "RESTRICT")
self._pkgindex_header_keys = set([
"ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
- "ACCEPT_PROPERTIES", "CBUILD",
+ "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
"CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
- "GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
+ "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
+ "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED"])
self._pkgindex_default_pkg_data = {
"BUILD_TIME" : "",
+ "DEFINED_PHASES" : "",
"DEPEND" : "",
"EAPI" : "0",
+ "HDEPEND" : "",
"IUSE" : "",
"KEYWORDS": "",
"LICENSE" : "",
@@ -322,7 +338,6 @@ class binarytree(object):
"RESTRICT": "",
"SLOT" : "0",
"USE" : "",
- "DEFINED_PHASES" : "",
}
self._pkgindex_inherited_keys = ["CHOST", "repository"]
@@ -416,7 +431,7 @@ class binarytree(object):
moves += 1
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
- updated_items = update_dbentries([mylist], mydata, eapi=mycpv.eapi)
+ updated_items = update_dbentries([mylist], mydata, parent=mycpv)
mydata.update(updated_items)
mydata[b'PF'] = \
_unicode_encode(mynewpkg + "\n",
@@ -552,6 +567,20 @@ class binarytree(object):
if not os.path.isdir(path):
raise
+ def _file_permissions(self, path):
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ pass
+ else:
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
+ try:
+ portage.util.apply_permissions(path, gid=pkgdir_gid,
+ mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ pass
+
def _move_to_all(self, cpv):
"""If the file exists, move it. Whether or not it exists, update state
for future getname() calls."""
@@ -807,9 +836,7 @@ class binarytree(object):
del pkgindex.packages[:]
pkgindex.packages.extend(iter(metadata.values()))
self._update_pkgindex_header(pkgindex.header)
- f = atomic_ofstream(self._pkgindex_file)
- pkgindex.write(f)
- f.close()
+ self._pkgindex_write(pkgindex)
if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
@@ -852,6 +879,7 @@ class binarytree(object):
if e.errno != errno.ENOENT:
raise
local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ remote_timestamp = None
rmt_idx = self._new_pkgindex()
proc = None
tmp_filename = None
@@ -860,41 +888,79 @@ class binarytree(object):
# protocols and requires the base url to have a trailing
# slash, so join manually...
url = base_url.rstrip("/") + "/Packages"
- try:
- f = _urlopen(url)
- except IOError:
- path = parsed_url.path.rstrip("/") + "/Packages"
+ f = None
+
+ # Don't use urlopen for https, since it doesn't support
+ # certificate/hostname verification (bug #469888).
+ if parsed_url.scheme not in ('https',):
+ try:
+ f = _urlopen(url, if_modified_since=local_timestamp)
+ if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
+ remote_timestamp = f.headers.get('timestamp')
+ except IOError as err:
+ if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
+ raise UseCachedCopyOfRemoteIndex()
+
+ if parsed_url.scheme in ('ftp', 'http', 'https'):
+ # This protocol is supposedly supported by urlopen,
+ # so apparently there's a problem with the url
+ # or a bug in urlopen.
+ if self.settings.get("PORTAGE_DEBUG", "0") != "0":
+ traceback.print_exc()
- if parsed_url.scheme == 'sftp':
- # The sftp command complains about 'Illegal seek' if
- # we try to make it write to /dev/stdout, so use a
- # temp file instead.
- fd, tmp_filename = tempfile.mkstemp()
- os.close(fd)
- if port is not None:
- port_args = ['-P', "%s" % (port,)]
- proc = subprocess.Popen(['sftp'] + port_args + \
- [user_passwd + host + ":" + path, tmp_filename])
- if proc.wait() != os.EX_OK:
raise
- f = open(tmp_filename, 'rb')
- elif parsed_url.scheme == 'ssh':
+ except ValueError:
+ raise ParseError("Invalid Portage BINHOST value '%s'"
+ % url.lstrip())
+
+ if f is None:
+
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'ssh':
+ # Use a pipe so that we can terminate the download
+ # early if we detect that the TIMESTAMP header
+ # matches that of the cached Packages file.
+ ssh_args = ['ssh']
if port is not None:
- port_args = ['-p', "%s" % (port,)]
- proc = subprocess.Popen(['ssh'] + port_args + \
- [user_passwd + host, '--', 'cat', path],
+ ssh_args.append("-p%s" % (port,))
+ # NOTE: shlex evaluates embedded quotes
+ ssh_args.extend(portage.util.shlex_split(
+ self.settings.get("PORTAGE_SSH_OPTS", "")))
+ ssh_args.append(user_passwd + host)
+ ssh_args.append('--')
+ ssh_args.append('cat')
+ ssh_args.append(path)
+
+ proc = subprocess.Popen(ssh_args,
stdout=subprocess.PIPE)
f = proc.stdout
else:
setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
fcmd = self.settings.get(setting)
if not fcmd:
- raise
+ fcmd = self.settings.get('FETCHCOMMAND')
+ if not fcmd:
+ raise EnvironmentError("FETCHCOMMAND is unset")
+
fd, tmp_filename = tempfile.mkstemp()
tmp_dirname, tmp_basename = os.path.split(tmp_filename)
os.close(fd)
- success = portage.getbinpkg.file_get(url,
- tmp_dirname, fcmd=fcmd, filename=tmp_basename)
+
+ fcmd_vars = {
+ "DISTDIR": tmp_dirname,
+ "FILE": tmp_basename,
+ "URI": url
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = self.settings[k]
+ except KeyError:
+ pass
+
+ success = portage.getbinpkg.file_get(
+ fcmd=fcmd, fcmd_vars=fcmd_vars)
if not success:
raise EnvironmentError("%s failed" % (setting,))
f = open(tmp_filename, 'rb')
@@ -903,7 +969,8 @@ class binarytree(object):
_encodings['repo.content'], errors='replace')
try:
rmt_idx.readHeader(f_dec)
- remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp: # in case it had not been read from HTTP header
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
if not remote_timestamp:
# no timestamp in the header, something's wrong
pkgindex = None
@@ -931,6 +998,12 @@ class binarytree(object):
writemsg("\n\n!!! %s\n" % \
_("Timed out while closing connection to binhost"),
noiselevel=-1)
+ except UseCachedCopyOfRemoteIndex:
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
+ "\n")
+ rmt_idx = pkgindex
except EnvironmentError as e:
writemsg(_("\n\n!!! Error fetching binhost package" \
" info from '%s'\n") % _hide_url_passwd(base_url))
@@ -999,75 +1072,7 @@ class binarytree(object):
# Local package instances override remote instances.
for cpv in metadata:
self._remotepkgs.pop(cpv, None)
- continue
- try:
- chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
- if chunk_size < 8:
- chunk_size = 8
- except (ValueError, KeyError):
- chunk_size = 3000
- writemsg_stdout("\n")
- writemsg_stdout(
- colorize("GOOD", _("Fetching bininfo from ")) + \
- _hide_url_passwd(base_url) + "\n")
- remotepkgs = portage.getbinpkg.dir_get_metadata(
- base_url, chunk_size=chunk_size)
-
- for mypkg, remote_metadata in remotepkgs.items():
- mycat = remote_metadata.get("CATEGORY")
- if mycat is None:
- #old-style or corrupt package
- writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
- noiselevel=-1)
- continue
- mycat = mycat.strip()
- try:
- fullpkg = _pkg_str(mycat+"/"+mypkg[:-5])
- except InvalidData:
- writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
- noiselevel=-1)
- continue
-
- if fullpkg in metadata:
- # When using this old protocol, comparison with the remote
- # package isn't supported, so the local package is always
- # preferred even if getbinpkgsonly is enabled.
- continue
-
- if not self.dbapi._category_re.match(mycat):
- writemsg(_("!!! Remote binary package has an " \
- "unrecognized category: '%s'\n") % fullpkg,
- noiselevel=-1)
- writemsg(_("!!! '%s' has a category that is not" \
- " listed in %setc/portage/categories\n") % \
- (fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
- noiselevel=-1)
- continue
- mykey = portage.cpv_getkey(fullpkg)
- try:
- # invalid tbz2's can hurt things.
- self.dbapi.cpv_inject(fullpkg)
- for k, v in remote_metadata.items():
- remote_metadata[k] = v.strip()
- remote_metadata["BASE_URI"] = base_url
-
- # Eliminate metadata values with names that digestCheck
- # uses, since they are not valid when using the old
- # protocol. Typically this is needed for SIZE metadata
- # which corresponds to the size of the unpacked files
- # rather than the binpkg file size, triggering digest
- # verification failures as reported in bug #303211.
- remote_metadata.pop('SIZE', None)
- for k in portage.checksum.hashfunc_map:
- remote_metadata.pop(k, None)
-
- self._remotepkgs[fullpkg] = remote_metadata
- except SystemExit as e:
- raise
- except:
- writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
- noiselevel=-1)
- continue
+
self.populated=1
def inject(self, cpv, filename=None):
@@ -1121,6 +1126,10 @@ class binarytree(object):
if not samefile:
self._ensure_dir(os.path.dirname(new_filename))
_movefile(filename, new_filename, mysettings=self.settings)
+ full_path = new_filename
+
+ self._file_permissions(full_path)
+
if self._all_directory and \
self.getname(cpv).split(os.path.sep)[-2] == "All":
self._create_symlink(cpv)
@@ -1168,13 +1177,35 @@ class binarytree(object):
pkgindex.packages.append(d)
self._update_pkgindex_header(pkgindex.header)
- f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
- pkgindex.write(f)
- f.close()
+ self._pkgindex_write(pkgindex)
+
finally:
if pkgindex_lock:
unlockfile(pkgindex_lock)
+ def _pkgindex_write(self, pkgindex):
+ contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
+ pkgindex.write(contents)
+ contents = contents.getvalue()
+ atime = mtime = long(pkgindex.header["TIMESTAMP"])
+ output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
+ self._pkgindex_file, None)]
+
+ if "compress-index" in self.settings.features:
+ gz_fname = self._pkgindex_file + ".gz"
+ fileobj = atomic_ofstream(gz_fname, mode="wb")
+ output_files.append((GzipFile(filename='', mode="wb",
+ fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
+
+ for f, fname, f_close in output_files:
+ f.write(contents)
+ f.close()
+ if f_close is not None:
+ f_close.close()
+ self._file_permissions(fname)
+			# some seconds may have elapsed since TIMESTAMP, so restore it as the file's mtime
+ os.utime(fname, (atime, mtime))
+
def _pkgindex_entry(self, cpv):
"""
Performs checksums and evaluates USE flag conditionals.
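
The new _pkgindex_write() above writes the plain Packages file and, with FEATURES=compress-index, a gzipped sibling whose embedded gzip timestamp and filesystem mtime both match the header TIMESTAMP. A runnable sketch of that scheme, not part of this patch (the write_index name is hypothetical):

    import gzip
    import os
    import time

    def write_index(path, contents, timestamp=None):
        mtime = int(timestamp if timestamp is not None else time.time())
        data = contents.encode("utf-8")
        with open(path, "wb") as f:
            f.write(data)
        with open(path + ".gz", "wb") as raw:
            # filename='' keeps the output name out of the gzip header, and
            # mtime pins the header timestamp so repeated writes are identical.
            with gzip.GzipFile(filename="", mode="wb",
                    fileobj=raw, mtime=mtime) as gz:
                gz.write(data)
        for p in (path, path + ".gz"):
            # some seconds may have elapsed since TIMESTAMP was taken
            os.utime(p, (mtime, mtime))
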
@@ -1234,6 +1265,16 @@ class binarytree(object):
else:
header.pop(k, None)
+ # These values may be useful for using a binhost without
+ # having a local copy of the profile (bug #470006).
+ for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
+ k = "USE_EXPAND_VALUES_" + k
+ v = self.settings.get(k)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
def _pkgindex_version_supported(self, pkgindex):
version = pkgindex.header.get("VERSION")
if version:
@@ -1246,11 +1287,6 @@ class binarytree(object):
def _eval_use_flags(self, cpv, metadata):
use = frozenset(metadata["USE"].split())
- raw_use = use
- iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
- use = [f for f in use if f in iuse]
- use.sort()
- metadata["USE"] = " ".join(use)
for k in self._pkgindex_use_evaluated_keys:
if k.endswith('DEPEND'):
token_class = Atom
@@ -1259,7 +1295,7 @@ class binarytree(object):
try:
deps = metadata[k]
- deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
+ deps = use_reduce(deps, uselist=use, token_class=token_class)
deps = paren_enclose(deps)
except portage.exception.InvalidDependString as e:
writemsg("%s: %s\n" % (k, str(e)),
@@ -1383,19 +1419,14 @@ class binarytree(object):
f.close()
return pkgindex
- def digestCheck(self, pkg):
- """
- Verify digests for the given package and raise DigestException
- if verification fails.
- @rtype: bool
- @return: True if digests could be located, False otherwise.
- """
- cpv = pkg
- if not isinstance(cpv, basestring):
+ def _get_digests(self, pkg):
+
+ try:
cpv = pkg.cpv
- pkg = None
+ except AttributeError:
+ cpv = pkg
- pkg_path = self.getname(cpv)
+ digests = {}
metadata = None
if self._remotepkgs is None or cpv not in self._remotepkgs:
for d in self._load_pkgindex().packages:
@@ -1405,9 +1436,8 @@ class binarytree(object):
else:
metadata = self._remotepkgs[cpv]
if metadata is None:
- return False
+ return digests
- digests = {}
for k in hashfunc_map:
v = metadata.get(k)
if not v:
@@ -1421,9 +1451,31 @@ class binarytree(object):
writemsg(_("!!! Malformed SIZE attribute in remote " \
"metadata for '%s'\n") % cpv)
+ return digests
+
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @return: True if digests could be located, False otherwise.
+ """
+
+ digests = self._get_digests(pkg)
+
if not digests:
return False
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ pkg_path = self.getname(cpv)
+ hash_filter = _hash_filter(
+ self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
eout = EOutput()
eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
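
The _hash_filter hook added above lets PORTAGE_CHECKSUM_FILTER drop unwanted hash types before verification. A simplified model of those semantics, not part of this patch — tokens are hash names, "*" matches everything, a "-" prefix rejects, and the last matching token wins:

    def apply_hash_filter(digests, filter_str):
        tokens = filter_str.upper().split()
        if not tokens:
            return digests  # an empty filter is transparent

        def accepted(name):
            verdict = False
            for tok in tokens:
                negated = tok.startswith("-")
                pattern = tok.lstrip("-")
                if pattern == "*" or pattern == name:
                    verdict = not negated
            return verdict

        kept = dict((k, v) for k, v in digests.items()
                if k == "size" or accepted(k))
        # If filtering would discard every hash, keep the originals.
        return kept if any(k != "size" for k in kept) else digests

    print(apply_hash_filter(
        {"MD5": "d41d8...", "SHA256": "e3b0c...", "size": 1024},
        "* -MD5"))  # keeps SHA256 and size
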
@@ -1439,9 +1491,7 @@ class binarytree(object):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
- myslot = self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
- except SystemExit as e:
- raise
- except Exception as e:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
pass
return myslot
diff --git a/pym/portage/dbapi/cpv_expand.py b/pym/portage/dbapi/cpv_expand.py
index 947194cca..70ee78245 100644
--- a/pym/portage/dbapi/cpv_expand.py
+++ b/pym/portage/dbapi/cpv_expand.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["cpv_expand"]
import portage
diff --git a/pym/portage/dbapi/dep_expand.py b/pym/portage/dbapi/dep_expand.py
index ac8ccf4b3..3de5d8fc3 100644
--- a/pym/portage/dbapi/dep_expand.py
+++ b/pym/portage/dbapi/dep_expand.py
@@ -1,6 +1,8 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["dep_expand"]
import re
@@ -23,7 +25,7 @@ def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
if mydep[0] == "*":
mydep = mydep[1:]
orig_dep = mydep
- has_cat = '/' in orig_dep
+ has_cat = '/' in orig_dep.split(':')[0]
if not has_cat:
alphanum = re.search(r'\w', orig_dep)
if alphanum:
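
The one-line fix above matters for EAPI 5 sub-slot dependencies: a dep such as "bar:2/2.30" contains '/' inside the slot specification, which the old `'/' in orig_dep` test mistook for a category separator. Splitting at ':' first restricts the test to the package part (illustration, not part of this patch):

    def has_category(dep):
        return "/" in dep.split(":")[0]

    print(has_category("dev-libs/bar"))  # True  - a real category
    print(has_category("bar:2/2.30"))    # False - '/' only in the sub-slot
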
diff --git a/pym/portage/dbapi/porttree.py b/pym/portage/dbapi/porttree.py
index 945c22c3d..590e3c5ef 100644
--- a/pym/portage/dbapi/porttree.py
+++ b/pym/portage/dbapi/porttree.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
"close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
]
@@ -33,21 +35,75 @@ from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import OrderedDict
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
-from _emerge.PollScheduler import PollScheduler
import os as _os
import sys
import traceback
import warnings
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
+def close_portdbapi_caches():
+ # The python interpreter does _not_ guarantee that destructors are
+ # called for objects that remain when the interpreter exits, so we
+ # use an atexit hook to call destructors for any global portdbapi
+ # instances that may have been constructed.
+ try:
+ portage._legacy_globals_constructed
+ except AttributeError:
+ pass
+ else:
+ if "db" in portage._legacy_globals_constructed:
+ try:
+ db = portage.db
+ except AttributeError:
+ pass
+ else:
+ if isinstance(db, dict):
+ for x in db.values():
+ try:
+ if "porttree" in x.lazy_items:
+ continue
+ except (AttributeError, TypeError):
+ continue
+ try:
+ x = x.pop("porttree").dbapi
+ except (AttributeError, KeyError):
+ continue
+ if not isinstance(x, portdbapi):
+ continue
+ x.close_caches()
+
+portage.process.atexit_register(close_portdbapi_caches)
+
+# It used to be necessary for API consumers to remove portdbapi instances
+# from portdbapi_instances, in order to avoid having accumulated instances
+# consume memory. Now, portdbapi_instances is just an empty dummy list, so
+# for backward compatibility, ignore ValueError for removal on non-existent
+# items.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy portdbapi_instances.
+ try:
+ list.remove(self, item)
+ except ValueError:
+ pass
+
class portdbapi(dbapi):
"""this tree will scan a portage directory located at root (passed to init)"""
- portdbapi_instances = []
+ portdbapi_instances = _dummy_list()
_use_mutable = True
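
The atexit hook above exists because CPython does not guarantee that destructors run for objects still alive at interpreter exit, so on-disk caches are flushed from an explicit hook instead. The general shape of the pattern, not part of this patch (the CacheOwner class is hypothetical; a WeakSet keeps the registry from pinning instances in memory):

    import atexit
    import weakref

    _instances = weakref.WeakSet()

    class CacheOwner(object):
        def __init__(self):
            _instances.add(self)
            self._dirty = {}

        def close_caches(self):
            self._dirty.clear()  # stand-in for writing caches back to disk

    @atexit.register
    def _close_all_caches():
        for inst in list(_instances):
            inst.close_caches()
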
@property
@@ -56,23 +112,28 @@ class portdbapi(dbapi):
@property
def porttree_root(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.porttree_root is deprecated in favor of portage.repository.config.RepoConfig.location "
+ "(available as repositories[repo_name].location attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
return self.settings.repositories.mainRepoLocation()
@property
def eclassdb(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.eclassdb is deprecated in favor of portage.repository.config.RepoConfig.eclass_db "
+ "(available as repositories[repo_name].eclass_db attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
main_repo = self.repositories.mainRepo()
if main_repo is None:
return None
return main_repo.eclass_db
- def __init__(self, _unused_param=None, mysettings=None):
+ def __init__(self, _unused_param=DeprecationWarning, mysettings=None):
"""
@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
@type _unused_param: None
@param mysettings: an immutable config instance
@type mysettings: portage.config
"""
- portdbapi.portdbapi_instances.append(self)
from portage import config
if mysettings:
@@ -81,7 +142,7 @@ class portdbapi(dbapi):
from portage import settings
self.settings = config(clone=settings)
- if _unused_param is not None:
+ if _unused_param is not DeprecationWarning:
warnings.warn("The first parameter of the " + \
"portage.dbapi.porttree.portdbapi" + \
" constructor is unused since portage-2.1.8. " + \
@@ -96,7 +157,6 @@ class portdbapi(dbapi):
# this purpose because doebuild makes many changes to the config
# instance that is passed in.
self.doebuild_settings = config(clone=self.settings)
- self._scheduler = PollScheduler().sched_iface
self.depcachedir = os.path.realpath(self.settings.depcachedir)
if os.environ.get("SANDBOX_ON") == "1":
@@ -153,10 +213,10 @@ class portdbapi(dbapi):
# portage group.
depcachedir_unshared = True
else:
- cache_kwargs.update({
+ cache_kwargs.update(portage._native_kwargs({
'gid' : portage_gid,
'perms' : 0o664
- })
+ }))
# If secpass < 1, we don't want to write to the cache
# since then we won't be able to apply group permissions
@@ -187,13 +247,25 @@ class portdbapi(dbapi):
self._pregen_auxdb[x] = cache
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
- ["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+ ["DEPEND", "EAPI", "HDEPEND",
+ "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
"PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
"RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
self._aux_cache = {}
self._broken_ebuilds = set()
+ @property
+ def _event_loop(self):
+ if portage._internal_caller:
+ # For internal portage usage, the global_event_loop is safe.
+ return global_event_loop()
+ else:
+ # For external API consumers, use a local EventLoop, since
+ # we don't want to assume that it's safe to override the
+ # global SIGCHLD handler.
+ return EventLoop(main=False)
+
def _create_pregen_cache(self, tree):
conf = self.repositories.get_repo_for_location(tree)
cache = conf.get_pregenerated_cache(
@@ -203,6 +275,13 @@ class portdbapi(dbapi):
cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
except AttributeError:
pass
+
+ if not cache.complete_eclass_entries:
+ warnings.warn(
+ ("Repository '%s' used deprecated 'pms' cache format. "
+ "Please migrate to 'md5-dict' format.") % (conf.name,),
+ DeprecationWarning)
+
return cache
def _init_cache_dirs(self):
@@ -447,7 +526,7 @@ class portdbapi(dbapi):
proc = EbuildMetadataPhase(cpv=mycpv,
ebuild_hash=ebuild_hash, portdb=self,
- repo_path=mylocation, scheduler=self._scheduler,
+ repo_path=mylocation, scheduler=self._event_loop,
settings=self.doebuild_settings)
proc.start()
@@ -627,13 +706,14 @@ class portdbapi(dbapi):
else:
return 0
- def cp_all(self, categories=None, trees=None):
+ def cp_all(self, categories=None, trees=None, reverse=False):
"""
This returns a list of all keys in our tree or trees
@param categories: optional list of categories to search or
defaults to self.settings.categories
@param trees: optional list of trees to search the categories in or
defaults to self.porttrees
+ @param reverse: reverse sort order (default is False)
@rtype list of [cat/pkg,...]
"""
d = {}
@@ -652,7 +732,7 @@ class portdbapi(dbapi):
continue
d[atom.cp] = None
l = list(d)
- l.sort()
+ l.sort(reverse=reverse)
return l
def cp_list(self, mycp, use_cache=1, mytree=None):
@@ -827,8 +907,8 @@ class portdbapi(dbapi):
continue
try:
- pkg_str = _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata["repository"], eapi=metadata["EAPI"])
+ pkg_str = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
except InvalidData:
continue
@@ -966,19 +1046,16 @@ class portdbapi(dbapi):
return False
if settings._getMissingProperties(cpv, metadata):
return False
+ if settings._getMissingRestrict(cpv, metadata):
+ return False
except InvalidDependString:
return False
return True
-def close_portdbapi_caches():
- for i in portdbapi.portdbapi_instances:
- i.close_caches()
-
-portage.process.atexit_register(portage.portageexit)
-
class portagetree(object):
- def __init__(self, root=None, virtual=DeprecationWarning, settings=None):
+ def __init__(self, root=DeprecationWarning, virtual=DeprecationWarning,
+ settings=None):
"""
Constructor for a PortageTree
@@ -994,7 +1071,7 @@ class portagetree(object):
settings = portage.settings
self.settings = settings
- if root is not None and root != settings['ROOT']:
+ if root is not DeprecationWarning:
warnings.warn("The root parameter of the " + \
"portage.dbapi.porttree.portagetree" + \
" constructor is now unused. Use " + \
@@ -1062,10 +1139,8 @@ class portagetree(object):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
- myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
- except SystemExit:
- raise
- except Exception:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
pass
return myslot
@@ -1137,9 +1212,18 @@ def _parse_uri_map(cpv, metadata, use=None):
uri_set = uri_map.get(distfile)
if uri_set is None:
- uri_set = set()
+ # Use OrderedDict to preserve order from SRC_URI
+ # while ensuring uniqueness.
+ uri_set = OrderedDict()
uri_map[distfile] = uri_set
- uri_set.add(uri)
- uri = None
+
+ # SRC_URI may contain a file name with no scheme, and in
+ # this case it does not belong in uri_set.
+ if urlparse(uri).scheme:
+ uri_set[uri] = True
+
+ # Convert OrderedDicts to tuples.
+ for k, v in uri_map.items():
+ uri_map[k] = tuple(v)
return uri_map
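
Two changes land in _parse_uri_map here: an OrderedDict serves as an ordered set, so duplicate mirror URIs collapse while SRC_URI order is preserved, and schemeless tokens (bare file names) are excluded. A condensed sketch, not part of this patch:

    from collections import OrderedDict
    try:
        from urllib.parse import urlparse
    except ImportError:  # Python 2
        from urlparse import urlparse

    def uris_for(distfile, uris):
        uri_set = OrderedDict()
        for uri in uris:
            if urlparse(uri).scheme:  # skip bare file names
                uri_set[uri] = True
        return tuple(uri_set)

    print(uris_for("foo-1.0.tar.gz", [
        "http://a.example/foo-1.0.tar.gz",
        "http://a.example/foo-1.0.tar.gz",  # duplicate mirror entry
        "foo-1.0.tar.gz",                   # no scheme, dropped
    ]))  # ('http://a.example/foo-1.0.tar.gz',)
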
diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
index ea62f6bcc..6417a561b 100644
--- a/pym/portage/dbapi/vartree.py
+++ b/pym/portage/dbapi/vartree.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
"vardbapi", "vartree", "dblink"] + \
["write_contents", "tar_contents"]
@@ -11,8 +13,9 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.data:portage_gid,portage_uid,secpass',
'portage.dbapi.dep_expand:dep_expand',
'portage.dbapi._MergeProcess:MergeProcess',
+ 'portage.dbapi._SyncfsProcess:SyncfsProcess',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
- 'use_reduce,_get_slot_re',
+ 'use_reduce,_slot_separator,_repo_separator',
'portage.eapi:_get_eapi_attrs',
'portage.elog:collect_ebuild_messages,collect_messages,' + \
'elog_process,_merge_logentries',
@@ -22,7 +25,6 @@ portage.proxy.lazyimport.lazyimport(globals(),
'_merge_unicode_error', '_spawn_phase',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
- 'portage.update:fixdbentries',
'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
'grabdict,normalize_path,new_protect_filename',
@@ -30,17 +32,20 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.util.env_update:env_update',
'portage.util.listdir:dircache,listdir',
'portage.util.movefile:movefile',
+ 'portage.util.writeable_check:get_ro_checker',
'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
- '_pkgsplit@pkgsplit,_pkg_str',
+ '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
'subprocess',
'tarfile',
)
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
-from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
from portage.dbapi import dbapi
from portage.exception import CommandNotFound, \
InvalidData, InvalidLocation, InvalidPackageName, \
@@ -61,7 +66,6 @@ from portage import _unicode_encode
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
-from _emerge.PollScheduler import PollScheduler
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.SpawnProcess import SpawnProcess
@@ -73,6 +77,7 @@ import io
from itertools import chain
import logging
import os as _os
+import platform
import pwd
import re
import stat
@@ -88,6 +93,7 @@ except ImportError:
import pickle
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
_unicode = str
@@ -111,7 +117,8 @@ class vardbapi(dbapi):
_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
- def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
+ def __init__(self, _unused_param=DeprecationWarning,
+ categories=None, settings=None, vartree=None):
"""
The categories parameter is unused since the dbapi class
now has a categories property that is generated from the
@@ -141,11 +148,11 @@ class vardbapi(dbapi):
settings = portage.settings
self.settings = settings
- if _unused_param is not None and _unused_param != settings['ROOT']:
+ if _unused_param is not DeprecationWarning:
warnings.warn("The first parameter of the "
"portage.dbapi.vartree.vardbapi"
- " constructor is now unused. Use "
- "settings['ROOT'] instead.",
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
DeprecationWarning, stacklevel=2)
self._eroot = settings['EROOT']
@@ -162,7 +169,7 @@ class vardbapi(dbapi):
self.vartree = vartree
self._aux_cache_keys = set(
["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
- "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
+ "EAPI", "HDEPEND", "HOMEPAGE", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
"repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
])
@@ -172,15 +179,9 @@ class vardbapi(dbapi):
self._counter_path = os.path.join(self._eroot,
CACHE_PATH, "counter")
- self._plib_registry = None
- if _ENABLE_PRESERVE_LIBS:
- self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
- os.path.join(self._eroot, PRIVATE_PATH,
- "preserved_libs_registry"))
-
- self._linkmap = None
- if _ENABLE_DYN_LINK_MAP:
- self._linkmap = LinkageMap(self)
+ self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
+ os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
+ self._linkmap = LinkageMap(self)
self._owners = self._owners_db(self)
self._cached_counter = None
@@ -368,7 +369,7 @@ class vardbapi(dbapi):
del e
write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
- fixdbentries([mylist], newpath, eapi=mycpv.eapi)
+
return moves
def cp_list(self, mycp, use_cache=1):
@@ -376,7 +377,10 @@ class vardbapi(dbapi):
if mysplit[0] == '*':
mysplit[0] = mysplit[0][1:]
try:
- mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+ if sys.hexversion >= 0x3030000:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
+ else:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime
except OSError:
mystat = 0
if use_cache and mycp in self.cpcache:
@@ -511,7 +515,10 @@ class vardbapi(dbapi):
return list(self._iter_match(mydep,
self.cp_list(mydep.cp, use_cache=use_cache)))
try:
- curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+ if sys.hexversion >= 0x3030000:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
+ else:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
except (IOError, OSError):
curmtime=0
@@ -566,31 +573,32 @@ class vardbapi(dbapi):
def _aux_cache_init(self):
aux_cache = None
open_kwargs = {}
- if sys.hexversion >= 0x3000000:
+ if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
# Buffered io triggers extreme performance issues in
# Unpickler.load() (problem observed with python-3.0.1).
# Unfortunately, performance is still poor relative to
- # python-2.x, but buffering makes it much worse.
+ # python-2.x, but buffering makes it much worse (problem
+ # appears to be solved in Python >=3.2 at least).
open_kwargs["buffering"] = 0
try:
- f = open(_unicode_encode(self._aux_cache_filename,
+ with open(_unicode_encode(self._aux_cache_filename,
encoding=_encodings['fs'], errors='strict'),
- mode='rb', **open_kwargs)
- mypickle = pickle.Unpickler(f)
- try:
- mypickle.find_global = None
- except AttributeError:
- # TODO: If py3k, override Unpickler.find_class().
- pass
- aux_cache = mypickle.load()
- f.close()
- del f
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+ mode='rb', **open_kwargs) as f:
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ aux_cache = mypickle.load()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
if isinstance(e, EnvironmentError) and \
getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
pass
else:
- writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
+ writemsg(_("!!! Error loading '%s': %s\n") % \
(self._aux_cache_filename, e), noiselevel=-1)
del e
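
Setting find_global = None above disables global lookups in the Python 2 unpickler, so a corrupt or hostile vdb cache cannot instantiate arbitrary classes; the TODO notes that Python 3 needs a find_class override instead. A sketch of the Python 3 equivalent, not part of this patch:

    import io
    import pickle

    class RestrictedUnpickler(pickle.Unpickler):
        def find_class(self, module, name):
            # Reject every GLOBAL opcode; plain containers never need one.
            raise pickle.UnpicklingError(
                "global '%s.%s' is forbidden" % (module, name))

    def load_cache(data):
        try:
            return RestrictedUnpickler(io.BytesIO(data)).load()
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            return None  # treat any unpickling problem as a cache miss

    print(load_cache(pickle.dumps({"EAPI": "5"})))  # {'EAPI': '5'}
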
@@ -710,7 +718,7 @@ class vardbapi(dbapi):
if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
# Empty or invalid slot triggers InvalidAtom exceptions when
# generating slot atoms for packages, so translate it to '0' here.
- mydata['SLOT'] = _unicode_decode('0')
+ mydata['SLOT'] = '0'
return [mydata[x] for x in wants]
@@ -735,21 +743,18 @@ class vardbapi(dbapi):
results[x] = st[stat.ST_MTIME]
continue
try:
- myf = io.open(
+ with io.open(
_unicode_encode(os.path.join(mydir, x),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- try:
- myd = myf.read()
- finally:
- myf.close()
+ errors='replace') as f:
+ myd = f.read()
except IOError:
if x not in self._aux_cache_keys and \
self._aux_cache_keys_re.match(x) is None:
env_keys.append(x)
continue
- myd = _unicode_decode('')
+ myd = ''
# Preserve \n for metadata that is known to
# contain multiple lines.
@@ -763,13 +768,13 @@ class vardbapi(dbapi):
for k in env_keys:
v = env_results.get(k)
if v is None:
- v = _unicode_decode('')
+ v = ''
if self._aux_multi_line_re.match(k) is None:
v = " ".join(v.split())
results[k] = v
if results.get("EAPI") == "":
- results[_unicode_decode("EAPI")] = _unicode_decode('0')
+ results["EAPI"] = '0'
return results
@@ -889,11 +894,17 @@ class vardbapi(dbapi):
del myroot
counter = -1
try:
- cfile = io.open(
+ with io.open(
_unicode_encode(self._counter_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
+ errors='replace') as f:
+ try:
+ counter = long(f.readline().strip())
+ except (OverflowError, ValueError) as e:
+ writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
except EnvironmentError as e:
# Silently allow ENOENT since files under
# /var/cache/ are allowed to disappear.
@@ -902,17 +913,6 @@ class vardbapi(dbapi):
self._counter_path, noiselevel=-1)
writemsg("!!! %s\n" % str(e), noiselevel=-1)
del e
- else:
- try:
- try:
- counter = long(cfile.readline().strip())
- finally:
- cfile.close()
- except (OverflowError, ValueError) as e:
- writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
- self._counter_path, noiselevel=-1)
- writemsg("!!! %s\n" % str(e), noiselevel=-1)
- del e
if self._cached_counter == counter:
max_counter = counter
@@ -1004,16 +1004,31 @@ class vardbapi(dbapi):
relative_filename = filename[root_len:]
contents_key = pkg._match_contents(relative_filename)
if contents_key:
- del new_contents[contents_key]
+ # It's possible for two different paths to refer to the same
+ # contents_key, due to directory symlinks. Therefore, pass a
+ # default value to pop, in order to avoid a KeyError which
+ # could otherwise be triggered (see bug #454400).
+ new_contents.pop(contents_key, None)
removed += 1
if removed:
- self._bump_mtime(pkg.mycpv)
- f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
- write_contents(new_contents, root, f)
- f.close()
- self._bump_mtime(pkg.mycpv)
- pkg._clear_contents_cache()
+ self.writeContentsToContentsFile(pkg, new_contents)
+
+ def writeContentsToContentsFile(self, pkg, new_contents):
+ """
+ @param pkg: package to write contents file for
+ @type pkg: dblink
+ @param new_contents: contents to write to CONTENTS file
+ @type new_contents: contents dictionary of the form
+ {u'/path/to/file' : (contents_attribute 1, ...), ...}
+ """
+ root = self.settings['ROOT']
+ self._bump_mtime(pkg.mycpv)
+ f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+ write_contents(new_contents, root, f)
+ f.close()
+ self._bump_mtime(pkg.mycpv)
+ pkg._clear_contents_cache()
class _owners_cache(object):
"""
@@ -1258,18 +1273,35 @@ class vardbapi(dbapi):
name = os.path.basename(path.rstrip(os.path.sep))
path_info_list.append((path, name, is_basename))
+ # Do work via the global event loop, so that it can be used
+ # for indication of progress during the search (bug #461412).
+ event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
root = self._vardb._eroot
- for cpv in self._vardb.cpv_all():
- dblnk = self._vardb._dblink(cpv)
+ def search_pkg(cpv):
+ dblnk = self._vardb._dblink(cpv)
for path, name, is_basename in path_info_list:
if is_basename:
for p in dblnk.getcontents():
if os.path.basename(p) == name:
- yield dblnk, p[len(root):]
+ search_pkg.results.append((dblnk, p[len(root):]))
else:
if dblnk.isowner(path):
- yield dblnk, path
+ search_pkg.results.append((dblnk, path))
+ search_pkg.complete = True
+ return False
+
+ search_pkg.results = []
+
+ for cpv in self._vardb.cpv_all():
+ del search_pkg.results[:]
+ search_pkg.complete = False
+ event_loop.idle_add(search_pkg, cpv)
+ while not search_pkg.complete:
+ event_loop.iteration()
+ for result in search_pkg.results:
+ yield result
class vartree(object):
"this tree will scan a var/db/pkg database located at root (passed to init)"
@@ -1390,7 +1422,7 @@ class vartree(object):
def getslot(self, mycatpkg):
"Get a slot for a catpkg; assume it exists."
try:
- return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ return self.dbapi._pkg_str(mycatpkg, None).slot
except KeyError:
return ""
@@ -1483,11 +1515,16 @@ class dblink(object):
self._contents_inodes = None
self._contents_basenames = None
self._linkmap_broken = False
+ self._device_path_map = {}
self._hardlink_merge_map = {}
self._hash_key = (self._eroot, self.mycpv)
self._protect_obj = None
self._pipe = pipe
+ # When necessary, this attribute is modified for
+ # compliance with RESTRICT=preserve-libs.
+ self._preserve_libs = "preserve-libs" in mysettings.features
+
def __hash__(self):
return hash(self._hash_key)
@@ -1530,7 +1567,11 @@ class dblink(object):
"""
Remove this entry from the database
"""
- if not os.path.exists(self.dbdir):
+ try:
+ os.lstat(self.dbdir)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
+ raise
return
# Check validity of self.dbdir before attempting to remove it.
@@ -1547,6 +1588,14 @@ class dblink(object):
pass
self.vartree.dbapi._remove(self)
+ # Use self.dbroot since we need an existing path for syncfs.
+ try:
+ self._merged_path(self.dbroot, os.lstat(self.dbroot))
+ except OSError:
+ pass
+
+ self._post_merge_sync()
+
def clearcontents(self):
"""
For a given db entry (self), erase the CONTENTS values.
@@ -1572,18 +1621,18 @@ class dblink(object):
return self.contentscache
pkgfiles = {}
try:
- myc = io.open(_unicode_encode(contents_file,
+ with io.open(_unicode_encode(contents_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
+ errors='replace') as f:
+ mylines = f.readlines()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
self.contentscache = pkgfiles
return pkgfiles
- mylines = myc.readlines()
- myc.close()
+
null_byte = "\0"
normalize_needed = self._normalize_needed
contents_re = self._contents_re
@@ -1598,7 +1647,7 @@ class dblink(object):
if myroot == os.path.sep:
myroot = None
# used to generate parent dir entries
- dir_entry = (_unicode_decode("dir"),)
+ dir_entry = ("dir",)
eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
pos = 0
errors = []
@@ -1698,8 +1747,11 @@ class dblink(object):
unmerge_preserve = \
self._find_libs_to_preserve(unmerge=True)
counter = self.vartree.dbapi.cpv_counter(self.mycpv)
- plib_registry.unregister(self.mycpv,
- self.settings["SLOT"], counter)
+ try:
+ slot = self.mycpv.slot
+ except AttributeError:
+ slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
+ plib_registry.unregister(self.mycpv, slot, counter)
if unmerge_preserve:
for path in sorted(unmerge_preserve):
contents_key = self._match_contents(path)
@@ -1709,7 +1761,7 @@ class dblink(object):
self._display_merge(_(">>> needed %s %s\n") % \
(obj_type, contents_key), noiselevel=-1)
plib_registry.register(self.mycpv,
- self.settings["SLOT"], counter, unmerge_preserve)
+ slot, counter, unmerge_preserve)
# Remove the preserved files from our contents
# so that they won't be unmerged.
self.vartree.dbapi.removeFromContents(self,
@@ -1779,7 +1831,8 @@ class dblink(object):
if self._scheduler is None:
# We create a scheduler instance and use it to
# log unmerge output separately from merge output.
- self._scheduler = PollScheduler().sched_iface
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
self.settings["PORTAGE_BACKGROUND"] = "1"
@@ -1804,7 +1857,7 @@ class dblink(object):
# done for this slot, so it shouldn't be repeated until the next
# replacement or unmerge operation.
if others_in_slot is None:
- slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
others_in_slot = []
@@ -1848,13 +1901,17 @@ class dblink(object):
except UnsupportedAPIException as e:
eapi_unsupported = e
+ if self._preserve_libs and "preserve-libs" in \
+ self.settings["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+
builddir_lock = None
scheduler = self._scheduler
retval = os.EX_OK
try:
# Only create builddir_lock if the caller
# has not already acquired the lock.
- if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
+ if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
builddir_lock = EbuildBuildDir(
scheduler=scheduler,
settings=self.settings)
@@ -1883,7 +1940,7 @@ class dblink(object):
showMessage(_("!!! FAILED prerm: %s\n") % \
os.path.join(self.dbdir, "EAPI"),
level=logging.ERROR, noiselevel=-1)
- showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
+ showMessage("%s\n" % (eapi_unsupported,),
level=logging.ERROR, noiselevel=-1)
elif os.path.isfile(myebuildpath):
phase = EbuildPhase(background=background,
@@ -2072,7 +2129,7 @@ class dblink(object):
if others_in_slot is None:
others_in_slot = []
- slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
for cur_cpv in slot_matches:
@@ -2129,6 +2186,14 @@ class dblink(object):
self._eerror("postrm",
["Could not chmod or unlink '%s': %s" % \
(file_name, ose)])
+ else:
+
+ # Even though the file no longer exists, we log it
+ # here so that _unmerge_dirs can see that we've
+ # removed a file from this device, and will record
+ # the parent directory for a syncfs call.
+ self._merged_path(file_name, lstatobj, exists=False)
+
finally:
if bsd_chflags and pflags != 0:
# Restore the parent flags we saved before unlinking
@@ -2549,15 +2614,19 @@ class dblink(object):
raise
del e
show_unmerge("!!!", "", "obj", child)
+
try:
+ parent_name = os.path.dirname(obj)
+ parent_stat = os.stat(parent_name)
+
if bsd_chflags:
lstatobj = os.lstat(obj)
if lstatobj.st_flags != 0:
bsd_chflags.lchflags(obj, 0)
- parent_name = os.path.dirname(obj)
+
# Use normal stat/chflags for the parent since we want to
# follow any symlinks to the real parent directory.
- pflags = os.stat(parent_name).st_flags
+ pflags = parent_stat.st_flags
if pflags != 0:
bsd_chflags.chflags(parent_name, 0)
try:
@@ -2566,13 +2635,34 @@ class dblink(object):
if bsd_chflags and pflags != 0:
# Restore the parent flags we saved before unlinking
bsd_chflags.chflags(parent_name, pflags)
+
+ # Record the parent directory for use in syncfs calls.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real parent directory resides.
+ self._merged_path(os.path.realpath(parent_name), parent_stat)
+
show_unmerge("<<<", "", "dir", obj)
except EnvironmentError as e:
if e.errno not in ignored_rmdir_errnos:
raise
if e.errno != errno.ENOENT:
show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
- del e
+
+ # Since we didn't remove this directory, record the directory
+ # itself for use in syncfs calls, if we have removed another
+ # file from the same device.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real directory resides.
+ try:
+ dir_stat = os.stat(obj)
+ except OSError:
+ pass
+ else:
+ if dir_stat.st_dev in self._device_path_map:
+ self._merged_path(os.path.realpath(obj), dir_stat)
+
else:
# When a directory is successfully removed, there's
# no need to protect symlinks that point to it.
@@ -2799,7 +2889,7 @@ class dblink(object):
self.vartree.dbapi._linkmap is None or \
self.vartree.dbapi._plib_registry is None or \
(not unmerge and self._installed_instance is None) or \
- "preserve-libs" not in self.settings.features:
+ not self._preserve_libs:
return set()
os = _os_merge
@@ -3383,7 +3473,10 @@ class dblink(object):
else:
logdir = os.path.join(self.settings["T"], "logging")
ebuild_logentries = collect_ebuild_messages(logdir)
- py_logentries = collect_messages(key=cpv).get(cpv, {})
+ # phasefilter is irrelevant for the above collect_ebuild_messages
+ # call, since this package instance has a private logdir. However,
+ # it may be relevant for the following collect_messages call.
+ py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(cpv, {})
logentries = _merge_logentries(py_logentries, ebuild_logentries)
funcnames = {
"INFO": "einfo",
@@ -3404,7 +3497,9 @@ class dblink(object):
str_buffer.append(' '.join(fields))
str_buffer.append('\n')
if str_buffer:
- os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
+ str_buffer = _unicode_encode(''.join(str_buffer))
+ while str_buffer:
+ str_buffer = str_buffer[os.write(self._pipe, str_buffer):]
def _emerge_log(self, msg):
emergelog(False, msg)
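
The loop above replaces a single os.write() call because writes to a pipe may be partial, accepting fewer bytes than offered; the slice drops exactly the bytes that were written. Standalone illustration, not part of this patch:

    import os

    def write_all(fd, data):
        while data:
            data = data[os.write(fd, data):]

    r, w = os.pipe()
    write_all(w, b"message\n")
    os.close(w)
    print(os.read(r, 100))  # b'message\n'
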
@@ -3415,6 +3510,8 @@ class dblink(object):
This function does the following:
+ calls get_ro_checker to retrieve a function for checking whether Portage
+ will write to a read-only filesystem, then runs it against the directory list
calls self._preserve_libs if FEATURES=preserve-libs
calls self._collision_protect if FEATURES=collision-protect
calls doebuild(mydo=pkg_preinst)
@@ -3462,6 +3559,7 @@ class dblink(object):
level=logging.ERROR, noiselevel=-1)
return 1
+ is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
slot = ''
for var_name in ('CHOST', 'SLOT'):
if var_name == 'CHOST' and self.cat == 'virtual':
@@ -3471,22 +3569,18 @@ class dblink(object):
pass
continue
- f = None
try:
- f = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(inforoot, var_name),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- val = f.readline().strip()
+ errors='replace') as f:
+ val = f.readline().strip()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
val = ''
- finally:
- if f is not None:
- f.close()
if var_name == 'SLOT':
slot = val
@@ -3499,7 +3593,9 @@ class dblink(object):
return 1
write_atomic(os.path.join(inforoot, var_name), slot + '\n')
- if val != self.settings.get(var_name, ''):
+ # This check only applies when built from source, since
+ # inforoot values are written just after src_install.
+ if not is_binpkg and val != self.settings.get(var_name, ''):
self._eqawarn('preinst',
[_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
{"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
@@ -3517,27 +3613,40 @@ class dblink(object):
cp = self.mysplit[0]
slot_atom = "%s:%s" % (cp, slot)
- # filter any old-style virtual matches
- slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
- if cpv_getkey(cpv) == cp]
-
- if self.mycpv not in slot_matches and \
- self.vartree.dbapi.cpv_exists(self.mycpv):
- # handle multislot or unapplied slotmove
- slot_matches.append(self.mycpv)
-
- others_in_slot = []
- from portage import config
- for cur_cpv in slot_matches:
- # Clone the config in case one of these has to be unmerged since
- # we need it to have private ${T} etc... for things like elog.
- settings_clone = config(clone=self.settings)
- settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
- settings_clone.reset()
- others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
- settings=settings_clone,
- vartree=self.vartree, treetype="vartree",
- scheduler=self._scheduler, pipe=self._pipe))
+ self.lockdb()
+ try:
+ # filter any old-style virtual matches
+ slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom)
+ if cpv_getkey(cpv) == cp]
+
+ if self.mycpv not in slot_matches and \
+ self.vartree.dbapi.cpv_exists(self.mycpv):
+ # handle multislot or unapplied slotmove
+ slot_matches.append(self.mycpv)
+
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ # Clone the config in case one of these has to be unmerged,
+ # since we need it to have private ${T} etc... for things
+ # like elog.
+ settings_clone = portage.config(clone=self.settings)
+ settings_clone.pop("PORTAGE_BUILDDIR_LOCKED", None)
+ settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
+ if self._preserve_libs and "preserve-libs" in \
+ settings_clone["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=settings_clone,
+ vartree=self.vartree, treetype="vartree",
+ scheduler=self._scheduler, pipe=self._pipe))
+ finally:
+ self.unlockdb()
+
+ # If any instance has RESTRICT=preserve-libs, then
+ # restrict it for all instances.
+ if not self._preserve_libs:
+ for dblnk in others_in_slot:
+ dblnk._preserve_libs = False
retval = self._security_check(others_in_slot)
if retval:
@@ -3579,8 +3688,9 @@ class dblink(object):
unicode_error = False
eagain_error = False
- myfilelist = []
- mylinklist = []
+ filelist = []
+ dirlist = []
+ linklist = []
paths_with_newlines = []
def onerror(e):
raise
@@ -3612,6 +3722,9 @@ class dblink(object):
unicode_errors.append(new_parent[ed_len:])
break
+ relative_path = parent[srcroot_len:]
+ dirlist.append(os.path.join("/", relative_path))
+
for fname in files:
try:
fname = _unicode_decode(fname,
@@ -3641,12 +3754,19 @@ class dblink(object):
file_mode = os.lstat(fpath).st_mode
if stat.S_ISREG(file_mode):
- myfilelist.append(relative_path)
+ filelist.append(relative_path)
elif stat.S_ISLNK(file_mode):
# Note: os.walk puts symlinks to directories in the "dirs"
# list and it does not traverse them since that could lead
# to an infinite recursion loop.
- mylinklist.append(relative_path)
+ linklist.append(relative_path)
+
+ myto = _unicode_decode(
+ _os.readlink(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict')),
+ encoding=_encodings['merge'], errors='replace')
+ if line_ending_re.search(myto) is not None:
+ paths_with_newlines.append(relative_path)
if unicode_error:
break
@@ -3674,7 +3794,7 @@ class dblink(object):
# If there are no files to merge, and an installed package in the same
# slot has files, it probably means that something went wrong.
if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
- not myfilelist and not mylinklist and others_in_slot:
+ not filelist and not linklist and others_in_slot:
installed_files = None
for other_dblink in others_in_slot:
installed_files = other_dblink.getcontents()
@@ -3699,7 +3819,7 @@ class dblink(object):
_("Manually run `emerge --unmerge =%s` if you "
"really want to remove the above files. Set "
"PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
- "/etc/make.conf if you do not want to "
+ "/etc/portage/make.conf if you do not want to "
"abort in cases like this.") % other_dblink.mycpv,
wrap_width))
eerror(msg)
@@ -3717,13 +3837,38 @@ class dblink(object):
for other in others_in_slot])
prepare_build_dirs(settings=self.settings, cleanup=cleanup)
+ # Check for read-only filesystems.
+ ro_checker = get_ro_checker()
+ rofilesystems = ro_checker(dirlist)
+
+ if rofilesystems:
+			msg = _("One or more files to be installed by this package "
+				"are located on read-only filesystems. "
+				"Please remount the following filesystems read-write "
+				"and retry.")
+ msg = textwrap.wrap(msg, 70)
+ msg.append("")
+ for f in rofilesystems:
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ self._elog("eerror", "preinst", msg)
+
+ msg = _("Package '%s' NOT merged due to read-only file systems.") % \
+ self.settings.mycpv
+			msg += _(" If necessary, refer to your elog "
+				"messages for the full content of the above message.")
+ msg = textwrap.wrap(msg, 70)
+ eerror(msg)
+ return 1
+
# check for package collisions
blockers = self._blockers
if blockers is None:
blockers = []
collisions, symlink_collisions, plib_collisions = \
self._collision_protect(srcroot, destroot,
- others_in_slot + blockers, myfilelist, mylinklist)
+ others_in_slot + blockers, filelist, linklist)
if symlink_collisions:
# Symlink collisions need to be distinguished from other types
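
Earlier in this hunk, get_ro_checker() aborts the merge before any file is moved if an install directory sits on a read-only filesystem. On Linux the check amounts to parsing the mount table, roughly as below (simplified sketch, not part of this patch; the real helper is portage.util.writeable_check):

    def ro_mountpoints(mounts_text):
        ro = set()
        for line in mounts_text.splitlines():
            fields = line.split()
            if len(fields) >= 4 and "ro" in fields[3].split(","):
                ro.add(fields[1])
        return ro

    def ro_checker(dirlist, mounts_text):
        ro = ro_mountpoints(mounts_text)
        return set(d for d in dirlist if any(
            d == m or d.startswith(m.rstrip("/") + "/") for m in ro))

    sample = ("/dev/sda1 /boot ext2 ro,relatime 0 0\n"
              "/dev/sda2 / ext4 rw,relatime 0 0")
    print(ro_checker(["/boot/grub", "/usr/bin"], sample))  # {'/boot/grub'}
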
@@ -3765,7 +3910,9 @@ class dblink(object):
" enough information to determine if a real problem"
" exists. Please do NOT file a bug report at"
" http://bugs.gentoo.org unless you report exactly which"
- " two packages install the same file(s). Once again,"
+ " two packages install the same file(s). See"
+ " http://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
+ " for tips on how to solve the problem. And once again,"
" please do NOT file a bug report unless you have"
" completely understood the above message.")
@@ -3800,17 +3947,28 @@ class dblink(object):
# get_owners is slow for large numbers of files, so
# don't look them all up.
collisions = collisions[:20]
+
+ pkg_info_strs = {}
self.lockdb()
try:
owners = self.vartree.dbapi._owners.get_owners(collisions)
self.vartree.dbapi.flush_cache()
+
+ for pkg in owners:
+ pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
+ pkg_info_str = "%s%s%s" % (pkg,
+ _slot_separator, pkg.slot)
+ if pkg.repo != _unknown_repo:
+ pkg_info_str += "%s%s" % (_repo_separator,
+ pkg.repo)
+ pkg_info_strs[pkg] = pkg_info_str
+
finally:
self.unlockdb()
for pkg, owned_files in owners.items():
- cpv = pkg.mycpv
msg = []
- msg.append("%s" % cpv)
+ msg.append(pkg_info_strs[pkg.mycpv])
for f in sorted(owned_files):
msg.append("\t%s" % os.path.join(destroot,
f.lstrip(os.path.sep)))
@@ -3901,12 +4059,11 @@ class dblink(object):
# write local package counter for recording
if counter is None:
counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
- f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+ with io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- f.write(_unicode_decode(str(counter)))
- f.close()
+ errors='backslashreplace') as f:
+ f.write("%s" % counter)
self.updateprotect()
@@ -4031,6 +4188,7 @@ class dblink(object):
try:
self.delete()
_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
finally:
self.unlockdb()
@@ -4075,9 +4233,9 @@ class dblink(object):
self.vartree.dbapi.lock()
try:
try:
- slot, counter = self.vartree.dbapi.aux_get(
- cpv, ["SLOT", "COUNTER"])
- except KeyError:
+ slot = self.vartree.dbapi._pkg_str(cpv, None).slot
+ counter = self.vartree.dbapi.cpv_counter(cpv)
+ except (KeyError, InvalidData):
pass
else:
has_vdb_entry = True
@@ -4146,6 +4304,7 @@ class dblink(object):
# For gcc upgrades, preserved libs have to be removed after the
# the library path has been updated.
self._prune_plib_registry()
+ self._post_merge_sync()
return os.EX_OK
@@ -4161,7 +4320,7 @@ class dblink(object):
x = -1
while True:
x += 1
- backup_p = p + '.backup.' + str(x).rjust(4, '0')
+ backup_p = '%s.backup.%04d' % (p, x)
try:
os.lstat(backup_p)
except OSError:
@@ -4262,8 +4421,9 @@ class dblink(object):
@type stufftomerge: String or List
@param cfgfiledict: { File:mtime } mapping for config_protected files
@type cfgfiledict: Dictionary
- @param thismtime: The current time (typically long(time.time())
- @type thismtime: Long
+ @param thismtime: None or new mtime for merged files (expressed in seconds
+ in Python <3.3 and nanoseconds in Python >=3.3)
+ @type thismtime: None or Int
@rtype: None or Boolean
@return:
1. True on failure
@@ -4288,18 +4448,18 @@ class dblink(object):
# this is supposed to merge a list of files. There will be 2 forms of argument passing.
if isinstance(stufftomerge, basestring):
#A directory is specified. Figure out protection paths, listdir() it and process it.
- mergelist = os.listdir(join(srcroot, stufftomerge))
- offset = stufftomerge
+ mergelist = [join(stufftomerge, child) for child in \
+ os.listdir(join(srcroot, stufftomerge))]
else:
- mergelist = stufftomerge
- offset = ""
+ mergelist = stufftomerge[:]
- for i, x in enumerate(mergelist):
+ while mergelist:
- mysrc = join(srcroot, offset, x)
- mydest = join(destroot, offset, x)
+ relative_path = mergelist.pop()
+ mysrc = join(srcroot, relative_path)
+ mydest = join(destroot, relative_path)
# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
- myrealdest = join(sep, offset, x)
+ myrealdest = join(sep, relative_path)
# stat file once, test using S_* macros many times (faster that way)
mystat = os.lstat(mysrc)
mymode = mystat[stat.ST_MODE]
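
This hunk converts mergeme() from recursion to an explicit worklist: directories push their children onto mergelist (see the extend() further down) instead of recursing, which keeps Python's stack depth constant and makes every entry a plain relative path. The traversal skeleton, not part of this patch:

    import os

    def walk_tree(srcroot):
        mergelist = os.listdir(srcroot)
        while mergelist:
            relative_path = mergelist.pop()
            source = os.path.join(srcroot, relative_path)
            yield relative_path
            if os.path.isdir(source) and not os.path.islink(source):
                # push children instead of recursing
                mergelist.extend(os.path.join(relative_path, child)
                        for child in os.listdir(source))
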
@@ -4394,9 +4554,26 @@ class dblink(object):
mymtime = movefile(mysrc, mydest, newmtime=thismtime,
sstat=mystat, mysettings=self.settings,
encoding=_encodings['merge'])
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
if mymtime != None:
+ # Use lexists, since if the target happens to be a broken
+ # symlink then that should trigger an independent warning.
+ if not (os.path.lexists(myrealto) or
+ os.path.lexists(join(srcroot, myabsto))):
+ self._eqawarn('preinst',
+ [_("QA Notice: Symbolic link /%s points to /%s which does not exist.")
+ % (relative_path, myabsto)])
+
showMessage(">>> %s -> %s\n" % (mydest, myto))
- outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ if sys.hexversion >= 0x3030000:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
else:
showMessage(_("!!! Failed to move file.\n"),
level=logging.ERROR, noiselevel=-1)
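
Two things happen in the symlink branch above: a QA notice now flags links whose targets exist neither in the image nor on disk, and mtimes are normalized for CONTENTS. On Python >= 3.3 the merge code carries nanosecond mtimes (st_mtime_ns), while CONTENTS has always stored whole seconds, hence the floor division by 10**9. Illustration, not part of this patch:

    import os
    import stat
    import sys

    def contents_mtime(path):
        st = os.lstat(path)
        if sys.hexversion >= 0x3030000:
            return st.st_mtime_ns // 1000000000  # nanoseconds -> seconds
        return st[stat.ST_MTIME]
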
@@ -4490,11 +4667,17 @@ class dblink(object):
os.chmod(mydest, mystat[0])
os.chown(mydest, mystat[4], mystat[5])
showMessage(">>> %s/\n" % mydest)
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
outfile.write("dir "+myrealdest+"\n")
# recurse and merge this directory
- if self.mergeme(srcroot, destroot, outfile, secondhand,
- join(offset, x), cfgfiledict, thismtime):
- return 1
+ mergelist.extend(join(relative_path, child) for child in
+ os.listdir(join(srcroot, relative_path)))
+
elif stat.S_ISREG(mymode):
# we are merging a regular file
mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
@@ -4550,7 +4733,10 @@ class dblink(object):
cfgprot = cfgfiledict["IGNORE"]
if not moveme:
zing = "---"
- mymtime = mystat[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ mymtime = mystat.st_mtime_ns
+ else:
+ mymtime = mystat[stat.ST_MTIME]
else:
moveme = 1
cfgprot = 1
@@ -4586,8 +4772,16 @@ class dblink(object):
hardlink_candidates.append(mydest)
zing = ">>>"
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
if mymtime != None:
- outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ if sys.hexversion >= 0x3030000:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
showMessage("%s %s\n" % (zing,mydest))
else:
# we are merging a fifo or device node
@@ -4598,6 +4792,12 @@ class dblink(object):
sstat=mystat, mysettings=self.settings,
encoding=_encodings['merge']) is not None:
zing = ">>>"
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
else:
return 1
if stat.S_ISFIFO(mymode):
@@ -4606,6 +4806,52 @@ class dblink(object):
outfile.write("dev %s\n" % myrealdest)
showMessage(zing + " " + mydest + "\n")
+ def _merged_path(self, path, lstatobj, exists=True):
+ previous_path = self._device_path_map.get(lstatobj.st_dev)
+ if previous_path is None or previous_path is False or \
+ (exists and len(path) < len(previous_path)):
+ if exists:
+ self._device_path_map[lstatobj.st_dev] = path
+ else:
+ # This entry is used to indicate that we've unmerged
+ # a file from this device, and later, this entry is
+ # replaced by a parent directory.
+ self._device_path_map[lstatobj.st_dev] = False
+
+ def _post_merge_sync(self):
+ """
+ Call this after merge or unmerge, in order to sync relevant files to
+ disk and avoid data-loss in the event of a power failure. This method
+ does nothing if FEATURES=merge-sync is disabled.
+ """
+ if not self._device_path_map or \
+ "merge-sync" not in self.settings.features:
+ return
+
+ returncode = None
+ if platform.system() == "Linux":
+
+ paths = []
+ for path in self._device_path_map.values():
+ if path is not False:
+ paths.append(path)
+ paths = tuple(paths)
+
+ proc = SyncfsProcess(paths=paths,
+ scheduler=(self._scheduler or
+ portage._internal_caller and global_event_loop() or
+ EventLoop(main=False)))
+ proc.start()
+ returncode = proc.wait()
+
+ if returncode is None or returncode != os.EX_OK:
+ try:
+ proc = subprocess.Popen(["sync"])
+ except EnvironmentError:
+ pass
+ else:
+ proc.wait()
+
def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
mydbapi=None, prev_mtimes=None, counter=None):
"""
@@ -4618,7 +4864,8 @@ class dblink(object):
self.lockdb()
self.vartree.dbapi._bump_mtime(self.mycpv)
if self._scheduler is None:
- self._scheduler = PollScheduler().sched_iface
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
try:
retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
@@ -4669,11 +4916,12 @@ class dblink(object):
"returns contents of a file with whitespace converted to spaces"
if not os.path.exists(self.dbdir+"/"+name):
return ""
- mydata = io.open(
+ with io.open(
_unicode_encode(os.path.join(self.dbdir, name),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).read().split()
+ ) as f:
+ mydata = f.read().split()
return " ".join(mydata)
def copyfile(self,fname):
@@ -4682,10 +4930,11 @@ class dblink(object):
def getfile(self,fname):
if not os.path.exists(self.dbdir+"/"+fname):
return ""
- return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+ with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).read()
+ ) as f:
+ return f.read()
def setfile(self,fname,data):
kwargs = {}
@@ -4694,16 +4943,18 @@ class dblink(object):
else:
kwargs['mode'] = 'w'
kwargs['encoding'] = _encodings['repo.content']
- write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
+ write_atomic(os.path.join(self.dbdir, fname), data,
+ **portage._native_kwargs(kwargs))
def getelements(self,ename):
if not os.path.exists(self.dbdir+"/"+ename):
return []
- mylines = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(self.dbdir, ename),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).readlines()
+ ) as f:
+ mylines = f.readlines()
myreturn = []
for x in mylines:
for y in x[:-1].split():
@@ -4711,14 +4962,13 @@ class dblink(object):
return myreturn
def setelements(self,mylist,ename):
- myelement = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(self.dbdir, ename),
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- for x in mylist:
- myelement.write(_unicode_decode(x+"\n"))
- myelement.close()
+ errors='backslashreplace') as f:
+ for x in mylist:
+ f.write("%s\n" % x)
def isregular(self):
"Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
@@ -4787,7 +5037,7 @@ class dblink(object):
def merge(mycat, mypkg, pkgloc, infloc,
myroot=None, settings=None, myebuild=None,
mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
- scheduler=None):
+ scheduler=None, fd_pipes=None):
"""
@param myroot: ignored, settings['EROOT'] is used instead
"""
@@ -4802,10 +5052,12 @@ def merge(mycat, mypkg, pkgloc, infloc,
merge_task = MergeProcess(
mycat=mycat, mypkg=mypkg, settings=settings,
treetype=mytree, vartree=vartree,
- scheduler=(scheduler or PollScheduler().sched_iface),
+ scheduler=(scheduler or portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
background=background, blockers=blockers, pkgloc=pkgloc,
infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
- prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
+ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'),
+ fd_pipes=fd_pipes)
merge_task.start()
retcode = merge_task.wait()
return retcode
@@ -4985,13 +5237,11 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None):
tar.addfile(tarinfo, f)
f.close()
else:
- f = open(_unicode_encode(path,
+ with open(_unicode_encode(path,
encoding=encoding,
- errors='strict'), 'rb')
- try:
+ errors='strict'), 'rb') as f:
tar.addfile(tarinfo, f)
- finally:
- f.close()
+
else:
tar.addfile(tarinfo)
if onProgress:
diff --git a/pym/portage/dbapi/virtual.py b/pym/portage/dbapi/virtual.py
index 213708c93..ba9745c2a 100644
--- a/pym/portage/dbapi/virtual.py
+++ b/pym/portage/dbapi/virtual.py
@@ -1,6 +1,7 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
@@ -89,8 +90,8 @@ class fakedbapi(dbapi):
if metadata is None:
mycpv = _pkg_str(mycpv)
else:
- mycpv = _pkg_str(mycpv, slot=metadata.get('SLOT'),
- repo=metadata.get('repository'), eapi=metadata.get('EAPI'))
+ mycpv = _pkg_str(mycpv, metadata=metadata,
+ settings=self.settings)
mycp = mycpv.cp
try:
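
As in the porttree change earlier in this diff, fakedbapi now hands the whole metadata dict to _pkg_str, which derives slot, repo and EAPI itself. Hedged example of the resulting object (keyword names assumed from this diff, not independently verified):

    from portage.versions import _pkg_str

    pkg = _pkg_str("dev-libs/foo-1.2", metadata={
        "SLOT": "0", "repository": "gentoo", "EAPI": "5"})
    print(pkg, pkg.slot, pkg.repo)  # dev-libs/foo-1.2 0 gentoo
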
diff --git a/pym/portage/debug.py b/pym/portage/debug.py
index ebf1a138a..d5a8cfbf6 100644
--- a/pym/portage/debug.py
+++ b/pym/portage/debug.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os
@@ -38,7 +38,7 @@ class trace_handler(object):
self.max_repr_length = 200
def event_handler(self, *args):
- frame, event, arg = args
+ frame, event, _arg = args
if "line" == event:
if self.show_local_lines:
self.trace_line(*args)
@@ -56,7 +56,7 @@ class trace_handler(object):
self.arg_repr(frame, event, arg),
self.locals_repr(frame, event, arg)))
- def arg_repr(self, frame, event, arg):
+ def arg_repr(self, _frame, event, arg):
my_repr = None
if "return" == event:
my_repr = repr(arg)
@@ -71,7 +71,7 @@ class trace_handler(object):
return ""
- def trace_line(self, frame, event, arg):
+ def trace_line(self, frame, _event, _arg):
writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
def ignore_filename(self, filename):
@@ -81,7 +81,7 @@ class trace_handler(object):
return True
return False
- def locals_repr(self, frame, event, arg):
+ def locals_repr(self, frame, _event, _arg):
"""Create a representation of the locals dict that is suitable for
tracing output."""
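# Usage sketch: event_handler() matches the sys.settrace() callback
# signature (frame, event, arg), which is why the unused parameters
# above are renamed with a leading underscore rather than removed.
# Assuming trace_handler is wired up directly:
#
#     import sys
#     from portage.debug import trace_handler
#     sys.settrace(trace_handler().event_handler)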
diff --git a/pym/portage/dep/__init__.py b/pym/portage/dep/__init__.py
index e547debd4..c457df045 100644
--- a/pym/portage/dep/__init__.py
+++ b/pym/portage/dep/__init__.py
@@ -1,7 +1,9 @@
# deps.py -- Portage dependency resolution functions
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
'Atom', 'best_match_to_list', 'cpvequal',
'dep_getcpv', 'dep_getkey', 'dep_getslot',
@@ -27,26 +29,21 @@ from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidAtom, InvalidData, InvalidDependString
from portage.localization import _
from portage.versions import catpkgsplit, catsplit, \
- vercmp, ververify, _cp, _cpv, _pkg_str, _unknown_repo
+ vercmp, ververify, _cp, _cpv, _pkg_str, _slot, _unknown_repo, _vr
import portage.cache.mappings
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_unicode = str
else:
_unicode = unicode
-# Api consumers included in portage should set this to True.
-# Once the relevant api changes are in a portage release with
-# stable keywords, make these warnings unconditional.
-_internal_warnings = False
-
# \w is [a-zA-Z0-9_]
# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_slot_separator = ":"
-_slot = r'([\w+][\w+.-]*)'
# loosely match SLOT, which may have an optional ABI part
_slot_loose = r'([\w+./*=-]+)'
@@ -55,51 +52,34 @@ _op = r'([=~]|[><]=?)'
_repo_separator = "::"
_repo_name = r'[\w][\w-]*'
+_repo_name_re = re.compile('^' + _repo_name + '$', re.UNICODE)
_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
_extended_cat = r'[\w+*][\w+.*-]*'
-_slot_re_cache = {}
-
-def _get_slot_re(eapi_attrs):
- cache_key = eapi_attrs.slot_abi
- slot_re = _slot_re_cache.get(cache_key)
- if slot_re is not None:
- return slot_re
-
- if eapi_attrs.slot_abi:
- slot_re = _slot + r'(/' + _slot + r'=?)?'
- else:
- slot_re = _slot
-
- slot_re = re.compile('^' + slot_re + '$', re.VERBOSE)
-
- _slot_re_cache[cache_key] = slot_re
- return slot_re
-
_slot_dep_re_cache = {}
def _get_slot_dep_re(eapi_attrs):
- cache_key = eapi_attrs.slot_abi
+ cache_key = eapi_attrs.slot_operator
slot_re = _slot_dep_re_cache.get(cache_key)
if slot_re is not None:
return slot_re
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
slot_re = _slot + r'?(\*|=|/' + _slot + r'=?)?'
else:
slot_re = _slot
- slot_re = re.compile('^' + slot_re + '$', re.VERBOSE)
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
_slot_dep_re_cache[cache_key] = slot_re
return slot_re
def _match_slot(atom, pkg):
if pkg.slot == atom.slot:
- if not atom.slot_abi:
+ if not atom.sub_slot:
return True
- elif atom.slot_abi == pkg.slot_abi:
+ elif atom.sub_slot == pkg.sub_slot:
return True
return False
@@ -123,7 +103,7 @@ def _get_atom_re(eapi_attrs):
'(?P<star>=' + cpv_re + r'\*)|' +
'(?P<simple>' + cp_re + '))' +
'(' + _slot_separator + _slot_loose + ')?' +
- _repo + ')(' + _use + ')?$', re.VERBOSE)
+ _repo + ')(' + _use + ')?$', re.VERBOSE | re.UNICODE)
_atom_re_cache[cache_key] = atom_re
return atom_re
@@ -142,10 +122,10 @@ def _get_atom_wildcard_re(eapi_attrs):
pkg_re = r'[\w+*][\w+*-]*?'
atom_re = re.compile(r'((?P<simple>(' +
- _extended_cat + r')/(' + pkg_re + r'))' + \
- '|(?P<star>=((' + _extended_cat + r')/(' + pkg_re + r'))-(?P<version>\*\d+\*)))' + \
+ _extended_cat + r')/(' + pkg_re + r'(-' + _vr + ')?))' + \
+ '|(?P<star>=((' + _extended_cat + r')/(' + pkg_re + r'))-(?P<version>\*\w+\*)))' + \
'(:(?P<slot>' + _slot_loose + r'))?(' +
- _repo_separator + r'(?P<repo>' + _repo_name + r'))?$')
+ _repo_separator + r'(?P<repo>' + _repo_name + r'))?$', re.UNICODE)
_atom_wildcard_re_cache[cache_key] = atom_re
return atom_re
@@ -259,7 +239,7 @@ def strip_empty(myarr):
('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
return [x for x in myarr if x]
-def paren_reduce(mystr):
+def paren_reduce(mystr, _deprecation_warn=True):
"""
Take a string and convert all paren enclosed entities into sublists and
split the list elements by spaces. All redundant brackets are removed.
@@ -273,7 +253,7 @@ def paren_reduce(mystr):
@rtype: Array
@return: The reduced string in an array
"""
- if _internal_warnings:
+ if portage._internal_caller and _deprecation_warn:
warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
mysplit = mystr.split()
@@ -365,7 +345,7 @@ class paren_normalize(list):
"""Take a dependency structure as returned by paren_reduce or use_reduce
and generate an equivalent structure that has no redundant lists."""
def __init__(self, src):
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
list.__init__(self)
@@ -461,7 +441,7 @@ def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], i
@return: The use reduced depend array
"""
if isinstance(depstr, list):
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
"Pass the original dep string instead.") % \
('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
@@ -762,7 +742,7 @@ def dep_opconvert(deplist):
@return:
The new list with the new ordering
"""
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)
@@ -793,7 +773,7 @@ def flatten(mylist):
@rtype: List
@return: A single list containing only non-list elements.
"""
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
('portage.dep.flatten',), DeprecationWarning, stacklevel=2)
@@ -1233,11 +1213,14 @@ class Atom(_unicode):
if allow_repo is None:
allow_repo = True
+ blocker_prefix = ""
if "!" == s[:1]:
blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
if blocker.overlap.forbid:
+ blocker_prefix = s[:2]
s = s[2:]
else:
+ blocker_prefix = s[:1]
s = s[1:]
else:
blocker = False
@@ -1261,6 +1244,8 @@ class Atom(_unicode):
else:
op = None
cpv = cp = m.group('simple')
+ if m.group(atom_re.groupindex['simple'] + 3) is not None:
+ raise InvalidAtom(self)
if cpv.find("**") != -1:
raise InvalidAtom(self)
slot = m.group('slot')
@@ -1311,32 +1296,34 @@ class Atom(_unicode):
self.__dict__['repo'] = repo
if slot is None:
self.__dict__['slot'] = None
- self.__dict__['slot_abi'] = None
- self.__dict__['slot_abi_op'] = None
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
else:
slot_re = _get_slot_dep_re(eapi_attrs)
slot_match = slot_re.match(slot)
if slot_match is None:
raise InvalidAtom(self)
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
self.__dict__['slot'] = slot_match.group(1)
- slot_abi = slot_match.group(2)
- if slot_abi is not None:
- slot_abi = slot_abi.lstrip("/")
- if slot_abi in ("*", "="):
- self.__dict__['slot_abi'] = None
- self.__dict__['slot_abi_op'] = slot_abi
+ sub_slot = slot_match.group(2)
+ if sub_slot is not None:
+ sub_slot = sub_slot.lstrip("/")
+ if sub_slot in ("*", "="):
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = sub_slot
else:
- slot_abi_op = None
- if slot_abi is not None and slot_abi[-1:] == "=":
- slot_abi_op = slot_abi[-1:]
- slot_abi = slot_abi[:-1]
- self.__dict__['slot_abi'] = slot_abi
- self.__dict__['slot_abi_op'] = slot_abi_op
+ slot_operator = None
+ if sub_slot is not None and sub_slot[-1:] == "=":
+ slot_operator = sub_slot[-1:]
+ sub_slot = sub_slot[:-1]
+ self.__dict__['sub_slot'] = sub_slot
+ self.__dict__['slot_operator'] = slot_operator
+ if self.slot is not None and self.slot_operator == "*":
+ raise InvalidAtom(self)
else:
self.__dict__['slot'] = slot
- self.__dict__['slot_abi'] = None
- self.__dict__['slot_abi_op'] = None
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
self.__dict__['operator'] = op
self.__dict__['extended_syntax'] = extended_syntax
@@ -1348,15 +1335,18 @@ class Atom(_unicode):
use = _use
else:
use = _use_dep(use_str[1:-1].split(","), eapi_attrs)
- without_use = Atom(m.group('without_use'), allow_repo=allow_repo)
+ without_use = Atom(blocker_prefix + m.group('without_use'),
+ allow_repo=allow_repo)
else:
use = None
if unevaluated_atom is not None and \
unevaluated_atom.use is not None:
# unevaluated_atom.use is used for IUSE checks when matching
# packages, so it must not propagate to without_use
- without_use = Atom(s, allow_wildcard=allow_wildcard,
- allow_repo=allow_repo)
+ without_use = Atom(_unicode(self),
+ allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo,
+ eapi=eapi)
else:
without_use = self
@@ -1410,13 +1400,13 @@ class Atom(_unicode):
% (eapi, self), category='EAPI.incompatible')
@property
- def slot_abi_built(self):
+ def slot_operator_built(self):
"""
- Returns True if slot_abi_op == "=" and slot_abi is not None.
+ Returns True if slot_operator == "=" and sub_slot is not None.
NOTE: foo/bar:2= is unbuilt and returns False, whereas foo/bar:2/2=
is built and returns True.
"""
- return self.slot_abi_op == "=" and self.slot_abi is not None
+ return self.slot_operator == "=" and self.sub_slot is not None
@property
def without_repo(self):
@@ -1427,7 +1417,7 @@ class Atom(_unicode):
@property
def without_slot(self):
- if self.slot is None and self.slot_abi_op is None:
+ if self.slot is None and self.slot_operator is None:
return self
atom = remove_slot(self)
if self.repo is not None:
@@ -1439,14 +1429,14 @@ class Atom(_unicode):
def with_repo(self, repo):
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
atom += _repo_separator + repo
if self.use is not None:
atom += _unicode(self.use)
@@ -1506,14 +1496,14 @@ class Atom(_unicode):
if not (self.use and self.use.conditional):
return self
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
use_dep = self.use.evaluate_conditionals(use)
atom += _unicode(use_dep)
return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
@@ -1534,14 +1524,14 @@ class Atom(_unicode):
if not self.use:
return self
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
atom += _unicode(use_dep)
return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
@@ -1550,14 +1540,14 @@ class Atom(_unicode):
if not (self.use and self.use.conditional):
return self
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
atom += _unicode(use_dep)
return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
@@ -1583,7 +1573,7 @@ def extended_cp_match(extended_cp, other_cp):
extended_cp_re = _extended_cp_re_cache.get(extended_cp)
if extended_cp_re is None:
extended_cp_re = re.compile("^" + re.escape(extended_cp).replace(
- r'\*', '[^/]*') + "$")
+ r'\*', '[^/]*') + "$", re.UNICODE)
_extended_cp_re_cache[extended_cp] = extended_cp_re
return extended_cp_re.match(other_cp) is not None
@@ -2132,7 +2122,7 @@ def match_from_list(mydep, candidate_list):
candidate_list = mylist
mylist = []
- # Currently, only \*\d+\* is supported.
+ # Currently, only \*\w+\* is supported.
ver = mydep.version[1:-1]
for x in candidate_list:
@@ -2172,19 +2162,40 @@ def match_from_list(mydep, candidate_list):
# XXX: Nasty special casing for leading zeros
# Required as =* is a literal prefix match, so can't
# use vercmp
- mysplit = catpkgsplit(mycpv)
- myver = mysplit[2].lstrip("0")
+ myver = mycpv_cps[2].lstrip("0")
if not myver or not myver[0].isdigit():
myver = "0"+myver
- mycpv_cmp = mysplit[0]+"/"+mysplit[1]+"-"+myver
+ if myver == mycpv_cps[2]:
+ mycpv_cmp = mycpv
+ else:
+ # Use replace to preserve the revision part if it exists
+ # (mycpv_cps[3] can't be trusted because it contains r0
+ # even when the input has no revision part).
+ mycpv_cmp = mycpv.replace(
+ mydep.cp + "-" + mycpv_cps[2],
+ mydep.cp + "-" + myver, 1)
for x in candidate_list:
- xs = getattr(x, "cpv_split", None)
- if xs is None:
- xs = catpkgsplit(remove_slot(x))
+ try:
+ x.cp
+ except AttributeError:
+ try:
+ pkg = _pkg_str(remove_slot(x))
+ except InvalidData:
+ continue
+ else:
+ pkg = x
+
+ xs = pkg.cpv_split
myver = xs[2].lstrip("0")
if not myver or not myver[0].isdigit():
myver = "0"+myver
- xcpv = xs[0]+"/"+xs[1]+"-"+myver
+ if myver == xs[2]:
+ xcpv = pkg.cpv
+ else:
+ # Use replace to preserve the revision part if it exists.
+ xcpv = pkg.cpv.replace(
+ pkg.cp + "-" + xs[2],
+ pkg.cp + "-" + myver, 1)
if xcpv.startswith(mycpv_cmp):
mylist.append(x)
@@ -2277,9 +2288,11 @@ def match_from_list(mydep, candidate_list):
continue
if mydep.use:
-
- missing_enabled = mydep.use.missing_enabled.difference(x.iuse.all)
- missing_disabled = mydep.use.missing_disabled.difference(x.iuse.all)
+ is_valid_flag = x.iuse.is_valid_flag
+ missing_enabled = frozenset(flag for flag in
+ mydep.use.missing_enabled if not is_valid_flag(flag))
+ missing_disabled = frozenset(flag for flag in
+ mydep.use.missing_disabled if not is_valid_flag(flag))
if mydep.use.enabled:
if any(f in mydep.use.enabled for f in missing_disabled):
@@ -2314,9 +2327,9 @@ def match_from_list(mydep, candidate_list):
return mylist
def human_readable_required_use(required_use):
- return required_use.replace("^^", "exactly-one-of").replace("||", "any-of")
+ return required_use.replace("^^", "exactly-one-of").replace("||", "any-of").replace("??", "at-most-one-of")
-def get_required_use_flags(required_use):
+def get_required_use_flags(required_use, eapi=None):
"""
Returns a set of use flags that are used in the given REQUIRED_USE string
@@ -2326,6 +2339,12 @@ def get_required_use_flags(required_use):
@return: Set of use flags that are used in the given REQUIRED_USE string
"""
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
mysplit = required_use.split()
level = 0
stack = [[]]
@@ -2354,7 +2373,7 @@ def get_required_use_flags(required_use):
l = stack.pop()
ignore = False
if stack[level]:
- if stack[level][-1] in ("||", "^^") or \
+ if stack[level][-1] in valid_operators or \
(not isinstance(stack[level][-1], bool) and \
stack[level][-1][-1] == "?"):
ignore = True
@@ -2366,15 +2385,14 @@ def get_required_use_flags(required_use):
else:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
- elif token in ("||", "^^"):
+ elif token in valid_operators:
if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
need_bracket = True
stack[level].append(token)
else:
- if need_bracket or "(" in token or ")" in token or \
- "|" in token or "^" in token:
+ if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
@@ -2429,7 +2447,7 @@ class _RequiredUseBranch(object):
complex_nesting = False
node = self
while node != None and not complex_nesting:
- if node._operator in ("||", "^^"):
+ if node._operator in ("||", "^^", "??"):
complex_nesting = True
else:
node = node._parent
@@ -2450,7 +2468,7 @@ class _RequiredUseBranch(object):
if sys.hexversion < 0x3000000:
__nonzero__ = __bool__
-def check_required_use(required_use, use, iuse_match):
+def check_required_use(required_use, use, iuse_match, eapi=None):
"""
Checks if the use flags listed in 'use' satisfy all
constraints specified in 'required_use'.
@@ -2466,6 +2484,12 @@ def check_required_use(required_use, use, iuse_match):
@return: Indicates if REQUIRED_USE constraints are satisfied
"""
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
def is_active(token):
if token.startswith("!"):
flag = token[1:]
@@ -2475,6 +2499,11 @@ def check_required_use(required_use, use, iuse_match):
is_negated = False
if not flag or not iuse_match(flag):
+ if not eapi_attrs.required_use_at_most_one_of and flag == "?":
+ msg = _("Operator '??' is not supported with EAPI '%s'") \
+ % (eapi,)
+ e = InvalidData(msg, category='EAPI.incompatible')
+ raise InvalidDependString(msg, errors=(e,))
msg = _("USE flag '%s' is not in IUSE") \
% (flag,)
e = InvalidData(msg, category='IUSE.missing')
@@ -2492,6 +2521,8 @@ def check_required_use(required_use, use, iuse_match):
return (True in argument)
elif operator == "^^":
return (argument.count(True) == 1)
+ elif operator == "??":
+ return (argument.count(True) <= 1)
elif operator[-1] == "?":
return (False not in argument)
@@ -2521,7 +2552,7 @@ def check_required_use(required_use, use, iuse_match):
l = stack.pop()
op = None
if stack[level]:
- if stack[level][-1] in ("||", "^^"):
+ if stack[level][-1] in valid_operators:
op = stack[level].pop()
satisfied = is_satisfied(op, l)
stack[level].append(satisfied)
@@ -2550,7 +2581,7 @@ def check_required_use(required_use, use, iuse_match):
stack[level].append(satisfied)
if len(node._children) <= 1 or \
- node._parent._operator not in ("||", "^^"):
+ node._parent._operator not in valid_operators:
last_node = node._parent._children.pop()
if last_node is not node:
raise AssertionError(
@@ -2566,7 +2597,7 @@ def check_required_use(required_use, use, iuse_match):
raise AssertionError(
"node is not last child of parent")
- elif len(node._children) == 1 and op in ("||", "^^"):
+ elif len(node._children) == 1 and op in valid_operators:
last_node = node._parent._children.pop()
if last_node is not node:
raise AssertionError(
@@ -2576,7 +2607,7 @@ def check_required_use(required_use, use, iuse_match):
node._children[0]._parent = node._parent
node = node._children[0]
if node._operator is None and \
- node._parent._operator not in ("||", "^^"):
+ node._parent._operator not in valid_operators:
last_node = node._parent._children.pop()
if last_node is not node:
raise AssertionError(
@@ -2590,7 +2621,7 @@ def check_required_use(required_use, use, iuse_match):
else:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
- elif token in ("||", "^^"):
+ elif token in valid_operators:
if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
@@ -2600,8 +2631,7 @@ def check_required_use(required_use, use, iuse_match):
node._children.append(child)
node = child
else:
- if need_bracket or "(" in token or ")" in token or \
- "|" in token or "^" in token:
+ if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
@@ -2629,16 +2659,16 @@ def extract_affecting_use(mystr, atom, eapi=None):
that decide if the given atom is in effect.
Example usage:
- >>> extract_use_cond('sasl? ( dev-libs/cyrus-sasl ) \
+ >>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
!minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
- (['sasl', 'minimal', 'cxx'])
+ {'cxx', 'minimal', 'sasl'}
- @param dep: The dependency string
+ @param mystr: The dependency string
@type mystr: String
@param atom: The atom to get into effect
@type atom: String
- @rtype: Tuple of two lists of strings
- @return: List of use flags that need to be enabled, List of use flag that need to be disabled
+ @rtype: Set of strings
+ @return: Set of use flags affecting given atom
"""
useflag_re = _get_useflag_re(eapi)
mysplit = mystr.split()
@@ -2744,3 +2774,48 @@ def extract_affecting_use(mystr, atom, eapi=None):
_("malformed syntax: '%s'") % mystr)
return affecting_use
+
+def extract_unpack_dependencies(src_uri, unpackers):
+ """
+ Return unpack dependencies string for given SRC_URI string.
+
+ @param src_uri: SRC_URI string
+ @type src_uri: String
+ @param unpackers: Dictionary mapping archive suffixes to dependency strings
+ @type unpackers: Dictionary
+ @rtype: String
+ @return: Dependency string specifying packages required to unpack archives.
+ """
+ src_uri = src_uri.split()
+
+ depend = []
+ for i in range(len(src_uri)):
+ if src_uri[i][-1] == "?" or src_uri[i] in ("(", ")"):
+ depend.append(src_uri[i])
+ elif (i+1 < len(src_uri) and src_uri[i+1] == "->") or src_uri[i] == "->":
+ continue
+ else:
+ for suffix in sorted(unpackers, key=lambda x: len(x), reverse=True):
+ suffix = suffix.lower()
+ if src_uri[i].lower().endswith(suffix):
+ depend.append(unpackers[suffix])
+ break
+
+ while True:
+ cleaned_depend = depend[:]
+ for i in range(len(cleaned_depend)):
+ if cleaned_depend[i] is None:
+ continue
+ elif cleaned_depend[i] == "(" and cleaned_depend[i+1] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ elif cleaned_depend[i][-1] == "?" and cleaned_depend[i+1] == "(" and cleaned_depend[i+2] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ cleaned_depend[i+2] = None
+ if depend == cleaned_depend:
+ break
+ else:
+ depend = [x for x in cleaned_depend if x is not None]
+
+ return " ".join(depend)
diff --git a/pym/portage/dep/_slot_abi.py b/pym/portage/dep/_slot_operator.py
index 7c36e52dc..7b6444403 100644
--- a/pym/portage/dep/_slot_abi.py
+++ b/pym/portage/dep/_slot_operator.py
@@ -1,59 +1,64 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
from portage.dep import Atom, paren_enclose, use_reduce
+from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidData
+from _emerge.Package import Package
-_dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND')
-_runtime_keys = ('PDEPEND', 'RDEPEND')
-
-def find_built_slot_abi_atoms(pkg):
+def find_built_slot_operator_atoms(pkg):
atoms = {}
- for k in _dep_keys:
- atom_list = list(_find_built_slot_abi_op(use_reduce(pkg.metadata[k],
- uselist=pkg.use.enabled, eapi=pkg.metadata['EAPI'],
+ for k in Package._dep_keys:
+ atom_list = list(_find_built_slot_operator(use_reduce(pkg._metadata[k],
+ uselist=pkg.use.enabled, eapi=pkg.eapi,
token_class=Atom)))
if atom_list:
atoms[k] = atom_list
return atoms
-def _find_built_slot_abi_op(dep_struct):
+def _find_built_slot_operator(dep_struct):
for x in dep_struct:
if isinstance(x, list):
- for atom in _find_built_slot_abi_op(x):
+ for atom in _find_built_slot_operator(x):
yield atom
- elif isinstance(x, Atom) and x.slot_abi_built:
+ elif isinstance(x, Atom) and x.slot_operator_built:
yield x
-def ignore_built_slot_abi_deps(dep_struct):
+def ignore_built_slot_operator_deps(dep_struct):
for i, x in enumerate(dep_struct):
if isinstance(x, list):
- ignore_built_slot_abi_deps(x)
- elif isinstance(x, Atom) and x.slot_abi_built:
+ ignore_built_slot_operator_deps(x)
+ elif isinstance(x, Atom) and x.slot_operator_built:
# There's no way of knowing here whether the SLOT
- # part of the SLOT/ABI pair should be kept, so we
+ # part of the slot/sub-slot pair should be kept, so we
# ignore both parts.
dep_struct[i] = x.without_slot
-def evaluate_slot_abi_equal_deps(settings, use, trees):
+def evaluate_slot_operator_equal_deps(settings, use, trees):
metadata = settings.configdict['pkg']
eapi = metadata['EAPI']
+ eapi_attrs = _get_eapi_attrs(eapi)
running_vardb = trees[trees._running_eroot]["vartree"].dbapi
target_vardb = trees[trees._target_eroot]["vartree"].dbapi
vardbs = [target_vardb]
deps = {}
- for k in _dep_keys:
+ for k in Package._dep_keys:
deps[k] = use_reduce(metadata[k],
uselist=use, eapi=eapi, token_class=Atom)
- for k in _runtime_keys:
+ for k in Package._runtime_keys:
_eval_deps(deps[k], vardbs)
- if running_vardb is not target_vardb:
- vardbs.append(running_vardb)
-
- _eval_deps(deps["DEPEND"], vardbs)
+ if eapi_attrs.hdepend:
+ _eval_deps(deps["HDEPEND"], [running_vardb])
+ _eval_deps(deps["DEPEND"], [target_vardb])
+ else:
+ if running_vardb is not target_vardb:
+ vardbs.append(running_vardb)
+ _eval_deps(deps["DEPEND"], vardbs)
result = {}
for k, v in deps.items():
@@ -65,7 +70,7 @@ def _eval_deps(dep_struct, vardbs):
for i, x in enumerate(dep_struct):
if isinstance(x, list):
_eval_deps(x, vardbs)
- elif isinstance(x, Atom) and x.slot_abi_op == "=":
+ elif isinstance(x, Atom) and x.slot_operator == "=":
for vardb in vardbs:
best_version = vardb.match(x)
if best_version:
@@ -77,7 +82,7 @@ def _eval_deps(dep_struct, vardbs):
pass
else:
slot_part = "%s/%s=" % \
- (best_version.slot, best_version.slot_abi)
+ (best_version.slot, best_version.sub_slot)
x = x.with_slot(slot_part)
dep_struct[i] = x
break
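# Sketch of the renamed Atom attributes this module now consumes
# (eapi="5" is assumed for illustration; any slot-operator-capable
# EAPI behaves the same):
#
#     from portage.dep import Atom
#     a = Atom("dev-libs/foo:2/2.1=", eapi="5")
#     (a.slot, a.sub_slot, a.slot_operator)   # ("2", "2.1", "=")
#     a.slot_operator_built                   # True: built sub-slot
#     Atom("dev-libs/foo:2=", eapi="5").slot_operator_built
#                                             # False: unbuilt ":2=" form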
diff --git a/pym/portage/dep/dep_check.py b/pym/portage/dep/dep_check.py
index d575ab3bc..b5ace3d39 100644
--- a/pym/portage/dep/dep_check.py
+++ b/pym/portage/dep/dep_check.py
@@ -1,16 +1,19 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
import logging
+import operator
import portage
-from portage import _unicode_decode
from portage.dep import Atom, match_from_list, use_reduce
from portage.exception import InvalidDependString, ParseError
from portage.localization import _
from portage.util import writemsg, writemsg_level
+from portage.util.SlotObject import SlotObject
from portage.versions import vercmp, _pkg_str
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
@@ -160,7 +163,7 @@ def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
# According to GLEP 37, RDEPEND is the only dependency
# type that is valid for new-style virtuals. Repoman
# should enforce this.
- depstring = pkg.metadata['RDEPEND']
+ depstring = pkg._metadata['RDEPEND']
pkg_kwargs = kwargs.copy()
pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
if edebug:
@@ -183,7 +186,7 @@ def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
del mytrees["virt_parent"]
if not mycheck[0]:
- raise ParseError(_unicode_decode("%s: %s '%s'") % \
+ raise ParseError("%s: %s '%s'" % \
(pkg, mycheck[1], depstring))
# pull in the new-style virtual
@@ -254,6 +257,10 @@ def dep_eval(deplist):
return 0
return 1
+class _dep_choice(SlotObject):
+ __slots__ = ('atoms', 'slot_map', 'cp_map', 'all_available',
+ 'all_installed_slots')
+
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
"""
Takes an unreduced and reduced deplist and removes satisfied dependencies.
@@ -316,6 +323,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
priority = trees[myroot].get("priority")
graph_db = trees[myroot].get("graph_db")
graph = trees[myroot].get("graph")
+ want_update_pkg = trees[myroot].get("want_update_pkg")
vardb = None
if "vartree" in trees[myroot]:
vardb = trees[myroot]["vartree"].dbapi
@@ -324,6 +332,13 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
else:
mydbapi = trees[myroot]["porttree"].dbapi
+ try:
+ mydbapi_match_pkgs = mydbapi.match_pkgs
+ except AttributeError:
+ def mydbapi_match_pkgs(atom):
+ return [mydbapi._pkg_str(cpv, atom.repo)
+ for cpv in mydbapi.match(atom)]
+
# Sort the deps into installed, not installed but already
# in the graph and other, not installed and not in the graph
# and other, with values of [[required_atom], availability]
@@ -347,24 +362,17 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
continue
# Ignore USE dependencies here since we don't want USE
# settings to adversely affect || preference evaluation.
- avail_pkg = mydbapi.match(atom.without_use)
+ avail_pkg = mydbapi_match_pkgs(atom.without_use)
if avail_pkg:
avail_pkg = avail_pkg[-1] # highest (ascending order)
- try:
- slot = avail_pkg.slot
- except AttributeError:
- eapi, slot, repo = mydbapi.aux_get(avail_pkg,
- ["EAPI", "SLOT", "repository"])
- avail_pkg = _pkg_str(avail_pkg, eapi=eapi,
- slot=slot, repo=repo)
- avail_slot = Atom("%s:%s" % (atom.cp, slot))
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
if not avail_pkg:
all_available = False
all_use_satisfied = False
break
if atom.use:
- avail_pkg_use = mydbapi.match(atom)
+ avail_pkg_use = mydbapi_match_pkgs(atom)
if not avail_pkg_use:
all_use_satisfied = False
else:
@@ -372,13 +380,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
avail_pkg_use = avail_pkg_use[-1]
if avail_pkg_use != avail_pkg:
avail_pkg = avail_pkg_use
- try:
- slot = avail_pkg.slot
- except AttributeError:
- eapi, slot, repo = mydbapi.aux_get(avail_pkg,
- ["EAPI", "SLOT", "repository"])
- avail_pkg = _pkg_str(avail_pkg,
- eapi=eapi, slot=slot, repo=repo)
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
slot_map[avail_slot] = avail_pkg
highest_cpv = cp_map.get(avail_pkg.cp)
@@ -386,7 +388,9 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
vercmp(avail_pkg.version, highest_cpv.version) > 0:
cp_map[avail_pkg.cp] = avail_pkg
- this_choice = (atoms, slot_map, cp_map, all_available)
+ this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
+ cp_map=cp_map, all_available=all_available,
+ all_installed_slots=False)
if all_available:
# The "all installed" criterion is not version or slot specific.
# If any version of a package is already in the graph then we
@@ -407,6 +411,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
not slot_atom.startswith("virtual/"):
all_installed_slots = False
break
+ this_choice.all_installed_slots = all_installed_slots
if graph_db is None:
if all_use_satisfied:
if all_installed:
@@ -468,8 +473,27 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
elif all_installed:
if all_installed_slots:
preferred_installed.append(this_choice)
- else:
+ elif parent is None or want_update_pkg is None:
preferred_any_slot.append(this_choice)
+ else:
+ # When appropriate, prefer a slot that is not
+ # installed yet for bug #478188.
+ want_update = True
+ for slot_atom, avail_pkg in slot_map.items():
+ if avail_pkg in graph:
+ continue
+ # New-style virtuals have zero cost to install.
+ if slot_atom.startswith("virtual/") or \
+ vardb.match(slot_atom):
+ continue
+ if not want_update_pkg(parent, avail_pkg):
+ want_update = False
+ break
+
+ if want_update:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
else:
preferred_non_installed.append(this_choice)
else:
@@ -490,6 +514,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
all_installed = False
if all_installed:
+ this_choice.all_installed_slots = True
other_installed.append(this_choice)
elif some_installed:
other_installed_some.append(this_choice)
@@ -506,22 +531,23 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
for choices in choice_bins:
if len(choices) < 2:
continue
+ # Prefer choices with all_installed_slots for bug #480736.
+ choices.sort(key=operator.attrgetter('all_installed_slots'),
+ reverse=True)
for choice_1 in choices[1:]:
- atoms_1, slot_map_1, cp_map_1, all_available_1 = choice_1
- cps = set(cp_map_1)
+ cps = set(choice_1.cp_map)
for choice_2 in choices:
if choice_1 is choice_2:
# choice_1 will not be promoted, so move on
break
- atoms_2, slot_map_2, cp_map_2, all_available_2 = choice_2
- intersecting_cps = cps.intersection(cp_map_2)
+ intersecting_cps = cps.intersection(choice_2.cp_map)
if not intersecting_cps:
continue
has_upgrade = False
has_downgrade = False
for cp in intersecting_cps:
- version_1 = cp_map_1[cp]
- version_2 = cp_map_2[cp]
+ version_1 = choice_1.cp_map[cp]
+ version_2 = choice_2.cp_map[cp]
difference = vercmp(version_1.version, version_2.version)
if difference != 0:
if difference > 0:
@@ -538,9 +564,9 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
for allow_masked in (False, True):
for choices in choice_bins:
- for atoms, slot_map, cp_map, all_available in choices:
- if all_available or allow_masked:
- return atoms
+ for choice in choices:
+ if choice.all_available or allow_masked:
+ return choice.atoms
assert(False) # This point should not be reachable
@@ -575,18 +601,15 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
mymasks = set()
useforce = set()
- useforce.add(mysettings["ARCH"])
if use == "all":
- # This masking/forcing is only for repoman. In other cases, relevant
- # masking/forcing should have already been applied via
- # config.regenerate(). Also, binary or installed packages may have
- # been built with flags that are now masked, and it would be
- # inconsistent to mask them now. Additionally, myuse may consist of
- # flags from a parent package that is being merged to a $ROOT that is
- # different from the one that mysettings represents.
+ # This is only for repoman, in order to constrain the use_reduce
+ # matchall behavior to account for profile use.mask/force. The
+ # ARCH/archlist code here may be redundant, since the profile
+ # really should be handling ARCH masking/forcing itself.
mymasks.update(mysettings.usemask)
mymasks.update(mysettings.archlist())
mymasks.discard(mysettings["ARCH"])
+ useforce.add(mysettings["ARCH"])
useforce.update(mysettings.useforce)
useforce.difference_update(mymasks)
@@ -609,7 +632,7 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
# dependencies so that things like --depclean work as well as possible
# in spite of partial invalidity.
if not current_parent.installed:
- eapi = current_parent.metadata['EAPI']
+ eapi = current_parent.eapi
if isinstance(depstring, list):
mysplit = depstring
@@ -619,7 +642,7 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
masklist=mymasks, matchall=(use=="all"), excludeall=useforce,
opconvert=True, token_class=Atom, eapi=eapi)
except InvalidDependString as e:
- return [0, _unicode_decode("%s") % (e,)]
+ return [0, "%s" % (e,)]
if mysplit == []:
#dependencies were reduced to nothing
@@ -633,10 +656,10 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
use_force=useforce, use_mask=mymasks, use_cache=use_cache,
use_binaries=use_binaries, myroot=myroot, trees=trees)
except ParseError as e:
- return [0, _unicode_decode("%s") % (e,)]
+ return [0, "%s" % (e,)]
- mysplit2=mysplit[:]
- mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
+ mysplit2 = dep_wordreduce(mysplit,
+ mysettings, mydbapi, mode, use_cache=use_cache)
if mysplit2 is None:
return [0, _("Invalid token")]
diff --git a/pym/portage/dispatch_conf.py b/pym/portage/dispatch_conf.py
index 4c68dfc7b..f975ccd59 100644
--- a/pym/portage/dispatch_conf.py
+++ b/pym/portage/dispatch_conf.py
@@ -1,5 +1,5 @@
# archive_conf.py -- functionality common to archive-conf and dispatch-conf
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
@@ -24,175 +24,187 @@ RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'"
def diffstatusoutput(cmd, file1, file2):
- """
- Execute the string cmd in a shell with getstatusoutput() and return a
- 2-tuple (status, output).
- """
- # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
- # raise a UnicodeDecodeError which makes the output inaccessible.
- args = shlex_split(cmd % (file1, file2))
- if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
- # Python 3.1 does not support bytes in Popen args.
- args = [portage._unicode_encode(x, errors='strict') for x in args]
- proc = subprocess.Popen(args,
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- output = portage._unicode_decode(proc.communicate()[0])
- if output and output[-1] == "\n":
- # getstatusoutput strips one newline
- output = output[:-1]
- return (proc.wait(), output)
+ """
+ Execute the string cmd in a shell with getstatusoutput() and return a
+ 2-tuple (status, output).
+ """
+ # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
+ # raise a UnicodeDecodeError which makes the output inaccessible.
+ args = shlex_split(cmd % (file1, file2))
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [portage._unicode_encode(x, errors='strict') for x in args]
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output = portage._unicode_decode(proc.communicate()[0])
+ if output and output[-1] == "\n":
+ # getstatusoutput strips one newline
+ output = output[:-1]
+ return (proc.wait(), output)
def read_config(mandatory_opts):
- eprefix = portage.const.EPREFIX
- config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
- loader = KeyValuePairFileLoader(config_path, None)
- opts, errors = loader.load()
- if not opts:
- print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
- sys.exit(1)
+ eprefix = portage.settings["EPREFIX"]
+ if portage._not_installed:
+ config_path = os.path.join(portage.PORTAGE_BASE_PATH, "cnf", "dispatch-conf.conf")
+ else:
+ config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
+ loader = KeyValuePairFileLoader(config_path, None)
+ opts, _errors = loader.load()
+ if not opts:
+ print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
+ sys.exit(1)
# Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
- quotes = "\"'"
- for k, v in opts.items():
- if v[:1] in quotes and v[:1] == v[-1:]:
- opts[k] = v[1:-1]
-
- for key in mandatory_opts:
- if key not in opts:
- if key == "merge":
- opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
- else:
- print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
-
- # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
- variables = {"EPREFIX": eprefix}
- opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)
-
- if not os.path.exists(opts['archive-dir']):
- os.mkdir(opts['archive-dir'])
- # Use restrictive permissions by default, in order to protect
- # against vulnerabilities (like bug #315603 involving rcs).
- os.chmod(opts['archive-dir'], 0o700)
- elif not os.path.isdir(opts['archive-dir']):
- print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
- sys.exit(1)
-
- return opts
+ quotes = "\"'"
+ for k, v in opts.items():
+ if v[:1] in quotes and v[:1] == v[-1:]:
+ opts[k] = v[1:-1]
+
+ for key in mandatory_opts:
+ if key not in opts:
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
+ else:
+ print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
+
+ # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
+ variables = {"EPREFIX": eprefix}
+ opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ # Use restrictive permissions by default, in order to protect
+ # against vulnerabilities (like bug #315603 involving rcs).
+ os.chmod(opts['archive-dir'], 0o700)
+ elif not os.path.isdir(opts['archive-dir']):
+ print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
+ sys.exit(1)
+
+ return opts
def rcs_archive(archive, curconf, newconf, mrgconf):
- """Archive existing config in rcs (on trunk). Then, if mrgconf is
- specified and an old branch version exists, merge the user's changes
- and the distributed changes and put the result into mrgconf. Lastly,
- if newconf was specified, leave it in the archive dir with a .dist.new
- suffix along with the last 1.1.1 branch version with a .dist suffix."""
-
- try:
- os.makedirs(os.path.dirname(archive))
- except OSError:
- pass
-
- if os.path.isfile(curconf):
- try:
- shutil.copy2(curconf, archive)
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
- {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
-
- if os.path.exists(archive + ',v'):
- os.system(RCS_LOCK + ' ' + archive)
- os.system(RCS_PUT + ' ' + archive)
-
- ret = 0
- if newconf != '':
- os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
- has_branch = os.path.exists(archive)
- if has_branch:
- os.rename(archive, archive + '.dist')
-
- try:
- shutil.copy2(newconf, archive)
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
- {"newconf": newconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
-
- if has_branch:
- if mrgconf != '':
- # This puts the results of the merge into mrgconf.
- ret = os.system(RCS_MERGE % (archive, mrgconf))
- mystat = os.lstat(newconf)
- os.chmod(mrgconf, mystat.st_mode)
- os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
- os.rename(archive, archive + '.dist.new')
- return ret
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if os.path.exists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ if newconf != '':
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.exists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ try:
+ shutil.copy2(newconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if has_branch:
+ if mrgconf != '':
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+ os.rename(archive, archive + '.dist.new')
+
+ return ret
def file_archive(archive, curconf, newconf, mrgconf):
- """Archive existing config to the archive-dir, bumping old versions
- out of the way into .# versions (log-rotate style). Then, if mrgconf
- was specified and there is a .dist version, merge the user's changes
- and the distributed changes and put the result into mrgconf. Lastly,
- if newconf was specified, archive it as a .dist.new version (which
- gets moved to the .dist version at the end of the processing)."""
-
- try:
- os.makedirs(os.path.dirname(archive))
- except OSError:
- pass
-
- # Archive the current config file if it isn't already saved
- if os.path.exists(archive) \
- and len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0:
- suf = 1
- while suf < 9 and os.path.exists(archive + '.' + str(suf)):
- suf += 1
-
- while suf > 1:
- os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
- suf -= 1
-
- os.rename(archive, archive + '.1')
-
- if os.path.isfile(curconf):
- try:
- shutil.copy2(curconf, archive)
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
- {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
-
- if newconf != '':
- # Save off new config file in the archive dir with .dist.new suffix
- try:
- shutil.copy2(newconf, archive + '.dist.new')
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
- {"newconf": newconf, "archive": archive + '.dist.new', "reason": str(why)}, file=sys.stderr)
-
- ret = 0
- if mrgconf != '' and os.path.exists(archive + '.dist'):
- # This puts the results of the merge into mrgconf.
- ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
- mystat = os.lstat(newconf)
- os.chmod(mrgconf, mystat.st_mode)
- os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
-
- return ret
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ # Archive the current config file if it isn't already saved
+ if (os.path.exists(archive) and
+ len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0):
+ suf = 1
+ while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+ suf += 1
+
+ while suf > 1:
+ os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+ suf -= 1
+
+ os.rename(archive, archive + '.1')
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if newconf != '':
+ # Save off new config file in the archive dir with .dist.new suffix
+ try:
+ shutil.copy2(newconf, archive + '.dist.new')
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive + '.dist.new', "reason": str(why)}, file=sys.stderr)
+
+ ret = 0
+ if mrgconf != '' and os.path.exists(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+
+ return ret
def rcs_archive_post_process(archive):
- """Check in the archive file with the .dist.new suffix on the branch
- and remove the one with the .dist suffix."""
- os.rename(archive + '.dist.new', archive)
- if os.path.exists(archive + '.dist'):
- # Commit the last-distributed version onto the branch.
- os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
- os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
- os.unlink(archive + '.dist')
- else:
- # Forcefully commit the last-distributed version onto the branch.
- os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.exists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
def file_archive_post_process(archive):
- """Rename the archive file with the .dist.new suffix to a .dist suffix"""
- os.rename(archive + '.dist.new', archive + '.dist')
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ os.rename(archive + '.dist.new', archive + '.dist')
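# Usage sketch for diffstatusoutput() as rewritten above (the paths
# are placeholders); it mirrors getstatusoutput(), but decodes the
# child's output explicitly so a UnicodeDecodeError cannot hide it:
#
#     status, output = diffstatusoutput(
#         "diff -aq '%s' '%s'", "/etc/foo", "/etc/foo.new")
#     if status != 0:
#         print(output)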
diff --git a/pym/portage/eapi.py b/pym/portage/eapi.py
index 8b03f830e..4f77910c5 100644
--- a/pym/portage/eapi.py
+++ b/pym/portage/eapi.py
@@ -3,14 +3,19 @@
import collections
+from portage import eapi_is_supported
+
def eapi_has_iuse_defaults(eapi):
return eapi != "0"
+def eapi_has_iuse_effective(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
def eapi_has_slot_deps(eapi):
return eapi != "0"
-def eapi_has_slot_abi(eapi):
- return eapi in ("4-slot-abi",)
+def eapi_has_slot_operator(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python")
def eapi_has_src_uri_arrows(eapi):
return eapi not in ("0", "1")
@@ -39,8 +44,11 @@ def eapi_exports_merge_type(eapi):
def eapi_exports_replace_vars(eapi):
return eapi not in ("0", "1", "2", "3")
+def eapi_exports_EBUILD_PHASE_FUNC(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
def eapi_exports_REPOSITORY(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
def eapi_has_pkg_pretend(eapi):
return eapi not in ("0", "1", "2", "3")
@@ -54,21 +62,44 @@ def eapi_has_dosed_dohard(eapi):
def eapi_has_required_use(eapi):
return eapi not in ("0", "1", "2", "3")
+def eapi_has_required_use_at_most_one_of(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
def eapi_has_use_dep_defaults(eapi):
return eapi not in ("0", "1", "2", "3")
def eapi_has_repo_deps(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
def eapi_allows_dots_in_PN(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
def eapi_allows_dots_in_use_flags(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
+
+def eapi_supports_stable_use_forcing_and_masking(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_allows_directories_on_profile_level_and_repository_level(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_use_aliases(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_automatic_unpack_dependencies(eapi):
+ return eapi in ("5-progress",)
+
+def eapi_has_hdepend(eapi):
+ return eapi in ("5-hdepend",)
+
+def eapi_has_targetroot(eapi):
+ return eapi in ("5-hdepend",)
_eapi_attrs = collections.namedtuple('_eapi_attrs',
- 'dots_in_PN dots_in_use_flags iuse_defaults '
- 'repo_deps required_use slot_abi slot_deps '
+ 'dots_in_PN dots_in_use_flags exports_EBUILD_PHASE_FUNC '
+ 'feature_flag_test feature_flag_targetroot '
+ 'hdepend iuse_defaults iuse_effective '
+ 'repo_deps required_use required_use_at_most_one_of slot_operator slot_deps '
'src_uri_arrows strong_blocks use_deps use_dep_defaults')
_eapi_attrs_cache = {}
@@ -77,24 +108,37 @@ def _get_eapi_attrs(eapi):
"""
When eapi is None, validation is not as strict, since we want the
same to work for multiple EAPIs that may have slightly different rules.
+ An unsupported eapi is handled the same as when eapi is None, which may
+ be helpful for handling of corrupt EAPI metadata in essential functions
+ such as pkgsplit.
"""
eapi_attrs = _eapi_attrs_cache.get(eapi)
if eapi_attrs is not None:
return eapi_attrs
+ orig_eapi = eapi
+ if eapi is not None and not eapi_is_supported(eapi):
+ eapi = None
+
eapi_attrs = _eapi_attrs(
dots_in_PN = (eapi is None or eapi_allows_dots_in_PN(eapi)),
dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags(eapi)),
+ exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
+ feature_flag_test = True,
+ feature_flag_targetroot = (eapi is not None and eapi_has_targetroot(eapi)),
+ hdepend = (eapi is not None and eapi_has_hdepend(eapi)),
iuse_defaults = (eapi is None or eapi_has_iuse_defaults(eapi)),
+ iuse_effective = (eapi is not None and eapi_has_iuse_effective(eapi)),
repo_deps = (eapi is None or eapi_has_repo_deps(eapi)),
required_use = (eapi is None or eapi_has_required_use(eapi)),
+ required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
slot_deps = (eapi is None or eapi_has_slot_deps(eapi)),
- slot_abi = (eapi is None or eapi_has_slot_abi(eapi)),
+ slot_operator = (eapi is None or eapi_has_slot_operator(eapi)),
src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows(eapi)),
strong_blocks = (eapi is None or eapi_has_strong_blocks(eapi)),
use_deps = (eapi is None or eapi_has_use_deps(eapi)),
use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults(eapi))
)
- _eapi_attrs_cache[eapi] = eapi_attrs
+ _eapi_attrs_cache[orig_eapi] = eapi_attrs
return eapi_attrs
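# Usage sketch of the memoized attribute bundle; the values follow
# from the predicates defined above:
#
#     from portage.eapi import _get_eapi_attrs
#     attrs = _get_eapi_attrs("5")
#     attrs.slot_operator     # True
#     attrs.hdepend           # False: only "5-hdepend" sets this
#     _get_eapi_attrs("no-such-eapi")   # unsupported: validated like
#                                       # None, cached under its own key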
diff --git a/pym/portage/eclass_cache.py b/pym/portage/eclass_cache.py
index cb2cf8a98..2988d25d6 100644
--- a/pym/portage/eclass_cache.py
+++ b/pym/portage/eclass_cache.py
@@ -1,19 +1,24 @@
-# Copyright 2005-2011 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+from __future__ import unicode_literals
+
__all__ = ["cache"]
import stat
import sys
import operator
+import warnings
from portage.util import normalize_path
import errno
from portage.exception import FileNotFound, PermissionDenied
from portage import os
from portage import checksum
+from portage import _shell_quote
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
@@ -56,17 +61,20 @@ class cache(object):
"""
Maintains the cache information about eclasses used in ebuilds.
"""
- def __init__(self, porttree_root, overlays=[]):
+ def __init__(self, porttree_root, overlays=None):
+ if overlays is not None:
+ warnings.warn("overlays parameter of portage.eclass_cache.cache constructor is deprecated and no longer used",
+ DeprecationWarning, stacklevel=2)
self.eclasses = {} # {"Name": hashed_path}
self._eclass_locations = {}
+ self._eclass_locations_str = None
# screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
# ~harring
if porttree_root:
self.porttree_root = porttree_root
- self.porttrees = [self.porttree_root] + overlays
- self.porttrees = tuple(map(normalize_path, self.porttrees))
+ self.porttrees = (normalize_path(self.porttree_root),)
self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
self.update_eclasses()
else:
@@ -98,6 +106,7 @@ class cache(object):
self.porttrees = self.porttrees + other.porttrees
self.eclasses.update(other.eclasses)
self._eclass_locations.update(other._eclass_locations)
+ self._eclass_locations_str = None
def update_eclasses(self):
self.eclasses = {}
@@ -124,7 +133,7 @@ class cache(object):
mtime = obj.mtime
except FileNotFound:
continue
- ys=y[:-eclass_len]
+ ys = y[:-eclass_len]
if x == self._master_eclass_root:
master_eclasses[ys] = mtime
self.eclasses[ys] = obj
@@ -169,3 +178,10 @@ class cache(object):
ec_dict[x] = self.eclasses[x]
return ec_dict
+
+ @property
+ def eclass_locations_string(self):
+ if self._eclass_locations_str is None:
+ self._eclass_locations_str = " ".join(_shell_quote(x)
+ for x in reversed(self.porttrees))
+ return self._eclass_locations_str
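
A quick illustration of what the new eclass_locations_string property yields: each tree is shell-quoted and the tuple is reversed, presumably so overlays are listed ahead of the master tree when the string is handed to bash. The paths are examples; _shell_quote is the real portage helper.

	from portage import _shell_quote

	porttrees = ("/usr/portage", "/var/lib/layman/my overlay")
	print(" ".join(_shell_quote(x) for x in reversed(porttrees)))
	# -> "/var/lib/layman/my overlay" "/usr/portage"
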
diff --git a/pym/portage/elog/__init__.py b/pym/portage/elog/__init__.py
index 33dac178d..cc086123f 100644
--- a/pym/portage/elog/__init__.py
+++ b/pym/portage/elog/__init__.py
@@ -1,9 +1,10 @@
# elog/__init__.py - elog core functions
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
import portage
diff --git a/pym/portage/elog/mod_echo.py b/pym/portage/elog/mod_echo.py
index 59117beb3..f9cc53788 100644
--- a/pym/portage/elog/mod_echo.py
+++ b/pym/portage/elog/mod_echo.py
@@ -1,5 +1,5 @@
# elog/mod_echo.py - elog dispatch module
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -10,6 +10,7 @@ from portage.const import EBUILD_PHASES
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_items = []
diff --git a/pym/portage/elog/mod_save.py b/pym/portage/elog/mod_save.py
index c69f4a3cf..7b1cd46a8 100644
--- a/pym/portage/elog/mod_save.py
+++ b/pym/portage/elog/mod_save.py
@@ -1,7 +1,8 @@
# elog/mod_save.py - elog dispatch module
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import errno
import io
import time
import portage
@@ -47,11 +48,22 @@ def process(mysettings, key, logentries, fulltext):
elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
_ensure_log_subdirs(logdir, log_subdir)
- elogfile = io.open(_unicode_encode(elogfilename,
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['content'], errors='backslashreplace')
- elogfile.write(_unicode_decode(fulltext))
- elogfile.close()
+ try:
+ with io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['content'],
+ errors='backslashreplace') as elogfile:
+ elogfile.write(_unicode_decode(fulltext))
+ except IOError as e:
+ func_call = "open('%s', 'w')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
# Copy group permission bits from parent directory.
elogdir_st = os.stat(log_subdir)
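
The IOError translation above is a pattern portage repeats for several log writers; condensed into a hypothetical table-driven form, it amounts to:

	import errno
	import portage.exception

	_ERRNO_TO_EXC = {
		errno.EACCES: portage.exception.PermissionDenied,
		errno.EPERM: portage.exception.OperationNotPermitted,
		errno.EROFS: portage.exception.ReadOnlyFileSystem,
	}

	try:
		with open("/var/log/portage/elog/example.log", "w") as f:
			f.write("...")
	except IOError as e:
		exc = _ERRNO_TO_EXC.get(e.errno)
		if exc is not None:
			raise exc("open('%s', 'w')" % "/var/log/portage/elog/example.log")
		raise  # unknown errno: propagate unchanged
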
diff --git a/pym/portage/elog/mod_save_summary.py b/pym/portage/elog/mod_save_summary.py
index 347f66e6e..786f89454 100644
--- a/pym/portage/elog/mod_save_summary.py
+++ b/pym/portage/elog/mod_save_summary.py
@@ -1,8 +1,12 @@
# elog/mod_save_summary.py - elog dispatch module
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
+import errno
import io
+import sys
import time
import portage
from portage import os
@@ -37,9 +41,21 @@ def process(mysettings, key, logentries, fulltext):
# TODO: Locking
elogfilename = elogdir+"/summary.log"
- elogfile = io.open(_unicode_encode(elogfilename,
- encoding=_encodings['fs'], errors='strict'),
- mode='a', encoding=_encodings['content'], errors='backslashreplace')
+ try:
+ elogfile = io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ except IOError as e:
+ func_call = "open('%s', 'a')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
# Copy group permission bits from parent directory.
elogdir_st = os.stat(elogdir)
@@ -58,17 +74,19 @@ def process(mysettings, key, logentries, fulltext):
apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
mode=elogdir_grp_mode, mask=0)
- time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
- time.localtime(time.time()))
- # Avoid potential UnicodeDecodeError later.
+ time_fmt = "%Y-%m-%d %H:%M:%S %Z"
+ if sys.hexversion < 0x3000000:
+ time_fmt = _unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %Z may contain non-ascii chars.
time_str = _unicode_decode(time_str,
encoding=_encodings['content'], errors='replace')
- elogfile.write(_unicode_decode(
- _(">>> Messages generated by process " +
+ elogfile.write(_(">>> Messages generated by process "
"%(pid)d on %(time)s for package %(pkg)s:\n\n") %
- {"pid": os.getpid(), "time": time_str, "pkg": key}))
+ {"pid": os.getpid(), "time": time_str, "pkg": key})
elogfile.write(_unicode_decode(fulltext))
- elogfile.write(_unicode_decode("\n"))
+ elogfile.write("\n")
elogfile.close()
return elogfilename
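
A short demonstration of the Python 2 pitfall the strftime change above works around: under Python 2, time.strftime() takes and returns byte strings, and a locale-dependent %Z may contain non-ASCII bytes, hence encode-then-decode with errors='replace' (utf-8 stands in here for portage's _encodings['content']):

	import sys
	import time

	time_fmt = "%Y-%m-%d %H:%M:%S %Z"
	if sys.hexversion < 0x3000000:
		# Python 2: strftime() expects a native (byte) string format
		time_fmt = time_fmt.encode("utf-8")
	time_str = time.strftime(time_fmt, time.localtime(time.time()))
	if isinstance(time_str, bytes):
		# decode the possibly non-ascii %Z timezone name safely
		time_str = time_str.decode("utf-8", "replace")
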
diff --git a/pym/portage/elog/mod_syslog.py b/pym/portage/elog/mod_syslog.py
index c8bf44172..8b26ffa1e 100644
--- a/pym/portage/elog/mod_syslog.py
+++ b/pym/portage/elog/mod_syslog.py
@@ -1,5 +1,5 @@
# elog/mod_syslog.py - elog dispatch module
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -8,12 +8,13 @@ from portage.const import EBUILD_PHASES
from portage import _encodings
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_pri = {
- "INFO" : syslog.LOG_INFO,
- "WARN" : syslog.LOG_WARNING,
- "ERROR" : syslog.LOG_ERR,
+ "INFO" : syslog.LOG_INFO,
+ "WARN" : syslog.LOG_WARNING,
+ "ERROR" : syslog.LOG_ERR,
"LOG" : syslog.LOG_NOTICE,
"QA" : syslog.LOG_WARNING
}
@@ -23,14 +24,14 @@ def process(mysettings, key, logentries, fulltext):
for phase in EBUILD_PHASES:
if not phase in logentries:
continue
- for msgtype,msgcontent in logentries[phase]:
+ for msgtype, msgcontent in logentries[phase]:
if isinstance(msgcontent, basestring):
msgcontent = [msgcontent]
for line in msgcontent:
line = "%s: %s: %s" % (key, phase, line)
if sys.hexversion < 0x3000000 and not isinstance(line, bytes):
# Avoid TypeError from syslog.syslog()
- line = line.encode(_encodings['content'],
+ line = line.encode(_encodings['content'],
'backslashreplace')
syslog.syslog(_pri[msgtype], line.rstrip("\n"))
syslog.closelog()
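
For context, the dispatch above boils down to mapping each elog class to a syslog priority and emitting lines one at a time; a standalone sketch (the message content is invented):

	import syslog

	_pri = {
		"INFO": syslog.LOG_INFO,
		"WARN": syslog.LOG_WARNING,
		"ERROR": syslog.LOG_ERR,
		"LOG": syslog.LOG_NOTICE,
		"QA": syslog.LOG_WARNING,
	}

	syslog.openlog("portage")
	syslog.syslog(_pri["WARN"], "app-misc/foo-1: postinst: check your config")
	syslog.closelog()
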
diff --git a/pym/portage/emaint/__init__.py b/pym/portage/emaint/__init__.py
index 5e0ae700a..48bc6e2ae 100644
--- a/pym/portage/emaint/__init__.py
+++ b/pym/portage/emaint/__init__.py
@@ -1,7 +1,5 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'The emaint program provides checks and maintenance
-on a gentoo system.
+"""System health checks and maintenance utilities.
"""
-
diff --git a/pym/portage/emaint/defaults.py b/pym/portage/emaint/defaults.py
index d9d83ffbb..30f36af50 100644
--- a/pym/portage/emaint/defaults.py
+++ b/pym/portage/emaint/defaults.py
@@ -1,18 +1,25 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# parser option data
CHECK = {"short": "-c", "long": "--check",
"help": "Check for problems (a default option for most modules)",
'status': "Checking %s for problems",
+ 'action': 'store_true',
'func': 'check'
}
FIX = {"short": "-f", "long": "--fix",
"help": "Attempt to fix problems (a default option for most modules)",
'status': "Attempting to fix %s",
+ 'action': 'store_true',
'func': 'fix'
}
+VERSION = {"long": "--version",
+ "help": "show program's version number and exit",
+ 'action': 'store_true',
+ }
+
# parser options
-DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX}
+DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX, 'version': VERSION}
diff --git a/pym/portage/emaint/main.py b/pym/portage/emaint/main.py
index dbc5f18cc..6a17027b5 100644
--- a/pym/portage/emaint/main.py
+++ b/pym/portage/emaint/main.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -6,61 +6,59 @@ from __future__ import print_function
import sys
import textwrap
-from optparse import OptionParser, OptionValueError
-
import portage
from portage import os
from portage.emaint.module import Modules
from portage.emaint.progress import ProgressBar
from portage.emaint.defaults import DEFAULT_OPTIONS
+from portage.util._argparse import ArgumentParser
class OptionItem(object):
- """class to hold module OptionParser options data
+ """class to hold module ArgumentParser options data
"""
- def __init__(self, opt, parser):
+ def __init__(self, opt):
"""
@type opt: dictionary
@param opt: options parser options
"""
- self.parser = parser
- self.short = opt['short']
- self.long = opt['long']
- self.help = opt['help']
- self.status = opt['status']
- self.func = opt['func']
- self.action = opt.get('action', "callback")
- self.type = opt.get('type', None)
- self.dest = opt.get('dest', None)
- self.callback = opt.get('callback', self._exclusive)
- self.callback_kwargs = opt.get('callback_kwargs', {"var":"action"})
-
-
- def _exclusive(self, option, *args, **kw):
- """Generic check for the 2 default options
- """
- var = kw.get("var", None)
- if var is None:
- raise ValueError("var not specified to exclusive()")
- if getattr(self.parser, var, ""):
- raise OptionValueError("%s and %s are exclusive options"
- % (getattr(self.parser, var), option))
- setattr(self.parser, var, str(option))
-
- def check_action(self, action):
- """Checks if 'action' is the same as this option
-
- @type action: string
- @param action: the action to compare
- @rtype: boolean
- """
- if action == self.action:
- return True
- elif action == '/'.join([self.short, self.long]):
- return True
- return False
-
+ self.short = opt.get('short')
+ self.long = opt.get('long')
+		# '-' is not allowed in Python identifiers,
+		# so store the sanitized target variable name
+		self.target = self.long[2:].replace('-', '_')
+ self.help = opt.get('help')
+ self.status = opt.get('status')
+ self.func = opt.get('func')
+ self.action = opt.get('action')
+ self.type = opt.get('type')
+ self.dest = opt.get('dest')
+
+ @property
+ def pargs(self):
+ pargs = []
+ if self.short is not None:
+ pargs.append(self.short)
+ if self.long is not None:
+ pargs.append(self.long)
+ return pargs
+
+ @property
+ def kwargs(self):
+ # Support for keyword arguments varies depending on the action,
+ # so only pass in the keywords that are needed, in order
+ # to avoid a TypeError.
+ kwargs = {}
+ if self.help is not None:
+ kwargs['help'] = self.help
+ if self.action is not None:
+ kwargs['action'] = self.action
+ if self.type is not None:
+ kwargs['type'] = self.type
+ if self.dest is not None:
+ kwargs['dest'] = self.dest
+ return kwargs
def usage(module_controller):
_usage = "usage: emaint [options] COMMAND"
@@ -91,15 +89,14 @@ def module_opts(module_controller, module):
opts = DEFAULT_OPTIONS
for opt in sorted(opts):
optd = opts[opt]
- opto = " %s, %s" %(optd['short'], optd['long'])
- _usage += '%s %s\n' % (opto.ljust(15),optd['help'])
+ opto = " %s, %s" % (optd['short'], optd['long'])
+ _usage += '%s %s\n' % (opto.ljust(15), optd['help'])
_usage += '\n'
return _usage
class TaskHandler(object):
- """Handles the running of the tasks it is given
- """
+ """Handles the running of the tasks it is given"""
def __init__(self, show_progress_bar=True, verbose=True, callback=None):
self.show_progress_bar = show_progress_bar
@@ -108,14 +105,13 @@ class TaskHandler(object):
self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
self.progress_bar = ProgressBar(self.isatty, title="Emaint", max_desc_length=27)
-
def run_tasks(self, tasks, func, status=None, verbose=True, options=None):
"""Runs the module tasks"""
if tasks is None or func is None:
return
for task in tasks:
inst = task()
- show_progress = self.show_progress_bar
+ show_progress = self.show_progress_bar and self.isatty
			# check whether the function supports a progress bar
			# and override it off if it does not
if show_progress and hasattr(inst, 'can_progressbar'):
@@ -133,7 +129,7 @@ class TaskHandler(object):
'options': options.copy()
}
result = getattr(inst, func)(**kwargs)
- if self.isatty and show_progress:
+ if show_progress:
# make sure the final progress is displayed
self.progress_bar.display()
print()
@@ -160,59 +156,68 @@ def emaint_main(myargv):
module_names.insert(0, "all")
- parser = OptionParser(usage=usage(module_controller), version=portage.VERSION)
+ parser = ArgumentParser(usage=usage(module_controller))
# add default options
parser_options = []
for opt in DEFAULT_OPTIONS:
- parser_options.append(OptionItem(DEFAULT_OPTIONS[opt], parser))
+ parser_options.append(OptionItem(DEFAULT_OPTIONS[opt]))
for mod in module_names[1:]:
desc = module_controller.get_func_descriptions(mod)
if desc:
for opt in desc:
- parser_options.append(OptionItem(desc[opt], parser))
+ parser_options.append(OptionItem(desc[opt]))
for opt in parser_options:
- parser.add_option(opt.short, opt.long, help=opt.help, action=opt.action,
- type=opt.type, dest=opt.dest,
- callback=opt.callback, callback_kwargs=opt.callback_kwargs)
+ parser.add_argument(*opt.pargs, **opt.kwargs)
- parser.action = None
+ options, args = parser.parse_known_args(args=myargv)
+
+ if options.version:
+ print(portage.VERSION)
+ return os.EX_OK
- (options, args) = parser.parse_args(args=myargv)
- #print('options', options, '\nargs', args, '\naction', parser.action)
if len(args) != 1:
parser.error("Incorrect number of arguments")
if args[0] not in module_names:
parser.error("%s target is not a known target" % args[0])
- if parser.action:
- action = parser.action
- else:
- action = "-c/--check"
- long_action = action.split('/')[1].lstrip('-')
- #print("DEBUG: action = ", action, long_action)
+ check_opt = None
+ func = status = long_action = None
+ for opt in parser_options:
+ if opt.long == '--check':
+ # Default action
+ check_opt = opt
+ if opt.status and getattr(options, opt.target, False):
+ if long_action is not None:
+ parser.error("--%s and %s are exclusive options" %
+ (long_action, opt.long))
+ status = opt.status
+ func = opt.func
+ long_action = opt.long.lstrip('-')
+
+ if long_action is None:
+ #print("DEBUG: long_action is None: setting to 'check'")
+ long_action = 'check'
+ func = check_opt.func
+ status = check_opt.status
if args[0] == "all":
tasks = []
for m in module_names[1:]:
- #print("DEBUG: module: %s, functions: " %(m, str(module_controller.get_functions(m))))
- if long_action in module_controller.get_functions(m):
+ #print("DEBUG: module: %s, functions: " % (m, str(module_controller.get_functions(m))))
+ if func in module_controller.get_functions(m):
tasks.append(module_controller.get_class(m))
- elif long_action in module_controller.get_functions(args[0]):
+ elif func in module_controller.get_functions(args[0]):
tasks = [module_controller.get_class(args[0] )]
else:
- print("\nERROR: module '%s' does not have option '%s'\n" %(args[0], action))
- print(module_opts(module_controller, args[0]))
+ portage.util.writemsg(
+ "\nERROR: module '%s' does not have option '--%s'\n\n" %
+ (args[0], long_action), noiselevel=-1)
+ portage.util.writemsg(module_opts(module_controller, args[0]),
+ noiselevel=-1)
sys.exit(1)
- func = status = None
- for opt in parser_options:
- if opt.check_action(action):
- status = opt.status
- func = opt.func
- break
# need to pass the parser options dict to the modules
# so they are available if needed.
task_opts = options.__dict__
taskmaster = TaskHandler(callback=print_results)
taskmaster.run_tasks(tasks, func, status, options=task_opts)
-
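
The pargs/kwargs split above exists because argparse's add_argument() rejects keywords that a given action does not accept; roughly, the registration logic reduces to the following (the option dict and arguments are illustrative, and stdlib argparse stands in for portage.util._argparse):

	from argparse import ArgumentParser

	opt = {"short": "-c", "long": "--check",
		"help": "Check for problems", "action": "store_true"}

	parser = ArgumentParser(usage="emaint [options] COMMAND")
	pargs = [v for v in (opt.get("short"), opt.get("long")) if v is not None]
	kwargs = {k: opt[k] for k in ("help", "action", "type", "dest")
		if opt.get(k) is not None}
	parser.add_argument(*pargs, **kwargs)

	options, args = parser.parse_known_args(["-c", "world"])
	print(options.check, args)  # True ['world']
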
diff --git a/pym/portage/emaint/module.py b/pym/portage/emaint/module.py
index 64b0c64b5..bf7d25fc5 100644
--- a/pym/portage/emaint/module.py
+++ b/pym/portage/emaint/module.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
@@ -37,10 +37,10 @@ class Module(object):
self.valid = False
try:
mod_name = ".".join([self._namepath, self.name])
- self._module = __import__(mod_name, [],[], ["not empty"])
+ self._module = __import__(mod_name, [], [], ["not empty"])
self.valid = True
except ImportError as e:
- print("MODULE; failed import", mod_name, " error was:",e)
+ print("MODULE; failed import", mod_name, " error was:", e)
return False
self.module_spec = self._module.module_spec
for submodule in self.module_spec['provides']:
@@ -61,7 +61,7 @@ class Module(object):
module = kid['instance']
else:
try:
- module = __import__(kid['module_name'], [],[], ["not empty"])
+ module = __import__(kid['module_name'], [], [], ["not empty"])
kid['instance'] = module
kid['is_imported'] = True
except ImportError:
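
Worth noting about the __import__ calls above: the non-empty fromlist (the literal ["not empty"]) is what makes __import__ return the leaf submodule rather than the top-level package:

	pkg = __import__("portage.emaint.module")
	mod = __import__("portage.emaint.module", [], [], ["not empty"])
	print(pkg.__name__)  # portage
	print(mod.__name__)  # portage.emaint.module
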
diff --git a/pym/portage/emaint/modules/__init__.py b/pym/portage/emaint/modules/__init__.py
index 35674e342..f67197d9f 100644
--- a/pym/portage/emaint/modules/__init__.py
+++ b/pym/portage/emaint/modules/__init__.py
@@ -1,7 +1,5 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'The emaint program plug-in module provides an automatic method
-of adding/removing modules to perform checks and maintenance
-on a gentoo system.
+"""Plug-in modules for system health checks and maintenance.
"""
diff --git a/pym/portage/emaint/modules/binhost/__init__.py b/pym/portage/emaint/modules/binhost/__init__.py
index 1a61af42b..c60e8bcb4 100644
--- a/pym/portage/emaint/modules/binhost/__init__.py
+++ b/pym/portage/emaint/modules/binhost/__init__.py
@@ -1,20 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'The emaint program module provides checks and maintenancefor:
- Scanning, checking and fixing problems in the world file.
+"""Scan and generate metadata indexes for binary packages.
"""
module_spec = {
'name': 'binhost',
- 'description': "Provides functions to scan, check and " + \
- "Generate a metadata index for binary packages",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "binhost",
'class': "BinhostHandler",
- 'description': "Generate a metadata index for binary packages",
+ 'description': __doc__,
'functions': ['check', 'fix'],
'func_desc': {}
}
diff --git a/pym/portage/emaint/modules/binhost/binhost.py b/pym/portage/emaint/modules/binhost/binhost.py
index b540d7686..1138a8c7e 100644
--- a/pym/portage/emaint/modules/binhost/binhost.py
+++ b/pym/portage/emaint/modules/binhost/binhost.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -9,7 +9,9 @@ from portage import os
from portage.util import writemsg
import sys
+
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
class BinhostHandler(object):
@@ -151,12 +153,8 @@ class BinhostHandler(object):
del pkgindex.packages[:]
pkgindex.packages.extend(metadata.values())
- from portage.util import atomic_ofstream
- f = atomic_ofstream(self._pkgindex_file)
- try:
- self._pkgindex.write(f)
- finally:
- f.close()
+ bintree._pkgindex_write(self._pkgindex)
+
finally:
locks.unlockfile(pkgindex_lock)
diff --git a/pym/portage/emaint/modules/config/__init__.py b/pym/portage/emaint/modules/config/__init__.py
index 22abb07b1..f0585b39a 100644
--- a/pym/portage/emaint/modules/config/__init__.py
+++ b/pym/portage/emaint/modules/config/__init__.py
@@ -1,20 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Cleaning the emerge config tracker list
+"""Check and clean the config tracker list for uninstalled packages.
"""
module_spec = {
'name': 'config',
- 'description': "Provides functions to scan, check for and fix no " +\
- "longer installed config files in emerge's tracker file",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "cleanconfmem",
'class': "CleanConfig",
- 'description': "Discard no longer installed config tracker entries",
+ 'description': __doc__,
'functions': ['check', 'fix'],
'func_desc': {}
}
diff --git a/pym/portage/emaint/modules/config/config.py b/pym/portage/emaint/modules/config/config.py
index a80d87d29..dad024b21 100644
--- a/pym/portage/emaint/modules/config/config.py
+++ b/pym/portage/emaint/modules/config/config.py
@@ -4,14 +4,14 @@
import portage
from portage import os
from portage.const import PRIVATE_PATH
-from portage.checksum import perform_md5
-
+from portage.util import grabdict, writedict
class CleanConfig(object):
short_desc = "Discard any no longer installed configs from emerge's tracker list"
def __init__(self):
+ self._root = portage.settings["ROOT"]
self.target = os.path.join(portage.settings["EROOT"], PRIVATE_PATH, 'config')
def name():
@@ -19,70 +19,55 @@ class CleanConfig(object):
name = staticmethod(name)
def load_configlist(self):
-
- configs = {}
- with open(self.target, 'r') as configfile:
- lines = configfile.readlines()
- for line in lines:
- ls = line.split()
- configs[ls[0]] = ls[1]
- return configs
+ return grabdict(self.target)
def check(self, **kwargs):
onProgress = kwargs.get('onProgress', None)
configs = self.load_configlist()
messages = []
- chksums = []
maxval = len(configs)
if onProgress:
onProgress(maxval, 0)
i = 0
keys = sorted(configs)
for config in keys:
- if os.path.exists(config):
- md5sumactual = perform_md5(config)
- if md5sumactual != configs[config]:
- chksums.append(" %s" % config)
- else:
+ if not os.path.exists(config):
messages.append(" %s" % config)
if onProgress:
onProgress(maxval, i+1)
i += 1
- return self._format_output(messages, chksums)
+ return self._format_output(messages)
def fix(self, **kwargs):
onProgress = kwargs.get('onProgress', None)
configs = self.load_configlist()
messages = []
- chksums = []
maxval = len(configs)
if onProgress:
onProgress(maxval, 0)
i = 0
- keys = sorted(configs)
- for config in keys:
- if os.path.exists(config):
- md5sumactual = perform_md5(config)
- if md5sumactual != configs[config]:
- chksums.append(" %s" % config)
- configs.pop(config)
+
+ root = self._root
+ if root == "/":
+ root = None
+ modified = False
+ for config in sorted(configs):
+ if root is None:
+ full_path = config
else:
- configs.pop(config)
- messages.append(" %s" % config)
+ full_path = os.path.join(root, config.lstrip(os.sep))
+ if not os.path.exists(full_path):
+ modified = True
+ configs.pop(config)
+ messages.append(" %s" % config)
if onProgress:
onProgress(maxval, i+1)
i += 1
- lines = []
- keys = sorted(configs)
- for key in keys:
- line = ' '.join([key, configs[key]])
- lines.append(line)
- lines.append('')
- with open(self.target, 'w') as configfile:
- configfile.write('\n'.join(lines))
- return self._format_output(messages, chksums, True)
+ if modified:
+ writedict(configs, self.target)
+ return self._format_output(messages, True)
- def _format_output(self, messages=[], chksums=[], cleaned=False):
+ def _format_output(self, messages=[], cleaned=False):
output = []
if messages:
output.append('Not Installed:')
@@ -91,11 +76,4 @@ class CleanConfig(object):
if cleaned:
tot += ' ...Cleaned'
output.append(tot % len(messages))
- if chksums:
- output.append('\nChecksums did not match:')
- output += chksums
- tot = '------------------------------------\n Total %i Checksums did not match'
- if cleaned:
- tot += ' ...Cleaned'
- output.append(tot % len(chksums))
return output
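
The rewritten fix() above prefixes each tracked path with ROOT before the existence check, so chroot-style installs are handled correctly; the prefixing step in isolation (paths are examples):

	import os

	root = "/mnt/gentoo"           # portage.settings["ROOT"]; "/" disables prefixing
	config = "/etc/._cfg0000_foo"  # hypothetical config tracker entry
	full_path = config if root == "/" else os.path.join(root, config.lstrip(os.sep))
	print(full_path)  # /mnt/gentoo/etc/._cfg0000_foo
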
diff --git a/pym/portage/emaint/modules/logs/__init__.py b/pym/portage/emaint/modules/logs/__init__.py
index 005b608a6..0407efe2b 100644
--- a/pym/portage/emaint/modules/logs/__init__.py
+++ b/pym/portage/emaint/modules/logs/__init__.py
@@ -1,38 +1,34 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Cleaning the PORT_LOGDIR logs
+"""Check and clean old logs in the PORT_LOGDIR.
"""
module_spec = {
'name': 'logs',
- 'description': "Provides functions to scan, check and clean old logs " +\
- "in the PORT_LOGDIR",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "logs",
'class': "CleanLogs",
- 'description': "Clean out old logs from the PORT_LOGDIR",
+ 'description': __doc__,
'functions': ['check','clean'],
'func_desc': {
'clean': {
"short": "-C", "long": "--clean",
"help": "Cleans out logs more than 7 days old (cleanlogs only)" + \
- " modulke-options: -t, -p",
+ " module-options: -t, -p",
'status': "Cleaning %s",
- 'func': 'clean'
+ 'action': 'store_true',
+ 'func': 'clean',
},
'time': {
"short": "-t", "long": "--time",
"help": "(cleanlogs only): -t, --time Delete logs older than NUM of days",
'status': "",
- 'action': 'store',
- 'type': 'int',
+ 'type': int,
'dest': 'NUM',
- 'callback': None,
- 'callback_kwargs': None,
'func': 'clean'
},
'pretend': {
@@ -41,8 +37,6 @@ module_spec = {
'status': "",
'action': 'store_true',
'dest': 'pretend',
- 'callback': None,
- 'callback_kwargs': None,
'func': 'clean'
}
}
diff --git a/pym/portage/emaint/modules/logs/logs.py b/pym/portage/emaint/modules/logs/logs.py
index 32c8508f7..fe65cf587 100644
--- a/pym/portage/emaint/modules/logs/logs.py
+++ b/pym/portage/emaint/modules/logs/logs.py
@@ -39,11 +39,10 @@ class CleanLogs(object):
options: dict:
'NUM': int: number of days
'pretend': boolean
- 'eerror': defaults to None, optional output module to output errors.
- 'einfo': defaults to None, optional output module to output info msgs.
"""
messages = []
num_of_days = None
+ pretend = False
if kwargs:
			# convoluted, I know, but portage.settings does not exist in
# kwargs.get() when called from _emerge.main.clean_logs()
@@ -54,8 +53,6 @@ class CleanLogs(object):
if options:
num_of_days = options.get('NUM', None)
pretend = options.get('pretend', False)
- eerror = options.get('eerror', None)
- einfo = options.get('einfo', None)
clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
if clean_cmd:
@@ -75,7 +72,7 @@ class CleanLogs(object):
if not clean_cmd:
return []
rval = self._clean_logs(clean_cmd, settings)
- messages += self._convert_errors(rval, eerror, einfo)
+ messages += self._convert_errors(rval)
return messages
@@ -96,19 +93,11 @@ class CleanLogs(object):
@staticmethod
- def _convert_errors(rval, eerror=None, einfo=None):
+ def _convert_errors(rval):
msg = []
if rval != os.EX_OK:
msg.append("PORT_LOGDIR_CLEAN command returned %s"
% ("%d" % rval if rval else "None"))
msg.append("See the make.conf(5) man page for "
"PORT_LOGDIR_CLEAN usage instructions.")
- if eerror:
- for m in msg:
- eerror(m)
- else:
- msg.append("PORT_LOGDIR_CLEAN command succeeded")
- if einfo:
- for m in msg:
- einfo(m)
return msg
diff --git a/pym/portage/emaint/modules/move/__init__.py b/pym/portage/emaint/modules/move/__init__.py
index 5399440ce..d31d7b346 100644
--- a/pym/portage/emaint/modules/move/__init__.py
+++ b/pym/portage/emaint/modules/move/__init__.py
@@ -1,21 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
- 1) "Performing package move updates for installed packages",
- 2)"Perform package move updates for binary packages"
+"""Perform package move updates for installed and binary packages.
"""
module_spec = {
'name': 'move',
- 'description': "Provides functions to check for and move packages " +\
- "either installed or binary packages stored on this system",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "moveinst",
'class': "MoveInstalled",
- 'description': "Perform package move updates for installed packages",
+ 'description': __doc__,
'options': ['check', 'fix'],
'functions': ['check', 'fix'],
'func_desc': {
diff --git a/pym/portage/emaint/modules/move/move.py b/pym/portage/emaint/modules/move/move.py
index 018e6cac1..ef674d47a 100644
--- a/pym/portage/emaint/modules/move/move.py
+++ b/pym/portage/emaint/modules/move/move.py
@@ -3,14 +3,16 @@
import portage
from portage import os
-
+from portage.exception import InvalidData
+from _emerge.Package import Package
+from portage.versions import _pkg_str
class MoveHandler(object):
def __init__(self, tree, porttree):
self._tree = tree
self._portdb = porttree.dbapi
- self._update_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE"]
+ self._update_keys = Package._dep_keys + ("PROVIDE",)
self._master_repo = \
self._portdb.getRepositoryName(self._portdb.porttree_root)
@@ -48,6 +50,8 @@ class MoveHandler(object):
# progress bar is updated in indeterminate mode.
match = self._tree.dbapi.match
aux_get = self._tree.dbapi.aux_get
+ pkg_str = self._tree.dbapi._pkg_str
+ settings = self._tree.dbapi.settings
if onProgress:
onProgress(0, 0)
for repo, updates in allupdates.items():
@@ -65,13 +69,21 @@ class MoveHandler(object):
if update_cmd[0] == "move":
origcp, newcp = update_cmd[1:]
for cpv in match(origcp):
- if repo_match(aux_get(cpv, ["repository"])[0]):
+ try:
+ cpv = pkg_str(cpv, origcp.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
errors.append("'%s' moved to '%s'" % (cpv, newcp))
elif update_cmd[0] == "slotmove":
pkg, origslot, newslot = update_cmd[1:]
- for cpv in match(pkg):
- slot, prepo = aux_get(cpv, ["SLOT", "repository"])
- if slot == origslot and repo_match(prepo):
+ atom = pkg.with_slot(origslot)
+ for cpv in match(atom):
+ try:
+ cpv = pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
errors.append("'%s' slot moved from '%s' to '%s'" % \
(cpv, origslot, newslot))
if onProgress:
@@ -82,15 +94,21 @@ class MoveHandler(object):
cpv_all = self._tree.dbapi.cpv_all()
cpv_all.sort()
maxval = len(cpv_all)
- meta_keys = self._update_keys + ['repository', 'EAPI']
+ meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys
if onProgress:
onProgress(maxval, 0)
for i, cpv in enumerate(cpv_all):
- metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
- eapi = metadata.pop('EAPI')
- repository = metadata.pop('repository')
try:
- updates = allupdates[repository]
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in self._update_keys)
+ try:
+ updates = allupdates[pkg.repo]
except KeyError:
try:
updates = allupdates['DEFAULT']
@@ -99,7 +117,7 @@ class MoveHandler(object):
if not updates:
continue
metadata_updates = \
- portage.update_dbentries(updates, metadata, eapi=eapi)
+ portage.update_dbentries(updates, metadata, parent=pkg)
if metadata_updates:
errors.append("'%s' has outdated metadata" % cpv)
if onProgress:
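
A small illustration of the atom side of the slotmove branch above, assuming the Atom API of this portage version: with_slot() derives a slot-qualified atom from an existing one, which then restricts match() to that slot (the atom string is an example):

	from portage.dep import Atom

	atom = Atom("dev-libs/foo")
	print(atom.with_slot("0"))  # dev-libs/foo:0
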
diff --git a/pym/portage/emaint/modules/resume/__init__.py b/pym/portage/emaint/modules/resume/__init__.py
index 60cffe9db..965e8f945 100644
--- a/pym/portage/emaint/modules/resume/__init__.py
+++ b/pym/portage/emaint/modules/resume/__init__.py
@@ -1,15 +1,13 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Cleaning the "emerge --resume" lists
+"""Check and fix problems in the resume and/or resume_backup files.
"""
module_spec = {
'name': 'resume',
- 'description': "Provides functions to scan, check and fix problems " +\
- "in the resume and/or resume_backup files",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "cleanresume",
diff --git a/pym/portage/emaint/modules/world/__init__.py b/pym/portage/emaint/modules/world/__init__.py
index 103b5c5ba..3f62270ee 100644
--- a/pym/portage/emaint/modules/world/__init__.py
+++ b/pym/portage/emaint/modules/world/__init__.py
@@ -1,20 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Fixing problems with the "world" file.
+"""Check and fix problems in the world file.
"""
module_spec = {
'name': 'world',
- 'description': "Provides functions to scan, " +
- "check and fix problems in the world file",
+ 'description': __doc__,
'provides':{
'module1':{
'name': "world",
'class': "WorldHandler",
- 'description': "Fix problems in the world file",
+ 'description': __doc__,
'functions': ['check', 'fix'],
'func_desc': {}
}
diff --git a/pym/portage/env/loaders.py b/pym/portage/env/loaders.py
index 372bc12fa..f86988471 100644
--- a/pym/portage/env/loaders.py
+++ b/pym/portage/env/loaders.py
@@ -1,10 +1,14 @@
# config.py -- Portage Config
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
import io
import stat
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
from portage import os
from portage import _encodings
from portage import _unicode_decode
@@ -149,17 +153,21 @@ class FileLoader(DataLoader):
func = self.lineParser
for fn in RecursiveFileLoader(self.fname):
try:
- f = io.open(_unicode_encode(fn,
+ with io.open(_unicode_encode(fn,
encoding=_encodings['fs'], errors='strict'), mode='r',
- encoding=_encodings['content'], errors='replace')
+ encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
except EnvironmentError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
+ if e.errno == errno.EACCES:
+ writemsg(_("Permission denied: '%s'\n") % fn, noiselevel=-1)
+ del e
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ del e
+ else:
raise
- del e
- continue
- for line_num, line in enumerate(f):
- func(line, line_num, data, errors)
- f.close()
+ else:
+ for line_num, line in enumerate(lines):
+ func(line, line_num, data, errors)
return (data, errors)
def lineParser(self, line, line_num, data, errors):
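
The restructured loader above makes the error policy explicit: EACCES is reported, ENOENT and ESTALE are silently skipped (vanished files, stale NFS handles), anything else propagates, and parsing runs only when the read succeeded. A condensed, hypothetical equivalent:

	import errno
	import io

	def read_lines(fn):
		try:
			with io.open(fn, mode="r", encoding="utf-8", errors="replace") as f:
				return f.readlines()
		except EnvironmentError as e:
			if e.errno == errno.EACCES:
				print("Permission denied: '%s'" % fn)
			elif e.errno not in (errno.ENOENT, errno.ESTALE):
				raise
		return []
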
diff --git a/pym/portage/exception.py b/pym/portage/exception.py
index 5ccd750ab..6fa5447a7 100644
--- a/pym/portage/exception.py
+++ b/pym/portage/exception.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import signal
@@ -7,30 +7,40 @@ from portage import _encodings, _unicode_encode, _unicode_decode
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class PortageException(Exception):
"""General superclass for portage exceptions"""
- def __init__(self,value):
- self.value = value[:]
- if isinstance(self.value, basestring):
- self.value = _unicode_decode(self.value,
- encoding=_encodings['content'], errors='replace')
+ if sys.hexversion >= 0x3000000:
+ def __init__(self, value):
+ self.value = value[:]
- def __str__(self):
- if isinstance(self.value, basestring):
- return self.value
- else:
- return _unicode_decode(repr(self.value),
- encoding=_encodings['content'], errors='replace')
-
- if sys.hexversion < 0x3000000:
-
- __unicode__ = __str__
+ def __str__(self):
+ if isinstance(self.value, str):
+ return self.value
+ else:
+ return repr(self.value)
+ else:
+ def __init__(self, value):
+ self.value = value[:]
+ if isinstance(self.value, basestring):
+ self.value = _unicode_decode(self.value,
+ encoding=_encodings['content'], errors='replace')
+
+ def __unicode__(self):
+ if isinstance(self.value, unicode):
+ return self.value
+ else:
+ return _unicode_decode(repr(self.value),
+ encoding=_encodings['content'], errors='replace')
def __str__(self):
- return _unicode_encode(self.__unicode__(),
- encoding=_encodings['content'], errors='backslashreplace')
+ if isinstance(self.value, unicode):
+ return _unicode_encode(self.value,
+ encoding=_encodings['content'], errors='backslashreplace')
+ else:
+ return repr(self.value)
class CorruptionError(PortageException):
"""Corruption indication"""
@@ -75,20 +85,20 @@ class DirectoryNotFound(InvalidLocation):
"""A directory was not found when it was expected to exist"""
class OperationNotPermitted(PortageException):
- from errno import EPERM as errno
"""An operation was not permitted operating system"""
+ from errno import EPERM as errno
class OperationNotSupported(PortageException):
- from errno import EOPNOTSUPP as errno
"""Operation not supported"""
+ from errno import EOPNOTSUPP as errno
class PermissionDenied(PortageException):
- from errno import EACCES as errno
"""Permission denied"""
+ from errno import EACCES as errno
class TryAgain(PortageException):
- from errno import EAGAIN as errno
"""Try again"""
+ from errno import EAGAIN as errno
class TimeoutException(PortageException):
"""Operation timed out"""
diff --git a/pym/portage/getbinpkg.py b/pym/portage/getbinpkg.py
index 212f78889..14dc149b1 100644
--- a/pym/portage/getbinpkg.py
+++ b/pym/portage/getbinpkg.py
@@ -1,7 +1,9 @@
# getbinpkg.py -- Portage binary-package helper functions
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
from portage.output import colorize
from portage.cache.mappings import slot_dict_class
from portage.localization import _
@@ -18,6 +20,7 @@ import socket
import time
import tempfile
import base64
+import warnings
_all_errors = [NotImplementedError, ValueError, socket.error]
@@ -39,7 +42,7 @@ except ImportError:
try:
import ftplib
except ImportError as e:
- sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT FTPLIB: ") + str(e) + "\n")
else:
_all_errors.extend(ftplib.all_errors)
@@ -55,24 +58,28 @@ try:
from httplib import ResponseNotReady as http_client_ResponseNotReady
from httplib import error as http_client_error
except ImportError as e:
- sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT HTTP.CLIENT: ")+str(e)+"\n")
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT HTTP.CLIENT: ") + str(e) + "\n")
else:
_all_errors.append(http_client_error)
_all_errors = tuple(_all_errors)
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
def make_metadata_dict(data):
- myid,myglob = data
+
+ warnings.warn("portage.getbinpkg.make_metadata_dict() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ myid, _myglob = data
mydict = {}
for k_bytes in portage.xpak.getindex_mem(myid):
k = _unicode_decode(k_bytes,
encoding=_encodings['repo.content'], errors='replace')
- if k not in _all_metadata_keys and \
- k != "CATEGORY":
+ if k not in _all_metadata_keys and k != "CATEGORY":
continue
v = _unicode_decode(portage.xpak.getitem(data, k_bytes),
encoding=_encodings['repo.content'], errors='replace')
@@ -84,13 +91,17 @@ class ParseLinks(html_parser_HTMLParser):
"""Parser class that overrides HTMLParser to grab all anchors from an html
	page and provide suffix and prefix limiters"""
def __init__(self):
+
+ warnings.warn("portage.getbinpkg.ParseLinks is deprecated",
+ DeprecationWarning, stacklevel=2)
+
self.PL_anchors = []
html_parser_HTMLParser.__init__(self)
def get_anchors(self):
return self.PL_anchors
- def get_anchors_by_prefix(self,prefix):
+ def get_anchors_by_prefix(self, prefix):
newlist = []
for x in self.PL_anchors:
if x.startswith(prefix):
@@ -98,7 +109,7 @@ class ParseLinks(html_parser_HTMLParser):
newlist.append(x[:])
return newlist
- def get_anchors_by_suffix(self,suffix):
+ def get_anchors_by_suffix(self, suffix):
newlist = []
for x in self.PL_anchors:
if x.endswith(suffix):
@@ -106,10 +117,10 @@ class ParseLinks(html_parser_HTMLParser):
newlist.append(x[:])
return newlist
- def handle_endtag(self,tag):
+ def handle_endtag(self, tag):
pass
- def handle_starttag(self,tag,attrs):
+ def handle_starttag(self, tag, attrs):
if tag == "a":
for x in attrs:
if x[0] == 'href':
@@ -117,16 +128,19 @@ class ParseLinks(html_parser_HTMLParser):
self.PL_anchors.append(urllib_parse_unquote(x[1]))
-def create_conn(baseurl,conn=None):
- """(baseurl,conn) --- Takes a protocol://site:port/address url, and an
+def create_conn(baseurl, conn=None):
+ """Takes a protocol://site:port/address url, and an
optional connection. If connection is already active, it is passed on.
baseurl is reduced to address and is returned in tuple (conn,address)"""
- parts = baseurl.split("://",1)
+ warnings.warn("portage.getbinpkg.create_conn() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ parts = baseurl.split("://", 1)
if len(parts) != 2:
raise ValueError(_("Provided URI does not "
"contain protocol identifier. '%s'") % baseurl)
- protocol,url_parts = parts
+ protocol, url_parts = parts
del parts
url_parts = url_parts.split("/")
@@ -137,7 +151,7 @@ def create_conn(baseurl,conn=None):
address = "/"+"/".join(url_parts[1:])
del url_parts
- userpass_host = host.split("@",1)
+ userpass_host = host.split("@", 1)
if len(userpass_host) == 1:
host = userpass_host[0]
userpass = ["anonymous"]
@@ -196,10 +210,10 @@ def create_conn(baseurl,conn=None):
host = host[:-1]
conn = ftplib.FTP(host)
if password:
- conn.login(username,password)
+ conn.login(username, password)
else:
sys.stderr.write(colorize("WARN",
- _(" * No password provided for username"))+" '%s'" % \
+ _(" * No password provided for username")) + " '%s'" % \
(username,) + "\n\n")
conn.login(username)
conn.set_pasv(passive)
@@ -216,11 +230,15 @@ def create_conn(baseurl,conn=None):
else:
raise NotImplementedError(_("%s is not a supported protocol.") % protocol)
- return (conn,protocol,address, http_params, http_headers)
+ return (conn, protocol, address, http_params, http_headers)
def make_ftp_request(conn, address, rest=None, dest=None):
- """(conn,address,rest) --- uses the conn object to request the data
+ """Uses the |conn| object to request the data
	from address, issuing a REST command if one is passed."""
+
+ warnings.warn("portage.getbinpkg.make_ftp_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
try:
if dest:
@@ -235,9 +253,9 @@ def make_ftp_request(conn, address, rest=None, dest=None):
rest = 0
if rest != None:
- mysocket = conn.transfercmd("RETR "+str(address), rest)
+ mysocket = conn.transfercmd("RETR %s" % str(address), rest)
else:
- mysocket = conn.transfercmd("RETR "+str(address))
+ mysocket = conn.transfercmd("RETR %s" % str(address))
mydata = ""
while 1:
@@ -259,28 +277,31 @@ def make_ftp_request(conn, address, rest=None, dest=None):
conn.voidresp()
conn.voidcmd("TYPE A")
- return mydata,not (fsize==data_size),""
+ return mydata, (fsize != data_size), ""
except ValueError as e:
- return None,int(str(e)[:4]),str(e)
+ return None, int(str(e)[:4]), str(e)
-def make_http_request(conn, address, params={}, headers={}, dest=None):
- """(conn,address,params,headers) --- uses the conn object to request
+def make_http_request(conn, address, _params={}, headers={}, dest=None):
+ """Uses the |conn| object to request
the data from address, performing Location forwarding and using the
optional params and headers."""
+ warnings.warn("portage.getbinpkg.make_http_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
rc = 0
response = None
while (rc == 0) or (rc == 301) or (rc == 302):
try:
- if (rc != 0):
- conn,ignore,ignore,ignore,ignore = create_conn(address)
+ if rc != 0:
+ conn = create_conn(address)[0]
conn.request("GET", address, body=None, headers=headers)
except SystemExit as e:
raise
except Exception as e:
- return None,None,"Server request failed: "+str(e)
+ return None, None, "Server request failed: %s" % str(e)
response = conn.getresponse()
rc = response.status
@@ -289,7 +310,7 @@ def make_http_request(conn, address, params={}, headers={}, dest=None):
ignored_data = response.read()
del ignored_data
for x in str(response.msg).split("\n"):
- parts = x.split(": ",1)
+ parts = x.split(": ", 1)
if parts[0] == "Location":
if (rc == 301):
sys.stderr.write(colorize("BAD",
@@ -302,16 +323,20 @@ def make_http_request(conn, address, params={}, headers={}, dest=None):
break
if (rc != 200) and (rc != 206):
- return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
+ return None, rc, "Server did not respond successfully (%s: %s)" % (str(response.status), str(response.reason))
if dest:
dest.write(response.read())
- return "",0,""
+ return "", 0, ""
- return response.read(),0,""
+ return response.read(), 0, ""
def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+
+ warnings.warn("portage.getbinpkg.match_in_array() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
myarray = []
if not (prefix and suffix):
@@ -344,20 +369,22 @@ def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
continue # Doesn't match.
return myarray
-
-def dir_get_list(baseurl,conn=None):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+def dir_get_list(baseurl, conn=None):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://<site>[:port]<path>
Connection is used for persistent connection instances."""
+ warnings.warn("portage.getbinpkg.dir_get_list() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
keepconnection = 1
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
listing = None
if protocol in ["http","https"]:
@@ -365,7 +392,7 @@ def dir_get_list(baseurl,conn=None):
# http servers can return a 400 error here
# if the address doesn't end with a slash.
address += "/"
- page,rc,msg = make_http_request(conn,address,params,headers)
+ page, rc, msg = make_http_request(conn, address, params, headers)
if page:
parser = ParseLinks()
@@ -395,23 +422,26 @@ def dir_get_list(baseurl,conn=None):
return listing
-def file_get_metadata(baseurl,conn=None, chunk_size=3000):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+def file_get_metadata(baseurl, conn=None, chunk_size=3000):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://<site>[:port]<path>
Connection is used for persistent connection instances."""
+ warnings.warn("portage.getbinpkg.file_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
keepconnection = 1
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
if protocol in ["http","https"]:
- headers["Range"] = "bytes=-"+str(chunk_size)
- data,rc,msg = make_http_request(conn, address, params, headers)
+ headers["Range"] = "bytes=-%s" % str(chunk_size)
+ data, _x, _x = make_http_request(conn, address, params, headers)
elif protocol in ["ftp"]:
- data,rc,msg = make_ftp_request(conn, address, -chunk_size)
+ data, _x, _x = make_ftp_request(conn, address, -chunk_size)
elif protocol == "sftp":
f = conn.open(address)
try:
@@ -424,21 +454,21 @@ def file_get_metadata(baseurl,conn=None, chunk_size=3000):
if data:
xpaksize = portage.xpak.decodeint(data[-8:-4])
- if (xpaksize+8) > chunk_size:
- myid = file_get_metadata(baseurl, conn, (xpaksize+8))
+ if (xpaksize + 8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, xpaksize + 8)
if not keepconnection:
conn.close()
return myid
else:
- xpak_data = data[len(data)-(xpaksize+8):-8]
+ xpak_data = data[len(data) - (xpaksize + 8):-8]
del data
myid = portage.xpak.xsplit_mem(xpak_data)
if not myid:
- myid = None,None
+ myid = None, None
del xpak_data
else:
- myid = None,None
+ myid = None, None
if not keepconnection:
conn.close()
@@ -446,53 +476,79 @@ def file_get_metadata(baseurl,conn=None, chunk_size=3000):
return myid
-def file_get(baseurl,dest,conn=None,fcmd=None,filename=None):
- """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
+def file_get(baseurl=None, dest=None, conn=None, fcmd=None, filename=None,
+ fcmd_vars=None):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
if not fcmd:
- return file_get_lib(baseurl,dest,conn)
- if not filename:
- filename = os.path.basename(baseurl)
-
- variables = {
- "DISTDIR": dest,
- "URI": baseurl,
- "FILE": filename
- }
+
+ warnings.warn("Use of portage.getbinpkg.file_get() without the fcmd "
+ "parameter is deprecated", DeprecationWarning, stacklevel=2)
+
+ return file_get_lib(baseurl, dest, conn)
+
+ variables = {}
+
+ if fcmd_vars is not None:
+ variables.update(fcmd_vars)
+
+ if "DISTDIR" not in variables:
+ if dest is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "DISTDIR"))
+ variables["DISTDIR"] = dest
+
+ if "URI" not in variables:
+ if baseurl is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "URI"))
+ variables["URI"] = baseurl
+
+ if "FILE" not in variables:
+ if filename is None:
+ filename = os.path.basename(variables["URI"])
+ variables["FILE"] = filename
from portage.util import varexpand
from portage.process import spawn
myfetch = portage.util.shlex_split(fcmd)
myfetch = [varexpand(x, mydict=variables) for x in myfetch]
- fd_pipes= {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stdout.fileno()
+ fd_pipes = {
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stdout__.fileno(),
+ 2: sys.__stdout__.fileno()
}
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
if retval != os.EX_OK:
sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
return 0
return 1
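
The fcmd branch of file_get() above expands the DISTDIR/URI/FILE placeholders into the user's fetch command before spawning it; the substitution step in isolation (the command string and values are examples, shlex_split and varexpand are the real portage.util helpers):

	from portage.util import shlex_split, varexpand

	fcmd = 'wget -O "${DISTDIR}/${FILE}" "${URI}"'
	variables = {
		"DISTDIR": "/var/cache/binpkgs",
		"URI": "http://example.org/Packages",
		"FILE": "Packages",
	}
	myfetch = [varexpand(x, mydict=variables) for x in shlex_split(fcmd)]
	print(myfetch)
	# ['wget', '-O', '/var/cache/binpkgs/Packages', 'http://example.org/Packages']
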
-def file_get_lib(baseurl,dest,conn=None):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+def file_get_lib(baseurl, dest, conn=None):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://<site>[:port]<path>
Connection is used for persistent connection instances."""
+ warnings.warn("portage.getbinpkg.file_get_lib() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
keepconnection = 1
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
- sys.stderr.write("Fetching '"+str(os.path.basename(address)+"'\n"))
- if protocol in ["http","https"]:
- data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
+ sys.stderr.write("Fetching '" + str(os.path.basename(address)) + "'\n")
+ if protocol in ["http", "https"]:
+ data, rc, _msg = make_http_request(conn, address, params, headers, dest=dest)
elif protocol in ["ftp"]:
- data,rc,msg = make_ftp_request(conn, address, dest=dest)
+ data, rc, _msg = make_ftp_request(conn, address, dest=dest)
elif protocol == "sftp":
rc = 0
try:
@@ -522,8 +578,10 @@ def file_get_lib(baseurl,dest,conn=None):
def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
- """(baseurl,conn,chunk_size,verbose) --
- """
+
+ warnings.warn("portage.getbinpkg.dir_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
@@ -536,7 +594,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
makepickle = "/var/cache/edb/metadata.idx.most_recent"
try:
- conn, protocol, address, params, headers = create_conn(baseurl, conn)
+ conn = create_conn(baseurl, conn)[0]
except _all_errors as e:
# ftplib.FTP(host) can raise errors like this:
# socket.error: (111, 'Connection refused')
@@ -557,18 +615,20 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
out.write(_("Loaded metadata pickle.\n"))
out.flush()
metadatafile.close()
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError):
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception:
metadata = {}
if baseurl not in metadata:
- metadata[baseurl]={}
+ metadata[baseurl] = {}
if "indexname" not in metadata[baseurl]:
- metadata[baseurl]["indexname"]=""
+ metadata[baseurl]["indexname"] = ""
if "timestamp" not in metadata[baseurl]:
- metadata[baseurl]["timestamp"]=0
+ metadata[baseurl]["timestamp"] = 0
if "unmodified" not in metadata[baseurl]:
- metadata[baseurl]["unmodified"]=0
+ metadata[baseurl]["unmodified"] = 0
if "data" not in metadata[baseurl]:
- metadata[baseurl]["data"]={}
+ metadata[baseurl]["data"] = {}
if not os.access(cache_path, os.W_OK):
sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
@@ -594,36 +654,36 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
for mfile in metalist:
if usingcache and \
((metadata[baseurl]["indexname"] != mfile) or \
- (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
+ (metadata[baseurl]["timestamp"] < int(time.time() - (60 * 60 * 24)))):
# Try to download new cache until we succeed on one.
- data=""
- for trynum in [1,2,3]:
+ data = ""
+ for trynum in [1, 2, 3]:
mytempfile = tempfile.TemporaryFile()
try:
- file_get(baseurl+"/"+mfile, mytempfile, conn)
+ file_get(baseurl + "/" + mfile, mytempfile, conn)
if mytempfile.tell() > len(data):
mytempfile.seek(0)
data = mytempfile.read()
except ValueError as e:
- sys.stderr.write("--- "+str(e)+"\n")
+ sys.stderr.write("--- %s\n" % str(e))
if trynum < 3:
sys.stderr.write(_("Retrying...\n"))
sys.stderr.flush()
mytempfile.close()
continue
- if match_in_array([mfile],suffix=".gz"):
+ if match_in_array([mfile], suffix=".gz"):
out.write("gzip'd\n")
out.flush()
try:
import gzip
mytempfile.seek(0)
- gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
+ gzindex = gzip.GzipFile(mfile[:-3], 'rb', 9, mytempfile)
data = gzindex.read()
except SystemExit as e:
raise
except Exception as e:
mytempfile.close()
- sys.stderr.write(_("!!! Failed to use gzip: ")+str(e)+"\n")
+ sys.stderr.write(_("!!! Failed to use gzip: ") + str(e) + "\n")
sys.stderr.flush()
mytempfile.close()
try:
@@ -638,8 +698,8 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
except SystemExit as e:
raise
except Exception as e:
- sys.stderr.write(_("!!! Failed to read data from index: ")+str(mfile)+"\n")
- sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.write(_("!!! Failed to read data from index: ") + str(mfile) + "\n")
+ sys.stderr.write("!!! %s" % str(e))
sys.stderr.flush()
try:
metadatafile = open(_unicode_encode(metadatafilename,
@@ -650,7 +710,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
raise
except Exception as e:
sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
- sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.write("!!! %s\n" % str(e))
sys.stderr.flush()
break
# We may have metadata... now we run through the tbz2 list and check.
@@ -670,8 +730,8 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
self.display()
def display(self):
self.out.write("\r"+colorize("WARN",
- _("cache miss: '")+str(self.misses)+"'") + \
- " --- "+colorize("GOOD", _("cache hit: '")+str(self.hits)+"'"))
+ _("cache miss: '") + str(self.misses) + "'") + \
+ " --- " + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'"))
self.out.flush()
cache_stats = CacheStats(out)
@@ -688,7 +748,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
cache_stats.update()
metadata[baseurl]["modified"] = 1
myid = None
- for retry in range(3):
+ for _x in range(3):
try:
myid = file_get_metadata(
"/".join((baseurl.rstrip("/"), x.lstrip("/"))),
@@ -699,22 +759,20 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
# make_http_request(). The docstring for this error in
# httplib.py says "Presumably, the server closed the
# connection before sending a valid response".
- conn, protocol, address, params, headers = create_conn(
- baseurl)
+ conn = create_conn(baseurl)[0]
except http_client_ResponseNotReady:
# With some http servers this error is known to be thrown
# from conn.getresponse() in make_http_request() when the
# remote file does not have appropriate read permissions.
# Maybe it's possible to recover from this exception in
# some cases though, so retry.
- conn, protocol, address, params, headers = create_conn(
- baseurl)
+ conn = create_conn(baseurl)[0]
if myid and myid[0]:
metadata[baseurl]["data"][x] = make_metadata_dict(myid)
elif verbose:
sys.stderr.write(colorize("BAD",
- _("!!! Failed to retrieve metadata on: "))+str(x)+"\n")
+ _("!!! Failed to retrieve metadata on: ")) + str(x) + "\n")
sys.stderr.flush()
else:
cache_stats.hits += 1
@@ -861,7 +919,6 @@ class PackageIndex(object):
for metadata in sorted(self.packages,
key=portage.util.cmp_sort_key(_cmp_cpv)):
metadata = metadata.copy()
- cpv = metadata["CPV"]
if self._inherited_keys:
for k in self._inherited_keys:
v = self.header.get(k)
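
Note on the dir_get_metadata() retry hunk above: the pattern is simply
"rebuild the connection and try again, at most three times". A minimal
standalone sketch of that shape -- fetch(), reconnect() and
StaleConnection are stand-ins for file_get_metadata(), create_conn()
and the httplib error classes, not this module's actual API:

    class StaleConnection(Exception):
        """Stand-in for http_client_BadStatusLine/ResponseNotReady."""

    def fetch_with_retry(fetch, reconnect, attempts=3):
        # Mirror of the loop above: on a stale connection, rebuild
        # it and retry; give up (returning None) after the last try.
        conn = reconnect()
        for _x in range(attempts):
            try:
                return fetch(conn)
            except StaleConnection:
                conn = reconnect()
        return None
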
diff --git a/pym/portage/glsa.py b/pym/portage/glsa.py
index 185769574..834572ac7 100644
--- a/pym/portage/glsa.py
+++ b/pym/portage/glsa.py
@@ -1,7 +1,7 @@
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals
import io
import sys
@@ -9,23 +9,27 @@ try:
from urllib.request import urlopen as urllib_request_urlopen
except ImportError:
from urllib import urlopen as urllib_request_urlopen
+import codecs
import re
+import operator
import xml.dom.minidom
+from io import StringIO
+from functools import reduce
import portage
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
-from portage.versions import pkgsplit, vercmp, best
+from portage.versions import pkgsplit, vercmp
from portage.util import grabfile
-from portage.const import CACHE_PATH
+from portage.const import PRIVATE_PATH
from portage.localization import _
from portage.dep import _slot_separator
# Note: the space for rgt and rlt is important !!
# FIXME: use slot deps instead, requires GLSA format versioning
-opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=",
+opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=",
"rge": ">=~", "rle": "<=~", "rgt": " >~", "rlt": " <~"}
NEWLINE_ESCAPE = "!;\\n" # some random string to mark newlines that should be preserved
SPACE_ESCAPE = "!;_" # some random string to mark spaces that should be preserved
@@ -39,22 +43,22 @@ def get_applied_glsas(settings):
@rtype: list
@return: list of glsa IDs
"""
- return grabfile(os.path.join(settings["EROOT"], CACHE_PATH, "glsa"))
+ return grabfile(os.path.join(settings["EROOT"], PRIVATE_PATH, "glsa_injected"))
# TODO: use the textwrap module instead
def wrap(text, width, caption=""):
"""
Wraps the given text at column I{width}, optionally indenting
- it so that no text is under I{caption}. It's possible to encode
+ it so that no text is under I{caption}. It's possible to encode
hard linebreaks in I{text} with L{NEWLINE_ESCAPE}.
-
+
@type text: String
@param text: the text to be wrapped
@type width: Integer
@param width: the column at which the text should be wrapped
@type caption: String
- @param caption: this string is inserted at the beginning of the
+ @param caption: this string is inserted at the beginning of the
return value and the paragraph is indented up to
C{len(caption)}.
@rtype: String
@@ -65,7 +69,7 @@ def wrap(text, width, caption=""):
text = text.replace(2*NEWLINE_ESCAPE, NEWLINE_ESCAPE+" "+NEWLINE_ESCAPE)
words = text.split()
indentLevel = len(caption)+1
-
+
for w in words:
if line != "" and line[-1] == "\n":
rValue += line
@@ -94,10 +98,10 @@ def get_glsa_list(myconfig):
Returns a list of all available GLSAs in the given repository
by comparing the filelist there with the pattern described in
the config.
-
+
@type myconfig: portage.config
@param myconfig: Portage settings instance
-
+
@rtype: List of Strings
@return: a list of GLSA IDs in this repository
"""
@@ -113,10 +117,10 @@ def get_glsa_list(myconfig):
dirlist = os.listdir(repository)
prefix = "glsa-"
suffix = ".xml"
-
+
for f in dirlist:
try:
- if f[:len(prefix)] == prefix:
+ if f[:len(prefix)] == prefix and f[-1*len(suffix):] == suffix:
rValue.append(f[len(prefix):-1*len(suffix)])
except IndexError:
pass
@@ -125,22 +129,20 @@ def get_glsa_list(myconfig):
def getListElements(listnode):
"""
Get all <li> elements for a given <ol> or <ul> node.
-
+
@type listnode: xml.dom.Node
@param listnode: <ul> or <ol> list to get the elements for
@rtype: List of Strings
@return: a list that contains the value of the <li> elements
"""
- rValue = []
if not listnode.nodeName in ["ul", "ol"]:
raise GlsaFormatException("Invalid function call: listnode is not <ul> or <ol>")
- for li in listnode.childNodes:
- if li.nodeType != xml.dom.Node.ELEMENT_NODE:
- continue
- rValue.append(getText(li, format="strip"))
+ rValue = [getText(li, format="strip") \
+ for li in listnode.childNodes \
+ if li.nodeType == xml.dom.Node.ELEMENT_NODE]
return rValue
-def getText(node, format):
+def getText(node, format, textfd=None):
"""
This is the main parser function. It takes a node and traverses
recursively over the subnodes, getting the text of each (and the
@@ -148,7 +150,7 @@ def getText(node, format):
parameter the text might be formatted by adding/removing newlines,
tabs and spaces. This function is only useful for the GLSA DTD,
it's not applicable for other DTDs.
-
+
@type node: xml.dom.Node
@param node: the root node to start with the parsing
@type format: String
@@ -158,45 +160,54 @@ def getText(node, format):
replaces multiple spaces with one space.
I{xml} does some more formatting, depending on the
type of the encountered nodes.
+ @type textfd: writable file-like object
+ @param textfd: the file-like object to write the output to
@rtype: String
@return: the (formatted) content of the node and its subnodes
+ or None if textfd was given
"""
- rValue = ""
+ if not textfd:
+ textfd = StringIO()
+ returnNone = False
+ else:
+ returnNone = True
if format in ["strip", "keep"]:
if node.nodeName in ["uri", "mail"]:
- rValue += node.childNodes[0].data+": "+node.getAttribute("link")
+ textfd.write(node.childNodes[0].data+": "+node.getAttribute("link"))
else:
for subnode in node.childNodes:
if subnode.nodeName == "#text":
- rValue += subnode.data
+ textfd.write(subnode.data)
else:
- rValue += getText(subnode, format)
- else:
+ getText(subnode, format, textfd)
+ else: # format = "xml"
for subnode in node.childNodes:
if subnode.nodeName == "p":
for p_subnode in subnode.childNodes:
if p_subnode.nodeName == "#text":
- rValue += p_subnode.data.strip()
+ textfd.write(p_subnode.data.strip())
elif p_subnode.nodeName in ["uri", "mail"]:
- rValue += p_subnode.childNodes[0].data
- rValue += " ( "+p_subnode.getAttribute("link")+" )"
- rValue += NEWLINE_ESCAPE
+ textfd.write(p_subnode.childNodes[0].data)
+ textfd.write(" ( "+p_subnode.getAttribute("link")+" )")
+ textfd.write(NEWLINE_ESCAPE)
elif subnode.nodeName == "ul":
for li in getListElements(subnode):
- rValue += "-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+ textfd.write("-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
elif subnode.nodeName == "ol":
i = 0
for li in getListElements(subnode):
i = i+1
- rValue += str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+ textfd.write(str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
elif subnode.nodeName == "code":
- rValue += getText(subnode, format="keep").replace("\n", NEWLINE_ESCAPE)
- if rValue[-1*len(NEWLINE_ESCAPE):] != NEWLINE_ESCAPE:
- rValue += NEWLINE_ESCAPE
+ textfd.write(getText(subnode, format="keep").lstrip().replace("\n", NEWLINE_ESCAPE))
+ textfd.write(NEWLINE_ESCAPE)
elif subnode.nodeName == "#text":
- rValue += subnode.data
+ textfd.write(subnode.data)
else:
raise GlsaFormatException(_("Invalid Tag found: "), subnode.nodeName)
+ if returnNone:
+ return None
+ rValue = textfd.getvalue()
if format == "strip":
rValue = rValue.strip(" \n\t")
rValue = re.sub("[\s]{2,}", " ", rValue)
@@ -206,7 +217,7 @@ def getMultiTagsText(rootnode, tagname, format):
"""
Returns a list with the text of all subnodes of type I{tagname}
under I{rootnode} (which itself is not parsed) using the given I{format}.
-
+
@type rootnode: xml.dom.Node
@param rootnode: the node to search for I{tagname}
@type tagname: String
@@ -216,16 +227,15 @@ def getMultiTagsText(rootnode, tagname, format):
@rtype: List of Strings
@return: a list containing the text of all I{tagname} childnodes
"""
- rValue = []
- for e in rootnode.getElementsByTagName(tagname):
- rValue.append(getText(e, format))
+ rValue = [getText(e, format) \
+ for e in rootnode.getElementsByTagName(tagname)]
return rValue
def makeAtom(pkgname, versionNode):
"""
- creates from the given package name and information in the
+ creates from the given package name and information in the
I{versionNode} a (syntactically) valid portage atom.
-
+
@type pkgname: String
@param pkgname: the name of the package for this atom
@type versionNode: xml.dom.Node
@@ -248,9 +258,9 @@ def makeAtom(pkgname, versionNode):
def makeVersion(versionNode):
"""
- creates from the information in the I{versionNode} a
+ creates from the information in the I{versionNode} a
version string (format <op><version>).
-
+
@type versionNode: xml.dom.Node
@param versionNode: a <vulnerable> or <unaffected> Node that
contains the version information for this atom
@@ -270,17 +280,17 @@ def makeVersion(versionNode):
def match(atom, dbapi, match_type="default"):
"""
- wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on
+ wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on
the given atom.
-
+
@type atom: string
@param atom: a <~ or >~ atom or a normal portage atom that contains the atom to match against
@type dbapi: portage.dbapi.dbapi
@param dbapi: one of the portage databases to use as information source
@type match_type: string
- @param match_type: if != "default" passed as first argument to dbapi.xmatch
+ @param match_type: if != "default" passed as first argument to dbapi.xmatch
to apply the wanted visibility filters
-
+
@rtype: list of strings
@return: a list with the matching versions
"""
@@ -296,15 +306,15 @@ def revisionMatch(revisionAtom, dbapi, match_type="default"):
handler for the special >~, >=~, <=~ and <~ atoms that are supposed to behave
as > and < except that they are limited to the same version, the range only
applies to the revision part.
-
+
@type revisionAtom: string
@param revisionAtom: a <~ or >~ atom that contains the atom to match against
@type dbapi: portage.dbapi.dbapi
@param dbapi: one of the portage databases to use as information source
@type match_type: string
- @param match_type: if != "default" passed as first argument to portdb.xmatch
+ @param match_type: if != "default" passed as first argument to portdb.xmatch
to apply the wanted visibility filters
-
+
@rtype: list of strings
@return: a list with the matching versions
"""
@@ -325,18 +335,19 @@ def revisionMatch(revisionAtom, dbapi, match_type="default"):
if eval(r1+" "+revisionAtom[0:2]+" "+r2):
rValue.append(v)
return rValue
-
+
def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=True):
"""
Checks if the system state matches an atom in
I{vulnerableList} and returns a string describing
- the lowest version for the package that matches an atom in
+ the lowest version for the package that matches an atom in
I{unaffectedList} and is greater than the currently installed
- version or None if the system is not affected. Both
- I{vulnerableList} and I{unaffectedList} should have the
+ version. It will return an empty list if the system is affected
+ and no upgrade is possible, or None if the system is not affected.
+ Both I{vulnerableList} and I{unaffectedList} should have the
same base package.
-
+
@type vulnerableList: List of Strings
@param vulnerableList: atoms matching vulnerable package versions
@type unaffectedList: List of Strings
@@ -347,46 +358,51 @@ def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=
@param vardbapi: Installed package repository
@type minimize: Boolean
@param minimize: True for a least-change upgrade, False for emerge-like algorithm
-
+
@rtype: String | None
@return: the lowest unaffected version that is greater than
the installed version.
- """
- rValue = None
- v_installed = []
- u_installed = []
- for v in vulnerableList:
- v_installed += match(v, vardbapi)
+ """
+ rValue = ""
+ v_installed = reduce(operator.add, [match(v, vardbapi) for v in vulnerableList], [])
+ u_installed = reduce(operator.add, [match(u, vardbapi) for u in unaffectedList], [])
- for u in unaffectedList:
- u_installed += match(u, vardbapi)
-
- install_unaffected = True
- for i in v_installed:
- if i not in u_installed:
- install_unaffected = False
+ # remove all unaffected atoms from vulnerable list
+ v_installed = list(set(v_installed).difference(set(u_installed)))
- if install_unaffected:
- return rValue
-
+ if not v_installed:
+ return None
+
+ # this tuple holds all vulnerable atoms, and the related upgrade atom
+ vuln_update = []
+ avail_updates = set()
for u in unaffectedList:
- mylist = match(u, portdbapi, match_type="match-all")
- for c in mylist:
- i = best(v_installed)
- if vercmp(c.version, i.version) > 0 \
- and (rValue == None \
- or not match("="+rValue, portdbapi) \
- or (minimize ^ (vercmp(c.version, rValue.version) > 0)) \
- and match("="+c, portdbapi)) \
- and portdbapi.aux_get(c, ["SLOT"]) == vardbapi.aux_get(best(v_installed), ["SLOT"]):
- rValue = c
- return rValue
+ # TODO: This had match_type="match-all" before. I don't think it should
+ # since we disregarded masked items later anyway (match(=rValue, "porttree"))
+ avail_updates.update(match(u, portdbapi))
+ # if an atom is already installed, we should not consider it for upgrades
+ avail_updates.difference_update(u_installed)
+
+ for vuln in v_installed:
+ update = ""
+ for c in avail_updates:
+ c_pv = portage.catpkgsplit(c)
+ if vercmp(c.version, vuln.version) > 0 \
+ and (update == "" \
+ or (minimize ^ (vercmp(c.version, update.version) > 0))) \
+ and portdbapi._pkg_str(c, None).slot == vardbapi._pkg_str(vuln, None).slot:
+ update = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2]
+ if c_pv[3] != "r0": # we don't like -r0 for display
+ update += "-"+c_pv[3]
+ vuln_update.append([vuln, update])
+
+ return vuln_update
def format_date(datestr):
"""
Takes a date (announced, revised) date from a GLSA and formats
it as readable text (i.e. "January 1, 2008").
-
+
@type date: String
@param date: the date string to reformat
@rtype: String
@@ -396,16 +412,16 @@ def format_date(datestr):
splitdate = datestr.split("-", 2)
if len(splitdate) != 3:
return datestr
-
+
# This cannot raise an error as we use () instead of []
splitdate = (int(x) for x in splitdate)
-
+
from datetime import date
try:
d = date(*splitdate)
except ValueError:
return datestr
-
+
# TODO We could format to local date format '%x' here?
return _unicode_decode(d.strftime("%B %d, %Y"),
encoding=_encodings['content'], errors='replace')
@@ -417,7 +433,7 @@ class GlsaTypeException(Exception):
class GlsaFormatException(Exception):
pass
-
+
class GlsaArgumentException(Exception):
pass
@@ -429,9 +445,9 @@ class Glsa:
"""
def __init__(self, myid, myconfig, vardbapi, portdbapi):
"""
- Simple constructor to set the ID, store the config and gets the
+ Simple constructor to set the ID, store the config and gets the
XML data by calling C{self.read()}.
-
+
@type myid: String
@param myid: String describing the id for the GLSA object (standard
GLSAs have an ID of the form YYYYMM-nn) or an existing
@@ -461,7 +477,7 @@ class Glsa:
"""
Here we build the filename from the config and the ID and pass
it to urllib to fetch it from the filesystem or a remote server.
-
+
@rtype: None
@return: None
"""
@@ -473,15 +489,21 @@ class Glsa:
myurl = "file://"+self.nr
else:
myurl = repository + "glsa-%s.xml" % str(self.nr)
- self.parse(urllib_request_urlopen(myurl))
+
+ f = urllib_request_urlopen(myurl)
+ try:
+ self.parse(f)
+ finally:
+ f.close()
+
return None
def parse(self, myfile):
"""
- This method parses the XML file and sets up the internal data
+ This method parses the XML file and sets up the internal data
structures by calling the different helper functions in this
module.
-
+
@type myfile: String
@param myfile: Filename to grab the XML data from
@rtype: None
@@ -504,27 +526,27 @@ class Glsa:
self.title = getText(myroot.getElementsByTagName("title")[0], format="strip")
self.synopsis = getText(myroot.getElementsByTagName("synopsis")[0], format="strip")
self.announced = format_date(getText(myroot.getElementsByTagName("announced")[0], format="strip"))
-
- count = 1
+
# Support both formats of revised:
# <revised>December 30, 2007: 02</revised>
# <revised count="2">2007-12-30</revised>
revisedEl = myroot.getElementsByTagName("revised")[0]
self.revised = getText(revisedEl, format="strip")
- if ((sys.hexversion >= 0x3000000 and "count" in revisedEl.attributes) or
- (sys.hexversion < 0x3000000 and revisedEl.attributes.has_key("count"))):
- count = revisedEl.getAttribute("count")
- elif (self.revised.find(":") >= 0):
- (self.revised, count) = self.revised.split(":")
-
+ count = revisedEl.attributes.get("count")
+ if count is None:
+ if self.revised.find(":") >= 0:
+ (self.revised, count) = self.revised.split(":")
+ else:
+ count = 1
+
self.revised = format_date(self.revised)
-
+
try:
self.count = int(count)
except ValueError:
# TODO should this raise a GlsaFormatException?
self.count = 1
-
+
# now the optional and 0-n toplevel, #PCDATA tags and references
try:
self.access = getText(myroot.getElementsByTagName("access")[0], format="strip")
@@ -532,7 +554,7 @@ class Glsa:
self.access = ""
self.bugs = getMultiTagsText(myroot, "bug", format="strip")
self.references = getMultiTagsText(myroot.getElementsByTagName("references")[0], "uri", format="keep")
-
+
# and now the formatted text elements
self.description = getText(myroot.getElementsByTagName("description")[0], format="xml")
self.workaround = getText(myroot.getElementsByTagName("workaround")[0], format="xml")
@@ -542,7 +564,7 @@ class Glsa:
try:
self.background = getText(myroot.getElementsByTagName("background")[0], format="xml")
except IndexError:
- self.background = ""
+ self.background = ""
# finally the interesting tags (product, affected, package)
self.glsatype = myroot.getElementsByTagName("product")[0].getAttribute("type")
@@ -572,16 +594,18 @@ class Glsa:
self.services = self.affected.getElementsByTagName("service")
return None
- def dump(self, outstream=sys.stdout):
+ def dump(self, outstream=sys.stdout, encoding="utf-8"):
"""
- Dumps a plaintext representation of this GLSA to I{outfile} or
+ Dumps a plaintext representation of this GLSA to I{outstream} or
B{stdout} if it is omitted. You can specify an alternate
- I{encoding} if needed (default is latin1).
-
+ I{encoding} if needed (default is utf-8).
+
@type outstream: File
@param outstream: Stream that should be used for writing
(defaults to sys.stdout)
"""
+ outstream = getattr(outstream, "buffer", outstream)
+ outstream = codecs.getwriter(encoding)(outstream)
width = 76
outstream.write(("GLSA %s: \n%s" % (self.nr, self.title)).center(width)+"\n")
outstream.write((width*"=")+"\n")
@@ -606,30 +630,24 @@ class Glsa:
pass
if len(self.bugs) > 0:
outstream.write(_("\nRelated bugs: "))
- for i in range(0, len(self.bugs)):
- outstream.write(self.bugs[i])
- if i < len(self.bugs)-1:
- outstream.write(", ")
- else:
- outstream.write("\n")
+ outstream.write(", ".join(self.bugs))
+ outstream.write("\n")
if self.background:
outstream.write("\n"+wrap(self.background, width, caption=_("Background: ")))
outstream.write("\n"+wrap(self.description, width, caption=_("Description: ")))
outstream.write("\n"+wrap(self.impact_text, width, caption=_("Impact: ")))
outstream.write("\n"+wrap(self.workaround, width, caption=_("Workaround: ")))
outstream.write("\n"+wrap(self.resolution, width, caption=_("Resolution: ")))
- myreferences = ""
- for r in self.references:
- myreferences += (r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE+" ")
+ myreferences = " ".join(r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE for r in self.references)
outstream.write("\n"+wrap(myreferences, width, caption=_("References: ")))
outstream.write("\n")
-
+
def isVulnerable(self):
"""
Tests if the system is affected by this GLSA by checking if any
vulnerable package versions are installed. Also checks for affected
architectures.
-
+
@rtype: Boolean
@return: True if the system is affected, False if not
"""
@@ -641,56 +659,67 @@ class Glsa:
for v in path["vul_atoms"]:
rValue = rValue \
or (len(match(v, self.vardbapi)) > 0 \
- and getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ and None != getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
self.portdbapi, self.vardbapi))
return rValue
-
- def isApplied(self):
+
+ def isInjected(self):
"""
- Looks if the GLSA IDis in the GLSA checkfile to check if this
- GLSA was already applied.
-
+ Looks up the GLSA ID in the GLSA checkfile to see if this
+ GLSA should be marked as applied.
+
@rtype: Boolean
- @return: True if the GLSA was applied, False if not
+ @return: True if the GLSA is in the inject file, False if not
"""
+ if not os.access(os.path.join(self.config["EROOT"],
+ PRIVATE_PATH, "glsa_injected"), os.R_OK):
+ return False
return (self.nr in get_applied_glsas(self.config))
def inject(self):
"""
Puts the ID of this GLSA into the GLSA checkfile, so it won't
- show up on future checks. Should be called after a GLSA is
+ show up on future checks. Should be called after a GLSA is
applied or on explicit user request.
@rtype: None
@return: None
"""
- if not self.isApplied():
+ if not self.isInjected():
checkfile = io.open(
_unicode_encode(os.path.join(self.config["EROOT"],
- CACHE_PATH, "glsa"),
- encoding=_encodings['fs'], errors='strict'),
+ PRIVATE_PATH, "glsa_injected"),
+ encoding=_encodings['fs'], errors='strict'),
mode='a+', encoding=_encodings['content'], errors='strict')
checkfile.write(_unicode_decode(self.nr + "\n"))
checkfile.close()
return None
-
+
def getMergeList(self, least_change=True):
"""
Returns the list of package-versions that have to be merged to
- apply this GLSA properly. The versions are as low as possible
+ apply this GLSA properly. The versions are as low as possible
while avoiding downgrades (see L{getMinUpgrade}).
-
+
@type least_change: Boolean
@param least_change: True if the smallest possible upgrade should be selected,
False for an emerge-like algorithm
@rtype: List of Strings
@return: list of package-versions that have to be merged
"""
- rValue = []
- for pkg in self.packages:
+ return list(set(update for (vuln, update) in self.getAffectionTable(least_change) if update))
+
+ def getAffectionTable(self, least_change=True):
+ """
+ Builds a list of [vulnerable atom, minimal upgrade] pairs
+ for the packages installed on the system that are affected
+ by this GLSA.
+ """
+ systemAffection = []
+ for pkg in self.packages:
for path in self.packages[pkg]:
- update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"],
self.portdbapi, self.vardbapi, minimize=least_change)
if update:
- rValue.append(update)
- return rValue
+ systemAffection.extend(update)
+ return systemAffection
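
Behavioral note: getMinUpgrade() and getAffectionTable() now produce
[vulnerable atom, upgrade] pairs instead of a single best version, and
an empty upgrade entry means "affected, but no unaffected version
available". A consumer sketch, assuming glsa is an already-parsed Glsa
instance:

    for vuln, update in glsa.getAffectionTable(least_change=True):
        if update:
            print("%s: upgrade to %s" % (vuln, update))
        else:
            print("%s: affected, no upgrade available" % (vuln,))
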
diff --git a/pym/portage/localization.py b/pym/portage/localization.py
index d16c4b131..b54835a42 100644
--- a/pym/portage/localization.py
+++ b/pym/portage/localization.py
@@ -1,12 +1,18 @@
# localization.py -- Code to manage/help portage localization.
-# Copyright 2004 Gentoo Foundation
+# Copyright 2004-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from portage import _unicode_decode
# We define this to make the transition easier for us.
def _(mystr):
- return mystr
-
+ """
+ Always returns unicode, regardless of the input type. This is
+ helpful for avoiding UnicodeDecodeError from __str__() with
+ Python 2, by ensuring that string format operations invoke
+ __unicode__() instead of __str__().
+ """
+ return _unicode_decode(mystr)
def localization_example():
# Dict references allow translators to rearrange word order.
@@ -15,6 +21,7 @@ def localization_example():
a_value = "value.of.a"
b_value = 123
- c_value = [1,2,3,4]
- print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value})
+ c_value = [1, 2, 3, 4]
+ print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") %
+ {"a": a_value, "b": b_value, "c": c_value})
diff --git a/pym/portage/locks.py b/pym/portage/locks.py
index 59fbc6ec0..0789f8941 100644
--- a/pym/portage/locks.py
+++ b/pym/portage/locks.py
@@ -1,5 +1,5 @@
# portage: Lock management code
-# Copyright 2004-2012 Gentoo Foundation
+# Copyright 2004-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
@@ -17,11 +17,11 @@ import portage
from portage import os, _encodings, _unicode_decode
from portage.exception import DirectoryNotFound, FileNotFound, \
InvalidData, TryAgain, OperationNotPermitted, PermissionDenied
-from portage.data import portage_gid
from portage.util import writemsg
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
HARDLINK_FD = -2
@@ -64,6 +64,9 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
if not mypath:
raise InvalidData(_("Empty path given"))
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
# Support for file object or integer file descriptor parameters is
# deprecated due to ambiguity in whether or not it's safe to close
# the file descriptor, making it prone to "Bad file descriptor" errors
@@ -148,7 +151,7 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
except IOError as e:
if not hasattr(e, "errno"):
raise
- if e.errno in (errno.EACCES, errno.EAGAIN):
+ if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
# resource temp unavailable; eg, someone beat us to the lock.
if flags & os.O_NONBLOCK:
os.close(myfd)
@@ -163,19 +166,43 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
if isinstance(mypath, int):
waiting_msg = _("waiting for lock on fd %i") % myfd
else:
- waiting_msg = _("waiting for lock on %s\n") % lockfilename
+ waiting_msg = _("waiting for lock on %s") % lockfilename
if out is not None:
out.ebegin(waiting_msg)
# try for the exclusive lock now.
- try:
- locking_method(myfd, fcntl.LOCK_EX)
- except EnvironmentError as e:
- if out is not None:
- out.eend(1, str(e))
- raise
+ enolock_msg_shown = False
+ while True:
+ try:
+ locking_method(myfd, fcntl.LOCK_EX)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOLCK:
+ # This is known to occur on Solaris NFS (see
+ # bug #462694). Assume that the error is due
+ # to temporary exhaustion of record locks,
+ # and loop until one becomes available.
+ if not enolock_msg_shown:
+ enolock_msg_shown = True
+ if isinstance(mypath, int):
+ context_desc = _("Error while waiting "
+ "to lock fd %i") % myfd
+ else:
+ context_desc = _("Error while waiting "
+ "to lock '%s'") % lockfilename
+ writemsg("\n!!! %s: %s\n" % (context_desc, e),
+ noiselevel=-1)
+
+ time.sleep(_HARDLINK_POLL_LATENCY)
+ continue
+
+ if out is not None:
+ out.eend(1, str(e))
+ raise
+ else:
+ break
+
if out is not None:
out.eend(os.EX_OK)
- elif e.errno in (errno.ENOSYS, errno.ENOLCK):
+ elif e.errno in (errno.ENOSYS,):
# We're not allowed to lock on this FS.
if not isinstance(lockfilename, int):
# If a file object was passed in, it's not safe
@@ -207,10 +234,21 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
waiting_msg=waiting_msg, flags=flags)
if myfd != HARDLINK_FD:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(myfd, fcntl.F_SETFD,
+ fcntl.fcntl(myfd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
_open_fds.add(myfd)
- writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
- return (lockfilename,myfd,unlinkfile,locking_method)
+ writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
+ return (lockfilename, myfd, unlinkfile, locking_method)
def _fstat_nlink(fd):
"""
@@ -232,10 +270,10 @@ def unlockfile(mytuple):
#XXX: Compatibility hack.
if len(mytuple) == 3:
- lockfilename,myfd,unlinkfile = mytuple
+ lockfilename, myfd, unlinkfile = mytuple
locking_method = fcntl.flock
elif len(mytuple) == 4:
- lockfilename,myfd,unlinkfile,locking_method = mytuple
+ lockfilename, myfd, unlinkfile, locking_method = mytuple
else:
raise InvalidData
@@ -246,7 +284,7 @@ def unlockfile(mytuple):
# myfd may be None here due to myfd = mypath in lockfile()
if isinstance(lockfilename, basestring) and \
not os.path.exists(lockfilename):
- writemsg(_("lockfile does not exist '%s'\n") % lockfilename,1)
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
if myfd is not None:
os.close(myfd)
_open_fds.remove(myfd)
@@ -254,9 +292,9 @@ def unlockfile(mytuple):
try:
if myfd is None:
- myfd = os.open(lockfilename, os.O_WRONLY,0o660)
+ myfd = os.open(lockfilename, os.O_WRONLY, 0o660)
unlinkfile = 1
- locking_method(myfd,fcntl.LOCK_UN)
+ locking_method(myfd, fcntl.LOCK_UN)
except OSError:
if isinstance(lockfilename, basestring):
os.close(myfd)
@@ -271,14 +309,14 @@ def unlockfile(mytuple):
# commenting until it is proved necessary.
#time.sleep(0.0001)
if unlinkfile:
- locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
# We won the lock, so there isn't competition for it.
# We can safely delete the file.
writemsg(_("Got the lockfile...\n"), 1)
if _fstat_nlink(myfd) == 1:
os.unlink(lockfilename)
writemsg(_("Unlinked lockfile...\n"), 1)
- locking_method(myfd,fcntl.LOCK_UN)
+ locking_method(myfd, fcntl.LOCK_UN)
else:
writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
os.close(myfd)
@@ -288,7 +326,7 @@ def unlockfile(mytuple):
raise
except Exception as e:
writemsg(_("Failed to get lock... someone took it.\n"), 1)
- writemsg(str(e)+"\n",1)
+ writemsg(str(e) + "\n", 1)
# why test lockfilename? because we may have been handed an
# fd originally, and the caller might not like having their
@@ -300,14 +338,12 @@ def unlockfile(mytuple):
return True
-
-
def hardlock_name(path):
base, tail = os.path.split(path)
return os.path.join(base, ".%s.hardlock-%s-%s" %
(tail, os.uname()[1], os.getpid()))
-def hardlink_is_mine(link,lock):
+def hardlink_is_mine(link, lock):
try:
lock_st = os.stat(lock)
if lock_st.st_nlink == 2:
@@ -339,6 +375,9 @@ def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning,
preexisting = os.path.exists(lockfilename)
myhardlock = hardlock_name(lockfilename)
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
# myhardlock must not exist prior to our link() call, and we can
# safely unlink it since its file name is unique to our PID
try:
@@ -456,7 +495,6 @@ def unhardlink_lockfile(lockfilename, unlinkfile=True):
pass
def hardlock_cleanup(path, remove_all_locks=False):
- mypid = str(os.getpid())
myhost = os.uname()[1]
mydl = os.listdir(path)
@@ -465,7 +503,7 @@ def hardlock_cleanup(path, remove_all_locks=False):
mylist = {}
for x in mydl:
- if os.path.isfile(path+"/"+x):
+ if os.path.isfile(path + "/" + x):
parts = x.split(".hardlock-")
if len(parts) == 2:
filename = parts[0][1:]
@@ -482,17 +520,17 @@ def hardlock_cleanup(path, remove_all_locks=False):
mycount += 1
- results.append(_("Found %(count)s locks") % {"count":mycount})
+ results.append(_("Found %(count)s locks") % {"count": mycount})
for x in mylist:
if myhost in mylist[x] or remove_all_locks:
- mylockname = hardlock_name(path+"/"+x)
- if hardlink_is_mine(mylockname, path+"/"+x) or \
- not os.path.exists(path+"/"+x) or \
+ mylockname = hardlock_name(path + "/" + x)
+ if hardlink_is_mine(mylockname, path + "/" + x) or \
+ not os.path.exists(path + "/" + x) or \
remove_all_locks:
for y in mylist[x]:
for z in mylist[x][y]:
- filename = path+"/."+x+".hardlock-"+y+"-"+z
+ filename = path + "/." + x + ".hardlock-" + y + "-" + z
if filename == mylockname:
continue
try:
@@ -502,8 +540,8 @@ def hardlock_cleanup(path, remove_all_locks=False):
except OSError:
pass
try:
- os.unlink(path+"/"+x)
- results.append(_("Unlinked: ") + path+"/"+x)
+ os.unlink(path + "/" + x)
+ results.append(_("Unlinked: ") + path + "/" + x)
os.unlink(mylockname)
results.append(_("Unlinked: ") + mylockname)
except OSError:
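
The ENOLCK loop above treats record-lock exhaustion (seen on Solaris
NFS, bug #462694) as a transient condition. Reduced to its core, the
pattern is the following POSIX-only sketch, with poll_interval standing
in for _HARDLINK_POLL_LATENCY:

    import errno
    import fcntl
    import time

    def lock_exclusive(fd, poll_interval=3):
        # Retry while the kernel reports ENOLCK (record-lock table
        # exhausted); re-raise every other locking error.
        while True:
            try:
                fcntl.lockf(fd, fcntl.LOCK_EX)
                return
            except EnvironmentError as e:
                if e.errno != errno.ENOLCK:
                    raise
                time.sleep(poll_interval)
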
diff --git a/pym/portage/mail.py b/pym/portage/mail.py
index 3fcadd27b..723da04b8 100644
--- a/pym/portage/mail.py
+++ b/pym/portage/mail.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Since python ebuilds remove the 'email' module when USE=build
@@ -21,6 +21,7 @@ from portage.localization import _
import portage
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
def _force_ascii_if_necessary(s):
@@ -117,13 +118,13 @@ def send_mail(mysettings, message):
if "@" in mymailuri:
myauthdata, myconndata = mymailuri.rsplit("@", 1)
try:
- mymailuser,mymailpasswd = myauthdata.split(":")
+ mymailuser, mymailpasswd = myauthdata.split(":")
except ValueError:
print(_("!!! invalid SMTP AUTH configuration, trying unauthenticated ..."))
else:
myconndata = mymailuri
if ":" in myconndata:
- mymailhost,mymailport = myconndata.split(":")
+ mymailhost, mymailport = myconndata.split(":")
else:
mymailhost = myconndata
else:
diff --git a/pym/portage/manifest.py b/pym/portage/manifest.py
index a04b71780..3936b9a1d 100644
--- a/pym/portage/manifest.py
+++ b/pym/portage/manifest.py
@@ -1,15 +1,19 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import errno
import io
import re
+import sys
import warnings
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
- 'verify_all,_filter_unaccelarated_hashes',
+ 'verify_all,_apply_hash_filter,_filter_unaccelarated_hashes',
+ 'portage.repository.config:_find_invalid_path_char',
'portage.util:write_atomic',
)
@@ -24,8 +28,16 @@ from portage.const import (MANIFEST1_HASH_FUNCTIONS, MANIFEST2_HASH_DEFAULTS,
MANIFEST2_HASH_FUNCTIONS, MANIFEST2_IDENTIFIERS, MANIFEST2_REQUIRED_HASH)
from portage.localization import _
-# Characters prohibited by repoman's file.name check.
-_prohibited_filename_chars_re = re.compile(r'[^a-zA-Z0-9._\-+:]')
+_manifest_re = re.compile(
+ r'^(' + '|'.join(MANIFEST2_IDENTIFIERS) + r') (.*)( \d+( \S+ \S+)+)$',
+ re.UNICODE)
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+else:
+ _unicode = unicode
class FileNotInManifestException(PortageException):
pass
@@ -38,15 +50,10 @@ def manifest2AuxfileFilter(filename):
for x in mysplit:
if x[:1] == '.':
return False
- if _prohibited_filename_chars_re.search(x) is not None:
- return False
return not filename[:7] == 'digest-'
def manifest2MiscfileFilter(filename):
- filename = filename.strip(os.sep)
- if _prohibited_filename_chars_re.search(filename) is not None:
- return False
- return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
+ return not (filename == "Manifest" or filename.endswith(".ebuild"))
def guessManifestFileType(filename):
""" Perform a best effort guess of which type the given filename is, avoid using this if possible """
@@ -67,18 +74,17 @@ def guessThinManifestFileType(filename):
return None
return "DIST"
-def parseManifest2(mysplit):
+def parseManifest2(line):
+ if not isinstance(line, basestring):
+ line = ' '.join(line)
myentry = None
- if len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS:
- mytype = mysplit[0]
- myname = mysplit[1]
- try:
- mysize = int(mysplit[2])
- except ValueError:
- return None
- myhashes = dict(zip(mysplit[3::2], mysplit[4::2]))
- myhashes["size"] = mysize
- myentry = Manifest2Entry(type=mytype, name=myname, hashes=myhashes)
+ match = _manifest_re.match(line)
+ if match is not None:
+ tokens = match.group(3).split()
+ hashes = dict(zip(tokens[1::2], tokens[2::2]))
+ hashes["size"] = int(tokens[0])
+ myentry = Manifest2Entry(type=match.group(1),
+ name=match.group(2), hashes=hashes)
return myentry
class ManifestEntry(object):
@@ -108,11 +114,20 @@ class Manifest2Entry(ManifestEntry):
def __ne__(self, other):
return not self.__eq__(other)
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['repo.content'], errors='strict')
+
class Manifest(object):
parsers = (parseManifest2,)
- def __init__(self, pkgdir, distdir, fetchlist_dict=None,
+ def __init__(self, pkgdir, distdir=None, fetchlist_dict=None,
manifest1_compat=DeprecationWarning, from_scratch=False, thin=False,
- allow_missing=False, allow_create=True, hashes=None):
+ allow_missing=False, allow_create=True, hashes=None,
+ find_invalid_path_char=None):
""" Create new Manifest instance for package in pkgdir.
Do not parse Manifest file if from_scratch == True (only for internal use)
The fetchlist_dict parameter is required only for generation of
@@ -125,6 +140,9 @@ class Manifest(object):
"portage.manifest.Manifest constructor is deprecated.",
DeprecationWarning, stacklevel=2)
+ if find_invalid_path_char is None:
+ find_invalid_path_char = _find_invalid_path_char
+ self._find_invalid_path_char = find_invalid_path_char
self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
self.fhashdict = {}
self.hashes = set()
@@ -173,13 +191,12 @@ class Manifest(object):
"""Parse a manifest. If myhashdict is given then data will be added too it.
Otherwise, a new dict will be created and returned."""
try:
- fd = io.open(_unicode_encode(file_path,
+ with io.open(_unicode_encode(file_path,
encoding=_encodings['fs'], errors='strict'), mode='r',
- encoding=_encodings['repo.content'], errors='replace')
- if myhashdict is None:
- myhashdict = {}
- self._parseDigests(fd, myhashdict=myhashdict, **kwargs)
- fd.close()
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ if myhashdict is None:
+ myhashdict = {}
+ self._parseDigests(f, myhashdict=myhashdict, **kwargs)
return myhashdict
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
@@ -198,9 +215,8 @@ class Manifest(object):
"""Parse manifest lines and return a list of manifest entries."""
for myline in mylines:
myentry = None
- mysplit = myline.split()
for parser in self.parsers:
- myentry = parser(mysplit)
+ myentry = parser(myline)
if myentry is not None:
yield myentry
break # go to the next line
@@ -255,9 +271,12 @@ class Manifest(object):
(MANIFEST2_REQUIRED_HASH, t, f))
def write(self, sign=False, force=False):
- """ Write Manifest instance to disk, optionally signing it """
+ """ Write Manifest instance to disk, optionally signing it. Returns
+ True if the Manifest is actually written, and False if the write
+ is skipped due to existing Manifest being identical."""
+ rval = False
if not self.allow_create:
- return
+ return rval
self.checkIntegrity()
try:
myentries = list(self._createManifestEntries())
@@ -289,7 +308,8 @@ class Manifest(object):
# thin manifests with no DIST entries, myentries is
# non-empty for all currently known use cases.
write_atomic(self.getFullname(), "".join("%s\n" %
- str(myentry) for myentry in myentries))
+ _unicode(myentry) for myentry in myentries))
+ rval = True
else:
# With thin manifest, there's no need to have
# a Manifest file if there are no DIST entries.
@@ -298,6 +318,7 @@ class Manifest(object):
except OSError as e:
if e.errno != errno.ENOENT:
raise
+ rval = True
if sign:
self.sign()
@@ -305,6 +326,7 @@ class Manifest(object):
if e.errno == errno.EACCES:
raise PermissionDenied(str(e))
raise
+ return rval
def sign(self):
""" Sign the Manifest """
@@ -363,10 +385,11 @@ class Manifest(object):
distfilehashes = self.fhashdict["DIST"]
else:
distfilehashes = {}
- self.__init__(self.pkgdir, self.distdir,
+ self.__init__(self.pkgdir, distdir=self.distdir,
fetchlist_dict=self.fetchlist_dict, from_scratch=True,
thin=self.thin, allow_missing=self.allow_missing,
- allow_create=self.allow_create, hashes=self.hashes)
+ allow_create=self.allow_create, hashes=self.hashes,
+ find_invalid_path_char=self._find_invalid_path_char)
pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
cat = self._pkgdir_category()
@@ -461,7 +484,8 @@ class Manifest(object):
if pf is not None:
mytype = "EBUILD"
cpvlist.append(pf)
- elif manifest2MiscfileFilter(f):
+ elif self._find_invalid_path_char(f) == -1 and \
+ manifest2MiscfileFilter(f):
mytype = "MISC"
else:
continue
@@ -480,7 +504,8 @@ class Manifest(object):
full_path = os.path.join(parentdir, f)
recursive_files.append(full_path[cut_len:])
for f in recursive_files:
- if not manifest2AuxfileFilter(f):
+ if self._find_invalid_path_char(f) != -1 or \
+ not manifest2AuxfileFilter(f):
continue
self.fhashdict["AUX"][f] = perform_multiple_checksums(
os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
@@ -502,14 +527,17 @@ class Manifest(object):
for t in MANIFEST2_IDENTIFIERS:
self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
- def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
+ def checkTypeHashes(self, idtype, ignoreMissingFiles=False, hash_filter=None):
for f in self.fhashdict[idtype]:
- self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles)
+ self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles,
+ hash_filter=hash_filter)
- def checkFileHashes(self, ftype, fname, ignoreMissing=False):
+ def checkFileHashes(self, ftype, fname, ignoreMissing=False, hash_filter=None):
+ digests = _filter_unaccelarated_hashes(self.fhashdict[ftype][fname])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
try:
- ok, reason = verify_all(self._getAbsname(ftype, fname),
- _filter_unaccelarated_hashes(self.fhashdict[ftype][fname]))
+ ok, reason = verify_all(self._getAbsname(ftype, fname), digests)
if not ok:
raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
return ok, reason
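
The move from line.split() to _manifest_re lets entry names contain
spaces, because the "size hash value ..." tail is matched from the
right of the line. A standalone sketch with a shortened identifier
list (the real pattern is built from MANIFEST2_IDENTIFIERS):

    import re

    manifest_re = re.compile(
        r'^(AUX|DIST|EBUILD|MISC) (.*)( \d+( \S+ \S+)+)$')

    line = "DIST foo bar-1.0.tar.gz 42 SHA256 deadbeef WHIRLPOOL cafe"
    m = manifest_re.match(line)
    if m is not None:
        tokens = m.group(3).split()
        hashes = dict(zip(tokens[1::2], tokens[2::2]))
        hashes["size"] = int(tokens[0])
        # type 'DIST', name 'foo bar-1.0.tar.gz' (space preserved)
        print("%s %r %r" % (m.group(1), m.group(2), hashes))
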
diff --git a/pym/portage/news.py b/pym/portage/news.py
index bbd93257a..408fb5c5f 100644
--- a/pym/portage/news.py
+++ b/pym/portage/news.py
@@ -1,8 +1,8 @@
# portage: news management code
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
__all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
"DisplayProfileRestriction", "DisplayKeywordRestriction",
@@ -13,6 +13,7 @@ import io
import logging
import os as _os
import re
+import portage
from portage import OrderedDict
from portage import os
from portage import _encodings
@@ -241,7 +242,8 @@ class NewsItem(object):
for values in self.restrictions.values():
any_match = False
for restriction in values:
- if restriction.checkRestriction(**kwargs):
+ if restriction.checkRestriction(
+ **portage._native_kwargs(kwargs)):
any_match = True
if not any_match:
all_match = False
@@ -388,7 +390,7 @@ def count_unread_news(portdb, vardb, repos=None, update=True):
# NOTE: The NewsManager typically handles permission errors by
# returning silently, so PermissionDenied won't necessarily be
# raised even if we do trigger a permission error above.
- msg = _unicode_decode("Permission denied: '%s'\n") % (e,)
+ msg = "Permission denied: '%s'\n" % (e,)
if msg in permission_msgs:
pass
else:
diff --git a/pym/portage/output.py b/pym/portage/output.py
index e44375ee3..cd660ac99 100644
--- a/pym/portage/output.py
+++ b/pym/portage/output.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__docformat__ = "epytext"
@@ -24,8 +24,8 @@ from portage.exception import CommandNotFound, FileNotFound, \
ParseError, PermissionDenied, PortageException
from portage.localization import _
-havecolor=1
-dotitles=1
+havecolor = 1
+dotitles = 1
_styles = {}
"""Maps style class to tuple of attribute names."""
@@ -164,15 +164,12 @@ def _parse_color_map(config_root='/', onerror=None):
token = token[1:-1]
return token
- f = None
try:
- f = io.open(_unicode_encode(myfile,
+ with io.open(_unicode_encode(myfile,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace')
- lineno = 0
- for line in f:
- lineno += 1
-
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
+ for lineno, line in enumerate(lines):
commenter_pos = line.find("#")
line = line[:commenter_pos].strip()
@@ -230,9 +227,6 @@ def _parse_color_map(config_root='/', onerror=None):
elif e.errno == errno.EACCES:
raise PermissionDenied(myfile)
raise
- finally:
- if f is not None:
- f.close()
def nc_len(mystr):
tmp = re.sub(esc_seq + "^m]+m", "", mystr);
@@ -245,7 +239,7 @@ _max_xtermTitle_len = 253
def xtermTitle(mystr, raw=False):
global _disable_xtermTitle
if _disable_xtermTitle is None:
- _disable_xtermTitle = not (sys.stderr.isatty() and \
+ _disable_xtermTitle = not (sys.__stderr__.isatty() and \
'TERM' in os.environ and \
_legal_terms_re.match(os.environ['TERM']) is not None)
@@ -278,15 +272,18 @@ def xtermTitleReset():
if dotitles and \
'TERM' in os.environ and \
_legal_terms_re.match(os.environ['TERM']) is not None and \
- sys.stderr.isatty():
+ sys.__stderr__.isatty():
from portage.process import find_binary, spawn
shell = os.environ.get("SHELL")
if not shell or not os.access(shell, os.EX_OK):
shell = find_binary("sh")
if shell:
spawn([shell, "-c", prompt_command], env=os.environ,
- fd_pipes={0:sys.stdin.fileno(),1:sys.stderr.fileno(),
- 2:sys.stderr.fileno()})
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stderr__.fileno(),
+ 2: sys.__stderr__.fileno()
+ })
else:
os.system(prompt_command)
return
@@ -302,12 +299,12 @@ def xtermTitleReset():
def notitles():
"turn off title setting"
- dotitles=0
+ dotitles = 0
def nocolor():
"turn off colorization"
global havecolor
- havecolor=0
+ havecolor = 0
def resetColor():
return codes["reset"]
@@ -344,9 +341,11 @@ def colorize(color_key, text):
else:
return text
-compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
- "fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
- "brown","darkyellow","red","darkred"]
+compat_functions_colors = [
+ "bold", "white", "teal", "turquoise", "darkteal",
+ "fuchsia", "purple", "blue", "darkblue", "green", "darkgreen", "yellow",
+ "brown", "darkyellow", "red", "darkred",
+]
class create_color_func(object):
__slots__ = ("_color_key",)
diff --git a/pym/portage/package/ebuild/_config/KeywordsManager.py b/pym/portage/package/ebuild/_config/KeywordsManager.py
index 0c613ce04..af606f1eb 100644
--- a/pym/portage/package/ebuild/_config/KeywordsManager.py
+++ b/pym/portage/package/ebuild/_config/KeywordsManager.py
@@ -11,7 +11,7 @@ from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
from portage.localization import _
from portage.package.ebuild._config.helper import ordered_by_atom_specificity
from portage.util import grabdict_package, stack_lists, writemsg
-from portage.versions import cpv_getkey, _pkg_str
+from portage.versions import _pkg_str
class KeywordsManager(object):
"""Manager class to handle keywords processing and validation"""
@@ -77,7 +77,9 @@ class KeywordsManager(object):
def getKeywords(self, cpv, slot, keywords, repo):
- if not hasattr(cpv, 'slot'):
+ try:
+ cpv.slot
+ except AttributeError:
pkg = _pkg_str(cpv, slot=slot, repo=repo)
else:
pkg = cpv
@@ -91,6 +93,47 @@ class KeywordsManager(object):
keywords.extend(pkg_keywords)
return stack_lists(keywords, incremental=True)
+ def isStable(self, pkg, global_accept_keywords, backuped_accept_keywords):
+ mygroups = self.getKeywords(pkg, None, pkg._metadata["KEYWORDS"], None)
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(pkg, None, None,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ if self._getMissingKeywords(pkg, pgroups, mygroups):
+ return False
+
+ if pkg.cpv._settings.local_config:
+ # If replacing all keywords with unstable variants would mask the
+ # package, then it's considered stable.
+ unstable = []
+ for kw in mygroups:
+ if kw[:1] != "~":
+ kw = "~" + kw
+ unstable.append(kw)
+
+ return bool(self._getMissingKeywords(pkg, pgroups, set(unstable)))
+ else:
+ # For repoman, if the package has an effective stable keyword that
+ # intersects with the effective ACCEPT_KEYWORDS for the current
+ # profile, then consider it stable.
+ for kw in pgroups:
+ if kw[:1] != "~":
+ if kw in mygroups or '*' in mygroups:
+ return True
+ if kw == '*':
+ for x in mygroups:
+ if x[:1] != "~":
+ return True
+ return False
def getMissingKeywords(self,
cpv,
@@ -237,7 +280,7 @@ class KeywordsManager(object):
if not mygroups:
# If KEYWORDS is empty then we still have to return something
# in order to distinguish from the case of "none missing".
- mygroups.append("**")
+ mygroups = ["**"]
missing = mygroups
return missing
@@ -261,9 +304,11 @@ class KeywordsManager(object):
"""
pgroups = global_accept_keywords.split()
- if not hasattr(cpv, 'slot'):
+ try:
+ cpv.slot
+ except AttributeError:
cpv = _pkg_str(cpv, slot=slot, repo=repo)
- cp = cpv_getkey(cpv)
+ cp = cpv.cp
unmaskgroups = []
if self._p_accept_keywords:
@@ -288,4 +333,3 @@ class KeywordsManager(object):
for x in pkg_accept_keywords:
unmaskgroups.extend(x)
return unmaskgroups
-
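
The local_config branch of isStable() above encodes this rule: a
visible package counts as stable exactly when rewriting every keyword
to its ~arch form would hide it. A condensed, class-free sketch of
that rule, where visible() plays the role of _getMissingKeywords()
returning nothing ('**' handling and profile keyword groups omitted):

    def visible(keywords, accepted):
        # True if at least one package keyword is accepted.
        return any(kw in accepted for kw in keywords)

    def is_stable(keywords, accepted):
        if not visible(keywords, accepted):
            return False
        unstable = [kw if kw.startswith("~") else "~" + kw
            for kw in keywords]
        # Stable iff the all-~arch variant would no longer be visible.
        return not visible(unstable, accepted)

    print(is_stable(["amd64"], {"amd64"}))             # True
    print(is_stable(["~amd64"], {"amd64", "~amd64"}))  # False
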
diff --git a/pym/portage/package/ebuild/_config/LocationsManager.py b/pym/portage/package/ebuild/_config/LocationsManager.py
index f7a1177e7..4427f1d05 100644
--- a/pym/portage/package/ebuild/_config/LocationsManager.py
+++ b/pym/portage/package/ebuild/_config/LocationsManager.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = (
'LocationsManager',
)
@@ -13,10 +15,12 @@ import portage
from portage import os, eapi_is_supported, _encodings, _unicode_encode
from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
PROFILE_PATH, USER_CONFIG_PATH
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
from portage.exception import DirectoryNotFound, ParseError
from portage.localization import _
from portage.util import ensure_dirs, grabfile, \
normalize_path, shlex_split, writemsg
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.repository.config import parse_layout_conf, \
_portage1_profiles_allow_directories
@@ -27,7 +31,7 @@ _PORTAGE1_DIRECTORIES = frozenset([
'use.mask', 'use.force'])
_profile_node = collections.namedtuple('_profile_node',
- 'location portage1_directories')
+ 'location portage1_directories user_config')
_allow_parent_colon = frozenset(
["portage-2"])
@@ -45,9 +49,13 @@ class LocationsManager(object):
if self.eprefix is None:
self.eprefix = portage.const.EPREFIX
+ elif self.eprefix:
+ self.eprefix = normalize_path(self.eprefix)
+ if self.eprefix == os.sep:
+ self.eprefix = ""
if self.config_root is None:
- self.config_root = self.eprefix + os.sep
+ self.config_root = portage.const.EPREFIX + os.sep
self.config_root = normalize_path(os.path.abspath(
self.config_root)).rstrip(os.path.sep) + os.path.sep
@@ -72,14 +80,26 @@ class LocationsManager(object):
known_repos = tuple(known_repos)
if self.config_profile_path is None:
+ deprecated_profile_path = os.path.join(
+ self.config_root, 'etc', 'make.profile')
self.config_profile_path = \
os.path.join(self.config_root, PROFILE_PATH)
- if os.path.isdir(self.config_profile_path):
+ if isdir_raise_eaccess(self.config_profile_path):
self.profile_path = self.config_profile_path
+ if isdir_raise_eaccess(deprecated_profile_path) and not \
+ os.path.samefile(self.profile_path,
+ deprecated_profile_path):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found 2 make.profile dirs: "
+ "using '%s', ignoring '%s'") %
+ (self.profile_path, deprecated_profile_path),
+ noiselevel=-1)
else:
- self.config_profile_path = \
- os.path.join(self.abs_user_config, 'make.profile')
- if os.path.isdir(self.config_profile_path):
+ self.config_profile_path = deprecated_profile_path
+ if isdir_raise_eaccess(self.config_profile_path):
self.profile_path = self.config_profile_path
else:
self.profile_path = None
@@ -99,9 +119,9 @@ class LocationsManager(object):
self._addProfile(os.path.realpath(self.profile_path),
repositories, known_repos)
except ParseError as e:
- writemsg(_("!!! Unable to parse profile: '%s'\n") % \
- self.profile_path, noiselevel=-1)
- writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
+ if not portage._sync_mode:
+ writemsg(_("!!! Unable to parse profile: '%s'\n") % self.profile_path, noiselevel=-1)
+ writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
self.profiles = []
self.profiles_complex = []
@@ -111,14 +131,15 @@ class LocationsManager(object):
if os.path.exists(custom_prof):
self.user_profile_dir = custom_prof
self.profiles.append(custom_prof)
- self.profiles_complex.append(_profile_node(custom_prof, True))
+ self.profiles_complex.append(
+ _profile_node(custom_prof, True, True))
del custom_prof
self.profiles = tuple(self.profiles)
self.profiles_complex = tuple(self.profiles_complex)
def _check_var_directory(self, varname, var):
- if not os.path.isdir(var):
+ if not isdir_raise_eaccess(var):
writemsg(_("!!! Error: %s='%s' is not a directory. "
"Please correct this.\n") % (varname, var),
noiselevel=-1)
@@ -130,33 +151,9 @@ class LocationsManager(object):
allow_parent_colon = True
repo_loc = None
compat_mode = False
- intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
- if intersecting_repos:
- # protect against nested repositories. Insane configuration, but the longest
- # path will be the correct one.
- repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
- allow_directories = any(x in _portage1_profiles_allow_directories
- for x in layout_data['profile-formats'])
- compat_mode = layout_data['profile-formats'] == ('portage-1-compat',)
- allow_parent_colon = any(x in _allow_parent_colon
- for x in layout_data['profile-formats'])
- if compat_mode:
- offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
- offenders = sorted(x for x in offenders
- if os.path.isdir(os.path.join(currentPath, x)))
- if offenders:
- warnings.warn(_("Profile '%(profile_path)s' in repository "
- "'%(repo_name)s' is implicitly using 'portage-1' profile format, but "
- "the repository profiles are not marked as that format. This will break "
- "in the future. Please either convert the following paths "
- "to files, or add\nprofile-formats = portage-1\nto the "
- "repositories layout.conf. Files: '%(files)s'\n")
- % dict(profile_path=currentPath, repo_name=repo_loc,
- files=', '.join(offenders)))
-
- parentsFile = os.path.join(currentPath, "parent")
eapi_file = os.path.join(currentPath, "eapi")
+ eapi = "0"
f = None
try:
f = io.open(_unicode_encode(eapi_file,
@@ -174,7 +171,38 @@ class LocationsManager(object):
finally:
if f is not None:
f.close()
- if os.path.exists(parentsFile):
+
+ intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
+ if intersecting_repos:
+ # protect against nested repositories. Insane configuration, but the longest
+ # path will be the correct one.
+ repo_loc, layout_data = max(intersecting_repos, key=lambda x: len(x[0]))
+ allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+ allow_parent_colon = any(x in _allow_parent_colon
+ for x in layout_data['profile-formats'])
+
+ if compat_mode:
+ offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
+ offenders = sorted(x for x in offenders
+ if os.path.isdir(os.path.join(currentPath, x)))
+ if offenders:
+ warnings.warn(_(
+ "\nThe selected profile is implicitly using the 'portage-1' format:\n"
+ "\tprofile = %(profile_path)s\n"
+ "But this repository is not using that format:\n"
+ "\trepo = %(repo_name)s\n"
+ "This will break in the future. Please convert these dirs to files:\n"
+ "\t%(files)s\n"
+ "Or, add this line to the repository's layout.conf:\n"
+ "\tprofile-formats = portage-1")
+ % dict(profile_path=currentPath, repo_name=repo_loc,
+ files='\n\t'.join(offenders)))
+
+ parentsFile = os.path.join(currentPath, "parent")
+ if exists_raise_eaccess(parentsFile):
parents = grabfile(parentsFile)
if not parents:
raise ParseError(
@@ -196,7 +224,7 @@ class LocationsManager(object):
# of the current repo, so realpath it.
parentPath = os.path.realpath(parentPath)
- if os.path.exists(parentPath):
+ if exists_raise_eaccess(parentPath):
self._addProfile(parentPath, repositories, known_repos)
else:
raise ParseError(
@@ -205,7 +233,7 @@ class LocationsManager(object):
self.profiles.append(currentPath)
self.profiles_complex.append(
- _profile_node(currentPath, allow_directories))
+ _profile_node(currentPath, allow_directories, False))
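The second and third positional fields threaded through _profile_node above are the profile-format capability flag and a new user-config marker. A minimal sketch of the shape these call sites imply (field names are assumptions; the real definition lives near the top of LocationsManager.py):

    import collections

    # Assumed field names, inferred from the call sites above.
    _profile_node = collections.namedtuple("_profile_node",
        "location portage1_directories user_config")

    # A node discovered while walking the repository profile tree:
    repo_node = _profile_node("/usr/portage/profiles/default/linux/amd64", True, False)
    # The /etc/portage/profile override appended in load_profiles():
    user_node = _profile_node("/etc/portage/profile", True, True)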
def _expand_parent_colon(self, parentsFile, parentPath,
repo_loc, repositories):
@@ -253,29 +281,10 @@ class LocationsManager(object):
self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
- # make.globals should not be relative to config_root
- # because it only contains constants. However, if EPREFIX
- # is set then there are two possible scenarios:
- # 1) If $ROOT == "/" then make.globals should be
- # relative to EPREFIX.
- # 2) If $ROOT != "/" then the correct location of
- # make.globals needs to be specified in the constructor
- # parameters, since it's a property of the host system
- # (and the current config represents the target system).
self.global_config_path = GLOBAL_CONFIG_PATH
- if self.eprefix:
- if self.target_root == "/":
- # case (1) above
- self.global_config_path = os.path.join(self.eprefix,
- GLOBAL_CONFIG_PATH.lstrip(os.sep))
- else:
- # case (2) above
- # For now, just assume make.globals is relative
- # to EPREFIX.
- # TODO: Pass in more info to the constructor,
- # so we know the host system configuration.
- self.global_config_path = os.path.join(self.eprefix,
- GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ if portage.const.EPREFIX:
+ self.global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
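The lstrip() above is what anchors an absolute default under the offset prefix: os.path.join() discards every component that precedes an absolute one. A quick runnable illustration (the literal paths here are examples, not the shipped defaults):

    import os

    EPREFIX = "/tmp/prefix"                            # example prefix
    GLOBAL_CONFIG_PATH = "/usr/share/portage/config"   # example default

    # An absolute second component makes os.path.join() drop EPREFIX:
    assert os.path.join(EPREFIX, GLOBAL_CONFIG_PATH) == "/usr/share/portage/config"

    # Stripping the leading separator re-anchors it under the prefix:
    assert (os.path.join(EPREFIX, GLOBAL_CONFIG_PATH.lstrip(os.sep))
            == "/tmp/prefix/usr/share/portage/config")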
def set_port_dirs(self, portdir, portdir_overlay):
self.portdir = portdir
@@ -287,7 +296,7 @@ class LocationsManager(object):
for ov in shlex_split(self.portdir_overlay):
ov = normalize_path(ov)
profiles_dir = os.path.join(ov, "profiles")
- if os.path.isdir(profiles_dir):
+ if isdir_raise_eaccess(profiles_dir):
self.overlay_profiles.append(profiles_dir)
self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
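The isdir_raise_eaccess()/exists_raise_eaccess() substitutions throughout this file change one behavior relative to os.path: an EACCES failure is surfaced as PermissionDenied instead of being conflated with a missing path. A minimal sketch of the assumed semantics (the real helpers live in portage.util._path):

    import errno
    import os

    from portage.exception import PermissionDenied

    def exists_raise_eaccess(path):
        # Like os.path.exists(), except that a permission error is
        # reported to the caller instead of masquerading as "absent".
        try:
            os.stat(path)
        except OSError as e:
            if e.errno == errno.EACCES:
                raise PermissionDenied("stat('%s')" % path)
            return False
        return True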
diff --git a/pym/portage/package/ebuild/_config/MaskManager.py b/pym/portage/package/ebuild/_config/MaskManager.py
index bce1152ee..0f060c96e 100644
--- a/pym/portage/package/ebuild/_config/MaskManager.py
+++ b/pym/portage/package/ebuild/_config/MaskManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -8,11 +8,10 @@ __all__ = (
import warnings
from portage import os
-from portage.dep import ExtendedAtomDict, match_from_list, _repo_separator, _slot_separator
+from portage.dep import ExtendedAtomDict, match_from_list
from portage.localization import _
from portage.util import append_repo, grabfile_package, stack_lists, writemsg
-from portage.versions import cpv_getkey
-from _emerge.Package import Package
+from portage.versions import _pkg_str
class MaskManager(object):
@@ -47,7 +46,7 @@ class MaskManager(object):
"the repository profiles are not marked as that format. This will break "
"in the future. Please either convert the following paths "
"to files, or add\nprofile-formats = portage-1\nto the "
- "repositories layout.conf.\n")
+ "repository's layout.conf.\n")
% dict(repo_name=repo_config.name))
return pmask_cache[loc]
@@ -185,12 +184,15 @@ class MaskManager(object):
@return: A matching atom string or None if one is not found.
"""
- cp = cpv_getkey(cpv)
- mask_atoms = self._pmaskdict.get(cp)
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ mask_atoms = self._pmaskdict.get(pkg.cp)
if mask_atoms:
- pkg = "".join((cpv, _slot_separator, slot))
- if repo and repo != Package.UNKNOWN_REPO:
- pkg = "".join((pkg, _repo_separator, repo))
pkg_list = [pkg]
for x in mask_atoms:
if not match_from_list(x, pkg_list):
@@ -219,8 +221,15 @@ class MaskManager(object):
@return: A matching atom string or None if one is not found.
"""
- cp = cpv_getkey(cpv)
- return self._getMaskAtom(cpv, slot, repo, self._punmaskdict.get(cp))
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ return self._getMaskAtom(pkg, slot, repo,
+ self._punmaskdict.get(pkg.cp))
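Both lookups now normalize their argument to _pkg_str, a private str subclass that carries slot and repo as attributes; match_from_list() reads those attributes directly, which is what makes the old manual separator concatenation removable. A sketch of the idea:

    from portage.dep import Atom, match_from_list
    from portage.versions import _pkg_str

    # _pkg_str behaves as the plain cpv string wherever one is expected,
    # but carries the metadata that slot/repo atoms need for matching.
    pkg = _pkg_str("sys-apps/portage-2.2.8", slot="0", repo="gentoo")
    assert pkg.cp == "sys-apps/portage"
    assert pkg.slot == "0"

    atom = Atom("sys-apps/portage:0::gentoo", allow_repo=True)
    assert match_from_list(atom, [pkg])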
def getRawMaskAtom(self, cpv, slot, repo):
diff --git a/pym/portage/package/ebuild/_config/UseManager.py b/pym/portage/package/ebuild/_config/UseManager.py
index e1ec7f4a0..1c8c60eae 100644
--- a/pym/portage/package/ebuild/_config/UseManager.py
+++ b/pym/portage/package/ebuild/_config/UseManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -7,36 +7,49 @@ __all__ = (
from _emerge.Package import Package
from portage import os
-from portage.dep import dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re
+from portage.dep import Atom, dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re, _repo_separator
+from portage.eapi import eapi_has_use_aliases, eapi_supports_stable_use_forcing_and_masking
+from portage.exception import InvalidAtom
from portage.localization import _
-from portage.util import grabfile, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
-from portage.versions import cpv_getkey, _pkg_str
+from portage.util import grabfile, grabdict, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
+from portage.versions import _pkg_str
from portage.package.ebuild._config.helper import ordered_by_atom_specificity
class UseManager(object):
- def __init__(self, repositories, profiles, abs_user_config, user_config=True):
+ def __init__(self, repositories, profiles, abs_user_config, is_stable,
+ user_config=True):
# file variable
#--------------------------------
# repositories
#--------------------------------
# use.mask _repo_usemask_dict
+ # use.stable.mask _repo_usestablemask_dict
# use.force _repo_useforce_dict
+ # use.stable.force _repo_usestableforce_dict
+ # use.aliases _repo_usealiases_dict
# package.use.mask _repo_pusemask_dict
+ # package.use.stable.mask _repo_pusestablemask_dict
# package.use.force _repo_puseforce_dict
+ # package.use.stable.force _repo_pusestableforce_dict
+ # package.use.aliases _repo_pusealiases_dict
#--------------------------------
# profiles
#--------------------------------
# use.mask _usemask_list
+ # use.stable.mask _usestablemask_list
# use.force _useforce_list
+ # use.stable.force _usestableforce_list
# package.use.mask _pusemask_list
+ # package.use.stable.mask _pusestablemask_list
# package.use _pkgprofileuse
# package.use.force _puseforce_list
+ # package.use.stable.force _pusestableforce_list
#--------------------------------
# user config
#--------------------------------
- # package.use _pusedict
+ # package.use _pusedict
# Dynamic variables tracked by the config class
#--------------------------------
@@ -49,26 +62,61 @@ class UseManager(object):
#--------------------------------
# puse
+ self._user_config = user_config
+ self._is_stable = is_stable
self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories)
+ self._repo_usestablemask_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories)
+ self._repo_usestableforce_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories)
+ self._repo_pusestablemask_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories)
+ self._repo_pusestableforce_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories)
self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles)
+ self._usestablemask_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles)
+ self._usestableforce_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles)
+ self._pusestablemask_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True)
self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles)
+ self._pusestableforce_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config)
+ self._repo_usealiases_dict = self._parse_repository_usealiases(repositories)
+ self._repo_pusealiases_dict = self._parse_repository_packageusealiases(repositories)
+
self.repositories = repositories
-
- def _parse_file_to_tuple(self, file_name, recursive=True):
+
+ def _parse_file_to_tuple(self, file_name, recursive=True, eapi_filter=None):
ret = []
lines = grabfile(file_name, recursive=recursive)
eapi = read_corresponding_eapi_file(file_name)
+ if eapi_filter is not None and not eapi_filter(eapi):
+ if lines:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ()
useflag_re = _get_useflag_re(eapi)
for prefixed_useflag in lines:
if prefixed_useflag[:1] == "-":
@@ -82,11 +130,26 @@ class UseManager(object):
ret.append(prefixed_useflag)
return tuple(ret)
- def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True):
+ def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True,
+ eapi_filter=None, user_config=False):
ret = {}
location_dict = {}
- file_dict = grabdict_package(file_name, recursive=recursive, verify_eapi=True)
- eapi = read_corresponding_eapi_file(file_name)
+ eapi = read_corresponding_eapi_file(file_name, default=None)
+ if eapi is None and not user_config:
+ eapi = "0"
+ if eapi is None:
+ ret = ExtendedAtomDict(dict)
+ else:
+ ret = {}
+ file_dict = grabdict_package(file_name, recursive=recursive,
+ allow_wildcard=(eapi is None), allow_repo=(eapi is None),
+ verify_eapi=(eapi is not None))
+ if eapi is not None and eapi_filter is not None and not eapi_filter(eapi):
+ if file_dict:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ret
useflag_re = _get_useflag_re(eapi)
for k, v in file_dict.items():
useflags = []
@@ -119,31 +182,116 @@ class UseManager(object):
return ret
- def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories):
+ def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories, eapi_filter=None):
ret = {}
for repo in repositories.repos_with_profiles():
- ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name))
+ ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter)
return ret
- def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories):
+ def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories, eapi_filter=None):
ret = {}
for repo in repositories.repos_with_profiles():
- ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name))
+ ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter)
return ret
- def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations):
+ def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations,
+ eapi_filter=None):
return tuple(self._parse_file_to_tuple(
os.path.join(profile.location, file_name),
- recursive=profile.portage1_directories)
+ recursive=profile.portage1_directories, eapi_filter=eapi_filter)
for profile in locations)
- def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations, juststrings=False):
+ def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations,
+ juststrings=False, eapi_filter=None):
return tuple(self._parse_file_to_dict(
os.path.join(profile.location, file_name), juststrings,
- recursive=profile.portage1_directories)
+ recursive=profile.portage1_directories, eapi_filter=eapi_filter,
+ user_config=profile.user_config)
for profile in locations)
- def getUseMask(self, pkg=None):
+ def _parse_repository_usealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "use.aliases")
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ raw_file_dict = grabdict(file_name, recursive=True)
+ file_dict = {}
+ for real_flag, aliases in raw_file_dict.items():
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag in '%s': '%s'\n") % (file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in aliases:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag in '%s': '%s'\n") %
+ (real_flag, file_name, alias), noiselevel=-1)
+ else:
+ if any(alias in v for k, v in file_dict.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias in '%s': '%s'\n") %
+ (file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
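For context, a repository's profiles/use.aliases presumably lists one real flag per line followed by its aliases; the loop above inverts that into a per-repo {real_flag: [aliases]} mapping while rejecting duplicated aliases. A hypothetical file and its parse:

    # Hypothetical profiles/use.aliases content:
    #
    #     abi_x86_32 multilib32 lib32
    #     ssl openssl
    #
    # would be stored by the parser above as:
    #
    #     ret[repo.name] == {"abi_x86_32": ["multilib32", "lib32"],
    #                        "ssl": ["openssl"]}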
+ def _parse_repository_packageusealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "package.use.aliases")
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ lines = grabfile(file_name, recursive=True)
+ file_dict = {}
+ for line in lines:
+ elements = line.split()
+ atom = elements[0]
+ try:
+ atom = Atom(atom, eapi=eapi)
+ except InvalidAtom:
+ writemsg(_("--- Invalid atom in '%s': '%s'\n") % (file_name, atom))
+ continue
+ if len(elements) == 1:
+ writemsg(_("--- Missing real USE flag for '%s' in '%s'\n") % (atom, file_name), noiselevel=-1)
+ continue
+ real_flag = elements[1]
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag for '%s' in '%s': '%s'\n") % (atom, file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in elements[2:]:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n") %
+ (real_flag, atom, file_name, alias), noiselevel=-1)
+ else:
+ # Duplicated USE flag aliases in entries for different atoms
+ # matching the same package version are detected in getUseAliases().
+ if any(alias in v for k, v in file_dict.get(atom.cp, {}).get(atom, {}).items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n") %
+ (atom, file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(atom.cp, {}).setdefault(atom, {}).setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
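package.use.aliases follows the same idea but scopes each alias list to an atom; per the parsing above, each line is <atom> <real-flag> <alias>... . A hypothetical line and the resulting nesting:

    # Hypothetical profiles/package.use.aliases line:
    #
    #     =dev-lang/python-2.7* berkdb bdb
    #
    # is stored by the parser above as:
    #
    #     file_dict["dev-lang/python"][Atom("=dev-lang/python-2.7*")] \
    #         == {"berkdb": ["bdb"]}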
+ def _isStable(self, pkg):
+ if self._user_config:
+ try:
+ return pkg.stable
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ try:
+ pkg._metadata
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ # Since repoman uses different config instances for
+ # different profiles, we have to be careful to do the
+ # stable check against the correct profile here.
+ return self._is_stable(pkg)
+
+ def getUseMask(self, pkg=None, stable=None):
if pkg is None:
return frozenset(stack_lists(
self._usemask_list, incremental=True))
@@ -155,7 +303,12 @@ class UseManager(object):
repo = dep_getrepo(pkg)
pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
usemask = []
+
if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
repos = []
try:
@@ -166,30 +319,56 @@ class UseManager(object):
repos.append(pkg.repo)
for repo in repos:
usemask.append(self._repo_usemask_dict.get(repo, {}))
+ if stable:
+ usemask.append(self._repo_usestablemask_dict.get(repo, {}))
cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
if cpdict:
pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
if pkg_usemask:
usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
for i, pusemask_dict in enumerate(self._pusemask_list):
if self._usemask_list[i]:
usemask.append(self._usemask_list[i])
+ if stable and self._usestablemask_list[i]:
+ usemask.append(self._usestablemask_list[i])
cpdict = pusemask_dict.get(cp)
if cpdict:
pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
if pkg_usemask:
usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._pusestablemask_list[i].get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
return frozenset(stack_lists(usemask, incremental=True))
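Note the stacking order: each stable variant is appended immediately after its unconditional counterpart, at both repository and profile level, before stack_lists() resolves incremental '-flag' entries. A hypothetical call, assuming um is a configured UseManager and pkg carries repo/slot metadata:

    masked = um.getUseMask(pkg)                       # stability auto-detected
    masked_stable = um.getUseMask(pkg, stable=True)   # force the stable view
    assert isinstance(masked, frozenset)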
- def getUseForce(self, pkg=None):
+ def getUseForce(self, pkg=None, stable=None):
if pkg is None:
return frozenset(stack_lists(
self._useforce_list, incremental=True))
cp = getattr(pkg, "cp", None)
if cp is None:
- cp = cpv_getkey(remove_slot(pkg))
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
useforce = []
+
if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
repos = []
try:
@@ -200,25 +379,90 @@ class UseManager(object):
repos.append(pkg.repo)
for repo in repos:
useforce.append(self._repo_useforce_dict.get(repo, {}))
+ if stable:
+ useforce.append(self._repo_usestableforce_dict.get(repo, {}))
cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
if cpdict:
pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
if pkg_useforce:
useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._repo_pusestableforce_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
for i, puseforce_dict in enumerate(self._puseforce_list):
if self._useforce_list[i]:
useforce.append(self._useforce_list[i])
+ if stable and self._usestableforce_list[i]:
+ useforce.append(self._usestableforce_list[i])
cpdict = puseforce_dict.get(cp)
if cpdict:
pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
if pkg_useforce:
useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._pusestableforce_list[i].get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
return frozenset(stack_lists(useforce, incremental=True))
+ def getUseAliases(self, pkg):
+ if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
+ return {}
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ usealiases = {}
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ usealiases_dict = self._repo_usealiases_dict.get(repo, {})
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+ cp_usealiases_dict = self._repo_pusealiases_dict.get(repo, {}).get(cp)
+ if cp_usealiases_dict:
+ usealiases_dict_list = ordered_by_atom_specificity(cp_usealiases_dict, pkg)
+ for usealiases_dict in usealiases_dict_list:
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+
+ return usealiases
+
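The result maps each real flag to its alias list, merged across the repo's masters and then the repo itself, with atom-specific entries applied last. A hypothetical use:

    aliases = um.getUseAliases(pkg)   # e.g. {"abi_x86_32": ["multilib32"]}
    for real_flag, alias_list in aliases.items():
        print("%s <- %s" % (real_flag, ", ".join(alias_list)))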
def getPUSE(self, pkg):
cp = getattr(pkg, "cp", None)
if cp is None:
- cp = cpv_getkey(remove_slot(pkg))
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
ret = ""
cpdict = self._pusedict.get(cp)
if cpdict:
diff --git a/pym/portage/package/ebuild/_config/special_env_vars.py b/pym/portage/package/ebuild/_config/special_env_vars.py
index 6ed6d0542..74fedd689 100644
--- a/pym/portage/package/ebuild/_config/special_env_vars.py
+++ b/pym/portage/package/ebuild/_config/special_env_vars.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = (
'case_insensitive_vars', 'default_globals', 'env_blacklist', \
'environ_filter', 'environ_whitelist', 'environ_whitelist_re',
@@ -13,14 +15,17 @@ import re
# configuration files.
env_blacklist = frozenset((
"A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
- "EBUILD_FORCE_TEST", "EBUILD_PHASE", "EBUILD_SKIP_MANIFEST",
+ "EBUILD_FORCE_TEST", "EBUILD_PHASE",
+ "EBUILD_PHASE_FUNC", "EBUILD_SKIP_MANIFEST",
"ED", "EMERGE_FROM", "EPREFIX", "EROOT",
- "GREP_OPTIONS", "HOMEPAGE", "INHERITED", "IUSE",
+ "GREP_OPTIONS", "HDEPEND", "HOMEPAGE",
+ "INHERITED", "IUSE", "IUSE_EFFECTIVE",
"KEYWORDS", "LICENSE", "MERGE_TYPE",
"PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
- "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDIR_LOCKED",
- "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
- "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
+ "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT",
+ "PORTAGE_INTERNAL_CALLER", "PORTAGE_IUSE",
+ "PORTAGE_NONFATAL", "PORTAGE_PIPE_FD", "PORTAGE_REPO_NAME",
"PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "REPOSITORY",
"RESTRICT", "ROOT", "SLOT", "SRC_URI"
))
@@ -39,7 +44,7 @@ environ_whitelist += [
"ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "COLUMNS", "D",
"DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
"EBUILD_FORCE_TEST",
- "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "ED",
+ "EBUILD_PHASE", "EBUILD_PHASE_FUNC", "ECLASSDIR", "ECLASS_DEPTH", "ED",
"EMERGE_FROM", "EPREFIX", "EROOT",
"FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
"PKGDIR",
@@ -49,7 +54,8 @@ environ_whitelist += [
"PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
"PORTAGE_BINPKG_TMPFILE",
"PORTAGE_BIN_PATH",
- "PORTAGE_BUILDDIR", "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
+ "PORTAGE_BUILDDIR", "PORTAGE_BUILD_GROUP", "PORTAGE_BUILD_USER",
+ "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
"PORTAGE_COLORMAP", "PORTAGE_COMPRESS",
"PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
"PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
@@ -58,14 +64,16 @@ environ_whitelist += [
"PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES",
"PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
"PORTAGE_GID", "PORTAGE_GRPNAME",
+ "PORTAGE_INTERNAL_CALLER",
"PORTAGE_INST_GID", "PORTAGE_INST_UID",
- "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE",
- "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX",
- "PORTAGE_PYM_PATH", "PORTAGE_PYTHON", "PORTAGE_QUIET",
- "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
+ "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE", "PORTAGE_ECLASS_LOCATIONS",
+ "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX", "PORTAGE_PIPE_FD",
+ "PORTAGE_PYM_PATH", "PORTAGE_PYTHON",
+ "PORTAGE_PYTHONPATH", "PORTAGE_QUIET",
+ "PORTAGE_REPO_NAME", "PORTAGE_REPOSITORIES", "PORTAGE_RESTRICT",
"PORTAGE_SIGPIPE_STATUS",
"PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
- "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
+ "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTAGE_XATTR_EXCLUDE",
"PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
"REPLACING_VERSIONS", "REPLACED_BY_VERSION",
"ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
@@ -137,9 +145,11 @@ environ_filter += [
# portage config variables and variables set directly by portage
environ_filter += [
- "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
+ "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES",
+ "ACCEPT_RESTRICT", "AUTOCLEAN",
"CLEAN_DELAY", "COLLISION_IGNORE",
"CONFIG_PROTECT", "CONFIG_PROTECT_MASK",
+ "DCO_SIGNED_OFF_BY",
"EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
"EMERGE_LOG_DIR",
"EMERGE_WARNING_DELAY",
@@ -148,8 +158,9 @@ environ_filter += [
"FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
"GENTOO_MIRRORS", "NOCONFMEM", "O",
"PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
- "PORTAGE_BINHOST",
- "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED",
+ "PORTAGE_BINHOST", "PORTAGE_BINPKG_FORMAT",
+ "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_CHECKSUM_FILTER",
"PORTAGE_ELOG_CLASSES",
"PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
"PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
@@ -161,13 +172,20 @@ environ_filter += [
"PORTAGE_REPO_DUPLICATE_WARN",
"PORTAGE_RO_DISTDIRS",
"PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
- "PORTAGE_RSYNC_RETRIES", "PORTAGE_SYNC_STALE",
- "PORTAGE_USE", "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
+ "PORTAGE_RSYNC_RETRIES", "PORTAGE_SSH_OPTS", "PORTAGE_SYNC_STALE",
+ "PORTAGE_USE",
+ "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
"QUICKPKG_DEFAULT_OPTS", "REPOMAN_DEFAULT_OPTS",
"RESUMECOMMAND", "RESUMECOMMAND_FTP",
"RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
"RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",
- "SYNC", "UNINSTALL_IGNORE", "USE_EXPAND_HIDDEN", "USE_ORDER",
+ "UNINSTALL_IGNORE", "USE_EXPAND_HIDDEN", "USE_ORDER",
+ "__PORTAGE_HELPER"
+]
+
+# No longer supported variables
+environ_filter += [
+ "SYNC"
]
environ_filter = frozenset(environ_filter)
diff --git a/pym/portage/package/ebuild/_config/unpack_dependencies.py b/pym/portage/package/ebuild/_config/unpack_dependencies.py
new file mode 100644
index 000000000..137518949
--- /dev/null
+++ b/pym/portage/package/ebuild/_config/unpack_dependencies.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os, _supported_eapis
+from portage.dep import use_reduce
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabfile, writemsg
+
+def load_unpack_dependencies_configuration(repositories):
+ repo_dict = {}
+ for repo in repositories.repos_with_profiles():
+ for eapi in _supported_eapis:
+ if eapi_has_automatic_unpack_dependencies(eapi):
+ file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
+ lines = grabfile(file_name, recursive=True)
+ for line in lines:
+ elements = line.split()
+ suffix = elements[0].lower()
+ if len(elements) == 1:
+ writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
+ depend = " ".join(elements[1:])
+ try:
+ use_reduce(depend, eapi=eapi)
+ except InvalidDependString as e:
+ writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e)))
+ else:
+ repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ for repo_name in [x.name for x in repo.masters] + [repo.name]:
+ for eapi in repo_dict.get(repo_name, {}):
+ for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
+ ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ return ret
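For reference, each profiles/unpack_dependencies/<EAPI> file presumably maps an archive suffix to the DEPEND string required to unpack it, with masters' entries inherited and then overridden by the repo's own. A hypothetical file and lookup:

    # Hypothetical profiles/unpack_dependencies/<EAPI> content:
    #
    #     7z app-arch/p7zip
    #     rar || ( app-arch/rar app-arch/unrar )
    #
    # For a supporting EAPI, the function above would yield:
    #
    #     ret[repo.name][eapi]["7z"] == "app-arch/p7zip"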
diff --git a/pym/portage/package/ebuild/_ipc/QueryCommand.py b/pym/portage/package/ebuild/_ipc/QueryCommand.py
index d200fe80d..351c95628 100644
--- a/pym/portage/package/ebuild/_ipc/QueryCommand.py
+++ b/pym/portage/package/ebuild/_ipc/QueryCommand.py
@@ -1,12 +1,13 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import io
import portage
from portage import os
-from portage import _unicode_decode
-from portage.dep import Atom
+from portage.dep import Atom, _repo_name_re
from portage.eapi import eapi_has_repo_deps
from portage.elog import messages as elog_messages
from portage.exception import InvalidAtom
@@ -36,40 +37,47 @@ class QueryCommand(IpcCommand):
@return: tuple of (stdout, stderr, returncode)
"""
- cmd, root, atom_str = argv
-
- eapi = self.settings.get('EAPI')
- allow_repo = eapi_has_repo_deps(eapi)
- try:
- atom = Atom(atom_str, allow_repo=allow_repo)
- except InvalidAtom:
- return ('', 'invalid atom: %s\n' % atom_str, 2)
+ # Python 3:
+ # cmd, root, *args = argv
+ cmd = argv[0]
+ root = argv[1]
+ args = argv[2:]
warnings = []
- try:
- atom = Atom(atom_str, allow_repo=allow_repo, eapi=eapi)
- except InvalidAtom as e:
- warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))
-
- use = self.settings.get('PORTAGE_BUILT_USE')
- if use is None:
- use = self.settings['PORTAGE_USE']
-
- use = frozenset(use.split())
- atom = atom.evaluate_conditionals(use)
+ warnings_str = ''
db = self.get_db()
-
- warnings_str = ''
- if warnings:
- warnings_str = self._elog('eqawarn', warnings)
+ eapi = self.settings.get('EAPI')
root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
if root not in db:
- return ('', 'invalid ROOT: %s\n' % root, 2)
+ return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3)
+ portdb = db[root]["porttree"].dbapi
vardb = db[root]["vartree"].dbapi
+ if cmd in ('best_version', 'has_version'):
+ allow_repo = eapi_has_repo_deps(eapi)
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo)
+ except InvalidAtom:
+ return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2)
+
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ warnings.append("QA Notice: %s: %s" % (cmd, e))
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ use = frozenset(use.split())
+ atom = atom.evaluate_conditionals(use)
+
+ if warnings:
+ warnings_str = self._elog('eqawarn', warnings)
+
if cmd == 'has_version':
if vardb.match(atom):
returncode = 0
@@ -79,8 +87,35 @@ class QueryCommand(IpcCommand):
elif cmd == 'best_version':
m = best(vardb.match(atom))
return ('%s\n' % m, warnings_str, 0)
+ elif cmd in ('master_repositories', 'repository_path', 'available_eclasses', 'eclass_path', 'license_path'):
+ repo = _repo_name_re.match(args[0])
+ if repo is None:
+ return ('', '%s: Invalid repository: %s\n' % (cmd, args[0]), 2)
+ try:
+ repo = portdb.repositories[args[0]]
+ except KeyError:
+ return ('', warnings_str, 1)
+
+ if cmd == 'master_repositories':
+ return ('%s\n' % ' '.join(x.name for x in repo.masters), warnings_str, 0)
+ elif cmd == 'repository_path':
+ return ('%s\n' % repo.location, warnings_str, 0)
+ elif cmd == 'available_eclasses':
+ return ('%s\n' % ' '.join(sorted(repo.eclass_db.eclasses)), warnings_str, 0)
+ elif cmd == 'eclass_path':
+ try:
+ eclass = repo.eclass_db.eclasses[args[1]]
+ except KeyError:
+ return ('', warnings_str, 1)
+ return ('%s\n' % eclass.location, warnings_str, 0)
+ elif cmd == 'license_path':
+ paths = reversed([os.path.join(x.location, 'licenses', args[1]) for x in list(repo.masters) + [repo]])
+ for path in paths:
+ if os.path.exists(path):
+ return ('%s\n' % path, warnings_str, 0)
+ return ('', warnings_str, 1)
else:
- return ('', 'invalid command: %s\n' % cmd, 2)
+ return ('', 'Invalid command: %s\n' % cmd, 3)
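The return protocol is a (stdout, stderr, returncode) triple, and the hunk above settles the code space: 0 success, 1 not found, 2 invalid input, 3 invalid usage. A sketch of caller-side handling, where query stands in for a bound QueryCommand instance (hypothetical name):

    stdout, stderr, rc = query(["repository_path", "/", "gentoo"])
    if rc == 0:
        repo_path = stdout.strip()
    elif rc == 1:
        repo_path = None            # unknown repository/eclass/license
    else:
        # 2 = invalid atom/repository, 3 = invalid ROOT or command
        raise ValueError(stderr)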
def _elog(self, elog_funcname, lines):
"""
diff --git a/pym/portage/package/ebuild/_eapi_invalid.py b/pym/portage/package/ebuild/_metadata_invalid.py
index d23677d23..bcf1f7fcd 100644
--- a/pym/portage/package/ebuild/_eapi_invalid.py
+++ b/pym/portage/package/ebuild/_metadata_invalid.py
@@ -28,19 +28,6 @@ def eapi_invalid(self, cpv, repo_name, settings,
"assignment on line: %s") %
(eapi_var, eapi_lineno))
- if 'parse-eapi-ebuild-head' in settings.features:
- msg.extend(textwrap.wrap(("NOTE: This error will soon"
- " become unconditionally fatal in a future version of Portage,"
- " but at this time, it can by made non-fatal by setting"
- " FEATURES=-parse-eapi-ebuild-head in"
- " make.conf."), 70))
- else:
- msg.extend(textwrap.wrap(("NOTE: This error will soon"
- " become unconditionally fatal in a future version of Portage."
- " At the earliest opportunity, please enable"
- " FEATURES=parse-eapi-ebuild-head in make.conf in order to"
- " make this error fatal."), 70))
-
if portage.data.secpass >= 2:
# TODO: improve elog permission error handling (bug #416231)
for line in msg:
diff --git a/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py b/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
new file mode 100644
index 000000000..44e257664
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
@@ -0,0 +1,43 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import (FileNotFound,
+ PermissionDenied, PortagePackageException)
+from portage.localization import _
+from portage.util._async.ForkProcess import ForkProcess
+
+class ManifestProcess(ForkProcess):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "repo_config")
+
+ MODIFIED = 16
+
+ def _run(self):
+ mf = self.repo_config.load_manifest(
+ os.path.join(self.repo_config.location, self.cp),
+ self.distdir, fetchlist_dict=self.fetchlist_dict)
+
+ try:
+ mf.create(assumeDistHashesAlways=True)
+ except FileNotFound as e:
+ portage.writemsg(_("!!! File %s doesn't exist, can't update "
+ "Manifest\n") % e, noiselevel=-1)
+ return 1
+
+ except PortagePackageException as e:
+ portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+ return 1
+
+ try:
+ modified = mf.write(sign=False)
+ except PermissionDenied as e:
+ portage.writemsg("!!! %s: %s\n" % (_("Permission Denied"), e,),
+ noiselevel=-1)
+ return 1
+ else:
+ if modified:
+ return self.MODIFIED
+ else:
+ return os.EX_OK
diff --git a/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py b/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
new file mode 100644
index 000000000..38ac4825e
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
@@ -0,0 +1,93 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.dep import _repo_separator
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util._async.AsyncScheduler import AsyncScheduler
+from .ManifestTask import ManifestTask
+
+class ManifestScheduler(AsyncScheduler):
+
+ def __init__(self, portdb, cp_iter=None,
+ gpg_cmd=None, gpg_vars=None, force_sign_key=None, **kwargs):
+
+ AsyncScheduler.__init__(self, **kwargs)
+
+ self._portdb = portdb
+
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ self._cp_iter = cp_iter
+ self._gpg_cmd = gpg_cmd
+ self._gpg_vars = gpg_vars
+ self._force_sign_key = force_sign_key
+ self._task_iter = self._iter_tasks()
+
+ def _next_task(self):
+ return next(self._task_iter)
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding quicker,
+ # and in order to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def _iter_tasks(self):
+ portdb = self._portdb
+ distdir = portdb.settings["DISTDIR"]
+ disabled_repos = set()
+
+ for cp in self._cp_iter:
+ if self._terminated.is_set():
+ break
+ # We iterate over portdb.porttrees, since it's common to
+ # tweak this attribute in order to adjust repo selection.
+ for mytree in portdb.porttrees:
+ if self._terminated.is_set():
+ break
+ repo_config = portdb.repositories.get_repo_for_location(mytree)
+ if not repo_config.create_manifest:
+ if repo_config.name not in disabled_repos:
+ disabled_repos.add(repo_config.name)
+ portage.writemsg(
+ _(">>> Skipping creating Manifest for %s%s%s; "
+ "repository is configured to not use them\n") %
+ (cp, _repo_separator, repo_config.name),
+ noiselevel=-1)
+ continue
+ cpv_list = portdb.cp_list(cp, mytree=[repo_config.location])
+ if not cpv_list:
+ continue
+ fetchlist_dict = {}
+ try:
+ for cpv in cpv_list:
+ fetchlist_dict[cpv] = \
+ list(portdb.getFetchMap(cpv, mytree=mytree))
+ except InvalidDependString as e:
+ portage.writemsg(
+ _("!!! %s%s%s: SRC_URI: %s\n") %
+ (cp, _repo_separator, repo_config.name, e),
+ noiselevel=-1)
+ self._error_count += 1
+ continue
+
+ yield ManifestTask(cp=cp, distdir=distdir,
+ fetchlist_dict=fetchlist_dict, repo_config=repo_config,
+ gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars,
+ force_sign_key=self._force_sign_key)
+
+ def _task_exit(self, task):
+
+ if task.returncode != os.EX_OK:
+ if not self._terminated_tasks:
+ portage.writemsg(
+ "Error processing %s%s%s, continuing...\n" %
+ (task.cp, _repo_separator, task.repo_config.name),
+ noiselevel=-1)
+
+ AsyncScheduler._task_exit(self, task)
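A hypothetical driver, in the spirit of how egencache might wire this up; the max_jobs keyword is an AsyncScheduler option and is assumed here:

    scheduler = ManifestScheduler(portdb,
        gpg_cmd=None,      # plain (unsigned) Manifests
        max_jobs=4)
    scheduler.start()
    returncode = scheduler.wait()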
diff --git a/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py b/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
new file mode 100644
index 000000000..0ee2b910d
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
@@ -0,0 +1,186 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+import subprocess
+
+from portage import os
+from portage import _unicode_encode, _encodings
+from portage.const import MANIFEST2_IDENTIFIERS
+from portage.util import (atomic_ofstream, grablines,
+ shlex_split, varexpand, writemsg)
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+from _emerge.PipeReader import PipeReader
+from .ManifestProcess import ManifestProcess
+
+class ManifestTask(CompositeTask):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "gpg_cmd",
+ "gpg_vars", "repo_config", "force_sign_key", "_manifest_path")
+
+ _PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"
+ _manifest_line_re = re.compile(r'^(%s) ' % "|".join(MANIFEST2_IDENTIFIERS))
+ _gpg_key_id_re = re.compile(r'^[0-9A-F]*$')
+ _gpg_key_id_lengths = (8, 16, 24, 32, 40)
+
+ def _start(self):
+ self._manifest_path = os.path.join(self.repo_config.location,
+ self.cp, "Manifest")
+ manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir,
+ fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config,
+ scheduler=self.scheduler)
+ self._start_task(manifest_proc, self._manifest_proc_exit)
+
+ def _manifest_proc_exit(self, manifest_proc):
+ self._assert_current(manifest_proc)
+ if manifest_proc.returncode not in (os.EX_OK, manifest_proc.MODIFIED):
+ self.returncode = manifest_proc.returncode
+ self._current_task = None
+ self.wait()
+ return
+
+ modified = manifest_proc.returncode == manifest_proc.MODIFIED
+ sign = self.gpg_cmd is not None
+
+ if not modified and sign:
+ sign = self._need_signature()
+ if not sign and self.force_sign_key is not None \
+ and os.path.exists(self._manifest_path):
+ self._check_sig_key()
+ return
+
+ if not sign or not os.path.exists(self._manifest_path):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ self._start_gpg_proc()
+
+ def _check_sig_key(self):
+ null_fd = os.open('/dev/null', os.O_RDONLY)
+ popen_proc = PopenProcess(proc=subprocess.Popen(
+ ["gpg", "--verify", self._manifest_path],
+ stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader())
+ os.close(null_fd)
+ popen_proc.pipe_reader.input_files = {
+ "producer" : popen_proc.proc.stdout}
+ self._start_task(popen_proc, self._check_sig_key_exit)
+
+ @staticmethod
+ def _parse_gpg_key(output):
+ """
+ Returns the first token which appears to represent a gpg key
+ id, or None if there is no such token.
+ """
+ regex = ManifestTask._gpg_key_id_re
+ lengths = ManifestTask._gpg_key_id_lengths
+ for token in output.split():
+ m = regex.match(token)
+ if m is not None and len(m.group(0)) in lengths:
+ return m.group(0)
+ return None
+
+ @staticmethod
+ def _normalize_gpg_key(key_str):
+ """
+ Strips leading "0x" and trailing "!", and converts to uppercase
+ (intended to be the same format as that in gpg --verify output).
+ """
+ key_str = key_str.upper()
+ if key_str.startswith("0X"):
+ key_str = key_str[2:]
+ key_str = key_str.rstrip("!")
+ return key_str
+
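A couple of worked examples of the normalization above:

    # "0x1234abcd" -> "1234ABCD"   (leading "0x" stripped after upcasing)
    # "ABCD1234!"  -> "ABCD1234"   (gpg's trailing "!" exact-key marker removed)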
+ def _check_sig_key_exit(self, proc):
+ self._assert_current(proc)
+
+ parsed_key = self._parse_gpg_key(
+ proc.pipe_reader.getvalue().decode('utf_8', 'replace'))
+ if parsed_key is not None and \
+ self._normalize_gpg_key(parsed_key) == \
+ self._normalize_gpg_key(self.force_sign_key):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ self._strip_sig(self._manifest_path)
+ self._start_gpg_proc()
+
+ @staticmethod
+ def _strip_sig(manifest_path):
+ """
+ Strip an existing signature from a Manifest file.
+ """
+ line_re = ManifestTask._manifest_line_re
+ lines = grablines(manifest_path)
+ f = None
+ try:
+ f = atomic_ofstream(manifest_path)
+ for line in lines:
+ if line_re.match(line) is not None:
+ f.write(line)
+ f.close()
+ f = None
+ finally:
+ if f is not None:
+ f.abort()
+
+ def _start_gpg_proc(self):
+ gpg_vars = self.gpg_vars
+ if gpg_vars is None:
+ gpg_vars = {}
+ else:
+ gpg_vars = gpg_vars.copy()
+ gpg_vars["FILE"] = self._manifest_path
+ gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
+ gpg_cmd = shlex_split(gpg_cmd)
+ gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+ # PipeLogger echoes output and efficiently monitors for process
+ # exit by listening for the stdout EOF event.
+ gpg_proc.pipe_reader = PipeLogger(background=self.background,
+ input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
+ self._start_task(gpg_proc, self._gpg_proc_exit)
+
+ def _gpg_proc_exit(self, gpg_proc):
+ if self._default_exit(gpg_proc) != os.EX_OK:
+ self.wait()
+ return
+
+ rename_args = (self._manifest_path + ".asc", self._manifest_path)
+ try:
+ os.rename(*rename_args)
+ except OSError as e:
+ writemsg("!!! rename('%s', '%s'): %s\n" % rename_args + (e,),
+ noiselevel=-1)
+ try:
+ os.unlink(self._manifest_path + ".asc")
+ except OSError:
+ pass
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+
+ self._current_task = None
+ self.wait()
+
+ def _need_signature(self):
+ try:
+ with open(_unicode_encode(self._manifest_path,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ return self._PGP_HEADER not in f.readline()
+ except IOError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return False
+ raise
diff --git a/pym/portage/package/ebuild/_parallel_manifest/__init__.py b/pym/portage/package/ebuild/_parallel_manifest/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/package/ebuild/_spawn_nofetch.py b/pym/portage/package/ebuild/_spawn_nofetch.py
index 94f8c79a3..0fc53c8ca 100644
--- a/pym/portage/package/ebuild/_spawn_nofetch.py
+++ b/pym/portage/package/ebuild/_spawn_nofetch.py
@@ -1,8 +1,9 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
+import portage
from portage import os
from portage import shutil
from portage.const import EBUILD_PHASES
@@ -10,10 +11,12 @@ from portage.elog import elog_process
from portage.package.ebuild.config import config
from portage.package.ebuild.doebuild import doebuild_environment
from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.EbuildPhase import EbuildPhase
-from _emerge.PollScheduler import PollScheduler
-def spawn_nofetch(portdb, ebuild_path, settings=None):
+def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
"""
This spawns pkg_nofetch if appropriate. The settings parameter
is useful only if setcpv has already been called in order
@@ -47,7 +50,7 @@ def spawn_nofetch(portdb, ebuild_path, settings=None):
settings = config(clone=settings)
if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
- return
+ return os.EX_OK
# We must create our private PORTAGE_TMPDIR before calling
# doebuild_environment(), since lots of variables such
@@ -59,7 +62,7 @@ def spawn_nofetch(portdb, ebuild_path, settings=None):
settings['PORTAGE_TMPDIR'] = private_tmpdir
settings.backup_changes('PORTAGE_TMPDIR')
# private temp dir was just created, so it's not locked yet
- settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+ settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
try:
doebuild_environment(ebuild_path, 'nofetch',
@@ -73,14 +76,18 @@ def spawn_nofetch(portdb, ebuild_path, settings=None):
if 'fetch' not in restrict and \
'nofetch' not in defined_phases:
- return
+ return os.EX_OK
prepare_build_dirs(settings=settings)
ebuild_phase = EbuildPhase(background=False,
- phase='nofetch', scheduler=PollScheduler().sched_iface,
- settings=settings)
+ phase='nofetch',
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ fd_pipes=fd_pipes, settings=settings)
ebuild_phase.start()
ebuild_phase.wait()
elog_process(settings.mycpv, settings)
finally:
shutil.rmtree(private_tmpdir)
+
+ return ebuild_phase.returncode
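Since spawn_nofetch() now reports a status rather than returning None, callers can propagate pkg_nofetch failures; a hedged usage sketch, assuming a configured portdbapi instance:

    rc = spawn_nofetch(portdb, ebuild_path, settings=settings)
    if rc != os.EX_OK:
        portage.writemsg("!!! pkg_nofetch exited with status %s\n" % rc,
            noiselevel=-1)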
diff --git a/pym/portage/package/ebuild/config.py b/pym/portage/package/ebuild/config.py
index 2fa799f7e..e104501dc 100644
--- a/pym/portage/package/ebuild/config.py
+++ b/pym/portage/package/ebuild/config.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
'autouse', 'best_from_dict', 'check_config_instance', 'config',
]
@@ -19,20 +21,21 @@ from _emerge.Package import Package
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.data:portage_gid',
+ 'portage.dbapi.vartree:vartree',
+ 'portage.package.ebuild.doebuild:_phase_func_map',
)
from portage import bsd_chflags, \
load_mod, os, selinux, _unicode_decode
from portage.const import CACHE_PATH, \
DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \
- MODULES_FILE_PATH, \
+ MODULES_FILE_PATH, PORTAGE_BASE_PATH, \
PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \
USER_VIRTUALS_FILE
from portage.dbapi import dbapi
from portage.dbapi.porttree import portdbapi
-from portage.dbapi.vartree import vartree
from portage.dep import Atom, isvalidatom, match_from_list, use_reduce, _repo_separator, _slot_separator
from portage.eapi import eapi_exports_AA, eapi_exports_merge_type, \
- eapi_supports_prefix, eapi_exports_replace_vars
+ eapi_supports_prefix, eapi_exports_replace_vars, _get_eapi_attrs
from portage.env.loaders import KeyValuePairFileLoader
from portage.exception import InvalidDependString, PortageException
from portage.localization import _
@@ -42,7 +45,8 @@ from portage.repository.config import load_repository_config
from portage.util import ensure_dirs, getconfig, grabdict, \
grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
- writemsg, writemsg_level
+ writemsg, writemsg_level, _eapi_cache
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.versions import catpkgsplit, catsplit, cpv_getkey, _pkg_str
from portage.package.ebuild._config import special_env_vars
@@ -55,10 +59,30 @@ from portage.package.ebuild._config.LocationsManager import LocationsManager
from portage.package.ebuild._config.MaskManager import MaskManager
from portage.package.ebuild._config.VirtualsManager import VirtualsManager
from portage.package.ebuild._config.helper import ordered_by_atom_specificity, prune_incremental
+from portage.package.ebuild._config.unpack_dependencies import load_unpack_dependencies_configuration
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
+_feature_flags_cache = {}
+
+def _get_feature_flags(eapi_attrs):
+ cache_key = (eapi_attrs.feature_flag_test, eapi_attrs.feature_flag_targetroot)
+ flags = _feature_flags_cache.get(cache_key)
+ if flags is not None:
+ return flags
+
+ flags = []
+ if eapi_attrs.feature_flag_test:
+ flags.append("test")
+ if eapi_attrs.feature_flag_targetroot:
+ flags.append("targetroot")
+
+ flags = frozenset(flags)
+ _feature_flags_cache[cache_key] = flags
+ return flags
+
def autouse(myvartree, use_cache=1, mysettings=None):
warnings.warn("portage.autouse() is deprecated",
DeprecationWarning, stacklevel=2)
@@ -123,9 +147,9 @@ class config(object):
"""
_constant_keys = frozenset(['PORTAGE_BIN_PATH', 'PORTAGE_GID',
- 'PORTAGE_PYM_PATH'])
+ 'PORTAGE_PYM_PATH', 'PORTAGE_PYTHONPATH'])
- _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI',
+ _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI', 'HDEPEND',
'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT',
'repository', 'RESTRICT', 'LICENSE',)
@@ -146,7 +170,7 @@ class config(object):
def __init__(self, clone=None, mycpv=None, config_profile_path=None,
config_incrementals=None, config_root=None, target_root=None,
eprefix=None, local_config=True, env=None,
- _unmatched_removal=False):
+ _unmatched_removal=False, repositories=None):
"""
@param clone: If provided, init will use deepcopy to copy by value the instance.
@type clone: Instance of config class.
@@ -160,7 +184,8 @@ class config(object):
@type config_incrementals: List
@param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
@type config_root: String
- @param target_root: __init__ override of $ROOT env variable.
+ @param target_root: the target root, which typically corresponds to the
+ value of the $ROOT env variable (default is /)
@type target_root: String
@param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX)
@type eprefix: String
@@ -173,8 +198,14 @@ class config(object):
@param _unmatched_removal: Enabled by repoman when the
--unmatched-removal option is given.
@type _unmatched_removal: Boolean
+ @param repositories: Configuration of repositories.
+ Defaults to portage.repository.config.load_repository_config().
+ @type repositories: Instance of portage.repository.config.RepoConfigLoader class.
"""
+ # This is important when config is reloaded after emerge --sync.
+ _eapi_cache.clear()
+
# When initializing the global portage.settings instance, avoid
# raising exceptions whenever possible since exceptions thrown
# from 'import portage' or 'import portage.exceptions' statements
@@ -192,8 +223,10 @@ class config(object):
self.uvlist = []
self._accept_chost_re = None
self._accept_properties = None
+ self._accept_restrict = None
self._features_overrides = []
self._make_defaults = None
+ self._parent_stable = None
# _unknown_features records unknown features that
# have triggered warning messages, and ensures that
@@ -215,6 +248,8 @@ class config(object):
self.profiles = clone.profiles
self.packages = clone.packages
self.repositories = clone.repositories
+ self.unpack_dependencies = clone.unpack_dependencies
+ self._iuse_effective = clone._iuse_effective
self._iuse_implicit_match = clone._iuse_implicit_match
self._non_user_variables = clone._non_user_variables
self._env_d_blacklist = clone._env_d_blacklist
@@ -277,6 +312,8 @@ class config(object):
self._accept_properties = copy.deepcopy(clone._accept_properties)
self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
+ self._accept_restrict = copy.deepcopy(clone._accept_restrict)
+ self._paccept_restrict = copy.deepcopy(clone._paccept_restrict)
self._penvdict = copy.deepcopy(clone._penvdict)
self._expand_map = copy.deepcopy(clone._expand_map)
@@ -294,15 +331,30 @@ class config(object):
eprefix = locations_manager.eprefix
config_root = locations_manager.config_root
abs_user_config = locations_manager.abs_user_config
+ make_conf_paths = [
+ os.path.join(config_root, 'etc', 'make.conf'),
+ os.path.join(config_root, MAKE_CONF_FILE)
+ ]
+ try:
+ if os.path.samefile(*make_conf_paths):
+ make_conf_paths.pop()
+ except OSError:
+ pass
- make_conf = getconfig(
- os.path.join(config_root, MAKE_CONF_FILE),
- tolerant=tolerant, allow_sourcing=True) or {}
-
- make_conf.update(getconfig(
- os.path.join(abs_user_config, 'make.conf'),
- tolerant=tolerant, allow_sourcing=True,
- expand=make_conf) or {})
+ make_conf_count = 0
+ make_conf = {}
+ for x in make_conf_paths:
+ mygcfg = getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=make_conf, recursive=True)
+ if mygcfg is not None:
+ make_conf.update(mygcfg)
+ make_conf_count += 1
+
+ if make_conf_count == 2:
+ writemsg("!!! %s\n" %
+ _("Found 2 make.conf files, using both '%s' and '%s'") %
+ tuple(make_conf_paths), noiselevel=-1)
# Allow ROOT setting to come from make.conf if it's not overridden
# by the constructor argument (from the calling environment).
@@ -335,8 +387,23 @@ class config(object):
# Allow make.globals to set default paths relative to ${EPREFIX}.
expand_map["EPREFIX"] = eprefix
- make_globals = getconfig(os.path.join(
- self.global_config_path, 'make.globals'),
+ if portage._not_installed:
+ make_globals_path = os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals")
+ else:
+ make_globals_path = os.path.join(self.global_config_path, "make.globals")
+ old_make_globals = os.path.join(config_root, "etc", "make.globals")
+ if os.path.isfile(old_make_globals) and \
+ not os.path.samefile(make_globals_path, old_make_globals):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found obsolete make.globals file: "
+ "'%s', (using '%s' instead)") %
+ (old_make_globals, make_globals_path),
+ noiselevel=-1)
+
+ make_globals = getconfig(make_globals_path,
tolerant=tolerant, expand=expand_map)
if make_globals is None:
make_globals = {}
@@ -426,6 +493,7 @@ class config(object):
known_repos = []
portdir = ""
portdir_overlay = ""
+ portdir_sync = None
for confs in [make_globals, make_conf, self.configdict["env"]]:
v = confs.get("PORTDIR")
if v is not None:
@@ -435,12 +503,52 @@ class config(object):
if v is not None:
portdir_overlay = v
known_repos.extend(shlex_split(v))
+ v = confs.get("SYNC")
+ if v is not None:
+ portdir_sync = v
+
known_repos = frozenset(known_repos)
self["PORTDIR"] = portdir
self["PORTDIR_OVERLAY"] = portdir_overlay
+ if portdir_sync:
+ self["SYNC"] = portdir_sync
self.lookuplist = [self.configdict["env"]]
- self.repositories = load_repository_config(self)
+ if repositories is None:
+ self.repositories = load_repository_config(self)
+ else:
+ self.repositories = repositories
+
+ self['PORTAGE_REPOSITORIES'] = self.repositories.config_string()
+ self.backup_changes('PORTAGE_REPOSITORIES')
+
+	# Fill in PORTDIR and PORTDIR_OVERLAY for backward compatibility.
+ main_repo = self.repositories.mainRepo()
+ if main_repo is not None:
+ self["PORTDIR"] = main_repo.user_location
+ self.backup_changes("PORTDIR")
+ expand_map["PORTDIR"] = self["PORTDIR"]
+ # repoman controls PORTDIR_OVERLAY via the environment, so no
+ # special cases are needed here.
+ portdir_overlay = list(self.repositories.repoUserLocationList())
+ if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
+ portdir_overlay = portdir_overlay[1:]
+
+ new_ov = []
+ if portdir_overlay:
+ for ov in portdir_overlay:
+ ov = normalize_path(ov)
+ if isdir_raise_eaccess(ov) or portage._sync_mode:
+ new_ov.append(portage._shell_quote(ov))
+ else:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+ " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ self["PORTDIR_OVERLAY"] = " ".join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+ expand_map["PORTDIR_OVERLAY"] = self["PORTDIR_OVERLAY"]
+
+ locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
locations_manager.load_profiles(self.repositories, known_repos)
profiles_complex = locations_manager.profiles_complex
@@ -460,12 +568,13 @@ class config(object):
x = Atom(x.lstrip('*'))
self.prevmaskdict.setdefault(x.cp, []).append(x)
+ self.unpack_dependencies = load_unpack_dependencies_configuration(self.repositories)
mygcfg = {}
- if self.profiles:
- mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
- tolerant=tolerant, expand=expand_map)
- for x in self.profiles]
+ if profiles_complex:
+ mygcfg_dlists = [getconfig(os.path.join(x.location, "make.defaults"),
+ tolerant=tolerant, expand=expand_map, recursive=x.portage1_directories)
+ for x in profiles_complex]
self._make_defaults = mygcfg_dlists
mygcfg = stack_dicts(mygcfg_dlists,
incrementals=self.incrementals)
@@ -474,15 +583,11 @@ class config(object):
self.configlist.append(mygcfg)
self.configdict["defaults"]=self.configlist[-1]
- mygcfg = getconfig(
- os.path.join(config_root, MAKE_CONF_FILE),
- tolerant=tolerant, allow_sourcing=True,
- expand=expand_map) or {}
-
- mygcfg.update(getconfig(
- os.path.join(abs_user_config, 'make.conf'),
- tolerant=tolerant, allow_sourcing=True,
- expand=expand_map) or {})
+ mygcfg = {}
+ for x in make_conf_paths:
+ mygcfg.update(getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=expand_map, recursive=True) or {})
# Don't allow the user to override certain variables in make.conf
profile_only_variables = self.configdict["defaults"].get(
@@ -535,54 +640,25 @@ class config(object):
self.backup_changes("PORTAGE_CONFIGROOT")
self["ROOT"] = target_root
self.backup_changes("ROOT")
-
- # The PORTAGE_OVERRIDE_EPREFIX variable propagates the EPREFIX
- # of this config instance to any portage commands or API
- # consumers running in subprocesses.
self["EPREFIX"] = eprefix
self.backup_changes("EPREFIX")
- self["PORTAGE_OVERRIDE_EPREFIX"] = eprefix
- self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
self["EROOT"] = eroot
self.backup_changes("EROOT")
+ # The prefix of the running portage instance is used in the
+ # ebuild environment to implement the --host-root option for
+ # best_version and has_version.
+ self["PORTAGE_OVERRIDE_EPREFIX"] = portage.const.EPREFIX
+ self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
+
self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
+ self._paccept_restrict = portage.dep.ExtendedAtomDict(dict)
self._penvdict = portage.dep.ExtendedAtomDict(dict)
- #filling PORTDIR and PORTDIR_OVERLAY variable for compatibility
- main_repo = self.repositories.mainRepo()
- if main_repo is not None:
- self["PORTDIR"] = main_repo.user_location
- self.backup_changes("PORTDIR")
-
- # repoman controls PORTDIR_OVERLAY via the environment, so no
- # special cases are needed here.
- portdir_overlay = list(self.repositories.repoUserLocationList())
- if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
- portdir_overlay = portdir_overlay[1:]
-
- new_ov = []
- if portdir_overlay:
- shell_quote_re = re.compile(r"[\s\\\"'$`]")
- for ov in portdir_overlay:
- ov = normalize_path(ov)
- if os.path.isdir(ov):
- if shell_quote_re.search(ov) is not None:
- ov = portage._shell_quote(ov)
- new_ov.append(ov)
- else:
- writemsg(_("!!! Invalid PORTDIR_OVERLAY"
- " (not a dir): '%s'\n") % ov, noiselevel=-1)
-
- self["PORTDIR_OVERLAY"] = " ".join(new_ov)
- self.backup_changes("PORTDIR_OVERLAY")
-
- locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
-
self._repo_make_defaults = {}
for repo in self.repositories.repos_with_profiles():
d = getconfig(os.path.join(repo.location, "profiles", "make.defaults"),
- tolerant=tolerant, expand=self.configdict["globals"].copy()) or {}
+ tolerant=tolerant, expand=self.configdict["globals"].copy(), recursive=repo.portage1_profiles) or {}
if d:
for k in chain(self._env_blacklist,
profile_only_variables, self._global_only_vars):
@@ -590,7 +666,8 @@ class config(object):
self._repo_make_defaults[repo.name] = d
#Read all USE related files from profiles and optionally from user config.
- self._use_manager = UseManager(self.repositories, profiles_complex, abs_user_config, user_config=local_config)
+ self._use_manager = UseManager(self.repositories, profiles_complex,
+ abs_user_config, self._isStable, user_config=local_config)
#Initialize all USE related variables we track ourselves.
self.usemask = self._use_manager.getUseMask()
self.useforce = self._use_manager.getUseForce()
@@ -620,6 +697,20 @@ class config(object):
for k, v in propdict.items():
self._ppropertiesdict.setdefault(k.cp, {})[k] = v
+ # package.accept_restrict
+ d = grabdict_package(os.path.join(
+ abs_user_config, "package.accept_restrict"),
+ recursive=True, allow_wildcard=True,
+ allow_repo=True, verify_eapi=False)
+ v = d.pop("*/*", None)
+ if v is not None:
+ if "ACCEPT_RESTRICT" in self.configdict["conf"]:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] += " " + " ".join(v)
+ else:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] = " ".join(v)
+ for k, v in d.items():
+ self._paccept_restrict.setdefault(k.cp, {})[k] = v
+
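A rough, self-contained illustration of the wiring above; grabdict_package() and ExtendedAtomDict are portage internals, so a plain dict stands in for the parsed package.accept_restrict. The '*/*' wildcard entry is folded into the global ACCEPT_RESTRICT, while per-atom entries are kept for more specific matching later:

# Hypothetical parsed package.accept_restrict: atom -> tokens.
parsed = {
    "*/*": ["bindist"],
    "dev-foo/bar": ["fetch"],
}

conf = {"ACCEPT_RESTRICT": "mirror"}
per_atom = {}

tokens = parsed.pop("*/*", None)
if tokens is not None:
    # The wildcard behaves like an append to ACCEPT_RESTRICT.
    if "ACCEPT_RESTRICT" in conf:
        conf["ACCEPT_RESTRICT"] += " " + " ".join(tokens)
    else:
        conf["ACCEPT_RESTRICT"] = " ".join(tokens)

for atom, tokens in parsed.items():
    per_atom[atom] = tokens

print(conf["ACCEPT_RESTRICT"])  # mirror bindist
print(per_atom)                 # {'dev-foo/bar': ['fetch']}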
#package.env
penvdict = grabdict_package(os.path.join(
abs_user_config, "package.env"), recursive=1, allow_wildcard=True, \
@@ -706,21 +797,9 @@ class config(object):
self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d"
self.depcachedir = DEPCACHE_PATH
- if eprefix:
- # See comments about make.globals and EPREFIX
- # above. DEPCACHE_PATH is similar.
- if target_root == "/":
- # case (1) above
- self.depcachedir = os.path.join(eprefix,
- DEPCACHE_PATH.lstrip(os.sep))
- else:
- # case (2) above
- # For now, just assume DEPCACHE_PATH is relative
- # to EPREFIX.
- # TODO: Pass in more info to the constructor,
- # so we know the host system configuration.
- self.depcachedir = os.path.join(eprefix,
- DEPCACHE_PATH.lstrip(os.sep))
+ if portage.const.EPREFIX:
+ self.depcachedir = os.path.join(portage.const.EPREFIX,
+ DEPCACHE_PATH.lstrip(os.sep))
if self.get("PORTAGE_DEPCACHEDIR", None):
self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
@@ -787,12 +866,17 @@ class config(object):
self[var] = default_val
self.backup_changes(var)
+ if portage._internal_caller:
+ self["PORTAGE_INTERNAL_CALLER"] = "1"
+ self.backup_changes("PORTAGE_INTERNAL_CALLER")
+
# initialize self.features
self.regenerate()
if bsd_chflags:
self.features.add('chflags')
+ self._iuse_effective = self._calc_iuse_effective()
self._iuse_implicit_match = _iuse_implicit_match_cache(self)
self._validate_commands()
@@ -802,11 +886,6 @@ class config(object):
self[k] = self[k].lower()
self.backup_changes(k)
- if main_repo is not None and not main_repo.sync:
- main_repo_sync = self.get("SYNC")
- if main_repo_sync:
- main_repo.sync = main_repo_sync
-
# The first constructed config object initializes these modules,
# and subsequent calls to the _init() functions have no effect.
portage.output._init(config_root=self['PORTAGE_CONFIGROOT'])
@@ -949,13 +1028,23 @@ class config(object):
writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
noiselevel=-1)
- profile_broken = not self.profile_path or \
- not os.path.exists(os.path.join(self.profile_path, "parent")) and \
- os.path.exists(os.path.join(self["PORTDIR"], "profiles"))
+ profile_broken = False
+
+ if not self.profile_path:
+ profile_broken = True
+ else:
+ # If any one of these files exists, then
+ # the profile is considered valid.
+ for x in ("make.defaults", "parent",
+ "packages", "use.force", "use.mask"):
+ if exists_raise_eaccess(os.path.join(self.profile_path, x)):
+ break
+ else:
+ profile_broken = True
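The for/else above reads as "broken unless some marker file exists": the else clause runs only when the loop never hits break. A self-contained sketch of the same check, substituting plain os.path.exists for portage's exists_raise_eaccess helper:

import os

def profile_looks_valid(profile_path):
    if not profile_path:
        return False
    for name in ("make.defaults", "parent",
            "packages", "use.force", "use.mask"):
        if os.path.exists(os.path.join(profile_path, name)):
            return True  # one marker file is enough
    return False

# Hypothetical profile location:
print(profile_looks_valid("/etc/portage/make.profile"))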
- if profile_broken:
+ if profile_broken and not portage._sync_mode:
abs_profile_path = None
- for x in (PROFILE_PATH, 'etc/portage/make.profile'):
+ for x in (PROFILE_PATH, 'etc/make.profile'):
x = os.path.join(self["PORTAGE_CONFIGROOT"], x)
try:
os.lstat(x)
@@ -1121,8 +1210,11 @@ class config(object):
the previously calculated USE settings.
"""
- def __init__(self, use, usemask, iuse_implicit,
+ def __init__(self, settings, unfiltered_use,
+ use, usemask, iuse_implicit,
use_expand_split, use_expand_dict):
+ self._settings = settings
+ self._unfiltered_use = unfiltered_use
self._use = use
self._usemask = usemask
self._iuse_implicit = iuse_implicit
@@ -1177,13 +1269,32 @@ class config(object):
# Don't export empty USE_EXPAND vars unless the user config
# exports them as empty. This is required for vars such as
# LINGUAS, where unset and empty have different meanings.
+ # The special '*' token is understood by ebuild.sh, which
+ # will unset the variable so that things like LINGUAS work
+ # properly (see bug #459350).
if has_wildcard:
- # ebuild.sh will see this and unset the variable so
- # that things like LINGUAS work properly
value = '*'
else:
if has_iuse:
- value = ''
+ already_set = False
+ # Skip the first 'env' configdict, in order to
+ # avoid infinite recursion here, since that dict's
+ # __getitem__ calls the current __getitem__.
+ for d in self._settings.lookuplist[1:]:
+ if key in d:
+ already_set = True
+ break
+
+ if not already_set:
+ for x in self._unfiltered_use:
+ if x[:prefix_len] == prefix:
+ already_set = True
+ break
+
+ if already_set:
+ value = ''
+ else:
+ value = '*'
else:
# It's not in IUSE, so just allow the variable content
# to pass through if it is defined somewhere. This
@@ -1219,7 +1330,7 @@ class config(object):
if not isinstance(mycpv, basestring):
pkg = mycpv
mycpv = pkg.cpv
- mydb = pkg.metadata
+ mydb = pkg._metadata
explicit_iuse = pkg.iuse.all
args_hash = (mycpv, id(pkg))
if pkg.built:
@@ -1240,6 +1351,7 @@ class config(object):
iuse = ""
pkg_configdict = self.configdict["pkg"]
previous_iuse = pkg_configdict.get("IUSE")
+ previous_iuse_effective = pkg_configdict.get("IUSE_EFFECTIVE")
previous_features = pkg_configdict.get("FEATURES")
aux_keys = self._setcpv_aux_keys
@@ -1251,6 +1363,7 @@ class config(object):
pkg_configdict["CATEGORY"] = cat
pkg_configdict["PF"] = pf
repository = None
+ eapi = None
if mydb:
if not hasattr(mydb, "aux_get"):
for k in aux_keys:
@@ -1277,14 +1390,16 @@ class config(object):
# Empty USE means this dbapi instance does not contain
# built packages.
built_use = None
+ eapi = pkg_configdict['EAPI']
repository = pkg_configdict.pop("repository", None)
if repository is not None:
pkg_configdict["PORTAGE_REPO_NAME"] = repository
- slot = pkg_configdict["SLOT"]
iuse = pkg_configdict["IUSE"]
if pkg is None:
- cpv_slot = _pkg_str(self.mycpv, slot=slot, repo=repository)
+ self.mycpv = _pkg_str(self.mycpv, metadata=pkg_configdict,
+ settings=self)
+ cpv_slot = self.mycpv
else:
cpv_slot = pkg
pkginternaluse = []
@@ -1294,6 +1409,9 @@ class config(object):
elif x.startswith("-"):
pkginternaluse.append(x)
pkginternaluse = " ".join(pkginternaluse)
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+
if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
self.configdict["pkginternal"]["USE"] = pkginternaluse
has_changed = True
@@ -1424,30 +1542,70 @@ class config(object):
# If reset() has not been called, it's safe to return
# early if IUSE has not changed.
- if not has_changed and previous_iuse == iuse:
+ if not has_changed and previous_iuse == iuse and \
+			((previous_iuse_effective is not None) == eapi_attrs.iuse_effective):
return
# Filter out USE flags that aren't part of IUSE. This has to
# be done for every setcpv() call since practically every
# package has different IUSE.
use = set(self["USE"].split())
+ unfiltered_use = frozenset(use)
if explicit_iuse is None:
explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
- iuse_implicit_match = self._iuse_implicit_match
- portage_iuse = self._get_implicit_iuse()
- portage_iuse.update(explicit_iuse)
+
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self._iuse_effective_match
+ portage_iuse = set(self._iuse_effective)
+ portage_iuse.update(explicit_iuse)
+ self.configdict["pkg"]["IUSE_EFFECTIVE"] = \
+ " ".join(sorted(portage_iuse))
+ else:
+ iuse_implicit_match = self._iuse_implicit_match
+ portage_iuse = self._get_implicit_iuse()
+ portage_iuse.update(explicit_iuse)
# PORTAGE_IUSE is not always needed so it's lazily evaluated.
self.configdict["env"].addLazySingleton(
"PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse)
- ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
+ if pkg is None:
+ raw_restrict = pkg_configdict.get("RESTRICT")
+ else:
+ raw_restrict = pkg._raw_metadata["RESTRICT"]
+
+ restrict_test = False
+ if raw_restrict:
+ try:
+ if built_use is not None:
+ restrict = use_reduce(raw_restrict,
+ uselist=built_use, flat=True)
+ else:
+ # Use matchnone=True to ignore USE conditional parts
+ # of RESTRICT, since we want to know whether to mask
+ # the "test" flag _before_ we know the USE values
+ # that would be needed to evaluate the USE
+ # conditionals (see bug #273272).
+ restrict = use_reduce(raw_restrict,
+ matchnone=True, flat=True)
+ except PortageException:
+ pass
+ else:
+ restrict_test = "test" in restrict
+
+ ebuild_force_test = not restrict_test and \
+ self.get("EBUILD_FORCE_TEST") == "1"
+
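matchnone=True tells use_reduce() to treat every USE conditional as inactive, which is what allows the "test" decision to be made before the final USE is known (bug #273272); built packages use their recorded USE instead. A sketch, assuming portage is importable:

from portage.dep import use_reduce

raw_restrict = "!test? ( test ) strip"

# Source package: USE is not final yet, so ignore every
# USE-conditional group outright.
flat = use_reduce(raw_restrict, matchnone=True, flat=True)
print(flat, "test" in flat)   # ['strip'] False

# Built package: evaluate conditionals against the recorded USE
# (here: built with USE=-test, so the !test? group applies).
flat = use_reduce(raw_restrict, uselist=[], flat=True)
print("test" in flat)         # True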
if ebuild_force_test and \
not hasattr(self, "_ebuild_force_test_msg_shown"):
self._ebuild_force_test_msg_shown = True
writemsg(_("Forcing test.\n"), noiselevel=-1)
- if "test" in self.features:
- if "test" in self.usemask and not ebuild_force_test:
+
+ if "test" in explicit_iuse or iuse_implicit_match("test"):
+ if "test" not in self.features:
+ use.discard("test")
+ elif restrict_test or \
+ ("test" in self.usemask and not ebuild_force_test):
# "test" is in IUSE and USE=test is masked, so execution
# of src_test() probably is not reliable. Therefore,
# temporarily disable FEATURES=test just for this package.
@@ -1460,6 +1618,13 @@ class config(object):
self.usemask = \
frozenset(x for x in self.usemask if x != "test")
+ if eapi_attrs.feature_flag_targetroot and \
+ ("targetroot" in explicit_iuse or iuse_implicit_match("targetroot")):
+ if self["ROOT"] != "/":
+ use.add("targetroot")
+ else:
+ use.discard("targetroot")
+
# Allow _* flags from USE_EXPAND wildcards to pass through here.
use.difference_update([x for x in use \
if (x not in explicit_iuse and \
@@ -1470,7 +1635,8 @@ class config(object):
# comparison instead of startswith().
use_expand_split = set(x.lower() for \
x in self.get('USE_EXPAND', '').split())
- lazy_use_expand = self._lazy_use_expand(use, self.usemask,
+ lazy_use_expand = self._lazy_use_expand(
+ self, unfiltered_use, use, self.usemask,
portage_iuse, use_expand_split, self._use_expand_dict)
use_expand_iuses = {}
@@ -1500,6 +1666,14 @@ class config(object):
self.configdict['env'].addLazySingleton(k,
lazy_use_expand.__getitem__, k)
+ for k in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in use ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
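The USE_EXPAND_UNPREFIXED loop above projects the final USE set back into the source variable (ARCH being the canonical case), so the variable only shows values that survived USE calculation. A standalone sketch with made-up values:

use = {"amd64", "abi_x86_64", "readline"}
settings = {"USE_EXPAND_UNPREFIXED": "ARCH", "ARCH": "amd64 x86"}

for k in settings.get("USE_EXPAND_UNPREFIXED", "").split():
    # Keep only the values that survived USE calculation.
    enabled = [x for x in settings.get(k, "").split() if x in use]
    if enabled:
        settings[k] = " ".join(enabled)
    elif k in settings:
        settings[k] = ""

print(settings["ARCH"])  # amd64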
# Filtered for the ebuild environment. Store this in a separate
# attribute since we still want to be able to see global USE
# settings for things like emerge --info.
@@ -1507,6 +1681,10 @@ class config(object):
self.configdict["env"]["PORTAGE_USE"] = \
" ".join(sorted(x for x in use if x[-2:] != '_*'))
+ # Clear the eapi cache here rather than in the constructor, since
+ # setcpv triggers lazy instantiation of things like _use_manager.
+ _eapi_cache.clear()
+
def _grab_pkg_env(self, penv, container, protected_keys=None):
if protected_keys is None:
protected_keys = ()
@@ -1540,9 +1718,42 @@ class config(object):
else:
container[k] = v
+ def _iuse_effective_match(self, flag):
+ return flag in self._iuse_effective
+
+ def _calc_iuse_effective(self):
+ """
+ Beginning with EAPI 5, IUSE_EFFECTIVE is defined by PMS.
+ """
+ iuse_effective = []
+ iuse_effective.extend(self.get("IUSE_IMPLICIT", "").split())
+
+ # USE_EXPAND_IMPLICIT should contain things like ARCH, ELIBC,
+ # KERNEL, and USERLAND.
+ use_expand_implicit = frozenset(
+ self.get("USE_EXPAND_IMPLICIT", "").split())
+
+ # USE_EXPAND_UNPREFIXED should contain at least ARCH, and
+ # USE_EXPAND_VALUES_ARCH should contain all valid ARCH flags.
+ for v in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ if v not in use_expand_implicit:
+ continue
+ iuse_effective.extend(
+ self.get("USE_EXPAND_VALUES_" + v, "").split())
+
+ use_expand = frozenset(self.get("USE_EXPAND", "").split())
+ for v in use_expand_implicit:
+ if v not in use_expand:
+ continue
+ lower_v = v.lower()
+ for x in self.get("USE_EXPAND_VALUES_" + v, "").split():
+ iuse_effective.append(lower_v + "_" + x)
+
+ return frozenset(iuse_effective)
+
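A worked example of the computation above with hypothetical profile values (in reality they come from make.defaults); it shows how unprefixed ARCH values and prefixed USE_EXPAND values both land in IUSE_EFFECTIVE:

settings = {
    "IUSE_IMPLICIT": "prefix selinux",
    "USE_EXPAND_IMPLICIT": "ARCH ELIBC",
    "USE_EXPAND_UNPREFIXED": "ARCH",
    "USE_EXPAND": "ELIBC",
    "USE_EXPAND_VALUES_ARCH": "amd64 x86",
    "USE_EXPAND_VALUES_ELIBC": "glibc musl",
}

iuse_effective = settings["IUSE_IMPLICIT"].split()
implicit = frozenset(settings["USE_EXPAND_IMPLICIT"].split())

# Unprefixed variables (ARCH) contribute their values verbatim.
for v in settings["USE_EXPAND_UNPREFIXED"].split():
    if v in implicit:
        iuse_effective.extend(settings["USE_EXPAND_VALUES_" + v].split())

# Prefixed variables contribute lowercased, prefixed flags.
for v in implicit:
    if v in settings["USE_EXPAND"].split():
        for x in settings["USE_EXPAND_VALUES_" + v].split():
            iuse_effective.append(v.lower() + "_" + x)

print(sorted(iuse_effective))
# ['amd64', 'elibc_glibc', 'elibc_musl', 'prefix', 'selinux', 'x86']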
def _get_implicit_iuse(self):
"""
- Some flags are considered to
+ Prior to EAPI 5, these flags are considered to
be implicit members of IUSE:
* Flags derived from ARCH
* Flags derived from USE_EXPAND_HIDDEN variables
@@ -1579,11 +1790,11 @@ class config(object):
return iuse_implicit
- def _getUseMask(self, pkg):
- return self._use_manager.getUseMask(pkg)
+ def _getUseMask(self, pkg, stable=None):
+ return self._use_manager.getUseMask(pkg, stable=stable)
- def _getUseForce(self, pkg):
- return self._use_manager.getUseForce(pkg)
+ def _getUseForce(self, pkg, stable=None):
+ return self._use_manager.getUseForce(pkg, stable=stable)
def _getMaskAtom(self, cpv, metadata):
"""
@@ -1648,6 +1859,11 @@ class config(object):
return x
return None
+ def _isStable(self, pkg):
+ return self._keywords_manager.isStable(pkg,
+ self.get("ACCEPT_KEYWORDS", ""),
+ self.configdict["backupenv"].get("ACCEPT_KEYWORDS", ""))
+
def _getKeywords(self, cpv, metadata):
return self._keywords_manager.getKeywords(cpv, metadata["SLOT"], \
metadata.get("KEYWORDS", ""), metadata.get("repository"))
@@ -1736,9 +1952,10 @@ class config(object):
@return: A list of properties that have not been accepted.
"""
accept_properties = self._accept_properties
- if not hasattr(cpv, 'slot'):
- cpv = _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata.get("repository"))
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
cp = cpv_getkey(cpv)
cpdict = self._ppropertiesdict.get(cp)
if cpdict:
@@ -1750,7 +1967,6 @@ class config(object):
properties_str = metadata.get("PROPERTIES", "")
properties = set(use_reduce(properties_str, matchall=1, flat=True))
- properties.discard('||')
acceptable_properties = set()
for x in accept_properties:
@@ -1768,40 +1984,58 @@ class config(object):
else:
use = []
- properties_struct = use_reduce(properties_str, uselist=use, opconvert=True)
- return self._getMaskedProperties(properties_struct, acceptable_properties)
-
- def _getMaskedProperties(self, properties_struct, acceptable_properties):
- if not properties_struct:
- return []
- if properties_struct[0] == "||":
- ret = []
- for element in properties_struct[1:]:
- if isinstance(element, list):
- if element:
- tmp = self._getMaskedProperties(
- element, acceptable_properties)
- if not tmp:
- return []
- ret.extend(tmp)
- else:
- if element in acceptable_properties:
- return[]
- ret.append(element)
- # Return all masked properties, since we don't know which combination
- # (if any) the user will decide to unmask
- return ret
-
- ret = []
- for element in properties_struct:
- if isinstance(element, list):
- if element:
- ret.extend(self._getMaskedProperties(element,
- acceptable_properties))
+ return [x for x in use_reduce(properties_str, uselist=use, flat=True)
+ if x not in acceptable_properties]
+
+ def _getMissingRestrict(self, cpv, metadata):
+ """
+ Take a RESTRICT string and return a list of any tokens the user
+ may need to accept for the given package. The returned list will not
+ contain any tokens that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.accept_restrict support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of tokens that have not been accepted.
+ """
+ accept_restrict = self._accept_restrict
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
+ cp = cpv_getkey(cpv)
+ cpdict = self._paccept_restrict.get(cp)
+ if cpdict:
+ paccept_restrict_list = ordered_by_atom_specificity(cpdict, cpv)
+ if paccept_restrict_list:
+ accept_restrict = list(self._accept_restrict)
+ for x in paccept_restrict_list:
+ accept_restrict.extend(x)
+
+ restrict_str = metadata.get("RESTRICT", "")
+ all_restricts = set(use_reduce(restrict_str, matchall=1, flat=True))
+
+ acceptable_restricts = set()
+ for x in accept_restrict:
+ if x == '*':
+ acceptable_restricts.update(all_restricts)
+ elif x == '-*':
+ acceptable_restricts.clear()
+ elif x[:1] == '-':
+ acceptable_restricts.discard(x[1:])
else:
- if element not in acceptable_properties:
- ret.append(element)
- return ret
+ acceptable_restricts.add(x)
+
+ if "?" in restrict_str:
+ use = metadata["USE"].split()
+ else:
+ use = []
+
+ return [x for x in use_reduce(restrict_str, uselist=use, flat=True)
+ if x not in acceptable_restricts]
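The accept loop above follows the usual incremental semantics: '*' accepts all currently known tokens, '-*' resets the set, '-x' retracts one token, and a bare token accepts it, with later entries winning. Condensed into a reusable sketch:

def acceptable(all_tokens, accept_list):
    acc = set()
    for x in accept_list:
        if x == "*":
            acc.update(all_tokens)
        elif x == "-*":
            acc.clear()
        elif x[:1] == "-":
            acc.discard(x[1:])
        else:
            acc.add(x)
    return acc

all_restricts = {"fetch", "mirror", "bindist"}
# Accept everything, then retract "fetch":
print(acceptable(all_restricts, ["*", "-fetch"]))
# -> {'mirror', 'bindist'} (set order may vary)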
def _accept_chost(self, cpv, metadata):
"""
@@ -1940,6 +2174,18 @@ class config(object):
# repoman will accept any property
self._accept_properties = ('*',)
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_RESTRICT', '').split())
+ mysplit = prune_incremental(mysplit)
+ self.configlist[-1]['ACCEPT_RESTRICT'] = ' '.join(mysplit)
+ if tuple(mysplit) != self._accept_restrict:
+ self._accept_restrict = tuple(mysplit)
+ else:
+			# repoman will accept any RESTRICT token
+ self._accept_restrict = ('*',)
+
increment_lists = {}
for k in myincrementals:
incremental_list = []
@@ -1994,6 +2240,8 @@ class config(object):
if v is not None:
use_expand_dict[k] = v
+ use_expand_unprefixed = self.get("USE_EXPAND_UNPREFIXED", "").split()
+
# In order to best accommodate the long-standing practice of
# setting default USE_EXPAND variables in the profile's
# make.defaults, we translate these variables into their
@@ -2007,6 +2255,12 @@ class config(object):
continue
use = cfg.get("USE", "")
expand_use = []
+
+ for k in use_expand_unprefixed:
+ v = cfg.get(k)
+ if v is not None:
+ expand_use.extend(v.split())
+
for k in use_expand_dict:
v = cfg.get(k)
if v is None:
@@ -2044,6 +2298,17 @@ class config(object):
iuse = [x.lstrip("+-") for x in iuse.split()]
myflags = set()
for curdb in self.uvlist:
+
+ for k in use_expand_unprefixed:
+ v = curdb.get(k)
+ if v is None:
+ continue
+ for x in v.split():
+ if x[:1] == "-":
+ myflags.discard(x[1:])
+ else:
+ myflags.add(x)
+
cur_use_expand = [x for x in use_expand if x in curdb]
mysplit = curdb.get("USE", "").split()
if not mysplit and not cur_use_expand:
@@ -2160,6 +2425,14 @@ class config(object):
elif k in self:
self.configlist[-1][k] = ''
+ for k in use_expand_unprefixed:
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in myflags ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
@property
def virts_p(self):
warnings.warn("portage config.virts_p attribute " + \
@@ -2220,8 +2493,22 @@ class config(object):
elif mykey == "PORTAGE_PYM_PATH":
return portage._pym_path
+ elif mykey == "PORTAGE_PYTHONPATH":
+ value = [x for x in \
+ self.backupenv.get("PYTHONPATH", "").split(":") if x]
+ need_pym_path = True
+ if value:
+ try:
+ need_pym_path = not os.path.samefile(value[0],
+ portage._pym_path)
+ except OSError:
+ pass
+ if need_pym_path:
+ value.insert(0, portage._pym_path)
+ return ":".join(value)
+
elif mykey == "PORTAGE_GID":
- return _unicode_decode(str(portage_gid))
+ return "%s" % portage_gid
for d in self.lookuplist:
try:
@@ -2308,6 +2595,7 @@ class config(object):
environ_filter = self._environ_filter
eapi = self.get('EAPI')
+ eapi_attrs = _get_eapi_attrs(eapi)
phase = self.get('EBUILD_PHASE')
filter_calling_env = False
if self.mycpv is not None and \
@@ -2389,14 +2677,20 @@ class config(object):
not eapi_exports_replace_vars(eapi):
mydict.pop("REPLACED_BY_VERSION", None)
+ if phase is not None and eapi_attrs.exports_EBUILD_PHASE_FUNC:
+ phase_func = _phase_func_map.get(phase)
+ if phase_func is not None:
+ mydict["EBUILD_PHASE_FUNC"] = phase_func
+
return mydict
def thirdpartymirrors(self):
if getattr(self, "_thirdpartymirrors", None) is None:
- profileroots = [os.path.join(self["PORTDIR"], "profiles")]
- for x in shlex_split(self.get("PORTDIR_OVERLAY", "")):
- profileroots.insert(0, os.path.join(x, "profiles"))
- thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
+ thirdparty_lists = []
+ for repo_name in reversed(self.repositories.prepos_order):
+ thirdparty_lists.append(grabdict(os.path.join(
+ self.repositories[repo_name].location,
+ "profiles", "thirdpartymirrors")))
self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
return self._thirdpartymirrors
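The rewrite walks all configured repositories instead of PORTDIR plus overlays, stacking each repo's thirdpartymirrors incrementally. An illustrative reduction with plain dicts standing in for grabdict() results:

# mirror name -> URI list, one dict per repository, in stacking order.
per_repo = [
    {"sourceforge": ["http://a.example/sf"]},
    {"sourceforge": ["http://b.example/sf"],
     "gnu": ["http://c.example/gnu"]},
]

stacked = {}
for d in per_repo:
    for name, uris in d.items():
        # Incremental stacking: append rather than replace.
        stacked.setdefault(name, []).extend(uris)

print(stacked["sourceforge"])
# ['http://a.example/sf', 'http://b.example/sf']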
diff --git a/pym/portage/package/ebuild/deprecated_profile_check.py b/pym/portage/package/ebuild/deprecated_profile_check.py
index 3fab4da6e..fdb19b4ac 100644
--- a/pym/portage/package/ebuild/deprecated_profile_check.py
+++ b/pym/portage/package/ebuild/deprecated_profile_check.py
@@ -1,10 +1,11 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['deprecated_profile_check']
import io
+import portage
from portage import os, _encodings, _unicode_encode
from portage.const import DEPRECATED_PROFILE_FILE
from portage.localization import _
@@ -12,16 +13,32 @@ from portage.output import colorize
from portage.util import writemsg
def deprecated_profile_check(settings=None):
- config_root = "/"
+ config_root = None
+ eprefix = None
+ deprecated_profile_file = None
if settings is not None:
config_root = settings["PORTAGE_CONFIGROOT"]
- deprecated_profile_file = os.path.join(config_root,
- DEPRECATED_PROFILE_FILE)
- if not os.access(deprecated_profile_file, os.R_OK):
- return False
- dcontent = io.open(_unicode_encode(deprecated_profile_file,
+ eprefix = settings["EPREFIX"]
+ for x in reversed(settings.profiles):
+ deprecated_profile_file = os.path.join(x, "deprecated")
+ if os.access(deprecated_profile_file, os.R_OK):
+ break
+ else:
+ deprecated_profile_file = None
+
+ if deprecated_profile_file is None:
+ deprecated_profile_file = os.path.join(config_root or "/",
+ DEPRECATED_PROFILE_FILE)
+ if not os.access(deprecated_profile_file, os.R_OK):
+ deprecated_profile_file = os.path.join(config_root or "/",
+ 'etc', 'make.profile', 'deprecated')
+ if not os.access(deprecated_profile_file, os.R_OK):
+ return
+
+ with io.open(_unicode_encode(deprecated_profile_file,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace').readlines()
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ dcontent = f.readlines()
writemsg(colorize("BAD", _("\n!!! Your current profile is "
"deprecated and not supported anymore.")) + "\n", noiselevel=-1)
writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
@@ -30,13 +47,37 @@ def deprecated_profile_check(settings=None):
writemsg(colorize("BAD", _("!!! Please refer to the "
"Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
return True
- newprofile = dcontent[0]
+ newprofile = dcontent[0].rstrip("\n")
writemsg(colorize("BAD", _("!!! Please upgrade to the "
- "following profile if possible:")) + "\n", noiselevel=-1)
- writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
+ "following profile if possible:")) + "\n\n", noiselevel=-1)
+ writemsg(8*" " + colorize("GOOD", newprofile) + "\n\n", noiselevel=-1)
if len(dcontent) > 1:
writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
for myline in dcontent[1:]:
writemsg(myline, noiselevel=-1)
writemsg("\n\n", noiselevel=-1)
+ else:
+ writemsg(_("You may use the following command to upgrade:\n\n"), noiselevel=-1)
+ writemsg(8*" " + colorize("INFORM", 'eselect profile set ' +
+ newprofile) + "\n\n", noiselevel=-1)
+
+ if settings is not None:
+ main_repo_loc = settings.repositories.mainRepoLocation()
+ new_profile_path = os.path.join(main_repo_loc,
+ "profiles", newprofile.rstrip("\n"))
+
+ if os.path.isdir(new_profile_path):
+ new_config = portage.config(config_root=config_root,
+ config_profile_path=new_profile_path,
+ eprefix=eprefix)
+
+ if not new_config.profiles:
+ writemsg("\n %s %s\n" % (colorize("WARN", "*"),
+ _("You must update portage before you "
+ "can migrate to the above profile.")), noiselevel=-1)
+ writemsg(" %s %s\n\n" % (colorize("WARN", "*"),
+ _("In order to update portage, "
+ "run 'emerge --oneshot portage'.")),
+ noiselevel=-1)
+
return True
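The lookup cascade introduced above checks each profile node (most specific last, hence reversed()) before falling back to the legacy locations. A condensed sketch; the fallback paths below mirror the two fallbacks but are hypothetical:

import os

def find_deprecated_file(profiles, config_root="/"):
    # Most specific profile node wins, hence the reverse scan.
    for node in reversed(profiles):
        candidate = os.path.join(node, "deprecated")
        if os.access(candidate, os.R_OK):
            return candidate
    # Fall back to the legacy locations under the config root.
    for legacy in ("etc/portage/make.profile/deprecated",
            "etc/make.profile/deprecated"):
        candidate = os.path.join(config_root, legacy)
        if os.access(candidate, os.R_OK):
            return candidate
    return None

print(find_deprecated_file(["/var/db/repos/gentoo/profiles/default/linux"]))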
diff --git a/pym/portage/package/ebuild/digestcheck.py b/pym/portage/package/ebuild/digestcheck.py
index 8705639d1..e207ba841 100644
--- a/pym/portage/package/ebuild/digestcheck.py
+++ b/pym/portage/package/ebuild/digestcheck.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['digestcheck']
@@ -6,6 +6,7 @@ __all__ = ['digestcheck']
import warnings
from portage import os, _encodings, _unicode_decode
+from portage.checksum import _hash_filter
from portage.exception import DigestException, FileNotFound
from portage.localization import _
from portage.output import EOutput
@@ -28,6 +29,9 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
return 1
pkgdir = mysettings["O"]
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
if mf is None:
mf = mysettings.repositories.get_repo_for_location(
os.path.dirname(os.path.dirname(pkgdir)))
@@ -38,15 +42,16 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
if mf.fhashdict.get("EBUILD"):
eout.ebegin(_("checking ebuild checksums ;-)"))
- mf.checkTypeHashes("EBUILD")
+ mf.checkTypeHashes("EBUILD", hash_filter=hash_filter)
eout.eend(0)
if mf.fhashdict.get("AUX"):
eout.ebegin(_("checking auxfile checksums ;-)"))
- mf.checkTypeHashes("AUX")
+ mf.checkTypeHashes("AUX", hash_filter=hash_filter)
eout.eend(0)
if mf.fhashdict.get("MISC"):
eout.ebegin(_("checking miscfile checksums ;-)"))
- mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
+ mf.checkTypeHashes("MISC", ignoreMissingFiles=True,
+ hash_filter=hash_filter)
eout.eend(0)
for f in myfiles:
eout.ebegin(_("checking %s ;-)") % f)
@@ -58,7 +63,7 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
noiselevel=-1)
return 0
- mf.checkFileHashes(ftype, f)
+ mf.checkFileHashes(ftype, f, hash_filter=hash_filter)
eout.eend(0)
except FileNotFound as e:
eout.eend(1)
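PORTAGE_CHECKSUM_FILTER selects which hash functions get verified, and a filter that accepts everything is flagged "transparent" and replaced by None so Manifest checks skip filtering entirely. A simplified stand-in for the real _hash_filter class (the real one also understands incremental tokens such as '-*'):

class HashFilter:
    """Simplified whitelist of hash names; '*' or empty accepts all."""

    def __init__(self, filter_str):
        self._tokens = filter_str.split()
        self.transparent = not self._tokens or "*" in self._tokens

    def __call__(self, hash_name):
        return self.transparent or hash_name in self._tokens

hash_filter = HashFilter("SHA256 SHA512")
if hash_filter.transparent:
    hash_filter = None   # callers skip filtering entirely
if hash_filter is not None:
    print(hash_filter("SHA512"), hash_filter("WHIRLPOOL"))  # True False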
diff --git a/pym/portage/package/ebuild/digestgen.py b/pym/portage/package/ebuild/digestgen.py
index 6ad339737..95d02db9b 100644
--- a/pym/portage/package/ebuild/digestgen.py
+++ b/pym/portage/package/ebuild/digestgen.py
@@ -112,67 +112,64 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None):
missing_files.append(myfile)
continue
- if missing_files:
- for myfile in missing_files:
- uris = set()
- all_restrict = set()
- for cpv in distfiles_map[myfile]:
- uris.update(myportdb.getFetchMap(
- cpv, mytree=mytree)[myfile])
- restrict = myportdb.aux_get(cpv, ['RESTRICT'],
- mytree=mytree)[0]
- # Here we ignore conditional parts of RESTRICT since
- # they don't apply unconditionally. Assume such
- # conditionals only apply on the client side where
- # digestgen() does not need to be called.
- all_restrict.update(use_reduce(restrict,
- flat=True, matchnone=True))
-
- # fetch() uses CATEGORY and PF to display a message
- # when fetch restriction is triggered.
- cat, pf = catsplit(cpv)
- mysettings["CATEGORY"] = cat
- mysettings["PF"] = pf
-
- # fetch() uses PORTAGE_RESTRICT to control fetch
- # restriction, which is only applied to files that
- # are not fetchable via a mirror:// URI.
- mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
-
- try:
- st = os.stat(os.path.join(
- mysettings["DISTDIR"],myfile))
- except OSError:
- st = None
-
- if not fetch({myfile : uris}, mysettings):
- myebuild = os.path.join(mysettings["O"],
- catsplit(cpv)[1] + ".ebuild")
- spawn_nofetch(myportdb, myebuild)
- writemsg(_("!!! Fetch failed for %s, can't update "
- "Manifest\n") % myfile, noiselevel=-1)
- if myfile in dist_hashes and \
- st is not None and st.st_size > 0:
- # stat result is obtained before calling fetch(),
- # since fetch may rename the existing file if the
- # digest does not match.
- writemsg(_("!!! If you would like to "
- "forcefully replace the existing "
- "Manifest entry\n!!! for %s, use "
- "the following command:\n") % myfile + \
- "!!! " + colorize("INFORM",
- "ebuild --force %s manifest" % \
- os.path.basename(myebuild)) + "\n",
- noiselevel=-1)
- return 0
+ for myfile in missing_files:
+ uris = set()
+ all_restrict = set()
+ for cpv in distfiles_map[myfile]:
+ uris.update(myportdb.getFetchMap(
+ cpv, mytree=mytree)[myfile])
+ restrict = myportdb.aux_get(cpv, ['RESTRICT'], mytree=mytree)[0]
+ # Here we ignore conditional parts of RESTRICT since
+ # they don't apply unconditionally. Assume such
+ # conditionals only apply on the client side where
+ # digestgen() does not need to be called.
+ all_restrict.update(use_reduce(restrict,
+ flat=True, matchnone=True))
+
+ # fetch() uses CATEGORY and PF to display a message
+ # when fetch restriction is triggered.
+ cat, pf = catsplit(cpv)
+ mysettings["CATEGORY"] = cat
+ mysettings["PF"] = pf
+
+ # fetch() uses PORTAGE_RESTRICT to control fetch
+ # restriction, which is only applied to files that
+ # are not fetchable via a mirror:// URI.
+ mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
+
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError:
+ st = None
+
+ if not fetch({myfile : uris}, mysettings):
+ myebuild = os.path.join(mysettings["O"],
+ catsplit(cpv)[1] + ".ebuild")
+ spawn_nofetch(myportdb, myebuild)
+ writemsg(_("!!! Fetch failed for %s, can't update Manifest\n")
+ % myfile, noiselevel=-1)
+ if myfile in dist_hashes and \
+ st is not None and st.st_size > 0:
+ # stat result is obtained before calling fetch(),
+ # since fetch may rename the existing file if the
+ # digest does not match.
+ cmd = colorize("INFORM", "ebuild --force %s manifest" %
+ os.path.basename(myebuild))
+ writemsg((_(
+ "!!! If you would like to forcefully replace the existing Manifest entry\n"
+ "!!! for %s, use the following command:\n") % myfile) +
+ "!!! %s\n" % cmd,
+ noiselevel=-1)
+ return 0
+
writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
try:
mf.create(assumeDistHashesSometimes=True,
assumeDistHashesAlways=(
"assume-digests" in mysettings.features))
except FileNotFound as e:
- writemsg(_("!!! File %s doesn't exist, can't update "
- "Manifest\n") % e, noiselevel=-1)
+ writemsg(_("!!! File %s doesn't exist, can't update Manifest\n")
+ % e, noiselevel=-1)
return 0
except PortagePackageException as e:
writemsg(("!!! %s\n") % (e,), noiselevel=-1)
diff --git a/pym/portage/package/ebuild/doebuild.py b/pym/portage/package/ebuild/doebuild.py
index 09062f9f3..01707aeec 100644
--- a/pym/portage/package/ebuild/doebuild.py
+++ b/pym/portage/package/ebuild/doebuild.py
@@ -1,14 +1,19 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild']
+import grp
import gzip
import errno
import io
from itertools import chain
import logging
import os as _os
+import platform
+import pwd
import re
import signal
import stat
@@ -26,8 +31,12 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.package.ebuild.digestgen:digestgen',
'portage.package.ebuild.fetch:fetch',
'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
- 'portage.dep._slot_abi:evaluate_slot_abi_equal_deps',
+ 'portage.dep._slot_operator:evaluate_slot_operator_equal_deps',
'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+ 'portage.util._desktop_entry:validate_desktop_entry',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
'portage.util.ExtractKernelVersion:ExtractKernelVersion'
)
@@ -64,7 +73,6 @@ from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildPhase import EbuildPhase
from _emerge.EbuildSpawnProcess import EbuildSpawnProcess
from _emerge.Package import Package
-from _emerge.PollScheduler import PollScheduler
from _emerge.RootConfig import RootConfig
_unsandboxed_phases = frozenset([
@@ -74,6 +82,40 @@ _unsandboxed_phases = frozenset([
"prerm", "setup"
])
+# phases in which IPC with host is allowed
+_ipc_phases = frozenset([
+ "setup", "pretend",
+ "preinst", "postinst", "prerm", "postrm",
+])
+
+# phases in which networking access is allowed
+_networked_phases = frozenset([
+ # for VCS fetching
+ "unpack",
+ # + for network-bound IPC
+] + list(_ipc_phases))
+
+_phase_func_map = {
+ "config": "pkg_config",
+ "setup": "pkg_setup",
+ "nofetch": "pkg_nofetch",
+ "unpack": "src_unpack",
+ "prepare": "src_prepare",
+ "configure": "src_configure",
+ "compile": "src_compile",
+ "test": "src_test",
+ "install": "src_install",
+ "preinst": "pkg_preinst",
+ "postinst": "pkg_postinst",
+ "prerm": "pkg_prerm",
+ "postrm": "pkg_postrm",
+ "info": "pkg_info",
+ "pretend": "pkg_pretend",
+}
+
+_vdb_use_conditional_keys = Package._dep_keys + \
+ ('LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',)
+
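These module constants drive two behaviors: exporting EBUILD_PHASE_FUNC and relaxing the IPC/network sandboxes for allow-listed phases. A condensed sketch of how a phase name and FEATURES set turn into spawn() keyword flags:

_ipc_phases = frozenset([
    "setup", "pretend",
    "preinst", "postinst", "prerm", "postrm",
])
_networked_phases = frozenset(["unpack"]) | _ipc_phases

def sandbox_kwargs(phase, features):
    # IPC/network stay enabled unless the corresponding sandbox
    # feature is active and the phase is not on the allow-list.
    return {
        "ipc": "ipc-sandbox" not in features or phase in _ipc_phases,
        "networked": ("network-sandbox" not in features
            or phase in _networked_phases),
    }

print(sandbox_kwargs("compile", {"ipc-sandbox", "network-sandbox"}))
# {'ipc': False, 'networked': False}
print(sandbox_kwargs("setup", {"ipc-sandbox", "network-sandbox"}))
# {'ipc': True, 'networked': True}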
def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
"""
All proper ebuild phases which execute ebuild.sh are spawned
@@ -83,8 +125,18 @@ def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
if phase in _unsandboxed_phases:
kwargs['free'] = True
+ kwargs['ipc'] = 'ipc-sandbox' not in settings.features or \
+ phase in _ipc_phases
+ kwargs['networked'] = 'network-sandbox' not in settings.features or \
+ phase in _networked_phases
+
if phase == 'depend':
kwargs['droppriv'] = 'userpriv' in settings.features
+ # It's not necessary to close_fds for this phase, since
+ # it should not spawn any daemons, and close_fds is
+ # best avoided since it can interact badly with some
+ # garbage collectors (see _setup_pipes docstring).
+ kwargs['close_fds'] = False
if actionmap is not None and phase in actionmap:
kwargs.update(actionmap[phase]["args"])
@@ -102,17 +154,24 @@ def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
settings['EBUILD_PHASE'] = phase
try:
- return spawn(cmd, settings, **kwargs)
+ return spawn(cmd, settings, **portage._native_kwargs(kwargs))
finally:
settings.pop('EBUILD_PHASE', None)
-def _spawn_phase(phase, settings, actionmap=None, **kwargs):
- if kwargs.get('returnpid'):
- return _doebuild_spawn(phase, settings, actionmap=actionmap, **kwargs)
+def _spawn_phase(phase, settings, actionmap=None, returnpid=False,
+ logfile=None, **kwargs):
+ if returnpid:
+ return _doebuild_spawn(phase, settings, actionmap=actionmap,
+ returnpid=returnpid, logfile=logfile, **kwargs)
+
+ # The logfile argument is unused here, since EbuildPhase uses
+ # the PORTAGE_LOG_FILE variable if set.
ebuild_phase = EbuildPhase(actionmap=actionmap, background=False,
- phase=phase, scheduler=PollScheduler().sched_iface,
- settings=settings)
+ phase=phase, scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings, **kwargs)
+
ebuild_phase.start()
ebuild_phase.wait()
return ebuild_phase.returncode
@@ -125,19 +184,28 @@ def _doebuild_path(settings, eapi=None):
# Note: PORTAGE_BIN_PATH may differ from the global constant
# when portage is reinstalling itself.
portage_bin_path = settings["PORTAGE_BIN_PATH"]
- eprefix = settings["EPREFIX"]
+ eprefix = portage.const.EPREFIX
prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x]
rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x]
+ overrides = [x for x in settings.get(
+ "__PORTAGE_TEST_PATH_OVERRIDE", "").split(":") if x]
prefixes = []
if eprefix:
prefixes.append(eprefix)
prefixes.append("/")
- path = []
+ path = overrides
+
+ if "xattr" in settings.features:
+ path.append(os.path.join(portage_bin_path, "ebuild-helpers", "xattr"))
- if eapi not in (None, "0", "1", "2", "3"):
- path.append(os.path.join(portage_bin_path, "ebuild-helpers", "4"))
+ if eprefix and uid != 0 and "fakeroot" not in settings.features:
+ path.append(os.path.join(portage_bin_path,
+ "ebuild-helpers", "unprivileged"))
+
+ if settings.get("USERLAND", "GNU") != "GNU":
+ path.append(os.path.join(portage_bin_path, "ebuild-helpers", "bsd"))
path.append(os.path.join(portage_bin_path, "ebuild-helpers"))
path.extend(prerootpath)
@@ -256,10 +324,11 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
if hasattr(mydbapi, 'repositories'):
repo = mydbapi.repositories.get_repo_for_location(mytree)
mysettings['PORTDIR'] = repo.eclass_db.porttrees[0]
- mysettings['PORTDIR_OVERLAY'] = ' '.join(repo.eclass_db.porttrees[1:])
+ mysettings['PORTAGE_ECLASS_LOCATIONS'] = repo.eclass_db.eclass_locations_string
mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name
mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
+ mysettings.pop("PORTDIR_OVERLAY", None)
mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
@@ -416,8 +485,8 @@ _doebuild_commands_without_builddir = (
'fetch', 'fetchall', 'help', 'manifest'
)
-def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
- fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
+def doebuild(myebuild, mydo, _unused=DeprecationWarning, settings=None, debug=0, listonly=0,
+ fetchonly=0, cleanup=0, dbkey=DeprecationWarning, use_cache=1, fetchall=0, tree=None,
mydbapi=None, vartree=None, prev_mtimes=None,
fd_pipes=None, returnpid=False):
"""
@@ -480,10 +549,15 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mysettings = settings
myroot = settings['EROOT']
- if _unused is not None and _unused != mysettings['EROOT']:
+ if _unused is not DeprecationWarning:
warnings.warn("The third parameter of the "
- "portage.doebuild() is now unused. Use "
- "settings['ROOT'] instead.",
+ "portage.doebuild() is deprecated. Instead "
+ "settings['EROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ if dbkey is not DeprecationWarning:
+ warnings.warn("portage.doebuild() called "
+ "with deprecated dbkey argument.",
DeprecationWarning, stacklevel=2)
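Using the DeprecationWarning class itself as the default value is a sentinel trick: no real caller passes that object, so "argument supplied" (even an explicit None) is distinguishable from "left at default". A generic sketch of the pattern with a hypothetical function:

import warnings

_UNSET = DeprecationWarning  # any unique object works as a sentinel

def frobnicate(data, legacy_arg=_UNSET):
    if legacy_arg is not _UNSET:
        # Even an explicit None now triggers the warning.
        warnings.warn("the legacy_arg parameter is deprecated",
            DeprecationWarning, stacklevel=2)
    return data

frobnicate([1, 2])        # silent
frobnicate([1, 2], None)  # warns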
if not tree:
@@ -520,7 +594,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
"fetch", "fetchall", "digest",
"unpack", "prepare", "configure", "compile", "test",
"install", "rpm", "qmerge", "merge",
- "package","unmerge", "manifest"]
+ "package", "unmerge", "manifest", "nofetch"]
if mydo not in validcommands:
validcommands.sort()
@@ -534,8 +608,11 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
return 1
if returnpid and mydo != 'depend':
- warnings.warn("portage.doebuild() called " + \
- "with returnpid parameter enabled. This usage will " + \
+ # This case is not supported, since it bypasses the EbuildPhase class
+ # which implements important functionality (including post phase hooks
+ # and IPC for things like best/has_version and die).
+ warnings.warn("portage.doebuild() called "
+ "with returnpid parameter enabled. This usage will "
"not be supported in the future.",
DeprecationWarning, stacklevel=2)
@@ -543,9 +620,6 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
fetchall = 1
mydo = "fetch"
- parallel_fetchonly = mydo in ("fetch", "fetchall") and \
- "PORTAGE_PARALLEL_FETCHONLY" in mysettings
-
if mydo not in clean_phases and not os.path.exists(myebuild):
writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
noiselevel=-1)
@@ -652,7 +726,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
# we can temporarily override PORTAGE_TMPDIR with a random temp dir
# so that there's no need for locking and it can be used even if the
# user isn't in the portage group.
- if mydo in ("info",):
+ if not returnpid and mydo in ("info",):
tmpdir = tempfile.mkdtemp()
tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
mysettings["PORTAGE_TMPDIR"] = tmpdir
@@ -663,9 +737,10 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if mydo in clean_phases:
builddir_lock = None
if not returnpid and \
- 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
builddir_lock = EbuildBuildDir(
- scheduler=PollScheduler().sched_iface,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
settings=mysettings)
builddir_lock.lock()
try:
@@ -681,42 +756,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if returnpid:
return _spawn_phase(mydo, mysettings,
fd_pipes=fd_pipes, returnpid=returnpid)
- elif isinstance(dbkey, dict):
- warnings.warn("portage.doebuild() called " + \
- "with dict dbkey argument. This usage will " + \
- "not be supported in the future.",
- DeprecationWarning, stacklevel=2)
- mysettings["dbkey"] = ""
- pr, pw = os.pipe()
- fd_pipes = {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stderr.fileno(),
- 9:pw}
- mypids = _spawn_phase(mydo, mysettings, returnpid=True,
- fd_pipes=fd_pipes)
- os.close(pw) # belongs exclusively to the child process now
- f = os.fdopen(pr, 'rb', 0)
- for k, v in zip(auxdbkeys,
- (_unicode_decode(line).rstrip('\n') for line in f)):
- dbkey[k] = v
- f.close()
- retval = os.waitpid(mypids[0], 0)[1]
- portage.process.spawned_pids.remove(mypids[0])
- # If it got a signal, return the signal that was sent, but
- # shift in order to distinguish it from a return value. (just
- # like portage.process.spawn() would do).
- if retval & 0xff:
- retval = (retval & 0xff) << 8
- else:
- # Otherwise, return its exit code.
- retval = retval >> 8
- if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
- # Don't trust bash's returncode if the
- # number of lines is incorrect.
- retval = 1
- return retval
- elif dbkey:
+ elif dbkey and dbkey is not DeprecationWarning:
mysettings["dbkey"] = dbkey
else:
mysettings["dbkey"] = \
@@ -725,14 +765,25 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
return _spawn_phase(mydo, mysettings,
fd_pipes=fd_pipes, returnpid=returnpid)
- # Validate dependency metadata here to ensure that ebuilds with invalid
- # data are never installed via the ebuild command. Don't bother when
- # returnpid == True since there's no need to do this every time emerge
- # executes a phase.
+ elif mydo == "nofetch":
+
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
+
+ return spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
+
if tree == "porttree":
- rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
- if rval != os.EX_OK:
- return rval
+
+ if not returnpid:
+ # Validate dependency metadata here to ensure that ebuilds with
+ # invalid data are never installed via the ebuild command. Skip
+ # this when returnpid is True (assume the caller handled it).
+ rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
+ if rval != os.EX_OK:
+ return rval
else:
# FEATURES=noauto only makes sense for porttree, and we don't want
@@ -741,20 +792,25 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if "noauto" in mysettings.features:
mysettings.features.discard("noauto")
- # The info phase is special because it uses mkdtemp so and
- # user (not necessarily in the portage group) can run it.
- if mydo not in ('info',) and \
+ # If we are not using a private temp dir, then check access
+ # to the global temp dir.
+ if tmpdir is None and \
mydo not in _doebuild_commands_without_builddir:
rval = _check_temp_dir(mysettings)
if rval != os.EX_OK:
return rval
if mydo == "unmerge":
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
return unmerge(mysettings["CATEGORY"],
mysettings["PF"], myroot, mysettings, vartree=vartree)
phases_to_run = set()
- if "noauto" in mysettings.features or \
+ if returnpid or \
+ "noauto" in mysettings.features or \
mydo not in actionmap_deps:
phases_to_run.add(mydo)
else:
@@ -805,9 +861,10 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if newstuff:
if builddir_lock is None and \
- 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
builddir_lock = EbuildBuildDir(
- scheduler=PollScheduler().sched_iface,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
settings=mysettings)
builddir_lock.lock()
try:
@@ -825,12 +882,12 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
# in order to satisfy the sane $PWD requirement (from bug #239560)
# when pkg_nofetch is spawned.
have_build_dirs = False
- if not parallel_fetchonly and \
- mydo not in ('digest', 'fetch', 'help', 'manifest'):
+ if mydo not in ('digest', 'fetch', 'help', 'manifest'):
if not returnpid and \
- 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
builddir_lock = EbuildBuildDir(
- scheduler=PollScheduler().sched_iface,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
settings=mysettings)
builddir_lock.lock()
mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
@@ -873,9 +930,8 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
else:
vardb = vartree.dbapi
cpv = mysettings.mycpv
- cp = portage.versions.cpv_getkey(cpv)
- slot = mysettings["SLOT"]
- cpv_slot = cp + ":" + slot
+ cpv_slot = "%s%s%s" % \
+ (cpv.cp, portage.dep._slot_separator, cpv.slot)
mysettings["REPLACING_VERSIONS"] = " ".join(
set(portage.versions.cpv_getversion(match) \
for match in vardb.match(cpv_slot) + \
@@ -885,8 +941,16 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
# the sandbox -- and stop now.
if mydo in ("config", "help", "info", "postinst",
"preinst", "pretend", "postrm", "prerm"):
- return _spawn_phase(mydo, mysettings,
- fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+ if mydo in ("preinst", "postinst"):
+ env_file = os.path.join(os.path.dirname(mysettings["EBUILD"]),
+ "environment.bz2")
+ if os.path.isfile(env_file):
+ mysettings["PORTAGE_UPDATE_ENV"] = env_file
+ try:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+ finally:
+ mysettings.pop("PORTAGE_UPDATE_ENV", None)
mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
@@ -927,7 +991,8 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if not fetch(fetchme, mysettings, listonly=listonly,
fetchonly=fetchonly, allow_missing_digests=True,
digests=dist_digests):
- spawn_nofetch(mydbapi, myebuild, settings=mysettings)
+ spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
if listonly:
# The convention for listonly mode is to report
# success in any case, even though fetch() may
@@ -959,11 +1024,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mf = None
_doebuild_manifest_cache = None
return not digestgen(mysettings=mysettings, myportdb=mydbapi)
- elif mydo != 'fetch' and \
- "digest" in mysettings.features:
- # Don't do this when called by emerge or when called just
- # for fetch (especially parallel-fetch) since it's not needed
- # and it can interfere with parallel tasks.
+ elif "digest" in mysettings.features:
mf = None
_doebuild_manifest_cache = None
digestgen(mysettings=mysettings, myportdb=mydbapi)
@@ -972,14 +1033,17 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if mydo in ("digest", "manifest"):
return 1
+ if mydo == "fetch":
+ # Return after digestgen for FEATURES=digest support.
+ # Return before digestcheck, since fetch() already
+ # checked any relevant digests.
+ return 0
+
# See above comment about fetching only when needed
if tree == 'porttree' and \
not digestcheck(checkme, mysettings, "strict" in features, mf=mf):
return 1
- if mydo == "fetch":
- return 0
-
# remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
if tree == 'porttree' and \
((mydo != "setup" and "noauto" not in features) \
@@ -995,7 +1059,9 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if len(actionmap_deps.get(x, [])):
actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
- if mydo in actionmap:
+ regular_actionmap_phase = mydo in actionmap
+
+ if regular_actionmap_phase:
bintree = None
if mydo == "package":
# Make sure the package directory exists before executing
@@ -1019,6 +1085,9 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
actionmap, mysettings, debug, logfile=logfile,
fd_pipes=fd_pipes, returnpid=returnpid)
+ if returnpid and isinstance(retval, list):
+ return retval
+
if retval == os.EX_OK:
if mydo == "package" and bintree is not None:
bintree.inject(mysettings.mycpv,
@@ -1030,7 +1099,15 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
except OSError:
pass
- elif mydo=="qmerge":
+ elif returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
+
+ if regular_actionmap_phase:
+ # handled above
+ pass
+ elif mydo == "qmerge":
# check to ensure install was run. this *only* pops up when users
# forget it and are using ebuild
if not os.path.exists(
@@ -1047,7 +1124,8 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
- mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
+ mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
elif mydo=="merge":
retval = spawnebuild("install", actionmap, mysettings, debug,
alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
@@ -1063,7 +1141,9 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
"build-info"), myroot, mysettings,
myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
- vartree=vartree, prev_mtimes=prev_mtimes)
+ vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
+
else:
writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
return 1
@@ -1163,7 +1243,9 @@ def _prepare_env_file(settings):
"""
env_extractor = BinpkgEnvExtractor(background=False,
- scheduler=PollScheduler().sched_iface, settings=settings)
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings)
if env_extractor.dest_env_exists():
# There are lots of possible states when doebuild()
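
The "X and A or B" construct in the scheduler selection above predates
Python 2.5 conditional expressions; a minimal equivalent sketch (safe here,
since global_event_loop() never returns a falsy value):

	scheduler = (global_event_loop() if portage._internal_caller
		else EventLoop(main=False))
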
@@ -1242,7 +1324,7 @@ def _spawn_actionmap(settings):
misc_sh_binary = os.path.join(portage_bin_path,
os.path.basename(MISC_SH_BINARY))
ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
- misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
+ misc_sh = _shell_quote(misc_sh_binary) + " __dyn_%s"
	# args are for the spawn function
actionmap = {
@@ -1298,10 +1380,10 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi):
if not pkg.built and \
mydo not in ("digest", "help", "manifest") and \
- pkg.metadata["REQUIRED_USE"] and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
- result = check_required_use(pkg.metadata["REQUIRED_USE"],
- pkg.use.enabled, pkg.iuse.is_valid_flag)
+ pkg._metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.eapi):
+ result = check_required_use(pkg._metadata["REQUIRED_USE"],
+ pkg.use.enabled, pkg.iuse.is_valid_flag, eapi=pkg.eapi)
if not result:
reduced_noise = result.tounicode()
writemsg("\n %s\n" % _("The following REQUIRED_USE flag" + \
@@ -1309,7 +1391,7 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi):
writemsg(" %s\n" % reduced_noise,
noiselevel=-1)
normalized_required_use = \
- " ".join(pkg.metadata["REQUIRED_USE"].split())
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
writemsg("\n %s\n" % _("The above constraints " + \
"are a subset of the following complete expression:"),
@@ -1324,7 +1406,8 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi):
# XXX This would be to replace getstatusoutput completely.
# XXX Issue: cannot block execution. Deadlock condition.
-def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
+def spawn(mystring, mysettings, debug=False, free=False, droppriv=False,
+ sesandbox=False, fakeroot=False, networked=True, ipc=True, **keywords):
"""
Spawn a subprocess with extra portage-specific options.
	Options include:
@@ -1354,6 +1437,10 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
@type sesandbox: Boolean
@param fakeroot: Run this command with faked root privileges
@type fakeroot: Boolean
+ @param networked: Run this command with networking access enabled
+ @type networked: Boolean
+ @param ipc: Run this command with host IPC access enabled
+ @type ipc: Boolean
@param keywords: Extra options encoded as a dict, to be passed to spawn
@type keywords: Dictionary
@rtype: Integer
@@ -1366,29 +1453,90 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
fd_pipes = keywords.get("fd_pipes")
if fd_pipes is None:
fd_pipes = {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stderr.fileno(),
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
}
# In some cases the above print statements don't flush stdout, so
# it needs to be flushed before allowing a child process to use it
# so that output always shows in the correct order.
- stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
if fd in stdout_filenos:
- sys.stdout.flush()
- sys.stderr.flush()
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
break
features = mysettings.features
+
+ # Use Linux namespaces if available
+ if uid == 0 and platform.system() == 'Linux':
+ keywords['unshare_net'] = not networked
+ keywords['unshare_ipc'] = not ipc
+
# TODO: Enable fakeroot to be used together with droppriv. The
# fake ownership/permissions will have to be converted to real
# permissions in the merge phase.
fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
- if droppriv and uid == 0 and portage_gid and portage_uid and \
- hasattr(os, "setgroups"):
- keywords.update({"uid":portage_uid,"gid":portage_gid,
- "groups":userpriv_groups,"umask":0o02})
+ portage_build_uid = os.getuid()
+ portage_build_gid = os.getgid()
+ if uid == 0 and portage_uid and portage_gid and hasattr(os, "setgroups"):
+ if droppriv:
+ keywords.update({
+ "uid": portage_uid,
+ "gid": portage_gid,
+ "groups": userpriv_groups,
+ "umask": 0o02
+ })
+
+ # Adjust pty ownership so that subprocesses
+ # can directly access /dev/fd/{1,2}.
+ stdout_fd = fd_pipes.get(1)
+ if stdout_fd is not None:
+ try:
+ subprocess_tty = _os.ttyname(stdout_fd)
+ except OSError:
+ pass
+ else:
+ try:
+ parent_tty = _os.ttyname(sys.__stdout__.fileno())
+ except OSError:
+ parent_tty = None
+
+ if subprocess_tty != parent_tty:
+ _os.chown(subprocess_tty,
+ int(portage_uid), int(portage_gid))
+
+ if "userpriv" in features and "userpriv" not in mysettings["PORTAGE_RESTRICT"].split() and secpass >= 2:
+ # Since Python 3.4, getpwuid and getgrgid
+ # require int type (no proxies).
+ portage_build_uid = int(portage_uid)
+ portage_build_gid = int(portage_gid)
+
+ if "PORTAGE_BUILD_USER" not in mysettings:
+ user = None
+ try:
+ user = pwd.getpwuid(portage_build_uid).pw_name
+ except KeyError:
+ if portage_build_uid == 0:
+ user = "root"
+ elif portage_build_uid == portage_uid:
+ user = portage.data._portage_username
+ if user is not None:
+ mysettings["PORTAGE_BUILD_USER"] = user
+
+ if "PORTAGE_BUILD_GROUP" not in mysettings:
+ group = None
+ try:
+ group = grp.getgrgid(portage_build_gid).gr_name
+ except KeyError:
+ if portage_build_gid == 0:
+ group = "root"
+ elif portage_build_gid == portage_gid:
+ group = portage.data._portage_grpname
+ if group is not None:
+ mysettings["PORTAGE_BUILD_GROUP"] = group
+
if not free:
free=((droppriv and "usersandbox" not in features) or \
(not droppriv and "sandbox" not in features and \
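
A hypothetical call showing the new keyword arguments in use: as root on
Linux, the flags above are translated into unshare_net/unshare_ipc keywords
for portage.process.spawn, so the child gets fresh network and IPC namespaces
(the ping below would therefore fail to reach the network):

	rc = spawn("ping -c 1 example.org", mysettings,
		networked=False, ipc=False)
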
@@ -1419,12 +1567,15 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
mysettings["PORTAGE_SANDBOX_T"])
if keywords.get("returnpid"):
- return spawn_func(mystring, env=mysettings.environ(), **keywords)
+ return spawn_func(mystring, env=mysettings.environ(),
+ **portage._native_kwargs(keywords))
proc = EbuildSpawnProcess(
background=False, args=mystring,
- scheduler=PollScheduler().sched_iface, spawn_func=spawn_func,
- settings=mysettings, **keywords)
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ spawn_func=spawn_func,
+ settings=mysettings, **portage._native_kwargs(keywords))
proc.start()
proc.wait()
@@ -1436,8 +1587,8 @@ def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
logfile=None, fd_pipes=None, returnpid=False):
if returnpid:
- warnings.warn("portage.spawnebuild() called " + \
- "with returnpid parameter enabled. This usage will " + \
+ warnings.warn("portage.spawnebuild() called "
+ "with returnpid parameter enabled. This usage will "
"not be supported in the future.",
DeprecationWarning, stacklevel=2)
@@ -1530,7 +1681,52 @@ def _check_build_log(mysettings, out=None):
configure_opts_warn = []
configure_opts_warn_re = re.compile(
- r'^configure: WARNING: [Uu]nrecognized options: ')
+ r'^configure: WARNING: [Uu]nrecognized options: (.*)')
+
+ qa_configure_opts = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_CONFIGURE_OPTIONS"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_configure_opts_f:
+ qa_configure_opts = qa_configure_opts_f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_configure_opts = qa_configure_opts.split()
+ if qa_configure_opts:
+ if len(qa_configure_opts) > 1:
+ qa_configure_opts = "|".join("(%s)" % x for x in qa_configure_opts)
+ qa_configure_opts = "^(%s)$" % qa_configure_opts
+ else:
+ qa_configure_opts = "^%s$" % qa_configure_opts[0]
+ qa_configure_opts = re.compile(qa_configure_opts)
+
+ qa_am_maintainer_mode = []
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_AM_MAINTAINER_MODE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_am_maintainer_mode_f:
+ qa_am_maintainer_mode = [x for x in
+ qa_am_maintainer_mode_f.read().splitlines() if x]
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ if qa_am_maintainer_mode:
+ if len(qa_am_maintainer_mode) > 1:
+ qa_am_maintainer_mode = \
+ "|".join("(%s)" % x for x in qa_am_maintainer_mode)
+ qa_am_maintainer_mode = "^(%s)$" % qa_am_maintainer_mode
+ else:
+ qa_am_maintainer_mode = "^%s$" % qa_am_maintainer_mode[0]
+ qa_am_maintainer_mode = re.compile(qa_am_maintainer_mode)
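
A self-contained sketch of the anchored-alternation construction used for
both QA_CONFIGURE_OPTIONS and QA_AM_MAINTAINER_MODE above:

	import re

	def build_whitelist_re(patterns):
		if not patterns:
			return None
		if len(patterns) > 1:
			return re.compile("^(%s)$" %
				"|".join("(%s)" % x for x in patterns))
		return re.compile("^%s$" % patterns[0])

	wl = build_whitelist_re(["--with-foo", "--enable-bar.*"])
	assert wl.match("--with-foo") is not None
	assert wl.match("--unknown") is None
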
# Exclude output from dev-libs/yaz-3.0.47 which looks like this:
#
@@ -1552,7 +1748,9 @@ def _check_build_log(mysettings, out=None):
for line in f:
line = _unicode_decode(line)
if am_maintainer_mode_re.search(line) is not None and \
- am_maintainer_mode_exclude_re.search(line) is None:
+ am_maintainer_mode_exclude_re.search(line) is None and \
+ (not qa_am_maintainer_mode or
+ qa_am_maintainer_mode.search(line) is None):
am_maintainer_mode.append(line.rstrip("\n"))
if bash_command_not_found_re.match(line) is not None and \
@@ -1562,8 +1760,11 @@ def _check_build_log(mysettings, out=None):
if helper_missing_file_re.match(line) is not None:
helper_missing_file.append(line.rstrip("\n"))
- if configure_opts_warn_re.match(line) is not None:
- configure_opts_warn.append(line.rstrip("\n"))
+ m = configure_opts_warn_re.match(line)
+ if m is not None:
+ for x in m.group(1).split(", "):
+ if not qa_configure_opts or qa_configure_opts.match(x) is None:
+ configure_opts_warn.append(x)
if make_jobserver_re.match(line) is not None:
make_jobserver.append(line.rstrip("\n"))
@@ -1612,7 +1813,7 @@ def _check_build_log(mysettings, out=None):
if configure_opts_warn:
msg = [_("QA Notice: Unrecognized configure options:")]
msg.append("")
- msg.extend("\t" + line for line in configure_opts_warn)
+ msg.extend("\t%s" % x for x in configure_opts_warn)
_eqawarn(msg)
if make_jobserver:
@@ -1637,8 +1838,12 @@ def _post_src_install_write_metadata(settings):
build_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')
- for k in ('IUSE',):
- v = settings.get(k)
+ metadata_keys = ['IUSE']
+ if eapi_attrs.iuse_effective:
+ metadata_keys.append('IUSE_EFFECTIVE')
+
+ for k in metadata_keys:
+ v = settings.configdict['pkg'].get(k)
if v is not None:
write_atomic(os.path.join(build_info_dir, k), v + '\n')
@@ -1654,7 +1859,7 @@ def _post_src_install_write_metadata(settings):
'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict') as f:
- f.write(_unicode_decode("%.0f\n" % (time.time(),)))
+ f.write("%.0f\n" % (time.time(),))
use = frozenset(settings['PORTAGE_USE'].split())
for k in _vdb_use_conditional_keys:
@@ -1668,7 +1873,7 @@ def _post_src_install_write_metadata(settings):
continue
if k.endswith('DEPEND'):
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
continue
token_class = Atom
else:
@@ -1686,10 +1891,10 @@ def _post_src_install_write_metadata(settings):
k), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict') as f:
- f.write(_unicode_decode(v + '\n'))
+ f.write('%s\n' % v)
- if eapi_attrs.slot_abi:
- deps = evaluate_slot_abi_equal_deps(settings, use, QueryCommand.get_db())
+ if eapi_attrs.slot_operator:
+ deps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())
for k, v in deps.items():
filename = os.path.join(build_info_dir, k)
if not v:
@@ -1702,10 +1907,7 @@ def _post_src_install_write_metadata(settings):
k), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict') as f:
- f.write(_unicode_decode(v + '\n'))
-
-_vdb_use_conditional_keys = ('DEPEND', 'LICENSE', 'PDEPEND',
- 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'RESTRICT',)
+ f.write('%s\n' % v)
def _preinst_bsdflags(mysettings):
if bsd_chflags:
@@ -1747,6 +1949,33 @@ def _post_src_install_uid_fix(mysettings, out):
destdir = mysettings["D"]
ed_len = len(mysettings["ED"])
unicode_errors = []
+ desktop_file_validate = \
+ portage.process.find_binary("desktop-file-validate") is not None
+ xdg_dirs = mysettings.get('XDG_DATA_DIRS', '/usr/share').split(':')
+ xdg_dirs = tuple(os.path.join(i, "applications") + os.sep
+ for i in xdg_dirs if i)
+
+ qa_desktop_file = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_DESKTOP_FILE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ qa_desktop_file = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_desktop_file = qa_desktop_file.split()
+ if qa_desktop_file:
+ if len(qa_desktop_file) > 1:
+ qa_desktop_file = "|".join("(%s)" % x for x in qa_desktop_file)
+ qa_desktop_file = "^(%s)$" % qa_desktop_file
+ else:
+ qa_desktop_file = "^%s$" % qa_desktop_file[0]
+ qa_desktop_file = re.compile(qa_desktop_file)
while True:
@@ -1755,6 +1984,7 @@ def _post_src_install_uid_fix(mysettings, out):
counted_inodes = set()
fixlafiles_announced = False
fixlafiles = "fixlafiles" in mysettings.features
+ desktopfile_errors = []
for parent, dirs, files in os.walk(destdir):
try:
@@ -1794,6 +2024,16 @@ def _post_src_install_uid_fix(mysettings, out):
else:
fpath = os.path.join(parent, fname)
+ fpath_relative = fpath[ed_len - 1:]
+ if desktop_file_validate and fname.endswith(".desktop") and \
+ os.path.isfile(fpath) and \
+ fpath_relative.startswith(xdg_dirs) and \
+ not (qa_desktop_file and qa_desktop_file.match(fpath_relative.strip(os.sep)) is not None):
+
+ desktop_validate = validate_desktop_entry(fpath)
+ if desktop_validate:
+ desktopfile_errors.extend(desktop_validate)
+
if fixlafiles and \
fname.endswith(".la") and os.path.isfile(fpath):
f = open(_unicode_encode(fpath,
@@ -1860,6 +2100,11 @@ def _post_src_install_uid_fix(mysettings, out):
if not unicode_error:
break
+ if desktopfile_errors:
+ for l in _merge_desktopfile_error(desktopfile_errors):
+ l = l.replace(mysettings["ED"], '/')
+ eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
+
if unicode_errors:
for l in _merge_unicode_error(unicode_errors):
eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
@@ -1871,7 +2116,7 @@ def _post_src_install_uid_fix(mysettings, out):
'SIZE'), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict')
- f.write(_unicode_decode(str(size) + '\n'))
+ f.write('%d\n' % size)
f.close()
_reapply_bsdflags_to_image(mysettings)
@@ -2022,6 +2267,20 @@ def _post_src_install_soname_symlinks(mysettings, out):
for line in qa_msg:
eqawarn(line, key=mysettings.mycpv, out=out)
+def _merge_desktopfile_error(errors):
+ lines = []
+
+ msg = _("QA Notice: This package installs one or more .desktop files "
+ "that do not pass validation.")
+ lines.extend(wrap(msg, 72))
+
+ lines.append("")
+ errors.sort()
+ lines.extend("\t" + x for x in errors)
+ lines.append("")
+
+ return lines
+
def _merge_unicode_error(errors):
lines = []
@@ -2078,11 +2337,6 @@ def _handle_self_update(settings, vardb):
if settings["ROOT"] == "/" and \
portage.dep.match_from_list(
portage.const.PORTAGE_PACKAGE_ATOM, [cpv]):
- inherited = frozenset(settings.get('INHERITED', '').split())
- if not vardb.cpv_exists(cpv) or \
- '9999' in cpv or \
- 'git' in inherited or \
- 'git-2' in inherited:
- _prepare_self_update(settings)
- return True
+ _prepare_self_update(settings)
+ return True
return False
diff --git a/pym/portage/package/ebuild/fetch.py b/pym/portage/package/ebuild/fetch.py
index 60ed04da2..2424ff3c5 100644
--- a/pym/portage/package/ebuild/fetch.py
+++ b/pym/portage/package/ebuild/fetch.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -14,6 +14,10 @@ import stat
import sys
import tempfile
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
import portage
portage.proxy.lazyimport.lazyimport(globals(),
@@ -26,7 +30,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
from portage import OrderedDict, os, selinux, shutil, _encodings, \
_shell_quote, _unicode_encode
from portage.checksum import (hashfunc_map, perform_md5, verify_all,
- _filter_unaccelarated_hashes)
+ _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter)
from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
GLOBAL_CONFIG_PATH
from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
@@ -64,9 +68,9 @@ def _spawn_fetch(settings, args, **kwargs):
if "fd_pipes" not in kwargs:
kwargs["fd_pipes"] = {
- 0 : sys.stdin.fileno(),
- 1 : sys.stdout.fileno(),
- 2 : sys.stdout.fileno(),
+ 0 : portage._get_stdin().fileno(),
+ 1 : sys.__stdout__.fileno(),
+ 2 : sys.__stdout__.fileno(),
}
if "userfetch" in settings.features and \
@@ -185,7 +189,7 @@ def _check_digests(filename, digests, show_errors=1):
return False
return True
-def _check_distfile(filename, digests, eout, show_errors=1):
+def _check_distfile(filename, digests, eout, show_errors=1, hash_filter=None):
"""
@return a tuple of (match, stat_obj) where match is True if filename
matches all given digests (if any) and stat_obj is a stat result, or
@@ -212,6 +216,8 @@ def _check_distfile(filename, digests, eout, show_errors=1):
return (False, st)
else:
digests = _filter_unaccelarated_hashes(digests)
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
if _check_digests(filename, digests, show_errors=show_errors):
eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
" ".join(sorted(digests))))
@@ -341,7 +347,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
_("!!! For fetching to a read-only filesystem, "
"locking should be turned off.\n")), noiselevel=-1)
writemsg(_("!!! This can be done by adding -distlocks to "
- "FEATURES in /etc/make.conf\n"), noiselevel=-1)
+ "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
# use_locks = 0
# local mirrors are always added
@@ -355,6 +361,9 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
if try_mirrors:
mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
if skip_manifest:
allow_missing_digests = True
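
A rough behavioral sketch of the PORTAGE_CHECKSUM_FILTER semantics assumed
above (not the real _hash_filter implementation, which uses fnmatch-style
patterns; a filter that rejects nothing is "transparent" and skipped):

	def hash_filter_demo(filter_str, hash_name):
		matched = True	# default: accept every hash
		for token in filter_str.split():
			if token == "*":
				matched = True
			elif token == "-*":
				matched = False
			elif token.startswith("-"):
				matched = matched and token[1:] != hash_name
			elif token == hash_name:
				matched = True
		return matched

	assert hash_filter_demo("-* sha256", "sha256")
	assert not hash_filter_demo("-* sha256", "md5")
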
@@ -397,12 +406,16 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
for myfile, uri_set in myuris.items():
for myuri in uri_set:
file_uri_tuples.append((myfile, myuri))
+ if not uri_set:
+ file_uri_tuples.append((myfile, None))
else:
for myuri in myuris:
- file_uri_tuples.append((os.path.basename(myuri), myuri))
+ if urlparse(myuri).scheme:
+ file_uri_tuples.append((os.path.basename(myuri), myuri))
+ else:
+ file_uri_tuples.append((os.path.basename(myuri), None))
filedict = OrderedDict()
- primaryuri_indexes={}
primaryuri_dict = {}
thirdpartymirror_uris = {}
for myfile, myuri in file_uri_tuples:
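
The scheme check matters because urlparse distinguishes real URIs from bare
file names (the import at the top of this file has a py2/py3 fallback):

	assert urlparse("http://host/distfiles/foo.tar.gz").scheme == "http"
	assert urlparse("foo.tar.gz").scheme == ""
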
@@ -410,6 +423,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
filedict[myfile]=[]
for y in range(0,len(locations)):
filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri is None:
+ continue
if myuri[:9]=="mirror://":
eidx = myuri.find("/", 9)
if eidx != -1:
@@ -424,15 +439,15 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
# now try the official mirrors
if mirrorname in thirdpartymirrors:
- random.shuffle(thirdpartymirrors[mirrorname])
-
uris = [locmirr.rstrip("/") + "/" + path \
for locmirr in thirdpartymirrors[mirrorname]]
+ random.shuffle(uris)
filedict[myfile].extend(uris)
thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
- if not filedict[myfile]:
- writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
+ if mirrorname not in custommirrors and \
+ mirrorname not in thirdpartymirrors:
+ writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
else:
writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
writemsg(" %s\n" % (myuri), noiselevel=-1)
@@ -440,26 +455,30 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
if restrict_fetch or force_mirror:
		# Only fetching from specific mirrors is allowed.
continue
- if "primaryuri" in restrict:
- # Use the source site first.
- if myfile in primaryuri_indexes:
- primaryuri_indexes[myfile] += 1
- else:
- primaryuri_indexes[myfile] = 0
- filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
- else:
- filedict[myfile].append(myuri)
primaryuris = primaryuri_dict.get(myfile)
if primaryuris is None:
primaryuris = []
primaryuri_dict[myfile] = primaryuris
primaryuris.append(myuri)
+	# Order primaryuri_dict values to match the order in SRC_URI.
+ for uris in primaryuri_dict.values():
+ uris.reverse()
+
# Prefer thirdpartymirrors over normal mirrors in cases when
# the file does not yet exist on the normal mirrors.
for myfile, uris in thirdpartymirror_uris.items():
primaryuri_dict.setdefault(myfile, []).extend(uris)
+ # Now merge primaryuri values into filedict (includes mirrors
+ # explicitly referenced in SRC_URI).
+ if "primaryuri" in restrict:
+ for myfile, uris in filedict.items():
+ filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
+ else:
+ for myfile in filedict:
+ filedict[myfile] += primaryuri_dict.get(myfile, [])
+
can_fetch=True
if listonly:
@@ -637,7 +656,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
eout = EOutput()
eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
match, mystat = _check_distfile(
- myfile_path, pruned_digests, eout)
+ myfile_path, pruned_digests, eout, hash_filter=hash_filter)
if match:
# Skip permission adjustment for symlinks, since we don't
# want to modify anything outside of the primary DISTDIR,
@@ -709,7 +728,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
for x in ro_distdirs:
filename = os.path.join(x, myfile)
match, mystat = _check_distfile(
- filename, pruned_digests, eout)
+ filename, pruned_digests, eout, hash_filter=hash_filter)
if match:
readonly_file = filename
break
@@ -734,7 +753,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
"remaining space.\n"), noiselevel=-1)
if userfetch:
writemsg(_("!!! You may set FEATURES=\"-userfetch\""
- " in /etc/make.conf in order to fetch with\n"
+ " in /etc/portage/make.conf in order to fetch with\n"
"!!! superuser privileges.\n"), noiselevel=-1)
if fsmirrors and not os.path.exists(myfile_path) and has_space:
@@ -796,6 +815,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
continue
else:
digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
verified_ok, reason = verify_all(myfile_path, digests)
if not verified_ok:
writemsg(_("!!! Previously fetched"
@@ -845,8 +866,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
protocol = loc[0:loc.find("://")]
global_config_path = GLOBAL_CONFIG_PATH
- if mysettings['EPREFIX']:
- global_config_path = os.path.join(mysettings['EPREFIX'],
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
GLOBAL_CONFIG_PATH.lstrip(os.sep))
missing_file_param = False
@@ -955,11 +976,16 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
writemsg_stdout(_(">>> Downloading '%s'\n") % \
_hide_url_passwd(loc))
variables = {
- "DISTDIR": mysettings["DISTDIR"],
"URI": loc,
"FILE": myfile
}
+ for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
+ try:
+ variables[k] = mysettings[k]
+ except KeyError:
+ pass
+
myfetch = shlex_split(locfetch)
myfetch = [varexpand(x, mydict=variables) for x in myfetch]
myret = -1
@@ -1053,6 +1079,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
# net connection. This way we have a chance to try to download
# from another mirror...
digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
verified_ok, reason = verify_all(myfile_path, digests)
if not verified_ok:
writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
diff --git a/pym/portage/package/ebuild/getmaskingreason.py b/pym/portage/package/ebuild/getmaskingreason.py
index 8a88c2f60..1e4ed21ce 100644
--- a/pym/portage/package/ebuild/getmaskingreason.py
+++ b/pym/portage/package/ebuild/getmaskingreason.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['getmaskingreason']
@@ -6,13 +6,12 @@ __all__ = ['getmaskingreason']
import portage
from portage import os
from portage.const import USER_CONFIG_PATH
-from portage.dep import Atom, match_from_list, _slot_separator, _repo_separator
+from portage.dep import Atom, match_from_list
from portage.exception import InvalidAtom
from portage.localization import _
from portage.repository.config import _gen_valid_repo
from portage.util import grablines, normalize_path
-from portage.versions import catpkgsplit
-from _emerge.Package import Package
+from portage.versions import catpkgsplit, _pkg_str
def getmaskingreason(mycpv, metadata=None, settings=None,
portdb=None, return_location=False, myrepo=None):
@@ -60,23 +59,20 @@ def getmaskingreason(mycpv, metadata=None, settings=None,
# Sometimes we can't access SLOT or repository due to corruption.
pkg = mycpv
- if metadata is not None:
- pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
- # At this point myrepo should be None, a valid name, or
- # Package.UNKNOWN_REPO which we ignore.
- if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
- pkg = "".join((pkg, _repo_separator, myrepo))
+ try:
+ pkg.slot
+ except AttributeError:
+ pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)
+
cpv_slot_list = [pkg]
- mycp=mysplit[0]+"/"+mysplit[1]
+ mycp = pkg.cp
- # XXX- This is a temporary duplicate of code from the config constructor.
- locations = [os.path.join(settings["PORTDIR"], "profiles")]
+ locations = []
+ if pkg.repo in settings.repositories:
+ for repo in settings.repositories[pkg.repo].masters + (settings.repositories[pkg.repo],):
+ locations.append(os.path.join(repo.location, "profiles"))
locations.extend(settings.profiles)
- for ov in settings["PORTDIR_OVERLAY"].split():
- profdir = os.path.join(normalize_path(ov), "profiles")
- if os.path.isdir(profdir):
- locations.append(profdir)
locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
USER_CONFIG_PATH))
locations.reverse()
diff --git a/pym/portage/package/ebuild/getmaskingstatus.py b/pym/portage/package/ebuild/getmaskingstatus.py
index 9bf605db6..4b9e588f7 100644
--- a/pym/portage/package/ebuild/getmaskingstatus.py
+++ b/pym/portage/package/ebuild/getmaskingstatus.py
@@ -1,17 +1,21 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['getmaskingstatus']
import sys
import portage
from portage import eapi_is_supported, _eapi_is_deprecated
+from portage.exception import InvalidDependString
from portage.localization import _
from portage.package.ebuild.config import config
from portage.versions import catpkgsplit, _pkg_str
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class _UnmaskHint(object):
@@ -48,7 +52,7 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
# emerge passed in a Package instance
pkg = mycpv
mycpv = pkg.cpv
- metadata = pkg.metadata
+ metadata = pkg._metadata
installed = pkg.installed
if metadata is None:
@@ -65,10 +69,11 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
else:
metadata["USE"] = ""
- if not hasattr(mycpv, 'slot'):
+ try:
+ mycpv.slot
+ except AttributeError:
try:
- mycpv = _pkg_str(mycpv, slot=metadata['SLOT'],
- repo=metadata.get('repository'))
+ mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
except portage.exception.InvalidData:
raise ValueError(_("invalid CPV: %s") % mycpv)
@@ -83,6 +88,7 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
mygroups = settings._getKeywords(mycpv, metadata)
licenses = metadata["LICENSE"]
properties = metadata["PROPERTIES"]
+ restrict = metadata["RESTRICT"]
if not eapi_is_supported(eapi):
return [_MaskReason("EAPI", "EAPI %s" % eapi)]
elif _eapi_is_deprecated(eapi) and not installed:
@@ -122,6 +128,13 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
if gp=="*":
kmask=None
break
+ elif gp == "~*":
+ for x in pgroups:
+ if x[:1] == "~":
+ kmask = None
+ break
+ if kmask is None:
+ break
elif gp=="-"+myarch and myarch in pgroups:
kmask="-"+myarch
break
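
A toy illustration of the "~*" handling above, with hypothetical keywords:
a single ~arch entry in the package's KEYWORDS satisfies an ACCEPT_KEYWORDS
entry of "~*":

	pgroups = ["~amd64"]	# package KEYWORDS (hypothetical)
	kmask = "~amd64"
	for gp in ["~*"]:	# accept-keywords groups (hypothetical)
		if gp == "~*" and any(x[:1] == "~" for x in pgroups):
			kmask = None
	assert kmask is None
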
@@ -161,6 +174,15 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
except portage.exception.InvalidDependString as e:
rValue.append(_MaskReason("invalid", "PROPERTIES: "+str(e)))
+ try:
+ missing_restricts = settings._getMissingRestrict(mycpv, metadata)
+ if missing_restricts:
+ msg = list(missing_restricts)
+ msg.append("in RESTRICT")
+ rValue.append(_MaskReason("RESTRICT", " ".join(msg)))
+ except InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))
+
# Only show KEYWORDS masks for installed packages
# if they're not masked for any other reason.
if kmask and (not installed or not rValue):
diff --git a/pym/portage/package/ebuild/prepare_build_dirs.py b/pym/portage/package/ebuild/prepare_build_dirs.py
index b8fbdc5cf..6782160e4 100644
--- a/pym/portage/package/ebuild/prepare_build_dirs.py
+++ b/pym/portage/package/ebuild/prepare_build_dirs.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['prepare_build_dirs']
import errno
@@ -338,12 +340,12 @@ def _prepare_workdir(mysettings):
try:
_ensure_log_subdirs(logdir, log_subdir)
except PortageException as e:
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
if os.access(log_subdir, os.W_OK):
logdir_subdir_ok = True
else:
- writemsg(_unicode_decode("!!! %s: %s\n") %
+ writemsg("!!! %s: %s\n" %
(_("Permission Denied"), log_subdir), noiselevel=-1)
tmpdir_log_path = os.path.join(
diff --git a/pym/portage/process.py b/pym/portage/process.py
index f3cec8815..ba41ea8eb 100644
--- a/pym/portage/process.py
+++ b/pym/portage/process.py
@@ -1,25 +1,30 @@
# portage.py -- core Portage functionality
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import atexit
import errno
+import fcntl
import platform
import signal
+import socket
+import struct
import sys
import traceback
+import os as _os
from portage import os
from portage import _encodings
from portage import _unicode_encode
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.util:dump_traceback',
+ 'portage.util:dump_traceback,writemsg',
)
from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY
from portage.exception import CommandNotFound
+from portage.util._ctypes import find_library, LoadLibrary, ctypes
try:
import resource
@@ -28,12 +33,35 @@ except ImportError:
max_fd_limit = 256
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
-if os.path.isdir("/proc/%i/fd" % os.getpid()):
+# Support PEP 446 for Python >=3.4
+try:
+ _set_inheritable = _os.set_inheritable
+except AttributeError:
+ _set_inheritable = None
+
+try:
+ _FD_CLOEXEC = fcntl.FD_CLOEXEC
+except AttributeError:
+ _FD_CLOEXEC = None
+
+# Prefer /proc/self/fd if available (/dev/fd
+# doesn't work on Solaris, see bug #474536).
+for _fd_dir in ("/proc/self/fd", "/dev/fd"):
+ if os.path.isdir(_fd_dir):
+ break
+else:
+	_fd_dir = None
+
+# /dev/fd does not work on FreeBSD, see bug #478446
+if platform.system() in ('FreeBSD',) and _fd_dir == '/dev/fd':
+ _fd_dir = None
+
+if _fd_dir is not None:
def get_open_fds():
- return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid()) \
- if fd.isdigit())
+ return (int(fd) for fd in os.listdir(_fd_dir) if fd.isdigit())
if platform.python_implementation() == 'PyPy':
# EAGAIN observed with PyPy 1.8.
@@ -46,6 +74,13 @@ if os.path.isdir("/proc/%i/fd" % os.getpid()):
raise
return range(max_fd_limit)
+elif os.path.isdir("/proc/%s/fd" % os.getpid()):
+ # In order for this function to work in forked subprocesses,
+ # os.getpid() must be called from inside the function.
+ def get_open_fds():
+ return (int(fd) for fd in os.listdir("/proc/%s/fd" % os.getpid())
+ if fd.isdigit())
+
else:
def get_open_fds():
return range(max_fd_limit)
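
A minimal sketch of the PEP 446 fallback wired up above: prefer
os.set_inheritable() on Python >= 3.4, otherwise toggle FD_CLOEXEC directly:

	import fcntl
	import os

	def set_inheritable_compat(fd, inheritable):
		try:
			os.set_inheritable(fd, inheritable)
		except AttributeError:
			flags = fcntl.fcntl(fd, fcntl.F_GETFD)
			if inheritable:
				flags &= ~fcntl.FD_CLOEXEC
			else:
				flags |= fcntl.FD_CLOEXEC
			fcntl.fcntl(fd, fcntl.F_SETFD, flags)
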
@@ -83,14 +118,14 @@ def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
def spawn_sandbox(mycommand, opt_name=None, **keywords):
if not sandbox_capable:
return spawn_bash(mycommand, opt_name=opt_name, **keywords)
- args=[SANDBOX_BINARY]
+ args = [SANDBOX_BINARY]
if not opt_name:
opt_name = os.path.basename(mycommand.split()[0])
args.append(mycommand)
return spawn(args, opt_name=opt_name, **keywords)
def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
- args=[FAKEROOT_BINARY]
+ args = [FAKEROOT_BINARY]
if not opt_name:
opt_name = os.path.basename(mycommand.split()[0])
if fakeroot_state:
@@ -141,30 +176,28 @@ def run_exitfuncs():
atexit.register(run_exitfuncs)
-# We need to make sure that any processes spawned are killed off when
-# we exit. spawn() takes care of adding and removing pids to this list
-# as it creates and cleans up processes.
-spawned_pids = []
-def cleanup():
- while spawned_pids:
- pid = spawned_pids.pop()
+# It used to be necessary for API consumers to remove pids from spawned_pids,
+# since otherwise it would accumulate pids endlessly. Now, spawned_pids is
+# just an empty dummy list, so for backward compatibility we ignore the
+# ValueError raised when removing items that are not present.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy spawned_pids.
try:
- # With waitpid and WNOHANG, only check the
- # first element of the tuple since the second
- # element may vary (bug #337465).
- if os.waitpid(pid, os.WNOHANG)[0] == 0:
- os.kill(pid, signal.SIGTERM)
- os.waitpid(pid, 0)
- except OSError:
- # This pid has been cleaned up outside
- # of spawn().
+ list.remove(self, item)
+ except ValueError:
pass
-atexit_register(cleanup)
+spawned_pids = _dummy_list()
+
+def cleanup():
+ pass
def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
uid=None, gid=None, groups=None, umask=None, logfile=None,
- path_lookup=True, pre_exec=None):
+ path_lookup=True, pre_exec=None, close_fds=True, unshare_net=False,
+ unshare_ipc=False, cgroup=None):
"""
Spawns a given command.
@@ -175,6 +208,7 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
@param opt_name: an optional name for the spawn'd process (defaults to the binary name)
@type opt_name: String
@param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
+ (default is {0:stdin, 1:stdout, 2:stderr})
@type fd_pipes: Dictionary
@param returnpid: Return the Process IDs for a successful spawn.
	NOTE: This requires that the caller clean up all the PIDs; otherwise, spawn will clean them up.
@@ -193,7 +227,16 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
@type path_lookup: Boolean
@param pre_exec: A function to be called with no arguments just prior to the exec call.
@type pre_exec: callable
-
+ @param close_fds: If True, then close all file descriptors except those
+ referenced by fd_pipes (default is True).
+ @type close_fds: Boolean
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
+
	logfile requires stdout and stderr to be assigned to this process (i.e.
	not pointed somewhere else).
@@ -226,9 +269,9 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
# default to propagating our stdin, stdout and stderr.
if fd_pipes is None:
fd_pipes = {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stderr.fileno(),
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
}
# mypids will hold the pids of all processes created.
@@ -256,21 +299,40 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
fd_pipes[1] = pw
fd_pipes[2] = pw
- pid = os.fork()
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if unshare_net or unshare_ipc:
+ find_library("c")
- if pid == 0:
- try:
- _exec(binary, mycommand, opt_name, fd_pipes,
- env, gid, groups, uid, umask, pre_exec)
- except SystemExit:
- raise
- except Exception as e:
- # We need to catch _any_ exception so that it doesn't
- # propagate out of this function and cause exiting
- # with anything other than os._exit()
- sys.stderr.write("%s:\n %s\n" % (e, " ".join(mycommand)))
- traceback.print_exc()
- sys.stderr.flush()
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid == 0:
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes,
+ env, gid, groups, uid, umask, pre_exec, close_fds,
+ unshare_net, unshare_ipc, cgroup)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ writemsg("%s:\n %s\n" % (e, " ".join(mycommand)),
+ noiselevel=-1)
+ traceback.print_exc()
+ sys.stderr.flush()
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
os._exit(1)
if not isinstance(pid, int):
@@ -278,7 +340,6 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
# Add the pid to our local and the global pid lists.
mypids.append(pid)
- spawned_pids.append(pid)
# If we started a tee process the write side of the pipe is no
# longer needed, so close it.
@@ -301,10 +362,6 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
# and wait for it.
retval = os.waitpid(pid, 0)[1]
- # When it's done, we can remove it from the
- # global pid list as well.
- spawned_pids.remove(pid)
-
if retval:
# If it failed, kill off anything else that
# isn't dead yet.
@@ -315,7 +372,6 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
if os.waitpid(pid, os.WNOHANG)[0] == 0:
os.kill(pid, signal.SIGTERM)
os.waitpid(pid, 0)
- spawned_pids.remove(pid)
# If it got a signal, return the signal that was sent.
if (retval & 0xff):
@@ -328,7 +384,7 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
return 0
def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
- pre_exec):
+ pre_exec, close_fds, unshare_net, unshare_ipc, cgroup):
"""
Execute a given binary with options
@@ -353,10 +409,16 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
@type umask: Integer
@param pre_exec: A function to be called with no arguments just prior to the exec call.
@type pre_exec: callable
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
@rtype: None
@return: Never returns (calls os.execve)
"""
-
+
# If the process we're creating hasn't been given a name
# assign it the name of the executable.
if not opt_name:
@@ -371,6 +433,10 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
myargs = [opt_name]
myargs.extend(mycommand[1:])
+ # Avoid a potential UnicodeEncodeError from os.execve().
+ myargs = [_unicode_encode(x, encoding=_encodings['fs'],
+ errors='strict') for x in myargs]
+
# Use default signal handlers in order to avoid problems
# killing subprocesses as reported in bug #353239.
signal.signal(signal.SIGINT, signal.SIG_DFL)
@@ -383,15 +449,63 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
# the parent process (see bug #289486).
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
- _setup_pipes(fd_pipes)
+ _setup_pipes(fd_pipes, close_fds=close_fds, inheritable=True)
+
+	# Add to cgroup. It's better to do this from the child, since we can
+	# guarantee that it is done before we start forking children.
+ if cgroup:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'a') as f:
+ f.write('%d\n' % os.getpid())
+
+ # Unshare (while still uid==0)
+ if unshare_net or unshare_ipc:
+ filename = find_library("c")
+ if filename is not None:
+ libc = LoadLibrary(filename)
+ if libc is not None:
+ CLONE_NEWIPC = 0x08000000
+ CLONE_NEWNET = 0x40000000
+
+ flags = 0
+ if unshare_net:
+ flags |= CLONE_NEWNET
+ if unshare_ipc:
+ flags |= CLONE_NEWIPC
+
+ try:
+ if libc.unshare(flags) != 0:
+ writemsg("Unable to unshare: %s\n" % (
+ errno.errorcode.get(ctypes.get_errno(), '?')),
+ noiselevel=-1)
+ else:
+ if unshare_net:
+ # 'up' the loopback
+ IFF_UP = 0x1
+ ifreq = struct.pack('16sh', b'lo', IFF_UP)
+ SIOCSIFFLAGS = 0x8914
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
+ try:
+ fcntl.ioctl(sock, SIOCSIFFLAGS, ifreq)
+ except IOError as e:
+ writemsg("Unable to enable loopback interface: %s\n" % (
+ errno.errorcode.get(e.errno, '?')),
+ noiselevel=-1)
+ sock.close()
+ except AttributeError:
+ # unshare() not supported by libc
+ pass
# Set requested process permissions.
if gid:
- os.setgid(gid)
+ # Cast proxies to int, in case it matters.
+ os.setgid(int(gid))
if groups:
os.setgroups(groups)
if uid:
- os.setuid(uid)
+ # Cast proxies to int, in case it matters.
+ os.setuid(int(uid))
if umask:
os.umask(umask)
if pre_exec:
@@ -400,9 +514,16 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
# And switch to the new process.
os.execve(binary, myargs, env)
-def _setup_pipes(fd_pipes, close_fds=True):
+def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
"""Setup pipes for a forked process.
+ Even when close_fds is False, file descriptors referenced as
+ values in fd_pipes are automatically closed if they do not also
+ occur as keys in fd_pipes. It is assumed that the caller will
+ explicitly add them to the fd_pipes keys if they are intended
+ to remain open. This allows for convenient elimination of
+ unnecessary duplicate file descriptors.
+
WARNING: When not followed by exec, the close_fds behavior
can trigger interference from destructors that close file
descriptors. This interference happens when the garbage
@@ -413,22 +534,92 @@ def _setup_pipes(fd_pipes, close_fds=True):
and also with CPython under some circumstances (as triggered
by xmpppy in bug #374335). In order to close a safe subset of
file descriptors, see portage.locks._close_fds().
+
+ NOTE: When not followed by exec, even when close_fds is False,
+ it's still possible for dup2() calls to cause interference in a
+ way that's similar to the way that close_fds interferes (since
+ dup2() has to close the target fd if it happens to be open).
+ It's possible to avoid such interference by using allocated
+ file descriptors as the keys in fd_pipes. For example:
+
+ pr, pw = os.pipe()
+ fd_pipes[pw] = pw
+
+ By using the allocated pw file descriptor as the key in fd_pipes,
+ it's not necessary for dup2() to close a file descriptor (it
+ actually does nothing in this case), which avoids possible
+ interference.
"""
- my_fds = {}
+
+ reverse_map = {}
# To protect from cases where direct assignment could
- # clobber needed fds ({1:2, 2:1}) we first dupe the fds
- # into unused fds.
- for fd in fd_pipes:
- my_fds[fd] = os.dup(fd_pipes[fd])
- # Then assign them to what they should be.
- for fd in my_fds:
- os.dup2(my_fds[fd], fd)
+ # clobber needed fds ({1:2, 2:1}) we create a reverse map
+ # in order to know when it's necessary to create temporary
+ # backup copies with os.dup().
+ for newfd, oldfd in fd_pipes.items():
+ newfds = reverse_map.get(oldfd)
+ if newfds is None:
+ newfds = []
+ reverse_map[oldfd] = newfds
+ newfds.append(newfd)
+
+ # Assign newfds via dup2(), making temporary backups when
+ # necessary, and closing oldfd if the caller has not
+ # explicitly requested for it to remain open by adding
+ # it to the keys of fd_pipes.
+ while reverse_map:
+
+ oldfd, newfds = reverse_map.popitem()
+ old_fdflags = None
+
+ for newfd in newfds:
+ if newfd in reverse_map:
+ # Make a temporary backup before re-assignment, assuming
+ # that backup_fd won't collide with a key in reverse_map
+ # (since all of the keys correspond to open file
+				# descriptors, and os.dup() only allocates previously
+				# unused file descriptors).
+ backup_fd = os.dup(newfd)
+ reverse_map[backup_fd] = reverse_map.pop(newfd)
+
+ if oldfd != newfd:
+ os.dup2(oldfd, newfd)
+ if _set_inheritable is not None:
+ # Don't do this unless _set_inheritable is available,
+ # since it's used below to ensure correct state, and
+ # otherwise /dev/null stdin fails to inherit (at least
+ # with Python versions from 3.1 to 3.3).
+ if old_fdflags is None:
+ old_fdflags = fcntl.fcntl(oldfd, fcntl.F_GETFD)
+ fcntl.fcntl(newfd, fcntl.F_SETFD, old_fdflags)
+
+ if _set_inheritable is not None:
+
+ inheritable_state = None
+ if not (old_fdflags is None or _FD_CLOEXEC is None):
+ inheritable_state = not bool(old_fdflags & _FD_CLOEXEC)
+
+ if inheritable is not None:
+ if inheritable_state is not inheritable:
+ _set_inheritable(newfd, inheritable)
+
+ elif newfd in (0, 1, 2):
+ if inheritable_state is not True:
+ _set_inheritable(newfd, True)
+
+ if oldfd not in fd_pipes:
+ # If oldfd is not a key in fd_pipes, then it's safe
+ # to close now, since we've already made all of the
+ # requested duplicates. This also closes every
+ # backup_fd that may have been created on previous
+ # iterations of this loop.
+ os.close(oldfd)
if close_fds:
# Then close _all_ fds that haven't been explicitly
# requested to be kept open.
for fd in get_open_fds():
- if fd not in my_fds:
+ if fd not in fd_pipes:
try:
os.close(fd)
except OSError:
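
A toy, self-contained version of the clobber-safe assignment loop above;
it handles the classic {1: 2, 2: 1} swap without losing either descriptor:

	import os

	def assign_fds(fd_pipes):
		reverse_map = {}
		for newfd, oldfd in fd_pipes.items():
			reverse_map.setdefault(oldfd, []).append(newfd)
		while reverse_map:
			oldfd, newfds = reverse_map.popitem()
			for newfd in newfds:
				if newfd in reverse_map:
					# back up a source fd that is about to be clobbered
					backup_fd = os.dup(newfd)
					reverse_map[backup_fd] = reverse_map.pop(newfd)
				if oldfd != newfd:
					os.dup2(oldfd, newfd)
			if oldfd not in fd_pipes:
				# also reaps any backup fds created above
				os.close(oldfd)
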
@@ -443,8 +634,16 @@ def find_binary(binary):
@rtype: None or string
@return: full path to binary or None if the binary could not be located.
"""
- for path in os.environ.get("PATH", "").split(":"):
- filename = "%s/%s" % (path, binary)
- if os.access(filename, os.X_OK) and os.path.isfile(filename):
+ paths = os.environ.get("PATH", "")
+ if sys.hexversion >= 0x3000000 and isinstance(binary, bytes):
+ # return bytes when input is bytes
+ paths = paths.encode(sys.getfilesystemencoding(), 'surrogateescape')
+ paths = paths.split(b':')
+ else:
+ paths = paths.split(':')
+
+ for path in paths:
+ filename = _os.path.join(path, binary)
+ if _os.access(filename, os.X_OK) and _os.path.isfile(filename):
return filename
return None
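
The bytes-in/bytes-out convention above means both forms work on Python 3,
and undecodable path bytes survive round-trips via surrogateescape:

	find_binary("bash")	# e.g. '/bin/bash' (str)
	find_binary(b"bash")	# e.g. b'/bin/bash' (bytes)
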
diff --git a/pym/portage/proxy/lazyimport.py b/pym/portage/proxy/lazyimport.py
index ad4a54271..5aa7e50ca 100644
--- a/pym/portage/proxy/lazyimport.py
+++ b/pym/portage/proxy/lazyimport.py
@@ -1,4 +1,4 @@
-# Copyright 2009 Gentoo Foundation
+# Copyright 2009-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['lazyimport']
@@ -14,6 +14,7 @@ except ImportError:
from portage.proxy.objectproxy import ObjectProxy
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_module_proxies = {}
@@ -32,7 +33,7 @@ def _preload_portage_submodules():
while True:
remaining = False
for name in list(_module_proxies):
- if name.startswith('portage.'):
+ if name.startswith('portage.') or name.startswith('_emerge.'):
if name in imported:
continue
imported.add(name)
diff --git a/pym/portage/proxy/objectproxy.py b/pym/portage/proxy/objectproxy.py
index 92b36d111..a755774ae 100644
--- a/pym/portage/proxy/objectproxy.py
+++ b/pym/portage/proxy/objectproxy.py
@@ -1,4 +1,4 @@
-# Copyright 2008-2009 Gentoo Foundation
+# Copyright 2008-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -30,6 +30,13 @@ class ObjectProxy(object):
result = object.__getattribute__(self, '_get_target')()
return result(*args, **kwargs)
+ def __enter__(self):
+ return object.__getattribute__(self, '_get_target')().__enter__()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ return object.__getattribute__(self, '_get_target')().__exit__(
+ exc_type, exc_value, traceback)
+
def __setitem__(self, key, value):
object.__getattribute__(self, '_get_target')()[key] = value
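
These delegating methods are needed because the with statement looks up
__enter__/__exit__ on the type rather than the instance, bypassing attribute
delegation. A minimal sketch with a hypothetical lazy file proxy:

	class LazyFileProxy(object):
		def __init__(self, path):
			self._path = path
			self._file = None
		def _get_target(self):
			if self._file is None:
				self._file = open(self._path)
			return self._file
		def __enter__(self):
			return self._get_target().__enter__()
		def __exit__(self, exc_type, exc_value, traceback):
			return self._get_target().__exit__(
				exc_type, exc_value, traceback)
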
diff --git a/pym/portage/repository/config.py b/pym/portage/repository/config.py
index 9b43f3872..5e0d05523 100644
--- a/pym/portage/repository/config.py
+++ b/pym/portage/repository/config.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import io
import logging
import warnings
@@ -8,25 +10,35 @@ import sys
import re
try:
- from configparser import ParsingError
+ from configparser import Error as ConfigParserError
if sys.hexversion >= 0x3020000:
from configparser import ConfigParser as SafeConfigParser
else:
from configparser import SafeConfigParser
except ImportError:
- from ConfigParser import SafeConfigParser, ParsingError
+ from ConfigParser import SafeConfigParser, Error as ConfigParserError
+import portage
from portage import eclass_cache, os
from portage.const import (MANIFEST2_HASH_FUNCTIONS, MANIFEST2_REQUIRED_HASH,
- REPO_NAME_LOC, USER_CONFIG_PATH)
+ PORTAGE_BASE_PATH, REPO_NAME_LOC, USER_CONFIG_PATH)
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
from portage.env.loaders import KeyValuePairFileLoader
from portage.util import (normalize_path, read_corresponding_eapi_file, shlex_split,
- stack_lists, writemsg, writemsg_level)
+ stack_lists, writemsg, writemsg_level, _recursive_file_list)
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.localization import _
from portage import _unicode_decode
from portage import _unicode_encode
from portage import _encodings
from portage import manifest
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+# Characters prohibited by repoman's file.name check.
+_invalid_path_char_re = re.compile(r'[^a-zA-Z0-9._\-+:/]')
+
_valid_profile_formats = frozenset(
['pms', 'portage-1', 'portage-2'])
@@ -48,38 +60,76 @@ def _gen_valid_repo(name):
name = None
return name
+def _find_invalid_path_char(path, pos=0, endpos=None):
+ """
+	Returns the position of the first invalid character found in path,
+ or -1 if no invalid characters are found.
+ """
+ if endpos is None:
+ endpos = len(path)
+
+ m = _invalid_path_char_re.search(path, pos=pos, endpos=endpos)
+ if m is not None:
+ return m.start()
+
+ return -1
+
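
Example (hypothetical file name): the space in "files/foo bar.patch" is the
first character rejected by repoman's file.name check, at index 9:

	assert _find_invalid_path_char("files/foo bar.patch") == 9
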
class RepoConfig(object):
"""Stores config of one repository"""
__slots__ = ('aliases', 'allow_missing_manifest', 'allow_provide_virtual',
'cache_formats', 'create_manifest', 'disable_manifest', 'eapi',
- 'eclass_db', 'eclass_locations', 'eclass_overrides', 'format', 'location',
+ 'eclass_db', 'eclass_locations', 'eclass_overrides',
+ 'find_invalid_path_char', 'force', 'format', 'local_config', 'location',
'main_repo', 'manifest_hashes', 'masters', 'missing_repo_name',
'name', 'portage1_profiles', 'portage1_profiles_compat', 'priority',
- 'profile_formats', 'sign_commit', 'sign_manifest', 'sync',
- 'thin_manifest', 'update_changelog', 'user_location')
+ 'profile_formats', 'sign_commit', 'sign_manifest', 'sync_cvs_repo',
+ 'sync_type', 'sync_uri', 'thin_manifest', 'update_changelog',
+ 'user_location', '_eapis_banned', '_eapis_deprecated', '_masters_orig')
- def __init__(self, name, repo_opts):
+ def __init__(self, name, repo_opts, local_config=True):
"""Build a RepoConfig with options in repo_opts
Try to read repo_name in repository location, but if
it is not found use variable name as repository name"""
- aliases = repo_opts.get('aliases')
- if aliases is not None:
- aliases = tuple(aliases.split())
+
+ force = repo_opts.get('force')
+ if force is not None:
+ force = tuple(force.split())
+ self.force = force
+ if force is None:
+ force = ()
+
+ self.local_config = local_config
+
+ if local_config or 'aliases' in force:
+ aliases = repo_opts.get('aliases')
+ if aliases is not None:
+ aliases = tuple(aliases.split())
+ else:
+ aliases = None
+
self.aliases = aliases
- eclass_overrides = repo_opts.get('eclass-overrides')
- if eclass_overrides is not None:
- eclass_overrides = tuple(eclass_overrides.split())
+ if local_config or 'eclass-overrides' in force:
+ eclass_overrides = repo_opts.get('eclass-overrides')
+ if eclass_overrides is not None:
+ eclass_overrides = tuple(eclass_overrides.split())
+ else:
+ eclass_overrides = None
+
self.eclass_overrides = eclass_overrides
# Eclass databases and locations are computed later.
self.eclass_db = None
self.eclass_locations = None
- # Masters from repos.conf override layout.conf.
- masters = repo_opts.get('masters')
- if masters is not None:
- masters = tuple(masters.split())
+ if local_config or 'masters' in force:
+ # Masters from repos.conf override layout.conf.
+ masters = repo_opts.get('masters')
+ if masters is not None:
+ masters = tuple(masters.split())
+ else:
+ masters = None
+
self.masters = masters
	# The main-repo key only makes sense for the 'DEFAULT' section.
@@ -93,11 +143,22 @@ class RepoConfig(object):
priority = None
self.priority = priority
- sync = repo_opts.get('sync')
- if sync is not None:
- sync = sync.strip()
- self.sync = sync
+ sync_cvs_repo = repo_opts.get('sync-cvs-repo')
+ if sync_cvs_repo is not None:
+ sync_cvs_repo = sync_cvs_repo.strip()
+ self.sync_cvs_repo = sync_cvs_repo or None
+
+ sync_type = repo_opts.get('sync-type')
+ if sync_type is not None:
+ sync_type = sync_type.strip()
+ self.sync_type = sync_type or None
+ sync_uri = repo_opts.get('sync-uri')
+ if sync_uri is not None:
+ sync_uri = sync_uri.strip()
+ self.sync_uri = sync_uri or None
+
+ # Not implemented.
format = repo_opts.get('format')
if format is not None:
format = format.strip()
@@ -106,7 +167,7 @@ class RepoConfig(object):
location = repo_opts.get('location')
self.user_location = location
if location is not None and location.strip():
- if os.path.isdir(location):
+ if os.path.isdir(location) or portage._sync_mode:
location = os.path.realpath(location)
else:
location = None
@@ -114,14 +175,23 @@ class RepoConfig(object):
eapi = None
missing = True
+ self.name = name
if self.location is not None:
eapi = read_corresponding_eapi_file(os.path.join(self.location, REPO_NAME_LOC))
- name, missing = self._read_valid_repo_name(self.location)
- elif name == "DEFAULT":
+ self.name, missing = self._read_valid_repo_name(self.location)
+ if missing:
+ # The name from repos.conf has to be used here for
+ # things like emerge-webrsync to work when the repo
+ # is empty (bug #484950).
+ if name is not None:
+ self.name = name
+ if portage._sync_mode:
+ missing = False
+
+ elif name == "DEFAULT":
missing = False
self.eapi = eapi
- self.name = name
self.missing_repo_name = missing
# sign_commit is disabled by default, since it requires Git >=1.7.9,
# and key_id configured by `git config user.signingkey key_id`
@@ -137,18 +207,20 @@ class RepoConfig(object):
self.cache_formats = None
self.portage1_profiles = True
self.portage1_profiles_compat = False
+ self.find_invalid_path_char = _find_invalid_path_char
+ self._masters_orig = None
# Parse layout.conf.
if self.location:
- layout_filename = os.path.join(self.location, "metadata", "layout.conf")
layout_data = parse_layout_conf(self.location, self.name)[0]
+ self._masters_orig = layout_data['masters']
# layout.conf masters may be overridden here if we have a masters
# setting from the user's repos.conf
if self.masters is None:
self.masters = layout_data['masters']
- if layout_data['aliases']:
+ if (local_config or 'aliases' in force) and layout_data['aliases']:
aliases = self.aliases
if aliases is None:
aliases = ()
@@ -156,6 +228,12 @@ class RepoConfig(object):
# them the ability to do incremental overrides
self.aliases = layout_data['aliases'] + tuple(aliases)
+ if layout_data['repo-name']:
+ # Allow layout.conf to override the repository name. This is
+ # useful when two copies of the same repo are enabled, since it
+ # avoids modifying profiles/repo_name in one of them.
+ self.name = layout_data['repo-name']
+
for value in ('allow-missing-manifest',
'allow-provide-virtual', 'cache-formats',
'create-manifest', 'disable-manifest', 'manifest-hashes',
@@ -163,9 +241,19 @@ class RepoConfig(object):
'sign-commit', 'sign-manifest', 'thin-manifest', 'update-changelog'):
setattr(self, value.lower().replace("-", "_"), layout_data[value])
- self.portage1_profiles = any(x in _portage1_profiles_allow_directories
- for x in layout_data['profile-formats'])
- self.portage1_profiles_compat = layout_data['profile-formats'] == ('portage-1-compat',)
+ self.portage1_profiles = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ self.portage1_profiles_compat = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+
+ self._eapis_banned = frozenset(layout_data['eapis-banned'])
+ self._eapis_deprecated = frozenset(layout_data['eapis-deprecated'])
+
+ def eapi_is_banned(self, eapi):
+ return eapi in self._eapis_banned
+
+ def eapi_is_deprecated(self, eapi):
+ return eapi in self._eapis_deprecated
def iter_pregenerated_caches(self, auxdbkeys, readonly=True, force=False):
"""
@@ -178,7 +266,11 @@ class RepoConfig(object):
if not formats:
if not force:
return
- formats = ('pms',)
+ # The default egencache format was 'pms' prior to portage-2.1.11.32
+ # (portage versions prior to portage-2.1.11.14 will NOT
+ # recognize md5-dict format unless it is explicitly listed in
+ # layout.conf).
+ formats = ('md5-dict',)
for fmt in formats:
name = None
@@ -209,7 +301,8 @@ class RepoConfig(object):
kwds['hashes'] = self.manifest_hashes
if self.disable_manifest:
kwds['from_scratch'] = True
- return manifest.Manifest(*args, **kwds)
+ kwds['find_invalid_path_char'] = self.find_invalid_path_char
+ return manifest.Manifest(*args, **portage._native_kwargs(kwds))
def update(self, new_repo):
"""Update repository with options in another RepoConfig"""
@@ -272,8 +365,12 @@ class RepoConfig(object):
repo_msg.append(indent + "format: " + self.format)
if self.user_location:
repo_msg.append(indent + "location: " + self.user_location)
- if self.sync:
- repo_msg.append(indent + "sync: " + self.sync)
+ if self.sync_cvs_repo:
+ repo_msg.append(indent + "sync-cvs-repo: " + self.sync_cvs_repo)
+ if self.sync_type:
+ repo_msg.append(indent + "sync-type: " + self.sync_type)
+ if self.sync_uri:
+ repo_msg.append(indent + "sync-uri: " + self.sync_uri)
if self.masters:
repo_msg.append(indent + "masters: " + " ".join(master.name for master in self.masters))
if self.priority is not None:
@@ -281,19 +378,19 @@ class RepoConfig(object):
if self.aliases:
repo_msg.append(indent + "aliases: " + " ".join(self.aliases))
if self.eclass_overrides:
- repo_msg.append(indent + "eclass_overrides: " + \
+ repo_msg.append(indent + "eclass-overrides: " + \
" ".join(self.eclass_overrides))
repo_msg.append("")
return "\n".join(repo_msg)
def __repr__(self):
- return "<portage.repository.config.RepoConfig(name='%s', location='%s')>" % (self.name, _unicode_decode(self.location))
+ return "<portage.repository.config.RepoConfig(name=%r, location=%r)>" % (self.name, _unicode_decode(self.location))
def __str__(self):
d = {}
for k in self.__slots__:
d[k] = getattr(self, k, None)
- return _unicode_decode("%s") % (d,)
+ return "%s" % (d,)
if sys.hexversion < 0x3000000:
@@ -306,11 +403,14 @@ class RepoConfigLoader(object):
"""Loads and store config of several repositories, loaded from PORTDIR_OVERLAY or repos.conf"""
@staticmethod
- def _add_repositories(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
+ def _add_repositories(portdir, portdir_overlay, prepos,
+ ignored_map, ignored_location_map, local_config, default_portdir):
"""Add overlays in PORTDIR_OVERLAY as repositories"""
overlays = []
+ portdir_orig = None
if portdir:
portdir = normalize_path(portdir)
+ portdir_orig = portdir
overlays.append(portdir)
try:
port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
@@ -344,43 +444,57 @@ class RepoConfigLoader(object):
#overlay priority is negative because we want overlays to be considered before any other repo
base_priority = 0
for ov in overlays:
- if os.path.isdir(ov):
+ # Ignore missing directory for 'gentoo' so that
+ # first sync with emerge-webrsync is possible.
+ if isdir_raise_eaccess(ov) or \
+ (base_priority == 0 and ov is portdir):
repo_opts = default_repo_opts.copy()
repo_opts['location'] = ov
- repo = RepoConfig(None, repo_opts)
+ repo = RepoConfig(None, repo_opts, local_config=local_config)
# repos_conf_opts contains options from repos.conf
repos_conf_opts = repos_conf.get(repo.name)
if repos_conf_opts is not None:
# Selectively copy only the attributes which
# repos.conf is allowed to override.
- for k in ('aliases', 'eclass_overrides', 'masters', 'priority'):
+ for k in ('aliases', 'eclass_overrides', 'force', 'masters',
+ 'priority', 'sync_cvs_repo', 'sync_type', 'sync_uri'):
v = getattr(repos_conf_opts, k, None)
if v is not None:
setattr(repo, k, v)
if repo.name in prepos:
+ # Silently ignore when PORTDIR overrides the location
+ # setting from the default repos.conf (bug #478544).
old_location = prepos[repo.name].location
- if old_location is not None and old_location != repo.location:
+ if old_location is not None and \
+ old_location != repo.location and \
+ not (base_priority == 0 and
+ old_location == default_portdir):
ignored_map.setdefault(repo.name, []).append(old_location)
ignored_location_map[old_location] = repo.name
if old_location == portdir:
portdir = repo.user_location
- if ov == portdir and portdir not in port_ov:
- repo.priority = -1000
- elif repo.priority is None:
- repo.priority = base_priority
- base_priority += 1
+ if repo.priority is None:
+ if base_priority == 0 and ov == portdir_orig:
+ # If it's the original PORTDIR setting and it's not
+ # in PORTDIR_OVERLAY, then it will be assigned a
+ # special priority setting later.
+ pass
+ else:
+ repo.priority = base_priority
+ base_priority += 1
prepos[repo.name] = repo
else:
- writemsg(_("!!! Invalid PORTDIR_OVERLAY"
- " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ if not portage._sync_mode:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n") % ov, noiselevel=-1)
return portdir
@staticmethod
- def _parse(paths, prepos, ignored_map, ignored_location_map):
+ def _parse(paths, prepos, ignored_map, ignored_location_map, local_config, portdir):
"""Parse files in paths to load config"""
parser = SafeConfigParser()
@@ -388,49 +502,78 @@ class RepoConfigLoader(object):
try:
# Python >=3.2
read_file = parser.read_file
+ source_kwarg = 'source'
except AttributeError:
read_file = parser.readfp
+ source_kwarg = 'filename'
+ recursive_paths = []
for p in paths:
- f = None
- try:
- f = io.open(_unicode_encode(p,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- except EnvironmentError:
- pass
+ if isinstance(p, basestring):
+ recursive_paths.extend(_recursive_file_list(p))
else:
+ recursive_paths.append(p)
+
+ for p in recursive_paths:
+ if isinstance(p, basestring):
+ f = None
try:
- read_file(f)
- except ParsingError as e:
- writemsg(_unicode_decode(
- _("!!! Error while reading repo config file: %s\n")
- ) % e, noiselevel=-1)
- finally:
- if f is not None:
- f.close()
-
- prepos['DEFAULT'] = RepoConfig("DEFAULT", parser.defaults())
+ f = io.open(_unicode_encode(p,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ # The 'source' keyword argument is needed since otherwise
+ # ConfigParser in Python <3.3.3 may throw a TypeError
+ # because it assumes that f.name is a native string rather
+ # than binary when constructing error messages.
+ kwargs = {source_kwarg: p}
+ read_file(f, **portage._native_kwargs(kwargs))
+ finally:
+ if f is not None:
+ f.close()
+ elif isinstance(p, io.StringIO):
+ kwargs = {source_kwarg: "<io.StringIO>"}
+ read_file(p, **portage._native_kwargs(kwargs))
+ else:
+ raise TypeError("Unsupported type %r of element %r of 'paths' argument" % (type(p), p))
+
+ prepos['DEFAULT'] = RepoConfig("DEFAULT",
+ parser.defaults(), local_config=local_config)
+
for sname in parser.sections():
optdict = {}
for oname in parser.options(sname):
optdict[oname] = parser.get(sname, oname)
- repo = RepoConfig(sname, optdict)
- if repo.location and not os.path.exists(repo.location):
- writemsg(_("!!! Invalid repos.conf entry '%s'"
- " (not a dir): '%s'\n") % (sname, repo.location), noiselevel=-1)
+ repo = RepoConfig(sname, optdict, local_config=local_config)
+
+ if repo.sync_type is not None and repo.sync_uri is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type attribute, but is missing sync-uri attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
continue
- if repo.name in prepos:
- old_location = prepos[repo.name].location
- if old_location is not None and repo.location is not None and old_location != repo.location:
- ignored_map.setdefault(repo.name, []).append(old_location)
- ignored_location_map[old_location] = repo.name
- prepos[repo.name].update(repo)
- else:
- prepos[repo.name] = repo
+ if repo.sync_uri is not None and repo.sync_type is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-uri attribute, but is missing sync-type attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if repo.sync_type not in (None, "cvs", "git", "rsync"):
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type attribute set to unsupported value: '%s'") %
+ (sname, repo.sync_type), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if repo.sync_type == "cvs" and repo.sync_cvs_repo is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type=cvs, but is missing sync-cvs-repo attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ # For backward compatibility with locations set via PORTDIR and
+ # PORTDIR_OVERLAY, delay validation of the location and repo.name
+ # until after PORTDIR and PORTDIR_OVERLAY have been processed.
+ prepos[sname] = repo
def __init__(self, paths, settings):
"""Load config from files in paths"""
@@ -441,15 +584,42 @@ class RepoConfigLoader(object):
ignored_map = {}
ignored_location_map = {}
- portdir = settings.get('PORTDIR', '')
- portdir_overlay = settings.get('PORTDIR_OVERLAY', '')
+ if "PORTAGE_REPOSITORIES" in settings:
+ portdir = ""
+ portdir_overlay = ""
+ portdir_sync = ""
+ else:
+ portdir = settings.get("PORTDIR", "")
+ portdir_overlay = settings.get("PORTDIR_OVERLAY", "")
+ portdir_sync = settings.get("SYNC", "")
- self._parse(paths, prepos, ignored_map, ignored_location_map)
+ try:
+ self._parse(paths, prepos, ignored_map,
+ ignored_location_map, settings.local_config,
+ portdir)
+ except ConfigParserError as e:
+ writemsg(
+ _("!!! Error while reading repo config file: %s\n") % e,
+ noiselevel=-1)
+ # The configparser state is unreliable (prone to quirky
+ # exceptions) after it has thrown an error, so use empty
+ # config and try to fall back to PORTDIR{,_OVERLAY}.
+ prepos.clear()
+ prepos['DEFAULT'] = RepoConfig('DEFAULT',
+ {}, local_config=settings.local_config)
+ location_map.clear()
+ treemap.clear()
+ ignored_map.clear()
+ ignored_location_map.clear()
+
+ default_portdir = os.path.join(os.sep,
+ settings['EPREFIX'].lstrip(os.sep), 'usr', 'portage')
# If PORTDIR_OVERLAY contains a repo with the same repo_name as
# PORTDIR, then PORTDIR is overridden.
portdir = self._add_repositories(portdir, portdir_overlay, prepos,
- ignored_map, ignored_location_map)
+ ignored_map, ignored_location_map, settings.local_config,
+ default_portdir)
if portdir and portdir.strip():
portdir = os.path.realpath(portdir)
@@ -460,9 +630,51 @@ class RepoConfigLoader(object):
for repo in prepos.values()
if repo.location is not None and repo.missing_repo_name)
- #Take aliases into account.
- new_prepos = {}
- for repo_name, repo in prepos.items():
+ # Do this before expanding aliases, so that location_map and
+ # treemap consistently map unaliased names whenever available.
+ for repo_name, repo in list(prepos.items()):
+ if repo.location is None:
+ if repo_name != 'DEFAULT':
+ # Skip this warning for repoman (bug #474578).
+ if settings.local_config and paths:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf is missing location attribute") %
+ repo.name, level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+ else:
+ if not portage._sync_mode:
+ if not isdir_raise_eaccess(repo.location):
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has location attribute set "
+ "to nonexistent directory: '%s'") %
+ (repo_name, repo.location), level=logging.ERROR, noiselevel=-1)
+
+ # Ignore missing directory for 'gentoo' so that
+ # first sync with emerge-webrsync is possible.
+ if repo.name != 'gentoo':
+ del prepos[repo_name]
+ continue
+
+ # After removing support for PORTDIR_OVERLAY, the following check can be:
+ # if repo.missing_repo_name:
+ if repo.missing_repo_name and repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf refers to repository "
+ "without repository name set in '%s'") %
+ (repo_name, os.path.join(repo.location, REPO_NAME_LOC)), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ if repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has name different "
+ "from repository name '%s' set inside repository") %
+ (repo_name, repo.name), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ location_map[repo.location] = repo_name
+ treemap[repo_name] = repo.location
+
+ # Add alias mappings, but never replace unaliased mappings.
+ for repo_name, repo in list(prepos.items()):
names = set()
names.add(repo_name)
if repo.aliases:
@@ -470,36 +682,55 @@ class RepoConfigLoader(object):
names.update(aliases)
for name in names:
- if name in new_prepos:
+ if name in prepos and prepos[name].location is not None:
+ if name == repo_name:
+ # unaliased names already handled earlier
+ continue
writemsg_level(_("!!! Repository name or alias '%s', " + \
"defined for repository '%s', overrides " + \
"existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
- new_prepos[name] = repo
- prepos = new_prepos
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ continue
+ prepos[name] = repo
+ if repo.location is not None:
+ if repo.location not in location_map:
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ location_map[repo.location] = name
+ treemap[name] = repo.location
+
+ main_repo = prepos['DEFAULT'].main_repo
+ if main_repo is None or main_repo not in prepos:
+ # Set main_repo if it was not set in repos.conf.
+ main_repo = location_map.get(portdir)
+ if main_repo is not None:
+ prepos['DEFAULT'].main_repo = main_repo
+ else:
+ prepos['DEFAULT'].main_repo = None
+ if portdir and not portage._sync_mode:
+ writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty.\n"), noiselevel=-1)
- for (name, r) in prepos.items():
- if r.location is not None:
- location_map[r.location] = name
- treemap[name] = r.location
+ if main_repo is not None and prepos[main_repo].priority is None:
+ # This happens if main-repo has been set in repos.conf.
+ prepos[main_repo].priority = -1000
- # filter duplicates from aliases, by only including
- # items where repo.name == key
+ # Backward compatible SYNC support for mirrorselect.
+ if portdir_sync and main_repo is not None:
+ if portdir_sync.startswith("rsync://"):
+ prepos[main_repo].sync_uri = portdir_sync
+ prepos[main_repo].sync_type = "rsync"
- prepos_order = sorted(prepos.items(), key=lambda r:r[1].priority or 0)
+ # Include repo.name in sort key, for predictable sorting
+ # even when priorities are equal.
+ prepos_order = sorted(prepos.items(),
+ key=lambda r:(r[1].priority or 0, r[1].name))
+ # Filter duplicates from aliases by only including
+ # items where repo.name == key.
prepos_order = [repo.name for (key, repo) in prepos_order
- if repo.name == key and repo.location is not None]
-
- if prepos['DEFAULT'].main_repo is None or \
- prepos['DEFAULT'].main_repo not in prepos:
- #setting main_repo if it was not set in repos.conf
- if portdir in location_map:
- prepos['DEFAULT'].main_repo = location_map[portdir]
- elif portdir in ignored_location_map:
- prepos['DEFAULT'].main_repo = ignored_location_map[portdir]
- else:
- prepos['DEFAULT'].main_repo = None
- writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty. \n"), noiselevel=-1)
+ if repo.name == key and key != 'DEFAULT' and
+ repo.location is not None]
self.prepos = prepos
self.prepos_order = prepos_order
@@ -578,6 +809,18 @@ class RepoConfigLoader(object):
eclass_db.append(tree_db)
repo.eclass_db = eclass_db
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ if repo._masters_orig is None and self.mainRepo() and \
+ repo.name != self.mainRepo().name and not portage._sync_mode:
+ # TODO: Delete masters code in pym/portage/tests/resolver/ResolverPlayground.py when deleting this warning.
+ writemsg_level("!!! %s\n" % _("Repository '%s' is missing masters attribute in '%s'") %
+ (repo.name, os.path.join(repo.location, "metadata", "layout.conf")) +
+ "!!! %s\n" % _("Set 'masters = %s' in this file for future compatibility") %
+ self.mainRepo().name, level=logging.WARNING, noiselevel=-1)
+
self._prepos_changed = True
self._repo_location_list = []
@@ -613,10 +856,10 @@ class RepoConfigLoader(object):
def mainRepo(self):
"""Returns the main repo"""
- maid_repo = self.prepos['DEFAULT'].main_repo
- if maid_repo is None:
+ main_repo = self.prepos['DEFAULT'].main_repo
+ if main_repo is None:
return None
- return self.prepos[maid_repo]
+ return self.prepos[main_repo]
def _check_locations(self):
"""Check if repositories location are correct and show a warning message if not"""
@@ -625,7 +868,7 @@ class RepoConfigLoader(object):
if r.location is None:
writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
else:
- if not os.path.isdir(r.location):
+ if not isdir_raise_eaccess(r.location) and not portage._sync_mode:
self.prepos_order.remove(name)
writemsg(_("!!! Invalid Repository Location"
" (not a dir): '%s'\n") % r.location, noiselevel=-1)
@@ -650,19 +893,66 @@ class RepoConfigLoader(object):
def get_repo_for_location(self, location):
return self.prepos[self.get_name_for_location(location)]
+ def __setitem__(self, repo_name, repo):
+ # self.prepos[repo_name] = repo
+ raise NotImplementedError
+
def __getitem__(self, repo_name):
return self.prepos[repo_name]
+ def __delitem__(self, repo_name):
+ if repo_name == self.prepos['DEFAULT'].main_repo:
+ self.prepos['DEFAULT'].main_repo = None
+ location = self.prepos[repo_name].location
+ del self.prepos[repo_name]
+ if repo_name in self.prepos_order:
+ self.prepos_order.remove(repo_name)
+ for k, v in self.location_map.copy().items():
+ if v == repo_name:
+ del self.location_map[k]
+ if repo_name in self.treemap:
+ del self.treemap[repo_name]
+ self._repo_location_list = tuple(x for x in self._repo_location_list if x != location)
+
def __iter__(self):
for repo_name in self.prepos_order:
yield self.prepos[repo_name]
-def load_repository_config(settings):
- #~ repoconfigpaths = [os.path.join(settings.global_config_path, "repos.conf")]
+ def __contains__(self, repo_name):
+ return repo_name in self.prepos
+
+ def config_string(self):
+ str_or_int_keys = ("format", "location", "main_repo", "priority", "sync_cvs_repo", "sync_type", "sync_uri")
+ str_tuple_keys = ("aliases", "eclass_overrides", "force")
+ repo_config_tuple_keys = ("masters",)
+ keys = str_or_int_keys + str_tuple_keys + repo_config_tuple_keys
+ config_string = ""
+ for repo_name, repo in sorted(self.prepos.items(), key=lambda x: (x[0] != "DEFAULT", x[0])):
+ config_string += "\n[%s]\n" % repo_name
+ for key in sorted(keys):
+ if key == "main_repo" and repo_name != "DEFAULT":
+ continue
+ if getattr(repo, key) is not None:
+ if key in str_or_int_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), getattr(repo, key))
+ elif key in str_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(getattr(repo, key)))
+ elif key in repo_config_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(x.name for x in getattr(repo, key)))
+ return config_string.lstrip("\n")
+
+def load_repository_config(settings, extra_files=None):
repoconfigpaths = []
- if settings.local_config:
- repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
- USER_CONFIG_PATH, "repos.conf"))
+ if "PORTAGE_REPOSITORIES" in settings:
+ repoconfigpaths.append(io.StringIO(settings["PORTAGE_REPOSITORIES"]))
+ else:
+ if portage._not_installed:
+ repoconfigpaths.append(os.path.join(PORTAGE_BASE_PATH, "cnf", "repos.conf"))
+ else:
+ repoconfigpaths.append(os.path.join(settings.global_config_path, "repos.conf"))
+ repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH, "repos.conf"))
+ if extra_files:
+ repoconfigpaths.extend(extra_files)
return RepoConfigLoader(repoconfigpaths, settings)
def _get_repo_name(repo_location, cached=None):
@@ -696,6 +986,9 @@ def parse_layout_conf(repo_location, repo_name=None):
data['allow-provide-virtual'] = \
layout_data.get('allow-provide-virtuals', 'false').lower() == 'true'
+ data['eapis-banned'] = tuple(layout_data.get('eapis-banned', '').split())
+ data['eapis-deprecated'] = tuple(layout_data.get('eapis-deprecated', '').split())
+
data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
== 'true'
@@ -705,6 +998,8 @@ def parse_layout_conf(repo_location, repo_name=None):
data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
== 'true'
+ data['repo-name'] = _gen_valid_repo(layout_data.get('repo-name', ''))
+
manifest_policy = layout_data.get('use-manifests', 'strict').lower()
data['allow-missing-manifest'] = manifest_policy != 'strict'
data['create-manifest'] = manifest_policy != 'false'
@@ -713,9 +1008,18 @@ def parse_layout_conf(repo_location, repo_name=None):
# for compatibility w/ PMS, fallback to pms; but also check if the
# cache exists or not.
cache_formats = layout_data.get('cache-formats', '').lower().split()
- if not cache_formats and os.path.isdir(
- os.path.join(repo_location, 'metadata', 'cache')):
- cache_formats = ['pms']
+ if not cache_formats:
+ # Auto-detect cache formats, and prefer md5-cache if available.
+ # This behavior was deployed in portage-2.1.11.14, so that the
+ # default egencache format could eventually be changed to md5-dict
+ # in portage-2.1.11.32. WARNING: Versions prior to portage-2.1.11.14
+ # will NOT recognize md5-dict format unless it is explicitly
+ # listed in layout.conf.
+ cache_formats = []
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
+ cache_formats.append('md5-dict')
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
+ cache_formats.append('pms')
data['cache-formats'] = tuple(cache_formats)
manifest_hashes = layout_data.get('manifest-hashes')
@@ -754,7 +1058,7 @@ def parse_layout_conf(repo_location, repo_name=None):
raw_formats = layout_data.get('profile-formats')
if raw_formats is None:
- if eapi in ('4-python',):
+ if eapi_allows_directories_on_profile_level_and_repository_level(eapi):
raw_formats = ('portage-1',)
else:
raw_formats = ('portage-1-compat',)
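
The hunks above rework repos.conf handling end to end: new sync-type / sync-uri / sync-cvs-repo attributes with cross-validation in _parse(), inline configuration through PORTAGE_REPOSITORIES, and layout.conf extensions (repo-name override, eapis-banned / eapis-deprecated, cache-formats auto-detection). A minimal sketch of the inline form, assuming only the syntax accepted by _parse() and serialized by config_string() above; the repository name, location, and URI are placeholders:

	# Illustrative sketch: repos.conf content supplied inline. When
	# "PORTAGE_REPOSITORIES" is present in settings, load_repository_config()
	# wraps the value in io.StringIO and skips every on-disk repos.conf.
	portage_repositories = (
		"[DEFAULT]\n"
		"main-repo = test_repo\n"
		"\n"
		"[test_repo]\n"
		"location = /var/repositories/test_repo\n"
		"sync-type = rsync\n"
		"sync-uri = rsync://example.org/test_repo\n"
	)

Note that _parse() rejects a section which sets only one of sync-type / sync-uri, restricts sync-type to cvs, git, or rsync, and additionally requires sync-cvs-repo when sync-type is cvs.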
diff --git a/pym/portage/tests/__init__.py b/pym/portage/tests/__init__.py
index 492ece44b..84e732a1c 100644
--- a/pym/portage/tests/__init__.py
+++ b/pym/portage/tests/__init__.py
@@ -1,5 +1,5 @@
# tests/__init__.py -- Portage Unit Test functionality
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -7,26 +7,40 @@ from __future__ import print_function
import sys
import time
import unittest
-from optparse import OptionParser, OptionValueError
try:
from unittest.runner import _TextTestResult # new in python-2.7
except ImportError:
from unittest import _TextTestResult
+try:
+ # They added the skip framework to python-2.7.
+ # Drop this once we drop python-2.6 support.
+ unittest_skip_shims = False
+ from unittest import SkipTest # new in python-2.7
+except ImportError:
+ unittest_skip_shims = True
+
+import portage
from portage import os
from portage import _encodings
from portage import _unicode_decode
+from portage.util._argparse import ArgumentParser
def main():
suite = unittest.TestSuite()
basedir = os.path.dirname(os.path.realpath(__file__))
usage = "usage: %s [options] [tests to run]" % os.path.basename(sys.argv[0])
- parser = OptionParser(usage=usage)
- parser.add_option("-l", "--list", help="list all tests",
+ parser = ArgumentParser(usage=usage)
+ parser.add_argument("-l", "--list", help="list all tests",
action="store_true", dest="list_tests")
- (options, args) = parser.parse_args(args=sys.argv)
+ options, args = parser.parse_known_args(args=sys.argv)
+
+ if (os.environ.get('NOCOLOR') in ('yes', 'true') or
+ os.environ.get('TERM') == 'dumb' or
+ not sys.stdout.isatty()):
+ portage.output.nocolor()
if options.list_tests:
testdir = os.path.dirname(sys.argv[0])
@@ -70,15 +84,12 @@ def getTestFromCommandLine(args, base_path):
def getTestDirs(base_path):
TEST_FILE = b'__test__'
- svn_dirname = b'.svn'
testDirs = []
# the os.walk help mentions relative paths as being quirky
# I was tired of adding dirs to the list, so now we add __test__
# to each dir we want tested.
for root, dirs, files in os.walk(base_path):
- if svn_dirname in dirs:
- dirs.remove(svn_dirname)
try:
root = _unicode_decode(root,
encoding=_encodings['fs'], errors='strict')
@@ -93,7 +104,7 @@ def getTestDirs(base_path):
def getTestNames(path):
files = os.listdir(path)
- files = [ f[:-3] for f in files if f.startswith("test") and f.endswith(".py") ]
+ files = [f[:-3] for f in files if f.startswith("test") and f.endswith(".py")]
files.sort()
return files
@@ -134,14 +145,14 @@ class TextTestResult(_TextTestResult):
self.portage_skipped = []
def addTodo(self, test, info):
- self.todoed.append((test,info))
+ self.todoed.append((test, info))
if self.showAll:
self.stream.writeln("TODO")
elif self.dots:
self.stream.write(".")
def addPortageSkip(self, test, info):
- self.portage_skipped.append((test,info))
+ self.portage_skipped.append((test, info))
if self.showAll:
self.stream.writeln("SKIP")
elif self.dots:
@@ -185,10 +196,14 @@ class TestCase(unittest.TestCase):
except:
result.addError(self, sys.exc_info())
return
+
ok = False
try:
testMethod()
ok = True
+ except SkipTest as e:
+ result.addPortageSkip(self, "%s: SKIP: %s" %
+ (testMethod, str(e)))
except self.failureException:
if self.portage_skip is not None:
if self.portage_skip is True:
@@ -197,13 +212,14 @@ class TestCase(unittest.TestCase):
result.addPortageSkip(self, "%s: SKIP: %s" %
(testMethod, self.portage_skip))
elif self.todo:
- result.addTodo(self,"%s: TODO" % testMethod)
+ result.addTodo(self, "%s: TODO" % testMethod)
else:
result.addFailure(self, sys.exc_info())
except (KeyboardInterrupt, SystemExit):
raise
except:
result.addError(self, sys.exc_info())
+
try:
self.tearDown()
except SystemExit:
@@ -213,7 +229,8 @@ class TestCase(unittest.TestCase):
except:
result.addError(self, sys.exc_info())
ok = False
- if ok: result.addSuccess(self)
+ if ok:
+ result.addSuccess(self)
finally:
result.stopTest(self)
@@ -230,10 +247,48 @@ class TestCase(unittest.TestCase):
except excClass:
return
else:
- if hasattr(excClass,'__name__'): excName = excClass.__name__
+ if hasattr(excClass, '__name__'): excName = excClass.__name__
else: excName = str(excClass)
raise self.failureException("%s not raised: %s" % (excName, msg))
+ def assertExists(self, path):
+ """Make sure |path| exists"""
+ if not os.path.exists(path):
+ msg = ['path is missing: %s' % (path,)]
+ while path != '/':
+ path = os.path.dirname(path)
+ if not path:
+ # If we're given something like "foo", abort once we get to "".
+ break
+ result = os.path.exists(path)
+ msg.append('\tos.path.exists(%s): %s' % (path, result))
+ if result:
+ msg.append('\tcontents: %r' % os.listdir(path))
+ break
+ raise self.failureException('\n'.join(msg))
+
+ def assertNotExists(self, path):
+ """Make sure |path| does not exist"""
+ if os.path.exists(path):
+ raise self.failureException('path exists when it should not: %s' % path)
+
+if unittest_skip_shims:
+ # Shim code for <python-2.7.
+ class SkipTest(Exception):
+ """unittest.SkipTest shim for <python-2.7"""
+
+ def skipTest(self, reason):
+ raise SkipTest(reason)
+ setattr(TestCase, 'skipTest', skipTest)
+
+ def assertIn(self, member, container, msg=None):
+ self.assertTrue(member in container, msg=msg)
+ setattr(TestCase, 'assertIn', assertIn)
+
+ def assertNotIn(self, member, container, msg=None):
+ self.assertFalse(member in container, msg=msg)
+ setattr(TestCase, 'assertNotIn', assertNotIn)
+
class TextTestRunner(unittest.TextTestRunner):
"""
We subclass unittest.TextTestRunner to output SKIP for tests that fail but are skippable
@@ -271,8 +326,8 @@ class TextTestRunner(unittest.TextTestRunner):
self.stream.writeln("OK")
return result
-test_cps = ['sys-apps/portage','virtual/portage']
-test_versions = ['1.0', '1.0-r1','2.3_p4','1.0_alpha57']
-test_slots = [ None, '1','gentoo-sources-2.6.17','spankywashere']
-test_usedeps = ['foo','-bar', ('foo','bar'),
- ('foo','-bar'), ('foo?', '!bar?') ]
+test_cps = ['sys-apps/portage', 'virtual/portage']
+test_versions = ['1.0', '1.0-r1', '2.3_p4', '1.0_alpha57']
+test_slots = [None, '1', 'gentoo-sources-2.6.17', 'spankywashere']
+test_usedeps = ['foo', '-bar', ('foo', 'bar'),
+ ('foo', '-bar'), ('foo?', '!bar?')]
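
For reference, the skip support wired in above can be exercised as follows; an illustrative sketch with a hypothetical test case, not part of this patch:

	from portage.tests import TestCase

	class ExampleTestCase(TestCase):
		def testOptionalFeature(self):
			# On python-2.7+ this raises unittest's own SkipTest; on
			# python-2.6 it raises the shim class defined above. Either
			# way, run() catches SkipTest and routes the test through
			# addPortageSkip(), so it is reported as SKIP rather than
			# as a failure.
			self.skipTest("optional feature not available on this host")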
diff --git a/pym/portage/tests/bin/setup_env.py b/pym/portage/tests/bin/setup_env.py
index 1f8554e42..9cc26df08 100644
--- a/pym/portage/tests/bin/setup_env.py
+++ b/pym/portage/tests/bin/setup_env.py
@@ -1,19 +1,17 @@
# setup_env.py -- Make sure bin subdir has sane env for testing
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
from portage import os
from portage import shutil
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
from portage.tests import TestCase
from portage.process import spawn
-basepath = os.path.join(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))),
- "..", "..", "..")
-bindir = os.path.join(basepath, "bin")
-pymdir = os.path.join(basepath, "pym")
+bindir = PORTAGE_BIN_PATH
basedir = None
env = None
@@ -30,20 +28,20 @@ def binTestsInit():
global basedir, env
basedir = tempfile.mkdtemp()
env = {}
- env["EAPI"] = "0"
- env["D"] = os.path.join(basedir, "image")
- env["T"] = os.path.join(basedir, "temp")
- env["S"] = os.path.join(basedir, "workdir")
- env["PF"] = "portage-tests-0.09-r1"
- env["PATH"] = bindir + ":" + os.environ["PATH"]
- env["PORTAGE_BIN_PATH"] = bindir
- env["PORTAGE_PYM_PATH"] = pymdir
- env["PORTAGE_INST_UID"] = str(os.getuid())
- env["PORTAGE_INST_GID"] = str(os.getgid())
- env["DESTTREE"] = "/usr"
- os.mkdir(env["D"])
- os.mkdir(env["T"])
- os.mkdir(env["S"])
+ env['EAPI'] = '0'
+ env['D'] = os.path.join(basedir, 'image')
+ env['T'] = os.path.join(basedir, 'temp')
+ env['S'] = os.path.join(basedir, 'workdir')
+ env['PF'] = 'portage-tests-0.09-r1'
+ env['PATH'] = bindir + ':' + os.environ['PATH']
+ env['PORTAGE_BIN_PATH'] = bindir
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+ env['PORTAGE_INST_UID'] = str(os.getuid())
+ env['PORTAGE_INST_GID'] = str(os.getgid())
+ env['DESTTREE'] = '/usr'
+ os.mkdir(env['D'])
+ os.mkdir(env['T'])
+ os.mkdir(env['S'])
class BinTestCase(TestCase):
def init(self):
@@ -53,7 +51,7 @@ class BinTestCase(TestCase):
def _exists_in_D(path):
# Note: do not use os.path.join() here; we assume D ends in /
- return os.access(env["D"] + path, os.W_OK)
+ return os.access(env['D'] + path, os.W_OK)
def exists_in_D(path):
if not _exists_in_D(path):
raise TestCase.failureException
@@ -68,7 +66,7 @@ def portage_func(func, args, exit_status=0):
f = open('/dev/null', 'wb')
fd_pipes = {0:0,1:f.fileno(),2:f.fileno()}
def pre_exec():
- os.chdir(env["S"])
+ os.chdir(env['S'])
spawn([func] + args.split(), env=env,
fd_pipes=fd_pipes, pre_exec=pre_exec)
f.close()
@@ -80,10 +78,10 @@ def create_portage_wrapper(bin):
return portage_func(*newargs)
return derived_func
-for bin in os.listdir(os.path.join(bindir, "ebuild-helpers")):
- if bin.startswith("do") or \
- bin.startswith("new") or \
- bin.startswith("prep") or \
- bin in ["ecompress","ecompressdir","fowners","fperms"]:
+for bin in os.listdir(os.path.join(bindir, 'ebuild-helpers')):
+ if bin.startswith('do') or \
+ bin.startswith('new') or \
+ bin.startswith('prep') or \
+ bin in ('ecompress', 'ecompressdir', 'fowners', 'fperms'):
globals()[bin] = create_portage_wrapper(
- os.path.join(bindir, "ebuild-helpers", bin))
+ os.path.join(bindir, 'ebuild-helpers', bin))
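
The loop above synthesizes module-level wrappers for the ebuild helpers. An illustrative sketch of consuming a generated wrapper (the helper invocation is hypothetical; any helper matching do*, new*, prep*, or the names listed gets a wrapper):

	from portage.tests.bin import setup_env

	setup_env.binTestsInit()
	# Each wrapper spawns its helper with cwd set to env['S'] via
	# pre_exec(), discarding helper output to /dev/null.
	setup_env.dobin("hello-world")  # hypothetical: installs under env['D']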
diff --git a/pym/portage/tests/dbapi/test_fakedbapi.py b/pym/portage/tests/dbapi/test_fakedbapi.py
index e3843f0a4..771356350 100644
--- a/pym/portage/tests/dbapi/test_fakedbapi.py
+++ b/pym/portage/tests/dbapi/test_fakedbapi.py
@@ -1,4 +1,4 @@
-# Copyright 2011-2012 Gentoo Foundation
+# Copyright 2011-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -42,10 +42,12 @@ class TestFakedbapi(TestCase):
tempdir = tempfile.mkdtemp()
try:
- portdir = os.path.join(tempdir, "usr/portage")
- os.makedirs(portdir)
+ test_repo = os.path.join(tempdir, "var", "repositories", "test_repo")
+ os.makedirs(os.path.join(test_repo, "profiles"))
+ with open(os.path.join(test_repo, "profiles", "repo_name"), "w") as f:
+ f.write("test_repo")
env = {
- "PORTDIR": portdir,
+ "PORTAGE_REPOSITORIES": "[DEFAULT]\nmain-repo = test_repo\n[test_repo]\nlocation = %s" % test_repo
}
fakedb = fakedbapi(settings=config(config_profile_path="",
env=env, eprefix=tempdir))
diff --git a/pym/portage/tests/dbapi/test_portdb_cache.py b/pym/portage/tests/dbapi/test_portdb_cache.py
new file mode 100644
index 000000000..94af96eaf
--- /dev/null
+++ b/pym/portage/tests/dbapi/test_portdb_cache.py
@@ -0,0 +1,183 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_BIN_PATH,
+ PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class PortdbCacheTestCase(TestCase):
+
+ def testPortdbCache(self):
+ debug = False
+
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "sys-apps/B-1": {},
+ "sys-apps/B-2": {},
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ test_repo_location = settings.repositories["test_repo"].location
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ metadata_dir = os.path.join(test_repo_location, "metadata")
+ md5_cache_dir = os.path.join(metadata_dir, "md5-cache")
+ pms_cache_dir = os.path.join(metadata_dir, "cache")
+ layout_conf_path = os.path.join(metadata_dir, "layout.conf")
+
+ portage_python = portage._python_interpreter
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(PORTAGE_BIN_PATH, "egencache"),
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ python_cmd = (portage_python, "-b", "-Wd", "-c")
+
+ test_commands = (
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: not os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+
+ egencache_cmd + ("--update",),
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
+ sys.exit(1)
+ """),),
+
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = md5-dict pms", layout_conf_path,)))),
+ egencache_cmd + ("--update",),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
+ sys.exit(1)
+ """),),
+
+ # Disable DeprecationWarnings, since the pms format triggers them
+ # in portdbapi._create_pregen_cache().
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = pms md5-dict", layout_conf_path,)))),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.metadata import database as pms_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], pms_database):
+ sys.exit(1)
+ """),),
+
+ # Test auto-detection and preference for md5-cache when both
+ # cache formats are available but layout.conf is absent.
+ (BASH_BINARY, "-c", "rm %s" % portage._shell_quote(layout_conf_path)),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
+ sys.exit(1)
+ """),),
+ )
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PATH" : os.environ.get("PATH", ""),
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [user_config_dir]
+
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+
+ if hasattr(args[0], '__call__'):
+ self.assertTrue(args[0](),
+ "callable at index %s failed" % (i,))
+ continue
+
+ proc = subprocess.Popen(args,
+ env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "command %d failed with args %s" % (i, args,))
+ finally:
+ playground.cleanup()
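
The three configurations this test cycles through correspond to the following layout.conf states; the first two lines are written verbatim by the test, and in the third case layout.conf is removed so that parse_layout_conf() auto-detects, preferring metadata/md5-cache over metadata/cache:

	cache-formats = md5-dict pms
	cache-formats = pms md5-dict
	# (no layout.conf: auto-detection, with md5-dict preferred when both caches exist)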
diff --git a/pym/portage/tests/dep/testAtom.py b/pym/portage/tests/dep/testAtom.py
index f5a7d3749..da58be27c 100644
--- a/pym/portage/tests/dep/testAtom.py
+++ b/pym/portage/tests/dep/testAtom.py
@@ -1,4 +1,4 @@
-# Copyright 2006, 2010 Gentoo Foundation
+# Copyright 2006-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -10,154 +10,157 @@ class TestAtom(TestCase):
def testAtom(self):
tests = (
- ( "=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
- ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False ),
- ( "=sys-apps/portage-2.1-r1*:0[doc]",
- ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False ),
- ( "sys-apps/portage:0[doc]",
- (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False ),
- ( "sys-apps/portage:0[doc]",
- (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False ),
- ( "*/*",
- (None, '*/*', None, None, None, None), True, False ),
- ( "=*/*-*9999*",
- ('=*', '*/*', '*9999*', None, None, None), True, False ),
- ( "=*/*-*9999*:0::repo_name",
- ('=*', '*/*', '*9999*', '0', None, 'repo_name'), True, True ),
- ( "sys-apps/*",
- (None, 'sys-apps/*', None, None, None, None), True, False ),
- ( "*/portage",
- (None, '*/portage', None, None, None, None), True, False ),
- ( "s*s-*/portage:1",
- (None, 's*s-*/portage', None, '1', None, None), True, False ),
- ( "*/po*ge:2",
- (None, '*/po*ge', None, '2', None, None), True, False ),
- ( "!dev-libs/A",
- (None, 'dev-libs/A', None, None, None, None), True, True ),
- ( "!!dev-libs/A",
- (None, 'dev-libs/A', None, None, None, None), True, True ),
- ( "!!dev-libs/A",
- (None, 'dev-libs/A', None, None, None, None), True, True ),
- ( "dev-libs/A[foo(+)]",
- (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True ),
- ( "dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
- (None, 'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
- ( "dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
- (None, 'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
-
- ( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
- ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True ),
- ( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
- ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True ),
- ( "sys-apps/portage:0::repo_name[doc]",
- (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True ),
-
- ( "*/*::repo_name",
- (None, '*/*', None, None, None, 'repo_name'), True, True ),
- ( "sys-apps/*::repo_name",
- (None, 'sys-apps/*', None, None, None, 'repo_name'), True, True ),
- ( "*/portage::repo_name",
- (None, '*/portage', None, None, None, 'repo_name'), True, True ),
- ( "s*s-*/portage:1::repo_name",
- (None, 's*s-*/portage', None, '1', None, 'repo_name'), True, True ),
+ ("=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False),
+ ("=sys-apps/portage-2.1-r1*:0[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("*/*",
+ (None, '*/*', None, None, None, None), True, False),
+ ("=*/*-*9999*",
+ ('=*', '*/*', '*9999*', None, None, None), True, False),
+ ("=*/*-*9999*:0::repo_name",
+ ('=*', '*/*', '*9999*', '0', None, 'repo_name'), True, True),
+ ("=*/*-*_beta*",
+ ('=*', '*/*', '*_beta*', None, None, None), True, False),
+ ("=*/*-*_beta*:0::repo_name",
+ ('=*', '*/*', '*_beta*', '0', None, 'repo_name'), True, True),
+ ("sys-apps/*",
+ (None, 'sys-apps/*', None, None, None, None), True, False),
+ ("*/portage",
+ (None, '*/portage', None, None, None, None), True, False),
+ ("s*s-*/portage:1",
+ (None, 's*s-*/portage', None, '1', None, None), True, False),
+ ("*/po*ge:2",
+ (None, '*/po*ge', None, '2', None, None), True, False),
+ ("!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("dev-libs/A[foo(+)]",
+ (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+ ("dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True),
+ ("sys-apps/portage:0::repo_name[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True),
+
+ ("*/*::repo_name",
+ (None, '*/*', None, None, None, 'repo_name'), True, True),
+ ("sys-apps/*::repo_name",
+ (None, 'sys-apps/*', None, None, None, 'repo_name'), True, True),
+ ("*/portage::repo_name",
+ (None, '*/portage', None, None, None, 'repo_name'), True, True),
+ ("s*s-*/portage:1::repo_name",
+ (None, 's*s-*/portage', None, '1', None, 'repo_name'), True, True),
)
-
+
tests_xfail = (
- ( Atom("sys-apps/portage"), False, False ),
- ( "cat/pkg[a!]", False, False ),
- ( "cat/pkg[!a]", False, False ),
- ( "cat/pkg[!a!]", False, False ),
- ( "cat/pkg[!a-]", False, False ),
- ( "cat/pkg[-a=]", False, False ),
- ( "cat/pkg[-a?]", False, False ),
- ( "cat/pkg[-a!]", False, False ),
- ( "cat/pkg[=a]", False, False ),
- ( "cat/pkg[=a=]", False, False ),
- ( "cat/pkg[=a?]", False, False ),
- ( "cat/pkg[=a!]", False, False ),
- ( "cat/pkg[=a-]", False, False ),
- ( "cat/pkg[?a]", False, False ),
- ( "cat/pkg[?a=]", False, False ),
- ( "cat/pkg[?a?]", False, False ),
- ( "cat/pkg[?a!]", False, False ),
- ( "cat/pkg[?a-]", False, False ),
- ( "sys-apps/portage[doc]:0", False, False ),
- ( "*/*", False, False ),
- ( "sys-apps/*", False, False ),
- ( "*/portage", False, False ),
- ( "*/**", True, False ),
- ( "*/portage[use]", True, False ),
- ( "cat/pkg[a()]", False, False ),
- ( "cat/pkg[a(]", False, False ),
- ( "cat/pkg[a)]", False, False ),
- ( "cat/pkg[a(,b]", False, False ),
- ( "cat/pkg[a),b]", False, False ),
- ( "cat/pkg[a(*)]", False, False ),
- ( "cat/pkg[a(*)]", True, False ),
- ( "cat/pkg[a(+-)]", False, False ),
- ( "cat/pkg[a()]", False, False ),
- ( "cat/pkg[(+)a]", False, False ),
- ( "cat/pkg[a=(+)]", False, False ),
- ( "cat/pkg[!(+)a=]", False, False ),
- ( "cat/pkg[!a=(+)]", False, False ),
- ( "cat/pkg[a?(+)]", False, False ),
- ( "cat/pkg[!a?(+)]", False, False ),
- ( "cat/pkg[!(+)a?]", False, False ),
- ( "cat/pkg[-(+)a]", False, False ),
- ( "cat/pkg[a(+),-a]", False, False ),
- ( "cat/pkg[a(-),-a]", False, False ),
- ( "cat/pkg[-a,a(+)]", False, False ),
- ( "cat/pkg[-a,a(-)]", False, False ),
- ( "cat/pkg[-a(+),a(-)]", False, False ),
- ( "cat/pkg[-a(-),a(+)]", False, False ),
- ( "sys-apps/portage[doc]::repo_name", False, False ),
- ( "sys-apps/portage:0[doc]::repo_name", False, False ),
- ( "sys-apps/portage[doc]:0::repo_name", False, False ),
- ( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False ),
- ( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False ),
- ( "sys-apps/portage:0::repo_name[doc]", False, False ),
- ( "*/*::repo_name", True, False ),
+ (Atom("sys-apps/portage"), False, False),
+ ("cat/pkg[a!]", False, False),
+ ("cat/pkg[!a]", False, False),
+ ("cat/pkg[!a!]", False, False),
+ ("cat/pkg[!a-]", False, False),
+ ("cat/pkg[-a=]", False, False),
+ ("cat/pkg[-a?]", False, False),
+ ("cat/pkg[-a!]", False, False),
+ ("cat/pkg[=a]", False, False),
+ ("cat/pkg[=a=]", False, False),
+ ("cat/pkg[=a?]", False, False),
+ ("cat/pkg[=a!]", False, False),
+ ("cat/pkg[=a-]", False, False),
+ ("cat/pkg[?a]", False, False),
+ ("cat/pkg[?a=]", False, False),
+ ("cat/pkg[?a?]", False, False),
+ ("cat/pkg[?a!]", False, False),
+ ("cat/pkg[?a-]", False, False),
+ ("sys-apps/portage[doc]:0", False, False),
+ ("*/*", False, False),
+ ("sys-apps/*", False, False),
+ ("*/portage", False, False),
+ ("*/**", True, False),
+ ("*/portage[use]", True, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[a(]", False, False),
+ ("cat/pkg[a)]", False, False),
+ ("cat/pkg[a(,b]", False, False),
+ ("cat/pkg[a),b]", False, False),
+ ("cat/pkg[a(*)]", False, False),
+ ("cat/pkg[a(*)]", True, False),
+ ("cat/pkg[a(+-)]", False, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[(+)a]", False, False),
+ ("cat/pkg[a=(+)]", False, False),
+ ("cat/pkg[!(+)a=]", False, False),
+ ("cat/pkg[!a=(+)]", False, False),
+ ("cat/pkg[a?(+)]", False, False),
+ ("cat/pkg[!a?(+)]", False, False),
+ ("cat/pkg[!(+)a?]", False, False),
+ ("cat/pkg[-(+)a]", False, False),
+ ("cat/pkg[a(+),-a]", False, False),
+ ("cat/pkg[a(-),-a]", False, False),
+ ("cat/pkg[-a,a(+)]", False, False),
+ ("cat/pkg[-a,a(-)]", False, False),
+ ("cat/pkg[-a(+),a(-)]", False, False),
+ ("cat/pkg[-a(-),a(+)]", False, False),
+ ("sys-apps/portage[doc]::repo_name", False, False),
+ ("sys-apps/portage:0[doc]::repo_name", False, False),
+ ("sys-apps/portage[doc]:0::repo_name", False, False),
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False),
+ ("sys-apps/portage:0::repo_name[doc]", False, False),
+ ("*/*::repo_name", True, False),
)
for atom, parts, allow_wildcard, allow_repo in tests:
a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
op, cp, ver, slot, use, repo = parts
- self.assertEqual( op, a.operator,
- msg="Atom('%s').operator = %s == '%s'" % ( atom, a.operator, op ) )
- self.assertEqual( cp, a.cp,
- msg="Atom('%s').cp = %s == '%s'" % ( atom, a.cp, cp ) )
+ self.assertEqual(op, a.operator,
+ msg="Atom('%s').operator = %s == '%s'" % (atom, a.operator, op))
+ self.assertEqual(cp, a.cp,
+ msg="Atom('%s').cp = %s == '%s'" % (atom, a.cp, cp))
if ver is not None:
cpv = "%s-%s" % (cp, ver)
else:
cpv = cp
- self.assertEqual( cpv, a.cpv,
- msg="Atom('%s').cpv = %s == '%s'" % ( atom, a.cpv, cpv ) )
- self.assertEqual( slot, a.slot,
- msg="Atom('%s').slot = %s == '%s'" % ( atom, a.slot, slot ) )
- self.assertEqual( repo, a.repo,
- msg="Atom('%s').repo == %s == '%s'" % ( atom, a.repo, repo ) )
+ self.assertEqual(cpv, a.cpv,
+ msg="Atom('%s').cpv = %s == '%s'" % (atom, a.cpv, cpv))
+ self.assertEqual(slot, a.slot,
+ msg="Atom('%s').slot = %s == '%s'" % (atom, a.slot, slot))
+ self.assertEqual(repo, a.repo,
+ msg="Atom('%s').repo == %s == '%s'" % (atom, a.repo, repo))
if a.use:
returned_use = str(a.use)
else:
returned_use = None
- self.assertEqual( use, returned_use,
- msg="Atom('%s').use = %s == '%s'" % ( atom, returned_use, use ) )
+ self.assertEqual(use, returned_use,
+ msg="Atom('%s').use = %s == '%s'" % (atom, returned_use, use))
for atom, allow_wildcard, allow_repo in tests_xfail:
- self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom, \
+ self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom,
allow_wildcard=allow_wildcard, allow_repo=allow_repo)
def testSlotAbiAtom(self):
tests = (
- ("virtual/ffmpeg:0/53", "4-slot-abi", {"slot": "0", "slot_abi": "53", "slot_abi_op": None}),
- ("virtual/ffmpeg:0/53=", "4-slot-abi", {"slot": "0", "slot_abi": "53", "slot_abi_op": "="}),
- ("virtual/ffmpeg:=", "4-slot-abi", {"slot": None, "slot_abi": None, "slot_abi_op": "="}),
- ("virtual/ffmpeg:0=", "4-slot-abi", {"slot": "0", "slot_abi": None, "slot_abi_op": "="}),
- ("virtual/ffmpeg:*", "4-slot-abi", {"slot": None, "slot_abi": None, "slot_abi_op": "*"}),
- ("virtual/ffmpeg:0*", "4-slot-abi", {"slot": "0", "slot_abi": None, "slot_abi_op": "*"}),
- ("virtual/ffmpeg:0", "4-slot-abi", {"slot": "0", "slot_abi": None, "slot_abi_op": None}),
- ("virtual/ffmpeg", "4-slot-abi", {"slot": None, "slot_abi": None, "slot_abi_op": None}),
+ ("virtual/ffmpeg:0/53", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": None}),
+ ("virtual/ffmpeg:0/53=", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": "="}),
+ ("virtual/ffmpeg:=", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:0=", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:*", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "*"}),
+ ("virtual/ffmpeg:0", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": None}),
+ ("virtual/ffmpeg", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": None}),
)
for atom, eapi, parts in tests:
@@ -165,7 +168,7 @@ class TestAtom(TestCase):
for k, v in parts.items():
self.assertEqual(v, getattr(a, k),
msg="Atom('%s').%s = %s == '%s'" %
- (atom, k, getattr(a, k), v ))
+ (atom, k, getattr(a, k), v))
def test_intersects(self):
test_cases = (
@@ -182,7 +185,7 @@ class TestAtom(TestCase):
)
for atom, other, expected_result in test_cases:
- self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result, \
+ self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result,
"%s and %s should intersect: %s" % (atom, other, expected_result))
def test_violated_conditionals(self):
@@ -276,7 +279,7 @@ class TestAtom(TestCase):
for atom, other_use, iuse, parent_use in test_cases_xfail:
a = Atom(atom)
validator = use_flag_validator(iuse)
- self.assertRaisesMsg(atom, InvalidAtom, \
+ self.assertRaisesMsg(atom, InvalidAtom,
a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
def test_evaluate_conditionals(self):
@@ -325,9 +328,9 @@ class TestAtom(TestCase):
("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [], "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"], "dev-libs/A[a,b,-b,c,-c,d,-f]"),
- ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", \
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]",
["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
- ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", \
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
[], ["a", "b", "c", "d", "e", "f"], "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
)
diff --git a/pym/portage/tests/dep/testCheckRequiredUse.py b/pym/portage/tests/dep/testCheckRequiredUse.py
index 54791e016..63330b5cb 100644
--- a/pym/portage/tests/dep/testCheckRequiredUse.py
+++ b/pym/portage/tests/dep/testCheckRequiredUse.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -9,97 +9,106 @@ class TestCheckRequiredUse(TestCase):
def testCheckRequiredUse(self):
test_cases = (
- ( "|| ( a b )", [], ["a", "b"], False),
- ( "|| ( a b )", ["a"], ["a", "b"], True),
- ( "|| ( a b )", ["b"], ["a", "b"], True),
- ( "|| ( a b )", ["a", "b"], ["a", "b"], True),
-
- ( "^^ ( a b )", [], ["a", "b"], False),
- ( "^^ ( a b )", ["a"], ["a", "b"], True),
- ( "^^ ( a b )", ["b"], ["a", "b"], True),
- ( "^^ ( a b )", ["a", "b"], ["a", "b"], False),
-
- ( "^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
-
- ( "^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
- ( "( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
-
- ( "a || ( b c )", ["a"], ["a", "b", "c"], False),
- ( "|| ( b c ) a", ["a"], ["a", "b", "c"], False),
-
- ( "|| ( a b c )", ["a"], ["a", "b", "c"], True),
- ( "|| ( a b c )", ["b"], ["a", "b", "c"], True),
- ( "|| ( a b c )", ["c"], ["a", "b", "c"], True),
-
- ( "^^ ( a b c )", ["a"], ["a", "b", "c"], True),
- ( "^^ ( a b c )", ["b"], ["a", "b", "c"], True),
- ( "^^ ( a b c )", ["c"], ["a", "b", "c"], True),
- ( "^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
- ( "^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
- ( "^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
- ( "^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
-
- ( "a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
- ( "a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
-
- ( "^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
-
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
-
- ( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
-
- ( "|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
- ( "|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
- ( "|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", [], ["a", "b", "c"], False),
-
- ( "^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
- ( "^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
- ( "^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
- ( "^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
+ ("|| ( a b )", [], ["a", "b"], False),
+ ("|| ( a b )", ["a"], ["a", "b"], True),
+ ("|| ( a b )", ["b"], ["a", "b"], True),
+ ("|| ( a b )", ["a", "b"], ["a", "b"], True),
+
+ ("^^ ( a b )", [], ["a", "b"], False),
+ ("^^ ( a b )", ["a"], ["a", "b"], True),
+ ("^^ ( a b )", ["b"], ["a", "b"], True),
+ ("^^ ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a"], ["a", "b"], True),
+ ("?? ( a b )", ["b"], ["a", "b"], True),
+ ("?? ( a b )", [], ["a", "b"], True),
+ ("?? ( )", [], [], True),
+
+ ("^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
+
+ ("^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
+ ("( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
+
+ ("a || ( b c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( b c ) a", ["a"], ["a", "b", "c"], False),
+
+ ("|| ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["c"], ["a", "b", "c"], True),
+
+ ("^^ ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
+ ("a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
+
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
+
+ ("|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", [], ["a", "b", "c"], False),
+
+ ("^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
)
test_cases_xfail = (
- ( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
- ( "^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
- ( "^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
- ( "^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
- ( "^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
- ( "^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
+ ("^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ )
+
+ test_cases_xfail_eapi = (
+ ("?? ( a b )", [], ["a", "b"], "4"),
)
for required_use, use, iuse, expected in test_cases:
@@ -110,6 +119,11 @@ class TestCheckRequiredUse(TestCase):
self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
InvalidDependString, check_required_use, required_use, use, iuse.__contains__)
+ for required_use, use, iuse, eapi in test_cases_xfail_eapi:
+			self.assertRaisesMsg(required_use + ", USE = " + " ".join(use),
+ InvalidDependString, check_required_use, required_use, use,
+ iuse.__contains__, eapi=eapi)
+
def testCheckRequiredUseFilterSatisfied(self):
"""
Test filtering of satisfied parts of REQUIRED_USE,
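
A minimal sketch of the new ?? (at-most-one-of) operator pinned down above, assuming check_required_use is imported from portage.dep as in this module and that its return value is truthy exactly when REQUIRED_USE is satisfied:

    from portage.dep import check_required_use
    iuse = ["a", "b"].__contains__
    # "?? ( a b )" permits zero or one enabled flag, but not both
    assert check_required_use("?? ( a b )", ["a"], iuse)
    assert not check_required_use("?? ( a b )", ["a", "b"], iuse)
    # under eapi="4" the operator raises InvalidDependString, per the xfail case
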
diff --git a/pym/portage/tests/dep/testStandalone.py b/pym/portage/tests/dep/testStandalone.py
index f03f2d508..88e3f39f8 100644
--- a/pym/portage/tests/dep/testStandalone.py
+++ b/pym/portage/tests/dep/testStandalone.py
@@ -12,20 +12,20 @@ class TestStandalone(TestCase):
def testCPVequal(self):
test_cases = (
- ( "sys-apps/portage-2.1","sys-apps/portage-2.1", True ),
- ( "sys-apps/portage-2.1","sys-apps/portage-2.0", False ),
- ( "sys-apps/portage-2.1","sys-apps/portage-2.1-r1", False ),
- ( "sys-apps/portage-2.1-r1","sys-apps/portage-2.1", False ),
- ( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
- ( "sys-apps/portage-2.1_alpha3_p6","sys-apps/portage-2.1_alpha3", False ),
- ( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
- ( "sys-apps/portage-2.1","sys-apps/X-2.1", False ),
- ( "sys-apps/portage-2.1","portage-2.1", False ),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1", True),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.0", False),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1", False),
+ ("sys-apps/portage-2.1-r1", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3_p6", "sys-apps/portage-2.1_alpha3", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1", "sys-apps/X-2.1", False),
+ ("sys-apps/portage-2.1", "portage-2.1", False),
)
-
+
test_cases_xfail = (
- ( "sys-apps/portage","sys-apps/portage" ),
- ( "sys-apps/portage-2.1-6","sys-apps/portage-2.1-6" ),
+ ("sys-apps/portage", "sys-apps/portage"),
+ ("sys-apps/portage-2.1-6", "sys-apps/portage-2.1-6"),
)
for cpv1, cpv2, expected_result in test_cases:
@@ -33,5 +33,5 @@ class TestStandalone(TestCase):
"cpvequal('%s', '%s') != %s" % (cpv1, cpv2, expected_result))
for cpv1, cpv2 in test_cases_xfail:
- self.assertRaisesMsg("cpvequal("+cpv1+", "+cpv2+")", \
+ self.assertRaisesMsg("cpvequal(%s, %s)" % (cpv1, cpv2),
PortageException, cpvequal, cpv1, cpv2)
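
For reference, a sketch of cpvequal's contract as these cases exercise it, assuming the same import path this module uses:

    from portage.dep import cpvequal
    assert cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1")
    assert not cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1")
    # versionless arguments raise PortageException, per the xfail cases
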
diff --git a/pym/portage/tests/dep/test_best_match_to_list.py b/pym/portage/tests/dep/test_best_match_to_list.py
index 8a1403828..586c8bc50 100644
--- a/pym/portage/tests/dep/test_best_match_to_list.py
+++ b/pym/portage/tests/dep/test_best_match_to_list.py
@@ -1,5 +1,5 @@
# test_best_match_to_list.py -- Portage Unit Testing Functionality
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from itertools import permutations
@@ -28,25 +28,29 @@ class Test_best_match_to_list(TestCase):
def testBest_match_to_list(self):
tests = [
- ("dev-libs/A-4", [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], \
- [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], True),
- ("dev-libs/A-4", [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], \
- [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], True),
- ("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")], \
- [Atom("=dev-libs/A-1"), Atom("dev-libs/A")], True),
- ("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")], \
- [Atom("=dev-libs/A-1:0")], True),
- ("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")], \
- [Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)], True),
- ("dev-libs/A-4.9999-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*9999*", allow_wildcard=True)], \
- [Atom("=*/*-*9999*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
- ("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),\
- Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"), \
- Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")], \
- [Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"), \
- Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"), \
- Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)], False)
- ]
+ ("dev-libs/A-4", [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")],
+ [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], True),
+ ("dev-libs/A-4", [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")],
+ [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("dev-libs/A")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4.9999-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*9999*", allow_wildcard=True)],
+ [Atom("=*/*-*9999*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta1-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),
+ Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"),
+ Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"),
+ Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"),
+ Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)], False)
+ ]
for pkg, atom_list, result, all_permutations in tests:
if all_permutations:
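
A sketch of the ranking the tuples above encode: best_match_to_list returns the most specific matching atom, and each expected list is ordered from best to worst match (assuming a plain cpv string is accepted, as the tuples suggest):

    from portage.dep import Atom, best_match_to_list
    atoms = [Atom("dev-libs/A"), Atom("=dev-libs/A-1")]
    # the versioned atom outranks the unversioned one
    assert best_match_to_list("dev-libs/A-1", atoms) == Atom("=dev-libs/A-1")
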
diff --git a/pym/portage/tests/dep/test_dep_getcpv.py b/pym/portage/tests/dep/test_dep_getcpv.py
index 8a0a8aa2f..79c1514a1 100644
--- a/pym/portage/tests/dep/test_dep_getcpv.py
+++ b/pym/portage/tests/dep/test_dep_getcpv.py
@@ -10,12 +10,14 @@ class DepGetCPV(TestCase):
"""
def testDepGetCPV(self):
-
- prefix_ops = ["<", ">", "=", "~", "<=",
- ">=", "!=", "!<", "!>", "!~"]
- bad_prefix_ops = [ ">~", "<~", "~>", "~<" ]
- postfix_ops = [ ("=", "*"), ]
+ prefix_ops = [
+ "<", ">", "=", "~", "<=",
+ ">=", "!=", "!<", "!>", "!~"
+ ]
+
+ bad_prefix_ops = [">~", "<~", "~>", "~<"]
+		postfix_ops = [("=", "*")]
cpvs = ["sys-apps/portage-2.1", "sys-apps/portage-2.1",
"sys-apps/portage-2.1"]
@@ -26,10 +28,10 @@ class DepGetCPV(TestCase):
mycpv = prefix + cpv
if slot:
mycpv += slot
- self.assertEqual( dep_getcpv( mycpv ), cpv )
+ self.assertEqual(dep_getcpv(mycpv), cpv)
for prefix, postfix in postfix_ops:
mycpv = prefix + cpv + postfix
if slot:
mycpv += slot
- self.assertEqual( dep_getcpv( mycpv ), cpv )
+ self.assertEqual(dep_getcpv(mycpv), cpv)
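
In short, dep_getcpv strips any operator prefix and "*" postfix and returns the bare cpv; a sketch under that reading:

    from portage.dep import dep_getcpv
    assert dep_getcpv(">=sys-apps/portage-2.1") == "sys-apps/portage-2.1"
    assert dep_getcpv("=sys-apps/portage-2.1*") == "sys-apps/portage-2.1"
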
diff --git a/pym/portage/tests/dep/test_dep_getrepo.py b/pym/portage/tests/dep/test_dep_getrepo.py
index 78ead8cee..6c17d3cf7 100644
--- a/pym/portage/tests/dep/test_dep_getrepo.py
+++ b/pym/portage/tests/dep/test_dep_getrepo.py
@@ -11,9 +11,9 @@ class DepGetRepo(TestCase):
def testDepGetRepo(self):
repo_char = "::"
- repos = ( "a", "repo-name", "repo_name", "repo123", None )
+ repos = ("a", "repo-name", "repo_name", "repo123", None)
cpvs = ["sys-apps/portage"]
- versions = ["2.1.1","2.1-r1", None]
+ versions = ["2.1.1", "2.1-r1", None]
uses = ["[use]", None]
for cpv in cpvs:
for version in versions:
@@ -26,4 +26,4 @@ class DepGetRepo(TestCase):
pkg = pkg + repo_char + repo
if use:
pkg = pkg + use
- self.assertEqual( dep_getrepo( pkg ), repo )
+ self.assertEqual(dep_getrepo(pkg), repo)
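
dep_getrepo extracts the ::repo suffix (before any USE brackets) and returns None when no repo is given; a sketch matching the loop above:

    from portage.dep import dep_getrepo
    assert dep_getrepo("=sys-apps/portage-2.1.1::repo-name[use]") == "repo-name"
    assert dep_getrepo("sys-apps/portage") is None
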
diff --git a/pym/portage/tests/dep/test_dep_getslot.py b/pym/portage/tests/dep/test_dep_getslot.py
index 206cecc8c..84828648b 100644
--- a/pym/portage/tests/dep/test_dep_getslot.py
+++ b/pym/portage/tests/dep/test_dep_getslot.py
@@ -12,9 +12,9 @@ class DepGetSlot(TestCase):
def testDepGetSlot(self):
slot_char = ":"
- slots = ( "a", "1.2", "1", "IloveVapier", None )
+ slots = ("a", "1.2", "1", "IloveVapier", None)
cpvs = ["sys-apps/portage"]
- versions = ["2.1.1","2.1-r1"]
+ versions = ["2.1.1", "2.1-r1"]
for cpv in cpvs:
for version in versions:
for slot in slots:
@@ -22,7 +22,7 @@ class DepGetSlot(TestCase):
if version:
mycpv = '=' + mycpv + '-' + version
if slot is not None:
- self.assertEqual( dep_getslot(
- mycpv + slot_char + slot ), slot )
+ self.assertEqual(dep_getslot(
+ mycpv + slot_char + slot), slot)
else:
- self.assertEqual( dep_getslot( mycpv ), slot )
+ self.assertEqual(dep_getslot(mycpv), slot)
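
dep_getslot returns the text after the ":" slot separator, or None when no slot is present; sketched:

    from portage.dep import dep_getslot
    assert dep_getslot("=sys-apps/portage-2.1.1:1") == "1"
    assert dep_getslot("=sys-apps/portage-2.1.1") is None
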
diff --git a/pym/portage/tests/dep/test_dep_getusedeps.py b/pym/portage/tests/dep/test_dep_getusedeps.py
index d2494f7b3..cd58eab35 100644
--- a/pym/portage/tests/dep/test_dep_getusedeps.py
+++ b/pym/portage/tests/dep/test_dep_getusedeps.py
@@ -24,12 +24,12 @@ class DepGetUseDeps(TestCase):
cpv += ":" + slot
if isinstance(use, tuple):
cpv += "[%s]" % (",".join(use),)
- self.assertEqual( dep_getusedeps(
- cpv ), use )
+ self.assertEqual(dep_getusedeps(
+ cpv), use)
else:
if len(use):
- self.assertEqual( dep_getusedeps(
- cpv + "[" + use + "]" ), (use,) )
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), (use,))
else:
- self.assertEqual( dep_getusedeps(
- cpv + "[" + use + "]" ), () )
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), ())
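
dep_getusedeps returns the bracketed USE dependencies as a tuple, empty when the brackets are empty; sketched from the cases above:

    from portage.dep import dep_getusedeps
    assert dep_getusedeps("sys-apps/portage:2[doc,build]") == ("doc", "build")
    assert dep_getusedeps("sys-apps/portage[]") == ()
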
diff --git a/pym/portage/tests/dep/test_get_operator.py b/pym/portage/tests/dep/test_get_operator.py
index 4f9848f5d..5076e2107 100644
--- a/pym/portage/tests/dep/test_get_operator.py
+++ b/pym/portage/tests/dep/test_get_operator.py
@@ -10,24 +10,28 @@ class GetOperator(TestCase):
def testGetOperator(self):
# get_operator does not validate operators
- tests = [ ( "~", "~" ), ( "=", "=" ), ( ">", ">" ),
- ( ">=", ">=" ), ( "<=", "<=" ),
+ tests = [
+ ("~", "~"),
+ ("=", "="),
+ (">", ">"),
+ (">=", ">="),
+ ("<=", "<="),
]
test_cpvs = ["sys-apps/portage-2.1"]
- slots = [ None,"1","linux-2.5.6" ]
+ slots = [None, "1", "linux-2.5.6"]
for cpv in test_cpvs:
for test in tests:
for slot in slots:
atom = cpv[:]
if slot:
atom += ":" + slot
- result = get_operator( test[0] + atom )
- self.assertEqual( result, test[1],
- msg="get_operator(%s) != %s" % (test[0] + atom, test[1]) )
+ result = get_operator(test[0] + atom)
+ self.assertEqual(result, test[1],
+ msg="get_operator(%s) != %s" % (test[0] + atom, test[1]))
- result = get_operator( "sys-apps/portage" )
- self.assertEqual( result, None )
+ result = get_operator("sys-apps/portage")
+ self.assertEqual(result, None)
- result = get_operator( "=sys-apps/portage-2.1*" )
- self.assertEqual( result , "=*" )
+ result = get_operator("=sys-apps/portage-2.1*")
+		self.assertEqual(result, "=*")
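
As the trailing assertions show, get_operator returns the (unvalidated) operator prefix, None for a bare atom, and the special "=*" form for a trailing wildcard:

    from portage.dep import get_operator
    assert get_operator(">=sys-apps/portage-2.1") == ">="
    assert get_operator("sys-apps/portage") is None
    assert get_operator("=sys-apps/portage-2.1*") == "=*"
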
diff --git a/pym/portage/tests/dep/test_get_required_use_flags.py b/pym/portage/tests/dep/test_get_required_use_flags.py
index 06f81106a..90e096c78 100644
--- a/pym/portage/tests/dep/test_get_required_use_flags.py
+++ b/pym/portage/tests/dep/test_get_required_use_flags.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -13,6 +13,8 @@ class TestCheckRequiredUse(TestCase):
("|| ( a b c )", ["a", "b", "c"]),
("^^ ( a b c )", ["a", "b", "c"]),
+ ("?? ( a b c )", ["a", "b", "c"]),
+ ("?? ( )", []),
("|| ( a b ^^ ( d e f ) )", ["a", "b", "d", "e", "f"]),
("^^ ( a b || ( d e f ) )", ["a", "b", "d", "e", "f"]),
diff --git a/pym/portage/tests/dep/test_isjustname.py b/pym/portage/tests/dep/test_isjustname.py
index c16fb5493..9b95bcd0f 100644
--- a/pym/portage/tests/dep/test_isjustname.py
+++ b/pym/portage/tests/dep/test_isjustname.py
@@ -9,16 +9,16 @@ class IsJustName(TestCase):
def testIsJustName(self):
- cats = ( "", "sys-apps/", "foo/", "virtual/" )
- pkgs = ( "portage", "paludis", "pkgcore", "notARealPkg" )
- vers = ( "", "-2.0-r3", "-1.0_pre2", "-3.1b" )
+ cats = ("", "sys-apps/", "foo/", "virtual/")
+ pkgs = ("portage", "paludis", "pkgcore", "notARealPkg")
+ vers = ("", "-2.0-r3", "-1.0_pre2", "-3.1b")
for pkg in pkgs:
for cat in cats:
for ver in vers:
if len(ver):
- self.assertFalse( isjustname( cat + pkg + ver ),
- msg="isjustname(%s) is True!" % (cat + pkg + ver) )
+ self.assertFalse(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is True!" % (cat + pkg + ver))
else:
- self.assertTrue( isjustname( cat + pkg + ver ),
- msg="isjustname(%s) is False!" % (cat + pkg + ver) )
+ self.assertTrue(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is False!" % (cat + pkg + ver))
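
isjustname is true only for a bare (category/)package name with no version suffix; sketched:

    from portage.dep import isjustname
    assert isjustname("sys-apps/portage")
    assert not isjustname("sys-apps/portage-2.0-r3")
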
diff --git a/pym/portage/tests/dep/test_isvalidatom.py b/pym/portage/tests/dep/test_isvalidatom.py
index abcec755e..67ba60398 100644
--- a/pym/portage/tests/dep/test_isvalidatom.py
+++ b/pym/portage/tests/dep/test_isvalidatom.py
@@ -1,4 +1,4 @@
-# Copyright 2006-2010 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -26,7 +26,7 @@ class IsValidAtom(TestCase):
IsValidAtomTestCase("~sys-apps/portage-2.1", True),
IsValidAtomTestCase("sys-apps/portage:foo", True),
IsValidAtomTestCase("sys-apps/portage-2.1:foo", False),
- IsValidAtomTestCase( "sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:", False),
IsValidAtomTestCase("sys-apps/portage-2.1:", False),
IsValidAtomTestCase("sys-apps/portage-2.1:[foo]", False),
IsValidAtomTestCase("sys-apps/portage", True),
@@ -141,8 +141,11 @@ class IsValidAtom(TestCase):
IsValidAtomTestCase("virtual/ffmpeg:=", True),
IsValidAtomTestCase("virtual/ffmpeg:0=", True),
IsValidAtomTestCase("virtual/ffmpeg:*", True),
- IsValidAtomTestCase("virtual/ffmpeg:0*", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0*", False),
IsValidAtomTestCase("virtual/ffmpeg:0", True),
+
+ # Wildcard atoms
+ IsValidAtomTestCase("*/portage-2.1", False, allow_wildcard=True),
)
for test_case in test_cases:
@@ -150,6 +153,6 @@ class IsValidAtom(TestCase):
atom_type = "valid"
else:
atom_type = "invalid"
- self.assertEqual( bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard, \
+ self.assertEqual(bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard,
allow_repo=test_case.allow_repo)), test_case.expected,
- msg="isvalidatom(%s) != %s" % ( test_case.atom, test_case.expected ) )
+ msg="isvalidatom(%s) != %s" % (test_case.atom, test_case.expected))
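
Two behavior changes pinned down above, sketched with isvalidatom's raw return wrapped in bool() exactly as the test does:

    from portage.dep import isvalidatom
    assert bool(isvalidatom("virtual/ffmpeg:0="))
    assert not bool(isvalidatom("virtual/ffmpeg:0*"))  # now rejected
    # a wildcard category with a version is still invalid
    assert not bool(isvalidatom("*/portage-2.1", allow_wildcard=True))
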
diff --git a/pym/portage/tests/dep/test_match_from_list.py b/pym/portage/tests/dep/test_match_from_list.py
index d5d718f74..75ac8fd80 100644
--- a/pym/portage/tests/dep/test_match_from_list.py
+++ b/pym/portage/tests/dep/test_match_from_list.py
@@ -1,4 +1,4 @@
-# Copyright 2006-2012 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -7,6 +7,7 @@ from portage.dep import Atom, match_from_list, _repo_separator
from portage.versions import catpkgsplit, _pkg_str
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class Package(object):
@@ -17,14 +18,14 @@ class Package(object):
atom = Atom(atom, allow_repo=True)
self.cp = atom.cp
slot = atom.slot
- if atom.slot_abi:
- slot = "%s/%s" % (slot, atom.slot_abi)
+ if atom.sub_slot:
+ slot = "%s/%s" % (slot, atom.sub_slot)
if not slot:
slot = '0'
self.cpv = _pkg_str(atom.cpv, slot=slot, repo=atom.repo)
self.cpv_split = catpkgsplit(self.cpv)
self.slot = self.cpv.slot
- self.slot_abi = self.cpv.slot_abi
+ self.sub_slot = self.cpv.sub_slot
self.repo = atom.repo
if atom.use:
self.use = self._use_class(atom.use.enabled)
@@ -53,76 +54,79 @@ class Test_match_from_list(TestCase):
def testMatch_from_list(self):
tests = (
- ("=sys-apps/portage-45*", [], [] ),
- ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
- ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"] ),
- ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], [] ),
- ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("<=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
- ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("<sys-apps/portage-046", ["sys-apps/portage-046"], [] ),
- (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
- (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
- ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"] ),
- ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], [] ),
+ ("=sys-apps/portage-45*", [], []),
+ ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
+ ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
+ (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1_alpha1"], []),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-1-r1*", ["cat/pkg-01-r11"], ["cat/pkg-01-r11"]),
+ ("=cat/pkg-01-r1*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-01-r1*", ["cat/pkg-001-r11"], ["cat/pkg-001-r11"]),
("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
- ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"] ),
- ("*/*:1", ["sys-fs/udev-456:0"], [] ),
- ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"] ),
- ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"] ),
- ("dev-libs/*", ["sys-apps/portage-2.1.2"], [] ),
- ("*/tar", ["sys-apps/portage-2.1.2"], [] ),
- ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"] ),
- ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"] ),
+ ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
+ ("*/*:1", ["sys-fs/udev-456:0"], []),
+ ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
+ ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
+ ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
+ ("*/tar", ["sys-apps/portage-2.1.2"], []),
+ ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
+ ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
- ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"] ),
- ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"] ),
- ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], [] ),
- ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], [] ),
- ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], [] ),
- ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"] ),
- ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
- ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], [] ),
- ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
+ ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], []),
+ ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
- ("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"] ),
- ("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"] ),
- ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], [] ),
- ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
- ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], [] ),
- ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
+ ("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"]),
+ ("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], []),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], []),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
- ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], [] ),
- ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
+ ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
+ ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
- ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], [] ),
- ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], [] ),
+ ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
+ ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
)
for atom, cpv_list, expected_result in tests:
result = []
- for pkg in match_from_list( atom, cpv_list ):
+ for pkg in match_from_list(atom, cpv_list):
if isinstance(pkg, Package):
if pkg.repo:
result.append(pkg.cpv + _repo_separator + pkg.repo)
@@ -130,4 +134,4 @@ class Test_match_from_list(TestCase):
result.append(pkg.cpv)
else:
result.append(pkg)
- self.assertEqual( result, expected_result )
+ self.assertEqual(result, expected_result)
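
The new "=cat/pkg-1-r1*" rows pin down revision-wildcard matching: the glob extends the revision string (with leading zeros ignored) but does not reach into unrelated version suffixes such as _alpha. Sketched:

    from portage.dep import match_from_list
    assert match_from_list("=cat/pkg-1-r1*", ["cat/pkg-1-r11"]) == ["cat/pkg-1-r11"]
    assert match_from_list("=cat/pkg-1-r1*", ["cat/pkg-1_alpha1"]) == []
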
diff --git a/pym/portage/tests/dep/test_paren_reduce.py b/pym/portage/tests/dep/test_paren_reduce.py
index 9a147a02e..324465289 100644
--- a/pym/portage/tests/dep/test_paren_reduce.py
+++ b/pym/portage/tests/dep/test_paren_reduce.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -10,30 +10,30 @@ class TestParenReduce(TestCase):
def testParenReduce(self):
test_cases = (
- ( "A", ["A"]),
- ( "( A )", ["A"]),
- ( "|| ( A B )", [ "||", ["A", "B"] ]),
- ( "|| ( A || ( B C ) )", [ "||", ["A", "||", ["B", "C"]]]),
- ( "|| ( A || ( B C D ) )", [ "||", ["A", "||", ["B", "C", "D"]] ]),
- ( "|| ( A || ( B || ( C D ) E ) )", [ "||", ["A", "||", ["B", "||", ["C", "D"], "E"]] ]),
- ( "a? ( A )", ["a?", ["A"]]),
-
- ( "( || ( ( ( A ) B ) ) )", ["A", "B"]),
- ( "( || ( || ( ( A ) B ) ) )", [ "||", ["A", "B"] ]),
- ( "|| ( A )", ["A"]),
- ( "( || ( || ( || ( A ) foo? ( B ) ) ) )", [ "||", ["A", "foo?", ["B"] ]]),
- ( "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", [ "||", ["bar?", ["A"], "foo?", ["B"] ]]),
- ( "A || ( ) foo? ( ) B", ["A", "B"]),
+ ("A", ["A"]),
+ ("( A )", ["A"]),
+ ("|| ( A B )", ["||", ["A", "B"]]),
+ ("|| ( A || ( B C ) )", ["||", ["A", "||", ["B", "C"]]]),
+ ("|| ( A || ( B C D ) )", ["||", ["A", "||", ["B", "C", "D"]]]),
+ ("|| ( A || ( B || ( C D ) E ) )", ["||", ["A", "||", ["B", "||", ["C", "D"], "E"]]]),
+ ("a? ( A )", ["a?", ["A"]]),
- ( "|| ( A ) || ( B )", ["A", "B"]),
- ( "foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+ ("( || ( ( ( A ) B ) ) )", ["A", "B"]),
+ ("( || ( || ( ( A ) B ) ) )", ["||", ["A", "B"]]),
+ ("|| ( A )", ["A"]),
+ ("( || ( || ( || ( A ) foo? ( B ) ) ) )", ["||", ["A", "foo?", ["B"]]]),
+ ("( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", ["||", ["bar?", ["A"], "foo?", ["B"]]]),
+ ("A || ( ) foo? ( ) B", ["A", "B"]),
- ( "|| ( ( A B ) C )", [ "||", [ ["A", "B"], "C"] ]),
- ( "|| ( ( A B ) ( C ) )", [ "||", [ ["A", "B"], "C"] ]),
+ ("|| ( A ) || ( B )", ["A", "B"]),
+ ("foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+
+ ("|| ( ( A B ) C )", ["||", [["A", "B"], "C"]]),
+ ("|| ( ( A B ) ( C ) )", ["||", [["A", "B"], "C"]]),
# test USE dep defaults for bug #354003
- ( ">=dev-lang/php-5.2[pcre(+)]", [ ">=dev-lang/php-5.2[pcre(+)]" ]),
+ (">=dev-lang/php-5.2[pcre(+)]", [">=dev-lang/php-5.2[pcre(+)]"]),
)
-
+
test_cases_xfail = (
"( A",
"A )",
@@ -47,20 +47,23 @@ class TestParenReduce(TestCase):
"|| A B",
"|| ( A B ) )",
"|| || B C",
-
+
"|| ( A B || )",
-
+
"a? A",
-
- ( "( || ( || || ( A ) foo? ( B ) ) )"),
- ( "( || ( || bar? ( A ) foo? ( B ) ) )"),
+
+ "( || ( || || ( A ) foo? ( B ) ) )",
+ "( || ( || bar? ( A ) foo? ( B ) ) )",
)
for dep_str, expected_result in test_cases:
- self.assertEqual(paren_reduce(dep_str), expected_result,
+ self.assertEqual(paren_reduce(dep_str, _deprecation_warn=False),
+ expected_result,
"input: '%s' result: %s != %s" % (dep_str,
- paren_reduce(dep_str), expected_result))
+ paren_reduce(dep_str, _deprecation_warn=False),
+ expected_result))
for dep_str in test_cases_xfail:
self.assertRaisesMsg(dep_str,
- InvalidDependString, paren_reduce, dep_str)
+ InvalidDependString, paren_reduce, dep_str,
+ _deprecation_warn=False)
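
paren_reduce is deprecated in favor of use_reduce, hence the explicit _deprecation_warn=False the tests now pass; a sketch of its nested-list output, taken from one of the rows above:

    from portage.dep import paren_reduce
    assert paren_reduce("|| ( A || ( B C ) )",
        _deprecation_warn=False) == ["||", ["A", "||", ["B", "C"]]]
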
diff --git a/pym/portage/tests/dep/test_use_reduce.py b/pym/portage/tests/dep/test_use_reduce.py
index 1618430c5..4f65567cf 100644
--- a/pym/portage/tests/dep/test_use_reduce.py
+++ b/pym/portage/tests/dep/test_use_reduce.py
@@ -6,10 +6,10 @@ from portage.exception import InvalidDependString
from portage.dep import Atom, use_reduce
class UseReduceTestCase(object):
- def __init__(self, deparray, uselist=[], masklist=[], \
- matchall=0, excludeall=[], is_src_uri=False, \
- eapi="0", opconvert=False, flat=False, expected_result=None, \
- is_valid_flag=None, token_class=None):
+ def __init__(self, deparray, uselist=[], masklist=[],
+ matchall=0, excludeall=[], is_src_uri=False,
+ eapi='0', opconvert=False, flat=False, expected_result=None,
+ is_valid_flag=None, token_class=None):
self.deparray = deparray
self.uselist = uselist
self.masklist = masklist
@@ -25,8 +25,8 @@ class UseReduceTestCase(object):
def run(self):
try:
- return use_reduce(self.deparray, self.uselist, self.masklist, \
- self.matchall, self.excludeall, self.is_src_uri, self.eapi, \
+ return use_reduce(self.deparray, self.uselist, self.masklist,
+ self.matchall, self.excludeall, self.is_src_uri, self.eapi,
self.opconvert, self.flat, self.is_valid_flag, self.token_class)
except InvalidDependString as e:
raise InvalidDependString("%s: %s" % (e, self.deparray))
@@ -47,508 +47,507 @@ class UseReduce(TestCase):
test_cases = (
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["a", "b", "c", "d"],
- expected_result = ["A", "B"]
+ uselist=["a", "b", "c", "d"],
+ expected_result=["A", "B"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["a", "b", "c"],
- expected_result = ["A", "B", "D"]
+ uselist=["a", "b", "c"],
+ expected_result=["A", "B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["b", "c"],
- expected_result = ["B", "D"]
+ uselist=["b", "c"],
+ expected_result=["B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- expected_result = ["A", "B", "C", "D"]
+ matchall=True,
+ expected_result=["A", "B", "C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- masklist = ["a", "c"],
- expected_result = ["C", "D"]
+ masklist=["a", "c"],
+ expected_result=["C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- masklist = ["a", "c"],
- expected_result = ["B", "C", "D"]
+ matchall=True,
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["a", "b"],
- masklist = ["a", "c"],
- expected_result = ["B", "C", "D"]
+ uselist=["a", "b"],
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- excludeall = ["a", "c"],
- expected_result = ["D"]
+ excludeall=["a", "c"],
+ expected_result=["D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["b"],
- excludeall = ["a", "c"],
- expected_result = ["B", "D"]
+ uselist=["b"],
+ excludeall=["a", "c"],
+ expected_result=["B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- excludeall = ["a", "c"],
- expected_result = ["A", "B", "D"]
+ matchall=True,
+ excludeall=["a", "c"],
+ expected_result=["A", "B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- excludeall = ["a", "c"],
- masklist = ["b"],
- expected_result = ["A", "D"]
+ matchall=True,
+ excludeall=["a", "c"],
+ masklist=["b"],
+ expected_result=["A", "D"]
),
-
UseReduceTestCase(
"a? ( b? ( AB ) )",
- uselist = ["a", "b"],
- expected_result = ["AB"]
+ uselist=["a", "b"],
+ expected_result=["AB"]
),
UseReduceTestCase(
"a? ( b? ( AB ) C )",
- uselist = ["a"],
- expected_result = ["C"]
+ uselist=["a"],
+ expected_result=["C"]
),
UseReduceTestCase(
"a? ( b? ( || ( AB CD ) ) )",
- uselist = ["a", "b"],
- expected_result = ["||", ["AB", "CD"]]
+ uselist=["a", "b"],
+ expected_result=["||", ["AB", "CD"]]
),
UseReduceTestCase(
"|| ( || ( a? ( A ) b? ( B ) ) )",
- uselist = ["a", "b"],
- expected_result = ["||", ["A", "B"]]
+ uselist=["a", "b"],
+ expected_result=["||", ["A", "B"]]
),
UseReduceTestCase(
"|| ( || ( a? ( A ) b? ( B ) ) )",
- uselist = ["a"],
- expected_result = ["A"]
+ uselist=["a"],
+ expected_result=["A"]
),
UseReduceTestCase(
"|| ( || ( a? ( A ) b? ( B ) ) )",
- uselist = [],
- expected_result = []
+ uselist=[],
+ expected_result=[]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = [],
- expected_result = []
+ uselist=[],
+ expected_result=[]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["a"],
- expected_result = ["A"]
+ uselist=["a"],
+ expected_result=["A"]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["b"],
- expected_result = ["B"]
+ uselist=["b"],
+ expected_result=["B"]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["c"],
- expected_result = []
+ uselist=["c"],
+ expected_result=[]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["a", "c"],
- expected_result = ["||", [ "A", "C"]]
+ uselist=["a", "c"],
+ expected_result=["||", ["A", "C"]]
),
-
- #paren_reduce tests
+
+ # paren_reduce tests
UseReduceTestCase(
"A",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( A )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"|| ( A B )",
- expected_result = [ "||", ["A", "B"] ]),
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"|| ( ( A B ) C )",
- expected_result = [ "||", [ ["A", "B"], "C"] ]),
+ expected_result=["||", [["A", "B"], "C"]]),
UseReduceTestCase(
"|| ( ( A B ) ( C ) )",
- expected_result = [ "||", [ ["A", "B"], "C"] ]),
+ expected_result=["||", [["A", "B"], "C"]]),
UseReduceTestCase(
"|| ( A || ( B C ) )",
- expected_result = [ "||", ["A", "B", "C"]]),
+ expected_result=["||", ["A", "B", "C"]]),
UseReduceTestCase(
"|| ( A || ( B C D ) )",
- expected_result = [ "||", ["A", "B", "C", "D"] ]),
+ expected_result=["||", ["A", "B", "C", "D"]]),
UseReduceTestCase(
"|| ( A || ( B || ( C D ) E ) )",
- expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
UseReduceTestCase(
"( || ( ( ( A ) B ) ) )",
- expected_result = ["A", "B"] ),
+ expected_result=["A", "B"]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- expected_result = [ "||", ["A", "B"] ]),
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- expected_result = [ "||", ["A", "B"] ]),
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"|| ( A )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- uselist = ["foo"],
- expected_result = [ "||", ["A", "B"] ]),
+ uselist=["foo"],
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- expected_result = []),
+ expected_result=[]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- uselist = ["foo", "bar"],
- expected_result = [ "||", [ "A", "B" ] ]),
+ uselist=["foo", "bar"],
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
- expected_result = ["A", "B"]),
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( A ) || ( B )",
- expected_result = ["A", "B"]),
+ expected_result=["A", "B"]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- expected_result = []),
+ expected_result=[]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- uselist = ["foo"],
- expected_result = ["A", "B"]),
+ uselist=["foo"],
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( A B ) C",
- expected_result = ['||', ['A', 'B'], 'C']),
+ expected_result=['||', ['A', 'B'], 'C']),
UseReduceTestCase(
"A || ( B C )",
- expected_result = ['A', '||', ['B', 'C']]),
+ expected_result=['A', '||', ['B', 'C']]),
- #SRC_URI stuff
+ # SRC_URI stuff
UseReduceTestCase(
"http://foo/bar -> blah.tbz2",
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
UseReduceTestCase(
"foo? ( http://foo/bar -> blah.tbz2 )",
- uselist = [],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = []),
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=[]),
UseReduceTestCase(
"foo? ( http://foo/bar -> blah.tbz2 )",
- uselist = ["foo"],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
UseReduceTestCase(
"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
- uselist = [],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "bar.tbz2"]),
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2"]),
UseReduceTestCase(
"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
- uselist = ["foo"],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
UseReduceTestCase(
"http://foo.com/foo http://foo/bar -> blah.tbz2",
- uselist = ["foo"],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
- #opconvert tests
+ # opconvert tests
UseReduceTestCase(
"A",
- opconvert = True,
- expected_result = ["A"]),
+ opconvert=True,
+ expected_result=["A"]),
UseReduceTestCase(
"( A )",
- opconvert = True,
- expected_result = ["A"]),
+ opconvert=True,
+ expected_result=["A"]),
UseReduceTestCase(
"|| ( A B )",
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"|| ( ( A B ) C )",
- opconvert = True,
- expected_result = [['||', ['A', 'B'], 'C']]),
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C']]),
UseReduceTestCase(
"|| ( A || ( B C ) )",
- opconvert = True,
- expected_result = [['||', 'A', 'B', 'C']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C']]),
UseReduceTestCase(
"|| ( A || ( B C D ) )",
- opconvert = True,
- expected_result = [['||', 'A', 'B', 'C', 'D']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C', 'D']]),
UseReduceTestCase(
"|| ( A || ( B || ( C D ) E ) )",
- expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
UseReduceTestCase(
"( || ( ( ( A ) B ) ) )",
- opconvert = True,
- expected_result = [ "A", "B" ] ),
+ opconvert=True,
+ expected_result=['A', 'B']),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"|| ( A B ) C",
- opconvert = True,
- expected_result = [['||', 'A', 'B'], 'C']),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B'], 'C']),
UseReduceTestCase(
"A || ( B C )",
- opconvert = True,
- expected_result = ['A', ['||', 'B', 'C']]),
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C']]),
UseReduceTestCase(
"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
- uselist = ["foo", "bar"],
- opconvert = True,
- expected_result = ['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
UseReduceTestCase(
"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
- uselist = ["foo", "bar"],
- opconvert = False,
- expected_result = ['A', '||', ['B', 'C', 'D', 'E'], 'G']),
+ uselist=["foo", "bar"],
+ opconvert=False,
+ expected_result=['A', '||', ['B', 'C', 'D', 'E'], 'G']),
UseReduceTestCase(
"|| ( A )",
- opconvert = True,
- expected_result = ["A"]),
+ opconvert=True,
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- opconvert = True,
- expected_result = []),
+ opconvert=True,
+ expected_result=[]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- uselist = ["foo", "bar"],
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
- opconvert = True,
- expected_result = ["A", "B"]),
+ opconvert=True,
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( A ) || ( B )",
- opconvert = True,
- expected_result = ["A", "B"]),
+ opconvert=True,
+ expected_result=["A", "B"]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- opconvert = True,
- expected_result = []),
+ opconvert=True,
+ expected_result=[]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = ["A", "B"]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( foo? ( || ( A B ) ) )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"|| ( ( A B ) foo? ( || ( C D ) ) )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = [['||', ['A', 'B'], 'C', 'D']]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D']]),
UseReduceTestCase(
"|| ( ( A B ) foo? ( || ( C D ) ) )",
- uselist = ["foo"],
- opconvert = False,
- expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+ uselist=["foo"],
+ opconvert=False,
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
UseReduceTestCase(
"|| ( ( A B ) || ( C D ) )",
- expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
UseReduceTestCase(
"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
- expected_result = ['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+ expected_result=['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
UseReduceTestCase(
"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
- opconvert = True,
- expected_result = [['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
UseReduceTestCase(
"|| ( foo? ( A B ) )",
- uselist = ["foo"],
- expected_result = ['A', 'B']),
+ uselist=["foo"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( foo? ( A B ) ) )",
- uselist = ["foo"],
- expected_result = ['A', 'B']),
+ uselist=["foo"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
- uselist = ["a", "b", "c", "d", "e", "f"],
- expected_result = ['A', 'B']),
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( ( || ( a? ( ( b? ( c? ( || ( || ( || ( ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) ) ) ) )",
- uselist = ["a", "b", "c", "d", "e", "f"],
- expected_result = ['A', 'B']),
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( ( A ( || ( B ) ) ) )",
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
- uselist = ["foo", "bar", "baz"],
- expected_result = ['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
+ uselist=["foo", "bar", "baz"],
+ expected_result=['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
UseReduceTestCase(
"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
- uselist = ["foo", "bar", "baz"],
- opconvert = True,
- expected_result = [['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
+ uselist=["foo", "bar", "baz"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
UseReduceTestCase(
"|| ( foo? ( A B ) )",
- uselist = ["foo"],
+ uselist=["foo"],
opconvert=True,
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( foo? ( A B ) ) )",
- uselist = ["foo"],
+ uselist=["foo"],
opconvert=True,
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
- uselist = ["a", "b", "c", "d", "e", "f"],
+ uselist=["a", "b", "c", "d", "e", "f"],
opconvert=True,
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
- #flat test
+ # flat test
UseReduceTestCase(
"A",
- flat = True,
- expected_result = ["A"]),
+ flat=True,
+ expected_result=["A"]),
UseReduceTestCase(
"( A )",
- flat = True,
- expected_result = ["A"]),
+ flat=True,
+ expected_result=["A"]),
UseReduceTestCase(
"|| ( A B )",
- flat = True,
- expected_result = [ "||", "A", "B" ] ),
+ flat=True,
+ expected_result=["||", "A", "B"]),
UseReduceTestCase(
"|| ( A || ( B C ) )",
- flat = True,
- expected_result = [ "||", "A", "||", "B", "C" ]),
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C"]),
UseReduceTestCase(
"|| ( A || ( B C D ) )",
- flat = True,
- expected_result = [ "||", "A", "||", "B", "C", "D" ]),
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C", "D"]),
UseReduceTestCase(
"|| ( A || ( B || ( C D ) E ) )",
- flat = True,
- expected_result = [ "||", "A", "||", "B", "||", "C", "D", "E" ]),
+ flat=True,
+ expected_result=["||", "A", "||", "B", "||", "C", "D", "E"]),
UseReduceTestCase(
"( || ( ( ( A ) B ) ) )",
- flat = True,
- expected_result = [ "||", "A", "B"] ),
+ flat=True,
+ expected_result=["||", "A", "B"]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- flat = True,
- expected_result = [ "||", "||", "A", "B" ]),
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- flat = True,
- expected_result = [ "||", "||", "A", "B" ]),
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
UseReduceTestCase(
"|| ( A )",
- flat = True,
- expected_result = ["||", "A"]),
+ flat=True,
+ expected_result=["||", "A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- uselist = ["foo"],
- flat = True,
- expected_result = [ "||", "||","||", "A", "B" ]),
+ uselist=["foo"],
+ flat=True,
+ expected_result=["||", "||", "||", "A", "B"]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- flat = True,
- expected_result = ["||", "||","||"]),
+ flat=True,
+ expected_result=["||", "||", "||"]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- uselist = ["foo", "bar"],
- flat = True,
- expected_result = [ "||", "||", "A", "||", "B" ]),
+ uselist=["foo", "bar"],
+ flat=True,
+ expected_result=["||", "||", "A", "||", "B"]),
UseReduceTestCase(
"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
- flat = True,
- expected_result = ["A", "||", "B"]),
+ flat=True,
+ expected_result=["A", "||", "B"]),
UseReduceTestCase(
"|| ( A ) || ( B )",
- flat = True,
- expected_result = ["||", "A", "||", "B"]),
+ flat=True,
+ expected_result=["||", "A", "||", "B"]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- flat = True,
- expected_result = []),
+ flat=True,
+ expected_result=[]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- uselist = ["foo"],
- flat = True,
- expected_result = ["A", "B"]),
+ uselist=["foo"],
+ flat=True,
+ expected_result=["A", "B"]),
- #use flag validation
+ # use flag validation
UseReduceTestCase(
"foo? ( A )",
- uselist = ["foo"],
- is_valid_flag = self.always_true,
- expected_result = ["A"]),
+ uselist=["foo"],
+ is_valid_flag=self.always_true,
+ expected_result=["A"]),
UseReduceTestCase(
"foo? ( A )",
- is_valid_flag = self.always_true,
- expected_result = []),
+ is_valid_flag=self.always_true,
+ expected_result=[]),
- #token_class
+ # token_class
UseReduceTestCase(
"foo? ( dev-libs/A )",
- uselist = ["foo"],
+ uselist=["foo"],
token_class=Atom,
- expected_result = ["dev-libs/A"]),
+ expected_result=["dev-libs/A"]),
UseReduceTestCase(
"foo? ( dev-libs/A )",
token_class=Atom,
- expected_result = []),
+ expected_result=[]),
)
-
+
test_cases_xfail = (
UseReduceTestCase("? ( A )"),
UseReduceTestCase("!? ( A )"),
@@ -571,44 +570,44 @@ class UseReduce(TestCase):
UseReduceTestCase("|| ( )"),
UseReduceTestCase("foo? ( )"),
- #SRC_URI stuff
- UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri = True, eapi = EAPI_WITHOUT_SRC_URI_ARROWS),
- UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-
- UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = False, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ # SRC_URI stuff
+ UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri=True, eapi=EAPI_WITHOUT_SRC_URI_ARROWS),
+ UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=False, eapi=EAPI_WITH_SRC_URI_ARROWS),
UseReduceTestCase(
"A",
- opconvert = True,
- flat = True),
+ opconvert=True,
+ flat=True),
- #use flag validation
+ # use flag validation
UseReduceTestCase("1.0? ( A )"),
UseReduceTestCase("!1.0? ( A )"),
UseReduceTestCase("!? ( A )"),
UseReduceTestCase("!?? ( A )"),
UseReduceTestCase(
"foo? ( A )",
- is_valid_flag = self.always_false,
+ is_valid_flag=self.always_false,
),
UseReduceTestCase(
"foo? ( A )",
- uselist = ["foo"],
- is_valid_flag = self.always_false,
+ uselist=["foo"],
+ is_valid_flag=self.always_false,
),
- #token_class
+ # token_class
UseReduceTestCase(
"foo? ( A )",
- uselist = ["foo"],
+ uselist=["foo"],
token_class=Atom),
UseReduceTestCase(
"A(B",
diff --git a/pym/portage/tests/ebuild/test_config.py b/pym/portage/tests/ebuild/test_config.py
index 63cb99d41..08e0a5dcf 100644
--- a/pym/portage/tests/ebuild/test_config.py
+++ b/pym/portage/tests/ebuild/test_config.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
@@ -46,7 +46,7 @@ class ConfigTestCase(TestCase):
settings.features.add('noclean')
self.assertEqual('noclean' in settings['FEATURES'].split(), True)
settings.regenerate()
- self.assertEqual('noclean' in settings['FEATURES'].split(),True)
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
settings.features.discard('noclean')
self.assertEqual('noclean' in settings['FEATURES'].split(), False)
@@ -56,7 +56,7 @@ class ConfigTestCase(TestCase):
settings.features.add('noclean')
self.assertEqual('noclean' in settings['FEATURES'].split(), True)
settings.regenerate()
- self.assertEqual('noclean' in settings['FEATURES'].split(),True)
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
# before: ['noclean', '-noclean', 'noclean']
settings.features._prune_overrides()
@@ -92,7 +92,7 @@ class ConfigTestCase(TestCase):
try:
portage.util.noiselimit = -2
- license_group_locations = (os.path.join(playground.portdir, "profiles"),)
+ license_group_locations = (os.path.join(playground.settings.repositories["test_repo"].location, "profiles"),)
pkg_license = os.path.join(playground.eroot, "etc", "portage")
lic_man = LicenseManager(license_group_locations, pkg_license)
@@ -221,6 +221,7 @@ class ConfigTestCase(TestCase):
"profile-formats = pms",
"thin-manifests = true",
"manifest-hashes = SHA256 SHA512 WHIRLPOOL",
+ "# use implicit masters"
),
}
}
@@ -239,28 +240,30 @@ class ConfigTestCase(TestCase):
playground = ResolverPlayground(ebuilds=ebuilds,
repo_configs=repo_configs, distfiles=distfiles)
+ settings = playground.settings
- new_repo_config = playground.settings.repositories.prepos['new_repo']
+ new_repo_config = settings.repositories["new_repo"]
+ old_repo_config = settings.repositories["old_repo"]
self.assertTrue(len(new_repo_config.masters) > 0, "new_repo has no default master")
- self.assertEqual(new_repo_config.masters[0].user_location, playground.portdir,
- "new_repo default master is not PORTDIR")
+ self.assertEqual(new_repo_config.masters[0].user_location, playground.settings.repositories["test_repo"].location,
+ "new_repo default master is not test_repo")
self.assertEqual(new_repo_config.thin_manifest, True,
"new_repo_config.thin_manifest != True")
- new_manifest_file = os.path.join(playground.repo_dirs["new_repo"], "dev-libs", "A", "Manifest")
- self.assertEqual(os.path.exists(new_manifest_file), False)
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "A", "Manifest")
+ self.assertNotExists(new_manifest_file)
- new_manifest_file = os.path.join(playground.repo_dirs["new_repo"], "dev-libs", "B", "Manifest")
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "B", "Manifest")
f = open(new_manifest_file)
self.assertEqual(len(list(f)), 1)
f.close()
- new_manifest_file = os.path.join(playground.repo_dirs["new_repo"], "dev-libs", "C", "Manifest")
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "C", "Manifest")
f = open(new_manifest_file)
self.assertEqual(len(list(f)), 2)
f.close()
- old_manifest_file = os.path.join(playground.repo_dirs["old_repo"], "dev-libs", "A", "Manifest")
+ old_manifest_file = os.path.join(old_repo_config.location, "dev-libs", "A", "Manifest")
f = open(old_manifest_file)
self.assertEqual(len(list(f)), 1)
f.close()
diff --git a/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py b/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py
new file mode 100644
index 000000000..61392dd54
--- /dev/null
+++ b/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py
@@ -0,0 +1,137 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.Package import Package
+from _emerge.PipeReader import PipeReader
+
+class DoebuildProcess(ForkProcess):
+
+ __slots__ = ('doebuild_kwargs', 'doebuild_pargs')
+
+ def _run(self):
+ return portage.doebuild(*self.doebuild_pargs, **self.doebuild_kwargs)
+
+class DoebuildFdPipesTestCase(TestCase):
+
+ def testDoebuild(self):
+ """
+ Invoke portage.doebuild() with the fd_pipes parameter, and
+ check that the expected output appears in the pipe. This
+ functionality is not used by portage internally, but it is
+ supported for API consumers (see bug #475812).
+ """
+
+ ebuild_body = textwrap.dedent("""
+ S=${WORKDIR}
+ pkg_info() { echo info ; }
+ pkg_nofetch() { echo nofetch ; }
+ pkg_pretend() { echo pretend ; }
+ pkg_setup() { echo setup ; }
+ src_unpack() { echo unpack ; }
+ src_prepare() { echo prepare ; }
+ src_configure() { echo configure ; }
+ src_compile() { echo compile ; }
+ src_test() { echo test ; }
+ src_install() { echo install ; }
+ """)
+
+ ebuilds = {
+ 'app-misct/foo-1': {
+ 'EAPI' : '5',
+ "MISC_CONTENT": ebuild_body,
+ }
+ }
+
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ("find", "prepstrip", "sed", "scanelf")
+ true_binary = portage.process.find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ QueryCommand._db = playground.trees
+ root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = portage.config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ settings.features.add("noauto")
+ settings.features.add("test")
+ settings['PORTAGE_PYTHON'] = portage._python_interpreter
+ settings['PORTAGE_QUIET'] = "1"
+
+ fake_bin = os.path.join(settings["EPREFIX"], "bin")
+ portage.util.ensure_dirs(fake_bin)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+
+ settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
+ settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")
+
+ cpv = 'app-misct/foo-1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
+ pkg = Package(built=False, cpv=cpv, installed=False,
+ metadata=metadata, root_config=root_config,
+ type_name='ebuild')
+ settings.setcpv(pkg)
+ ebuildpath = portdb.findname(cpv)
+ self.assertNotEqual(ebuildpath, None)
+
+ for phase in ('info', 'nofetch',
+ 'pretend', 'setup', 'unpack', 'prepare', 'configure',
+ 'compile', 'test', 'install', 'qmerge', 'clean', 'merge'):
+
+ pr, pw = os.pipe()
+
+ producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
+ doebuild_kwargs={"settings" : settings,
+ "mydbapi": portdb, "tree": "porttree",
+ "vartree": root_config.trees["vartree"],
+ "fd_pipes": {1: pw, 2: pw},
+ "prev_mtimes": {}})
+
+ consumer = PipeReader(
+ input_files={"producer" : pr})
+
+ task_scheduler = TaskScheduler(iter([producer, consumer]),
+ max_jobs=2)
+
+ try:
+ task_scheduler.start()
+ finally:
+ # PipeReader closes pr
+ os.close(pw)
+
+ task_scheduler.wait()
+ output = portage._unicode_decode(
+ consumer.getvalue()).rstrip("\n")
+
+ if task_scheduler.returncode != os.EX_OK:
+ portage.writemsg(output, noiselevel=-1)
+
+ self.assertEqual(task_scheduler.returncode, os.EX_OK)
+
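+ # The clean, merge and qmerge phases do not run the echo stubs
+ # defined in ebuild_body, so their output is not expected to
+ # match the phase name.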
+ if phase not in ('clean', 'merge', 'qmerge'):
+ self.assertEqual(phase, output)
+
+ finally:
+ playground.cleanup()
+ QueryCommand._db = None
diff --git a/pym/portage/tests/ebuild/test_doebuild_spawn.py b/pym/portage/tests/ebuild/test_doebuild_spawn.py
index 89e27a331..ae9a5c504 100644
--- a/pym/portage/tests/ebuild/test_doebuild_spawn.py
+++ b/pym/portage/tests/ebuild/test_doebuild_spawn.py
@@ -1,18 +1,22 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import textwrap
+
from portage import os
from portage import _python_interpreter
from portage import _shell_quote
from portage.const import EBUILD_SH_BINARY
from portage.package.ebuild.config import config
from portage.package.ebuild.doebuild import spawn as doebuild_spawn
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.EbuildPhase import EbuildPhase
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.Package import Package
-from _emerge.PollScheduler import PollScheduler
class DoebuildSpawnTestCase(TestCase):
"""
@@ -23,25 +27,37 @@ class DoebuildSpawnTestCase(TestCase):
"""
def testDoebuildSpawn(self):
- playground = ResolverPlayground()
- try:
- settings = config(clone=playground.settings)
- if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
- settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
- os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
- settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
- cpv = 'sys-apps/portage-2.1'
- metadata = {
+ ebuild_body = textwrap.dedent("""
+ pkg_nofetch() { : ; }
+ """)
+
+ ebuilds = {
+ 'sys-apps/portage-2.1': {
'EAPI' : '2',
- 'INHERITED' : 'python eutils',
'IUSE' : 'build doc epydoc python3 selinux',
+ 'KEYWORDS' : 'x86',
'LICENSE' : 'GPL-2',
- 'PROVIDE' : 'virtual/portage',
'RDEPEND' : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
'SLOT' : '0',
+ "MISC_CONTENT": ebuild_body,
}
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ cpv = 'sys-apps/portage-2.1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
pkg = Package(built=False, cpv=cpv, installed=False,
metadata=metadata, root_config=root_config,
type_name='ebuild')
@@ -57,7 +73,7 @@ class DoebuildSpawnTestCase(TestCase):
# has been sourced already.
open(os.path.join(settings['T'], 'environment'), 'wb').close()
- scheduler = PollScheduler().sched_iface
+ scheduler = SchedulerInterface(global_event_loop())
for phase in ('_internal_test',):
# Test EbuildSpawnProcess by calling doebuild.spawn() with
@@ -83,5 +99,7 @@ class DoebuildSpawnTestCase(TestCase):
ebuild_phase.start()
ebuild_phase.wait()
self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
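+ # spawn_nofetch runs the ebuild's pkg_nofetch phase in a throwaway
+ # build directory; the stub above makes it a no-op, so this only
+ # exercises the spawn code path.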
+ spawn_nofetch(portdb, portdb.findname(cpv), settings=settings)
finally:
playground.cleanup()
diff --git a/pym/portage/tests/ebuild/test_ipc_daemon.py b/pym/portage/tests/ebuild/test_ipc_daemon.py
index 0efab6584..a87107625 100644
--- a/pym/portage/tests/ebuild/test_ipc_daemon.py
+++ b/pym/portage/tests/ebuild/test_ipc_daemon.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -13,16 +13,28 @@ from portage.const import BASH_BINARY
from portage.locks import hardlock_cleanup
from portage.package.ebuild._ipc.ExitCommand import ExitCommand
from portage.util import ensure_dirs
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.SpawnProcess import SpawnProcess
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
-from _emerge.TaskScheduler import TaskScheduler
+
+class SleepProcess(ForkProcess):
+ """
+ Emulate the sleep command, in order to ensure a consistent
+ return code when it is killed by SIGTERM (see bug #437180).
+ """
+ __slots__ = ('seconds',)
+ def _run(self):
+ time.sleep(self.seconds)
class IpcDaemonTestCase(TestCase):
_SCHEDULE_TIMEOUT = 40000 # 40 seconds
def testIpcDaemon(self):
+ event_loop = global_event_loop()
tmpdir = tempfile.mkdtemp()
build_dir = None
try:
@@ -44,9 +56,8 @@ class IpcDaemonTestCase(TestCase):
env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
- task_scheduler = TaskScheduler(max_jobs=2)
build_dir = EbuildBuildDir(
- scheduler=task_scheduler.sched_iface,
+ scheduler=event_loop,
settings=env)
build_dir.lock()
ensure_dirs(env['PORTAGE_BUILDDIR'])
@@ -61,26 +72,23 @@ class IpcDaemonTestCase(TestCase):
commands = {'exit' : exit_command}
daemon = EbuildIpcDaemon(commands=commands,
input_fifo=input_fifo,
- output_fifo=output_fifo,
- scheduler=task_scheduler.sched_iface)
+ output_fifo=output_fifo)
proc = SpawnProcess(
args=[BASH_BINARY, "-c",
'"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
- env=env, scheduler=task_scheduler.sched_iface)
+ env=env)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
self.received_command = False
def exit_command_callback():
self.received_command = True
- task_scheduler.clear()
- task_scheduler.wait()
+ task_scheduler.cancel()
exit_command.reply_hook = exit_command_callback
start_time = time.time()
- task_scheduler.add(daemon)
- task_scheduler.add(proc)
- task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
- task_scheduler.clear()
- task_scheduler.wait()
+ self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)
+
hardlock_cleanup(env['PORTAGE_BUILDDIR'],
remove_all_locks=True)
@@ -91,8 +99,10 @@ class IpcDaemonTestCase(TestCase):
self.assertEqual(daemon.isAlive(), False)
self.assertEqual(exit_command.exitcode, exitcode)
- # Intentionally short timeout test for QueueScheduler.run()
- sleep_time_s = 10 # 10.000 seconds
+ # Intentionally short timeout test for EventLoop/AsyncScheduler.
+ # Use a ridiculously long sleep_time_s in case the user's
+ # system is heavily loaded (see bug #436334).
+ sleep_time_s = 600 # 600.000 seconds
short_timeout_ms = 10 # 0.010 seconds
for i in range(3):
@@ -100,25 +110,20 @@ class IpcDaemonTestCase(TestCase):
commands = {'exit' : exit_command}
daemon = EbuildIpcDaemon(commands=commands,
input_fifo=input_fifo,
- output_fifo=output_fifo,
- scheduler=task_scheduler.sched_iface)
- proc = SpawnProcess(
- args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s],
- env=env, scheduler=task_scheduler.sched_iface)
+ output_fifo=output_fifo)
+ proc = SleepProcess(seconds=sleep_time_s)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
self.received_command = False
def exit_command_callback():
self.received_command = True
- task_scheduler.clear()
- task_scheduler.wait()
+ task_scheduler.cancel()
exit_command.reply_hook = exit_command_callback
start_time = time.time()
- task_scheduler.add(daemon)
- task_scheduler.add(proc)
- task_scheduler.run(timeout=short_timeout_ms)
- task_scheduler.clear()
- task_scheduler.wait()
+ self._run(event_loop, task_scheduler, short_timeout_ms)
+
hardlock_cleanup(env['PORTAGE_BUILDDIR'],
remove_all_locks=True)
@@ -133,3 +138,20 @@ class IpcDaemonTestCase(TestCase):
if build_dir is not None:
build_dir.unlock()
shutil.rmtree(tmpdir)
+
+ def _timeout_callback(self):
+ self._timed_out = True
+
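+ # Drive the event loop by hand so the test enforces its own timeout:
+ # timeout_add() schedules _timeout_callback (which returns None, so
+ # it fires at most once), iteration() dispatches one round of pending
+ # events, and source_remove() disarms the timeout however the
+ # scheduler exits.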
+ def _run(self, event_loop, task_scheduler, timeout):
+ self._timed_out = False
+ timeout_id = event_loop.timeout_add(timeout, self._timeout_callback)
+
+ try:
+ task_scheduler.start()
+ while not self._timed_out and task_scheduler.poll() is None:
+ event_loop.iteration()
+ if self._timed_out:
+ task_scheduler.cancel()
+ task_scheduler.wait()
+ finally:
+ event_loop.source_remove(timeout_id)
diff --git a/pym/portage/tests/ebuild/test_spawn.py b/pym/portage/tests/ebuild/test_spawn.py
index fea4738d4..a38e10972 100644
--- a/pym/portage/tests/ebuild/test_spawn.py
+++ b/pym/portage/tests/ebuild/test_spawn.py
@@ -1,17 +1,18 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
import io
import sys
import tempfile
+import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.const import BASH_BINARY
from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.SpawnProcess import SpawnProcess
-from _emerge.PollScheduler import PollScheduler
class SpawnTestCase(TestCase):
@@ -22,12 +23,16 @@ class SpawnTestCase(TestCase):
os.close(fd)
null_fd = os.open('/dev/null', os.O_RDWR)
test_string = 2 * "blah blah blah\n"
- scheduler = PollScheduler().sched_iface
proc = SpawnProcess(
args=[BASH_BINARY, "-c",
"echo -n '%s'" % test_string],
- env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd},
- scheduler=scheduler,
+ env={},
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: null_fd,
+ 2: null_fd
+ },
+ scheduler=global_event_loop(),
logfile=logfile)
proc.start()
os.close(null_fd)
diff --git a/pym/portage/tests/emerge/test_emerge_slot_abi.py b/pym/portage/tests/emerge/test_emerge_slot_abi.py
index f18bd123b..fd7ec0e6a 100644
--- a/pym/portage/tests/emerge/test_emerge_slot_abi.py
+++ b/pym/portage/tests/emerge/test_emerge_slot_abi.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -64,15 +64,14 @@ class SlotAbiEmergeTestCase(TestCase):
trees = playground.trees
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
- portdir = settings["PORTDIR"]
var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
package_mask_path = os.path.join(user_config_dir, "package.mask")
portage_python = portage._python_interpreter
- ebuild_cmd = (portage_python, "-Wd",
+ ebuild_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "ebuild"))
- emerge_cmd = (portage_python, "-Wd",
+ emerge_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "emerge"))
test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
@@ -94,25 +93,6 @@ class SlotAbiEmergeTestCase(TestCase):
portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
profile_path = settings.profile_path
- features = []
- if not portage.process.sandbox_capable or \
- os.environ.get("SANDBOX_ON") == "1":
- features.append("-sandbox")
-
- make_conf = (
- "FEATURES=\"%s\"\n" % (" ".join(features),),
- "PORTDIR=\"%s\"\n" % (portdir,),
- "PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"],),
- "PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"],),
- "PKGDIR=\"%s\"\n" % (pkgdir,),
- "PORTAGE_INST_GID=%s\n" % (portage.data.portage_gid,),
- "PORTAGE_INST_UID=%s\n" % (portage.data.portage_uid,),
- "PORTAGE_TMPDIR=\"%s\"\n" % (portage_tmpdir,),
- "CLEAN_DELAY=0\n",
- "DISTDIR=\"%s\"\n" % (distdir,),
- "EMERGE_WARNING_DELAY=0\n",
- )
-
path = os.environ.get("PATH")
if path is not None and not path.strip():
path = None
@@ -139,6 +119,7 @@ class SlotAbiEmergeTestCase(TestCase):
"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
"PATH" : path,
"PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
"PYTHONPATH" : pythonpath,
}
@@ -155,9 +136,6 @@ class SlotAbiEmergeTestCase(TestCase):
try:
for d in dirs:
ensure_dirs(d)
- with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
- for line in make_conf:
- f.write(line)
for x in true_symlinks:
os.symlink(true_binary, os.path.join(fake_bin, x))
with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
diff --git a/pym/portage/tests/emerge/test_simple.py b/pym/portage/tests/emerge/test_simple.py
index f87170a71..bf0af8bc8 100644
--- a/pym/portage/tests/emerge/test_simple.py
+++ b/pym/portage/tests/emerge/test_simple.py
@@ -1,4 +1,4 @@
-# Copyright 2011-2012 Gentoo Foundation
+# Copyright 2011-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -7,7 +7,8 @@ import sys
import portage
from portage import os
from portage import _unicode_decode
-from portage.const import PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, USER_CONFIG_PATH
+from portage.const import (BASH_BINARY, PORTAGE_BASE_PATH,
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, USER_CONFIG_PATH)
from portage.process import find_binary
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
@@ -75,13 +76,21 @@ pkg_preinst() {
else
einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
fi
+ if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
+ if has_version --host-root $CATEGORY/$PN:$SLOT ; then
+ einfo "has_version --host-root detects an installed instance of $CATEGORY/$PN:$SLOT"
+ einfo "best_version --host-root reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
+ else
+ einfo "has_version --host-root does not detect an installed instance of $CATEGORY/$PN:$SLOT"
+ fi
+ fi
}
"""
ebuilds = {
"dev-libs/A-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
@@ -89,14 +98,14 @@ pkg_preinst() {
"RDEPEND": "flag? ( dev-libs/B[flag] )",
},
"dev-libs/B-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
"MISC_CONTENT": install_something,
},
"virtual/foo-0": {
- "EAPI" : "4",
+ "EAPI" : "5",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
},
@@ -104,7 +113,7 @@ pkg_preinst() {
installed = {
"dev-libs/A-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
@@ -112,21 +121,21 @@ pkg_preinst() {
"USE": "flag",
},
"dev-libs/B-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
"USE": "flag",
},
"dev-libs/depclean-me-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
"USE": "",
},
"app-misc/depclean-me-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
@@ -159,29 +168,35 @@ pkg_preinst() {
eroot = settings["EROOT"]
trees = playground.trees
portdb = trees[eroot]["porttree"].dbapi
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
cachedir = os.path.join(var_cache_edb, "dep")
- cachedir_pregen = os.path.join(portdir, "metadata", "cache")
+ cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache")
portage_python = portage._python_interpreter
- ebuild_cmd = (portage_python, "-Wd",
+ dispatch_conf_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(PORTAGE_BIN_PATH, "dispatch-conf"))
+ ebuild_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "ebuild"))
- egencache_cmd = (portage_python, "-Wd",
- os.path.join(PORTAGE_BIN_PATH, "egencache"))
- emerge_cmd = (portage_python, "-Wd",
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(PORTAGE_BIN_PATH, "egencache"),
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ emerge_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "emerge"))
- emaint_cmd = (portage_python, "-Wd",
+ emaint_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "emaint"))
- env_update_cmd = (portage_python, "-Wd",
+ env_update_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "env-update"))
- fixpackages_cmd = (portage_python, "-Wd",
+ etc_update_cmd = (BASH_BINARY,
+ os.path.join(PORTAGE_BIN_PATH, "etc-update"))
+ fixpackages_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "fixpackages"))
- portageq_cmd = (portage_python, "-Wd",
+ portageq_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "portageq"))
- quickpkg_cmd = (portage_python, "-Wd",
+ quickpkg_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "quickpkg"))
- regenworld_cmd = (portage_python, "-Wd",
+ regenworld_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "regenworld"))
rm_binary = find_binary("rm")
@@ -196,8 +211,14 @@ pkg_preinst() {
test_ebuild = portdb.findname("dev-libs/A-1")
self.assertFalse(test_ebuild is None)
+ cross_prefix = os.path.join(eprefix, "cross_prefix")
+
test_commands = (
env_update_cmd,
+ portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
+ "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
+ etc_update_cmd,
+ dispatch_conf_cmd,
emerge_cmd + ("--version",),
emerge_cmd + ("--info",),
emerge_cmd + ("--info", "--verbose"),
@@ -210,7 +231,7 @@ pkg_preinst() {
({"FEATURES" : "metadata-transfer"},) + \
emerge_cmd + ("--regen",),
rm_cmd + ("-rf", cachedir),
- ({"FEATURES" : "metadata-transfer parse-eapi-ebuild-head"},) + \
+ ({"FEATURES" : "metadata-transfer"},) + \
emerge_cmd + ("--regen",),
rm_cmd + ("-rf", cachedir),
egencache_cmd + ("--update",) + tuple(egencache_extra_args),
@@ -226,6 +247,7 @@ pkg_preinst() {
ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
emerge_cmd + ("-p", "dev-libs/B"),
+ emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
emerge_cmd + ("-B", "dev-libs/B",),
emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),
@@ -257,6 +279,24 @@ pkg_preinst() {
emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+
+ # Test cross-prefix usage, including chpathtool for binpkgs.
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("--usepkgonly", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("dev-libs/A",),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
)
distdir = playground.distdir
@@ -266,20 +306,6 @@ pkg_preinst() {
profile_path = settings.profile_path
user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)
- features = []
- if not portage.process.sandbox_capable or \
- os.environ.get("SANDBOX_ON") == "1":
- features.append("-sandbox")
-
- # Since egencache ignores settings from the calling environment,
- # configure it via make.conf.
- make_conf = (
- "FEATURES=\"%s\"\n" % (" ".join(features),),
- "PORTDIR=\"%s\"\n" % (portdir,),
- "PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"],),
- "PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"],),
- )
-
path = os.environ.get("PATH")
if path is not None and not path.strip():
path = None
@@ -314,37 +340,43 @@ pkg_preinst() {
"PORTAGE_INST_GID" : str(portage.data.portage_gid),
"PORTAGE_INST_UID" : str(portage.data.portage_uid),
"PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
"PORTAGE_TMPDIR" : portage_tmpdir,
"PYTHONPATH" : pythonpath,
+ "__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
}
if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
dirs = [cachedir, cachedir_pregen, distdir, fake_bin,
portage_tmpdir, updates_dir,
user_config_dir, var_cache_edb]
- true_symlinks = ["chown", "chgrp"]
+ etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
true_binary = find_binary("true")
self.assertEqual(true_binary is None, False,
"true command not found")
try:
for d in dirs:
ensure_dirs(d)
- with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
- for line in make_conf:
- f.write(line)
for x in true_symlinks:
os.symlink(true_binary, os.path.join(fake_bin, x))
+ for x in etc_symlinks:
+ os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", x),
+ os.path.join(eprefix, "etc", x))
with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
f.write(b"100")
# non-empty system set keeps --depclean quiet
with open(os.path.join(profile_path, "packages"), 'w') as f:
f.write("*dev-libs/token-system-pkg")
for cp, xml_data in metadata_xml_files:
- with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f:
+ with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
f.write(playground.metadata_xml_template % xml_data)
with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
f.write("""
diff --git a/pym/portage/tests/env/config/test_PackageKeywordsFile.py b/pym/portage/tests/env/config/test_PackageKeywordsFile.py
index f1e9e98f0..609c0fda2 100644
--- a/pym/portage/tests/env/config/test_PackageKeywordsFile.py
+++ b/pym/portage/tests/env/config/test_PackageKeywordsFile.py
@@ -11,7 +11,7 @@ class PackageKeywordsFileTestCase(TestCase):
cpv = ['sys-apps/portage']
keywords = ['~x86', 'amd64', '-mips']
-
+
def testPackageKeywordsFile(self):
"""
A simple test to ensure the load works properly
@@ -23,17 +23,17 @@ class PackageKeywordsFileTestCase(TestCase):
f.load()
i = 0
for cpv, keyword in f.items():
- self.assertEqual( cpv, self.cpv[i] )
+ self.assertEqual(cpv, self.cpv[i])
[k for k in keyword if self.assertTrue(k in self.keywords)]
i = i + 1
finally:
self.NukeFile()
-
+
def BuildFile(self):
fd, self.fname = mkstemp()
f = os.fdopen(fd, 'w')
for c in self.cpv:
- f.write("%s %s\n" % (c,' '.join(self.keywords)))
+ f.write("%s %s\n" % (c, ' '.join(self.keywords)))
f.close()
def NukeFile(self):
diff --git a/pym/portage/tests/env/config/test_PackageUseFile.py b/pym/portage/tests/env/config/test_PackageUseFile.py
index 7a3806792..b1a6ccbde 100644
--- a/pym/portage/tests/env/config/test_PackageUseFile.py
+++ b/pym/portage/tests/env/config/test_PackageUseFile.py
@@ -12,7 +12,7 @@ class PackageUseFileTestCase(TestCase):
cpv = 'sys-apps/portage'
useflags = ['cdrom', 'far', 'boo', 'flag', 'blat']
-
+
def testPackageUseFile(self):
"""
A simple test to ensure the load works properly
@@ -22,7 +22,7 @@ class PackageUseFileTestCase(TestCase):
f = PackageUseFile(self.fname)
f.load()
for cpv, use in f.items():
- self.assertEqual( cpv, self.cpv )
+ self.assertEqual(cpv, self.cpv)
[flag for flag in use if self.assertTrue(flag in self.useflags)]
finally:
self.NukeFile()
@@ -32,6 +32,6 @@ class PackageUseFileTestCase(TestCase):
f = os.fdopen(fd, 'w')
f.write("%s %s" % (self.cpv, ' '.join(self.useflags)))
f.close()
-
+
def NukeFile(self):
os.unlink(self.fname)
diff --git a/pym/portage/tests/env/config/test_PortageModulesFile.py b/pym/portage/tests/env/config/test_PortageModulesFile.py
index 2cd1a8ab1..05584a5f8 100644
--- a/pym/portage/tests/env/config/test_PortageModulesFile.py
+++ b/pym/portage/tests/env/config/test_PortageModulesFile.py
@@ -8,14 +8,13 @@ from tempfile import mkstemp
class PortageModulesFileTestCase(TestCase):
- keys = ['foo.bar','baz','bob','extra_key']
- invalid_keys = ['',""]
- modules = ['spanky','zmedico','antarus','ricer','5','6']
+ keys = ['foo.bar', 'baz', 'bob', 'extra_key']
+ invalid_keys = ['', ""]
+ modules = ['spanky', 'zmedico', 'antarus', 'ricer', '5', '6']
def setUp(self):
self.items = {}
- for k, v in zip(self.keys + self.invalid_keys,
- self.modules):
+ for k, v in zip(self.keys + self.invalid_keys, self.modules):
self.items[k] = v
def testPortageModulesFile(self):
@@ -32,7 +31,7 @@ class PortageModulesFileTestCase(TestCase):
fd, self.fname = mkstemp()
f = os.fdopen(fd, 'w')
for k, v in self.items.items():
- f.write('%s=%s\n' % (k,v))
+ f.write('%s=%s\n' % (k, v))
f.close()
def NukeFile(self):
diff --git a/pym/portage/tests/glsa/__init__.py b/pym/portage/tests/glsa/__init__.py
new file mode 100644
index 000000000..6cde9320b
--- /dev/null
+++ b/pym/portage/tests/glsa/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/tests/glsa/__test__ b/pym/portage/tests/glsa/__test__
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pym/portage/tests/glsa/__test__
diff --git a/pym/portage/tests/glsa/test_security_set.py b/pym/portage/tests/glsa/test_security_set.py
new file mode 100644
index 000000000..edf567809
--- /dev/null
+++ b/pym/portage/tests/glsa/test_security_set.py
@@ -0,0 +1,144 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+import portage
+from portage import os, _encodings
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SecuritySetTestCase(TestCase):
+
+ glsa_template = """\
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet href="/xsl/glsa.xsl" type="text/xsl"?>
+<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl"?>
+<!DOCTYPE glsa SYSTEM "http://www.gentoo.org/dtd/glsa.dtd">
+<glsa id="%(glsa_id)s">
+ <title>%(pkgname)s: Multiple vulnerabilities</title>
+ <synopsis>Multiple vulnerabilities have been found in %(pkgname)s.
+ </synopsis>
+ <product type="ebuild">%(pkgname)s</product>
+ <announced>January 18, 2013</announced>
+ <revised>January 18, 2013: 1</revised>
+ <bug>55555</bug>
+ <access>remote</access>
+ <affected>
+ <package name="%(cp)s" auto="yes" arch="*">
+ <unaffected range="ge">%(unaffected_version)s</unaffected>
+ <vulnerable range="lt">%(unaffected_version)s</vulnerable>
+ </package>
+ </affected>
+ <background>
+ <p>%(pkgname)s is a software package.</p>
+ </background>
+ <description>
+ <p>Multiple vulnerabilities have been discovered in %(pkgname)s.
+ </p>
+ </description>
+ <impact type="normal">
+ <p>A remote attacker could exploit these vulnerabilities.</p>
+ </impact>
+ <workaround>
+ <p>There is no known workaround at this time.</p>
+ </workaround>
+ <resolution>
+ <p>All %(pkgname)s users should upgrade to the latest version:</p>
+ <code>
+ # emerge --sync
+ # emerge --ask --oneshot --verbose "&gt;=%(cp)s-%(unaffected_version)s"
+ </code>
+ </resolution>
+ <references>
+ </references>
+</glsa>
+"""
+
+ def _must_skip(self):
+ try:
+ __import__("xml.etree.ElementTree")
+ __import__("xml.parsers.expat").parsers.expat.ExpatError
+ except (AttributeError, ImportError):
+ return "python is missing xml support"
+
+ def testSecuritySet(self):
+
+ skip_reason = self._must_skip()
+ if skip_reason:
+ self.portage_skip = skip_reason
+ self.assertFalse(True, skip_reason)
+ return
+
+ ebuilds = {
+ "cat/A-vulnerable-2.2": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.5": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "cat/A-vulnerable-2.1": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.4": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ glsas = (
+ {
+ "glsa_id": "201301-01",
+ "pkgname": "A-vulnerable",
+ "cp": "cat/A-vulnerable",
+ "unaffected_version": "2.2"
+ },
+ {
+ "glsa_id": "201301-02",
+ "pkgname": "B-not-vulnerable",
+ "cp": "cat/B-not-vulnerable",
+ "unaffected_version": "4.4"
+ },
+ {
+ "glsa_id": "201301-03",
+ "pkgname": "NotInstalled",
+ "cp": "cat/NotInstalled",
+ "unaffected_version": "3.5"
+ },
+ )
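+ # Expected resolution: installed cat/A-vulnerable-2.1 falls in the
+ # "lt 2.2" vulnerable range, so @security pulls in the 2.2 upgrade;
+ # cat/B-not-vulnerable-4.4 already satisfies "ge 4.4", and the
+ # NotInstalled GLSA matches nothing on the system.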
+
+ world = ["cat/A"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@security"],
+ options={},
+ success=True,
+ mergelist=["cat/A-vulnerable-2.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+
+ portdb = playground.trees[playground.eroot]["porttree"].dbapi
+ glsa_dir = os.path.join(portdb.porttree_root, 'metadata', 'glsa')
+ portage.util.ensure_dirs(glsa_dir)
+ for glsa in glsas:
+ with io.open(os.path.join(glsa_dir,
+ 'glsa-' + glsa["glsa_id"] + '.xml'),
+ encoding=_encodings['repo.content'], mode='w') as f:
+ f.write(self.glsa_template % glsa)
+
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py b/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
index c7ebbaff9..080cf3f98 100644
--- a/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
+++ b/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
@@ -6,8 +6,8 @@ import portage
from portage import os
from portage.const import PORTAGE_PYM_PATH
from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
-from _emerge.PollScheduler import PollScheduler
from _emerge.PipeReader import PipeReader
from _emerge.SpawnProcess import SpawnProcess
@@ -52,7 +52,7 @@ sys.stdout.write(" ".join(k for k in sys.modules
# then the above PYTHONPATH override doesn't help.
env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
master_fd, slave_fd = os.pipe()
master_file = os.fdopen(master_fd, 'rb', 0)
slave_file = os.fdopen(slave_fd, 'wb')
diff --git a/pym/portage/tests/lint/test_bash_syntax.py b/pym/portage/tests/lint/test_bash_syntax.py
index aef8d74f1..fdbb6fe88 100644
--- a/pym/portage/tests/lint/test_bash_syntax.py
+++ b/pym/portage/tests/lint/test_bash_syntax.py
@@ -1,20 +1,26 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from itertools import chain
import stat
+import subprocess
+import sys
-from portage.const import BASH_BINARY, PORTAGE_BIN_PATH
+from portage.const import BASH_BINARY, PORTAGE_BASE_PATH, PORTAGE_BIN_PATH
from portage.tests import TestCase
from portage import os
-from portage import subprocess_getstatusoutput
from portage import _encodings
-from portage import _shell_quote
from portage import _unicode_decode, _unicode_encode
class BashSyntaxTestCase(TestCase):
def testBashSyntax(self):
- for parent, dirs, files in os.walk(PORTAGE_BIN_PATH):
+ locations = [PORTAGE_BIN_PATH]
+ misc_dir = os.path.join(PORTAGE_BASE_PATH, "misc")
+ if os.path.isdir(misc_dir):
+ locations.append(misc_dir)
+ for parent, dirs, files in \
+ chain.from_iterable(os.walk(x) for x in locations):
parent = _unicode_decode(parent,
encoding=_encodings['fs'], errors='strict')
for x in files:
@@ -36,7 +42,13 @@ class BashSyntaxTestCase(TestCase):
f.close()
if line[:2] == '#!' and \
'bash' in line:
- cmd = "%s -n %s" % (_shell_quote(BASH_BINARY), _shell_quote(x))
- status, output = subprocess_getstatusoutput(cmd)
+ cmd = [BASH_BINARY, "-n", x]
+ cmd = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0],
+ encoding=_encodings['fs'])
+ status = proc.wait()
self.assertEqual(os.WIFEXITED(status) and \
os.WEXITSTATUS(status) == os.EX_OK, True, msg=output)
diff --git a/pym/portage/tests/lint/test_compile_modules.py b/pym/portage/tests/lint/test_compile_modules.py
index f90a6665a..ce7e3fb90 100644
--- a/pym/portage/tests/lint/test_compile_modules.py
+++ b/pym/portage/tests/lint/test_compile_modules.py
@@ -1,6 +1,7 @@
-# Copyright 2009-2010 Gentoo Foundation
+# Copyright 2009-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import errno
import itertools
import stat
@@ -10,12 +11,10 @@ from portage import os
from portage import _encodings
from portage import _unicode_decode, _unicode_encode
-import py_compile
-
class CompileModulesTestCase(TestCase):
def testCompileModules(self):
- for parent, dirs, files in itertools.chain(
+ for parent, _dirs, files in itertools.chain(
os.walk(PORTAGE_BIN_PATH),
os.walk(PORTAGE_PYM_PATH)):
parent = _unicode_decode(parent,
@@ -33,14 +32,21 @@ class CompileModulesTestCase(TestCase):
if x[-3:] == '.py':
do_compile = True
else:
- # Check for python shebang
- f = open(_unicode_encode(x,
- encoding=_encodings['fs'], errors='strict'), 'rb')
- line = _unicode_decode(f.readline(),
- encoding=_encodings['content'], errors='replace')
- f.close()
- if line[:2] == '#!' and \
- 'python' in line:
+ # Check for python shebang.
+ try:
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ except IOError as e:
+ # Some tests create files that are unreadable by the
+ # user (by design), so ignore EACCES issues.
+ if e.errno != errno.EACCES:
+ raise
+ continue
+ if line[:2] == '#!' and 'python' in line:
do_compile = True
if do_compile:
- py_compile.compile(x, cfile='/dev/null', doraise=True)
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ compile(f.read(), x, 'exec')
diff --git a/pym/portage/tests/lint/test_import_modules.py b/pym/portage/tests/lint/test_import_modules.py
index 8d257c5a6..34261f464 100644
--- a/pym/portage/tests/lint/test_import_modules.py
+++ b/pym/portage/tests/lint/test_import_modules.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.const import PORTAGE_PYM_PATH
diff --git a/pym/portage/tests/locks/test_asynchronous_lock.py b/pym/portage/tests/locks/test_asynchronous_lock.py
index 49dd10ec4..3a2ccfb84 100644
--- a/pym/portage/tests/locks/test_asynchronous_lock.py
+++ b/pym/portage/tests/locks/test_asynchronous_lock.py
@@ -7,13 +7,13 @@ import tempfile
from portage import os
from portage import shutil
from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.AsynchronousLock import AsynchronousLock
-from _emerge.PollScheduler import PollScheduler
class AsynchronousLockTestCase(TestCase):
def _testAsynchronousLock(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
@@ -53,7 +53,7 @@ class AsynchronousLockTestCase(TestCase):
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
def _testAsynchronousLockWait(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
@@ -94,7 +94,7 @@ class AsynchronousLockTestCase(TestCase):
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
def _testAsynchronousLockWaitCancel(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
@@ -132,7 +132,7 @@ class AsynchronousLockTestCase(TestCase):
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
def _testAsynchronousLockWaitKill(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
diff --git a/pym/portage/tests/process/test_PopenProcess.py b/pym/portage/tests/process/test_PopenProcess.py
new file mode 100644
index 000000000..88da0b354
--- /dev/null
+++ b/pym/portage/tests/process/test_PopenProcess.py
@@ -0,0 +1,85 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.PipeReader import PipeReader
+
+class PopenPipeTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support, since it
+ uses subprocess.Popen instead of os.fork().
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def _testPipeLogger(self, test_string):
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=global_event_loop())
+
+ fd, log_file_path = tempfile.mkstemp()
+ try:
+
+ consumer = PipeLogger(background=True,
+ input_fd=producer.proc.stdout,
+ log_file_path=log_file_path)
+
+ producer.pipe_reader = consumer
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ with open(log_file_path, 'rb') as f:
+ content = f.read()
+
+ finally:
+ os.close(fd)
+ os.unlink(log_file_path)
+
+ return content.decode('ascii', 'replace')
+
+ def testPopenPipe(self):
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
+
+ output = self._testPipeLogger(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/pym/portage/tests/process/test_PopenProcessBlockingIO.py b/pym/portage/tests/process/test_PopenProcessBlockingIO.py
new file mode 100644
index 000000000..9ee291a39
--- /dev/null
+++ b/pym/portage/tests/process/test_PopenProcessBlockingIO.py
@@ -0,0 +1,63 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util._async.PipeReaderBlockingIO import PipeReaderBlockingIO
+
+class PopenPipeBlockingIOTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support:
+ * use subprocess.Popen since Jython does not support os.fork()
+ * use blocking IO with threads, since Jython does not support
+ fcntl non-blocking IO
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReaderBlockingIO(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def testPopenPipeBlockingIO(self):
+
+ if threading is None:
+ skip_reason = "threading disabled"
+ self.portage_skip = "threading disabled"
+ self.assertFalse(True, skip_reason)
+ return
+
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/pym/portage/tests/process/test_poll.py b/pym/portage/tests/process/test_poll.py
index d6667b4e0..8c57c237a 100644
--- a/pym/portage/tests/process/test_poll.py
+++ b/pym/portage/tests/process/test_poll.py
@@ -1,12 +1,14 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import subprocess
+
from portage import os
from portage.tests import TestCase
from portage.util._pty import _create_pty_or_pipe
-from _emerge.TaskScheduler import TaskScheduler
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.PipeReader import PipeReader
-from _emerge.SpawnProcess import SpawnProcess
class PipeReaderTestCase(TestCase):
@@ -36,26 +38,23 @@ class PipeReaderTestCase(TestCase):
# WARNING: It is very important to use unbuffered mode here,
# in order to avoid issue 5380 with python3.
master_file = os.fdopen(master_fd, 'rb', 0)
- slave_file = os.fdopen(slave_fd, 'wb', 0)
- task_scheduler = TaskScheduler(max_jobs=2)
- producer = SpawnProcess(
- args=["bash", "-c", self._echo_cmd % test_string],
- env=os.environ, fd_pipes={1:slave_fd},
- scheduler=task_scheduler.sched_iface)
- task_scheduler.add(producer)
- slave_file.close()
+ scheduler = global_event_loop()
consumer = PipeReader(
input_files={"producer" : master_file},
- scheduler=task_scheduler.sched_iface, _use_array=self._use_array)
+ _use_array=self._use_array,
+ scheduler=scheduler)
+
+ producer = PopenProcess(
+ pipe_reader=consumer,
+ proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
+ stdout=slave_fd),
+ scheduler=scheduler)
- task_scheduler.add(consumer)
+ producer.start()
+ os.close(slave_fd)
+ producer.wait()
- # This will ensure that both tasks have exited, which
- # is necessary to avoid "ResourceWarning: unclosed file"
- # warnings since Python 3.2 (and also ensures that we
- # don't leave any zombie child processes).
- task_scheduler.run()
self.assertEqual(producer.returncode, os.EX_OK)
self.assertEqual(consumer.returncode, os.EX_OK)
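
Note: the rewrite drops TaskScheduler/SpawnProcess in favor of a
PopenProcess attached to the global event loop, and it closes the
parent's copy of the slave fd right after starting the producer --
otherwise the PipeReader would never see EOF. A stdlib-only sketch of
that fd lifecycle (illustrative, not portage API):

    import os
    import subprocess

    master_fd, slave_fd = os.pipe()
    proc = subprocess.Popen(["bash", "-c", "echo -n 'payload'"],
        stdout=slave_fd)
    os.close(slave_fd)  # parent's copy; the child still holds its own
    with os.fdopen(master_fd, "rb", 0) as master_file:
        output = master_file.read()  # returns once the child exits
    proc.wait()
    assert output == b"payload"
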
diff --git a/pym/portage/tests/repoman/test_echangelog.py b/pym/portage/tests/repoman/test_echangelog.py
index 502aa7292..1640be268 100644
--- a/pym/portage/tests/repoman/test_echangelog.py
+++ b/pym/portage/tests/repoman/test_echangelog.py
@@ -1,13 +1,9 @@
# Copyright 2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import datetime
-import subprocess
-import sys
import tempfile
import time
-import portage
from portage import os
from portage import shutil
from portage.tests import TestCase
@@ -35,7 +31,7 @@ class RepomanEchangelogTestCase(TestCase):
self.header_pkg = '# ChangeLog for %s/%s\n' % (self.cat, self.pkg)
self.header_copyright = '# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2\n' % \
- datetime.datetime.now().year
+ time.strftime('%Y', time.gmtime())
self.header_cvs = '# $Header: $\n'
self.changelog = os.path.join(self.pkgdir, 'ChangeLog')
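
Note: both the old and the new expression yield the current year for
the '%s' substitution in the copyright header; the strftime() form
drops the datetime import and pins the result to UTC. For example
(illustrative):

    import time
    year = time.strftime('%Y', time.gmtime())  # e.g. '2014'
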
diff --git a/pym/portage/tests/repoman/test_simple.py b/pym/portage/tests/repoman/test_simple.py
index eab06d9b8..69eb36de8 100644
--- a/pym/portage/tests/repoman/test_simple.py
+++ b/pym/portage/tests/repoman/test_simple.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -76,9 +76,26 @@ class SimpleRepomanTestCase(TestCase):
profiles = (
("x86", "default/linux/x86/test_profile", "stable"),
+ ("x86", "default/linux/x86/test_dev", "dev"),
+ ("x86", "default/linux/x86/test_exp", "exp"),
)
+ profile = {
+ "eapi": ("5",),
+ "package.use.stable.mask": ("dev-libs/A flag",)
+ }
+
ebuilds = {
+ "dev-libs/A-0": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "5",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ },
"dev-libs/A-1": {
"COPYRIGHT_HEADER" : copyright_header,
"DESCRIPTION" : "Desc goes here",
@@ -98,6 +115,17 @@ class SimpleRepomanTestCase(TestCase):
"KEYWORDS": "~x86",
"LICENSE": "GPL-2",
},
+ "dev-libs/C-0": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "4",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ # must be unstable, since dev-libs/A[flag] is stable masked
+ "KEYWORDS": "~x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/A[flag] )",
+ },
}
licenses = ["GPL-2"]
arch_list = ["x86"]
@@ -107,7 +135,7 @@ class SimpleRepomanTestCase(TestCase):
"dev-libs/A",
{
"herd" : "base-system",
- "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ "flags" : "<flag name='flag' restrict='&gt;=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>",
},
),
(
@@ -117,6 +145,13 @@ class SimpleRepomanTestCase(TestCase):
"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
},
),
+ (
+ "dev-libs/C",
+ {
+ "herd" : "no-herd",
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
)
use_desc = (
@@ -124,18 +159,18 @@ class SimpleRepomanTestCase(TestCase):
)
playground = ResolverPlayground(ebuilds=ebuilds,
- repo_configs=repo_configs, debug=debug)
+ profile=profile, repo_configs=repo_configs, debug=debug)
settings = playground.settings
eprefix = settings["EPREFIX"]
eroot = settings["EROOT"]
portdb = playground.trees[playground.eroot]["porttree"].dbapi
homedir = os.path.join(eroot, "home")
distdir = os.path.join(eprefix, "distdir")
- portdir = settings["PORTDIR"]
- profiles_dir = os.path.join(portdir, "profiles")
- license_dir = os.path.join(portdir, "licenses")
+ test_repo_location = settings.repositories["test_repo"].location
+ profiles_dir = os.path.join(test_repo_location, "profiles")
+ license_dir = os.path.join(test_repo_location, "licenses")
- repoman_cmd = (portage._python_interpreter, "-Wd",
+ repoman_cmd = (portage._python_interpreter, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "repoman"))
git_binary = find_binary("git")
@@ -159,6 +194,7 @@ class SimpleRepomanTestCase(TestCase):
("", git_cmd + ("init-db",)),
("", git_cmd + ("add", ".")),
("", git_cmd + ("commit", "-a", "-m", "add whole repo")),
+ ("", repoman_cmd + ("full", "-d")),
("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")),
("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")),
("", repoman_cmd + ("commit", "-m", "bump to version 2")),
@@ -192,23 +228,35 @@ class SimpleRepomanTestCase(TestCase):
"PATH" : os.environ["PATH"],
"PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
"PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
- "PORTDIR" : portdir,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
"PYTHONPATH" : pythonpath,
}
if os.environ.get("SANDBOX_ON") == "1":
# avoid problems from nested sandbox instances
- env["FEATURES"] = "-sandbox"
+ env["FEATURES"] = "-sandbox -usersandbox"
dirs = [homedir, license_dir, profiles_dir, distdir]
try:
for d in dirs:
ensure_dirs(d)
- with open(os.path.join(portdir, "skel.ChangeLog"), 'w') as f:
+ with open(os.path.join(test_repo_location, "skel.ChangeLog"), 'w') as f:
f.write(copyright_header)
with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f:
for x in profiles:
f.write("%s %s %s\n" % x)
+
+ # ResolverPlayground only created the first profile,
+ # so create the remaining ones.
+ for x in profiles[1:]:
+ sub_profile_dir = os.path.join(profiles_dir, x[1])
+ ensure_dirs(sub_profile_dir)
+ for config_file, lines in profile.items():
+ file_name = os.path.join(sub_profile_dir, config_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
for x in licenses:
open(os.path.join(license_dir, x), 'wb').close()
with open(os.path.join(profiles_dir, "arch.list"), 'w') as f:
@@ -218,12 +266,12 @@ class SimpleRepomanTestCase(TestCase):
for k, v in use_desc:
f.write("%s - %s\n" % (k, v))
for cp, xml_data in metadata_xml_files:
- with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f:
+ with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
f.write(playground.metadata_xml_template % xml_data)
- # Use a symlink to portdir, in order to trigger bugs
+ # Use a symlink to test_repo, in order to trigger bugs
# involving canonical vs. non-canonical paths.
- portdir_symlink = os.path.join(eroot, "portdir_symlink")
- os.symlink(portdir, portdir_symlink)
+ test_repo_symlink = os.path.join(eroot, "test_repo_symlink")
+ os.symlink(test_repo_location, test_repo_symlink)
# repoman checks metadata.dtd for recent CTIME, so copy the file in
# order to ensure that the CTIME is current
shutil.copyfile(metadata_dtd, os.path.join(distdir, "metadata.dtd"))
@@ -238,9 +286,8 @@ class SimpleRepomanTestCase(TestCase):
stdout = subprocess.PIPE
for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"):
- abs_cwd = os.path.join(portdir_symlink, cwd)
- proc = subprocess.Popen([portage._python_interpreter, "-Wd",
- os.path.join(PORTAGE_BIN_PATH, "repoman"), "full"],
+ abs_cwd = os.path.join(test_repo_symlink, cwd)
+ proc = subprocess.Popen(repoman_cmd + ("full",),
cwd=abs_cwd, env=env, stdout=stdout)
if debug:
@@ -258,7 +305,7 @@ class SimpleRepomanTestCase(TestCase):
if git_binary is not None:
for cwd, cmd in git_test:
- abs_cwd = os.path.join(portdir_symlink, cwd)
+ abs_cwd = os.path.join(test_repo_symlink, cwd)
proc = subprocess.Popen(cmd,
cwd=abs_cwd, env=env, stdout=stdout)
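
Note: test_simple.py now passes repository configuration to the repoman
subprocess through the PORTAGE_REPOSITORIES environment variable (taken
from settings.repositories.config_string()) instead of PORTDIR. The
value is a repos.conf-style string; a hand-written example of its shape
(paths illustrative, not the exact output of config_string()):

    PORTAGE_REPOSITORIES = """\
    [DEFAULT]
    main-repo = test_repo

    [test_repo]
    location = /var/repositories/test_repo
    """
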
diff --git a/pym/portage/tests/resolver/ResolverPlayground.py b/pym/portage/tests/resolver/ResolverPlayground.py
index 0ac209761..077e27159 100644
--- a/pym/portage/tests/resolver/ResolverPlayground.py
+++ b/pym/portage/tests/resolver/ResolverPlayground.py
@@ -1,7 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from itertools import permutations
+import fnmatch
import sys
import tempfile
import portage
@@ -25,6 +26,7 @@ from _emerge.depgraph import backtrack_depgraph
from _emerge.RootConfig import RootConfig
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class ResolverPlayground(object):
@@ -34,9 +36,10 @@ class ResolverPlayground(object):
its work.
"""
- config_files = frozenset(("package.accept_keywords", "package.use", "package.mask", "package.keywords", \
- "package.unmask", "package.properties", "package.license", "use.mask", "use.force",
- "layout.conf",))
+ config_files = frozenset(("eapi", "layout.conf", "make.conf", "package.accept_keywords",
+ "package.keywords", "package.license", "package.mask", "package.properties",
+ "package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
+ "unpack_dependencies", "use.aliases", "use.force", "use.mask", "layout.conf"))
metadata_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
@@ -54,27 +57,32 @@ class ResolverPlayground(object):
"""
def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
- user_config={}, sets={}, world=[], world_sets=[], distfiles={}, debug=False):
+ user_config={}, sets={}, world=[], world_sets=[], distfiles={},
+ targetroot=False, debug=False):
"""
- ebuilds: cpv -> metadata mapping simulating available ebuilds.
+ ebuilds: cpv -> metadata mapping simulating available ebuilds.
installed: cpv -> metadata mapping simulating installed packages.
If a metadata key is missing, it gets a default value.
profile: settings defined by the profile.
"""
self.debug = debug
self.eprefix = normalize_path(tempfile.mkdtemp())
+ portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
+
self.eroot = self.eprefix + os.sep
+ if targetroot:
+ self.target_root = os.path.join(self.eroot, 'target_root')
+ else:
+ self.target_root = os.sep
self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
self.pkgdir = os.path.join(self.eprefix, "pkgdir")
- self.portdir = os.path.join(self.eroot, "usr/portage")
self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
- os.makedirs(self.portdir)
os.makedirs(self.vdbdir)
if not debug:
portage.util.noiselimit = -2
- self.repo_dirs = {}
+ self._repositories = {}
#Make sure the main repo is always created
self._get_repo_dir("test_repo")
@@ -88,20 +96,19 @@ class ResolverPlayground(object):
self.settings, self.trees = self._load_config()
self._create_ebuild_manifests(ebuilds)
-
+
portage.util.noiselimit = 0
def _get_repo_dir(self, repo):
"""
Create the repo directory if needed.
"""
- if repo not in self.repo_dirs:
+ if repo not in self._repositories:
if repo == "test_repo":
- repo_path = self.portdir
- else:
- repo_path = os.path.join(self.eroot, "usr", "local", repo)
+ self._repositories["DEFAULT"] = {"main-repo": repo}
- self.repo_dirs[repo] = repo_path
+ repo_path = os.path.join(self.eroot, "var", "repositories", repo)
+ self._repositories[repo] = {"location": repo_path}
profile_path = os.path.join(repo_path, "profiles")
try:
@@ -110,11 +117,10 @@ class ResolverPlayground(object):
pass
repo_name_file = os.path.join(profile_path, "repo_name")
- f = open(repo_name_file, "w")
- f.write("%s\n" % repo)
- f.close()
+ with open(repo_name_file, "w") as f:
+ f.write("%s\n" % repo)
- return self.repo_dirs[repo]
+ return self._repositories[repo]["location"]
def _create_distfiles(self, distfiles):
os.makedirs(self.distdir)
@@ -131,24 +137,18 @@ class ResolverPlayground(object):
metadata = ebuilds[cpv].copy()
copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
- desc = metadata.pop("DESCRIPTION", None)
- eapi = metadata.pop("EAPI", 0)
- lic = metadata.pop("LICENSE", "")
- properties = metadata.pop("PROPERTIES", "")
- slot = metadata.pop("SLOT", 0)
- keywords = metadata.pop("KEYWORDS", "x86")
- homepage = metadata.pop("HOMEPAGE", None)
- src_uri = metadata.pop("SRC_URI", None)
- iuse = metadata.pop("IUSE", "")
- provide = metadata.pop("PROVIDE", None)
- depend = metadata.pop("DEPEND", "")
- rdepend = metadata.pop("RDEPEND", None)
- pdepend = metadata.pop("PDEPEND", None)
- required_use = metadata.pop("REQUIRED_USE", None)
+ eapi = metadata.pop("EAPI", "0")
misc_content = metadata.pop("MISC_CONTENT", None)
+ metadata.setdefault("DEPEND", "")
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("KEYWORDS", "x86")
+ metadata.setdefault("IUSE", "")
- if metadata:
- raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ if unknown_keys:
+ raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
repo_dir = self._get_repo_dir(repo)
ebuild_dir = os.path.join(repo_dir, a.cp)
@@ -158,33 +158,14 @@ class ResolverPlayground(object):
except os.error:
pass
- f = open(ebuild_path, "w")
- if copyright_header is not None:
- f.write(copyright_header)
- f.write('EAPI="' + str(eapi) + '"\n')
- if desc is not None:
- f.write('DESCRIPTION="%s"\n' % desc)
- if homepage is not None:
- f.write('HOMEPAGE="%s"\n' % homepage)
- if src_uri is not None:
- f.write('SRC_URI="%s"\n' % src_uri)
- f.write('LICENSE="' + str(lic) + '"\n')
- f.write('PROPERTIES="' + str(properties) + '"\n')
- f.write('SLOT="' + str(slot) + '"\n')
- f.write('KEYWORDS="' + str(keywords) + '"\n')
- f.write('IUSE="' + str(iuse) + '"\n')
- if provide is not None:
- f.write('PROVIDE="%s"\n' % provide)
- f.write('DEPEND="' + str(depend) + '"\n')
- if rdepend is not None:
- f.write('RDEPEND="' + str(rdepend) + '"\n')
- if pdepend is not None:
- f.write('PDEPEND="' + str(pdepend) + '"\n')
- if required_use is not None:
- f.write('REQUIRED_USE="' + str(required_use) + '"\n')
- if misc_content is not None:
- f.write(misc_content)
- f.close()
+ with open(ebuild_path, "w") as f:
+ if copyright_header is not None:
+ f.write(copyright_header)
+ f.write('EAPI="%s"\n' % eapi)
+ for k, v in metadata.items():
+ f.write('%s="%s"\n' % (k, v))
+ if misc_content is not None:
+ f.write(misc_content)
def _create_ebuild_manifests(self, ebuilds):
tmpsettings = config(clone=self.settings)
@@ -241,49 +222,25 @@ class ResolverPlayground(object):
pass
metadata = installed[cpv].copy()
- eapi = metadata.pop("EAPI", 0)
- lic = metadata.pop("LICENSE", "")
- properties = metadata.pop("PROPERTIES", "")
- slot = metadata.pop("SLOT", 0)
- build_time = metadata.pop("BUILD_TIME", "0")
- keywords = metadata.pop("KEYWORDS", "~x86")
- iuse = metadata.pop("IUSE", "")
- use = metadata.pop("USE", "")
- provide = metadata.pop("PROVIDE", None)
- depend = metadata.pop("DEPEND", "")
- rdepend = metadata.pop("RDEPEND", None)
- pdepend = metadata.pop("PDEPEND", None)
- required_use = metadata.pop("REQUIRED_USE", None)
-
- if metadata:
- raise ValueError("metadata of installed '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
-
- def write_key(key, value):
- f = open(os.path.join(vdb_pkg_dir, key), "w")
- f.write(str(value) + "\n")
- f.close()
-
- write_key("EAPI", eapi)
- write_key("BUILD_TIME", build_time)
- write_key("COUNTER", "0")
- write_key("LICENSE", lic)
- write_key("PROPERTIES", properties)
- write_key("SLOT", slot)
- write_key("LICENSE", lic)
- write_key("PROPERTIES", properties)
- write_key("repository", repo)
- write_key("KEYWORDS", keywords)
- write_key("IUSE", iuse)
- write_key("USE", use)
- if provide is not None:
- write_key("PROVIDE", provide)
- write_key("DEPEND", depend)
- if rdepend is not None:
- write_key("RDEPEND", rdepend)
- if pdepend is not None:
- write_key("PDEPEND", pdepend)
- if required_use is not None:
- write_key("REQUIRED_USE", required_use)
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("BUILD_TIME", "0")
+ metadata.setdefault("COUNTER", "0")
+ metadata.setdefault("KEYWORDS", "~x86")
+
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ unknown_keys.discard("BUILD_TIME")
+ unknown_keys.discard("COUNTER")
+ unknown_keys.discard("repository")
+ unknown_keys.discard("USE")
+ if unknown_keys:
+ raise ValueError("metadata of installed '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
+
+ metadata["repository"] = repo
+ for k, v in metadata.items():
+ with open(os.path.join(vdb_pkg_dir, k), "w") as f:
+ f.write("%s\n" % v)
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
@@ -294,9 +251,12 @@ class ResolverPlayground(object):
except os.error:
pass
- for repo in self.repo_dirs:
+ for repo in self._repositories:
+ if repo == "DEFAULT":
+ continue
+
repo_dir = self._get_repo_dir(repo)
- profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
+ profile_dir = os.path.join(repo_dir, "profiles")
metadata_dir = os.path.join(repo_dir, "metadata")
os.makedirs(metadata_dir)
@@ -310,60 +270,66 @@ class ResolverPlayground(object):
categories.add(catsplit(cpv)[0])
categories_file = os.path.join(profile_dir, "categories")
- f = open(categories_file, "w")
- for cat in categories:
- f.write(cat + "\n")
- f.close()
-
+ with open(categories_file, "w") as f:
+ for cat in categories:
+ f.write(cat + "\n")
+
#Create $REPO/profiles/license_groups
license_file = os.path.join(profile_dir, "license_groups")
- f = open(license_file, "w")
- f.write("EULA TEST\n")
- f.close()
+ with open(license_file, "w") as f:
+ f.write("EULA TEST\n")
- repo_config = repo_configs.get(repo)
+ repo_config = repo_configs.get(repo)
if repo_config:
for config_file, lines in repo_config.items():
- if config_file not in self.config_files:
+ if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
raise ValueError("Unknown config file: '%s'" % config_file)
if config_file in ("layout.conf",):
file_name = os.path.join(repo_dir, "metadata", config_file)
else:
file_name = os.path.join(profile_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
+ os.makedirs(os.path.dirname(file_name))
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+ # Temporarily write empty value of masters until it becomes default.
+ # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
+ if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
+ f.write("masters =\n")
#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
os.makedirs(os.path.join(repo_dir, "eclass"))
+ # Temporarily write empty value of masters until it becomes default.
+ if not repo_config or "layout.conf" not in repo_config:
+ layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
+ with open(layout_conf_path, "w") as f:
+ f.write("masters =\n")
+
if repo == "test_repo":
#Create a minimal profile in /usr/portage
sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
os.makedirs(sub_profile_dir)
- eapi_file = os.path.join(sub_profile_dir, "eapi")
- f = open(eapi_file, "w")
- f.write("0\n")
- f.close()
+ if not (profile and "eapi" in profile):
+ eapi_file = os.path.join(sub_profile_dir, "eapi")
+ with open(eapi_file, "w") as f:
+ f.write("0\n")
make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
- f = open(make_defaults_file, "w")
- f.write("ARCH=\"x86\"\n")
- f.write("ACCEPT_KEYWORDS=\"x86\"\n")
- f.close()
+ with open(make_defaults_file, "w") as f:
+ f.write("ARCH=\"x86\"\n")
+ f.write("ACCEPT_KEYWORDS=\"x86\"\n")
use_force_file = os.path.join(sub_profile_dir, "use.force")
- f = open(use_force_file, "w")
- f.write("x86\n")
- f.close()
+ with open(use_force_file, "w") as f:
+ f.write("x86\n")
parent_file = os.path.join(sub_profile_dir, "parent")
- f = open(parent_file, "w")
- f.write("..\n")
- f.close()
+ with open(parent_file, "w") as f:
+ f.write("..\n")
if profile:
for config_file, lines in profile.items():
@@ -371,10 +337,9 @@ class ResolverPlayground(object):
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(sub_profile_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
#Create profile symlink
os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))
@@ -400,24 +365,50 @@ class ResolverPlayground(object):
with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
f.write(herds_xml)
- # Write empty entries for each repository, in order to exercise
- # RepoConfigLoader's repos.conf processing.
- repos_conf_file = os.path.join(user_config_dir, "repos.conf")
- f = open(repos_conf_file, "w")
- for repo in sorted(self.repo_dirs.keys()):
- f.write("[%s]\n" % repo)
- f.write("\n")
- f.close()
+ make_conf = {
+ "ACCEPT_KEYWORDS": "x86",
+ "CLEAN_DELAY": "0",
+ "DISTDIR" : self.distdir,
+ "EMERGE_WARNING_DELAY": "0",
+ "PKGDIR": self.pkgdir,
+ "PORTAGE_INST_GID": str(portage.data.portage_gid),
+ "PORTAGE_INST_UID": str(portage.data.portage_uid),
+ "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
+ }
+
+ if os.environ.get("NOCOLOR"):
+ make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ make_conf_lines = []
+ for k_v in make_conf.items():
+ make_conf_lines.append('%s="%s"' % k_v)
- for config_file, lines in user_config.items():
+ if "make.conf" in user_config:
+ make_conf_lines.extend(user_config["make.conf"])
+
+ if not portage.process.sandbox_capable or \
+ os.environ.get("SANDBOX_ON") == "1":
+ # avoid problems from nested sandbox instances
+ make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
+
+ configs = user_config.copy()
+ configs["make.conf"] = make_conf_lines
+
+ for config_file, lines in configs.items():
if config_file not in self.config_files:
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(user_config_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
#Create /usr/share/portage/config/make.globals
make_globals_path = os.path.join(self.eroot,
@@ -428,7 +419,7 @@ class ResolverPlayground(object):
#Create /usr/share/portage/config/sets/portage.conf
default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
-
+
try:
os.makedirs(default_sets_conf_dir)
except os.error:
@@ -447,27 +438,9 @@ class ResolverPlayground(object):
for sets_file, lines in sets.items():
file_name = os.path.join(set_config_dir, sets_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
-
- user_config_dir = os.path.join(self.eroot, "etc", "portage")
-
- try:
- os.makedirs(user_config_dir)
- except os.error:
- pass
-
- for config_file, lines in user_config.items():
- if config_file not in self.config_files:
- raise ValueError("Unknown config file: '%s'" % config_file)
-
- file_name = os.path.join(user_config_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
def _create_world(self, world, world_sets):
#Create /var/lib/portage/world
@@ -477,54 +450,34 @@ class ResolverPlayground(object):
world_file = os.path.join(var_lib_portage, "world")
world_set_file = os.path.join(var_lib_portage, "world_sets")
- f = open(world_file, "w")
- for atom in world:
- f.write("%s\n" % atom)
- f.close()
+ with open(world_file, "w") as f:
+ for atom in world:
+ f.write("%s\n" % atom)
- f = open(world_set_file, "w")
- for atom in world_sets:
- f.write("%s\n" % atom)
- f.close()
+ with open(world_set_file, "w") as f:
+ for atom in world_sets:
+ f.write("%s\n" % atom)
def _load_config(self):
- portdir_overlay = []
- for repo_name in sorted(self.repo_dirs):
- path = self.repo_dirs[repo_name]
- if path != self.portdir:
- portdir_overlay.append(path)
+
+ create_trees_kwargs = {}
+ if self.target_root != os.sep:
+ create_trees_kwargs["target_root"] = self.target_root
env = {
- "ACCEPT_KEYWORDS": "x86",
- "DISTDIR" : self.distdir,
- "PKGDIR": self.pkgdir,
- "PORTDIR": self.portdir,
- "PORTDIR_OVERLAY": " ".join(portdir_overlay),
- 'PORTAGE_TMPDIR' : os.path.join(self.eroot, 'var/tmp'),
+ "PORTAGE_REPOSITORIES": "\n".join("[%s]\n%s" % (repo_name, "\n".join("%s = %s" % (k, v) for k, v in repo_config.items())) for repo_name, repo_config in self._repositories.items())
}
- if os.environ.get("NOCOLOR"):
- env["NOCOLOR"] = os.environ["NOCOLOR"]
-
- if os.environ.get("SANDBOX_ON") == "1":
- # avoid problems from nested sandbox instances
- env["FEATURES"] = "-sandbox"
+ trees = portage.create_trees(env=env, eprefix=self.eprefix,
+ **create_trees_kwargs)
- # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
- # need to be inherited by ebuild subprocesses.
- if 'PORTAGE_USERNAME' in os.environ:
- env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
- if 'PORTAGE_GRPNAME' in os.environ:
- env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
-
- trees = portage.create_trees(env=env, eprefix=self.eprefix)
for root, root_trees in trees.items():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
-
- return settings, trees
+
+ return trees[trees._target_eroot]["vartree"].settings, trees
def run(self, atoms, options={}, action=None):
options = options.copy()
@@ -553,7 +506,7 @@ class ResolverPlayground(object):
rval, cleanlist, ordered, req_pkg_count = \
calc_depclean(self.settings, self.trees, None,
options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
- result = ResolverPlaygroundDepcleanResult( \
+ result = ResolverPlaygroundDepcleanResult(
atoms, rval, cleanlist, ordered, req_pkg_count)
else:
params = create_depgraph_params(options, action)
@@ -577,9 +530,9 @@ class ResolverPlayground(object):
return
def cleanup(self):
- portdb = self.trees[self.eroot]["porttree"].dbapi
- portdb.close_caches()
- portage.dbapi.porttree.portdbapi.portdbapi_instances.remove(portdb)
+ for eroot in self.trees:
+ portdb = self.trees[eroot]["porttree"].dbapi
+ portdb.close_caches()
if self.debug:
print("\nEROOT=%s" % self.eroot)
else:
@@ -742,13 +695,14 @@ class ResolverPlaygroundResult(object):
if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
self.mergelist = []
+ host_root = self.depgraph._frozen_config._running_root.root
for x in self.depgraph._dynamic_config._serialized_tasks_cache:
if isinstance(x, Blocker):
self.mergelist.append(x.atom)
else:
repo_str = ""
- if x.metadata["repository"] != "test_repo":
- repo_str = _repo_separator + x.metadata["repository"]
+ if x.repo != "test_repo":
+ repo_str = _repo_separator + x.repo
mergelist_str = x.cpv + repo_str
if x.built:
if x.operation == "merge":
@@ -756,6 +710,8 @@ class ResolverPlaygroundResult(object):
else:
desc = x.operation
mergelist_str = "[%s]%s" % (desc, mergelist_str)
+ if x.root != host_root:
+ mergelist_str += "{targetroot}"
self.mergelist.append(mergelist_str)
if self.depgraph._dynamic_config._needed_use_config_changes:
@@ -781,7 +737,7 @@ class ResolverPlaygroundResult(object):
self.license_changes[pkg.cpv] = missing_licenses
if self.depgraph._dynamic_config._slot_conflict_handler is not None:
- self.slot_collision_solutions = []
+ self.slot_collision_solutions = []
handler = self.depgraph._dynamic_config._slot_conflict_handler
for change in handler.changes:
@@ -793,7 +749,7 @@ class ResolverPlaygroundResult(object):
if self.depgraph._dynamic_config._circular_dependency_handler is not None:
handler = self.depgraph._dynamic_config._circular_dependency_handler
sol = handler.solutions
- self.circular_dependency_solutions = dict( zip([x.cpv for x in sol.keys()], sol.values()) )
+ self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
class ResolverPlaygroundDepcleanResult(object):
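
Note: the ResolverPlayground refactor replaces one pop() per metadata
key with a generic writer: apply a few setdefault() defaults, validate
the remaining keys against portage.dbapi.dbapi._known_keys, and emit
every key uniformly. The pattern, as a stand-alone sketch (with a
stand-in known-keys set instead of the real _known_keys):

    known_keys = {"DEPEND", "RDEPEND", "PDEPEND", "SLOT", "KEYWORDS",
        "IUSE", "LICENSE", "DESCRIPTION", "HOMEPAGE", "REQUIRED_USE"}

    def write_ebuild_metadata(path, metadata):
        metadata = dict(metadata)
        eapi = metadata.pop("EAPI", "0")
        metadata.setdefault("SLOT", "0")
        metadata.setdefault("KEYWORDS", "x86")
        unknown = set(metadata).difference(known_keys)
        if unknown:
            raise ValueError("unknown metadata keys: %s" % sorted(unknown))
        with open(path, "w") as f:
            f.write('EAPI="%s"\n' % eapi)
            for k, v in metadata.items():
                f.write('%s="%s"\n' % (k, v))
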
diff --git a/pym/portage/tests/resolver/test_autounmask.py b/pym/portage/tests/resolver/test_autounmask.py
index 6acac9984..75fb36843 100644
--- a/pym/portage/tests/resolver/test_autounmask.py
+++ b/pym/portage/tests/resolver/test_autounmask.py
@@ -10,9 +10,9 @@ class AutounmaskTestCase(TestCase):
ebuilds = {
#ebuilds to test use changes
- "dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2},
- "dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2},
- "dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"},
+ "dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2},
+ "dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2},
+ "dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"},
"dev-libs/C-1": {},
"dev-libs/D-1": {},
@@ -56,10 +56,10 @@ class AutounmaskTestCase(TestCase):
"dev-util/R-1": { "IUSE": "bar" },
#ebuilds to test interaction with REQUIRED_USE
- "app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 },
- "app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" },
+ "app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 },
+ "app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" },
- "app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
"app-portage/C-1": { "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
}
@@ -69,183 +69,183 @@ class AutounmaskTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/A:1"],
- options = {"--autounmask": "n"},
- success = False),
+ options={"--autounmask": "n"},
+ success=False),
ResolverPlaygroundTestCase(
["dev-libs/A:1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
- use_changes = { "dev-libs/B-1": {"foo": True} } ),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
#Make sure we restart if needed.
ResolverPlaygroundTestCase(
["dev-libs/A:1", "dev-libs/B"],
- options = {"--autounmask": True},
- all_permutations = True,
- success = False,
- mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
- use_changes = { "dev-libs/B-1": {"foo": True} } ),
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
ResolverPlaygroundTestCase(
["dev-libs/A:1", "dev-libs/A:2", "dev-libs/B"],
- options = {"--autounmask": True},
- all_permutations = True,
- success = False,
- mergelist = ["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
- ignore_mergelist_order = True,
- use_changes = { "dev-libs/B-1": {"foo": True, "bar": True} } ),
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
+ ignore_mergelist_order=True,
+ use_changes={ "dev-libs/B-1": {"foo": True, "bar": True} }),
#Test keywording.
#The simple case.
ResolverPlaygroundTestCase(
["app-misc/Z"],
- options = {"--autounmask": "n"},
- success = False),
+ options={"--autounmask": "n"},
+ success=False),
ResolverPlaygroundTestCase(
["app-misc/Z"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-misc/Y-1", "app-misc/Z-1"],
- unstable_keywords = ["app-misc/Y-1", "app-misc/Z-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-misc/Y-1", "app-misc/Z-1"],
+ unstable_keywords=["app-misc/Y-1", "app-misc/Z-1"]),
#Make sure that the backtracking for slot conflicts handles our mess.
ResolverPlaygroundTestCase(
["=app-misc/V-1", "app-misc/W"],
- options = {"--autounmask": True},
- all_permutations = True,
- success = False,
- mergelist = ["app-misc/W-2", "app-misc/V-1"],
- unstable_keywords = ["app-misc/W-2", "app-misc/V-1"]),
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["app-misc/W-2", "app-misc/V-1"],
+ unstable_keywords=["app-misc/W-2", "app-misc/V-1"]),
#Mixed testing
#Make sure we don't change use for something in a || dep if there is another choice
#that needs no change.
-
+
ResolverPlaygroundTestCase(
["=sci-libs/K-1"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-1"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-1"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-2"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-2"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-2"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-3"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-3"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-3"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-4"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-4"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-4"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-5"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-5"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-5"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-6"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-6"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-6"]),
#Make sure we prefer use changes over keyword changes.
ResolverPlaygroundTestCase(
["=sci-libs/K-7"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["sci-libs/L-1", "sci-libs/K-7"],
- use_changes = { "sci-libs/L-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-7"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
ResolverPlaygroundTestCase(
["=sci-libs/K-8"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["sci-libs/L-1", "sci-libs/K-8"],
- use_changes = { "sci-libs/L-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-8"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
#Test these nice "required by cat/pkg[foo]" messages.
ResolverPlaygroundTestCase(
["=dev-util/Q-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-util/R-1", "dev-util/Q-1"],
- use_changes = { "dev-util/R-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-1"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
ResolverPlaygroundTestCase(
["=dev-util/Q-2"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-util/R-1", "dev-util/Q-2"],
- use_changes = { "dev-util/R-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-2"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
#Test interaction with REQUIRED_USE.
ResolverPlaygroundTestCase(
["=app-portage/A-1"],
- options = { "--autounmask": True },
- use_changes = None,
- success = False),
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
ResolverPlaygroundTestCase(
["=app-portage/A-2"],
- options = { "--autounmask": True },
- use_changes = None,
- success = False),
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
ResolverPlaygroundTestCase(
["=app-portage/C-1"],
- options = { "--autounmask": True },
- use_changes = None,
- success = False),
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
#Make sure we don't change masked/forced flags.
ResolverPlaygroundTestCase(
["dev-libs/E:1"],
- options = {"--autounmask": True},
- use_changes = None,
- success = False),
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
ResolverPlaygroundTestCase(
["dev-libs/E:2"],
- options = {"--autounmask": True},
- use_changes = None,
- success = False),
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
#Test mask and keyword changes.
ResolverPlaygroundTestCase(
["app-text/A"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/A-1"],
- needed_p_mask_changes = ["app-text/A-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
ResolverPlaygroundTestCase(
["app-text/B"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/B-1"],
- unstable_keywords = ["app-text/B-1"],
- needed_p_mask_changes = ["app-text/B-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/B-1"],
+ unstable_keywords=["app-text/B-1"],
+ needed_p_mask_changes=["app-text/B-1"]),
ResolverPlaygroundTestCase(
["app-text/C"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/C-1"],
- unstable_keywords = ["app-text/C-1"],
- needed_p_mask_changes = ["app-text/C-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/C-1"],
+ unstable_keywords=["app-text/C-1"],
+ needed_p_mask_changes=["app-text/C-1"]),
#Make sure unstable keyword is preferred over missing keyword
ResolverPlaygroundTestCase(
["app-text/D"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/D-1"],
- unstable_keywords = ["app-text/D-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-1"],
+ unstable_keywords=["app-text/D-1"]),
#Test missing keyword
ResolverPlaygroundTestCase(
["=app-text/D-2"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/D-2"],
- unstable_keywords = ["app-text/D-2"])
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-2"],
+ unstable_keywords=["app-text/D-2"])
)
profile = {
@@ -279,7 +279,7 @@ class AutounmaskTestCase(TestCase):
"dev-libs/A-1": { "LICENSE": "TEST" },
"dev-libs/B-1": { "LICENSE": "TEST", "IUSE": "foo", "KEYWORDS": "~x86"},
"dev-libs/C-1": { "DEPEND": "dev-libs/B[foo]", "EAPI": 2 },
-
+
"dev-libs/D-1": { "DEPEND": "dev-libs/E dev-libs/F", "LICENSE": "TEST" },
"dev-libs/E-1": { "LICENSE": "TEST" },
"dev-libs/E-2": { "LICENSE": "TEST" },
@@ -292,40 +292,40 @@ class AutounmaskTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
["=dev-libs/A-1"],
- options = {"--autounmask": 'n'},
- success = False),
+ options={"--autounmask": 'n'},
+ success=False),
ResolverPlaygroundTestCase(
["=dev-libs/A-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/A-1"],
- license_changes = { "dev-libs/A-1": set(["TEST"]) }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/A-1"],
+ license_changes={ "dev-libs/A-1": set(["TEST"]) }),
#Test license+keyword+use change at once.
ResolverPlaygroundTestCase(
["=dev-libs/C-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/B-1", "dev-libs/C-1"],
- license_changes = { "dev-libs/B-1": set(["TEST"]) },
- unstable_keywords = ["dev-libs/B-1"],
- use_changes = { "dev-libs/B-1": { "foo": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/B-1", "dev-libs/C-1"],
+ license_changes={ "dev-libs/B-1": set(["TEST"]) },
+ unstable_keywords=["dev-libs/B-1"],
+ use_changes={ "dev-libs/B-1": { "foo": True } }),
#Test license with backtracking.
ResolverPlaygroundTestCase(
["=dev-libs/D-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
- license_changes = { "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
+ license_changes={ "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
#Test license only for bug #420847
ResolverPlaygroundTestCase(
["dev-java/sun-jdk"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-java/sun-jdk-1.6.0.31"],
- license_changes = { "dev-java/sun-jdk-1.6.0.31": set(["TEST"]) }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-java/sun-jdk-1.6.0.31"],
+ license_changes={ "dev-java/sun-jdk-1.6.0.31": set(["TEST"]) }),
)
playground = ResolverPlayground(ebuilds=ebuilds)
@@ -348,7 +348,7 @@ class AutounmaskTestCase(TestCase):
"dev-libs/D-1": { "DEPEND": "dev-libs/A" },
}
- world_sets = [ "@test-set" ]
+ world_sets = ["@test-set"]
sets = {
"test-set": (
"dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D",
@@ -362,29 +362,29 @@ class AutounmaskTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/B", "dev-libs/C", "dev-libs/D"],
all_permutations=True,
- options = {"--autounmask": "y"},
+ options={"--autounmask": "y"},
mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order=True,
- unstable_keywords = ["dev-libs/A-2"],
- success = False),
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
ResolverPlaygroundTestCase(
["@test-set"],
all_permutations=True,
- options = {"--autounmask": "y"},
+ options={"--autounmask": "y"},
mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order=True,
- unstable_keywords = ["dev-libs/A-2"],
- success = False),
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
ResolverPlaygroundTestCase(
["@world"],
all_permutations=True,
- options = {"--autounmask": "y"},
+ options={"--autounmask": "y"},
mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order=True,
- unstable_keywords = ["dev-libs/A-2"],
- success = False),
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
)
@@ -411,16 +411,16 @@ class AutounmaskTestCase(TestCase):
#Test mask and keyword changes.
ResolverPlaygroundTestCase(
["app-text/A"],
- options = {"--autounmask": True,
- "--autounmask-keep-masks": "y"},
- success = False),
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "y"},
+ success=False),
ResolverPlaygroundTestCase(
["app-text/A"],
- options = {"--autounmask": True,
- "--autounmask-keep-masks": "n"},
- success = False,
- mergelist = ["app-text/A-1"],
- needed_p_mask_changes = ["app-text/A-1"]),
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "n"},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
)
profile = {
@@ -460,16 +460,16 @@ class AutounmaskTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
["dev-libs/B"],
- success = False,
- mergelist = ["dev-libs/A-2", "dev-libs/B-1"],
- needed_p_mask_changes = set(["dev-libs/A-2"])),
+ success=False,
+ mergelist=["dev-libs/A-2", "dev-libs/B-1"],
+ needed_p_mask_changes=set(["dev-libs/A-2"])),
ResolverPlaygroundTestCase(
["dev-libs/C"],
- success = False,
- mergelist = ["dev-libs/A-9999", "dev-libs/C-1"],
- unstable_keywords = set(["dev-libs/A-9999"]),
- needed_p_mask_changes = set(["dev-libs/A-9999"])),
+ success=False,
+ mergelist=["dev-libs/A-9999", "dev-libs/C-1"],
+ unstable_keywords=set(["dev-libs/A-9999"]),
+ needed_p_mask_changes=set(["dev-libs/A-9999"])),
)
playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
diff --git a/pym/portage/tests/resolver/test_autounmask_multilib_use.py b/pym/portage/tests/resolver/test_autounmask_multilib_use.py
new file mode 100644
index 000000000..e160c77ce
--- /dev/null
+++ b/pym/portage/tests/resolver/test_autounmask_multilib_use.py
@@ -0,0 +1,85 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class AutounmaskMultilibUseTestCase(TestCase):
+
+ def testAutounmaskMultilibUse(self):
+
+ self.todo = True
+
+ ebuilds = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ installed = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ user_config = {
+ #"make.conf" : ("USE=\"abi_x86_32 abi_x86_64\"",)
+ "make.conf" : ("USE=\"abi_x86_64\"",)
+ }
+
+ world = ("games-util/steam-client-meta",)
+
+ test_cases = (
+
+ # Test autounmask solving of multilib use deps for bug #481628.
+ # We would like it to suggest some USE changes, but instead it
+ # currently fails with a SLOT conflict.
+
+ ResolverPlaygroundTestCase(
+ ["x11-proto/xextproto", "x11-libs/libXaw"],
+ options = {"--oneshot": True, "--autounmask": True,
+ "--backtrack": 30},
+ mergelist = ["x11-proto/xextproto-7.2.1-r1", "x11-libs/libXaw-1.0.11-r2"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ user_config=user_config, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_backtracking.py b/pym/portage/tests/resolver/test_backtracking.py
index 600f68216..3b69eda09 100644
--- a/pym/portage/tests/resolver/test_backtracking.py
+++ b/pym/portage/tests/resolver/test_backtracking.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -31,7 +31,7 @@ class BacktrackingTestCase(TestCase):
playground.cleanup()
- def testHittingTheBacktrackLimit(self):
+ def testBacktrackNotNeeded(self):
ebuilds = {
"dev-libs/A-1": {},
"dev-libs/A-2": {},
@@ -45,47 +45,9 @@ class BacktrackingTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/C", "dev-libs/D"],
all_permutations = True,
- mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
- ignore_mergelist_order = True,
- success = True),
- #This one hits the backtrack limit. Be aware that this depends on the argument order.
- ResolverPlaygroundTestCase(
- ["dev-libs/D", "dev-libs/C"],
options = { "--backtrack": 1 },
- mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/A-2", "dev-libs/B-2", "dev-libs/C-1", "dev-libs/D-1"],
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order = True,
- slot_collision_solutions = [],
- success = False),
- )
-
- playground = ResolverPlayground(ebuilds=ebuilds)
-
- try:
- for test_case in test_cases:
- playground.run_TestCase(test_case)
- self.assertEqual(test_case.test_success, True, test_case.fail_msg)
- finally:
- playground.cleanup()
-
-
- def testBacktrackingGoodVersionFirst(self):
- """
- When backtracking due to slot conflicts, we masked the version that has been pulled
- in first. This is not always a good idea. Mask the highest version instead.
- """
-
- ebuilds = {
- "dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
- "dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
- "dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
- "dev-libs/C-1": { },
- "dev-libs/C-2": { },
- }
-
- test_cases = (
- ResolverPlaygroundTestCase(
- ["dev-libs/A"],
- mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", ],
success = True),
)
@@ -118,7 +80,7 @@ class BacktrackingTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/B", "dev-libs/A"],
all_permutations = True,
- mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1", ],
+ mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1",],
ignore_mergelist_order = True,
success = True),
)
@@ -190,7 +152,7 @@ class BacktrackingTestCase(TestCase):
"dev-libs/D-1": { "RDEPEND": "<dev-libs/A-2" },
}
- world = [ "dev-libs/B", "dev-libs/C" ]
+ world = ["dev-libs/B", "dev-libs/C"]
options = {'--update' : True, '--deep' : True, '--selective' : True}
diff --git a/pym/portage/tests/resolver/test_blocker.py b/pym/portage/tests/resolver/test_blocker.py
new file mode 100644
index 000000000..94a88b8b4
--- /dev/null
+++ b/pym/portage/tests/resolver/test_blocker.py
@@ -0,0 +1,48 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotConflictWithBlockerTestCase(TestCase):
+
+ def testBlocker(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/X" },
+ "dev-libs/B-1": { "DEPEND": "<dev-libs/X-2" },
+ "dev-libs/C-1": { "DEPEND": "<dev-libs/X-3" },
+
+ "dev-libs/X-1": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-1" },
+ "dev-libs/X-2": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-2" },
+ "dev-libs/X-3": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-3" },
+
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ installed = {
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B", "dev-libs/C"],
+ options = { "--backtrack": 0 },
+ all_permutations = True,
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/X-1", "[uninstall]dev-libs/Y-1", "!=dev-libs/Y-1", \
+ ("dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
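
Note: in the expected mergelist, "!=dev-libs/Y-1" is the blocker atom
itself and "[uninstall]dev-libs/Y-1" is the unmerge it forces; even with
--backtrack=0 the resolver must schedule that uninstall between merging
X-1 and the packages that depend on it. As a reminder of the notation:

    # RDEPEND="!=dev-libs/Y-1" in dev-libs/X-1 is a weak blocker: Y-1
    # must not remain installed alongside X-1, and the resolver may
    # satisfy it by uninstalling Y-1. The strong form (EAPI 2+), which
    # forbids automatic resolution, would be "!!=dev-libs/Y-1".
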
diff --git a/pym/portage/tests/resolver/test_complete_graph.py b/pym/portage/tests/resolver/test_complete_graph.py
index 1b0342c67..95b1f8809 100644
--- a/pym/portage/tests/resolver/test_complete_graph.py
+++ b/pym/portage/tests/resolver/test_complete_graph.py
@@ -93,7 +93,7 @@ class CompleteGraphTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[">=sys-libs/x-2"],
- options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot-abi": "n"},
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
mergelist = ["sys-libs/x-2"],
success = True,
),
@@ -106,7 +106,7 @@ class CompleteGraphTestCase(TestCase):
),
ResolverPlaygroundTestCase(
["<sys-libs/x-1"],
- options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot-abi": "n"},
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
mergelist = ["sys-libs/x-0.1"],
success = True,
),
diff --git a/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py b/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
new file mode 100644
index 000000000..fddbead7c
--- /dev/null
+++ b/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
@@ -0,0 +1,74 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CompleteIfNewSubSlotWithoutRevBumpTestCase(TestCase):
+
+ def testCompleteIfNewSubSlotWithoutRevBump(self):
+
+ ebuilds = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:=",
+ "RDEPEND": ">=media-libs/libpng-1.4:="
+ },
+ }
+
+ binpkgs = {
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ installed = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0/15"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ world = ["x11-libs/gdk-pixbuf"]
+
+ test_cases = (
+ # Test that --complete-graph-if-new-ver=y triggers rebuild
+ # when the sub-slot changes without a revbump.
+ ResolverPlaygroundTestCase(
+ ["media-libs/libpng"],
+ options = {
+ "--oneshot": True,
+ "--complete-graph-if-new-ver": "y",
+ "--rebuild-if-new-slot": "n",
+ "--usepkg": True
+ },
+ success = True,
+ mergelist = [
+ "media-libs/libpng-1.5.14",
+ "x11-libs/gdk-pixbuf-2.26.5"
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
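
Note: this test encodes the EAPI 5 slot-operator behavior. The ebuild
declares the dependency with ":=", and at build time the operator is
rewritten to the resolved slot/sub-slot, here ":0/15=". When a new
libpng changes only the sub-slot without a revision bump, the recorded
dependency of the installed gdk-pixbuf no longer matches, so
--complete-graph-if-new-ver=y must pull gdk-pixbuf back into the graph
for a rebuild -- exactly the mergelist asserted above. Illustration:

    # declared in the ebuild:  RDEPEND=">=media-libs/libpng-1.4:="
    # recorded at build time:  RDEPEND=">=media-libs/libpng-1.4:0/15="
    # after libpng's sub-slot changes (e.g. SLOT="0/16"), the recorded
    # atom no longer matches, so the consumer is rebuilt despite the
    # absence of a revision bump.
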
diff --git a/pym/portage/tests/resolver/test_depclean.py b/pym/portage/tests/resolver/test_depclean.py
index ba70144b8..42350be8b 100644
--- a/pym/portage/tests/resolver/test_depclean.py
+++ b/pym/portage/tests/resolver/test_depclean.py
@@ -23,9 +23,9 @@ class SimpleDepcleanTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
@@ -63,9 +63,9 @@ class DepcleanWithDepsTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1", "dev-libs/D-1",
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/D-1",
"dev-libs/E-1", "dev-libs/F-1"]),
)
@@ -104,10 +104,10 @@ class DepcleanWithInstalledMaskedTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- #cleanlist = ["dev-libs/C-1"]),
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ #cleanlist=["dev-libs/C-1"]),
+ cleanlist=["dev-libs/B-1"]),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
@@ -143,9 +143,9 @@ class DepcleanInstalledKeywordMaskedSlotTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-2.7"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-2.7"]),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
@@ -169,31 +169,31 @@ class DepcleanWithExcludeTestCase(TestCase):
#Without --exclude.
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1", "dev-libs/A-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/A-1"]),
ResolverPlaygroundTestCase(
["dev-libs/A"],
- options = {"--depclean": True},
- success = True,
- cleanlist = []),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
ResolverPlaygroundTestCase(
["dev-libs/B"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
#With --exclude
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True, "--exclude": ["dev-libs/A"]},
- success = True,
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True, "--exclude": ["dev-libs/A"]},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
ResolverPlaygroundTestCase(
["dev-libs/B"],
- options = {"--depclean": True, "--exclude": ["dev-libs/B"]},
- success = True,
- cleanlist = []),
+ options={"--depclean": True, "--exclude": ["dev-libs/B"]},
+ success=True,
+ cleanlist=[]),
)
playground = ResolverPlayground(installed=installed)
@@ -215,25 +215,25 @@ class DepcleanWithExcludeAndSlotsTestCase(TestCase):
"dev-libs/Y-2": { "RDEPEND": "=dev-libs/Z-2", "SLOT": 2 },
}
- world = [ "dev-libs/Y" ]
+ world = ["dev-libs/Y"]
test_cases = (
#Without --exclude.
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/Y-1", "dev-libs/Z-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/Y-1", "dev-libs/Z-1"]),
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True, "--exclude": ["dev-libs/Z"]},
- success = True,
- cleanlist = ["dev-libs/Y-1"]),
+ options={"--depclean": True, "--exclude": ["dev-libs/Z"]},
+ success=True,
+ cleanlist=["dev-libs/Y-1"]),
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True, "--exclude": ["dev-libs/Y"]},
- success = True,
- cleanlist = []),
+ options={"--depclean": True, "--exclude": ["dev-libs/Y"]},
+ success=True,
+ cleanlist=[]),
)
playground = ResolverPlayground(installed=installed, world=world)
@@ -256,24 +256,24 @@ class DepcleanAndWildcardsTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
["*/*"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
ResolverPlaygroundTestCase(
["dev-libs/*"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
ResolverPlaygroundTestCase(
["*/A"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/A-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1"]),
ResolverPlaygroundTestCase(
["*/B"],
- options = {"--depclean": True},
- success = True,
- cleanlist = []),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
)
playground = ResolverPlayground(installed=installed)
diff --git a/pym/portage/tests/resolver/test_depclean_order.py b/pym/portage/tests/resolver/test_depclean_order.py
new file mode 100644
index 000000000..9511d292c
--- /dev/null
+++ b/pym/portage/tests/resolver/test_depclean_order.py
@@ -0,0 +1,57 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+ def testSimpleDepclean(self):
+
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ installed = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:0/0=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ world = (
+ "dev-libs/C",
+ )
+
+ test_cases = (
+ # Remove dev-libs/A-1 first because of dev-libs/B:0/0= (built
+ # slot-operator dep).
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ ordered=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_depclean_slot_unavailable.py b/pym/portage/tests/resolver/test_depclean_slot_unavailable.py
new file mode 100644
index 000000000..689392bb5
--- /dev/null
+++ b/pym/portage/tests/resolver/test_depclean_slot_unavailable.py
@@ -0,0 +1,78 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class DepcleanUnavailableSlotTestCase(TestCase):
+
+ def testDepcleanUnavailableSlot(self):
+ """
+ Test bug #445506, where we want to remove the slot
+ for which the ebuild is no longer available, even
+ though its version is higher.
+ """
+
+ ebuilds = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ world = ["sys-kernel/gentoo-sources"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.2.21"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ # Now make the newer version available and verify that
+ # the lower version is depcleaned.
+ ebuilds.update({
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ })
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.0.53"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_features_test_use.py b/pym/portage/tests/resolver/test_features_test_use.py
new file mode 100644
index 000000000..bdd179d7a
--- /dev/null
+++ b/pym/portage/tests/resolver/test_features_test_use.py
@@ -0,0 +1,68 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class FeaturesTestUse(TestCase):
+
+ def testFeaturesTestUse(self):
+ ebuilds = {
+ "dev-libs/A-1" : {
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "IUSE": "test foo"
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1" : {
+ "USE": "",
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "USE": "foo",
+ "IUSE": "test foo"
+ },
+ }
+
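+ # FEATURES=test forces USE=test for any package with "test" in
+ # IUSE, overriding the USE="-test" setting below.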
+ user_config = {
+ "make.conf" : ("FEATURES=test", "USE=\"-test -foo\"")
+ }
+
+ test_cases = (
+
+ # USE=test state should not trigger --newuse rebuilds, as
+ # specified in bug #373209, comment #3.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ # USE=-test -> USE=test, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ # USE=foo -> USE=-foo, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
diff --git a/pym/portage/tests/resolver/test_merge_order.py b/pym/portage/tests/resolver/test_merge_order.py
index 5b5709afe..5d000d12b 100644
--- a/pym/portage/tests/resolver/test_merge_order.py
+++ b/pym/portage/tests/resolver/test_merge_order.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
@@ -191,6 +191,12 @@ class MergeOrderTestCase(TestCase):
"DEPEND" : "kde-base/libkdegames",
"RDEPEND" : "kde-base/libkdegames",
},
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "DEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ "RDEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ },
"media-video/libav-0.7_pre20110327" : {
"EAPI" : "2",
"IUSE" : "X +encode",
@@ -205,6 +211,12 @@ class MergeOrderTestCase(TestCase):
"IUSE" : "X +encode",
"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
},
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
}
installed = {
@@ -256,6 +268,13 @@ class MergeOrderTestCase(TestCase):
"RDEPEND": "",
},
"app-arch/xz-utils-5.0.1" : {},
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "USE": "xorg",
+ "DEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ "RDEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ },
"media-video/ffmpeg-0.7_rc1" : {
"EAPI" : "2",
"IUSE" : "X +encode",
@@ -267,6 +286,12 @@ class MergeOrderTestCase(TestCase):
"USE" : "encode",
"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
},
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
}
test_cases = (
@@ -434,6 +459,14 @@ class MergeOrderTestCase(TestCase):
('kde-base/libkdegames-3.5.7', 'kde-base/kmines-3.5.7'),
),
mergelist = [('kde-base/kdelibs-3.5.7', 'dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2', 'app-arch/xz-utils-5.0.2', 'kde-base/libkdegames-3.5.7', 'kde-base/kdnssd-3.5.7', 'kde-base/kmines-3.5.7')]),
+ # Test satisfied circular DEPEND/RDEPEND with one := operator.
+ # Both deps are already satisfied by installed packages, but
+ # the := dep is given higher priority in merge order.
+ ResolverPlaygroundTestCase(
+ ["media-libs/mesa", "x11-base/xorg-server"],
+ success = True,
+ all_permutations = True,
+ mergelist = ['x11-base/xorg-server-1.14.1', 'media-libs/mesa-9.1.3']),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
diff --git a/pym/portage/tests/resolver/test_multirepo.py b/pym/portage/tests/resolver/test_multirepo.py
index 34c6d45a0..2b1a6d073 100644
--- a/pym/portage/tests/resolver/test_multirepo.py
+++ b/pym/portage/tests/resolver/test_multirepo.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -37,16 +37,25 @@ class MultirepoTestCase(TestCase):
"dev-libs/I-1::repo2": { "SLOT" : "1"},
"dev-libs/I-2::repo2": { "SLOT" : "2"},
+
+ "dev-libs/K-1::repo2": { },
}
installed = {
"dev-libs/H-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
"dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo1": { },
+ }
+
+ binpkgs = {
+ "dev-libs/C-1::repo2": { },
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo2": { },
}
sets = {
- "multirepotest":
- ( "dev-libs/A::test_repo", )
+ "multirepotest":
+ ("dev-libs/A::test_repo",)
}
test_cases = (
@@ -96,6 +105,68 @@ class MultirepoTestCase(TestCase):
check_repo_names = True,
mergelist = ["dev-libs/D-1::repo2"]),
+ #--usepkg: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #--newrepo --usepkgonly: ebuild is ignored
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
+ #--newrepo --usepkgonly: if binpkg matches installed, do nothing
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ #--newrepo --usepkgonly: reinstall if binpkg has new repo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/K-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
#Atoms with slots
ResolverPlaygroundTestCase(
["dev-libs/E"],
@@ -137,6 +208,15 @@ class MultirepoTestCase(TestCase):
success = True,
mergelist = []),
+ # Dependency on installed dev-libs/I-2 ebuild should trigger reinstall
+ # when --newrepo flag is used.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = {"--update": True, "--deep": True, "--newrepo": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
# Check interaction between repo priority and unsatisfied
# REQUIRED_USE, for bug #350254.
ResolverPlaygroundTestCase(
@@ -147,7 +227,7 @@ class MultirepoTestCase(TestCase):
)
playground = ResolverPlayground(ebuilds=ebuilds,
- installed=installed, sets=sets)
+ binpkgs=binpkgs, installed=installed, sets=sets)
try:
for test_case in test_cases:
playground.run_TestCase(test_case)
diff --git a/pym/portage/tests/resolver/test_onlydeps.py b/pym/portage/tests/resolver/test_onlydeps.py
new file mode 100644
index 000000000..986769aee
--- /dev/null
+++ b/pym/portage/tests/resolver/test_onlydeps.py
@@ -0,0 +1,34 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OnlydepsTestCase(TestCase):
+
+ def testOnlydeps(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B" },
+ "dev-libs/B-1": { },
+ }
+ installed = {
+ "dev-libs/B-1": { },
+ }
+
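+ # With --onlydeps the arguments themselves are not merged:
+ # dev-libs/A stays out of the mergelist and only its
+ # dependency dev-libs/B-1 is installed.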
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ options = { "--onlydeps": True },
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_or_choices.py b/pym/portage/tests/resolver/test_or_choices.py
new file mode 100644
index 000000000..90e681408
--- /dev/null
+++ b/pym/portage/tests/resolver/test_or_choices.py
@@ -0,0 +1,134 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class OrChoicesTestCase(TestCase):
+
+ def testOrChoices(self):
+ ebuilds = {
+ "dev-lang/vala-0.20.0" : {
+ "EAPI": "5",
+ "SLOT": "0.20"
+ },
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ #"dev-libs/gobject-introspection-1.36.0" : {
+ # "EAPI": "5",
+ # "RDEPEND" : "!<dev-lang/vala-0.20.0",
+ #},
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ installed = {
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ world = ["dev-libs/gobject-introspection", "sys-apps/systemd-ui"]
+
+ test_cases = (
+ # Demonstrate that vala:0.20 update is pulled in, for bug #478188
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ all_permutations = True,
+ mergelist = ['dev-lang/vala-0.20.0']),
+ # Verify that vala:0.20 is not pulled in without --deep
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True},
+ success = True,
+ all_permutations = True,
+ mergelist = []),
+ # Verify that vala:0.20 is not pulled in without --update
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective": True, "--deep": True},
+ success = True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testOrChoicesLibpostproc(self):
+ ebuilds = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-video/ffmpeg-1.2.2" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ installed = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ world = ["media-plugins/gst-plugins-ffmpeg"]
+
+ test_cases = (
+ # Demonstrate that libpostproc is preferred
+ # over ffmpeg:0 for bug #480736.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_package_tracker.py b/pym/portage/tests/resolver/test_package_tracker.py
new file mode 100644
index 000000000..8fa3513e6
--- /dev/null
+++ b/pym/portage/tests/resolver/test_package_tracker.py
@@ -0,0 +1,261 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+
+from portage.dep import Atom
+from portage.tests import TestCase
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
+
+class PackageTrackerTestCase(TestCase):
+
+ FakePackage = collections.namedtuple("FakePackage",
+ ["root", "cp", "cpv", "slot", "slot_atom", "version", "repo"])
+
+ FakeConflict = collections.namedtuple("FakeConflict",
+ ["description", "root", "pkgs"])
+
+ def make_pkg(self, root, atom, repo="test_repo"):
+ atom = Atom(atom)
+ slot_atom = Atom("%s:%s" % (atom.cp, atom.slot))
+ slot = atom.slot
+
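+ # e.g. "=dev-libs/X-1:0" yields cp="dev-libs/X",
+ # cpv="dev-libs/X-1", slot="0", slot_atom="dev-libs/X:0"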
+ return self.FakePackage(root=root, cp=atom.cp, cpv=atom.cpv,
+ slot=slot, slot_atom=slot_atom, version=atom.version, repo=repo)
+
+ def make_conflict(self, description, root, pkgs):
+ return self.FakeConflict(description=description, root=root, pkgs=pkgs)
+
+ def test_add_remove_discard(self):
+ p = PackageTracker()
+
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ self.assertTrue(p.contains(x1, installed=True))
+ self.assertTrue(p.contains(x1, installed=False))
+ p.remove_pkg(x1)
+ self.assertTrue(x1 not in p)
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+
+ self.assertRaises(KeyError, p.remove_pkg, x2)
+
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+ p.remove_pkg(x2)
+ self.assertTrue(x2 not in p)
+ p.discard_pkg(x2)
+ self.assertTrue(x2 not in p)
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+
+ all_pkgs = list(p.all_pkgs("/"))
+ self.assertEqual(len(all_pkgs), 2)
+ self.assertTrue(all_pkgs[0] is x1 and all_pkgs[1] is x2)
+
+ self.assertEqual(len(list(p.all_pkgs("/"))), 2)
+ self.assertEqual(len(list(p.all_pkgs("/xxx"))), 0)
+
+ def test_match(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ p.add_pkg(x2)
+ p.add_pkg(x1)
+
+ matches = list(p.match("/", Atom("=dev-libs/X-1")))
+ self.assertTrue(x1 in matches)
+ self.assertEqual(len(matches), 1)
+
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
+ matches = list(p.match("/xxx", Atom("dev-libs/X")))
+ self.assertEqual(len(matches), 0)
+
+ matches = list(p.match("/", Atom("dev-libs/Y")))
+ self.assertEqual(len(matches), 0)
+
+ p.add_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1] and x3 is matches[2])
+ self.assertEqual(len(matches), 3)
+
+ p.remove_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
+ def test_dbapi_interface(self):
+ p = PackageTracker()
+ dbapi = PackageTrackerDbapiWrapper("/", p)
+ installed = self.make_pkg("/", "=dev-libs/X-0:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:6")
+ x5 = self.make_pkg("/xxx", "=dev-libs/X-5:6")
+
+ def check_dbapi(pkgs):
+ all_pkgs = set(dbapi)
+ self.assertEqual(len(all_pkgs), len(pkgs))
+
+ x_atom = "dev-libs/X"
+ y_atom = "dev-libs/Y"
+ matches = dbapi.cp_list(x_atom)
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.cp_list(y_atom))
+ matches = dbapi.match(x_atom)
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.match(y_atom))
+
+ check_dbapi([])
+
+ p.add_installed_pkg(installed)
+ check_dbapi([installed])
+
+ p.add_pkg(x1)
+ check_dbapi([x1])
+
+ p.remove_pkg(x1)
+ check_dbapi([installed])
+
+ dbapi.cpv_inject(x1)
+ check_dbapi([x1])
+
+ dbapi.cpv_inject(x2)
+ check_dbapi([x1, x2])
+
+ p.remove_pkg(x1)
+ check_dbapi([x2])
+
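+ # x5 was created under root "/xxx", so the wrapper for
+ # root "/" must not expose it.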
+ p.add_pkg(x5)
+ check_dbapi([x2])
+
+
+ def test_installed(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x1b = self.make_pkg("/", "=dev-libs/X-1.1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ def check_installed(x, should_contain, num_pkgs):
+ self.assertEqual(x in p, should_contain)
+ self.assertEqual(p.contains(x), should_contain)
+ self.assertEqual(p.contains(x, installed=True), should_contain)
+ self.assertEqual(p.contains(x, installed=False), False)
+ self.assertEqual(len(list(p.all_pkgs("/"))), num_pkgs)
+
+ def check_matches(atom, expected):
+ matches = list(p.match("/", Atom(atom)))
+ self.assertEqual(len(matches), len(expected))
+ for x, y in zip(matches, expected):
+ self.assertTrue(x is y)
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
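+ # Adding a non-installed package replaces the installed one
+ # in the same slot for containment and matching purposes.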
+ p.add_pkg(x2)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1b)
+ check_installed(x1, False, 1)
+ check_installed(x1b, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.remove_pkg(x2)
+ check_installed(x1, True, 2)
+ check_installed(x1b, True, 2)
+ check_matches("dev-libs/X", [x1, x1b])
+
+ def test_conflicts(self):
+ p = PackageTracker()
+ installed1 = self.make_pkg("/", "=dev-libs/X-0:0")
+ installed2 = self.make_pkg("/", "=dev-libs/X-0.1:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:4")
+ x4b = self.make_pkg("/", "=dev-libs/X-4:4b::x-repo")
+
+ def check_conflicts(expected, slot_conflicts_only=False):
+ if slot_conflicts_only:
+ conflicts = list(p.slot_conflicts())
+ else:
+ conflicts = list(p.conflicts())
+ self.assertEqual(len(conflicts), len(expected))
+ for got, exp in zip(conflicts, expected):
+ self.assertEqual(got.description, exp.description)
+ self.assertEqual(got.root, exp.root)
+ self.assertEqual(len(got.pkgs), len(exp.pkgs))
+ self.assertEqual(len(got), len(exp.pkgs))
+ for x, y in zip(got.pkgs, exp.pkgs):
+ self.assertTrue(x is y)
+ for x, y in zip(got, exp.pkgs):
+ self.assertTrue(x is y)
+ for x in exp.pkgs:
+ self.assertTrue(x in got)
+
+ check_conflicts([])
+ check_conflicts([])
+
+ p.add_installed_pkg(installed1)
+ p.add_installed_pkg(installed2)
+ check_conflicts([])
+
+ p.add_pkg(x1)
+ check_conflicts([])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2, x3])])
+ p.remove_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.remove_pkg(x2)
+ check_conflicts([])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3])])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
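+ # x4 occupies a different slot (4), so it adds no new conflict.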
+ p.add_pkg(x4)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
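+ # x4b has the same cpv as x4 but comes from another repo,
+ # which is reported as a cpv conflict.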
+ p.add_pkg(x4b)
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ self.make_conflict("cpv conflict", "/", [x4, x4b]),
+ ]
+ )
+
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ ],
+ slot_conflicts_only=True
+ )
diff --git a/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py b/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py
new file mode 100644
index 000000000..415277bc7
--- /dev/null
+++ b/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py
@@ -0,0 +1,59 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RegularSlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testRegularSlotChangeWithoutRevBumpTestCase(self):
+
+ ebuilds = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "0"
+ },
+ "app-office/libreoffice-4.0.0.2" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/boost-1.46:=",
+ "RDEPEND": ">=dev-libs/boost-1.46:=",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ installed = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ world = []
+
+ test_cases = (
+ # Test that @__auto_slot_operator_replace_installed__
+ # pulls in the available slot, even though it's
+ # different from the installed slot (0 instead of 1.52).
+ ResolverPlaygroundTestCase(
+ ["app-office/libreoffice"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = [
+ 'dev-libs/boost-1.52.0',
+ 'app-office/libreoffice-4.0.0.2'
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_abi.py b/pym/portage/tests/resolver/test_slot_abi.py
index 6381bcc4d..7263504b8 100644
--- a/pym/portage/tests/resolver/test_slot_abi.py
+++ b/pym/portage/tests/resolver/test_slot_abi.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -65,7 +65,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/icu"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-49"]),
@@ -83,7 +83,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/icu"],
- options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/icu-49"]),
@@ -95,7 +95,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-49"]),
@@ -113,7 +113,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/icu-49"]),
@@ -178,7 +178,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["sys-libs/db"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["sys-libs/db-4.8"]),
@@ -196,7 +196,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["sys-libs/db"],
- options = {"--oneshot": True, "--rebuild-if-new-slot-abi": "n"},
+ options = {"--oneshot": True, "--rebuild-if-new-slot": "n"},
success = True,
mergelist = ["sys-libs/db-4.8"]),
@@ -214,7 +214,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkg": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkg": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]sys-libs/db-4.8"]),
@@ -226,13 +226,13 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]sys-libs/db-4.8"]),
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--rebuild-if-new-slot-abi": "n"},
+ options = {"--update": True, "--deep": True, "--rebuild-if-new-slot": "n"},
success = True,
mergelist = []),
@@ -247,6 +247,89 @@ class SlotAbiTestCase(TestCase):
finally:
playground.cleanup()
+
+ def testWholeSlotConditional(self):
+ ebuilds = {
+ "dev-libs/libnl-3.2.14" : {
+ "SLOT": "3"
+ },
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "DEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )",
+ "RDEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )"
+ },
+ }
+ installed = {
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "USE": "wimax",
+ "DEPEND": "dev-libs/libnl:1.1/1.1=",
+ "RDEPEND": "dev-libs/libnl:1.1/1.1="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("USE=\"wimax\"",)
+ }
+
+ world = ["net-misc/networkmanager"]
+
+ test_cases = (
+
+ # Demonstrate bug #460304, where _slot_operator_update_probe needs
+ # to account for USE conditional deps.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ user_config = {
+ "make.conf" : ("USE=\"-wimax\"",)
+ }
+
+ test_cases = (
+
+ # Demonstrate bug #460304 again, but with inverted USE
+ # settings this time.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-libs/libnl-3.2.14', 'net-misc/networkmanager-0.9.6.4-r1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
def testWholeSlotSubSlotMix(self):
ebuilds = {
"dev-libs/glib-1.2.10" : {
@@ -312,7 +395,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/glib"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.32.3"]),
@@ -330,7 +413,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/glib"],
- options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/glib-2.32.3"]),
@@ -342,7 +425,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.32.3"]),
@@ -360,7 +443,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/glib-2.32.3"]),
diff --git a/pym/portage/tests/resolver/test_slot_abi_downgrade.py b/pym/portage/tests/resolver/test_slot_abi_downgrade.py
index 45a7555c2..08e9a9db2 100644
--- a/pym/portage/tests/resolver/test_slot_abi_downgrade.py
+++ b/pym/portage/tests/resolver/test_slot_abi_downgrade.py
@@ -61,7 +61,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/icu"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-4.8"]),
@@ -85,7 +85,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-4.8"]),
@@ -173,7 +173,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/glib"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.30.2"]),
@@ -197,7 +197,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.30.2"]),
diff --git a/pym/portage/tests/resolver/test_slot_change_without_revbump.py b/pym/portage/tests/resolver/test_slot_change_without_revbump.py
new file mode 100644
index 000000000..d85ff7e05
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_change_without_revbump.py
@@ -0,0 +1,69 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testSlotChangeWithoutRevBump(self):
+
+ ebuilds = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0/13"
+ },
+ "app-arch/libarchive-3.0.4-r1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:=",
+ "RDEPEND": "app-arch/libarchive:="
+ },
+ }
+
+ binpkgs = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ }
+
+ installed = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:0/0=",
+ "RDEPEND": "app-arch/libarchive:0/0="
+ },
+ }
+
+ world = ["kde-base/ark"]
+
+ test_cases = (
+
+ # Demonstrate bug #456208, where a sub-slot change
+ # without revbump needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["kde-base/ark"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["app-arch/libarchive-3.1.1", "kde-base/ark-4.10.0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_collisions.py b/pym/portage/tests/resolver/test_slot_collisions.py
index 95d68fe04..9fcd5294a 100644
--- a/pym/portage/tests/resolver/test_slot_collisions.py
+++ b/pym/portage/tests/resolver/test_slot_collisions.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -153,3 +153,107 @@ class SlotCollisionTestCase(TestCase):
self.assertEqual(test_case.test_success, True, test_case.fail_msg)
finally:
playground.cleanup()
+
+ def testConnectedCollision(self):
+ """
+ Ensure that we are able to solve connected slot conflicts
+ which cannot each be solved on their own.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/X-2": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
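+ # A pins =dev-libs/X-1 while B accepts any X; each X version is
+ # tied to the matching Y through the RDEPEND/PDEPEND cycle, so
+ # the X and Y conflicts can only be solved together.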
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", ("dev-libs/A-1", "dev-libs/B-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testDeeplyConnectedCollision(self):
+ """
+ Like testConnectedCollision, except that there is another
+ level of dependencies between the two conflicts.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "dev-libs/K" },
+ "dev-libs/X-2": { "RDEPEND": "dev-libs/L" },
+
+ "dev-libs/K-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/L-1": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", "dev-libs/K-1", \
+ "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSelfDEPENDRemovalCrash(self):
+ """
+ Make sure we don't try to remove a package twice. This happened
+ in the past when a package had a DEPEND on itself.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { },
+ "dev-libs/X-2": { "DEPEND": ">=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/X-1", "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_conflict_mask_update.py b/pym/portage/tests/resolver/test_slot_conflict_mask_update.py
new file mode 100644
index 000000000..a90eeac29
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_conflict_mask_update.py
@@ -0,0 +1,41 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictMaskUpdateTestCase(TestCase):
+
+ def testBacktrackingGoodVersionFirst(self):
+ """
+ When backtracking due to slot conflicts, we used to mask the version that was
+ pulled in first. This is not always a good idea; mask the highest version instead.
+ """
+
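+ # Expected failure: marked todo until the resolver masks the
+ # highest version instead.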
+ self.todo = True
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
+ "dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
+ "dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
+ "dev-libs/C-1": { },
+ "dev-libs/C-2": { },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1",],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_conflict_rebuild.py b/pym/portage/tests/resolver/test_slot_conflict_rebuild.py
new file mode 100644
index 000000000..17737cf45
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_conflict_rebuild.py
@@ -0,0 +1,408 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictRebuildTestCase(TestCase):
+
+ def testSlotConflictRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/D-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:=",
+ "RDEPEND": "app-misc/D:="
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:0/1=",
+ "RDEPEND": "app-misc/D:0/1="
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C", "app-misc/E"]
+
+ test_cases = (
+
+ # Test bug #439688, where a slot conflict prevents an
+ # upgrade and we don't want to trigger unnecessary rebuilds.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["app-misc/D-2", "app-misc/E-0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMassRebuild(self):
+ """
+ Bug 486580
+ Before this bug was fixed, emerge would backtrack for each package that needs
+ a rebuild. This could cause it to hit the backtrack limit and not rebuild all
+ needed packages.
+ """
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2/2"
+ },
+ }
+
+ installed = {
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ }
+
+ expected_mergelist = ['app-misc/A-1', 'app-misc/B-2']
+
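+ # Generate five extra consumers of app-misc/B:= so that a single
+ # backtrack pass must schedule rebuilds for all of them.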
+ for i in range(5):
+ ebuilds["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ }
+
+ installed["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ }
+ for x in ("DEPEND", "RDEPEND"):
+ ebuilds["app-misc/A-1"][x] += " app-misc/C%sC" % i
+
+ expected_mergelist.append("app-misc/C%sC-1" % i)
+
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ ignore_mergelist_order = True,
+ all_permutations = True,
+ options = {"--backtrack": 3, "--deep": True},
+ success = True,
+ mergelist = expected_mergelist),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testSlotConflictForgottenChild(self):
+ """
+ Similar to testSlotConflictMassRebuild above, but this time the rebuilds are
+ scheduled while the package causing them (the child) is not yet installed.
+ """
+ ebuilds = {
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:= app-misc/C",
+ "RDEPEND": "app-misc/B:= app-misc/C",
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1= app-misc/C",
+ "RDEPEND": "app-misc/B:1/1= app-misc/C",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/C-1', 'app-misc/A-2']),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictDepChange(self):
+ """
+ Bug 490362
+ The dependency in the ebuild was changed from a slot operator to
+ no slot operator. The vdb still contained the slot operator and emerge
+ would refuse to rebuild.
+ """
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:0/1=",
+ "RDEPEND": "app-misc/B:0/1="
+ },
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/B"],
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/A-1']),
+ )
+
+ world = ["app-misc/A"]
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMixedDependencies(self):
+ """
+ Bug 487198
+ For parents with mixed >= and < dependencies, we scheduled rebuilds for the
+ >= atom, but in the end didn't install the child update because of the < atom.
+ """
+ ebuilds = {
+ "cat/slotted-lib-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ "cat/slotted-lib-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/slotted-lib-4" : {
+ "EAPI": "5",
+ "SLOT": "4"
+ },
+ "cat/slotted-lib-5" : {
+ "EAPI": "5",
+ "SLOT": "5"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ "RDEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ },
+ }
+
+ installed = {
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ "RDEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["cat/user"],
+ options = {"--deep": True, "--update": True},
+ success = True,
+ mergelist = []),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMultiRepo(self):
+ """
+ Bug 497238
+ Different repositories contain the same cpv with different sub-slots for
+ a slot operator child.
+ Downgrading the slot operator parent would result in a sub-slot change of
+ the installed package by changing the source repository.
+ Make sure we don't perform this undesirable rebuild.
+ """
+ ebuilds = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.11.0::overlay" : { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+
+ "net-firewall/iptables-1.4.21" : { "EAPI": "5", "SLOT": "0" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+ }
+
+ installed = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:0/10=" },
+ }
+
+ world = ["sys-apps/iproute2"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--deep": True, "--update": True, "--verbose": True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_conflict_update.py b/pym/portage/tests/resolver/test_slot_conflict_update.py
new file mode 100644
index 000000000..331e5788b
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_conflict_update.py
@@ -0,0 +1,98 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictUpdateTestCase(TestCase):
+
+ def testSlotConflictUpdate(self):
+
+ ebuilds = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:="
+ },
+
+ "dev-libs/boost-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.53",
+ "RDEPEND" : "=dev-util/boost-build-1.53.0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ },
+
+ "dev-util/boost-build-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+
+ }
+
+ installed = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:0/1.52="
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ }
+
+ }
+
+ world = ["dev-cpp/libcmis", "dev-libs/boost", "app-text/podofo"]
+
+ test_cases = (
+
+ # In order to avoid a missed update, first mask lower
+ # versions that conflict with higher versions. Note that
+ # this behavior makes SlotConflictMaskUpdateTestCase
+ # fail.
+ ResolverPlaygroundTestCase(
+ world,
+ all_permutations = True,
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-util/boost-build-1.53.0', 'dev-libs/boost-1.53.0', 'dev-cpp/libcmis-0.3.1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_operator_autounmask.py b/pym/portage/tests/resolver/test_slot_operator_autounmask.py
new file mode 100644
index 000000000..624271b39
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_operator_autounmask.py
@@ -0,0 +1,120 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorAutoUnmaskTestCase(TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(SlotOperatorAutoUnmaskTestCase, self).__init__(*args, **kwargs)
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:=",
+ "KEYWORDS": "~x86"
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+ installed = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--autounmask": True, "--oneshot": True},
+ success = False,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = False,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
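
Several expectations above hinge on keyword acceptance: rebuilding dev-libs/libxml2 against icu's new 0/49 sub-slot needs the ~x86-keyworded ebuild, so --autounmask reports it via unstable_keywords instead of succeeding outright. A hedged sketch of that check, standalone rather than the resolver's actual code:

def needs_unstable_keyword(keywords, arch="x86"):
    # True when only the ~arch (testing) keyword could satisfy the arch.
    accepted = set(keywords.split())
    return arch not in accepted and "~" + arch in accepted

assert needs_unstable_keyword("~x86")      # dev-libs/libxml2-2.7.8 above
assert not needs_unstable_keyword("x86")   # a stable ebuild needs no change
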
diff --git a/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py b/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py
new file mode 100644
index 000000000..e3b53d159
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsatisfiedTestCase(TestCase):
+
+ def testSlotOperatorUnsatisfied(self):
+
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # Demonstrate bug #439694, where a broken slot-operator
+ # sub-slot dependency needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["app-misc/B-0"]),
+
+ # This doesn't trigger a rebuild, since there's no version
+ # change to trigger complete graph mode, and initially
+ # unsatisfied deps are ignored in complete graph mode anyway.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["app-misc/A-2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
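
The first case is the heart of bug #439694: the installed app-misc/B-0 recorded app-misc/A:0/1= at build time, but only sub-slot 0/2 exists now, so @world schedules a rebuild of B even though B itself has no newer version. A standalone sketch of the satisfiability test (hypothetical helper, not Portage's API):

def built_dep_satisfied(recorded_dep, available_slots):
    # recorded_dep like "app-misc/A:0/1="; available_slots like {"0/2"}.
    recorded_slot = recorded_dep.rsplit(":", 1)[1].rstrip("=")
    return recorded_slot in available_slots

assert not built_dep_satisfied("app-misc/A:0/1=", {"0/2"})  # rebuild B-0
assert built_dep_satisfied("app-misc/A:0/2=", {"0/2"})      # nothing to do
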
diff --git a/pym/portage/tests/resolver/test_slot_operator_unsolved.py b/pym/portage/tests/resolver/test_slot_operator_unsolved.py
new file mode 100644
index 000000000..c19783ddf
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_operator_unsolved.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsolvedTestCase(TestCase):
+ """
+ Demonstrate bug #456340, where an unsolved circular dependency
+ interacts with an unsatisfied built slot-operator dep.
+ """
+ def __init__(self, *args, **kwargs):
+ super(SlotOperatorUnsolvedTestCase, self).__init__(*args, **kwargs)
+
+ def testSlotOperatorUnsolved(self):
+ ebuilds = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:="
+ },
+ "dev-ruby/rdoc-3.12.1" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/hoe-2.7.0 )",
+ },
+ "dev-ruby/hoe-2.13.0" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ "RDEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ },
+ }
+
+ binpkgs = {
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ installed = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("FEATURES=test",)
+ }
+
+ world = ["net-libs/webkit-gtk", "dev-ruby/hoe"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ circular_dependency_solutions = {
+ 'dev-ruby/hoe-2.13.0': frozenset([frozenset([('test', False)])]),
+ 'dev-ruby/rdoc-3.12.1': frozenset([frozenset([('test', False)])])
+ },
+ success = False
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, user_config=user_config,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
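
The circular_dependency_solutions expectation encodes the only way out: dev-ruby/hoe and dev-ruby/rdoc depend on each other solely under USE="test", which make.conf's FEATURES=test switches on, so disabling "test" on either package removes one edge of the cycle. A toy model of that reasoning, an assumed simplification rather than the resolver's solver:

# Conditional dependency edges: (package, USE flag) -> dependency.
edges = {
    ("dev-ruby/hoe-2.13.0", "test"): "dev-ruby/rdoc-3.12.1",
    ("dev-ruby/rdoc-3.12.1", "test"): "dev-ruby/hoe-2.13.0",
}

def cycle_survives(disabled):
    # Every conditional edge must stay active for the cycle to persist.
    return all(key not in disabled for key in edges)

assert cycle_survives(set())
assert not cycle_survives({("dev-ruby/hoe-2.13.0", "test")})
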
diff --git a/pym/portage/tests/resolver/test_targetroot.py b/pym/portage/tests/resolver/test_targetroot.py
new file mode 100644
index 000000000..db6c60de3
--- /dev/null
+++ b/pym/portage/tests/resolver/test_targetroot.py
@@ -0,0 +1,85 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class TargetRootTestCase(TestCase):
+
+ def testTargetRoot(self):
+ ebuilds = {
+ "dev-lang/python-3.2": {
+ "EAPI": "5-hdepend",
+ "IUSE": "targetroot",
+ "HDEPEND": "targetroot? ( ~dev-lang/python-3.2 )",
+ },
+ "dev-libs/A-1": {
+ "EAPI": "4",
+ "DEPEND": "dev-libs/B",
+ "RDEPEND": "dev-libs/C",
+ },
+ "dev-libs/B-1": {},
+ "dev-libs/C-1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": True},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": "rdeps"},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": True},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1{targetroot}", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": "rdeps"},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=True,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=False,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
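
The "{targetroot}" suffixes mark packages built for the target ROOT rather than the build host, and the three --root-deps variants route DEPEND and RDEPEND differently. A compact model of that routing follows; it is a simplification for illustration, not emerge's implementation.

def dep_root(dep_type, root_deps=None):
    # Default: build deps (DEPEND) go to the host, runtime deps to ROOT.
    # --root-deps: everything goes to ROOT.
    # --root-deps=rdeps: runtime deps go to ROOT, build deps are dropped.
    if root_deps is True:
        return "target"
    if root_deps == "rdeps":
        return "target" if dep_type == "RDEPEND" else None
    return "host" if dep_type == "DEPEND" else "target"

assert dep_root("DEPEND") == "host"          # dev-libs/B-1 has no {targetroot}
assert dep_root("RDEPEND") == "target"       # dev-libs/C-1{targetroot}
assert dep_root("DEPEND", "rdeps") is None   # B-1 absent from that mergelist
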
diff --git a/pym/portage/tests/resolver/test_unpack_dependencies.py b/pym/portage/tests/resolver/test_unpack_dependencies.py
new file mode 100644
index 000000000..cfceff4b1
--- /dev/null
+++ b/pym/portage/tests/resolver/test_unpack_dependencies.py
@@ -0,0 +1,65 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UnpackDependenciesTestCase(TestCase):
+ def testUnpackDependencies(self):
+ distfiles = {
+ "A-1.tar.gz": b"binary\0content",
+ "B-1.TAR.XZ": b"binary\0content",
+ "B-docs-1.tar.bz2": b"binary\0content",
+ "C-1.TAR.XZ": b"binary\0content",
+ "C-docs-1.tar.bz2": b"binary\0content",
+ }
+
+ ebuilds = {
+ "dev-libs/A-1": {"SRC_URI": "A-1.tar.gz", "EAPI": "5-progress"},
+ "dev-libs/B-1": {"IUSE": "doc", "SRC_URI": "B-1.TAR.XZ doc? ( B-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "dev-libs/C-1": {"IUSE": "doc", "SRC_URI": "C-1.TAR.XZ doc? ( C-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "app-arch/bzip2-1": {},
+ "app-arch/gzip-1": {},
+ "app-arch/tar-1": {},
+ "app-arch/xz-utils-1": {},
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "unpack_dependencies/5-progress": (
+ "tar.bz2 app-arch/tar app-arch/bzip2",
+ "tar.gz app-arch/tar app-arch/gzip",
+ "tar.xz app-arch/tar app-arch/xz-utils",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/gzip-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "app-arch/bzip2-1", "dev-libs/C-1"]),
+ )
+
+ user_config = {
+ "package.use": ("dev-libs/C doc",)
+ }
+
+ playground = ResolverPlayground(distfiles=distfiles, ebuilds=ebuilds, repo_configs=repo_configs, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
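
Each expected mergelist follows from the repo's unpack_dependencies table: the archive suffix of every SRC_URI entry, matched case-insensitively (hence B-1.TAR.XZ), selects the unpackers that must be installed first. A hedged sketch of that lookup:

rules = {
    "tar.bz2": ["app-arch/tar", "app-arch/bzip2"],
    "tar.gz": ["app-arch/tar", "app-arch/gzip"],
    "tar.xz": ["app-arch/tar", "app-arch/xz-utils"],
}

def unpack_deps(filename):
    name = filename.lower()
    matches = [suffix for suffix in rules if name.endswith(suffix)]
    return rules[max(matches, key=len)] if matches else []

assert unpack_deps("A-1.tar.gz") == ["app-arch/tar", "app-arch/gzip"]
assert unpack_deps("B-1.TAR.XZ") == ["app-arch/tar", "app-arch/xz-utils"]
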
diff --git a/pym/portage/tests/resolver/test_use_aliases.py b/pym/portage/tests/resolver/test_use_aliases.py
new file mode 100644
index 000000000..7c2debbb1
--- /dev/null
+++ b/pym/portage/tests/resolver/test_use_aliases.py
@@ -0,0 +1,131 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseAliasesTestCase(TestCase):
+ def testUseAliases(self):
+ ebuilds = {
+ "dev-libs/A-1": {"DEPEND": "dev-libs/K[x]", "RDEPEND": "dev-libs/K[x]", "EAPI": "5"},
+ "dev-libs/B-1": {"DEPEND": "dev-libs/L[x]", "RDEPEND": "dev-libs/L[x]", "EAPI": "5"},
+ "dev-libs/C-1": {"DEPEND": "dev-libs/M[xx]", "RDEPEND": "dev-libs/M[xx]", "EAPI": "5"},
+ "dev-libs/D-1": {"DEPEND": "dev-libs/N[-x]", "RDEPEND": "dev-libs/N[-x]", "EAPI": "5"},
+ "dev-libs/E-1": {"DEPEND": "dev-libs/O[-xx]", "RDEPEND": "dev-libs/O[-xx]", "EAPI": "5"},
+ "dev-libs/F-1": {"DEPEND": "dev-libs/P[-xx]", "RDEPEND": "dev-libs/P[-xx]", "EAPI": "5"},
+ "dev-libs/G-1": {"DEPEND": "dev-libs/Q[x-y]", "RDEPEND": "dev-libs/Q[x-y]", "EAPI": "5"},
+ "dev-libs/H-1": {"DEPEND": "=dev-libs/R-1*[yy]", "RDEPEND": "=dev-libs/R-1*[yy]", "EAPI": "5"},
+ "dev-libs/H-2": {"DEPEND": "=dev-libs/R-2*[yy]", "RDEPEND": "=dev-libs/R-2*[yy]", "EAPI": "5"},
+ "dev-libs/I-1": {"DEPEND": "dev-libs/S[y-z]", "RDEPEND": "dev-libs/S[y-z]", "EAPI": "5"},
+ "dev-libs/I-2": {"DEPEND": "dev-libs/S[y_z]", "RDEPEND": "dev-libs/S[y_z]", "EAPI": "5"},
+ "dev-libs/J-1": {"DEPEND": "dev-libs/T[x]", "RDEPEND": "dev-libs/T[x]", "EAPI": "5"},
+ "dev-libs/K-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/K-2::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/L-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/M-1::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/N-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/N-2::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/P-1::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/Q-1::repo2": {"IUSE": "X.Y", "EAPI": "5-progress"},
+ "dev-libs/R-1::repo1": {"IUSE": "Y", "EAPI": "5-progress"},
+ "dev-libs/R-2::repo1": {"IUSE": "y", "EAPI": "5-progress"},
+ "dev-libs/S-1::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/S-2::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/T-1::repo1": {"IUSE": "+X", "EAPI": "5"},
+ }
+
+ installed = {
+ "dev-libs/L-2::repo1": {"IUSE": "+X", "USE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-2::repo1": {"IUSE": "X", "USE": "", "EAPI": "5-progress"},
+ }
+
+ repo_configs = {
+ "repo1": {
+ "use.aliases": ("X x xx",),
+ "package.use.aliases": (
+ "=dev-libs/R-1* Y yy",
+ "=dev-libs/R-2* y yy",
+ )
+ },
+ "repo2": {
+ "eapi": ("5-progress",),
+ "use.aliases": ("X.Y x-y",),
+ "package.use.aliases": (
+ "=dev-libs/S-1* Y.Z y-z",
+ "=dev-libs/S-2* Y.Z y_z",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ mergelist = ["dev-libs/K-2", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/M-1", "dev-libs/C-1"],
+ use_changes = {"dev-libs/M-1": {"X": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ mergelist = ["dev-libs/N-2", "dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ mergelist = ["dev-libs/E-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/P-1", "dev-libs/F-1"],
+ use_changes = {"dev-libs/P-1": {"X": False}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/Q-1", "dev-libs/G-1"],
+ use_changes = {"dev-libs/Q-1": {"X.Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-1", "dev-libs/H-1"],
+ use_changes = {"dev-libs/R-1": {"Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-2", "dev-libs/H-2"],
+ use_changes = {"dev-libs/R-2": {"y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-1", "dev-libs/I-1"],
+ use_changes = {"dev-libs/S-1": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-2", "dev-libs/I-2"],
+ use_changes = {"dev-libs/S-2": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/J"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, repo_configs=repo_configs)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
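
The alias files let a dependency written against one flag name match a package that declares another in IUSE: repo1's "X x xx" makes [x] and [xx] resolve to X, repo2 maps dotted flags such as X.Y, and the final dev-libs/J case still fails because dev-libs/T-1 uses plain EAPI 5, where the aliases presumably do not apply. A minimal sketch of the lookup, an assumed simplification of the real config handling:

aliases = {"X": {"x", "xx"}}  # from repo1's use.aliases line "X x xx"

def resolve_flag(requested, iuse):
    # Return the IUSE flag that satisfies the requested dep flag, if any.
    if requested in iuse:
        return requested
    for real, names in aliases.items():
        if requested in names and real in iuse:
            return real
    return None

assert resolve_flag("x", {"X"}) == "X"    # dev-libs/A's [x] matched by K-2
assert resolve_flag("xx", {"X"}) == "X"   # dev-libs/C's [xx] matched by M-1
assert resolve_flag("x", {"y"}) is None   # no alias applies: dep unmatched
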
diff --git a/pym/portage/tests/resolver/test_useflags.py b/pym/portage/tests/resolver/test_useflags.py
new file mode 100644
index 000000000..0a5f3b3ff
--- /dev/null
+++ b/pym/portage/tests/resolver/test_useflags.py
@@ -0,0 +1,78 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseFlagsTestCase(TestCase):
+
+ def testUseFlags(self):
+ ebuilds = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X Y", },
+ }
+
+ installed = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X", },
+ }
+
+ binpkgs = installed
+
+ user_config = {
+ "package.use": ( "dev-libs/A X", ),
+ "use.force": ( "Y", ),
+ }
+
+ test_cases = (
+ # default: don't reinstall on use flag change
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--selective": True, "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ # default: respect use flags for binpkgs
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ # --binpkg-respect-use=n: use binpkgs with different use flags
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--binpkg-respect-use": "n", "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/A-1"]),
+
+ # --reinstall=changed-use: reinstall if use flag changed
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ # --reinstall=changed-use: don't reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ # --newuse: reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
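
The two --reinstall=changed-use cases encode its defining rule: USE is compared only over flags that both the installed instance and the new ebuild declare, so the newly added (and use.force-enabled) Y on dev-libs/B triggers nothing, while --newuse reacts to the IUSE change itself. A hedged sketch:

def changed_use(old_iuse, old_use, new_iuse, new_use):
    # Compare USE only over the flags that both versions declare.
    common = old_iuse & new_iuse
    return (old_use & common) != (new_use & common)

# dev-libs/B: installed IUSE "X", new IUSE "X Y" with Y forced on.
assert not changed_use({"X"}, set(), {"X", "Y"}, {"Y"})  # no reinstall
assert {"X"} != {"X", "Y"}  # --newuse sees the IUSE change and reinstalls
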
diff --git a/pym/portage/tests/runTests b/pym/portage/tests/runTests
index 1c1008dff..9c452764f 100755
--- a/pym/portage/tests/runTests
+++ b/pym/portage/tests/runTests
@@ -1,6 +1,6 @@
-#!/usr/bin/python -Wd
+#!/usr/bin/python -bWd
# runTests.py -- Portage Unit Test Functionality
-# Copyright 2006-2012 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os, sys
@@ -29,9 +29,10 @@ os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
# Insert our parent dir so we can do shiny import "tests"
# This line courtesy of Marienz and Pkgcore ;)
-sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.abspath(__file__)))))
+sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))))
import portage
+portage._internal_caller = True
# Ensure that we don't instantiate portage.settings, so that tests should
# work the same regardless of global configuration file state/existence.
@@ -44,11 +45,17 @@ import portage.tests as tests
from portage.const import PORTAGE_BIN_PATH
path = os.environ.get("PATH", "").split(":")
path = [x for x in path if x]
-if not path or not os.path.samefile(path[0], PORTAGE_BIN_PATH):
+
+insert_bin_path = True
+try:
+ insert_bin_path = not path or \
+ not os.path.samefile(path[0], PORTAGE_BIN_PATH)
+except OSError:
+ pass
+
+if insert_bin_path:
path.insert(0, PORTAGE_BIN_PATH)
os.environ["PATH"] = ":".join(path)
-del path
-
if __name__ == "__main__":
sys.exit(tests.main())
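
The PATH hunk above exists because os.path.samefile() raises OSError when path[0] no longer exists; the old one-liner would crash instead of simply prepending PORTAGE_BIN_PATH. The same pattern as a standalone sketch:

import os

def ensure_first_in_path(path, bin_path):
    insert_bin_path = True
    try:
        insert_bin_path = not path or not os.path.samefile(path[0], bin_path)
    except OSError:
        pass  # stale or bogus entry: fall through and insert bin_path
    if insert_bin_path:
        path.insert(0, bin_path)
    return path

assert ensure_first_in_path(["/no/such/dir"], "/usr/bin")[0] == "/usr/bin"
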
diff --git a/pym/portage/tests/unicode/test_string_format.py b/pym/portage/tests/unicode/test_string_format.py
index fb6e8e02e..9d4366a91 100644
--- a/pym/portage/tests/unicode/test_string_format.py
+++ b/pym/portage/tests/unicode/test_string_format.py
@@ -1,15 +1,18 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
-from portage import _encodings, _unicode_decode
+from portage import _encodings, _unicode_encode
from portage.exception import PortageException
from portage.tests import TestCase
from _emerge.DependencyArg import DependencyArg
from _emerge.UseFlagDisplay import UseFlagDisplay
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
STR_IS_UNICODE = sys.hexversion >= 0x3000000
@@ -20,27 +23,25 @@ class StringFormatTestCase(TestCase):
which may be either python2 or python3.
"""
- # In order to get some unicode test strings in a way that works in
- # both python2 and python3, write them here as byte strings and
- # decode them before use. This assumes _encodings['content'] is
- # utf_8.
+ # We need unicode_literals in order to get some unicode test strings
+ # in a way that works in both python2 and python3.
unicode_strings = (
- b'\xE2\x80\x98',
- b'\xE2\x80\x99',
+ '\u2018',
+ '\u2019',
)
def testDependencyArg(self):
self.assertEqual(_encodings['content'], 'utf_8')
- for arg_bytes in self.unicode_strings:
- arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
dependency_arg = DependencyArg(arg=arg_unicode)
- # Force unicode format string so that __unicode__() is
- # called in python2.
- formatted_str = _unicode_decode("%s") % (dependency_arg,)
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (dependency_arg,)
self.assertEqual(formatted_str, arg_unicode)
if STR_IS_UNICODE:
@@ -52,20 +53,20 @@ class StringFormatTestCase(TestCase):
else:
# Test the __str__ method which returns encoded bytes in python2
- formatted_bytes = "%s" % (dependency_arg,)
+ formatted_bytes = b"%s" % (dependency_arg,)
self.assertEqual(formatted_bytes, arg_bytes)
def testPortageException(self):
self.assertEqual(_encodings['content'], 'utf_8')
- for arg_bytes in self.unicode_strings:
- arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
e = PortageException(arg_unicode)
- # Force unicode format string so that __unicode__() is
- # called in python2.
- formatted_str = _unicode_decode("%s") % (e,)
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
self.assertEqual(formatted_str, arg_unicode)
if STR_IS_UNICODE:
@@ -77,7 +78,7 @@ class StringFormatTestCase(TestCase):
else:
# Test the __str__ method which returns encoded bytes in python2
- formatted_bytes = "%s" % (e,)
+ formatted_bytes = b"%s" % (e,)
self.assertEqual(formatted_bytes, arg_bytes)
def testUseFlagDisplay(self):
@@ -86,13 +87,12 @@ class StringFormatTestCase(TestCase):
for enabled in (True, False):
for forced in (True, False):
- for arg_bytes in self.unicode_strings:
- arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ for arg_unicode in self.unicode_strings:
e = UseFlagDisplay(arg_unicode, enabled, forced)
- # Force unicode format string so that __unicode__() is
- # called in python2.
- formatted_str = _unicode_decode("%s") % (e,)
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
self.assertEqual(isinstance(formatted_str, basestring), True)
if STR_IS_UNICODE:
@@ -104,5 +104,5 @@ class StringFormatTestCase(TestCase):
else:
# Test the __str__ method which returns encoded bytes in python2
- formatted_bytes = "%s" % (e,)
+ formatted_bytes = b"%s" % (e,)
self.assertEqual(isinstance(formatted_bytes, bytes), True)
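
The rewrite leans on a python2/python3 split: with unicode_literals in effect, a bare "%s" is a unicode format string on both interpreters, which makes python2 route through __unicode__(), while b"%s" exercises the bytes-returning __str__() path. A small self-contained demonstration of the same idea:

from __future__ import unicode_literals
import sys

class Curly(object):
    def __unicode__(self):
        return '\u2018'
    def __str__(self):
        if sys.hexversion < 0x3000000:
            return self.__unicode__().encode('utf_8')
        return '\u2018'

assert "%s" % (Curly(),) == '\u2018'  # text path on python2 and python3
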
diff --git a/pym/portage/tests/update/test_move_ent.py b/pym/portage/tests/update/test_move_ent.py
index 2504dee2b..d9647a95e 100644
--- a/pym/portage/tests/update/test_move_ent.py
+++ b/pym/portage/tests/update/test_move_ent.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import textwrap
@@ -59,12 +59,12 @@ class MoveEntTestCase(TestCase):
settings = playground.settings
trees = playground.trees
eroot = settings["EROOT"]
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
bindb = trees[eroot]["bintree"].dbapi
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
try:
ensure_dirs(updates_dir)
diff --git a/pym/portage/tests/update/test_move_slot_ent.py b/pym/portage/tests/update/test_move_slot_ent.py
index fcb0cc64c..3e49e1144 100644
--- a/pym/portage/tests/update/test_move_slot_ent.py
+++ b/pym/portage/tests/update/test_move_slot_ent.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import textwrap
@@ -94,12 +94,12 @@ class MoveSlotEntTestCase(TestCase):
settings = playground.settings
trees = playground.trees
eroot = settings["EROOT"]
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
bindb = trees[eroot]["bintree"].dbapi
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
try:
ensure_dirs(updates_dir)
diff --git a/pym/portage/tests/update/test_update_dbentry.py b/pym/portage/tests/update/test_update_dbentry.py
index e13cfed74..88951149a 100644
--- a/pym/portage/tests/update/test_update_dbentry.py
+++ b/pym/portage/tests/update/test_update_dbentry.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import re
@@ -6,14 +6,107 @@ import textwrap
import portage
from portage import os
+from portage.dep import Atom
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.update import update_dbentry
from portage.util import ensure_dirs
+from portage.versions import _pkg_str
from portage._global_updates import _do_global_updates
class UpdateDbentryTestCase(TestCase):
def testUpdateDbentryTestCase(self):
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " dev-libs/A:0 ", " dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "2",
+ " dev-libs/A[foo] ", " dev-libs/B[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/B:0/1=[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/B:0/1[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/B:0/0[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/B:0=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " dev-libs/A:0 ", " dev-libs/A:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/A-1:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/A:1/1=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/A:1=[foo] "),
+ )
+ for update_cmd, eapi, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, eapi=eapi)
+ self.assertEqual(result, output_str)
+
+
+ def testUpdateDbentryBlockerTestCase(self):
+ """
+ Avoid creating self-blockers for bug #367215.
+ """
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/A "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/B "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/A:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/A-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/B-1 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/A-1 "),
+
+ )
+ for update_cmd, parent, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, parent=parent)
+ self.assertEqual(result, output_str)
+
+ def testUpdateDbentryDbapiTestCase(self):
ebuilds = {
@@ -96,14 +189,14 @@ class UpdateDbentryTestCase(TestCase):
settings = playground.settings
trees = playground.trees
eroot = settings["EROOT"]
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
bindb = trees[eroot]["bintree"].dbapi
setconfig = trees[eroot]["root_config"].setconfig
selected_set = setconfig.getSets()["selected"]
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
try:
ensure_dirs(updates_dir)
@@ -143,7 +236,7 @@ class UpdateDbentryTestCase(TestCase):
self.assertTrue(old_pattern.search(rdepend) is None)
self.assertTrue("dev-libs/M-moved" in rdepend)
- # EAPI 4-python N -> N.moved
+ # EAPI 4-python/*-progress N -> N.moved
rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
old_pattern = re.compile(r"\bdev-libs/N(\s|$)")
self.assertTrue(old_pattern.search(rdepend) is None)
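
The blocker cases capture the bug #367215 rule: a package move rewrites blocker atoms as well, unless the rewritten blocker would match the very package whose db entry is being updated. A plain-string sketch of the guard; the real code parses Atom objects and uses match_from_list, so version-ranged blockers are handled more precisely than here.

def rewrite_blocker(atom, old_cp, new_cp, parent_cp):
    if old_cp not in atom:
        return atom
    new_atom = atom.replace(old_cp, new_cp, 1)
    if atom.startswith("!") and parent_cp == new_cp:
        return atom  # rewriting would create a self-blocker: keep as-is
    return new_atom

assert rewrite_blocker("!dev-libs/A", "dev-libs/A",
    "dev-libs/B", "dev-libs/B") == "!dev-libs/A"
assert rewrite_blocker("!dev-libs/A", "dev-libs/A",
    "dev-libs/B", "dev-libs/C") == "!dev-libs/B"
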
diff --git a/pym/portage/tests/util/test_getconfig.py b/pym/portage/tests/util/test_getconfig.py
index f13b75358..e5fd60f6d 100644
--- a/pym/portage/tests/util/test_getconfig.py
+++ b/pym/portage/tests/util/test_getconfig.py
@@ -1,13 +1,15 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
from portage import os
+from portage import shutil
from portage import _unicode_encode
from portage.const import PORTAGE_BASE_PATH
from portage.tests import TestCase
from portage.util import getconfig
+from portage.exception import ParseError
class GetConfigTestCase(TestCase):
"""
@@ -18,8 +20,8 @@ class GetConfigTestCase(TestCase):
_cases = {
'FETCHCOMMAND' : 'wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"',
'FETCHCOMMAND_RSYNC' : 'rsync -avP "${URI}" "${DISTDIR}/${FILE}"',
- 'FETCHCOMMAND_SFTP' : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec sftp -P \\${port} \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}"',
- 'FETCHCOMMAND_SSH' : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}"',
+ 'FETCHCOMMAND_SFTP' : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; eval \\"declare -a ssh_opts=(\\${3})\\" ; exec sftp -P \\${port} \\"\\${ssh_opts[@]}\\" \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
+ 'FETCHCOMMAND_SSH' : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port} \\${3}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
'PORTAGE_ELOG_MAILSUBJECT' : '[portage] ebuild log for ${PACKAGE} on ${HOST}'
}
@@ -31,6 +33,29 @@ class GetConfigTestCase(TestCase):
for k, v in self._cases.items():
self.assertEqual(d[k], v)
+ def testGetConfigSourceLex(self):
+ try:
+ tempdir = tempfile.mkdtemp()
+ make_conf_file = os.path.join(tempdir, 'make.conf')
+ with open(make_conf_file, 'w') as f:
+ f.write('source "${DIR}/sourced_file"\n')
+ sourced_file = os.path.join(tempdir, 'sourced_file')
+ with open(sourced_file, 'w') as f:
+ f.write('PASSES_SOURCING_TEST="True"\n')
+
+ d = getconfig(make_conf_file, allow_sourcing=True, expand={"DIR": tempdir})
+
+ # PASSES_SOURCING_TEST should exist in getconfig result.
+ self.assertTrue(d is not None)
+ self.assertEqual("True", d['PASSES_SOURCING_TEST'])
+
+ # With allow_sourcing=True and an empty expand map, "${DIR}" stays
+ # unexpanded, so getconfig raises a ParseError for the bad source path.
+ self.assertRaisesMsg("An empty expand map should throw an exception",
+ ParseError, getconfig, make_conf_file, allow_sourcing=True, expand={})
+ finally:
+ shutil.rmtree(tempdir)
+
def testGetConfigProfileEnv(self):
# Test the mode which is used to parse /etc/env.d and /etc/profile.env.
diff --git a/pym/portage/tests/util/test_stackDictList.py b/pym/portage/tests/util/test_stackDictList.py
index 678001c38..25a723c69 100644
--- a/pym/portage/tests/util/test_stackDictList.py
+++ b/pym/portage/tests/util/test_stackDictList.py
@@ -8,10 +8,12 @@ class StackDictListTestCase(TestCase):
def testStackDictList(self):
from portage.util import stack_dictlist
-
- tests = [ ({'a':'b'},{'x':'y'},False,{'a':['b'],'x':['y']}) ]
- tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-*']},True,{} ))
- tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-x86']},True,{'KEYWORDS':['alpha']} ))
+
+ tests = [
+ ({'a': 'b'}, {'x': 'y'}, False, {'a': ['b'], 'x': ['y']}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-*']}, True, {}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-x86']}, True, {'KEYWORDS': ['alpha']}),
+ ]
for test in tests:
self.assertEqual(
- stack_dictlist([test[0],test[1]],incremental=test[2]), test[3] )
+ stack_dictlist([test[0], test[1]], incremental=test[2]), test[3])
diff --git a/pym/portage/tests/util/test_stackDicts.py b/pym/portage/tests/util/test_stackDicts.py
index 0d2cadd0c..0c1dcdb78 100644
--- a/pym/portage/tests/util/test_stackDicts.py
+++ b/pym/portage/tests/util/test_stackDicts.py
@@ -7,30 +7,27 @@ from portage.util import stack_dicts
class StackDictsTestCase(TestCase):
-
- def testStackDictsPass(self):
-
- tests = [ ( [ { "a":"b" }, { "b":"c" } ], { "a":"b", "b":"c" },
- False, [], False ),
- ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
- True, [], False ),
- ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
- False, ["a"], False ),
- ( [ { "a":"b" }, None ], { "a":"b" },
- False, [], True ),
- ( [ None ], {}, False, [], False ),
- ( [ None, {}], {}, False, [], True ) ]
+ def testStackDictsPass(self):
+ tests = [
+ ([{'a': 'b'}, {'b': 'c'}], {'a': 'b', 'b': 'c'}, False, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, True, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, ['a'], False),
+ ([{'a': 'b'}, None], {'a': 'b'}, False, [], True),
+ ([None], {}, False, [], False),
+ ([None, {}], {}, False, [], True)
+ ]
for test in tests:
- result = stack_dicts( test[0], test[2], test[3], test[4] )
- self.assertEqual( result, test[1] )
-
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertEqual(result, test[1])
+
def testStackDictsFail(self):
-
- tests = [ ( [ None, {} ], None, False, [], True ),
- ( [ { "a":"b"}, {"a":"c" } ], { "a":"b c" },
- False, [], False ) ]
+
+ tests = [
+ ([None, {}], None, False, [], True),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, [], False)
+ ]
for test in tests:
- result = stack_dicts( test[0], test[2], test[3], test[4] )
- self.assertNotEqual( result , test[1] )
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertNotEqual(result, test[1])
diff --git a/pym/portage/tests/util/test_stackLists.py b/pym/portage/tests/util/test_stackLists.py
index e52477255..3ba69ecd2 100644
--- a/pym/portage/tests/util/test_stackLists.py
+++ b/pym/portage/tests/util/test_stackLists.py
@@ -6,14 +6,16 @@ from portage.tests import TestCase
from portage.util import stack_lists
class StackListsTestCase(TestCase):
-
+
def testStackLists(self):
-
- tests = [ ( [ ['a','b','c'], ['d','e','f'] ], ['a','c','b','e','d','f'], False ),
- ( [ ['a','x'], ['b','x'] ], ['a','x','b'], False ),
- ( [ ['a','b','c'], ['-*'] ], [], True ),
- ( [ ['a'], ['-a'] ], [], True ) ]
+
+ tests = [
+ ([['a', 'b', 'c'], ['d', 'e', 'f']], ['a', 'c', 'b', 'e', 'd', 'f'], False),
+ ([['a', 'x'], ['b', 'x']], ['a', 'x', 'b'], False),
+ ([['a', 'b', 'c'], ['-*']], [], True),
+ ([['a'], ['-a']], [], True)
+ ]
for test in tests:
- result = stack_lists( test[0], test[2] )
- self.assertEqual( set(result) , set(test[1]) )
+ result = stack_lists(test[0], test[2])
+ self.assertEqual(set(result), set(test[1]))
diff --git a/pym/portage/tests/util/test_uniqueArray.py b/pym/portage/tests/util/test_uniqueArray.py
index e23428c31..aae88cce8 100644
--- a/pym/portage/tests/util/test_uniqueArray.py
+++ b/pym/portage/tests/util/test_uniqueArray.py
@@ -7,18 +7,20 @@ from portage.tests import TestCase
from portage.util import unique_array
class UniqueArrayTestCase(TestCase):
-
+
def testUniqueArrayPass(self):
"""
test portage.util.uniqueArray()
"""
- tests = [ ( ["a","a","a",os,os,[],[],[]], ['a',os,[]] ),
- ( [1,1,1,2,3,4,4] , [1,2,3,4]) ]
+ tests = [
+ (['a', 'a', 'a', os, os, [], [], []], ['a', os, []]),
+ ([1, 1, 1, 2, 3, 4, 4], [1, 2, 3, 4])
+ ]
for test in tests:
- result = unique_array( test[0] )
+ result = unique_array(test[0])
for item in test[1]:
number = result.count(item)
- self.assertFalse( number != 1, msg=("%s contains %s of %s, "
- "should be only 1") % (result, number, item) )
+ self.assertFalse(number != 1, msg=("%s contains %s of %s, "
+ "should be only 1") % (result, number, item))
diff --git a/pym/portage/tests/util/test_varExpand.py b/pym/portage/tests/util/test_varExpand.py
index 7b528d6db..498b50ead 100644
--- a/pym/portage/tests/util/test_varExpand.py
+++ b/pym/portage/tests/util/test_varExpand.py
@@ -6,20 +6,20 @@ from portage.tests import TestCase
from portage.util import varexpand
class VarExpandTestCase(TestCase):
-
+
def testVarExpandPass(self):
- varDict = { "a":"5", "b":"7", "c":"-5" }
+ varDict = {"a": "5", "b": "7", "c": "-5"}
for key in varDict:
- result = varexpand( "$%s" % key, varDict )
-
- self.assertFalse( result != varDict[key],
- msg="Got %s != %s, from varexpand( %s, %s )" % \
- ( result, varDict[key], "$%s" % key, varDict ) )
- result = varexpand( "${%s}" % key, varDict )
- self.assertFalse( result != varDict[key],
- msg="Got %s != %s, from varexpand( %s, %s )" % \
- ( result, varDict[key], "${%s}" % key, varDict ) )
+ result = varexpand("$%s" % key, varDict)
+
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "$%s" % key, varDict))
+ result = varexpand("${%s}" % key, varDict)
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "${%s}" % key, varDict))
def testVarExpandBackslashes(self):
"""
@@ -49,44 +49,44 @@ class VarExpandTestCase(TestCase):
("\\'", "\\'"),
]
for test in tests:
- result = varexpand( test[0], varDict )
- self.assertFalse( result != test[1],
- msg="Got %s != %s from varexpand( %s, %s )" \
- % ( result, test[1], test[0], varDict ) )
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
def testVarExpandDoubleQuotes(self):
-
- varDict = { "a":"5" }
- tests = [ ("\"${a}\"", "\"5\"") ]
+
+ varDict = {"a": "5"}
+ tests = [("\"${a}\"", "\"5\"")]
for test in tests:
- result = varexpand( test[0], varDict )
- self.assertFalse( result != test[1],
- msg="Got %s != %s from varexpand( %s, %s )" \
- % ( result, test[1], test[0], varDict ) )
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
def testVarExpandSingleQuotes(self):
-
- varDict = { "a":"5" }
- tests = [ ("\'${a}\'", "\'${a}\'") ]
+
+ varDict = {"a": "5"}
+ tests = [("\'${a}\'", "\'${a}\'")]
for test in tests:
- result = varexpand( test[0], varDict )
- self.assertFalse( result != test[1],
- msg="Got %s != %s from varexpand( %s, %s )" \
- % ( result, test[1], test[0], varDict ) )
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
def testVarExpandFail(self):
- varDict = { "a":"5", "b":"7", "c":"15" }
+ varDict = {"a": "5", "b": "7", "c": "15"}
- testVars = [ "fail" ]
+ testVars = ["fail"]
for var in testVars:
- result = varexpand( "$%s" % var, varDict )
- self.assertFalse( len(result),
- msg="Got %s == %s, from varexpand( %s, %s )" \
- % ( result, var, "$%s" % var, varDict ) )
-
- result = varexpand( "${%s}" % var, varDict )
- self.assertFalse( len(result),
- msg="Got %s == %s, from varexpand( %s, %s )" \
- % ( result, var, "${%s}" % var, varDict ) )
+ result = varexpand("$%s" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "$%s" % var, varDict))
+
+ result = varexpand("${%s}" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "${%s}" % var, varDict))
diff --git a/pym/portage/tests/util/test_whirlpool.py b/pym/portage/tests/util/test_whirlpool.py
index dd0de899a..fbe7cae56 100644
--- a/pym/portage/tests/util/test_whirlpool.py
+++ b/pym/portage/tests/util/test_whirlpool.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -11,6 +11,6 @@ from portage.tests import TestCase
class WhirlpoolTestCase(TestCase):
def testBundledWhirlpool(self):
# execute the tests bundled with the whirlpool module
- retval = subprocess.call([portage._python_interpreter, "-Wd",
+ retval = subprocess.call([portage._python_interpreter, "-b", "-Wd",
os.path.join(PORTAGE_PYM_PATH, "portage/util/whirlpool.py")])
self.assertEqual(retval, os.EX_OK)
diff --git a/pym/portage/tests/versions/test_cpv_sort_key.py b/pym/portage/tests/versions/test_cpv_sort_key.py
index a223d78c7..eeb0eae69 100644
--- a/pym/portage/tests/versions/test_cpv_sort_key.py
+++ b/pym/portage/tests/versions/test_cpv_sort_key.py
@@ -8,9 +8,10 @@ class CpvSortKeyTestCase(TestCase):
def testCpvSortKey(self):
- tests = [ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
- ( "a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
+ tests = [
+ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
+ ("a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
]
for test in tests:
- self.assertEqual( tuple(sorted(test[0], key=cpv_sort_key())), test[1] )
+ self.assertEqual(tuple(sorted(test[0], key=cpv_sort_key())), test[1])
diff --git a/pym/portage/tests/versions/test_vercmp.py b/pym/portage/tests/versions/test_vercmp.py
index aa7969ce8..78fe7ede8 100644
--- a/pym/portage/tests/versions/test_vercmp.py
+++ b/pym/portage/tests/versions/test_vercmp.py
@@ -8,10 +8,11 @@ from portage.versions import vercmp
class VerCmpTestCase(TestCase):
""" A simple testCase for portage.versions.vercmp()
"""
-
+
def testVerCmpGreater(self):
-
- tests = [ ( "6.0", "5.0"), ("5.0","5"),
+
+ tests = [
+ ("6.0", "5.0"), ("5.0", "5"),
("1.0-r1", "1.0-r0"),
("1.0-r1", "1.0"),
("cvs.9999", "9999"),
@@ -24,14 +25,15 @@ class VerCmpTestCase(TestCase):
("12.2.5", "12.2b"),
]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1] ) <= 0, msg="%s < %s? Wrong!" % (test[0],test[1]) )
+ self.assertFalse(vercmp(test[0], test[1]) <= 0, msg="%s < %s? Wrong!" % (test[0], test[1]))
def testVerCmpLess(self):
"""
pre < alpha < beta < rc < p -> test each of these, they are inductive (or should be..)
"""
- tests = [ ( "4.0", "5.0"), ("5", "5.0"), ("1.0_pre2","1.0_p2"),
- ("1.0_alpha2", "1.0_p2"),("1.0_alpha1", "1.0_beta1"),("1.0_beta3","1.0_rc3"),
+ tests = [
+ ("4.0", "5.0"), ("5", "5.0"), ("1.0_pre2", "1.0_p2"),
+ ("1.0_alpha2", "1.0_p2"), ("1.0_alpha1", "1.0_beta1"), ("1.0_beta3", "1.0_rc3"),
("1.001000000000000000001", "1.001000000000000000002"),
("1.00100000000", "1.0010000000000000001"),
("9999", "cvs.9999"),
@@ -47,23 +49,25 @@ class VerCmpTestCase(TestCase):
("12.2b", "12.2.5"),
]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0],test[1]))
-
-
+ self.assertFalse(vercmp(test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0], test[1]))
+
def testVerCmpEqual(self):
-
- tests = [ ("4.0", "4.0"),
+
+ tests = [
+ ("4.0", "4.0"),
("1.0", "1.0"),
("1.0-r0", "1.0"),
("1.0", "1.0-r0"),
("1.0-r0", "1.0-r0"),
- ("1.0-r1", "1.0-r1")]
+ ("1.0-r1", "1.0-r1")
+ ]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0],test[1]))
-
+ self.assertFalse(vercmp(test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0], test[1]))
+
def testVerNotEqual(self):
-
- tests = [ ("1","2"),("1.0_alpha","1.0_pre"),("1.0_beta","1.0_alpha"),
+
+ tests = [
+ ("1", "2"), ("1.0_alpha", "1.0_pre"), ("1.0_beta", "1.0_alpha"),
("0", "0.0"),
("cvs.9999", "9999"),
("1.0-r0", "1.0-r1"),
@@ -77,4 +81,4 @@ class VerCmpTestCase(TestCase):
("12.2b", "12.2"),
]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0],test[1]))
+ self.assertFalse(vercmp(test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0], test[1]))
diff --git a/pym/portage/update.py b/pym/portage/update.py
index 121e95720..df4e11b54 100644
--- a/pym/portage/update.py
+++ b/pym/portage/update.py
@@ -1,11 +1,14 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import errno
import io
import re
import stat
import sys
+import warnings
from portage import os
from portage import _encodings
@@ -13,21 +16,19 @@ from portage import _unicode_decode
from portage import _unicode_encode
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.dep:Atom,dep_getkey,isvalidatom,' + \
- 'remove_slot',
+ 'portage.dep:Atom,dep_getkey,isvalidatom,match_from_list',
'portage.util:ConfigProtect,new_protect_filename,' + \
'normalize_path,write_atomic,writemsg',
- 'portage.util.listdir:_ignorecvs_dirs',
- 'portage.versions:catsplit,ververify'
+ 'portage.versions:_get_slot_re',
)
-from portage.const import USER_CONFIG_PATH
-from portage.dep import _get_slot_re
+from portage.const import USER_CONFIG_PATH, VCS_DIRS
from portage.eapi import _get_eapi_attrs
from portage.exception import DirectoryNotFound, InvalidAtom, PortageException
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
_unicode = str
else:
@@ -35,7 +36,10 @@ else:
ignored_dbentries = ("CONTENTS", "environment.bz2")
-def update_dbentry(update_cmd, mycontent, eapi=None):
+def update_dbentry(update_cmd, mycontent, eapi=None, parent=None):
+
+ if parent is not None:
+ eapi = parent.eapi
if update_cmd[0] == "move":
old_value = _unicode(update_cmd[1])
@@ -44,28 +48,76 @@ def update_dbentry(update_cmd, mycontent, eapi=None):
# Use isvalidatom() to check if this move is valid for the
# EAPI (characters allowed in package names may vary).
if old_value in mycontent and isvalidatom(new_value, eapi=eapi):
- old_value = re.escape(old_value);
- mycontent = re.sub(old_value+"(:|$|\\s)", new_value+"\\1", mycontent)
- def myreplace(matchobj):
- # Strip slot and * operator if necessary
- # so that ververify works.
- ver = remove_slot(matchobj.group(2))
- ver = ver.rstrip("*")
- if ververify(ver):
- return "%s-%s" % (new_value, matchobj.group(2))
- else:
- return "".join(matchobj.groups())
- mycontent = re.sub("(%s-)(\\S*)" % old_value, myreplace, mycontent)
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if old_value not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != old_value:
+ continue
+
+ new_atom = Atom(token.replace(old_value, new_value, 1),
+ eapi=eapi)
+
+ # Avoid creating self-blockers for bug #367215.
+ if new_atom.blocker and parent is not None and \
+ parent.cp == new_atom.cp and \
+ match_from_list(new_atom, [parent]):
+ continue
+
+ split_content[i] = _unicode(new_atom)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
elif update_cmd[0] == "slotmove" and update_cmd[1].operator is None:
- pkg, origslot, newslot = update_cmd[1:]
- old_value = "%s:%s" % (pkg, origslot)
- if old_value in mycontent:
- old_value = re.escape(old_value)
- new_value = "%s:%s" % (pkg, newslot)
- mycontent = re.sub(old_value+"($|\\s)", new_value+"\\1", mycontent)
+ orig_atom, origslot, newslot = update_cmd[1:]
+ orig_cp = orig_atom.cp
+
+ # We don't support versioned slotmove atoms here, since it can be
+ # difficult to determine if the version constraints really match
+ # the atoms that we're trying to update.
+ if orig_atom.version is None and orig_cp in mycontent:
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if orig_cp not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != orig_cp:
+ continue
+ if atom.slot is None or atom.slot != origslot:
+ continue
+
+ slot_part = newslot
+ if atom.sub_slot is not None:
+ if atom.sub_slot == origslot:
+ sub_slot = newslot
+ else:
+ sub_slot = atom.sub_slot
+ slot_part += "/" + sub_slot
+ if atom.slot_operator is not None:
+ slot_part += atom.slot_operator
+
+ split_content[i] = atom.with_slot(slot_part)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
return mycontent
-def update_dbentries(update_iter, mydata, eapi=None):
+def update_dbentries(update_iter, mydata, eapi=None, parent=None):
"""Performs update commands and returns a
dict containing only the updated items."""
updated_items = {}
@@ -79,7 +131,8 @@ def update_dbentries(update_iter, mydata, eapi=None):
is_encoded = mycontent is not orig_content
orig_content = mycontent
for update_cmd in update_iter:
- mycontent = update_dbentry(update_cmd, mycontent, eapi=eapi)
+ mycontent = update_dbentry(update_cmd, mycontent,
+ eapi=eapi, parent=parent)
if mycontent != orig_content:
if is_encoded:
mycontent = _unicode_encode(mycontent,
@@ -88,10 +141,14 @@ def update_dbentries(update_iter, mydata, eapi=None):
updated_items[k] = mycontent
return updated_items
-def fixdbentries(update_iter, dbdir, eapi=None):
+def fixdbentries(update_iter, dbdir, eapi=None, parent=None):
"""Performs update commands which result in search and replace operations
for each of the files in dbdir (excluding CONTENTS and environment.bz2).
Returns True when actual modifications are necessary and False otherwise."""
+
+ warnings.warn("portage.update.fixdbentries() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
mydata = {}
for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
file_path = os.path.join(dbdir, myfile)
@@ -100,7 +157,8 @@ def fixdbentries(update_iter, dbdir, eapi=None):
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
mydata[myfile] = f.read()
- updated_items = update_dbentries(update_iter, mydata, eapi=eapi)
+ updated_items = update_dbentries(update_iter, mydata,
+ eapi=eapi, parent=parent)
for myfile, mycontent in updated_items.items():
file_path = os.path.join(dbdir, myfile)
write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
@@ -225,7 +283,8 @@ def parse_updates(mycontent):
return myupd, errors
def update_config_files(config_root, protect, protect_mask, update_iter, match_callback = None):
- """Perform global updates on /etc/portage/package.*.
+ """Perform global updates on /etc/portage/package.*, /etc/portage/profile/package.*,
+ /etc/portage/profile/packages and /etc/portage/sets.
config_root - location of files to update
protect - list of paths from CONFIG_PROTECT
protect_mask - list of paths from CONFIG_PROTECT_MASK
@@ -248,9 +307,15 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
"package.accept_keywords", "package.env",
"package.keywords", "package.license",
"package.mask", "package.properties",
- "package.unmask", "package.use"
+ "package.unmask", "package.use", "sets"
]
- myxfiles += [os.path.join("profile", x) for x in myxfiles]
+ myxfiles += [os.path.join("profile", x) for x in (
+ "packages", "package.accept_keywords",
+ "package.keywords", "package.mask",
+ "package.unmask", "package.use",
+ "package.use.force", "package.use.mask",
+ "package.use.stable.force", "package.use.stable.mask"
+ )]
abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
recursivefiles = []
for x in myxfiles:
@@ -269,7 +334,7 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
except UnicodeDecodeError:
dirs.remove(y_enc)
continue
- if y.startswith(".") or y in _ignorecvs_dirs:
+ if y.startswith(".") or y in VCS_DIRS:
dirs.remove(y_enc)
for y in files:
try:
@@ -299,7 +364,6 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
if f is not None:
f.close()
- # update /etc/portage/packages.*
ignore_line_re = re.compile(r'^#|^\s*$')
if repo_dict is None:
update_items = [(None, update_iter)]
@@ -319,6 +383,9 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
if atom[:1] == "-":
# package.mask supports incrementals
atom = atom[1:]
+ if atom[:1] == "*":
+ # The profile "packages" file uses "*"-prefixed atoms to mark system packages.
+ atom = atom[1:]
if not isvalidatom(atom):
continue
new_atom = update_dbentry(update_cmd, atom)
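
The rewritten "move" and "slotmove" branches share one strategy: split the db entry on whitespace with a capturing group so the separators survive verbatim, parse each token as an Atom, rewrite only the atoms whose cp (and, for slotmove, slot) matches, and join everything back. A plain-string stand-in for the move branch:

import re

def move_cp(content, old_cp, new_cp):
    parts = re.split(r'(\s+)', content)  # capturing group keeps whitespace
    for i, token in enumerate(parts):
        # Stand-in for Atom parsing: match the bare cp or a cp with a slot.
        if token == old_cp or token.startswith(old_cp + ":"):
            parts[i] = new_cp + token[len(old_cp):]
    return "".join(parts)

assert move_cp(" dev-libs/A:0 ", "dev-libs/A", "dev-libs/B") == " dev-libs/B:0 "
assert move_cp(" dev-libs/AA:0 ", "dev-libs/A", "dev-libs/B") == " dev-libs/AA:0 "
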
diff --git a/pym/portage/util/ExtractKernelVersion.py b/pym/portage/util/ExtractKernelVersion.py
index 69bd58a68..af4a4fe63 100644
--- a/pym/portage/util/ExtractKernelVersion.py
+++ b/pym/portage/util/ExtractKernelVersion.py
@@ -61,18 +61,18 @@ def ExtractKernelVersion(base_dir):
# Grab a list of files named localversion* and sort them
localversions = os.listdir(base_dir)
- for x in range(len(localversions)-1,-1,-1):
+ for x in range(len(localversions) - 1, -1, -1):
if localversions[x][:12] != "localversion":
del localversions[x]
localversions.sort()
# Append the contents of each to the version string, stripping ALL whitespace
for lv in localversions:
- version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
+ version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())
# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
kernelconfig = getconfig(base_dir+"/.config")
if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
- return (version,None)
+ return (version, None)
diff --git a/pym/portage/util/SlotObject.py b/pym/portage/util/SlotObject.py
index a59dfc199..4bb682258 100644
--- a/pym/portage/util/SlotObject.py
+++ b/pym/portage/util/SlotObject.py
@@ -48,4 +48,3 @@ class SlotObject(object):
setattr(obj, myattr, getattr(self, myattr))
return obj
-
diff --git a/pym/portage/util/_ShelveUnicodeWrapper.py b/pym/portage/util/_ShelveUnicodeWrapper.py
new file mode 100644
index 000000000..adbd5199f
--- /dev/null
+++ b/pym/portage/util/_ShelveUnicodeWrapper.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class ShelveUnicodeWrapper(object):
+ """
+ Convert unicode to str and back again, since python-2.x shelve
+ module doesn't support unicode.
+ """
+ def __init__(self, shelve_instance):
+ self._shelve = shelve_instance
+
+ def _encode(self, s):
+ if isinstance(s, unicode):
+ s = s.encode('utf_8')
+ return s
+
+ def __len__(self):
+ return len(self._shelve)
+
+ def __contains__(self, k):
+ return self._encode(k) in self._shelve
+
+ def __iter__(self):
+ return self._shelve.__iter__()
+
+ def items(self):
+ return self._shelve.iteritems()
+
+ def __setitem__(self, k, v):
+ self._shelve[self._encode(k)] = self._encode(v)
+
+ def __getitem__(self, k):
+ return self._shelve[self._encode(k)]
+
+ def __delitem__(self, k):
+ del self._shelve[self._encode(k)]
+
+ def get(self, k, *args):
+ return self._shelve.get(self._encode(k), *args)
+
+ def close(self):
+ self._shelve.close()
+
+ def clear(self):
+ self._shelve.clear()
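A hedged usage sketch for the wrapper above, under Python 2 where shelve requires str keys (the path is hypothetical):

    import shelve

    db = ShelveUnicodeWrapper(shelve.open('/tmp/example.shelve'))
    db[u'key'] = u'value'            # stored as a utf_8-encoded str
    assert db.get(u'key') == 'value'
    db.close()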
diff --git a/pym/portage/util/__init__.py b/pym/portage/util/__init__.py
index 4645be52f..614b2b388 100644
--- a/pym/portage/util/__init__.py
+++ b/pym/portage/util/__init__.py
@@ -1,6 +1,8 @@
-# Copyright 2004-2012 Gentoo Foundation
+# Copyright 2004-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['apply_permissions', 'apply_recursive_permissions',
'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
@@ -31,21 +33,26 @@ import portage
portage.proxy.lazyimport.lazyimport(globals(),
'pickle',
'portage.dep:Atom',
- 'portage.util.listdir:_ignorecvs_dirs'
+ 'subprocess',
)
from portage import os
-from portage import subprocess_getstatusoutput
from portage import _encodings
from portage import _os_merge
from portage import _unicode_encode
from portage import _unicode_decode
+from portage.const import VCS_DIRS
from portage.exception import InvalidAtom, PortageException, FileNotFound, \
OperationNotPermitted, ParseError, PermissionDenied, ReadOnlyFileSystem
from portage.localization import _
from portage.proxy.objectproxy import ObjectProxy
from portage.cache.mappings import UserDict
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
noiselimit = 0
def initialize_logger(level=logging.WARN):
@@ -57,7 +64,7 @@ def initialize_logger(level=logging.WARN):
"""
logging.basicConfig(level=logging.WARN, format='[%(levelname)-4s] %(message)s')
-def writemsg(mystr,noiselevel=0,fd=None):
+def writemsg(mystr, noiselevel=0, fd=None):
"""Prints out warning and debug messages based on the noiselimit setting"""
global noiselimit
if fd is None:
@@ -75,7 +82,7 @@ def writemsg(mystr,noiselevel=0,fd=None):
fd.write(mystr)
fd.flush()
-def writemsg_stdout(mystr,noiselevel=0):
+def writemsg_stdout(mystr, noiselevel=0):
"""Prints messages stdout based on the noiselimit setting"""
writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
@@ -100,7 +107,7 @@ def writemsg_level(msg, level=0, noiselevel=0):
writemsg(msg, noiselevel=noiselevel, fd=fd)
def normalize_path(mypath):
- """
+ """
os.path.normpath("//foo") returns "//foo" instead of "/foo"
We dislike this behavior so we create our own normpath func
to fix it.
@@ -120,8 +127,8 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
begins with a #, it is ignored, as are empty lines"""
- mylines=grablines(myfilename, recursive, remember_source_file=True)
- newlines=[]
+ mylines = grablines(myfilename, recursive, remember_source_file=True)
+ newlines = []
for x, source_file in mylines:
#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
@@ -139,10 +146,10 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
myline = " ".join(myline)
if not myline:
continue
- if myline[0]=="#":
+ if myline[0] == "#":
# Check if we have a compat-level string. BC-integration data.
# '##COMPAT==>N<==' 'some string attached to it'
- mylinetest = myline.split("<==",1)
+ mylinetest = myline.split("<==", 1)
if len(mylinetest) == 2:
myline_potential = mylinetest[1]
mylinetest = mylinetest[0].split("##COMPAT==>")
@@ -159,7 +166,7 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
newlines.append(myline)
return newlines
-def map_dictlist_vals(func,myDict):
+def map_dictlist_vals(func, myDict):
"""Performs a function on each value of each key in a dictlist.
Returns a new dictlist."""
new_dl = {}
@@ -173,7 +180,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
Stacks an array of dict-types into one array. Optionally merging or
overwriting matching key/value pairs for the dict[key]->list.
Returns a single dict. Higher index in lists takes precedence.
-
+
Example usage:
>>> from portage.util import stack_dictlist
>>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
@@ -188,7 +195,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
>>> { 'KEYWORDS':['alpha'] }
>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
>>> { 'KEYWORDS':['alpha'] }
-
+
@param original_dicts a list of (dictionary objects or None)
@type list
@param incremental True or false depending on whether new keys should overwrite
@@ -199,7 +206,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
@type list
@param ignore_none Appears to be ignored, but was probably used long ago.
@type boolean
-
+
"""
final_dict = {}
for mydict in original_dicts:
@@ -208,7 +215,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
for y in mydict:
if not y in final_dict:
final_dict[y] = []
-
+
for thing in mydict[y]:
if thing:
if incremental or y in incrementals:
@@ -245,12 +252,13 @@ def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
def append_repo(atom_list, repo_name, remember_source_file=False):
"""
Takes a list of valid atoms without repo spec and appends ::repo_name.
+ If an atom already has a repo part, then it is preserved (see bug #461948).
"""
if remember_source_file:
- return [(Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True), source) \
+ return [(atom.repo is not None and atom or atom.with_repo(repo_name), source) \
for atom, source in atom_list]
else:
- return [Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True) \
+ return [atom.repo is not None and atom or atom.with_repo(repo_name) \
for atom in atom_list]
def stack_lists(lists, incremental=1, remember_source_file=False,
@@ -334,7 +342,7 @@ def stack_lists(lists, incremental=1, remember_source_file=False,
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
"""
This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
-
+
@param myfilename: file to process
@type myfilename: string (path)
@param juststrings: only return strings
@@ -350,9 +358,9 @@ def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
1. Returns the lines in a file in a dictionary, for example:
'sys-apps/portage x86 amd64 ppc'
would return
- { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
+ {"sys-apps/portage" : ['x86', 'amd64', 'ppc']}
"""
- newdict={}
+ newdict = {}
for x in grablines(myfilename, recursive):
#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
#into single spaces.
@@ -379,52 +387,75 @@ def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
newdict[k] = " ".join(v)
return newdict
-def read_corresponding_eapi_file(filename):
+_eapi_cache = {}
+
+def read_corresponding_eapi_file(filename, default="0"):
"""
Read the 'eapi' file from the directory 'filename' is in.
Returns "0" if the file is not present or invalid.
"""
- default = "0"
eapi_file = os.path.join(os.path.dirname(filename), "eapi")
try:
- f = io.open(_unicode_encode(eapi_file,
+ eapi = _eapi_cache[eapi_file]
+ except KeyError:
+ pass
+ else:
+ if eapi is None:
+ return default
+ return eapi
+
+ eapi = None
+ try:
+ with io.open(_unicode_encode(eapi_file,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'], errors='replace')
- lines = f.readlines()
+ mode='r', encoding=_encodings['repo.content'], errors='replace') as f:
+ lines = f.readlines()
if len(lines) == 1:
eapi = lines[0].rstrip("\n")
else:
writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
noiselevel=-1)
- eapi = default
- f.close()
except IOError:
- eapi = default
+ pass
+ _eapi_cache[eapi_file] = eapi
+ if eapi is None:
+ return default
return eapi
def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
verify_eapi=False, eapi=None):
""" Does the same thing as grabdict except it validates keys
with isvalidatom()"""
- pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
- if not pkgs:
- return pkgs
- if verify_eapi and eapi is None:
- eapi = read_corresponding_eapi_file(myfilename)
- # We need to call keys() here in order to avoid the possibility of
- # "RuntimeError: dictionary changed size during iteration"
- # when an invalid atom is deleted.
+ if recursive:
+ file_list = _recursive_file_list(myfilename)
+ else:
+ file_list = [myfilename]
+
atoms = {}
- for k, v in pkgs.items():
- try:
- k = Atom(k, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
- except InvalidAtom as e:
- writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
- noiselevel=-1)
- else:
- atoms[k] = v
+ for filename in file_list:
+ d = grabdict(filename, juststrings=False,
+ empty=True, recursive=False, incremental=True)
+ if not d:
+ continue
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(myfilename)
+
+ for k, v in d.items():
+ try:
+ k = Atom(k, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (filename, e),
+ noiselevel=-1)
+ else:
+ atoms.setdefault(k, []).extend(v)
+
+ if juststrings:
+ for k, v in atoms.items():
+ atoms[k] = " ".join(v)
+
return atoms
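A usage sketch for the reworked grabdict_package() above; the path and file contents are hypothetical:

    # Given a directory /etc/portage/package.use containing a file with:
    #   app-editors/vim python
    # the call below returns {Atom('app-editors/vim'): ['python']};
    # with juststrings=1 the value would be the string 'python'.
    flags = grabdict_package('/etc/portage/package.use', recursive=1)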
def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
@@ -450,7 +481,7 @@ def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=Fals
writemsg(_("--- Invalid atom in %s: %s\n") % (source_file, e),
noiselevel=-1)
else:
- if pkg_orig == str(pkg):
+ if pkg_orig == _unicode(pkg):
# normal atom, so return as Atom instance
if remember_source_file:
atoms.append((pkg, source_file))
@@ -464,35 +495,63 @@ def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=Fals
atoms.append(pkg_orig)
return atoms
-def grablines(myfilename, recursive=0, remember_source_file=False):
- mylines=[]
- if recursive and os.path.isdir(myfilename):
- if os.path.basename(myfilename) in _ignorecvs_dirs:
- return mylines
+def _recursive_basename_filter(f):
+ return not f.startswith(".") and not f.endswith("~")
+
+def _recursive_file_list(path):
+ # path may be a regular file or a directory
+
+ def onerror(e):
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(path)
+
+ stack = [os.path.split(path)]
+
+ while stack:
+ parent, fname = stack.pop()
+ fullpath = os.path.join(parent, fname)
+
try:
- dirlist = os.listdir(myfilename)
+ st = os.stat(fullpath)
except OSError as e:
- if e.errno == PermissionDenied.errno:
- raise PermissionDenied(myfilename)
- elif e.errno in (errno.ENOENT, errno.ESTALE):
- return mylines
- else:
- raise
- dirlist.sort()
- for f in dirlist:
- if not f.startswith(".") and not f.endswith("~"):
- mylines.extend(grablines(
- os.path.join(myfilename, f), recursive, remember_source_file))
+ onerror(e)
+ continue
+
+ if stat.S_ISDIR(st.st_mode):
+ if fname in VCS_DIRS or not _recursive_basename_filter(fname):
+ continue
+ try:
+ children = os.listdir(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ # Sort in reverse, since we pop from the end of the stack.
+ # Include regular files in the stack, so files are sorted
+ # together with directories.
+ children.sort(reverse=True)
+ stack.extend((fullpath, x) for x in children)
+
+ elif stat.S_ISREG(st.st_mode):
+ if _recursive_basename_filter(fname):
+ yield fullpath
+
+def grablines(myfilename, recursive=0, remember_source_file=False):
+ mylines = []
+ if recursive:
+ for f in _recursive_file_list(myfilename):
+ mylines.extend(grablines(f, recursive=False,
+ remember_source_file=remember_source_file))
+
else:
try:
- myfile = io.open(_unicode_encode(myfilename,
+ with io.open(_unicode_encode(myfilename,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace')
- if remember_source_file:
- mylines = [(line, myfilename) for line in myfile.readlines()]
- else:
- mylines = myfile.readlines()
- myfile.close()
+ mode='r', encoding=_encodings['content'], errors='replace') as myfile:
+ if remember_source_file:
+ mylines = [(line, myfilename) for line in myfile.readlines()]
+ else:
+ mylines = myfile.readlines()
except IOError as e:
if e.errno == PermissionDenied.errno:
raise PermissionDenied(myfilename)
@@ -502,7 +561,7 @@ def grablines(myfilename, recursive=0, remember_source_file=False):
raise
return mylines
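A traversal sketch for _recursive_file_list() above, with a hypothetical layout:

    # If /etc/portage/package.mask is a directory containing '00-base',
    # '99-local~' and '.git/', only 00-base is read: backup files, hidden
    # names and VCS_DIRS are filtered, and entries are visited in sorted
    # order.
    lines = grablines('/etc/portage/package.mask', recursive=1,
        remember_source_file=True)   # yields (line, source_file) pairs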
-def writedict(mydict,myfilename,writekey=True):
+def writedict(mydict, myfilename, writekey=True):
"""Writes out a dict to a file; writekey=0 mode doesn't write out
the key and assumes all values are strings, not lists."""
lines = []
@@ -528,18 +587,44 @@ def shlex_split(s):
rval = [_unicode_decode(x) for x in rval]
return rval
-class _tolerant_shlex(shlex.shlex):
+class _getconfig_shlex(shlex.shlex):
+
+ def __init__(self, portage_tolerant=False, **kwargs):
+ shlex.shlex.__init__(self, **kwargs)
+ self.__portage_tolerant = portage_tolerant
+
+ def allow_sourcing(self, var_expand_map):
+ self.source = portage._native_string("source")
+ self.var_expand_map = var_expand_map
+
def sourcehook(self, newfile):
try:
+ newfile = varexpand(newfile, self.var_expand_map)
return shlex.shlex.sourcehook(self, newfile)
except EnvironmentError as e:
- writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
- (self.infile, str(e)), noiselevel=-1)
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(newfile)
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ writemsg("open('%s', 'r'): %s\n" % (newfile, e), noiselevel=-1)
+ raise
+
+ msg = self.error_leader()
+ if e.errno == errno.ENOTDIR:
+ msg += _("%s: Not a directory") % newfile
+ else:
+ msg += _("%s: No such file or directory") % newfile
+
+ if self.__portage_tolerant:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ else:
+ raise ParseError(msg)
return (newfile, io.StringIO())
_invalid_var_name_re = re.compile(r'^\d|\W')
-def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
+def getconfig(mycfg, tolerant=False, allow_sourcing=False, expand=True,
+ recursive=False):
+
if isinstance(expand, dict):
# Some existing variable definitions have been
# passed in, for use in substitutions.
@@ -548,6 +633,21 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
expand_map = {}
mykeys = {}
+
+ if recursive:
+ # Emulate source commands so that syntax error messages
+ # can display real file names and line numbers.
+ if not expand:
+ expand_map = False
+ fname = None
+ for fname in _recursive_file_list(mycfg):
+ mykeys.update(getconfig(fname, tolerant=tolerant,
+ allow_sourcing=allow_sourcing, expand=expand_map,
+ recursive=False) or {})
+ if fname is None:
+ return None
+ return mykeys
+
f = None
try:
# NOTE: shlex doesn't support unicode objects with Python 2
@@ -572,49 +672,53 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
if f is not None:
f.close()
+ # Since this file has unicode_literals enabled, and Python 2's
+ # shlex implementation does not support unicode, the following code
+ # uses _native_string() to encode unicode literals when necessary.
+
# Workaround for avoiding a silent error in shlex that is
# triggered by a source statement at the end of the file
# without a trailing newline after the source statement.
- if content and content[-1] != '\n':
- content += '\n'
+ if content and content[-1] != portage._native_string('\n'):
+ content += portage._native_string('\n')
# Warn about dos-style line endings since that prevents
# people from being able to source them with bash.
- if '\r' in content:
+ if portage._native_string('\r') in content:
writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
lex = None
try:
- if tolerant:
- shlex_class = _tolerant_shlex
- else:
- shlex_class = shlex.shlex
# The default shlex.sourcehook() implementation
# only joins relative paths when the infile
# attribute is properly set.
- lex = shlex_class(content, infile=mycfg, posix=True)
- lex.wordchars = string.digits + string.ascii_letters + \
- "~!@#$%*_\:;?,./-+{}"
- lex.quotes="\"'"
+ lex = _getconfig_shlex(instream=content, infile=mycfg, posix=True,
+ portage_tolerant=tolerant)
+ lex.wordchars = portage._native_string(string.digits +
+ string.ascii_letters + "~!@#$%*_\:;?,./-+{}")
+ lex.quotes = portage._native_string("\"'")
if allow_sourcing:
- lex.source="source"
- while 1:
- key=lex.get_token()
+ lex.allow_sourcing(expand_map)
+
+ while True:
+ key = _unicode_decode(lex.get_token())
if key == "export":
- key = lex.get_token()
+ key = _unicode_decode(lex.get_token())
if key is None:
#normal end of file
- break;
- equ=lex.get_token()
- if (equ==''):
+ break
+
+ equ = _unicode_decode(lex.get_token())
+ if not equ:
msg = lex.error_leader() + _("Unexpected EOF")
if not tolerant:
raise ParseError(msg)
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- elif (equ!='='):
+
+ elif equ != "=":
msg = lex.error_leader() + \
_("Invalid token '%s' (not '=')") % (equ,)
if not tolerant:
@@ -622,7 +726,8 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- val=lex.get_token()
+
+ val = _unicode_decode(lex.get_token())
if val is None:
msg = lex.error_leader() + \
_("Unexpected end of config file: variable '%s'") % (key,)
@@ -631,8 +736,6 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- key = _unicode_decode(key)
- val = _unicode_decode(val)
if _invalid_var_name_re.search(key) is not None:
msg = lex.error_leader() + \
@@ -653,7 +756,7 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
except Exception as e:
if isinstance(e, ParseError) or lex is None:
raise
- msg = _unicode_decode("%s%s") % (lex.error_leader(), e)
+ msg = "%s%s" % (lex.error_leader(), e)
writemsg("%s\n" % msg, noiselevel=-1)
raise
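A hedged call sketch for the extended getconfig() above (the path is hypothetical):

    # Parse a make.conf-style file, following "source" statements and
    # expanding variables from a caller-supplied map.
    settings = getconfig('/etc/portage/make.conf', tolerant=True,
        allow_sourcing=True, expand={'EPREFIX': ''})
    # With recursive=True, a directory may be passed instead and each
    # regular file under it is parsed into the same result dict.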
@@ -671,10 +774,10 @@ def varexpand(mystring, mydict=None, error_leader=None):
This code is used by the configfile code, as well as others (parser)
This would be a good bunch of code to port to C.
"""
- numvars=0
- #in single, double quotes
- insing=0
- indoub=0
+ numvars = 0
+ # in single, double quotes
+ insing = 0
+ indoub = 0
pos = 0
length = len(mystring)
newstring = []
@@ -686,7 +789,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
else:
newstring.append("'") # Quote removal is handled by shlex.
insing=not insing
- pos=pos+1
+ pos += 1
continue
elif current == '"':
if (insing):
@@ -694,9 +797,9 @@ def varexpand(mystring, mydict=None, error_leader=None):
else:
newstring.append('"') # Quote removal is handled by shlex.
indoub=not indoub
- pos=pos+1
+ pos += 1
continue
- if (not insing):
+ if not insing:
#expansion time
if current == "\n":
#convert newlines to spaces
@@ -711,7 +814,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
# escaped newline characters. Note that we don't handle
# escaped quotes here, since getconfig() uses shlex
# to handle that earlier.
- if (pos+1>=len(mystring)):
+ if pos + 1 >= len(mystring):
newstring.append(current)
break
else:
@@ -733,15 +836,15 @@ def varexpand(mystring, mydict=None, error_leader=None):
newstring.append(mystring[pos - 2:pos])
continue
elif current == "$":
- pos=pos+1
- if mystring[pos]=="{":
- pos=pos+1
- braced=True
+ pos += 1
+ if mystring[pos] == "{":
+ pos += 1
+ braced = True
else:
- braced=False
- myvstart=pos
+ braced = False
+ myvstart = pos
while mystring[pos] in _varexpand_word_chars:
- if (pos+1)>=len(mystring):
+ if pos + 1 >= len(mystring):
if braced:
msg = _varexpand_unexpected_eof_msg
if error_leader is not None:
@@ -749,20 +852,20 @@ def varexpand(mystring, mydict=None, error_leader=None):
writemsg(msg + "\n", noiselevel=-1)
return ""
else:
- pos=pos+1
+ pos += 1
break
- pos=pos+1
- myvarname=mystring[myvstart:pos]
+ pos += 1
+ myvarname = mystring[myvstart:pos]
if braced:
- if mystring[pos]!="}":
+ if mystring[pos] != "}":
msg = _varexpand_unexpected_eof_msg
if error_leader is not None:
msg = error_leader() + msg
writemsg(msg + "\n", noiselevel=-1)
return ""
else:
- pos=pos+1
- if len(myvarname)==0:
+ pos += 1
+ if len(myvarname) == 0:
msg = "$"
if braced:
msg += "{}"
@@ -771,7 +874,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
msg = error_leader() + msg
writemsg(msg + "\n", noiselevel=-1)
return ""
- numvars=numvars+1
+ numvars += 1
if myvarname in mydict:
newstring.append(mydict[myvarname])
else:
@@ -786,9 +889,9 @@ def varexpand(mystring, mydict=None, error_leader=None):
# broken and removed, but can still be imported
pickle_write = None
-def pickle_read(filename,default=None,debug=0):
+def pickle_read(filename, default=None, debug=0):
if not os.access(filename, os.R_OK):
- writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
+ writemsg(_("pickle_read(): File not readable. '") + filename + "'\n", 1)
return default
data = None
try:
@@ -797,12 +900,12 @@ def pickle_read(filename,default=None,debug=0):
mypickle = pickle.Unpickler(myf)
data = mypickle.load()
myf.close()
- del mypickle,myf
- writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
+ del mypickle, myf
+ writemsg(_("pickle_read(): Loaded pickle. '") + filename + "'\n", 1)
except SystemExit as e:
raise
except Exception as e:
- writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
+ writemsg(_("!!! Failed to load pickle: ") + str(e) + "\n", 1)
data = default
return data
@@ -830,6 +933,9 @@ class cmp_sort_key(object):
list.sort(), making it easier to port code for python-3.0 compatibility.
It works by generating key objects which use the given cmp function to
implement their __lt__ method.
+
+ Beginning with Python 2.7 and 3.2, equivalent functionality is provided
+ by functools.cmp_to_key().
"""
__slots__ = ("_cmp_func",)
@@ -922,6 +1028,10 @@ def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
modified = False
+ # Since Python 3.4, chown requires int type (no proxies).
+ uid = int(uid)
+ gid = int(gid)
+
if stat_cached is None:
try:
if follow_links:
@@ -1141,7 +1251,7 @@ class atomic_ofstream(ObjectProxy):
object.__setattr__(self, '_file',
open_func(_unicode_encode(tmp_name,
encoding=_encodings['fs'], errors='strict'),
- mode=mode, **kargs))
+ mode=mode, **portage._native_kwargs(kargs)))
return
except IOError as e:
if canonical_path == filename:
@@ -1223,7 +1333,7 @@ class atomic_ofstream(ObjectProxy):
self.close()
def __del__(self):
- """If the user does not explicitely call close(), it is
+ """If the user does not explicitly call close(), it is
assumed that an error has occurred, so we abort()."""
try:
f = object.__getattribute__(self, '_file')
@@ -1402,9 +1512,9 @@ class LazyItemsDict(UserDict):
lazy_item = self.lazy_items.get(k)
if lazy_item is not None:
if not lazy_item.singleton:
- raise TypeError(_unicode_decode("LazyItemsDict " + \
+ raise TypeError("LazyItemsDict " + \
"deepcopy is unsafe with lazy items that are " + \
- "not singletons: key=%s value=%s") % (k, lazy_item,))
+ "not singletons: key=%s value=%s" % (k, lazy_item,))
UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
return result
@@ -1576,13 +1686,13 @@ def find_updated_config_files(target_root, config_protect):
"""
Return a tuple of configuration files that need to be updated.
The tuple contains lists organized like this:
- [ protected_dir, file_list ]
+ [protected_dir, file_list]
If the protected config isn't a protected_dir but a protected_file, the list is:
- [ protected_file, None ]
+ [protected_file, None]
If no configuration files need to be updated, None is returned
"""
- os = _os_merge
+ encoding = _encodings['fs']
if config_protect:
# directories with some protect files in them
@@ -1614,10 +1724,24 @@ def find_updated_config_files(target_root, config_protect):
mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
os.path.split(x.rstrip(os.path.sep))
mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
- a = subprocess_getstatusoutput(mycommand)
-
- if a[0] == 0:
- files = a[1].split('\0')
+ cmd = shlex_split(mycommand)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
+ for arg in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ files = output.split('\0')
# split always produces an empty string as the last element
if files and not files[-1]:
del files[-1]
diff --git a/pym/portage/util/_argparse.py b/pym/portage/util/_argparse.py
new file mode 100644
index 000000000..6ca785235
--- /dev/null
+++ b/pym/portage/util/_argparse.py
@@ -0,0 +1,42 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ArgumentParser']
+
+try:
+ from argparse import ArgumentParser
+except ImportError:
+ # Compatibility with Python 2.6 and 3.1
+ from optparse import OptionGroup, OptionParser
+
+ from portage.localization import _
+
+ class ArgumentParser(object):
+ def __init__(self, **kwargs):
+ add_help = kwargs.pop("add_help", None)
+ if add_help is not None:
+ kwargs["add_help_option"] = add_help
+ parser = OptionParser(**kwargs)
+ self._parser = parser
+ self.add_argument = parser.add_option
+ self.print_help = parser.print_help
+ self.error = parser.error
+
+ def add_argument_group(self, title=None, **kwargs):
+ optiongroup = OptionGroup(self._parser, title, **kwargs)
+ self._parser.add_option_group(optiongroup)
+ return _ArgumentGroup(optiongroup)
+
+ def parse_known_args(self, args=None, namespace=None):
+ return self._parser.parse_args(args, namespace)
+
+ def parse_args(self, args=None, namespace=None):
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ msg = _('unrecognized arguments: %s')
+ self.error(msg % ' '.join(argv))
+ return args
+
+ class _ArgumentGroup(object):
+ def __init__(self, optiongroup):
+ self.add_argument = optiongroup.add_option
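A usage sketch; the same calls work with both the real argparse and the optparse fallback above (the option name is hypothetical):

    parser = ArgumentParser(description='demo')
    parser.add_argument('--jobs', dest='jobs', default='1')
    options = parser.parse_args(['--jobs', '4'])
    print(options.jobs)   # '4'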
diff --git a/pym/portage/util/_async/AsyncScheduler.py b/pym/portage/util/_async/AsyncScheduler.py
new file mode 100644
index 000000000..9b96c6f36
--- /dev/null
+++ b/pym/portage/util/_async/AsyncScheduler.py
@@ -0,0 +1,102 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollScheduler import PollScheduler
+
+class AsyncScheduler(AsynchronousTask, PollScheduler):
+
+ def __init__(self, max_jobs=None, max_load=None, **kwargs):
+ AsynchronousTask.__init__(self)
+ PollScheduler.__init__(self, **kwargs)
+
+ if max_jobs is None:
+ max_jobs = 1
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+ self._error_count = 0
+ self._running_tasks = set()
+ self._remaining_tasks = True
+ self._term_check_id = None
+ self._loadavg_check_id = None
+
+ def _poll(self):
+ if not (self._is_work_scheduled() or self._keep_scheduling()):
+ self.wait()
+ return self.returncode
+
+ def _cancel(self):
+ self._terminated.set()
+ self._termination_check()
+
+ def _terminate_tasks(self):
+ for task in list(self._running_tasks):
+ task.cancel()
+
+ def _next_task(self):
+ raise NotImplementedError(self)
+
+ def _keep_scheduling(self):
+ return self._remaining_tasks and not self._terminated.is_set()
+
+ def _running_job_count(self):
+ return len(self._running_tasks)
+
+ def _schedule_tasks(self):
+ while self._keep_scheduling() and self._can_add_job():
+ try:
+ task = self._next_task()
+ except StopIteration:
+ self._remaining_tasks = False
+ else:
+ self._running_tasks.add(task)
+ task.scheduler = self._sched_iface
+ task.addExitListener(self._task_exit)
+ task.start()
+
+ # Triggers cleanup and exit listeners if there's nothing left to do.
+ self.poll()
+
+ def _task_exit(self, task):
+ self._running_tasks.discard(task)
+ if task.returncode != os.EX_OK:
+ self._error_count += 1
+ self._schedule()
+
+ def _start(self):
+ self._term_check_id = self._event_loop.idle_add(self._termination_check)
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ self._loadavg_check_id = self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)
+ self._schedule()
+
+ def _wait(self):
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self._event_loop.iteration()
+
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self._event_loop.iteration()
+
+ if self._term_check_id is not None:
+ self._event_loop.source_remove(self._term_check_id)
+ self._term_check_id = None
+
+ if self._loadavg_check_id is not None:
+ self._event_loop.source_remove(self._loadavg_check_id)
+ self._loadavg_check_id = None
+
+ if self._error_count > 0:
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+
+ return self.returncode
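A minimal subclass sketch; raising StopIteration from _next_task() signals that no tasks remain. This mirrors the TaskScheduler class added later in this patch:

    class IterScheduler(AsyncScheduler):
        def __init__(self, task_iter, **kwargs):
            AsyncScheduler.__init__(self, **kwargs)
            self._task_iter = task_iter

        def _next_task(self):
            return next(self._task_iter)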
diff --git a/pym/portage/util/_async/FileCopier.py b/pym/portage/util/_async/FileCopier.py
new file mode 100644
index 000000000..27e5ab4c0
--- /dev/null
+++ b/pym/portage/util/_async/FileCopier.py
@@ -0,0 +1,17 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage import shutil
+from portage.util._async.ForkProcess import ForkProcess
+
+class FileCopier(ForkProcess):
+ """
+ Asynchronously copy a file.
+ """
+
+ __slots__ = ('src_path', 'dest_path')
+
+ def _run(self):
+ shutil.copy(self.src_path, self.dest_path)
+ return os.EX_OK
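A hedged usage sketch; the paths are hypothetical and the global_event_loop import path is an assumption:

    from portage.util._eventloop.global_event_loop import global_event_loop

    copier = FileCopier(src_path='/tmp/src.bin', dest_path='/tmp/dst.bin',
        scheduler=global_event_loop())
    copier.start()
    assert copier.wait() == os.EX_OK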
diff --git a/pym/portage/util/_async/FileDigester.py b/pym/portage/util/_async/FileDigester.py
new file mode 100644
index 000000000..881c69280
--- /dev/null
+++ b/pym/portage/util/_async/FileDigester.py
@@ -0,0 +1,73 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import perform_multiple_checksums
+from portage.util._async.ForkProcess import ForkProcess
+from _emerge.PipeReader import PipeReader
+
+class FileDigester(ForkProcess):
+ """
+ Asynchronously generate file digests. Pass in file_path and
+ hash_names, and after successful execution, the digests
+ attribute will be a dict containing all of the requested
+ digests.
+ """
+
+ __slots__ = ('file_path', 'digests', 'hash_names',
+ '_digest_pipe_reader', '_digest_pw')
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self.fd_pipes = {}
+ self.fd_pipes[pw] = pw
+ self._digest_pw = pw
+ self._digest_pipe_reader = PipeReader(
+ input_files={"input":pr},
+ scheduler=self.scheduler)
+ self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
+ self._digest_pipe_reader.start()
+ ForkProcess._start(self)
+ os.close(pw)
+
+ def _run(self):
+ digests = perform_multiple_checksums(self.file_path,
+ hashes=self.hash_names)
+
+ buf = "".join("%s=%s\n" % item
+ for item in digests.items()).encode('utf_8')
+
+ while buf:
+ buf = buf[os.write(self._digest_pw, buf):]
+
+ return os.EX_OK
+
+ def _parse_digests(self, data):
+
+ digests = {}
+ for line in data.decode('utf_8').splitlines():
+ parts = line.split('=', 1)
+ if len(parts) == 2:
+ digests[parts[0]] = parts[1]
+
+ self.digests = digests
+
+ def _pipe_logger_exit(self, pipe_logger):
+ # Ignore this event, since we want to ensure that we
+ # exit only after _digest_pipe_reader has reached EOF.
+ self._pipe_logger = None
+
+ def _digest_pipe_reader_exit(self, pipe_reader):
+ self._parse_digests(pipe_reader.getvalue())
+ self._digest_pipe_reader = None
+ self._unregister()
+ self.wait()
+
+ def _unregister(self):
+ ForkProcess._unregister(self)
+
+ pipe_reader = self._digest_pipe_reader
+ if pipe_reader is not None:
+ self._digest_pipe_reader = None
+ pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
+ pipe_reader.cancel()
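A hedged usage sketch (file path hypothetical; hash names follow portage.checksum conventions, and global_event_loop is assumed as above):

    digester = FileDigester(file_path='/tmp/distfile.tar.bz2',
        hash_names=('MD5', 'SHA256'), scheduler=global_event_loop())
    digester.start()
    if digester.wait() == os.EX_OK:
        print(digester.digests)   # {'MD5': '...', 'SHA256': '...'}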
diff --git a/pym/portage/util/_async/ForkProcess.py b/pym/portage/util/_async/ForkProcess.py
new file mode 100644
index 000000000..25f72d308
--- /dev/null
+++ b/pym/portage/util/_async/ForkProcess.py
@@ -0,0 +1,65 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+import traceback
+
+import portage
+from portage import os
+from _emerge.SpawnProcess import SpawnProcess
+
+class ForkProcess(SpawnProcess):
+
+ __slots__ = ()
+
+ def _spawn(self, args, fd_pipes=None, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call _run().
+ """
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+ return [pid]
+
+ rval = 1
+ try:
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ rval = self._run()
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ def _run(self):
+ raise NotImplementedError(self)
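A minimal subclass sketch; _run() executes in the forked child, and its return value becomes the child's exit status:

    class HelloProcess(ForkProcess):
        __slots__ = ()

        def _run(self):
            print('hello from pid %d' % os.getpid())
            return os.EX_OK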
diff --git a/pym/portage/util/_async/PipeLogger.py b/pym/portage/util/_async/PipeLogger.py
new file mode 100644
index 000000000..aa605d94d
--- /dev/null
+++ b/pym/portage/util/_async/PipeLogger.py
@@ -0,0 +1,163 @@
+# Copyright 2008-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import errno
+import gzip
+import sys
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeLogger(AbstractPollTask):
+
+ """
+ This can be used for logging output of a child process,
+ optionally outputting to log_file_path and/or stdout_fd. It can
+ also monitor for EOF on input_fd, which may be used to detect
+ termination of a child process. If log_file_path ends with
+ '.gz' then the log file is written with compression.
+ """
+
+ __slots__ = ("input_fd", "log_file_path", "stdout_fd") + \
+ ("_log_file", "_log_file_real", "_reg_id")
+
+ def _start(self):
+
+ log_file_path = self.log_file_path
+ if log_file_path is not None:
+
+ self._log_file = open(_unicode_encode(log_file_path,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ if log_file_path.endswith('.gz'):
+ self._log_file_real = self._log_file
+ self._log_file = gzip.GzipFile(filename='', mode='ab',
+ fileobj=self._log_file)
+
+ portage.util.apply_secpass_permissions(log_file_path,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+
+ if isinstance(self.input_fd, int):
+ fd = self.input_fd
+ else:
+ fd = self.input_fd.fileno()
+
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(fd,
+ self._registered_events, self._output_handler)
+ self._registered = True
+
+ def _cancel(self):
+ self._unregister()
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def _output_handler(self, fd, event):
+
+ background = self.background
+ stdout_fd = self.stdout_fd
+ log_file = self._log_file
+
+ while True:
+ buf = self._read_buf(fd, event)
+
+ if buf is None:
+ # not a POLLIN event, EAGAIN, etc...
+ break
+
+ if not buf:
+ # EOF
+ self._unregister()
+ self.wait()
+ break
+
+ else:
+ if not background and stdout_fd is not None:
+ failures = 0
+ stdout_buf = buf
+ while stdout_buf:
+ try:
+ stdout_buf = \
+ stdout_buf[os.write(stdout_fd, stdout_buf):]
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ del e
+ failures += 1
+ if failures > 50:
+ # Avoid a potentially infinite loop. In
+ # most cases, the failure count is zero
+ # and it's unlikely to exceed 1.
+ raise
+
+ # This means that a subprocess has put an inherited
+ # stdio file descriptor (typically stdin) into
+ # O_NONBLOCK mode. This is not acceptable (see bug
+ # #264435), so revert it. We need to use a loop
+ # here since there's a race condition due to
+ # parallel processes being able to change the
+ # flags on the inherited file descriptor.
+ # TODO: When possible, avoid having child processes
+ # inherit stdio file descriptors from portage
+ # (maybe it can't be avoided with
+ # PROPERTIES=interactive).
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+ if log_file is not None:
+ log_file.write(buf)
+ log_file.flush()
+
+ self._unregister_if_appropriate(event)
+
+ return True
+
+ def _unregister(self):
+
+ if self._reg_id is not None:
+ self.scheduler.source_remove(self._reg_id)
+ self._reg_id = None
+
+ if self.input_fd is not None:
+ if isinstance(self.input_fd, int):
+ os.close(self.input_fd)
+ else:
+ self.input_fd.close()
+ self.input_fd = None
+
+ if self.stdout_fd is not None:
+ os.close(self.stdout_fd)
+ self.stdout_fd = None
+
+ if self._log_file is not None:
+ self._log_file.close()
+ self._log_file = None
+
+ if self._log_file_real is not None:
+ # Avoid "ResourceWarning: unclosed file" since python 3.2.
+ self._log_file_real.close()
+ self._log_file_real = None
+
+ self._registered = False
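A hedged wiring sketch: tee a child's output pipe to stdout and a gzip-compressed log (paths hypothetical, global_event_loop assumed as above):

    pr, pw = os.pipe()   # pw would be inherited by the child process
    logger = PipeLogger(input_fd=pr,
        log_file_path='/var/tmp/build.log.gz', stdout_fd=1,
        background=False, scheduler=global_event_loop())
    logger.start()       # EOF on pr ends the task and closes the log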
diff --git a/pym/portage/util/_async/PipeReaderBlockingIO.py b/pym/portage/util/_async/PipeReaderBlockingIO.py
new file mode 100644
index 000000000..b06adf6ed
--- /dev/null
+++ b/pym/portage/util/_async/PipeReaderBlockingIO.py
@@ -0,0 +1,91 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeReaderBlockingIO(AbstractPollTask):
+ """
+ Reads output from one or more files and saves it in memory, for
+ retrieval via the getvalue() method. This is driven by a thread
+ for each input file, in order to support blocking IO. This may
+ be useful for using threads to handle blocking IO with Jython,
+ since Jython lacks the fcntl module which is needed for
+ non-blocking IO (see http://bugs.jython.org/issue1074).
+ """
+
+ __slots__ = ("input_files", "_read_data", "_terminate",
+ "_threads", "_thread_rlock")
+
+ def _start(self):
+ self._terminate = threading.Event()
+ self._threads = {}
+ self._read_data = []
+
+ self._registered = True
+ self._thread_rlock = threading.RLock()
+ with self._thread_rlock:
+ for f in self.input_files.values():
+ t = threading.Thread(target=self._reader_thread, args=(f,))
+ t.daemon = True
+ t.start()
+ self._threads[f] = t
+
+ def _reader_thread(self, f):
+ try:
+ terminated = self._terminate.is_set
+ except AttributeError:
+ # Jython 2.7.0a2
+ terminated = self._terminate.isSet
+ bufsize = self._bufsize
+ while not terminated():
+ buf = f.read(bufsize)
+ with self._thread_rlock:
+ if terminated():
+ break
+ elif buf:
+ self._read_data.append(buf)
+ else:
+ del self._threads[f]
+ if not self._threads:
+ # Thread-safe callback to EventLoop
+ self.scheduler.idle_add(self._eof)
+ break
+ f.close()
+
+ def _eof(self):
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = os.EX_OK
+ self.wait()
+ return False
+
+ def _cancel(self):
+ self._terminate.set()
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+ self.wait()
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ with self._thread_rlock:
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ with self._thread_rlock:
+ self._read_data = None
diff --git a/pym/portage/util/_async/PopenProcess.py b/pym/portage/util/_async/PopenProcess.py
new file mode 100644
index 000000000..2fc56d295
--- /dev/null
+++ b/pym/portage/util/_async/PopenProcess.py
@@ -0,0 +1,33 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+
+class PopenProcess(SubProcess):
+
+ __slots__ = ("pipe_reader", "proc",)
+
+ def _start(self):
+
+ self.pid = self.proc.pid
+ self._registered = True
+
+ if self.pipe_reader is None:
+ self._reg_id = self.scheduler.child_watch_add(
+ self.pid, self._child_watch_cb)
+ else:
+ try:
+ self.pipe_reader.scheduler = self.scheduler
+ except AttributeError:
+ pass
+ self.pipe_reader.addExitListener(self._pipe_reader_exit)
+ self.pipe_reader.start()
+
+ def _pipe_reader_exit(self, pipe_reader):
+ self._reg_id = self.scheduler.child_watch_add(
+ self.pid, self._child_watch_cb)
+
+ def _child_watch_cb(self, pid, condition, user_data=None):
+ self._reg_id = None
+ self._waitpid_cb(pid, condition)
+ self.wait()
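A hedged usage sketch: wrap an existing subprocess.Popen instance so the event loop reaps it:

    import subprocess

    proc = PopenProcess(proc=subprocess.Popen(['true']),
        scheduler=global_event_loop())
    proc.start()
    proc.wait()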
diff --git a/pym/portage/util/_async/SchedulerInterface.py b/pym/portage/util/_async/SchedulerInterface.py
new file mode 100644
index 000000000..2ab668ee4
--- /dev/null
+++ b/pym/portage/util/_async/SchedulerInterface.py
@@ -0,0 +1,79 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+from ..SlotObject import SlotObject
+
+class SchedulerInterface(SlotObject):
+
+ _event_loop_attrs = ("IO_ERR", "IO_HUP", "IO_IN",
+ "IO_NVAL", "IO_OUT", "IO_PRI",
+ "child_watch_add", "idle_add", "io_add_watch",
+ "iteration", "source_remove", "timeout_add")
+
+ __slots__ = _event_loop_attrs + ("_event_loop", "_is_background")
+
+ def __init__(self, event_loop, is_background=None, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._event_loop = event_loop
+ if is_background is None:
+ is_background = self._return_false
+ self._is_background = is_background
+ for k in self._event_loop_attrs:
+ setattr(self, k, getattr(event_loop, k))
+
+ @staticmethod
+ def _return_false():
+ return False
+
+ def output(self, msg, log_path=None, background=None,
+ level=0, noiselevel=-1):
+ """
+ Output msg to stdout if not self._is_background(). If log_path
+ is not None then append msg to the log (appends with
+ compression if the filename extension of log_path corresponds
+ to a supported compression type).
+ """
+
+ global_background = self._is_background()
+ if background is None or global_background:
+ # Use the global value if the task does not have a local
+ # background value. For example, parallel-fetch tasks run
+ # in the background while other tasks concurrently run in
+ # the foreground.
+ background = global_background
+
+ msg_shown = False
+ if not background:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ msg_shown = True
+
+ if log_path is not None:
+ try:
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ f_real = f
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ if not msg_shown:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+
+ if log_path.endswith('.gz'):
+ # NOTE: The empty filename argument prevents us from
+ # triggering a bug in python3 which causes GzipFile
+ # to raise AttributeError if fileobj.name is bytes
+ # instead of unicode.
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ f.write(_unicode_encode(msg))
+ f.close()
+ if f_real is not f:
+ f_real.close()
diff --git a/pym/portage/util/_async/TaskScheduler.py b/pym/portage/util/_async/TaskScheduler.py
new file mode 100644
index 000000000..35b3875a4
--- /dev/null
+++ b/pym/portage/util/_async/TaskScheduler.py
@@ -0,0 +1,20 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from .AsyncScheduler import AsyncScheduler
+
+class TaskScheduler(AsyncScheduler):
+
+ """
+ A simple way to handle scheduling of AbstractPollTask instances: pass
+ a task iterator into the constructor and call start(). Use the
+ poll, wait, or addExitListener methods to be notified when all of the
+ tasks have completed.
+ """
+
+ def __init__(self, task_iter, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
+ self._task_iter = task_iter
+
+ def _next_task(self):
+ return next(self._task_iter)
diff --git a/pym/portage/util/_async/__init__.py b/pym/portage/util/_async/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/pym/portage/util/_async/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/util/_async/run_main_scheduler.py b/pym/portage/util/_async/run_main_scheduler.py
new file mode 100644
index 000000000..10fed34b3
--- /dev/null
+++ b/pym/portage/util/_async/run_main_scheduler.py
@@ -0,0 +1,41 @@
+
+import signal
+
+def run_main_scheduler(scheduler):
+ """
+ Start and run an AsyncScheduler (or compatible object), and handle
+ SIGINT or SIGTERM by calling its terminate() method and waiting
+ for it to clean up after itself. If SIGINT or SIGTERM is received,
+ return signum, else return None. Any previous SIGINT or SIGTERM
+ signal handlers are automatically saved and restored before
+ returning.
+ """
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ received_signal.append(signum)
+ scheduler.terminate()
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+ try:
+ scheduler.start()
+ scheduler.wait()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ return received_signal[0]
+ return None
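A usage sketch combining this helper with the TaskScheduler added earlier in this patch; the task list and the 128+signum exit convention are assumptions:

    scheduler = TaskScheduler(iter(tasks), max_jobs=2)
    signum = run_main_scheduler(scheduler)
    if signum is not None:
        sys.exit(128 + signum)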
diff --git a/pym/portage/util/_ctypes.py b/pym/portage/util/_ctypes.py
new file mode 100644
index 000000000..aeceebcca
--- /dev/null
+++ b/pym/portage/util/_ctypes.py
@@ -0,0 +1,47 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import ctypes
+ import ctypes.util
+except ImportError:
+ ctypes = None
+else:
+ try:
+ ctypes.cdll
+ except AttributeError:
+ ctypes = None
+
+_library_names = {}
+
+def find_library(name):
+ """
+ Calls ctypes.util.find_library() if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ filename = _library_names.get(name)
+ if filename is None:
+ if ctypes is not None:
+ filename = ctypes.util.find_library(name)
+ if filename is None:
+ filename = False
+ _library_names[name] = filename
+
+ if filename is False:
+ return None
+ return filename
+
+_library_handles = {}
+
+def LoadLibrary(name):
+ """
+ Calls ctypes.cdll.LoadLibrary(name) if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ handle = _library_handles.get(name)
+
+ if handle is None and ctypes is not None:
+ handle = ctypes.CDLL(name, use_errno=True)
+ _library_handles[name] = handle
+
+ return handle
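A usage sketch; the resolved library name depends on the platform:

    libc_name = find_library('c')   # e.g. 'libc.so.6' on glibc systems
    libc = LoadLibrary(libc_name) if libc_name else None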
diff --git a/pym/portage/util/_desktop_entry.py b/pym/portage/util/_desktop_entry.py
index 790178013..0b4954735 100644
--- a/pym/portage/util/_desktop_entry.py
+++ b/pym/portage/util/_desktop_entry.py
@@ -1,7 +1,8 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import re
import subprocess
import sys
@@ -10,7 +11,9 @@ try:
except ImportError:
from ConfigParser import Error as ConfigParserError, RawConfigParser
+import portage
from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.util import writemsg
def parse_desktop_entry(path):
"""
@@ -31,45 +34,71 @@ def parse_desktop_entry(path):
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
- read_file(f)
+ content = f.read()
+
+ # In Python 3.2, read_file does not support bytes in file names
+ # (see bug #429544), so use StringIO to hide the file name.
+ read_file(io.StringIO(content))
return parser
-_ignored_service_errors = (
- 'error: required key "Name" in group "Desktop Entry" is not present',
- 'error: key "Actions" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
- 'error: key "MimeType" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
+_trivial_warnings = re.compile(r' looks redundant with value ')
+
+_ignored_errors = (
+ # Ignore error for emacs.desktop:
+ # https://bugs.freedesktop.org/show_bug.cgi?id=35844#c6
+ 'error: (will be fatal in the future): value "TextEditor" in key "Categories" in group "Desktop Entry" requires another category to be present among the following categories: Utility',
+ 'warning: key "Encoding" in group "Desktop Entry" is deprecated'
+)
+
+_ShowIn_exemptions = (
+ # See bug #480586.
+ 'contains an unregistered value "Pantheon"',
)
def validate_desktop_entry(path):
args = ["desktop-file-validate", path]
- if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
- # Python 3.1 does not support bytes in Popen args.
- args = [_unicode_encode(x, errors='strict') for x in args]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x, errors='strict') for x in args]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
proc.wait()
if output_lines:
- try:
- desktop_entry = parse_desktop_entry(path)
- except ConfigParserError:
- pass
- else:
- if desktop_entry.has_section("Desktop Entry"):
- try:
- entry_type = desktop_entry.get("Desktop Entry", "Type")
- except ConfigParserError:
- pass
- else:
- if entry_type == "Service":
- # Filter false errors for Type=Service (bug #414125).
- filtered_output = []
- for line in output_lines:
- if line[len(path)+2:] in _ignored_service_errors:
- continue
- filtered_output.append(line)
- output_lines = filtered_output
+ filtered_output = []
+ for line in output_lines:
+ msg = line[len(path)+2:]
+ # "hint:" output is new in desktop-file-utils-0.21
+ if msg.startswith('hint: ') or msg in _ignored_errors:
+ continue
+ if 'for key "NotShowIn" in group "Desktop Entry"' in msg or \
+ 'for key "OnlyShowIn" in group "Desktop Entry"' in msg:
+ exempt = False
+ for s in _ShowIn_exemptions:
+ if s in msg:
+ exempt = True
+ break
+ if exempt:
+ continue
+ filtered_output.append(line)
+ output_lines = filtered_output
+
+ if output_lines:
+ output_lines = [line for line in output_lines
+ if _trivial_warnings.search(line) is None]
return output_lines
+
+if __name__ == "__main__":
+ for arg in sys.argv[1:]:
+ for line in validate_desktop_entry(arg):
+ writemsg(line + "\n", noiselevel=-1)
diff --git a/pym/portage/util/_dyn_libs/LinkageMapELF.py b/pym/portage/util/_dyn_libs/LinkageMapELF.py
index e71ac735a..3920f9487 100644
--- a/pym/portage/util/_dyn_libs/LinkageMapELF.py
+++ b/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -26,7 +26,7 @@ class LinkageMapELF(object):
_soname_map_class = slot_dict_class(
("consumers", "providers"), prefix="")
- class _obj_properies_class(object):
+ class _obj_properties_class(object):
__slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
"owner",)
@@ -316,7 +316,7 @@ class LinkageMapELF(object):
myprops = obj_properties.get(obj_key)
if myprops is None:
indexed = False
- myprops = self._obj_properies_class(
+ myprops = self._obj_properties_class(
arch, needed, path, soname, [], owner)
obj_properties[obj_key] = myprops
# All object paths are added into the obj_properties tuple.
@@ -678,7 +678,7 @@ class LinkageMapELF(object):
rValue[soname].add(provider)
return rValue
- def findConsumers(self, obj, exclude_providers=None):
+ def findConsumers(self, obj, exclude_providers=None, greedy=True):
"""
Find consumers of an object or object key.
@@ -715,6 +715,9 @@ class LinkageMapELF(object):
'/usr/lib/libssl.so.0.9.8'), and return True if the library is
owned by a provider which is planned for removal.
@type exclude_providers: collection
+ @param greedy: If True, then include consumers that are satisfied
+ by alternative providers, otherwise omit them. Default is True.
+ @type greedy: Boolean
@rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
	@return: a set of paths of consumer objects which link against the
		given object's soname.
@@ -769,16 +772,19 @@ class LinkageMapELF(object):
defpath_keys = set(self._path_key(x) for x in self._defpath)
satisfied_consumer_keys = set()
if soname_node is not None:
- if exclude_providers is not None:
+ if exclude_providers is not None or not greedy:
relevant_dir_keys = set()
for provider_key in soname_node.providers:
+ if not greedy and provider_key == obj_key:
+ continue
provider_objs = self._obj_properties[provider_key].alt_paths
for p in provider_objs:
provider_excluded = False
- for excluded_provider_isowner in exclude_providers:
- if excluded_provider_isowner(p):
- provider_excluded = True
- break
+ if exclude_providers is not None:
+ for excluded_provider_isowner in exclude_providers:
+ if excluded_provider_isowner(p):
+ provider_excluded = True
+ break
if not provider_excluded:
# This provider is not excluded. It will
# satisfy a consumer of this soname if it
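
A toy model (not the LinkageMapELF API) of the greedy=False semantics added above: a consumer is only reported for an object if no alternative provider of the same soname remains. All names below are hypothetical.

    providers = {"libssl.so.1.0": {"/usr/lib/libssl.so.1.0.0",
        "/preserved/libssl.so.1.0.0"}}
    consumers = {"libssl.so.1.0": {"/usr/bin/wget"}}

    def find_consumers(obj, soname, greedy=True):
        remaining = set(providers[soname])
        if not greedy:
            remaining.discard(obj)  # skip the object itself, as in the patch
        if not greedy and remaining:
            # An alternative provider still satisfies the consumers.
            return set()
        return set(consumers[soname])

    print(find_consumers("/preserved/libssl.so.1.0.0",
        "libssl.so.1.0", greedy=False))  # set()
    print(find_consumers("/preserved/libssl.so.1.0.0",
        "libssl.so.1.0"))  # {'/usr/bin/wget'}
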
diff --git a/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py b/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
index 4bc64dbfe..a422ffefd 100644
--- a/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
+++ b/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -25,6 +25,7 @@ from portage.versions import cpv_getkey
from portage.locks import lockfile, unlockfile
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class PreservedLibsRegistry(object):
diff --git a/pym/portage/util/_dyn_libs/display_preserved_libs.py b/pym/portage/util/_dyn_libs/display_preserved_libs.py
new file mode 100644
index 000000000..b16478d2b
--- /dev/null
+++ b/pym/portage/util/_dyn_libs/display_preserved_libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage.output import colorize
+
+def display_preserved_libs(vardb):
+
+ MAX_DISPLAY = 3
+
+ plibdata = vardb._plib_registry.getPreservedLibs()
+ linkmap = vardb._linkmap
+ consumer_map = {}
+ owners = {}
+
+ try:
+ linkmap.rebuild()
+ except portage.exception.CommandNotFound as e:
+ portage.util.writemsg_level("!!! Command Not Found: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ search_for_owners = set()
+ for cpv in plibdata:
+ internal_plib_keys = set(linkmap._obj_key(f) \
+ for f in plibdata[cpv])
+ for f in plibdata[cpv]:
+ if f in consumer_map:
+ continue
+ consumers = []
+ for c in linkmap.findConsumers(f, greedy=False):
+ # Filter out any consumers that are also preserved libs
+ # belonging to the same package as the provider.
+ if linkmap._obj_key(c) not in internal_plib_keys:
+ consumers.append(c)
+ consumers.sort()
+ consumer_map[f] = consumers
+ search_for_owners.update(consumers[:MAX_DISPLAY+1])
+
+ owners = {}
+ for f in search_for_owners:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ owners[f] = owner_set
+
+ all_preserved = set()
+ all_preserved.update(*plibdata.values())
+
+ for cpv in plibdata:
+ print(colorize("WARN", ">>>") + " package: %s" % cpv)
+ samefile_map = {}
+ for f in plibdata[cpv]:
+ obj_key = linkmap._obj_key(f)
+ alt_paths = samefile_map.get(obj_key)
+ if alt_paths is None:
+ alt_paths = set()
+ samefile_map[obj_key] = alt_paths
+ alt_paths.add(f)
+
+ for alt_paths in samefile_map.values():
+ alt_paths = sorted(alt_paths)
+ for p in alt_paths:
+ print(colorize("WARN", " * ") + " - %s" % (p,))
+ f = alt_paths[0]
+ consumers = consumer_map.get(f, [])
+ consumers_non_preserved = [c for c in consumers
+ if c not in all_preserved]
+ if consumers_non_preserved:
+ # Filter the consumers that are preserved libraries, since
+ # they don't need to be rebuilt (see bug #461908).
+ consumers = consumers_non_preserved
+
+ if len(consumers) == MAX_DISPLAY + 1:
+ # Display 1 extra consumer, instead of displaying
+ # "used by 1 other files".
+ max_display = MAX_DISPLAY + 1
+ else:
+ max_display = MAX_DISPLAY
+ for c in consumers[:max_display]:
+ if c in all_preserved:
+ # The owner is displayed elsewhere due to having
+ # its libs preserved, so distinguish this special
+ # case (see bug #461908).
+ owners_desc = "preserved"
+ else:
+ owners_desc = ", ".join(x.mycpv for x in owners.get(c, []))
+ print(colorize("WARN", " * ") + " used by %s (%s)" % \
+ (c, owners_desc))
+ if len(consumers) > max_display:
+ print(colorize("WARN", " * ") + " used by %d other files" %
+ (len(consumers) - max_display))
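
A standalone illustration of the truncation rule above: when exactly one consumer would be hidden, it is displayed outright rather than summarized as "used by 1 other files".

    MAX_DISPLAY = 3

    def shown_count(consumers):
        if len(consumers) == MAX_DISPLAY + 1:
            return MAX_DISPLAY + 1  # show the single extra consumer
        return MAX_DISPLAY

    for n in (3, 4, 5):
        consumers = ["consumer%d" % i for i in range(n)]
        shown = min(shown_count(consumers), n)
        print("%d consumers -> %d shown, %d summarized"
            % (n, shown, n - shown))
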
diff --git a/pym/portage/util/_eventloop/EventLoop.py b/pym/portage/util/_eventloop/EventLoop.py
index bbbce5261..9ffcc74d9 100644
--- a/pym/portage/util/_eventloop/EventLoop.py
+++ b/pym/portage/util/_eventloop/EventLoop.py
@@ -1,20 +1,37 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
-import fcntl
import logging
import os
import select
import signal
+import sys
import time
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
from portage.util import writemsg_level
from ..SlotObject import SlotObject
from .PollConstants import PollConstants
from .PollSelectAdapter import PollSelectAdapter
class EventLoop(object):
+ """
+ An event loop, intended to be compatible with the GLib event loop.
+ Call the iteration method in order to execute one iteration of the
+ loop. The idle_add and timeout_add methods serve as thread-safe
+ means to interact with the loop's thread.
+ """
supports_multiprocessing = True
@@ -43,7 +60,9 @@ class EventLoop(object):
that global_event_loop does not need constructor arguments)
@type main: bool
"""
- self._use_signal = main
+ self._use_signal = main and fcntl is not None
+ self._thread_rlock = threading.RLock()
+ self._thread_condition = threading.Condition(self._thread_rlock)
self._poll_event_queue = []
self._poll_event_handlers = {}
self._poll_event_handler_ids = {}
@@ -52,14 +71,48 @@ class EventLoop(object):
self._idle_callbacks = {}
self._timeout_handlers = {}
self._timeout_interval = None
- self._poll_obj = create_poll_instance()
- self.IO_ERR = PollConstants.POLLERR
- self.IO_HUP = PollConstants.POLLHUP
- self.IO_IN = PollConstants.POLLIN
- self.IO_NVAL = PollConstants.POLLNVAL
- self.IO_OUT = PollConstants.POLLOUT
- self.IO_PRI = PollConstants.POLLPRI
+ self._poll_obj = None
+ try:
+ select.epoll
+ except AttributeError:
+ pass
+ else:
+ try:
+ epoll_obj = select.epoll()
+ except IOError:
+ # This happens with Linux 2.4 kernels:
+ # IOError: [Errno 38] Function not implemented
+ pass
+ else:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(epoll_obj.fileno(), fcntl.F_SETFD,
+ fcntl.fcntl(epoll_obj.fileno(),
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._poll_obj = _epoll_adapter(epoll_obj)
+ self.IO_ERR = select.EPOLLERR
+ self.IO_HUP = select.EPOLLHUP
+ self.IO_IN = select.EPOLLIN
+ self.IO_NVAL = 0
+ self.IO_OUT = select.EPOLLOUT
+ self.IO_PRI = select.EPOLLPRI
+
+ if self._poll_obj is None:
+ self._poll_obj = create_poll_instance()
+ self.IO_ERR = PollConstants.POLLERR
+ self.IO_HUP = PollConstants.POLLHUP
+ self.IO_IN = PollConstants.POLLIN
+ self.IO_NVAL = PollConstants.POLLNVAL
+ self.IO_OUT = PollConstants.POLLOUT
+ self.IO_PRI = PollConstants.POLLPRI
self._child_handlers = {}
self._sigchld_read = None
@@ -67,6 +120,14 @@ class EventLoop(object):
self._sigchld_src_id = None
self._pid = os.getpid()
+ def _new_source_id(self):
+ """
+ Generate a new source id. This method is thread-safe.
+ """
+ with self._thread_rlock:
+ self._event_handler_id += 1
+ return self._event_handler_id
+
def _poll(self, timeout=None):
"""
All poll() calls pass through here. The poll events
@@ -85,9 +146,11 @@ class EventLoop(object):
try:
self._poll_event_queue.extend(self._poll_obj.poll(timeout))
break
- except select.error as e:
+ except (IOError, select.error) as e:
# Silently handle EINTR, which is normal when we have
- # received a signal such as SIGINT.
+ # received a signal such as SIGINT (epoll objects may
+ # raise IOError rather than select.error, at least in
+ # Python 3.2).
if not (e.args and e.args[0] == errno.EINTR):
writemsg_level("\n!!! select error: %s\n" % (e,),
level=logging.ERROR, noiselevel=-1)
@@ -101,7 +164,19 @@ class EventLoop(object):
def iteration(self, *args):
"""
- Like glib.MainContext.iteration(), runs a single iteration.
+ Like glib.MainContext.iteration(), runs a single iteration. In order
+ to avoid blocking forever when may_block is True (the default),
+ callers must be careful to ensure that at least one of the following
+ conditions is met:
+ 1) An event source or timeout is registered which is guaranteed
+ to trigger at least on event (a call to an idle function
+			to trigger at least one event (a call to an idle function
+ causes it to stop being called)
+ 2) Another thread is guaranteed to call one of the thread-safe
+ methods which notify iteration to stop waiting (such as
+ idle_add or timeout_add).
+ These rules ensure that iteration is able to block until an event
+ arrives, without doing any busy waiting that would waste CPU time.
@type may_block: bool
@param may_block: if True the call may block waiting for an event
(default is True).
@@ -120,23 +195,32 @@ class EventLoop(object):
event_queue = self._poll_event_queue
event_handlers = self._poll_event_handlers
events_handled = 0
+ timeouts_checked = False
if not event_handlers:
- if self._run_timeouts():
- events_handled += 1
- if not event_handlers:
- if not events_handled and may_block and \
- self._timeout_interval is not None:
+ with self._thread_condition:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if not event_handlers and not events_handled and may_block:
# Block so that we don't waste cpu time by looping too
# quickly. This makes EventLoop useful for code that needs
# to wait for timeout callbacks regardless of whether or
# not any IO handlers are currently registered.
- try:
- self._poll(timeout=self._timeout_interval)
- except StopIteration:
- pass
+ timeout = self._get_poll_timeout()
+ if timeout is None:
+ wait_timeout = None
+ else:
+ wait_timeout = float(timeout) / 1000
+ # NOTE: In order to avoid a possible infinite wait when
+ # wait_timeout is None, the previous _run_timeouts()
+ # call must have returned False *with* _thread_condition
+ # acquired. Otherwise, we would risk going to sleep after
+ # our only notify event has already passed.
+ self._thread_condition.wait(wait_timeout)
if self._run_timeouts():
events_handled += 1
+ timeouts_checked = True
# If any timeouts have executed, then return immediately,
# in order to minimize latency in termination of iteration
@@ -147,14 +231,18 @@ class EventLoop(object):
if not event_queue:
if may_block:
- if self._child_handlers:
- if self._timeout_interval is None:
- timeout = self._sigchld_interval
- else:
- timeout = min(self._sigchld_interval,
- self._timeout_interval)
- else:
- timeout = self._timeout_interval
+ timeout = self._get_poll_timeout()
+
+ # Avoid blocking for IO if there are any timeout
+ # or idle callbacks available to process.
+ if timeout != 0 and not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if events_handled:
+ # Minimize latency for loops controlled
+ # by timeout or idle callback events.
+ timeout = 0
else:
timeout = 0
@@ -170,17 +258,37 @@ class EventLoop(object):
while event_queue:
events_handled += 1
f, event = event_queue.pop()
- x = event_handlers[f]
+ try:
+ x = event_handlers[f]
+ except KeyError:
+ # This is known to be triggered by the epoll
+ # implementation in qemu-user-1.2.2, and appears
+ # to be harmless (see bug #451326).
+ continue
if not x.callback(f, event, *x.args):
self.source_remove(x.source_id)
- # Run timeouts last, in order to minimize latency in
- # termination of iteration loops that they may control.
- if self._run_timeouts():
- events_handled += 1
+ if not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
return bool(events_handled)
+ def _get_poll_timeout(self):
+
+ with self._thread_rlock:
+ if self._child_handlers:
+ if self._timeout_interval is None:
+ timeout = self._sigchld_interval
+ else:
+ timeout = min(self._sigchld_interval,
+ self._timeout_interval)
+ else:
+ timeout = self._timeout_interval
+
+ return timeout
+
def child_watch_add(self, pid, callback, data=None):
"""
Like glib.child_watch_add(), sets callback to be called with the
@@ -201,18 +309,29 @@ class EventLoop(object):
@rtype: int
@return: an integer ID
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
+ source_id = self._new_source_id()
self._child_handlers[source_id] = self._child_callback_class(
callback=callback, data=data, pid=pid, source_id=source_id)
if self._use_signal:
if self._sigchld_read is None:
self._sigchld_read, self._sigchld_write = os.pipe()
+
fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
fcntl.fcntl(self._sigchld_read,
fcntl.F_GETFL) | os.O_NONBLOCK)
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._sigchld_read, fcntl.F_SETFD,
+ fcntl.fcntl(self._sigchld_read,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
# The IO watch is dynamically registered and unregistered as
# needed, since we don't want to consider it as a valid source
# of events when there are no child listeners. It's important
@@ -276,22 +395,25 @@ class EventLoop(object):
"""
Like glib.idle_add(), if callback returns False it is
automatically removed from the list of event sources and will
- not be called again.
+ not be called again. This method is thread-safe.
@type callback: callable
@param callback: a function to call
@rtype: int
@return: an integer ID
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
- self._idle_callbacks[source_id] = self._idle_callback_class(
- args=args, callback=callback, source_id=source_id)
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._idle_callbacks[source_id] = self._idle_callback_class(
+ args=args, callback=callback, source_id=source_id)
+ self._thread_condition.notify()
return source_id
def _run_idle_callbacks(self):
+ # assumes caller has acquired self._thread_rlock
if not self._idle_callbacks:
- return
+ return False
+ state_change = 0
		# Iterate over our local list, since self._idle_callbacks can be
		# modified during the execution of these callbacks.
for x in list(self._idle_callbacks.values()):
@@ -304,26 +426,32 @@ class EventLoop(object):
x.calling = True
try:
if not x.callback(*x.args):
+ state_change += 1
self.source_remove(x.source_id)
finally:
x.calling = False
+ return bool(state_change)
+
def timeout_add(self, interval, function, *args):
"""
Like glib.timeout_add(), interval argument is the number of
milliseconds between calls to your function, and your function
should return False to stop being called, or True to continue
being called. Any additional positional arguments given here
- are passed to your function when it's called.
+ are passed to your function when it's called. This method is
+ thread-safe.
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
- self._timeout_handlers[source_id] = \
- self._timeout_handler_class(
- interval=interval, function=function, args=args,
- source_id=source_id, timestamp=time.time())
- if self._timeout_interval is None or self._timeout_interval > interval:
- self._timeout_interval = interval
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._timeout_handlers[source_id] = \
+ self._timeout_handler_class(
+ interval=interval, function=function, args=args,
+ source_id=source_id, timestamp=time.time())
+ if self._timeout_interval is None or \
+ self._timeout_interval > interval:
+ self._timeout_interval = interval
+ self._thread_condition.notify()
return source_id
def _run_timeouts(self):
@@ -333,37 +461,40 @@ class EventLoop(object):
if self._poll_child_processes():
calls += 1
- self._run_idle_callbacks()
-
- if not self._timeout_handlers:
- return bool(calls)
+ with self._thread_rlock:
- ready_timeouts = []
- current_time = time.time()
- for x in self._timeout_handlers.values():
- elapsed_seconds = current_time - x.timestamp
- # elapsed_seconds < 0 means the system clock has been adjusted
- if elapsed_seconds < 0 or \
- (x.interval - 1000 * elapsed_seconds) <= 0:
- ready_timeouts.append(x)
+ if self._run_idle_callbacks():
+ calls += 1
- # Iterate of our local list, since self._timeout_handlers can be
- # modified during the exection of these callbacks.
- for x in ready_timeouts:
- if x.source_id not in self._timeout_handlers:
- # it got cancelled while executing another timeout
- continue
- if x.calling:
- # don't call it recursively
- continue
- calls += 1
- x.calling = True
- try:
- x.timestamp = time.time()
- if not x.function(*x.args):
- self.source_remove(x.source_id)
- finally:
- x.calling = False
+ if not self._timeout_handlers:
+ return bool(calls)
+
+ ready_timeouts = []
+ current_time = time.time()
+ for x in self._timeout_handlers.values():
+ elapsed_seconds = current_time - x.timestamp
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds < 0 or \
+ (x.interval - 1000 * elapsed_seconds) <= 0:
+ ready_timeouts.append(x)
+
+			# Iterate over our local list, since self._timeout_handlers can be
+			# modified during the execution of these callbacks.
+ for x in ready_timeouts:
+ if x.source_id not in self._timeout_handlers:
+ # it got cancelled while executing another timeout
+ continue
+ if x.calling:
+ # don't call it recursively
+ continue
+ calls += 1
+ x.calling = True
+ try:
+ x.timestamp = time.time()
+ if not x.function(*x.args):
+ self.source_remove(x.source_id)
+ finally:
+ x.calling = False
return bool(calls)
@@ -385,8 +516,7 @@ class EventLoop(object):
"""
if f in self._poll_event_handlers:
raise AssertionError("fd %d is already registered" % f)
- self._event_handler_id += 1
- source_id = self._event_handler_id
+ source_id = self._new_source_id()
self._poll_event_handler_ids[source_id] = f
self._poll_event_handlers[f] = self._io_handler_class(
args=args, callback=callback, f=f, source_id=source_id)
@@ -406,18 +536,21 @@ class EventLoop(object):
self.source_remove(self._sigchld_src_id)
self._sigchld_src_id = None
return True
- idle_callback = self._idle_callbacks.pop(reg_id, None)
- if idle_callback is not None:
- return True
- timeout_handler = self._timeout_handlers.pop(reg_id, None)
- if timeout_handler is not None:
- if timeout_handler.interval == self._timeout_interval:
- if self._timeout_handlers:
- self._timeout_interval = \
- min(x.interval for x in self._timeout_handlers.values())
- else:
- self._timeout_interval = None
- return True
+
+ with self._thread_rlock:
+ idle_callback = self._idle_callbacks.pop(reg_id, None)
+ if idle_callback is not None:
+ return True
+ timeout_handler = self._timeout_handlers.pop(reg_id, None)
+ if timeout_handler is not None:
+ if timeout_handler.interval == self._timeout_interval:
+ if self._timeout_handlers:
+ self._timeout_interval = min(x.interval
+ for x in self._timeout_handlers.values())
+ else:
+ self._timeout_interval = None
+ return True
+
f = self._poll_event_handler_ids.pop(reg_id, None)
if f is None:
return False
@@ -467,7 +600,12 @@ def can_poll_device():
return _can_poll_device
p = select.poll()
- p.register(dev_null.fileno(), PollConstants.POLLIN)
+ try:
+ p.register(dev_null.fileno(), PollConstants.POLLIN)
+ except TypeError:
+ # Jython: Object 'org.python.core.io.FileIO@f8f175' is not watchable
+ _can_poll_device = False
+ return _can_poll_device
invalid_request = False
for f, event in p.poll():
@@ -488,3 +626,37 @@ def create_poll_instance():
if can_poll_device():
return select.poll()
return PollSelectAdapter()
+
+class _epoll_adapter(object):
+ """
+ Wraps a select.epoll instance in order to make it compatible
+ with select.poll instances. This is necessary since epoll instances
+ interpret timeout arguments differently. Note that the file descriptor
+ that is associated with an epoll instance will close automatically when
+ it is garbage collected, so it's not necessary to close it explicitly.
+ """
+ __slots__ = ('_epoll_obj',)
+
+ def __init__(self, epoll_obj):
+ self._epoll_obj = epoll_obj
+
+ def register(self, fd, *args):
+ self._epoll_obj.register(fd, *args)
+
+ def unregister(self, fd):
+ self._epoll_obj.unregister(fd)
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ timeout = -1
+ if args:
+ timeout = args[0]
+ if timeout is None or timeout < 0:
+ timeout = -1
+ elif timeout != 0:
+ timeout = float(timeout) / 1000
+
+ return self._epoll_obj.poll(timeout)
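
A sketch of the unit conversion performed by _epoll_adapter.poll() above: select.poll() takes its timeout in milliseconds, while select.epoll() takes seconds (and -1 to block indefinitely). The helper name is illustrative.

    def to_epoll_timeout(timeout=None):
        if timeout is None or timeout < 0:
            return -1       # block until an event arrives
        if timeout == 0:
            return 0        # return immediately
        return float(timeout) / 1000  # milliseconds -> seconds

    assert to_epoll_timeout() == -1
    assert to_epoll_timeout(0) == 0
    assert to_epoll_timeout(500) == 0.5
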
diff --git a/pym/portage/util/_eventloop/PollSelectAdapter.py b/pym/portage/util/_eventloop/PollSelectAdapter.py
index 17e63d918..244788c57 100644
--- a/pym/portage/util/_eventloop/PollSelectAdapter.py
+++ b/pym/portage/util/_eventloop/PollSelectAdapter.py
@@ -64,7 +64,7 @@ class PollSelectAdapter(object):
if timeout is not None and timeout < 0:
timeout = None
if timeout is not None:
- select_args.append(timeout / 1000)
+ select_args.append(float(timeout) / 1000)
select_events = select.select(*select_args)
poll_events = []
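
The float() coercion matters because "timeout / 1000" is integer division in Python 2 when timeout is an int, so a 500 ms timeout became 0 and select() returned immediately instead of waiting.

    timeout = 500
    print(timeout // 1000)        # 0, what Python 2's "/" produced for ints
    print(float(timeout) / 1000)  # 0.5, the intended value in seconds
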
diff --git a/pym/portage/util/_get_vm_info.py b/pym/portage/util/_get_vm_info.py
new file mode 100644
index 000000000..e8ad93805
--- /dev/null
+++ b/pym/portage/util/_get_vm_info.py
@@ -0,0 +1,80 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import platform
+import subprocess
+
+from portage import _unicode_decode
+
+def get_vm_info():
+
+ vm_info = {}
+
+ if platform.system() == 'Linux':
+ try:
+ proc = subprocess.Popen(["free"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split()
+ if len(line) < 2:
+ continue
+ if line[0] == "Mem:":
+ try:
+ vm_info["ram.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["ram.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+ elif line[0] == "Swap:":
+ try:
+ vm_info["swap.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["swap.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+
+ else:
+
+ try:
+ proc = subprocess.Popen(["sysctl", "-a"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split(":", 1)
+ if len(line) != 2:
+ continue
+ line[1] = line[1].strip()
+ if line[0] == "hw.physmem":
+ try:
+ vm_info["ram.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "vm.swap_total":
+ try:
+ vm_info["swap.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "Free Memory Pages":
+ if line[1][-1] == "K":
+ try:
+ vm_info["ram.free"] = int(line[1][:-1]) * 1024
+ except ValueError:
+ pass
+
+ return vm_info
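
A self-contained sketch of the "free" parsing above, run against canned output (the numbers are made up); free(1) reports KiB, hence the factor of 1024.

    sample = """\
                 total       used       free     shared    buffers     cached
    Mem:       8057996    6174684    1883312          0     513488    2701316
    Swap:      2097148          0    2097148
    """
    vm_info = {}
    for line in sample.splitlines():
        fields = line.split()
        if len(fields) < 2:
            continue
        if fields[0] == "Mem:":
            vm_info["ram.total"] = int(fields[1]) * 1024
            if len(fields) > 3:
                vm_info["ram.free"] = int(fields[3]) * 1024
        elif fields[0] == "Swap:":
            vm_info["swap.total"] = int(fields[1]) * 1024
            if len(fields) > 3:
                vm_info["swap.free"] = int(fields[3]) * 1024
    print(vm_info)
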
diff --git a/pym/portage/util/_info_files.py b/pym/portage/util/_info_files.py
new file mode 100644
index 000000000..fabf74b0f
--- /dev/null
+++ b/pym/portage/util/_info_files.py
@@ -0,0 +1,138 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import re
+import stat
+import subprocess
+
+import portage
+from portage import os
+
+def chk_updated_info_files(root, infodirs, prev_mtimes):
+
+ if os.path.exists("/usr/bin/install-info"):
+ out = portage.output.EOutput()
+ regen_infodirs = []
+ for z in infodirs:
+ if z == '':
+ continue
+ inforoot = portage.util.normalize_path(root + z)
+ if os.path.isdir(inforoot) and \
+ not [x for x in os.listdir(inforoot) \
+ if x.startswith('.keepinfodir')]:
+ infomtime = os.stat(inforoot)[stat.ST_MTIME]
+ if inforoot not in prev_mtimes or \
+ prev_mtimes[inforoot] != infomtime:
+ regen_infodirs.append(inforoot)
+
+ if not regen_infodirs:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("GNU info directory index is up-to-date.")
+ else:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("Regenerating GNU info directory index...")
+
+ dir_extensions = ("", ".gz", ".bz2")
+ icount = 0
+ badcount = 0
+ errmsg = ""
+ for inforoot in regen_infodirs:
+ if inforoot == '':
+ continue
+
+ if not os.path.isdir(inforoot) or \
+ not os.access(inforoot, os.W_OK):
+ continue
+
+ file_list = os.listdir(inforoot)
+ file_list.sort()
+ dir_file = os.path.join(inforoot, "dir")
+ moved_old_dir = False
+ processed_count = 0
+ for x in file_list:
+ if x.startswith(".") or \
+ os.path.isdir(os.path.join(inforoot, x)):
+ continue
+ if x.startswith("dir"):
+ skip = False
+ for ext in dir_extensions:
+ if x == "dir" + ext or \
+ x == "dir" + ext + ".old":
+ skip = True
+ break
+ if skip:
+ continue
+ if processed_count == 0:
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext, dir_file + ext + ".old")
+ moved_old_dir = True
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ processed_count += 1
+ try:
+ proc = subprocess.Popen(
+ ['/usr/bin/install-info',
+ '--dir-file=%s' % os.path.join(inforoot, "dir"),
+ os.path.join(inforoot, x)],
+ env=dict(os.environ, LANG="C", LANGUAGE="C"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myso = None
+ else:
+ myso = portage._unicode_decode(
+ proc.communicate()[0]).rstrip("\n")
+ proc.wait()
+ existsstr = "already exists, for file `"
+ if myso:
+ if re.search(existsstr, myso):
+ # Already exists... Don't increment the count for this.
+ pass
+ elif myso[:44] == "install-info: warning: no info dir entry in ":
+ # This info file doesn't contain a DIR-header: install-info produces this
+ # (harmless) warning (the --quiet switch doesn't seem to work).
+ # Don't increment the count for this.
+ pass
+ else:
+ badcount += 1
+ errmsg += myso + "\n"
+ icount += 1
+
+ if moved_old_dir and not os.path.exists(dir_file):
+ # We didn't generate a new dir file, so put the old file
+ # back where it was originally found.
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext + ".old", dir_file + ext)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # Clean dir.old cruft so that they don't prevent
+ # unmerge of otherwise empty directories.
+ for ext in dir_extensions:
+ try:
+ os.unlink(dir_file + ext + ".old")
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+			# Update mtime so we can potentially avoid regenerating.
+ prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
+
+ if badcount:
+ out.eerror("Processed %d info files; %d errors." % \
+ (icount, badcount))
+ portage.util.writemsg_level(errmsg,
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ if icount > 0 and portage.util.noiselimit >= 0:
+ out.einfo("Processed %d info files." % (icount,))
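
A hedged usage sketch: chk_updated_info_files() is normally driven by emerge after merges. Calling it directly requires root privileges and install-info; the argument values here are illustrative only.

    from portage.util._info_files import chk_updated_info_files

    prev_mtimes = {}  # emerge persists this between runs via its mtimedb
    chk_updated_info_files("/", ["/usr/share/info"], prev_mtimes)
    # A second call with the same prev_mtimes dict skips any info
    # directories whose mtimes were recorded above and are unchanged.
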
diff --git a/pym/portage/util/_path.py b/pym/portage/util/_path.py
new file mode 100644
index 000000000..6fbcb438c
--- /dev/null
+++ b/pym/portage/util/_path.py
@@ -0,0 +1,27 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import stat
+
+from portage import os
+from portage.exception import PermissionDenied
+
+def exists_raise_eaccess(path):
+ try:
+ os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return True
+
+def isdir_raise_eaccess(path):
+ try:
+ st = os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return stat.S_ISDIR(st.st_mode)
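
Usage sketch, assuming portage is importable: unlike os.path.isdir(), which masks EACCES as False, isdir_raise_eaccess() surfaces the permission error.

    from portage.exception import PermissionDenied
    from portage.util._path import isdir_raise_eaccess

    try:
        if isdir_raise_eaccess("/root/.ssh"):  # path is illustrative
            print("directory")
    except PermissionDenied as e:
        print("insufficient permissions: %s" % (e,))
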
diff --git a/pym/portage/util/_urlopen.py b/pym/portage/util/_urlopen.py
index 307624bc4..4cfe183b1 100644
--- a/pym/portage/util/_urlopen.py
+++ b/pym/portage/util/_urlopen.py
@@ -1,7 +1,11 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import io
import sys
+from datetime import datetime
+from time import mktime
+from email.utils import formatdate, parsedate
try:
from urllib.request import urlopen as _urlopen
@@ -14,29 +18,75 @@ except ImportError:
import urllib2 as urllib_request
from urllib import splituser as urllib_parse_splituser
-def urlopen(url):
- try:
- return _urlopen(url)
- except SystemExit:
- raise
- except Exception:
- if sys.hexversion < 0x3000000:
- raise
- parse_result = urllib_parse.urlparse(url)
- if parse_result.scheme not in ("http", "https") or \
- not parse_result.username:
- raise
-
- return _new_urlopen(url)
-
-def _new_urlopen(url):
- # This is experimental code for bug #413983.
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+# to account for the difference between the TIMESTAMP of the index's contents
+# and the file's mtime
+TIMESTAMP_TOLERANCE = 5
+
+def urlopen(url, if_modified_since=None):
parse_result = urllib_parse.urlparse(url)
- netloc = urllib_parse_splituser(parse_result.netloc)[1]
- url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
- password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
- if parse_result.username is not None:
- password_manager.add_password(None, url, parse_result.username, parse_result.password)
- auth_handler = urllib_request.HTTPBasicAuthHandler(password_manager)
- opener = urllib_request.build_opener(auth_handler)
- return opener.open(url)
+ if parse_result.scheme not in ("http", "https"):
+ return _urlopen(url)
+ else:
+ netloc = urllib_parse_splituser(parse_result.netloc)[1]
+ url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
+ password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+ request = urllib_request.Request(url)
+ request.add_header('User-Agent', 'Gentoo Portage')
+ if if_modified_since:
+ request.add_header('If-Modified-Since', _timestamp_to_http(if_modified_since))
+ if parse_result.username is not None:
+ password_manager.add_password(None, url, parse_result.username, parse_result.password)
+ auth_handler = CompressedResponseProcessor(password_manager)
+ opener = urllib_request.build_opener(auth_handler)
+ hdl = opener.open(request)
+ if hdl.headers.get('last-modified', ''):
+ try:
+ add_header = hdl.headers.add_header
+ except AttributeError:
+ # Python 2
+ add_header = hdl.headers.addheader
+ add_header('timestamp', _http_to_timestamp(hdl.headers.get('last-modified')))
+ return hdl
+
+def _timestamp_to_http(timestamp):
+ dt = datetime.fromtimestamp(float(long(timestamp)+TIMESTAMP_TOLERANCE))
+ stamp = mktime(dt.timetuple())
+ return formatdate(timeval=stamp, localtime=False, usegmt=True)
+
+def _http_to_timestamp(http_datetime_string):
+ tuple = parsedate(http_datetime_string)
+ timestamp = mktime(tuple)
+ return str(long(timestamp))
+
+class CompressedResponseProcessor(urllib_request.HTTPBasicAuthHandler):
+ # Handler for compressed responses.
+
+ def http_request(self, req):
+ req.add_header('Accept-Encoding', 'bzip2,gzip,deflate')
+ return req
+ https_request = http_request
+
+ def http_response(self, req, response):
+ decompressed = None
+ if response.headers.get('content-encoding') == 'bzip2':
+ import bz2
+ decompressed = io.BytesIO(bz2.decompress(response.read()))
+ elif response.headers.get('content-encoding') == 'gzip':
+ from gzip import GzipFile
+ decompressed = GzipFile(fileobj=io.BytesIO(response.read()), mode='r')
+ elif response.headers.get('content-encoding') == 'deflate':
+ import zlib
+ try:
+ decompressed = io.BytesIO(zlib.decompress(response.read()))
+ except zlib.error: # they ignored RFC1950
+ decompressed = io.BytesIO(zlib.decompress(response.read(), -zlib.MAX_WBITS))
+ if decompressed:
+ old_response = response
+ response = urllib_request.addinfourl(decompressed, old_response.headers, old_response.url, old_response.code)
+ response.msg = old_response.msg
+ return response
+ https_response = http_response
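
A round-trip sketch of the timestamp helpers above: a Unix timestamp is widened by TIMESTAMP_TOLERANCE seconds and rendered as an HTTP date, then parsed back. Note that parsedate() yields a naive struct_time and mktime() applies the local timezone, mirroring the module's helpers; run with TZ=UTC for an exact round-trip.

    from email.utils import formatdate, parsedate
    from time import mktime

    TIMESTAMP_TOLERANCE = 5

    def timestamp_to_http(timestamp):
        stamp = float(int(timestamp) + TIMESTAMP_TOLERANCE)
        return formatdate(timeval=stamp, localtime=False, usegmt=True)

    def http_to_timestamp(http_datetime_string):
        return str(int(mktime(parsedate(http_datetime_string))))

    http_date = timestamp_to_http(1388531675)
    print(http_date)                     # an RFC 2822 date ending in "GMT"
    print(http_to_timestamp(http_date))  # the input plus the tolerance
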
diff --git a/pym/portage/util/digraph.py b/pym/portage/util/digraph.py
index f3ae658c9..4a9cb43b6 100644
--- a/pym/portage/util/digraph.py
+++ b/pym/portage/util/digraph.py
@@ -1,12 +1,13 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['digraph']
from collections import deque
import sys
-from portage import _unicode_decode
from portage.util import writemsg
class digraph(object):
@@ -16,24 +17,24 @@ class digraph(object):
def __init__(self):
"""Create an empty digraph"""
-
+
# { node : ( { child : priority } , { parent : priority } ) }
self.nodes = {}
self.order = []
def add(self, node, parent, priority=0):
"""Adds the specified node with the specified parent.
-
+
If the dep is a soft-dep and the node already has a hard
relationship to the parent, the relationship is left as hard."""
-
+
if node not in self.nodes:
self.nodes[node] = ({}, {}, node)
self.order.append(node)
-
+
if not parent:
return
-
+
if parent not in self.nodes:
self.nodes[parent] = ({}, {}, parent)
self.order.append(parent)
@@ -46,19 +47,29 @@ class digraph(object):
priorities.append(priority)
priorities.sort()
+ def discard(self, node):
+ """
+		Like remove(), except it doesn't raise KeyError if the
+ node doesn't exist.
+ """
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
def remove(self, node):
"""Removes the specified node from the digraph, also removing
		any ties to other nodes in the digraph. Raises KeyError if the
node doesn't exist."""
-
+
if node not in self.nodes:
raise KeyError(node)
-
+
for parent in self.nodes[node][1]:
del self.nodes[parent][0][node]
for child in self.nodes[node][0]:
del self.nodes[child][1][node]
-
+
del self.nodes[node]
self.order.remove(node)
@@ -157,10 +168,10 @@ class digraph(object):
def leaf_nodes(self, ignore_priority=None):
"""Return all nodes that have no children
-
+
If ignore_soft_deps is True, soft deps are not counted as
children in calculations."""
-
+
leaf_nodes = []
if ignore_priority is None:
for node in self.order:
@@ -191,10 +202,10 @@ class digraph(object):
def root_nodes(self, ignore_priority=None):
"""Return all nodes that have no parents.
-
+
If ignore_soft_deps is True, soft deps are not counted as
parents in calculations."""
-
+
root_nodes = []
if ignore_priority is None:
for node in self.order:
@@ -272,18 +283,17 @@ class digraph(object):
def debug_print(self):
def output(s):
writemsg(s, noiselevel=-1)
- # Use _unicode_decode() to force unicode format
+ # Use unicode_literals to force unicode format
# strings for python-2.x safety, ensuring that
# node.__unicode__() is used when necessary.
for node in self.nodes:
- output(_unicode_decode("%s ") % (node,))
+ output("%s " % (node,))
if self.nodes[node][0]:
output("depends on\n")
else:
output("(no children)\n")
for child, priorities in self.nodes[node][0].items():
- output(_unicode_decode(" %s (%s)\n") % \
- (child, priorities[-1],))
+ output(" %s (%s)\n" % (child, priorities[-1],))
def bfs(self, start, ignore_priority=None):
if start not in self:
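
A usage sketch for the discard() method added above, assuming portage is importable; the node names are hypothetical.

    from portage.util.digraph import digraph

    g = digraph()
    g.add("app-misc/foo", "app-misc/bar")  # node, parent
    g.discard("sys-apps/absent")           # silently ignored
    try:
        g.remove("sys-apps/absent")
    except KeyError:
        print("remove() raises; discard() does not")
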
diff --git a/pym/portage/util/env_update.py b/pym/portage/util/env_update.py
index ace4077f7..c0a93a83b 100644
--- a/pym/portage/util/env_update.py
+++ b/pym/portage/util/env_update.py
@@ -1,16 +1,17 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['env_update']
import errno
+import glob
import io
import stat
import sys
import time
import portage
-from portage import os, _encodings, _unicode_encode
+from portage import os, _encodings, _unicode_decode, _unicode_encode
from portage.checksum import prelink_capable
from portage.data import ostype
from portage.exception import ParseError
@@ -23,6 +24,7 @@ from portage.dbapi.vartree import vartree
from portage.package.ebuild.config import config
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
@@ -88,7 +90,8 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
eprefix = settings.get("EPREFIX", "")
eprefix_lstrip = eprefix.lstrip(os.sep)
- envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
+ eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep) + os.sep
+ envd_dir = os.path.join(eroot, "etc", "env.d")
ensure_dirs(envd_dir, mode=0o755)
fns = listdir(envd_dir, EmptyOnError=1)
fns.sort()
@@ -164,15 +167,14 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
they won't be overwritten by this dict.update call."""
env.update(myconfig)
- ldsoconf_path = os.path.join(
- target_root, eprefix_lstrip, "etc", "ld.so.conf")
+ ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
try:
myld = io.open(_unicode_encode(ldsoconf_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace')
- myldlines=myld.readlines()
+ myldlines = myld.readlines()
myld.close()
- oldld=[]
+ oldld = []
for x in myldlines:
#each line has at least one char (a newline)
if x[:1] == "#":
@@ -193,20 +195,34 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
myfd.write(x + "\n")
myfd.close()
+ potential_lib_dirs = set()
+ for lib_dir_glob in ('usr/lib*', 'lib*'):
+ x = os.path.join(eroot, lib_dir_glob)
+ for y in glob.glob(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict')):
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if os.path.basename(y) != 'libexec':
+ potential_lib_dirs.add(y[len(eroot):])
+
# Update prelink.conf if we are prelink-enabled
if prelink_capable:
- newprelink = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "prelink.conf"))
+ prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
+ ensure_dirs(prelink_d)
+ newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
newprelink.write("# contents of /etc/env.d directory\n")
- for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
- newprelink.write("-l %s\n" % (x,));
- prelink_paths = []
- prelink_paths += specials.get("LDPATH", [])
- prelink_paths += specials.get("PATH", [])
- prelink_paths += specials.get("PRELINK_PATH", [])
- prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
+ for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
+ newprelink.write('-l /%s\n' % (x,));
+ prelink_paths = set()
+ prelink_paths |= set(specials.get('LDPATH', []))
+ prelink_paths |= set(specials.get('PATH', []))
+ prelink_paths |= set(specials.get('PRELINK_PATH', []))
+ prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
for x in prelink_paths:
if not x:
continue
@@ -227,12 +243,26 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
newprelink.write("-b %s\n" % (x,))
newprelink.close()
+ # Migration code path. If /etc/prelink.conf was generated by us, then
+ # point it to the new stuff until the prelink package re-installs.
+ prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
+ try:
+ with open(_unicode_encode(prelink_conf,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
+ f = atomic_ofstream(prelink_conf)
+ f.write('-c /etc/prelink.conf.d/*.conf\n')
+ f.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
current_time = long(time.time())
mtime_changed = False
+
lib_dirs = set()
- for lib_dir in set(specials["LDPATH"] + \
- ['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
- x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
+ for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
+ x = os.path.join(eroot, lib_dir.lstrip(os.sep))
try:
newldpathtime = os.stat(x)[stat.ST_MTIME]
lib_dirs.add(normalize_path(x))
@@ -292,7 +322,7 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
(target_root,))
os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
- elif ostype in ("FreeBSD","DragonFly"):
+ elif ostype in ("FreeBSD", "DragonFly"):
writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
target_root)
os.system(("cd / ; %s -elf -i " + \
@@ -308,11 +338,10 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
#create /etc/profile.env for bash support
- outfile = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "profile.env"))
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
outfile.write(penvnotice)
- env_keys = [ x for x in env if x != "LDPATH" ]
+ env_keys = [x for x in env if x != "LDPATH"]
env_keys.sort()
for k in env_keys:
v = env[k]
@@ -323,8 +352,7 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
outfile.close()
#create /etc/csh.env for (t)csh support
- outfile = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "csh.env"))
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
outfile.write(cenvnotice)
for x in env_keys:
outfile.write("setenv %s '%s'\n" % (x, env[x]))
diff --git a/pym/portage/util/lafilefixer.py b/pym/portage/util/lafilefixer.py
index 54ff20de5..2562d9a77 100644
--- a/pym/portage/util/lafilefixer.py
+++ b/pym/portage/util/lafilefixer.py
@@ -11,7 +11,7 @@ from portage.exception import InvalidData
# This is a re-implementation of dev-util/lafilefixer-0.5.
# rewrite_lafile() takes the contents of an lafile as a string
# It then parses the dependency_libs and inherited_linker_flags
-# entries.
+# entries.
# We insist on dependency_libs being present. inherited_linker_flags
# is optional.
# There are strict rules about the syntax imposed by libtool's libltdl.
@@ -21,7 +21,7 @@ from portage.exception import InvalidData
# lafilefixer does).
# What it does:
# * Replaces all .la files with absolute paths in dependency_libs with
-# corresponding -l* and -L* entries
+# corresponding -l* and -L* entries
# (/usr/lib64/libfoo.la -> -L/usr/lib64 -lfoo)
# * Moves various flags (see flag_re below) to inherited_linker_flags,
# if such an entry was present.
@@ -36,7 +36,7 @@ from portage.exception import InvalidData
dep_libs_re = re.compile(b"dependency_libs='(?P<value>[^']*)'$")
inh_link_flags_re = re.compile(b"inherited_linker_flags='(?P<value>[^']*)'$")
-#regexes for replacing stuff in -L entries.
+#regexes for replacing stuff in -L entries.
#replace 'X11R6/lib' and 'local/lib' with 'lib', no idea what this is about.
X11_local_sub = re.compile(b"X11R6/lib|local/lib")
#get rid of the '..'
@@ -129,11 +129,11 @@ def rewrite_lafile(contents):
#This allows us to place all -L entries at the beginning
#of 'dependency_libs'.
ladir = dep_libs_entry
-
+
ladir = X11_local_sub.sub(b"lib", ladir)
ladir = pkgconfig_sub1.sub(b"usr", ladir)
ladir = pkgconfig_sub2.sub(b"\g<usrlib>", ladir)
-
+
if ladir not in libladir:
libladir.append(ladir)
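
A standalone sketch of the rewrite described in the comments above, turning an absolute .la path into -L/-l flags; the real module does this with regular expressions over bytes, so bytes are used here as well.

    import os.path

    def la_to_flags(la_path):
        ladir, laname = os.path.split(la_path)
        libname = laname[len(b"lib"):-len(b".la")]  # libfoo.la -> foo
        return b"-L" + ladir, b"-l" + libname

    print(la_to_flags(b"/usr/lib64/libfoo.la"))
    # (b'-L/usr/lib64', b'-lfoo')
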
diff --git a/pym/portage/util/listdir.py b/pym/portage/util/listdir.py
index c2628cbfe..2012e145f 100644
--- a/pym/portage/util/listdir.py
+++ b/pym/portage/util/listdir.py
@@ -1,36 +1,33 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['cacheddir', 'listdir']
import errno
import stat
-import time
+import sys
+
+if sys.hexversion < 0x3000000:
+ from itertools import izip as zip
from portage import os
+from portage.const import VCS_DIRS
from portage.exception import DirectoryNotFound, PermissionDenied, PortageException
-from portage.util import normalize_path, writemsg
-
-_ignorecvs_dirs = ('CVS', 'RCS', 'SCCS', '.svn', '.git')
+from portage.util import normalize_path
+
+# The global dircache is no longer supported, since it could
+# be a memory leak for API consumers. Any cacheddir callers
+# should use higher-level caches instead, when necessary.
+# TODO: Remove dircache variable after stable portage does
+# not use it (keep it for now, in case API consumers clear
+# it manually).
dircache = {}
-cacheHit = 0
-cacheMiss = 0
-cacheStale = 0
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
- global cacheHit,cacheMiss,cacheStale
mypath = normalize_path(my_original_path)
- if mypath in dircache:
- cacheHit += 1
- cached_mtime, list, ftype = dircache[mypath]
- else:
- cacheMiss += 1
- cached_mtime, list, ftype = -1, [], []
try:
pathstat = os.stat(mypath)
- if stat.S_ISDIR(pathstat[stat.ST_MODE]):
- mtime = pathstat.st_mtime
- else:
+ if not stat.S_ISDIR(pathstat.st_mode):
raise DirectoryNotFound(mypath)
except EnvironmentError as e:
if e.errno == PermissionDenied.errno:
@@ -39,19 +36,16 @@ def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymli
return [], []
except PortageException:
return [], []
- # Python retuns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
- if mtime != cached_mtime or time.time() - mtime < 4:
- if mypath in dircache:
- cacheStale += 1
+ else:
try:
- list = os.listdir(mypath)
+ fpaths = os.listdir(mypath)
except EnvironmentError as e:
if e.errno != errno.EACCES:
raise
del e
raise PermissionDenied(mypath)
ftype = []
- for x in list:
+ for x in fpaths:
try:
if followSymlinks:
pathstat = os.stat(mypath+"/"+x)
@@ -68,23 +62,22 @@ def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymli
ftype.append(3)
except (IOError, OSError):
ftype.append(3)
- dircache[mypath] = mtime, list, ftype
-
- ret_list = []
- ret_ftype = []
- for x in range(0, len(list)):
- if list[x] in ignorelist:
- pass
- elif ignorecvs:
- if list[x][:2] != ".#" and \
- not (ftype[x] == 1 and list[x] in _ignorecvs_dirs):
- ret_list.append(list[x])
- ret_ftype.append(ftype[x])
- else:
- ret_list.append(list[x])
- ret_ftype.append(ftype[x])
-
- writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+
+ if ignorelist or ignorecvs:
+ ret_list = []
+ ret_ftype = []
+ for file_path, file_type in zip(fpaths, ftype):
+ if file_path in ignorelist:
+ pass
+ elif ignorecvs:
+ if file_path[:2] != ".#" and \
+ not (file_type == 1 and file_path in VCS_DIRS):
+ ret_list.append(file_path)
+ ret_ftype.append(file_type)
+ else:
+ ret_list = fpaths
+ ret_ftype = ftype
+
return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
@@ -98,7 +91,7 @@ def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelis
@type recursive: Boolean
	@param filesonly: Only return files, not directories
@type filesonly: Boolean
- @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
+ @param ignorecvs: Ignore VCS directories
@type ignorecvs: Boolean
@param ignorelist: List of filenames/directories to exclude
@type ignorelist: List
@@ -112,40 +105,35 @@ def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelis
@return: A list of files and directories (or just files or just directories) or an empty list.
"""
- list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+ fpaths, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
- if list is None:
- list=[]
+ if fpaths is None:
+ fpaths = []
if ftype is None:
- ftype=[]
+ ftype = []
if not (filesonly or dirsonly or recursive):
- return list
+ return fpaths
if recursive:
- x=0
- while x<len(ftype):
- if ftype[x] == 1:
- l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
- followSymlinks)
-
- l=l[:]
- for y in range(0,len(l)):
- l[y]=list[x]+"/"+l[y]
- list=list+l
- ftype=ftype+f
- x+=1
+ stack = list(zip(fpaths, ftype))
+ fpaths = []
+ ftype = []
+ while stack:
+ file_path, file_type = stack.pop()
+ fpaths.append(file_path)
+ ftype.append(file_type)
+ if file_type == 1:
+ subdir_list, subdir_types = cacheddir(
+ os.path.join(mypath, file_path), ignorecvs,
+ ignorelist, EmptyOnError, followSymlinks)
+ stack.extend((os.path.join(file_path, x), x_type)
+ for x, x_type in zip(subdir_list, subdir_types))
+
if filesonly:
- rlist=[]
- for x in range(0,len(ftype)):
- if ftype[x]==0:
- rlist=rlist+[list[x]]
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 0]
+
elif dirsonly:
- rlist = []
- for x in range(0, len(ftype)):
- if ftype[x] == 1:
- rlist = rlist + [list[x]]
- else:
- rlist=list
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 1]
- return rlist
+ return fpaths
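
A minimal standalone version of the stack-based traversal introduced above, using os.listdir() directly instead of cacheddir(); the starting path is illustrative.

    import os
    import stat

    def walk_paths(top):
        results = []
        stack = [""]
        while stack:
            rel = stack.pop()
            full = os.path.join(top, rel) if rel else top
            for name in os.listdir(full):
                rel_name = os.path.join(rel, name) if rel else name
                results.append(rel_name)
                try:
                    mode = os.stat(os.path.join(top, rel_name)).st_mode
                except OSError:
                    continue  # broken symlink, permission error, etc.
                if stat.S_ISDIR(mode):
                    stack.append(rel_name)
        return results

    print(walk_paths("/etc/env.d"))
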
diff --git a/pym/portage/util/movefile.py b/pym/portage/util/movefile.py
index 10577b565..452e77f0d 100644
--- a/pym/portage/util/movefile.py
+++ b/pym/portage/util/movefile.py
@@ -1,18 +1,22 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['movefile']
import errno
+import fnmatch
import os as _os
import shutil as _shutil
import stat
+import sys
import subprocess
import textwrap
import portage
from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
- _unicode_decode, _unicode_encode, _unicode_func_wrapper,\
+ _unicode_decode, _unicode_encode, _unicode_func_wrapper, \
_unicode_module_wrapper
from portage.const import MOVE_BINARY
from portage.exception import OperationNotSupported
@@ -24,43 +28,113 @@ def _apply_stat(src_stat, dest):
_os.chown(dest, src_stat.st_uid, src_stat.st_gid)
_os.chmod(dest, stat.S_IMODE(src_stat.st_mode))
+_xattr_excluder_cache = {}
+
+def _get_xattr_excluder(pattern):
+
+ try:
+ value = _xattr_excluder_cache[pattern]
+ except KeyError:
+ value = _xattr_excluder(pattern)
+ _xattr_excluder_cache[pattern] = value
+
+ return value
+
+class _xattr_excluder(object):
+
+ __slots__ = ('_pattern_split',)
+
+ def __init__(self, pattern):
+
+ if pattern is None:
+ self._pattern_split = None
+ else:
+ pattern = pattern.split()
+ if not pattern:
+ self._pattern_split = None
+ else:
+ pattern.sort()
+ self._pattern_split = tuple(pattern)
+
+ def __call__(self, attr):
+
+ if self._pattern_split is None:
+ return False
+
+ match = fnmatch.fnmatch
+ for x in self._pattern_split:
+ if match(attr, x):
+ return True
+
+ return False
+
if hasattr(_os, "getxattr"):
# Python >=3.3 and GNU/Linux
- def _copyxattr(src, dest):
- for attr in _os.listxattr(src):
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = _os.listxattr(src)
+ except OSError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
try:
_os.setxattr(dest, attr, _os.getxattr(src, attr))
raise_exception = False
except OSError:
raise_exception = True
if raise_exception:
- raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
else:
try:
import xattr
except ImportError:
xattr = None
if xattr is not None:
- def _copyxattr(src, dest):
- for attr in xattr.list(src):
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = xattr.list(src)
+ except IOError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
try:
xattr.set(dest, attr, xattr.get(src, attr))
raise_exception = False
except IOError:
raise_exception = True
if raise_exception:
- raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
else:
- _devnull = open("/dev/null", "wb")
try:
- subprocess.call(["getfattr", "--version"], stdout=_devnull)
- subprocess.call(["setfattr", "--version"], stdout=_devnull)
- _has_getfattr_and_setfattr = True
+ with open(_os.devnull, 'wb') as f:
+ subprocess.call(["getfattr", "--version"], stdout=f)
+ subprocess.call(["setfattr", "--version"], stdout=f)
except OSError:
- _has_getfattr_and_setfattr = False
- _devnull.close()
- if _has_getfattr_and_setfattr:
- def _copyxattr(src, dest):
+ def _copyxattr(src, dest, exclude=None):
+ # TODO: implement exclude
getfattr_process = subprocess.Popen(["getfattr", "-d", "--absolute-names", src], stdout=subprocess.PIPE)
getfattr_process.wait()
extended_attributes = getfattr_process.stdout.readlines()
@@ -72,14 +146,15 @@ else:
if setfattr_process.returncode != 0:
raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
else:
- def _copyxattr(src, dest):
+ def _copyxattr(src, dest, exclude=None):
pass
def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
hardlink_candidates=None, encoding=_encodings['fs']):
"""moves a file from src to dest, preserving all permissions and attributes; mtime will
- be preserved even when moving across filesystems. Returns true on success and false on
- failure. Move is atomic."""
+ be preserved even when moving across filesystems. Returns mtime as integer on success
+ and None on failure. mtime is expressed in seconds in Python <3.3 and nanoseconds in
+ Python >=3.3. Move is atomic."""
if mysettings is None:
mysettings = portage.settings
@@ -102,22 +177,22 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
try:
if not sstat:
- sstat=os.lstat(src)
+ sstat = os.lstat(src)
except SystemExit as e:
raise
except Exception as e:
writemsg("!!! %s\n" % _("Stating source file failed... movefile()"),
noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
- destexists=1
+ destexists = 1
try:
- dstat=os.lstat(dest)
+ dstat = os.lstat(dest)
except (OSError, IOError):
- dstat=os.lstat(os.path.dirname(dest))
- destexists=0
+ dstat = os.lstat(os.path.dirname(dest))
+ destexists = 0
if bsd_chflags:
if destexists and dstat.st_flags != 0:
@@ -132,7 +207,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if stat.S_ISLNK(dstat[stat.ST_MODE]):
try:
os.unlink(dest)
- destexists=0
+ destexists = 0
except SystemExit as e:
raise
except Exception as e:
@@ -140,7 +215,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if stat.S_ISLNK(sstat[stat.ST_MODE]):
try:
- target=os.readlink(src)
+ target = os.readlink(src)
if mysettings and "D" in mysettings and \
target.startswith(mysettings["D"]):
target = target[len(mysettings["D"])-1:]
@@ -159,17 +234,32 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if e.errno not in (errno.ENOENT, errno.EEXIST) or \
target != os.readlink(dest):
raise
- lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
- # utime() only works on the target of a symlink, so it's not
- # possible to perserve mtime on symlinks.
- return os.lstat(dest)[stat.ST_MTIME]
+ lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
+
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
+
+ if sys.hexversion >= 0x3030000:
+ try:
+ os.utime(dest, ns=(sstat.st_mtime_ns, sstat.st_mtime_ns), follow_symlinks=False)
+ except NotImplementedError:
+ # utimensat() and lutimes() missing in libc.
+ return os.stat(dest, follow_symlinks=False).st_mtime_ns
+ else:
+ return sstat.st_mtime_ns
+ else:
+ # utime() in Python <3.3 only works on the target of a symlink, so it's not
+ # possible to preserve mtime on symlinks.
+ return os.lstat(dest)[stat.ST_MTIME]
except SystemExit as e:
raise
except Exception as e:
writemsg("!!! %s\n" % _("failed to properly create symlink:"),
noiselevel=-1)
writemsg("!!! %s -> %s\n" % (dest, target), noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
hardlinked = False
@@ -204,9 +294,13 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
hardlinked = True
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
break
- renamefailed=1
+ renamefailed = 1
if hardlinked:
renamefailed = False
if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
@@ -214,14 +308,14 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if selinux_enabled:
selinux.rename(src, dest)
else:
- os.rename(src,dest)
- renamefailed=0
+ os.rename(src, dest)
+ renamefailed = 0
except OSError as e:
if e.errno != errno.EXDEV:
# Some random error.
writemsg("!!! %s\n" % _("Failed to move %(src)s to %(dest)s") %
{"src": src, "dest": dest}, noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
if renamefailed:
@@ -233,7 +327,8 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
_copyfile(src_bytes, dest_tmp_bytes)
if xattr_enabled:
try:
- _copyxattr(src_bytes, dest_tmp_bytes)
+ _copyxattr(src_bytes, dest_tmp_bytes,
+ exclude=mysettings.get("PORTAGE_XATTR_EXCLUDE", "security.* system.nfs4_acl"))
except SystemExit:
raise
except:
@@ -252,7 +347,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
except Exception as e:
writemsg("!!! %s\n" % _('copy %(src)s -> %(dest)s failed.') %
{"src": src, "dest": dest}, noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
else:
#we don't yet handle special, so we need to fall back to /bin/mv
@@ -265,35 +360,54 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
writemsg("!!! %s\n" % a, noiselevel=-1)
return None # failure
- # Always use stat_obj[stat.ST_MTIME] for the integral timestamp which
- # is returned, since the stat_obj.st_mtime float attribute rounds *up*
+ # In Python <3.3 always use stat_obj[stat.ST_MTIME] for the integral timestamp
+ # which is returned, since the stat_obj.st_mtime float attribute rounds *up*
# if the nanosecond part of the timestamp is 999999881 ns or greater.
try:
if hardlinked:
- newmtime = os.stat(dest)[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
else:
# Note: It is not possible to preserve nanosecond precision
# (supported in POSIX.1-2008 via utimensat) with the IEEE 754
# double precision float which only has a 53 bit significand.
if newmtime is not None:
- os.utime(dest, (newmtime, newmtime))
+ if sys.hexversion >= 0x3030000:
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ os.utime(dest, (newmtime, newmtime))
else:
- newmtime = sstat[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = sstat.st_mtime_ns
+ else:
+ newmtime = sstat[stat.ST_MTIME]
if renamefailed:
- # If rename succeeded then timestamps are automatically
- # preserved with complete precision because the source
- # and destination inode are the same. Otherwise, round
- # down to the nearest whole second since python's float
- # st_mtime cannot be used to preserve the st_mtim.tv_nsec
- # field with complete precision. Note that we have to use
- # stat_obj[stat.ST_MTIME] here because the float
- # stat_obj.st_mtime rounds *up* sometimes.
- os.utime(dest, (newmtime, newmtime))
+ if sys.hexversion >= 0x3030000:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, manually
+ # update timestamps with nanosecond precision.
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, round
+ # down to the nearest whole second since python's float
+ # st_mtime cannot be used to preserve the st_mtim.tv_nsec
+ # field with complete precision. Note that we have to use
+ # stat_obj[stat.ST_MTIME] here because the float
+ # stat_obj.st_mtime rounds *up* sometimes.
+ os.utime(dest, (newmtime, newmtime))
except OSError:
# The utime can fail here with EPERM even though the move succeeded.
# Instead of failing, use stat to return the mtime if possible.
try:
- newmtime = os.stat(dest)[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
except OSError as e:
writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
writemsg("!!! %s\n" % dest, noiselevel=-1)
diff --git a/pym/portage/util/whirlpool.py b/pym/portage/util/whirlpool.py
index c696f6fc0..170ae73f8 100644
--- a/pym/portage/util/whirlpool.py
+++ b/pym/portage/util/whirlpool.py
@@ -639,6 +639,8 @@ def WhirlpoolInit(ctx):
return
def WhirlpoolAdd(source, sourceBits, ctx):
+ if not isinstance(source, bytes):
+ raise TypeError("Expected %s, got %s" % (bytes, type(source)))
if sys.hexversion < 0x3000000:
source = [ord(s)&0xff for s in source]
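
The new guard turns a silent mis-hash on unicode input into an immediate TypeError; the idiom in isolation:

def _require_bytes(source):
	# The same check WhirlpoolAdd() now performs up front: reject
	# anything that is not a bytes object before hashing begins.
	if not isinstance(source, bytes):
		raise TypeError("Expected %s, got %s" % (bytes, type(source)))
	return source

_require_bytes(b"data")    # accepted
# _require_bytes(u"data")  # would raise TypeError
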
diff --git a/pym/portage/util/writeable_check.py b/pym/portage/util/writeable_check.py
new file mode 100644
index 000000000..e6ddce680
--- /dev/null
+++ b/pym/portage/util/writeable_check.py
@@ -0,0 +1,79 @@
+#-*- coding:utf-8 -*-
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+"""
+Methods to check whether Portage is going to write to read-only filesystems.
+Since the methods are not portable across different OSes, each OS needs its
+own method. To expand RO checking for different OSes, add a method which
+accepts a list of directories and returns a list of mounts which need to be
+remounted RW, then add "elif ostype == (the ostype value for your OS)" to
+get_ro_checker().
+"""
+from __future__ import unicode_literals
+
+import io
+import logging
+import re
+
+from portage import _encodings
+from portage.util import writemsg_level
+from portage.localization import _
+from portage.data import ostype
+
+
+def get_ro_checker():
+ """
+ Uses the system type to find an appropriate method for testing whether Portage
+ is going to write to any read-only filesystems.
+
+ @return:
+ 1. A method for testing for RO filesystems appropriate to the current system.
+ """
+ return _CHECKERS.get(ostype, empty_ro_checker)
+
+
+def linux_ro_checker(dir_list):
+ """
+ Use /proc/mounts to check whether any directory the ebuild installs to
+ resides on a read-only filesystem.
+
+ @param dir_list: A list of directories installed by the ebuild.
+ @type dir_list: List
+ @return:
+ 1. A set of mount points which the ebuild will write to and which are
+ mounted read-only; may be empty.
+ """
+ ro_filesystems = set()
+
+ try:
+ with io.open("/proc/mounts", mode='r', encoding=_encodings['content'],
+ errors='replace') as f:
+ roregex = re.compile(r'(\A|,)ro(\Z|,)')
+ for line in f:
+ if roregex.search(line.split(" ")[3].strip()) is not None:
+ romount = line.split(" ")[1].strip()
+ ro_filesystems.add(romount)
+
+ # If /proc/mounts can't be read, assume that there are no RO
+ # filesystems and return.
+ except EnvironmentError:
+ writemsg_level(_("!!! /proc/mounts cannot be read"),
+ level=logging.WARNING, noiselevel=-1)
+ return []
+
+ return set.intersection(ro_filesystems, set(dir_list))
+
+
+def empty_ro_checker(dir_list):
+ """
+ Always returns []; this is the fallback function used when the system has
+ no ro_checker method defined.
+ """
+ return []
+
+
+# _CHECKERS is a map from ostype output to the appropriate function to return
+# in get_ro_checker.
+_CHECKERS = {
+ "Linux": linux_ro_checker,
+}
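
A hedged usage sketch of the new module (the directory list is invented for illustration): ask for the checker appropriate to this OS and pass it the directories an ebuild is about to write to. On Linux this dispatches to linux_ro_checker(); elsewhere empty_ro_checker() returns [].

from portage.util.writeable_check import get_ro_checker

ro_checker = get_ro_checker()
ro_mounts = ro_checker(["/usr", "/var/db/pkg"])
if ro_mounts:
	print("read-only mounts would block the merge: %s"
		% ", ".join(sorted(ro_mounts)))
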
diff --git a/pym/portage/versions.py b/pym/portage/versions.py
index 27947532b..2c9fe5bda 100644
--- a/pym/portage/versions.py
+++ b/pym/portage/versions.py
@@ -1,7 +1,9 @@
# versions.py -- core Portage functionality
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
'best', 'catpkgsplit', 'catsplit',
'cpv_getkey', 'cpv_getversion', 'cpv_sort_key', 'pkgcmp', 'pkgsplit',
@@ -19,7 +21,6 @@ else:
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.dep:_get_slot_re',
'portage.repository.config:_gen_valid_repo',
'portage.util:cmp_sort_key',
)
@@ -32,6 +33,10 @@ _unknown_repo = "__unknown__"
# \w is [a-zA-Z0-9_]
+# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot = r'([\w+][\w+.-]*)'
+
# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_cat = r'[\w+][\w+.-]*'
@@ -66,6 +71,24 @@ suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+_slot_re_cache = {}
+
+def _get_slot_re(eapi_attrs):
+ cache_key = eapi_attrs.slot_operator
+ slot_re = _slot_re_cache.get(cache_key)
+ if slot_re is not None:
+ return slot_re
+
+ if eapi_attrs.slot_operator:
+ slot_re = _slot + r'(/' + _slot + r')?'
+ else:
+ slot_re = _slot
+
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
+
+ _slot_re_cache[cache_key] = slot_re
+ return slot_re
+
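
To see what _get_slot_re() builds, here is the composed pattern evaluated directly, assuming slot_operator is enabled as in EAPI 5:

import re

_slot = r'([\w+][\w+.-]*)'
slot_re = re.compile('^' + _slot + '(/' + _slot + ')?' + '$',
	re.VERBOSE | re.UNICODE)

print(bool(slot_re.match('0')))       # True: plain slot
print(bool(slot_re.match('0/1.17')))  # True: slot/sub-slot pair
print(bool(slot_re.match('-bad')))    # False: must not begin with '-'
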
_pv_re_cache = {}
def _get_pv_re(eapi_attrs):
@@ -79,18 +102,18 @@ def _get_pv_re(eapi_attrs):
else:
pv_re = _pv['dots_disallowed_in_PN']
- pv_re = re.compile('^' + pv_re + '$', re.VERBOSE)
+ pv_re = re.compile(r'^' + pv_re + r'$', re.VERBOSE | re.UNICODE)
_pv_re_cache[cache_key] = pv_re
return pv_re
def ververify(myver, silent=1):
if ver_regexp.match(myver):
- return 1
+ return True
else:
if not silent:
print(_("!!! syntax error in version: %s") % myver)
- return 0
+ return False
def vercmp(ver1, ver2, silent=1):
"""
@@ -292,7 +315,7 @@ def _pkgsplit(mypkg, eapi=None):
return (m.group('pn'), m.group('ver'), rev)
-_cat_re = re.compile('^%s$' % _cat)
+_cat_re = re.compile('^%s$' % _cat, re.UNICODE)
_missing_cat = 'null'
def catpkgsplit(mydata, silent=1, eapi=None):
@@ -314,11 +337,11 @@ def catpkgsplit(mydata, silent=1, eapi=None):
except AttributeError:
pass
mysplit = mydata.split('/', 1)
- p_split=None
- if len(mysplit)==1:
+ p_split = None
+ if len(mysplit) == 1:
cat = _missing_cat
p_split = _pkgsplit(mydata, eapi=eapi)
- elif len(mysplit)==2:
+ elif len(mysplit) == 2:
cat = mysplit[0]
if _cat_re.match(cat) is not None:
p_split = _pkgsplit(mysplit[1], eapi=eapi)
@@ -337,14 +360,23 @@ class _pkg_str(_unicode):
manually convert them to a plain unicode object first.
"""
- def __new__(cls, cpv, slot=None, repo=None, eapi=None):
+ def __new__(cls, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None):
return _unicode.__new__(cls, cpv)
- def __init__(self, cpv, slot=None, repo=None, eapi=None):
+ def __init__(self, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None):
if not isinstance(cpv, _unicode):
# Avoid TypeError from _unicode.__init__ with PyPy.
cpv = _unicode_decode(cpv)
_unicode.__init__(cpv)
+ if metadata is not None:
+ self.__dict__['_metadata'] = metadata
+ slot = metadata.get('SLOT', slot)
+ repo = metadata.get('repository', repo)
+ eapi = metadata.get('EAPI', eapi)
+ if settings is not None:
+ self.__dict__['_settings'] = settings
if eapi is not None:
self.__dict__['eapi'] = eapi
self.__dict__['cpv_split'] = catpkgsplit(cpv, eapi=eapi)
@@ -363,19 +395,19 @@ class _pkg_str(_unicode):
if slot_match is None:
# Avoid an InvalidAtom exception when creating SLOT atoms
self.__dict__['slot'] = '0'
- self.__dict__['slot_abi'] = '0'
+ self.__dict__['sub_slot'] = '0'
self.__dict__['slot_invalid'] = slot
else:
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
slot_split = slot.split("/")
self.__dict__['slot'] = slot_split[0]
if len(slot_split) > 1:
- self.__dict__['slot_abi'] = slot_split[1]
+ self.__dict__['sub_slot'] = slot_split[1]
else:
- self.__dict__['slot_abi'] = slot_split[0]
+ self.__dict__['sub_slot'] = slot_split[0]
else:
self.__dict__['slot'] = slot
- self.__dict__['slot_abi'] = slot
+ self.__dict__['sub_slot'] = slot
if repo is not None:
repo = _gen_valid_repo(repo)
@@ -387,6 +419,25 @@ class _pkg_str(_unicode):
raise AttributeError("_pkg_str instances are immutable",
self.__class__, name, value)
+ @property
+ def stable(self):
+ try:
+ return self._stable
+ except AttributeError:
+ try:
+ metadata = self._metadata
+ settings = self._settings
+ except AttributeError:
+ raise AttributeError('stable')
+ if not settings.local_config:
+ # Since repoman uses different config instances for
+ # different profiles, our local instance does not
+ # refer to the correct profile.
+ raise AssertionError('invalid context')
+ stable = settings._isStable(self)
+ self.__dict__['_stable'] = stable
+ return stable
+
def pkgsplit(mypkg, silent=1, eapi=None):
"""
@param mypkg: either a pv or cpv
@@ -488,7 +539,7 @@ def cpv_sort_key(eapi=None):
return cmp_sort_key(cmp_cpv)
def catsplit(mydep):
- return mydep.split("/", 1)
+ return mydep.split("/", 1)
def best(mymatches, eapi=None):
"""Accepts None arguments; assumes matches are valid."""
diff --git a/pym/portage/xml/metadata.py b/pym/portage/xml/metadata.py
index f820e5414..fcd9dc0e3 100644
--- a/pym/portage/xml/metadata.py
+++ b/pym/portage/xml/metadata.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Provides an easy-to-use python interface to Gentoo's metadata.xml file.
@@ -28,6 +28,8 @@
'Thomas Mills Hinkle'
"""
+from __future__ import unicode_literals
+
__all__ = ('MetaDataXML',)
import sys
@@ -58,8 +60,7 @@ except (ImportError, SystemError, RuntimeError, Exception):
import re
import xml.etree.ElementTree
-import portage
-from portage import os, _unicode_decode
+from portage import _encodings, _unicode_encode
from portage.util import unique_everseen
class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
@@ -203,12 +204,13 @@ class MetaDataXML(object):
self._xml_tree = None
try:
- self._xml_tree = etree.parse(metadata_xml_path,
+ self._xml_tree = etree.parse(_unicode_encode(metadata_xml_path,
+ encoding=_encodings['fs'], errors='strict'),
parser=etree.XMLParser(target=_MetadataTreeBuilder()))
except ImportError:
pass
except ExpatError as e:
- raise SyntaxError(_unicode_decode("%s") % (e,))
+ raise SyntaxError("%s" % (e,))
if isinstance(herds, etree.ElementTree):
herds_etree = herds
@@ -241,7 +243,8 @@ class MetaDataXML(object):
if self._herdstree is None:
try:
- self._herdstree = etree.parse(self._herds_path,
+ self._herdstree = etree.parse(_unicode_encode(self._herds_path,
+ encoding=_encodings['fs'], errors='strict'),
parser=etree.XMLParser(target=_MetadataTreeBuilder()))
except (ImportError, IOError, SyntaxError):
return None
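
Both parse sites above, like the herdbase.py change further down, encode the path to bytes with the filesystem encoding before handing it to etree.parse(), which sidesteps locale-dependent handling of non-ASCII file names; a simplified sketch with 'utf-8' standing in for _encodings['fs']:

from xml.etree import ElementTree

def parse_xml(path, fs_encoding='utf-8'):
	# Encode the file name to bytes first, mirroring the
	# _unicode_encode() calls in the hunks above.
	return ElementTree.parse(path.encode(fs_encoding))
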
diff --git a/pym/portage/xpak.py b/pym/portage/xpak.py
index 73f84ab75..b4567be05 100644
--- a/pym/portage/xpak.py
+++ b/pym/portage/xpak.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2012 Gentoo Foundation
+# Copyright 2001-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
@@ -15,10 +15,12 @@
# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
# '+' means concatenate the fields ===> All chunks are strings
-__all__ = ['addtolist', 'decodeint', 'encodeint', 'getboth',
+__all__ = [
+ 'addtolist', 'decodeint', 'encodeint', 'getboth',
'getindex', 'getindex_mem', 'getitem', 'listindex',
'searchindex', 'tbz2', 'xpak_mem', 'xpak', 'xpand',
- 'xsplit', 'xsplit_mem']
+ 'xsplit', 'xsplit_mem',
+]
import array
import errno
diff --git a/pym/repoman/checks.py b/pym/repoman/checks.py
index ca4c260b1..8032b28df 100644
--- a/pym/repoman/checks.py
+++ b/pym/repoman/checks.py
@@ -1,10 +1,12 @@
# repoman: Checks
-# Copyright 2007-2012 Gentoo Foundation
+# Copyright 2007-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""This module contains functions used in Repoman to ascertain the quality
and correctness of an ebuild."""
+from __future__ import unicode_literals
+
import codecs
from itertools import chain
import re
@@ -13,8 +15,7 @@ import repoman.errors as errors
import portage
from portage.eapi import eapi_supports_prefix, eapi_has_implicit_rdepend, \
eapi_has_src_prepare_and_src_configure, eapi_has_dosed_dohard, \
- eapi_exports_AA
-from portage.const import _ENABLE_INHERIT_CHECK
+ eapi_exports_AA, eapi_has_pkg_pretend
class LineCheck(object):
"""Run a check on a line of an ebuild."""
@@ -69,7 +70,7 @@ class EbuildHeader(LineCheck):
Copyright header errors
CVS header errors
License header errors
-
+
Args:
modification_year - Year the ebuild was last modified
"""
@@ -112,7 +113,7 @@ class EbuildWhitespace(LineCheck):
ignore_line = re.compile(r'(^$)|(^(\t)*#)')
ignore_comment = False
leading_spaces = re.compile(r'^[\S\t]')
- trailing_whitespace = re.compile(r'.*([\S]$)')
+ trailing_whitespace = re.compile(r'.*([\S]$)')
def check(self, num, line):
if self.leading_spaces.match(line) is None:
@@ -162,6 +163,9 @@ class EbuildQuote(LineCheck):
"GAMES_DATADIR_BASE", "GAMES_SYSCONFDIR", "GAMES_STATEDIR",
"GAMES_LOGDIR", "GAMES_BINDIR"]
+ # variables for multibuild.eclass
+ var_names += ["BUILD_DIR"]
+
var_names = "(%s)" % "|".join(var_names)
var_reference = re.compile(r'\$(\{'+var_names+'\}|' + \
var_names + '\W)')
@@ -169,7 +173,7 @@ class EbuildQuote(LineCheck):
r'\}?[^"\'\s]*(\s|$)')
cond_begin = re.compile(r'(^|\s+)\[\[($|\\$|\s+)')
cond_end = re.compile(r'(^|\s+)\]\]($|\\$|\s+)')
-
+
def check(self, num, line):
if self.var_reference.search(line) is None:
return
@@ -221,21 +225,13 @@ class EbuildAssignment(LineCheck):
"""Ensure ebuilds don't assign to readonly variables."""
repoman_check_name = 'variable.readonly'
-
readonly_assignment = re.compile(r'^\s*(export\s+)?(A|CATEGORY|P|PV|PN|PR|PVR|PF|D|WORKDIR|FILESDIR|FEATURES|USE)=')
- line_continuation = re.compile(r'([^#]*\S)(\s+|\t)\\$')
- ignore_line = re.compile(r'(^$)|(^(\t)*#)')
- ignore_comment = False
-
- def __init__(self):
- self.previous_line = None
def check(self, num, line):
match = self.readonly_assignment.match(line)
e = None
- if match and (not self.previous_line or not self.line_continuation.match(self.previous_line)):
+ if match is not None:
e = errors.READONLY_ASSIGNMENT_ERROR
- self.previous_line = line
return e
class Eapi3EbuildAssignment(EbuildAssignment):
@@ -247,11 +243,11 @@ class Eapi3EbuildAssignment(EbuildAssignment):
return eapi_supports_prefix(eapi)
class EbuildNestedDie(LineCheck):
- """Check ebuild for nested die statements (die statements in subshells"""
-
+ """Check ebuild for nested die statements (die statements in subshells)"""
+
repoman_check_name = 'ebuild.nesteddie'
nesteddie_re = re.compile(r'^[^#]*\s\(\s[^)]*\bdie\b')
-
+
def check(self, num, line):
if self.nesteddie_re.match(line):
return errors.NESTED_DIE_ERROR
@@ -296,7 +292,7 @@ class EapiDefinition(LineCheck):
_eapi_re = portage._pms_eapi_re
def new(self, pkg):
- self._cached_eapi = pkg.metadata['EAPI']
+ self._cached_eapi = pkg.eapi
self._parsed_eapi = None
self._eapi_line_num = None
@@ -386,13 +382,18 @@ class InheritDeprecated(LineCheck):
# deprecated eclass : new eclass (False if no new eclass)
deprecated_classes = {
"bash-completion": "bash-completion-r1",
+ "boost-utils": False,
+ "distutils": "distutils-r1",
"gems": "ruby-fakegem",
"git": "git-2",
+ "mono": "mono-env",
"mozconfig-2": "mozconfig-3",
"mozcoreconf": "mozcoreconf-2",
"php-ext-pecl-r1": "php-ext-pecl-r2",
"php-ext-source-r1": "php-ext-source-r2",
"php-pear": "php-pear-r1",
+ "python": "python-r1 / python-single-r1 / python-any-r1",
+ "python-distutils-ng": "python-r1 + distutils-r1",
"qt3": False,
"qt4": "qt4-r2",
"ruby": "ruby-ng",
@@ -471,13 +472,13 @@ class InheritEclass(LineCheck):
self._inherit_re = re.compile(r'^(\s*|.*[|&]\s*)\binherit\s(.*\s)?%s(\s|$)' % inherit_re)
# Match when the function is preceded only by leading whitespace, a
# shell operator such as (, {, |, ||, or &&, or optional variable
- # setting(s). This prevents false postives in things like elog
+ # setting(s). This prevents false positives in things like elog
# messages, as reported in bug #413285.
self._func_re = re.compile(r'(^|[|&{(])\s*(\w+=.*)?\b(' + '|'.join(funcs) + r')\b')
def new(self, pkg):
self.repoman_check_name = 'inherit.missing'
- # We can't use pkg.inherited because that tells us all the eclass that
+ # We can't use pkg.inherited because that tells us all the eclasses that
# have been inherited and not just the ones we inherit directly.
self._inherit = False
self._func_call = False
@@ -486,6 +487,7 @@ class InheritEclass(LineCheck):
self._disabled = any(x in inherited for x in self._exempt_eclasses)
else:
self._disabled = False
+ self._eapi = pkg.eapi
def check(self, num, line):
if not self._inherit:
@@ -494,10 +496,14 @@ class InheritEclass(LineCheck):
if self._disabled or self._ignore_missing:
return
s = self._func_re.search(line)
- if s:
- self._func_call = True
- return '%s.eclass is not inherited, but "%s" found at line: %s' % \
- (self._eclass, s.group(3), '%d')
+ if s is not None:
+ func_name = s.group(3)
+ eapi_func = _eclass_eapi_functions.get(func_name)
+ if eapi_func is None or not eapi_func(self._eapi):
+ self._func_call = True
+ return ('%s.eclass is not inherited, '
+ 'but "%s" found at line: %s') % \
+ (self._eclass, func_name, '%d')
elif not self._func_call:
self._func_call = self._func_re.search(line)
@@ -506,6 +512,10 @@ class InheritEclass(LineCheck):
self.repoman_check_name = 'inherit.unused'
yield 'no function called from %s.eclass; please drop' % self._eclass
+_eclass_eapi_functions = {
+ "usex" : lambda eapi: eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+}
+
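
The table above records helper functions that later EAPIs absorbed into the package manager itself: in EAPI 5 and later, usex is built in, so calling it without inheriting eutils is no longer flagged. The check() change uses the table like this (needs_inherit is our name for the inlined test):

_eclass_eapi_functions = {
	"usex": lambda eapi: eapi not in
		("0", "1", "2", "3", "4", "4-python", "4-slot-abi"),
}

def needs_inherit(func_name, eapi):
	# True when the EAPI itself does not provide func_name, so the
	# eclass must still be inherited for the call to be legal.
	eapi_func = _eclass_eapi_functions.get(func_name)
	return eapi_func is None or not eapi_func(eapi)

print(needs_inherit("usex", "4"))  # True: usex comes from eutils
print(needs_inherit("usex", "5"))  # False: built into EAPI 5
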
# eclasses that export ${ECLASS}_src_(compile|configure|install)
_eclass_export_functions = (
'ant-tasks', 'apache-2', 'apache-module', 'aspell-dict',
@@ -558,8 +568,7 @@ _eclass_info = {
'funcs': (
'estack_push', 'estack_pop', 'eshopts_push', 'eshopts_pop',
'eumask_push', 'eumask_pop', 'epatch', 'epatch_user',
- 'emktemp', 'edos2unix', 'in_iuse', 'use_if_iuse', 'usex',
- 'makeopts_jobs'
+ 'emktemp', 'edos2unix', 'in_iuse', 'use_if_iuse', 'usex'
),
'comprehensive': False,
@@ -589,8 +598,16 @@ _eclass_info = {
),
# These are "eclasses are the whole ebuild" type thing.
- 'exempt_eclasses': _eclass_export_functions + ('autotools', 'libtool'),
+ 'exempt_eclasses': _eclass_export_functions + ('autotools', 'libtool',
+ 'multilib-minimal'),
+
+ 'comprehensive': False
+ },
+ 'multiprocessing': {
+ 'funcs': (
+ 'makeopts_jobs',
+ ),
'comprehensive': False
},
@@ -617,49 +634,6 @@ _eclass_info = {
}
}
-if not _ENABLE_INHERIT_CHECK:
- # Since the InheritEclass check is experimental, in the stable branch
- # we emulate the old eprefixify.defined and inherit.autotools checks.
- _eclass_info = {
- 'autotools': {
- 'funcs': (
- 'eaclocal', 'eautoconf', 'eautoheader',
- 'eautomake', 'eautoreconf', '_elibtoolize',
- 'eautopoint'
- ),
- 'comprehensive': True,
- 'ignore_missing': True,
- 'exempt_eclasses': ('git', 'git-2', 'subversion', 'autotools-utils')
- },
-
- 'prefix': {
- 'funcs': (
- 'eprefixify',
- ),
- 'comprehensive': False
- }
- }
-
-class IUseUndefined(LineCheck):
- """
- Make sure the ebuild defines IUSE (style guideline
- says to define IUSE even when empty).
- """
-
- repoman_check_name = 'IUSE.undefined'
- _iuse_def_re = re.compile(r'^IUSE=.*')
-
- def new(self, pkg):
- self._iuse_def = None
-
- def check(self, num, line):
- if self._iuse_def is None:
- self._iuse_def = self._iuse_def_re.match(line)
-
- def end(self):
- if self._iuse_def is None:
- yield 'IUSE is not defined'
-
class EMakeParallelDisabled(PhaseCheck):
"""Check for emake -j1 calls which disable parallelization."""
repoman_check_name = 'upstream.workaround'
@@ -684,8 +658,8 @@ class NoAsNeeded(LineCheck):
error = errors.NO_AS_NEEDED
class PreserveOldLib(LineCheck):
- """Check for calls to the preserve_old_lib function."""
- repoman_check_name = 'upstream.workaround'
+ """Check for calls to the deprecated preserve_old_lib function."""
+ repoman_check_name = 'ebuild.minorsyn'
re = re.compile(r'.*preserve_old_lib')
error = errors.PRESERVE_OLD_LIB
@@ -757,6 +731,21 @@ class DeprecatedHasq(LineCheck):
re = re.compile(r'(^|.*\b)hasq\b')
error = errors.HASQ_ERROR
+# EAPI <2 checks
+class UndefinedSrcPrepareSrcConfigurePhases(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ src_configprepare_re = re.compile(r'\s*(src_configure|src_prepare)\s*\(\)')
+
+ def check_eapi(self, eapi):
+ return not eapi_has_src_prepare_and_src_configure(eapi)
+
+ def check(self, num, line):
+ m = self.src_configprepare_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " phase is not defined in EAPI < 2 on line: %d"
+
+
# EAPI-3 checks
class Eapi3DeprecatedFuncs(LineCheck):
repoman_check_name = 'EAPI.deprecated'
@@ -771,6 +760,20 @@ class Eapi3DeprecatedFuncs(LineCheck):
return ("'%s'" % m.group(1)) + \
" has been deprecated in EAPI=3 on line: %d"
+# EAPI <4 checks
+class UndefinedPkgPretendPhase(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ pkg_pretend_re = re.compile(r'\s*(pkg_pretend)\s*\(\)')
+
+ def check_eapi(self, eapi):
+ return not eapi_has_pkg_pretend(eapi)
+
+ def check(self, num, line):
+ m = self.pkg_pretend_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " phase is not defined in EAPI < 4 on line: %d"
+
# EAPI-4 checks
class Eapi4IncompatibleFuncs(LineCheck):
repoman_check_name = 'EAPI.incompatible'
@@ -803,7 +806,7 @@ class PortageInternal(LineCheck):
repoman_check_name = 'portage.internal'
ignore_comment = True
# Match when the command is preceded only by leading whitespace or a shell
- # operator such as (, {, |, ||, or &&. This prevents false postives in
+ # operator such as (, {, |, ||, or &&. This prevents false positives in
# things like elog messages, as reported in bug #413285.
re = re.compile(r'^(\s*|.*[|&{(]+\s*)\b(ecompress|ecompressdir|env-update|prepall|prepalldocs|preplib)\b')
@@ -813,19 +816,52 @@ class PortageInternal(LineCheck):
if m is not None:
return ("'%s'" % m.group(2)) + " called on line: %d"
-_constant_checks = tuple(chain((c() for c in (
- EbuildHeader, EbuildWhitespace, EbuildBlankLine, EbuildQuote,
- EbuildAssignment, Eapi3EbuildAssignment, EbuildUselessDodoc,
- EbuildUselessCdS, EbuildNestedDie,
- EbuildPatches, EbuildQuotedA, EapiDefinition,
- ImplicitRuntimeDeps, IUseUndefined,
- EMakeParallelDisabled, EMakeParallelDisabledViaMAKEOPTS, NoAsNeeded,
- DeprecatedBindnowFlags, SrcUnpackPatches, WantAutoDefaultValue,
- SrcCompileEconf, Eapi3DeprecatedFuncs, NoOffsetWithHelpers,
- Eapi4IncompatibleFuncs, Eapi4GoneVars, BuiltWithUse,
- PreserveOldLib, SandboxAddpredict, PortageInternal,
- DeprecatedUseq, DeprecatedHasq)),
- (InheritEclass(k, **kwargs) for k, kwargs in _eclass_info.items())))
+class PortageInternalVariableAssignment(LineCheck):
+ repoman_check_name = 'portage.internal'
+ internal_assignment = re.compile(r'\s*(export\s+)?(EXTRA_ECONF|EXTRA_EMAKE)\+?=')
+
+ def check(self, num, line):
+ match = self.internal_assignment.match(line)
+ e = None
+ if match is not None:
+ e = 'Assignment to variable %s' % match.group(2)
+ e += ' on line: %d'
+ return e
+
+_base_check_classes = (InheritEclass, LineCheck, PhaseCheck)
+_constant_checks = None
+
+def _init(experimental_inherit=False):
+
+ global _constant_checks, _eclass_info
+
+ if not experimental_inherit:
+ # Emulate the old eprefixify.defined and inherit.autotools checks.
+ _eclass_info = {
+ 'autotools': {
+ 'funcs': (
+ 'eaclocal', 'eautoconf', 'eautoheader',
+ 'eautomake', 'eautoreconf', '_elibtoolize',
+ 'eautopoint'
+ ),
+ 'comprehensive': True,
+ 'ignore_missing': True,
+ 'exempt_eclasses': ('git', 'git-2', 'subversion', 'autotools-utils')
+ },
+
+ 'prefix': {
+ 'funcs': (
+ 'eprefixify',
+ ),
+ 'comprehensive': False
+ }
+ }
+
+ _constant_checks = tuple(chain((v() for k, v in globals().items()
+ if isinstance(v, type) and issubclass(v, LineCheck) and
+ v not in _base_check_classes),
+ (InheritEclass(k, **portage._native_kwargs(kwargs))
+ for k, kwargs in _eclass_info.items())))
_here_doc_re = re.compile(r'.*\s<<[-]?(\w+)$')
_ignore_comment_re = re.compile(r'^\s*#')
@@ -833,6 +869,8 @@ _ignore_comment_re = re.compile(r'^\s*#')
def run_checks(contents, pkg):
unicode_escape_codec = codecs.lookup('unicode_escape')
unicode_escape = lambda x: unicode_escape_codec.decode(x)[0]
+ if _constant_checks is None:
+ _init()
checks = _constant_checks
here_doc_delim = None
multiline = None
@@ -888,17 +926,18 @@ def run_checks(contents, pkg):
multiline = line
continue
- # Finally we have a full line to parse.
- is_comment = _ignore_comment_re.match(line) is not None
- for lc in checks:
- if is_comment and lc.ignore_comment:
- continue
- if lc.check_eapi(pkg.metadata['EAPI']):
- ignore = lc.ignore_line
- if not ignore or not ignore.match(line):
- e = lc.check(num, line)
- if e:
- yield lc.repoman_check_name, e % (num + 1)
+ if not line.endswith("#nowarn\n"):
+ # Finally we have a full line to parse.
+ is_comment = _ignore_comment_re.match(line) is not None
+ for lc in checks:
+ if is_comment and lc.ignore_comment:
+ continue
+ if lc.check_eapi(pkg.eapi):
+ ignore = lc.ignore_line
+ if not ignore or not ignore.match(line):
+ e = lc.check(num, line)
+ if e:
+ yield lc.repoman_check_name, e % (num + 1)
for lc in checks:
i = lc.end()
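
run_checks() now builds its check tuple lazily through _init() on first use and honors a per-line opt-out: any line whose text ends in "#nowarn" just before the newline bypasses every LineCheck. The opt-out predicate in isolation:

def _suppressed(line):
	# Mirrors the new guard in run_checks().
	return line.endswith("#nowarn\n")

print(_suppressed('FEATURES="test" #nowarn\n'))  # True: skipped
print(_suppressed('FEATURES="test"\n'))          # False: checked
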
diff --git a/pym/repoman/errors.py b/pym/repoman/errors.py
index c515502c4..3833be671 100644
--- a/pym/repoman/errors.py
+++ b/pym/repoman/errors.py
@@ -1,7 +1,9 @@
# repoman: Error Messages
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
COPYRIGHT_ERROR = 'Invalid Gentoo Copyright on line: %d'
LICENSE_ERROR = 'Invalid Gentoo/GPL License on line: %d'
CVS_HEADER_ERROR = 'Malformed CVS Header on line: %d'
@@ -17,7 +19,7 @@ EMAKE_PARALLEL_DISABLED_VIA_MAKEOPTS = 'Upstream parallel compilation bug (MAKEO
DEPRECATED_BINDNOW_FLAGS = 'Deprecated bindnow-flags call on line: %d'
EAPI_DEFINED_AFTER_INHERIT = 'EAPI defined after inherit on line: %d'
NO_AS_NEEDED = 'Upstream asneeded linking bug (no-as-needed on line: %d)'
-PRESERVE_OLD_LIB = 'Upstream ABI change workaround on line: %d'
+PRESERVE_OLD_LIB = 'Ebuild calls deprecated preserve_old_lib on line: %d'
BUILT_WITH_USE = 'built_with_use on line: %d'
NO_OFFSET_WITH_HELPERS = "Helper function is used with D, ROOT, ED, EROOT or EPREFIX on line :%d"
SANDBOX_ADDPREDICT = 'Ebuild calls addpredict on line: %d'
diff --git a/pym/repoman/herdbase.py b/pym/repoman/herdbase.py
index fcf58b36c..c5b88ff17 100644
--- a/pym/repoman/herdbase.py
+++ b/pym/repoman/herdbase.py
@@ -1,8 +1,10 @@
# -*- coding: utf-8 -*-
# repoman: Herd database analysis
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2 or later
+from __future__ import unicode_literals
+
import errno
import xml.etree.ElementTree
try:
@@ -17,6 +19,8 @@ except (ImportError, SystemError, RuntimeError, Exception):
# modules, so that ImportModulesTestCase can succeed (or
# possibly alert us about unexpected import failures).
pass
+
+from portage import _encodings, _unicode_encode
from portage.exception import FileNotFound, ParseError, PermissionDenied
__all__ = [
@@ -56,11 +60,12 @@ def make_herd_base(filename):
all_emails = set()
try:
- xml_tree = xml.etree.ElementTree.parse(filename,
+ xml_tree = xml.etree.ElementTree.parse(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'),
parser=xml.etree.ElementTree.XMLParser(
target=_HerdsTreeBuilder()))
except ExpatError as e:
- raise ParseError("metadata.xml: " + str(e))
+ raise ParseError("metadata.xml: %s" % (e,))
except EnvironmentError as e:
func_call = "open('%s')" % filename
if e.errno == errno.EACCES:
diff --git a/pym/repoman/utilities.py b/pym/repoman/utilities.py
index 013858a6d..aec61fe2f 100644
--- a/pym/repoman/utilities.py
+++ b/pym/repoman/utilities.py
@@ -1,11 +1,11 @@
# repoman: Utilities
-# Copyright 2007-2012 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""This module contains utility functions to help repoman find ebuilds to
scan"""
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
__all__ = [
"detect_vcs_conflicts",
@@ -25,6 +25,7 @@ __all__ = [
"UpdateChangeLog"
]
+import collections
import errno
import io
from itertools import chain
@@ -33,18 +34,20 @@ import pwd
import re
import stat
import sys
+import subprocess
import time
import textwrap
import difflib
from tempfile import mkstemp
+import portage
from portage import os
from portage import shutil
-from portage import subprocess_getstatusoutput
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
from portage import output
+from portage.const import BASH_BINARY
from portage.localization import _
from portage.output import red, green
from portage.process import find_binary
@@ -71,22 +74,31 @@ def detect_vcs_conflicts(options, vcs):
Returns:
None (calls sys.exit on fatal problems)
"""
- retval = ("","")
+
+ cmd = None
if vcs == 'cvs':
logging.info("Performing a " + output.green("cvs -n up") + \
" with a little magic grep to check for updates.")
- retval = subprocess_getstatusoutput("cvs -n up 2>/dev/null | " + \
+ cmd = "cvs -n up 2>/dev/null | " + \
"egrep '^[^\?] .*' | " + \
- "egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'")
+ "egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'"
if vcs == 'svn':
logging.info("Performing a " + output.green("svn status -u") + \
" with a little magic grep to check for updates.")
- retval = subprocess_getstatusoutput("svn status -u 2>&1 | " + \
+ cmd = "svn status -u 2>&1 | " + \
"egrep -v '^. +.*/digest-[^/]+' | " + \
- "head -n-1")
-
- if vcs in ['cvs', 'svn']:
- mylines = retval[1].splitlines()
+ "head -n-1"
+
+ if cmd is not None:
+ # Use Popen instead of getstatusoutput(), in order to avoid
+ # unicode handling problems (see bug #310789).
+ args = [BASH_BINARY, "-c", cmd]
+ args = [_unicode_encode(x) for x in args]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out = _unicode_decode(proc.communicate()[0])
+ proc.wait()
+ mylines = out.splitlines()
myupdates = []
for line in mylines:
if not line:
@@ -98,7 +110,7 @@ def detect_vcs_conflicts(options, vcs):
logging.error(red("!!! Please fix the following issues reported " + \
"from cvs: ")+green("(U,P,M,A,R,D are ok)"))
logging.error(red("!!! Note: This is a pretend/no-modify pass..."))
- logging.error(retval[1])
+ logging.error(out)
sys.exit(1)
elif vcs == 'cvs' and line[0] in "UP":
myupdates.append(line[2:])
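
The Popen replacement reduces to the following pattern; run_pipeline is our name, the bash path is hard-coded where repoman uses BASH_BINARY, and 'utf-8' stands in for Portage's filesystem encoding:

import subprocess

def run_pipeline(cmd, bash="/bin/bash"):
	# Run a shell pipeline through an explicit bash invocation and
	# decode the combined stdout/stderr ourselves, rather than rely on
	# getstatusoutput() and its locale-dependent unicode handling
	# (bug #310789).
	args = [x.encode('utf-8') for x in (bash, "-c", cmd)]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE,
		stderr=subprocess.STDOUT)
	out = proc.communicate()[0].decode('utf-8', 'replace')
	proc.wait()
	return proc.returncode, out.splitlines()
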
@@ -298,12 +310,12 @@ def format_qa_output(formatter, stats, fails, dofull, dofail, options, qawarning
# we only want key value pairs where value > 0
for category, number in \
filter(lambda myitem: myitem[1] > 0, iter(stats.items())):
- formatter.add_literal_data(_unicode_decode(" " + category.ljust(30)))
+ formatter.add_literal_data(" " + category.ljust(30))
if category in qawarnings:
formatter.push_style("WARN")
else:
formatter.push_style("BAD")
- formatter.add_literal_data(_unicode_decode(str(number)))
+ formatter.add_literal_data("%s" % number)
formatter.pop_style()
formatter.add_line_break()
if not dofull:
@@ -314,10 +326,54 @@ def format_qa_output(formatter, stats, fails, dofull, dofail, options, qawarning
if not full and len(fails_list) > 12:
fails_list = fails_list[:12]
for failure in fails_list:
- formatter.add_literal_data(_unicode_decode(" " + failure))
+ formatter.add_literal_data(" " + failure)
formatter.add_line_break()
+def format_qa_output_column(formatter, stats, fails, dofull, dofail, options, qawarnings):
+ """Helper function that formats output in a machine-parseable column format
+
+ @param formatter: an instance of Formatter
+ @type formatter: Formatter
+ @param stats: dict of qa status items
+ @type stats: dict
+ @param fails: dict of qa status failures
+ @type fails: dict
+ @param dofull: Whether to print full results or a summary
+ @type dofull: boolean
+ @param dofail: Whether failure was hard or soft
+ @type dofail: boolean
+ @param options: The command-line options provided to repoman
+ @type options: Namespace
+ @param qawarnings: the set of warning types
+ @type qawarnings: set
+ @return: None (modifies formatter)
+ """
+ full = options.mode == 'full'
+ for category, number in stats.items():
+ # we only want key value pairs where value > 0
+ if number < 1:
+ continue
+
+ formatter.add_literal_data("NumberOf " + category + " ")
+ if category in qawarnings:
+ formatter.push_style("WARN")
+ else:
+ formatter.push_style("BAD")
+ formatter.add_literal_data("%s" % number)
+ formatter.pop_style()
+ formatter.add_line_break()
+ if not dofull:
+ if not full and dofail and category in qawarnings:
+ # warnings are considered noise when there are failures
+ continue
+ fails_list = fails[category]
+ if not full and len(fails_list) > 12:
+ fails_list = fails_list[:12]
+ for failure in fails_list:
+ formatter.add_literal_data(category + " " + failure)
+ formatter.add_line_break()
+
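
format_qa_output_column() writes through the same Formatter interface as the pretty-printer; a minimal stand-in is sketched below, with the output shape noted as a comment (inferred from the code above, not quoted from repoman):

import sys

class PlainFormatter(object):
	# Minimal stand-in for the formatter object the function expects;
	# styling calls are accepted and ignored.
	def add_literal_data(self, s):
		sys.stdout.write(s)
	def push_style(self, style):
		pass
	def pop_style(self):
		pass
	def add_line_break(self):
		sys.stdout.write("\n")

# With stats = {'ebuild.minorsyn': 2} and two matching failure strings,
# the column output is one "NumberOf ebuild.minorsyn 2" line followed by
# one "ebuild.minorsyn <failure>" line per failure.
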
def editor_is_executable(editor):
"""
Given an EDITOR string, validate that it refers to
@@ -367,10 +423,11 @@ def get_commit_message_with_editor(editor, message=None):
if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
return None
try:
- mylines = io.open(_unicode_encode(filename,
+ with io.open(_unicode_encode(filename,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace'
- ).readlines()
+ ) as f:
+ mylines = f.readlines()
except OSError as e:
if e.errno != errno.ENOENT:
raise
@@ -427,7 +484,7 @@ def FindPortdir(settings):
portdir = None
portdir_overlay = None
location = os.getcwd()
- pwd = os.environ.get('PWD', '')
+ pwd = _unicode_decode(os.environ.get('PWD', ''), encoding=_encodings['fs'])
if pwd and pwd != location and os.path.realpath(pwd) == location:
# getcwd() returns the canonical path but that makes it hard for repoman to
# orient itself if the user has symlinks in their portage tree structure.
@@ -449,7 +506,7 @@ def FindPortdir(settings):
if location[-1] != "/":
location += "/"
- for overlay in settings["PORTDIR_OVERLAY"].split():
+ for overlay in portage.util.shlex_split(settings["PORTDIR_OVERLAY"]):
overlay = os.path.realpath(overlay)
try:
s = os.stat(overlay)
@@ -509,6 +566,28 @@ def FindPortdir(settings):
return [normalize_path(x) for x in (portdir, portdir_overlay, location)]
+_vcs_type = collections.namedtuple('_vcs_type',
+ 'name dir_name')
+
+_FindVCS_data = (
+ _vcs_type(
+ name = 'git',
+ dir_name = '.git'
+ ),
+ _vcs_type(
+ name = 'bzr',
+ dir_name = '.bzr'
+ ),
+ _vcs_type(
+ name = 'hg',
+ dir_name = '.hg'
+ ),
+ _vcs_type(
+ name = 'svn',
+ dir_name = '.svn'
+ )
+)
+
def FindVCS():
""" Try to figure out in what VCS' working tree we are. """
@@ -520,14 +599,13 @@ def FindVCS():
pathprep = ''
while depth is None or depth > 0:
- if os.path.isdir(os.path.join(pathprep, '.git')):
- retvcs.append('git')
- if os.path.isdir(os.path.join(pathprep, '.bzr')):
- retvcs.append('bzr')
- if os.path.isdir(os.path.join(pathprep, '.hg')):
- retvcs.append('hg')
- if os.path.isdir(os.path.join(pathprep, '.svn')): # >=1.7
- retvcs.append('svn')
+ for vcs_type in _FindVCS_data:
+ vcs_dir = os.path.join(pathprep, vcs_type.dir_name)
+ if os.path.isdir(vcs_dir):
+ logging.debug('FindVCS: found %(name)s dir: %(vcs_dir)s' %
+ {'name': vcs_type.name,
+ 'vcs_dir': os.path.abspath(vcs_dir)})
+ retvcs.append(vcs_type.name)
if retvcs:
break
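
The table-driven rewrite makes adding a VCS a one-line change to _FindVCS_data; the loop, reduced to a single directory level (find_vcs_here is ours):

import collections
import os

_vcs_type = collections.namedtuple('_vcs_type', 'name dir_name')

_FindVCS_data = (
	_vcs_type(name='git', dir_name='.git'),
	_vcs_type(name='bzr', dir_name='.bzr'),
	_vcs_type(name='hg', dir_name='.hg'),
	_vcs_type(name='svn', dir_name='.svn'),
)

def find_vcs_here(path='.'):
	# Report every VCS whose control directory exists directly
	# under `path`.
	return [v.name for v in _FindVCS_data
		if os.path.isdir(os.path.join(path, v.dir_name))]

print(find_vcs_here())
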
@@ -763,7 +841,7 @@ def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
line = line.replace('<PACKAGE_NAME>', package)
line = _update_copyright_year(year, line)
header_lines.append(line)
- header_lines.append(_unicode_decode('\n'))
+ header_lines.append('\n')
clskel_file.close()
# write new ChangeLog entry
@@ -773,10 +851,10 @@ def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
if not fn.endswith('.ebuild'):
continue
ebuild = fn.split(os.sep)[-1][0:-7]
- clnew_lines.append(_unicode_decode('*%s (%s)\n' % (ebuild, date)))
+ clnew_lines.append('*%s (%s)\n' % (ebuild, date))
newebuild = True
if newebuild:
- clnew_lines.append(_unicode_decode('\n'))
+ clnew_lines.append('\n')
trivial_files = ('ChangeLog', 'Manifest')
display_new = ['+' + elem for elem in new
if elem not in trivial_files]
@@ -803,19 +881,19 @@ def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
for line in textwrap.wrap(mesg, 80, \
initial_indent=' ', subsequent_indent=' ', \
break_on_hyphens=False):
- clnew_lines.append(_unicode_decode('%s\n' % line))
+ clnew_lines.append('%s\n' % line)
for line in textwrap.wrap(msg, 80, \
initial_indent=' ', subsequent_indent=' '):
- clnew_lines.append(_unicode_decode('%s\n' % line))
+ clnew_lines.append('%s\n' % line)
# Don't append a trailing newline if the file is new.
if clold_file is not None:
- clnew_lines.append(_unicode_decode('\n'))
+ clnew_lines.append('\n')
f = io.open(f, mode='w', encoding=_encodings['repo.content'],
errors='backslashreplace')
for line in clnew_lines:
- f.write(_unicode_decode(line))
+ f.write(line)
# append stuff from old ChangeLog
if clold_file is not None: