diff options
author | Wim Lavrijsen <WLavrijsen@lbl.gov> | 2021-02-09 19:35:45 -0800 |
---|---|---|
committer | Wim Lavrijsen <WLavrijsen@lbl.gov> | 2021-02-09 19:35:45 -0800 |
commit | 3179f643d142f8a7025695c280b5089d330d078c (patch) | |
tree | 8bf948d55909c6f9f240bbc29db5ad418f339d54 | |
parent | more correct way of finding the backend library (diff) | |
parent | backport changes to _ctypes needed for macOS Big Sur from py3.7 (diff) |
download | pypy-3179f643d142f8a7025695c280b5089d330d078c.tar.gz pypy-3179f643d142f8a7025695c280b5089d330d078c.tar.bz2 pypy-3179f643d142f8a7025695c280b5089d330d078c.zip |
merge default into cppyy_packaging branch
69 files changed, 2305 insertions, 579 deletions
@@ -31,7 +31,7 @@ directories is licensed as follows: DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2020 +PyPy Copyright holders 2003-2021 -------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/extra_tests/test_os.py b/extra_tests/test_os.py index 334590df21..e1fd05def8 100644 --- a/extra_tests/test_os.py +++ b/extra_tests/test_os.py @@ -95,7 +95,7 @@ if hasattr(os, "spawnv"): [python, '-c', 'raise(SystemExit(42))']) assert ret == 42 -if hasattr(os, "spawnve"): +if hasattr(os, "spawnve") and os.path.exists('/bin/sh'): def test_spawnve(): env = {'FOOBAR': '42'} cmd = "exit $FOOBAR" diff --git a/lib-python/2.7/ctypes/macholib/dyld.py b/lib-python/2.7/ctypes/macholib/dyld.py index 1fdf8d648f..9aede10e8b 100644 --- a/lib-python/2.7/ctypes/macholib/dyld.py +++ b/lib-python/2.7/ctypes/macholib/dyld.py @@ -7,6 +7,12 @@ from framework import framework_info from dylib import dylib_info from itertools import * +try: + from _ctypes import _dyld_shared_cache_contains_path +except ImportError: + def _dyld_shared_cache_contains_path(*args): + raise NotImplementedError + __all__ = [ 'dyld_find', 'framework_find', 'framework_info', 'dylib_info', @@ -130,8 +136,15 @@ def dyld_find(name, executable_path=None, env=None): dyld_executable_path_search(name, executable_path), dyld_default_search(name, env), ), env): + if os.path.isfile(path): return path + try: + if _dyld_shared_cache_contains_path(path): + return path + except NotImplementedError: + pass + raise ValueError("dylib %s could not be found" % (name,)) def framework_find(fn, executable_path=None, env=None): diff --git a/lib-python/2.7/ctypes/test/test_macholib.py b/lib-python/2.7/ctypes/test/test_macholib.py index 9779b2f3f4..97dfa9b6d8 100644 --- a/lib-python/2.7/ctypes/test/test_macholib.py +++ b/lib-python/2.7/ctypes/test/test_macholib.py @@ -48,18 +48,22 @@ class MachOTest(unittest.TestCase): @unittest.skipUnless(sys.platform == "darwin", 'OSX-specific 
test') def test_find(self): - self.assertEqual(find_lib('pthread'), - '/usr/lib/libSystem.B.dylib') + # On Mac OS 11, system dylibs are only present in the shared cache, + # so symlinks like libpthread.dylib -> libSystem.B.dylib will not + # be resolved by dyld_find + self.assertIn(find_lib('pthread'), + ('/usr/lib/libSystem.B.dylib', '/usr/lib/libpthread.dylib')) result = find_lib('z') # Issue #21093: dyld default search path includes $HOME/lib and # /usr/local/lib before /usr/lib, which caused test failures if # a local copy of libz exists in one of them. Now ignore the head # of the path. - self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib") + self.assertRegexpMatches(result, r".*/lib/libz\.*\.dylib") - self.assertEqual(find_lib('IOKit'), - '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') + self.assertIn(find_lib('IOKit'), + ('/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit', + '/System/Library/Frameworks/IOKit.framework/IOKit')) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/distutils/_msvccompiler.py b/lib-python/2.7/distutils/_msvccompiler.py new file mode 100644 index 0000000000..c3dd15340c --- /dev/null +++ b/lib-python/2.7/distutils/_msvccompiler.py @@ -0,0 +1,534 @@ +"""distutils._msvccompiler + +Contains MSVCCompiler, an implementation of the abstract CCompiler class +for Microsoft Visual Studio 2015. + +The module is compatible with VS 2015 and later. You can find legacy support +for older versions in distutils.msvc9compiler and distutils.msvccompiler. 
+""" + +# Written by Perry Stoll +# hacked by Robin Becker and Thomas Heller to do a better job of +# finding DevStudio (through the registry) +# ported to VS 2005 and VS 2008 by Christian Heimes +# ported to VS 2015 by Steve Dower + +import os +import subprocess +import contextlib +import warnings +import _winreg as winreg + +from distutils.errors import DistutilsExecError, DistutilsPlatformError, \ + CompileError, LibError, LinkError +from distutils.ccompiler import CCompiler, gen_lib_options +from distutils import log +from distutils.util import get_platform + +from itertools import count + +def _find_vc2015(): + try: + key = winreg.OpenKeyEx( + winreg.HKEY_LOCAL_MACHINE, + r"Software\Microsoft\VisualStudio\SxS\VC7", + access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY + ) + except OSError: + log.debug("Visual C++ is not registered") + return None, None + + best_version = 0 + best_dir = None + with key: + for i in count(): + try: + v, vc_dir, vt = winreg.EnumValue(key, i) + except OSError: + break + if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir): + try: + version = int(float(v)) + except (ValueError, TypeError): + continue + if version >= 14 and version > best_version: + best_version, best_dir = version, vc_dir + return best_version, best_dir + +def _find_vc2017(): + """Returns "15, path" based on the result of invoking vswhere.exe + If no install is found, returns "None, None" + + The version is returned to avoid unnecessarily changing the function + result. It may be ignored when the path is not None. + + If vswhere.exe is not available, by definition, VS 2017 is not + installed. 
+ """ + root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") + if not root: + return None, None + + try: + path = subprocess.check_output([ + os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), + "-latest", + "-prerelease", + "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", + "-property", "installationPath", + "-products", "*", + ]).strip() + except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): + return None, None + + path = os.path.join(path, "VC", "Auxiliary", "Build") + if os.path.isdir(path): + return 15, path + + return None, None + +PLAT_SPEC_TO_RUNTIME = { + 'x86' : 'x86', + 'x86_amd64' : 'x64', + 'x86_arm' : 'arm', + 'x86_arm64' : 'arm64' +} + +def _find_vcvarsall(plat_spec): + # bpo-38597: Removed vcruntime return value + _, best_dir = _find_vc2017() + + if not best_dir: + best_version, best_dir = _find_vc2015() + + if not best_dir: + log.debug("No suitable Visual C++ version found") + return None, None + + vcvarsall = os.path.join(best_dir, "vcvarsall.bat") + if not os.path.isfile(vcvarsall): + log.debug("%s cannot be found", vcvarsall) + return None, None + + return vcvarsall, None + +def _get_vc_env(plat_spec): + if os.getenv("DISTUTILS_USE_SDK"): + return { + key.lower(): value + for key, value in os.environ.items() + } + + vcvarsall, _ = _find_vcvarsall(plat_spec) + if not vcvarsall: + raise DistutilsPlatformError("Unable to find vcvarsall.bat") + + try: + out = subprocess.check_output( + 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), + stderr=subprocess.STDOUT, + ).decode('utf-16le', errors='replace').encode('utf-8') + except subprocess.CalledProcessError as exc: + log.error(exc.output) + raise DistutilsPlatformError("Error executing {}" + .format(exc.cmd)) + + env = { + key.lower(): value + for key, _, value in + (line.partition('=') for line in out.splitlines()) + if key and value + } + + return env + +def _find_exe(exe, paths=None): + """Return path to an 
MSVC executable program. + + Tries to find the program in several places: first, one of the + MSVC program search paths from the registry; next, the directories + in the PATH environment variable. If any of those work, return an + absolute path that is known to exist. If none of them work, just + return the original program name, 'exe'. + """ + if not paths: + paths = os.getenv('path').split(os.pathsep) + for p in paths: + fn = os.path.join(os.path.abspath(p), exe) + if os.path.isfile(fn): + return fn + return exe + +# A map keyed by get_platform() return values to values accepted by +# 'vcvarsall.bat'. Always cross-compile from x86 to work with the +# lighter-weight MSVC installs that do not include native 64-bit tools. +PLAT_TO_VCVARS = { + 'win32' : 'x86', + 'win-amd64' : 'x86_amd64', + 'win-arm32' : 'x86_arm', + 'win-arm64' : 'x86_arm64' +} + +class MSVCCompiler(CCompiler) : + """Concrete class that implements an interface to Microsoft Visual C++, + as defined by the CCompiler abstract class.""" + + compiler_type = 'msvc' + + # Just set this so CCompiler's constructor doesn't barf. We currently + # don't use the 'set_executables()' bureaucracy provided by CCompiler, + # as it really isn't necessary for this sort of single-compiler class. + # Would be nice to have a consistent interface with UnixCCompiler, + # though, so it's worth thinking about. + executables = {} + + # Private class data (need to distinguish C from C++ source for compiler) + _c_extensions = ['.c'] + _cpp_extensions = ['.cc', '.cpp', '.cxx'] + _rc_extensions = ['.rc'] + _mc_extensions = ['.mc'] + + # Needed for the filename generation methods provided by the + # base class, CCompiler. 
+ src_extensions = (_c_extensions + _cpp_extensions + + _rc_extensions + _mc_extensions) + res_extension = '.res' + obj_extension = '.obj' + static_lib_extension = '.lib' + shared_lib_extension = '.dll' + static_lib_format = shared_lib_format = '%s%s' + exe_extension = '.exe' + + + def __init__(self, verbose=0, dry_run=0, force=0): + CCompiler.__init__ (self, verbose, dry_run, force) + # target platform (.plat_name is consistent with 'bdist') + self.plat_name = None + self.initialized = False + + def initialize(self, plat_name=None): + # multi-init means we would need to check platform same each time... + assert not self.initialized, "don't init multiple times" + if plat_name is None: + plat_name = get_platform() + # sanity check for platforms to prevent obscure errors later. + if plat_name not in PLAT_TO_VCVARS: + raise DistutilsPlatformError("--plat-name must be one of {}" + .format(tuple(PLAT_TO_VCVARS))) + + # Get the vcvarsall.bat spec for the requested platform. + plat_spec = PLAT_TO_VCVARS[plat_name] + + vc_env = _get_vc_env(plat_spec) + if not vc_env: + raise DistutilsPlatformError("Unable to find a compatible " + "Visual Studio installation.") + + self._paths = vc_env.get('path', '') + paths = self._paths.split(os.pathsep) + self.cc = _find_exe("cl.exe", paths) + self.linker = _find_exe("link.exe", paths) + self.lib = _find_exe("lib.exe", paths) + self.rc = _find_exe("rc.exe", paths) # resource compiler + self.mc = _find_exe("mc.exe", paths) # message compiler + self.mt = _find_exe("mt.exe", paths) # message compiler + + for dir in vc_env.get('include', '').split(os.pathsep): + if dir: + self.add_include_dir(dir.rstrip(os.sep)) + + for dir in vc_env.get('lib', '').split(os.pathsep): + if dir: + self.add_library_dir(dir.rstrip(os.sep)) + + self.preprocess_options = None + # bpo-38597: Always compile with dynamic linking + # Future releases of Python 3.x will include all past + # versions of vcruntime*.dll for compatibility. 
+ self.compile_options = [ + '/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG', '/MD' + ] + + self.compile_options_debug = [ + '/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG' + ] + + ldflags = [ + '/nologo', '/INCREMENTAL:NO', '/LTCG' + ] + + ldflags_debug = [ + '/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL' + ] + + self.ldflags_exe = ldflags + ['/MANIFEST:EMBED,ID=1'] + self.ldflags_exe_debug = ldflags_debug + ['/MANIFEST:EMBED,ID=1'] + self.ldflags_shared = ldflags + ['/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO'] + self.ldflags_shared_debug = ldflags_debug + ['/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO'] + self.ldflags_static = ldflags[:] + self.ldflags_static_debug = ldflags_debug[:] + + self._ldflags = { + (CCompiler.EXECUTABLE, None): self.ldflags_exe, + (CCompiler.EXECUTABLE, False): self.ldflags_exe, + (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug, + (CCompiler.SHARED_OBJECT, None): self.ldflags_shared, + (CCompiler.SHARED_OBJECT, False): self.ldflags_shared, + (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug, + (CCompiler.SHARED_LIBRARY, None): self.ldflags_static, + (CCompiler.SHARED_LIBRARY, False): self.ldflags_static, + (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug, + } + + self.initialized = True + + # -- Worker methods ------------------------------------------------ + + def object_filenames(self, + source_filenames, + strip_dir=0, + output_dir=''): + ext_map = {ext: self.obj_extension for ext in self.src_extensions} + ext_map.update({ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions}) + output_dir = output_dir or '' + + def make_out_path(p): + base, ext = os.path.splitext(p) + if strip_dir: + base = os.path.basename(base) + else: + _, base = os.path.splitdrive(base) + if base.startswith((os.path.sep, os.path.altsep)): + base = base[1:] + try: + # XXX: This may produce absurdly long paths. 
We should check + # the length of the result and trim base until we fit within + # 260 characters. + return os.path.join(output_dir, base + ext_map[ext]) + except LookupError: + # Better to raise an exception instead of silently continuing + # and later complain about sources and targets having + # different lengths + raise CompileError("Don't know how to compile {}".format(p)) + + return list(map(make_out_path, source_filenames)) + + + def compile(self, sources, + output_dir=None, macros=None, include_dirs=None, debug=0, + extra_preargs=None, extra_postargs=None, depends=None): + + if not self.initialized: + self.initialize() + compile_info = self._setup_compile(output_dir, macros, include_dirs, + sources, depends, extra_postargs) + macros, objects, extra_postargs, pp_opts, build = compile_info + + compile_opts = extra_preargs or [] + compile_opts.append('/c') + if debug: + compile_opts.extend(self.compile_options_debug) + else: + compile_opts.extend(self.compile_options) + + + add_cpp_opts = False + + for obj in objects: + try: + src, ext = build[obj] + except KeyError: + continue + if debug: + # pass the full pathname to MSVC in debug mode, + # this allows the debugger to find the source file + # without asking the user to browse for it + src = os.path.abspath(src) + + if ext in self._c_extensions: + input_opt = "/Tc" + src + elif ext in self._cpp_extensions: + input_opt = "/Tp" + src + add_cpp_opts = True + elif ext in self._rc_extensions: + # compile .RC to .RES file + input_opt = src + output_opt = "/fo" + obj + try: + self.spawn([self.rc] + pp_opts + [output_opt, input_opt]) + except DistutilsExecError as msg: + raise CompileError(msg) + continue + elif ext in self._mc_extensions: + # Compile .MC to .RC file to .RES file. 
+ # * '-h dir' specifies the directory for the + # generated include file + # * '-r dir' specifies the target directory of the + # generated RC file and the binary message resource + # it includes + # + # For now (since there are no options to change this), + # we use the source-directory for the include file and + # the build directory for the RC file and message + # resources. This works at least for win32all. + h_dir = os.path.dirname(src) + rc_dir = os.path.dirname(obj) + try: + # first compile .MC to .RC and .H file + self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src]) + base, _ = os.path.splitext(os.path.basename (src)) + rc_file = os.path.join(rc_dir, base + '.rc') + # then compile .RC to .RES file + self.spawn([self.rc, "/fo" + obj, rc_file]) + + except DistutilsExecError as msg: + raise CompileError(msg) + continue + else: + # how to handle this file? + raise CompileError("Don't know how to compile {} to {}" + .format(src, obj)) + + args = [self.cc] + compile_opts + pp_opts + if add_cpp_opts: + args.append('/EHsc') + args.append(input_opt) + args.append("/Fo" + obj) + args.extend(extra_postargs) + + try: + self.spawn(args) + except DistutilsExecError as msg: + raise CompileError(msg) + + return objects + + + def create_static_lib(self, + objects, + output_libname, + output_dir=None, + debug=0, + target_lang=None): + + if not self.initialized: + self.initialize() + objects, output_dir = self._fix_object_args(objects, output_dir) + output_filename = self.library_filename(output_libname, + output_dir=output_dir) + + if self._need_link(objects, output_filename): + lib_args = objects + ['/OUT:' + output_filename] + if debug: + pass # XXX what goes here? 
+ try: + log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args)) + self.spawn([self.lib] + lib_args) + except DistutilsExecError as msg: + raise LibError(msg) + else: + log.debug("skipping %s (up-to-date)", output_filename) + + + def link(self, + target_desc, + objects, + output_filename, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + export_symbols=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + + if not self.initialized: + self.initialize() + objects, output_dir = self._fix_object_args(objects, output_dir) + fixed_args = self._fix_lib_args(libraries, library_dirs, + runtime_library_dirs) + libraries, library_dirs, runtime_library_dirs = fixed_args + + if runtime_library_dirs: + self.warn("I don't know what to do with 'runtime_library_dirs': " + + str(runtime_library_dirs)) + + lib_opts = gen_lib_options(self, + library_dirs, runtime_library_dirs, + libraries) + if output_dir is not None: + output_filename = os.path.join(output_dir, output_filename) + + if self._need_link(objects, output_filename): + ldflags = self._ldflags[target_desc, debug] + + export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])] + + ld_args = (ldflags + lib_opts + export_opts + + objects + ['/OUT:' + output_filename]) + + # The MSVC linker generates .lib and .exp files, which cannot be + # suppressed by any linker switches. The .lib files may even be + # needed! Make sure they are generated in the temporary build + # directory. Since they have different names for debug and release + # builds, they can go into the same directory. 
+ build_temp = os.path.dirname(objects[0]) + if export_symbols is not None: + (dll_name, dll_ext) = os.path.splitext( + os.path.basename(output_filename)) + implib_file = os.path.join( + build_temp, + self.library_filename(dll_name)) + ld_args.append ('/IMPLIB:' + implib_file) + + if extra_preargs: + ld_args[:0] = extra_preargs + if extra_postargs: + ld_args.extend(extra_postargs) + + output_dir = os.path.dirname(os.path.abspath(output_filename)) + self.mkpath(output_dir) + try: + log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args)) + self.spawn([self.linker] + ld_args) + except DistutilsExecError as msg: + raise LinkError(msg) + else: + log.debug("skipping %s (up-to-date)", output_filename) + + def spawn(self, cmd): + env = dict(os.environ, PATH=self._paths) + return CCompiler.spawn(self, cmd, env=env) + + # -- Miscellaneous methods ----------------------------------------- + # These are all used by the 'gen_lib_options() function, in + # ccompiler.py. + + def library_dir_option(self, dir): + return "/LIBPATH:" + dir + + def runtime_library_dir_option(self, dir): + raise DistutilsPlatformError( + "don't know how to set runtime library search path for MSVC") + + def library_option(self, lib): + return self.library_filename(lib) + + def find_library_file(self, dirs, lib, debug=0): + # Prefer a debugging library if found (and requested), but deal + # with it if we don't have one. 
+ if debug: + try_names = [lib + "_d", lib] + else: + try_names = [lib] + for dir in dirs: + for name in try_names: + libfile = os.path.join(dir, self.library_filename(name)) + if os.path.isfile(libfile): + return libfile + else: + # Oops, didn't find it in *any* of 'dirs' + return None diff --git a/lib-python/2.7/distutils/ccompiler.py b/lib-python/2.7/distutils/ccompiler.py index 3a7b5b84e1..7d6261b1fb 100644 --- a/lib-python/2.7/distutils/ccompiler.py +++ b/lib-python/2.7/distutils/ccompiler.py @@ -873,8 +873,8 @@ int main (int argc, char **argv) { def execute(self, func, args, msg=None, level=1): execute(func, args, msg, self.dry_run) - def spawn(self, cmd): - spawn(cmd, dry_run=self.dry_run) + def spawn(self, cmd, **kwargs): + spawn(cmd, dry_run=self.dry_run, **kwargs) def move_file(self, src, dst): return move_file(src, dst, dry_run=self.dry_run) @@ -897,7 +897,6 @@ _default_compilers = ( # on a cygwin built python we can use gcc like an ordinary UNIXish # compiler ('cygwin.*', 'unix'), - ('os2emx', 'emx'), # OS name mappings ('posix', 'unix'), @@ -906,15 +905,14 @@ _default_compilers = ( ) def get_default_compiler(osname=None, platform=None): - """ Determine the default compiler to use for the given platform. + """Determine the default compiler to use for the given platform. - osname should be one of the standard Python OS names (i.e. the - ones returned by os.name) and platform the common value - returned by sys.platform for the platform in question. - - The default values are os.name and sys.platform in case the - parameters are not given. + osname should be one of the standard Python OS names (i.e. the + ones returned by os.name) and platform the common value + returned by sys.platform for the platform in question. + The default values are os.name and sys.platform in case the + parameters are not given. 
""" if osname is None: osname = os.name @@ -932,7 +930,7 @@ def get_default_compiler(osname=None, platform=None): # is assumed to be in the 'distutils' package.) compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler', "standard UNIX-style compiler"), - 'msvc': ('msvccompiler', 'MSVCCompiler', + 'msvc': ('_msvccompiler', 'MSVCCompiler', "Microsoft Visual C++"), 'cygwin': ('cygwinccompiler', 'CygwinCCompiler', "Cygwin port of GNU C Compiler for Win32"), @@ -940,8 +938,6 @@ compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler', "Mingw32 port of GNU C Compiler for Win32"), 'bcpp': ('bcppcompiler', 'BCPPCompiler', "Borland C++ Compiler"), - 'emx': ('emxccompiler', 'EMXCCompiler', - "EMX port of GNU C Compiler for OS/2"), } def show_compilers(): @@ -984,7 +980,7 @@ def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0): msg = "don't know how to compile C/C++ code on platform '%s'" % plat if compiler is not None: msg = msg + " with '%s' compiler" % compiler - raise DistutilsPlatformError, msg + raise DistutilsPlatformError(msg) try: module_name = "distutils." 
+ module_name @@ -992,13 +988,13 @@ def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0): module = sys.modules[module_name] klass = vars(module)[class_name] except ImportError: - raise DistutilsModuleError, \ + raise DistutilsModuleError( "can't compile C/C++ code: unable to load module '%s'" % \ - module_name + module_name) except KeyError: - raise DistutilsModuleError, \ - ("can't compile C/C++ code: unable to find class '%s' " + - "in module '%s'") % (class_name, module_name) + raise DistutilsModuleError( + "can't compile C/C++ code: unable to find class '%s' " + "in module '%s'" % (class_name, module_name)) # XXX The None is necessary to preserve backwards compatibility # with classes that expect verbose to be the first positional diff --git a/lib-python/2.7/distutils/msvc9compiler.py b/lib-python/2.7/distutils/msvc9compiler.py index 56ec9d3bc8..261fae5fb9 100644 --- a/lib-python/2.7/distutils/msvc9compiler.py +++ b/lib-python/2.7/distutils/msvc9compiler.py @@ -12,30 +12,28 @@ for older versions of VS in distutils.msvccompiler. 
# finding DevStudio (through the registry) # ported to VS2005 and VS 2008 by Christian Heimes -__revision__ = "$Id$" - import os import subprocess import sys import re -from distutils.errors import (DistutilsExecError, DistutilsPlatformError, - CompileError, LibError, LinkError) +from distutils.errors import DistutilsExecError, DistutilsPlatformError, \ + CompileError, LibError, LinkError from distutils.ccompiler import CCompiler, gen_lib_options from distutils import log from distutils.util import get_platform -import _winreg +import _winreg as winreg -RegOpenKeyEx = _winreg.OpenKeyEx -RegEnumKey = _winreg.EnumKey -RegEnumValue = _winreg.EnumValue -RegError = _winreg.error +RegOpenKeyEx = winreg.OpenKeyEx +RegEnumKey = winreg.EnumKey +RegEnumValue = winreg.EnumValue +RegError = winreg.error -HKEYS = (_winreg.HKEY_USERS, - _winreg.HKEY_CURRENT_USER, - _winreg.HKEY_LOCAL_MACHINE, - _winreg.HKEY_CLASSES_ROOT) +HKEYS = (winreg.HKEY_USERS, + winreg.HKEY_CURRENT_USER, + winreg.HKEY_LOCAL_MACHINE, + winreg.HKEY_CLASSES_ROOT) NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32) if NATIVE_WIN64: @@ -58,7 +56,6 @@ else: PLAT_TO_VCVARS = { 'win32' : 'x86', 'win-amd64' : 'amd64', - 'win-ia64' : 'ia64', } class Reg: @@ -230,6 +227,7 @@ def find_vcvarsall(version): productdir = Reg.get_value(r"%s\Setup\VC" % vsbase, "productdir") except KeyError: + log.debug("Unable to find productdir in registry") productdir = None # trying Express edition @@ -267,7 +265,7 @@ def query_vcvarsall(version, arch="x86"): """Launch vcvarsall.bat and read the settings from its environment """ vcvarsall = find_vcvarsall(version) - interesting = set(("include", "lib", "libpath", "path")) + interesting = {"include", "lib", "libpath", "path"} result = {} if vcvarsall is None: @@ -356,7 +354,7 @@ class MSVCCompiler(CCompiler) : if plat_name is None: plat_name = get_platform() # sanity check for platforms to prevent obscure errors later. 
- ok_plats = 'win32', 'win-amd64', 'win-ia64' + ok_plats = 'win32', 'win-amd64' if plat_name not in ok_plats: raise DistutilsPlatformError("--plat-name must be one of %s" % (ok_plats,)) @@ -374,7 +372,6 @@ class MSVCCompiler(CCompiler) : # to cross compile, you use 'x86_amd64'. # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross # compile use 'x86' (ie, it runs the x86 compiler directly) - # No idea how itanium handles this, if at all. if plat_name == get_platform() or plat_name == 'win32': # native build or cross-compile to win32 plat_spec = PLAT_TO_VCVARS[plat_name] @@ -507,7 +504,7 @@ class MSVCCompiler(CCompiler) : try: self.spawn([self.rc] + pp_opts + [output_opt] + [input_opt]) - except DistutilsExecError, msg: + except DistutilsExecError as msg: raise CompileError(msg) continue elif ext in self._mc_extensions: @@ -534,7 +531,7 @@ class MSVCCompiler(CCompiler) : self.spawn([self.rc] + ["/fo" + obj] + [rc_file]) - except DistutilsExecError, msg: + except DistutilsExecError as msg: raise CompileError(msg) continue else: @@ -547,7 +544,7 @@ class MSVCCompiler(CCompiler) : self.spawn([self.cc] + compile_opts + pp_opts + [input_opt, output_opt] + extra_postargs) - except DistutilsExecError, msg: + except DistutilsExecError as msg: raise CompileError(msg) return objects @@ -572,7 +569,7 @@ class MSVCCompiler(CCompiler) : pass # XXX what goes here? 
try: self.spawn([self.lib] + lib_args) - except DistutilsExecError, msg: + except DistutilsExecError as msg: raise LibError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) @@ -653,7 +650,7 @@ class MSVCCompiler(CCompiler) : self.mkpath(os.path.dirname(output_filename)) try: self.spawn([self.linker] + ld_args) - except DistutilsExecError, msg: + except DistutilsExecError as msg: raise LinkError(msg) # embed the manifest @@ -668,7 +665,7 @@ class MSVCCompiler(CCompiler) : try: self.spawn(['mt.exe', '-nologo', '-manifest', mffilename, out_arg]) - except DistutilsExecError, msg: + except DistutilsExecError as msg: raise LinkError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) @@ -683,7 +680,6 @@ class MSVCCompiler(CCompiler) : temp_manifest = os.path.join( build_temp, os.path.basename(output_filename) + ".manifest") - ld_args.append('/MANIFEST') ld_args.append('/MANIFESTFILE:' + temp_manifest) def manifest_get_embed_info(self, target_desc, ld_args): @@ -730,7 +726,7 @@ class MSVCCompiler(CCompiler) : r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""", re.DOTALL) manifest_buf = re.sub(pattern, "", manifest_buf) - pattern = "<dependentAssembly>\s*</dependentAssembly>" + pattern = r"<dependentAssembly>\s*</dependentAssembly>" manifest_buf = re.sub(pattern, "", manifest_buf) # Now see if any other assemblies are referenced - if not, we # don't want a manifest embedded. diff --git a/lib-python/2.7/distutils/msvccompiler.py b/lib-python/2.7/distutils/msvccompiler.py index 0e69fd368c..6ddf326be6 100644 --- a/lib-python/2.7/distutils/msvccompiler.py +++ b/lib-python/2.7/distutils/msvccompiler.py @@ -8,41 +8,37 @@ for the Microsoft Visual Studio. 
# hacked by Robin Becker and Thomas Heller to do a better job of # finding DevStudio (through the registry) -__revision__ = "$Id$" - -import sys -import os -import string - -from distutils.errors import (DistutilsExecError, DistutilsPlatformError, - CompileError, LibError, LinkError) -from distutils.ccompiler import CCompiler, gen_lib_options +import sys, os +from distutils.errors import \ + DistutilsExecError, DistutilsPlatformError, \ + CompileError, LibError, LinkError +from distutils.ccompiler import \ + CCompiler, gen_lib_options from distutils import log -_can_read_reg = 0 +_can_read_reg = False try: - import _winreg + import _winreg as winreg - _can_read_reg = 1 - hkey_mod = _winreg + _can_read_reg = True + hkey_mod = winreg - RegOpenKeyEx = _winreg.OpenKeyEx - RegEnumKey = _winreg.EnumKey - RegEnumValue = _winreg.EnumValue - RegError = _winreg.error + RegOpenKeyEx = winreg.OpenKeyEx + RegEnumKey = winreg.EnumKey + RegEnumValue = winreg.EnumValue + RegError = winreg.error except ImportError: try: import win32api import win32con - _can_read_reg = 1 + _can_read_reg = True hkey_mod = win32con RegOpenKeyEx = win32api.RegOpenKeyEx RegEnumKey = win32api.RegEnumKey RegEnumValue = win32api.RegEnumValue RegError = win32api.error - except ImportError: log.info("Warning: Can't read registry to find the " "necessary compiler setting\n" @@ -58,20 +54,19 @@ if _can_read_reg: def read_keys(base, key): """Return list of registry keys.""" - try: handle = RegOpenKeyEx(base, key) except RegError: return None L = [] i = 0 - while 1: + while True: try: k = RegEnumKey(handle, i) except RegError: break L.append(k) - i = i + 1 + i += 1 return L def read_values(base, key): @@ -85,27 +80,26 @@ def read_values(base, key): return None d = {} i = 0 - while 1: + while True: try: name, value, type = RegEnumValue(handle, i) except RegError: break name = name.lower() d[convert_mbcs(name)] = convert_mbcs(value) - i = i + 1 + i += 1 return d def convert_mbcs(s): - enc = getattr(s, "encode", 
None) - if enc is not None: + dec = getattr(s, "decode", None) + if dec is not None: try: - s = enc("mbcs") + s = dec("mbcs") except UnicodeError: pass return s class MacroExpander: - def __init__(self, version): self.macros = {} self.load_macros(version) @@ -128,9 +122,9 @@ class MacroExpander: self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1") else: self.set_macro("FrameworkSDKDir", net, "sdkinstallroot") - except KeyError: - raise DistutilsPlatformError, \ - ("""Python was built with Visual Studio 2003; + except KeyError as exc: # + raise DistutilsPlatformError( + """Python was built with Visual Studio 2003; extensions must be built with a compiler than can generate compatible binaries. Visual Studio 2003 was not found on this system. If you have Cygwin installed, you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") @@ -147,7 +141,7 @@ you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") def sub(self, s): for k, v in self.macros.items(): - s = string.replace(s, k, v) + s = s.replace(k, v) return s def get_build_version(): @@ -156,14 +150,16 @@ def get_build_version(): For Python 2.3 and up, the version number is included in sys.version. For earlier versions, assume the compiler is MSVC 6. """ - prefix = "MSC v." - i = string.find(sys.version, prefix) + i = sys.version.find(prefix) if i == -1: return 6 i = i + len(prefix) s, rest = sys.version[i:].split(" ", 1) majorVersion = int(s[:-2]) - 6 + if majorVersion >= 13: + # v13 was skipped and should be v14 + majorVersion += 1 minorVersion = int(s[2:3]) / 10.0 # I don't think paths are affected by minor version in version 6 if majorVersion == 6: @@ -176,14 +172,14 @@ def get_build_version(): def get_build_architecture(): """Return the processor architecture. - Possible results are "Intel", "Itanium", or "AMD64". + Possible results are "Intel" or "AMD64". 
""" prefix = " bit (" - i = string.find(sys.version, prefix) + i = sys.version.find(prefix) if i == -1: return "Intel" - j = string.find(sys.version, ")", i) + j = sys.version.find(")", i) return sys.version[i+len(prefix):j] def normalize_and_reduce_paths(paths): @@ -201,7 +197,7 @@ def normalize_and_reduce_paths(paths): return reduced_paths -class MSVCCompiler (CCompiler) : +class MSVCCompiler(CCompiler) : """Concrete class that implements an interface to Microsoft Visual C++, as defined by the CCompiler abstract class.""" @@ -231,7 +227,7 @@ class MSVCCompiler (CCompiler) : static_lib_format = shared_lib_format = '%s%s' exe_extension = '.exe' - def __init__ (self, verbose=0, dry_run=0, force=0): + def __init__(self, verbose=0, dry_run=0, force=0): CCompiler.__init__ (self, verbose, dry_run, force) self.__version = get_build_version() self.__arch = get_build_architecture() @@ -262,11 +258,11 @@ class MSVCCompiler (CCompiler) : else: self.__paths = self.get_msvc_paths("path") - if len (self.__paths) == 0: - raise DistutilsPlatformError, \ - ("Python was built with %s, " + if len(self.__paths) == 0: + raise DistutilsPlatformError("Python was built with %s, " "and extensions need to be built with the same " - "version of the compiler, but it isn't installed." % self.__product) + "version of the compiler, but it isn't installed." 
+ % self.__product) self.cc = self.find_exe("cl.exe") self.linker = self.find_exe("link.exe") @@ -278,12 +274,12 @@ class MSVCCompiler (CCompiler) : # extend the MSVC path with the current path try: - for p in string.split(os.environ['path'], ';'): + for p in os.environ['path'].split(';'): self.__paths.append(p) except KeyError: pass self.__paths = normalize_and_reduce_paths(self.__paths) - os.environ['path'] = string.join(self.__paths, ';') + os.environ['path'] = ";".join(self.__paths) self.preprocess_options = None if self.__arch == "Intel": @@ -313,10 +309,10 @@ class MSVCCompiler (CCompiler) : # -- Worker methods ------------------------------------------------ - def object_filenames (self, - source_filenames, - strip_dir=0, - output_dir=''): + def object_filenames(self, + source_filenames, + strip_dir=0, + output_dir=''): # Copied from ccompiler.py, extended to return .res as 'object'-file # for .rc input file if output_dir is None: output_dir = '' @@ -343,17 +339,16 @@ class MSVCCompiler (CCompiler) : base + self.obj_extension)) return obj_names - # object_filenames () - def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): - if not self.initialized: self.initialize() - macros, objects, extra_postargs, pp_opts, build = \ - self._setup_compile(output_dir, macros, include_dirs, sources, - depends, extra_postargs) + if not self.initialized: + self.initialize() + compile_info = self._setup_compile(output_dir, macros, include_dirs, + sources, depends, extra_postargs) + macros, objects, extra_postargs, pp_opts, build = compile_info compile_opts = extra_preargs or [] compile_opts.append ('/c') @@ -382,13 +377,12 @@ class MSVCCompiler (CCompiler) : input_opt = src output_opt = "/fo" + obj try: - self.spawn ([self.rc] + pp_opts + - [output_opt] + [input_opt]) - except DistutilsExecError, msg: - raise CompileError, msg + self.spawn([self.rc] + pp_opts + + [output_opt] + [input_opt]) 
+ except DistutilsExecError as msg: + raise CompileError(msg) continue elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. # * '-h dir' specifies the directory for the # generated include file @@ -400,99 +394,95 @@ class MSVCCompiler (CCompiler) : # we use the source-directory for the include file and # the build directory for the RC file and message # resources. This works at least for win32all. - - h_dir = os.path.dirname (src) - rc_dir = os.path.dirname (obj) + h_dir = os.path.dirname(src) + rc_dir = os.path.dirname(obj) try: # first compile .MC to .RC and .H file - self.spawn ([self.mc] + - ['-h', h_dir, '-r', rc_dir] + [src]) + self.spawn([self.mc] + + ['-h', h_dir, '-r', rc_dir] + [src]) base, _ = os.path.splitext (os.path.basename (src)) rc_file = os.path.join (rc_dir, base + '.rc') # then compile .RC to .RES file - self.spawn ([self.rc] + - ["/fo" + obj] + [rc_file]) + self.spawn([self.rc] + + ["/fo" + obj] + [rc_file]) - except DistutilsExecError, msg: - raise CompileError, msg + except DistutilsExecError as msg: + raise CompileError(msg) continue else: # how to handle this file? 
- raise CompileError ( - "Don't know how to compile %s to %s" % \ - (src, obj)) + raise CompileError("Don't know how to compile %s to %s" + % (src, obj)) output_opt = "/Fo" + obj try: - self.spawn ([self.cc] + compile_opts + pp_opts + - [input_opt, output_opt] + - extra_postargs) - except DistutilsExecError, msg: - raise CompileError, msg + self.spawn([self.cc] + compile_opts + pp_opts + + [input_opt, output_opt] + + extra_postargs) + except DistutilsExecError as msg: + raise CompileError(msg) return objects - # compile () - - def create_static_lib (self, - objects, - output_libname, - output_dir=None, - debug=0, - target_lang=None): + def create_static_lib(self, + objects, + output_libname, + output_dir=None, + debug=0, + target_lang=None): - if not self.initialized: self.initialize() - (objects, output_dir) = self._fix_object_args (objects, output_dir) - output_filename = \ - self.library_filename (output_libname, output_dir=output_dir) + if not self.initialized: + self.initialize() + (objects, output_dir) = self._fix_object_args(objects, output_dir) + output_filename = self.library_filename(output_libname, + output_dir=output_dir) - if self._need_link (objects, output_filename): + if self._need_link(objects, output_filename): lib_args = objects + ['/OUT:' + output_filename] if debug: - pass # XXX what goes here? + pass # XXX what goes here? 
try: - self.spawn ([self.lib] + lib_args) - except DistutilsExecError, msg: - raise LibError, msg - + self.spawn([self.lib] + lib_args) + except DistutilsExecError as msg: + raise LibError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) - # create_static_lib () - - def link (self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None): - - if not self.initialized: self.initialize() - (objects, output_dir) = self._fix_object_args (objects, output_dir) - (libraries, library_dirs, runtime_library_dirs) = \ - self._fix_lib_args (libraries, library_dirs, runtime_library_dirs) + + def link(self, + target_desc, + objects, + output_filename, + output_dir=None, + libraries=None, + library_dirs=None, + runtime_library_dirs=None, + export_symbols=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + + if not self.initialized: + self.initialize() + (objects, output_dir) = self._fix_object_args(objects, output_dir) + fixed_args = self._fix_lib_args(libraries, library_dirs, + runtime_library_dirs) + (libraries, library_dirs, runtime_library_dirs) = fixed_args if runtime_library_dirs: self.warn ("I don't know what to do with 'runtime_library_dirs': " + str (runtime_library_dirs)) - lib_opts = gen_lib_options (self, - library_dirs, runtime_library_dirs, - libraries) + lib_opts = gen_lib_options(self, + library_dirs, runtime_library_dirs, + libraries) if output_dir is not None: - output_filename = os.path.join (output_dir, output_filename) - - if self._need_link (objects, output_filename): + output_filename = os.path.join(output_dir, output_filename) + if self._need_link(objects, output_filename): if target_desc == CCompiler.EXECUTABLE: if debug: ldflags = self.ldflags_shared_debug[1:] @@ -529,34 +519,32 @@ class 
MSVCCompiler (CCompiler) : if extra_postargs: ld_args.extend(extra_postargs) - self.mkpath (os.path.dirname (output_filename)) + self.mkpath(os.path.dirname(output_filename)) try: - self.spawn ([self.linker] + ld_args) - except DistutilsExecError, msg: - raise LinkError, msg + self.spawn([self.linker] + ld_args) + except DistutilsExecError as msg: + raise LinkError(msg) else: log.debug("skipping %s (up-to-date)", output_filename) - # link () - # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function, in # ccompiler.py. - def library_dir_option (self, dir): + def library_dir_option(self, dir): return "/LIBPATH:" + dir - def runtime_library_dir_option (self, dir): - raise DistutilsPlatformError, \ - "don't know how to set runtime library search path for MSVC++" + def runtime_library_dir_option(self, dir): + raise DistutilsPlatformError( + "don't know how to set runtime library search path for MSVC++") - def library_option (self, lib): - return self.library_filename (lib) + def library_option(self, lib): + return self.library_filename(lib) - def find_library_file (self, dirs, lib, debug=0): + def find_library_file(self, dirs, lib, debug=0): # Prefer a debugging library if found (and requested), but deal # with it if we don't have one. if debug: @@ -572,8 +560,6 @@ class MSVCCompiler (CCompiler) : # Oops, didn't find it in *any* of 'dirs' return None - # find_library_file () - # Helper methods for using the MSVC registry settings def find_exe(self, exe): @@ -585,14 +571,13 @@ class MSVCCompiler (CCompiler) : absolute path that is known to exist. If none of them work, just return the original program name, 'exe'. 
""" - for p in self.__paths: fn = os.path.join(os.path.abspath(p), exe) if os.path.isfile(fn): return fn # didn't find it; try existing path - for p in string.split(os.environ['Path'],';'): + for p in os.environ['Path'].split(';'): fn = os.path.join(os.path.abspath(p),exe) if os.path.isfile(fn): return fn @@ -605,7 +590,6 @@ class MSVCCompiler (CCompiler) : Return a list of strings. The list will be empty if unable to access the registry or appropriate registry keys not found. """ - if not _can_read_reg: return [] @@ -621,9 +605,9 @@ class MSVCCompiler (CCompiler) : d = read_values(base, key) if d: if self.__version >= 7: - return string.split(self.__macros.sub(d[path]), ";") + return self.__macros.sub(d[path]).split(";") else: - return string.split(d[path], ";") + return d[path].split(";") # MSVC 6 seems to create the registry entries we need only when # the GUI is run. if self.__version == 6: @@ -648,7 +632,7 @@ class MSVCCompiler (CCompiler) : else: p = self.get_msvc_paths(name) if p: - os.environ[name] = string.join(p, ';') + os.environ[name] = ';'.join(p) if get_build_version() >= 8.0: diff --git a/lib-python/2.7/distutils/spawn.py b/lib-python/2.7/distutils/spawn.py index 6a8df1b9de..d8d8204155 100644 --- a/lib-python/2.7/distutils/spawn.py +++ b/lib-python/2.7/distutils/spawn.py @@ -15,7 +15,7 @@ from distutils.errors import DistutilsPlatformError, DistutilsExecError from distutils.debug import DEBUG from distutils import log -def spawn(cmd, search_path=1, verbose=0, dry_run=0): +def spawn(cmd, search_path=1, verbose=0, dry_run=0, **kwargs): """Run another program, specified as a command list 'cmd', in a new process. 'cmd' is just the argument list for the new process, ie. 
@@ -37,7 +37,7 @@ def spawn(cmd, search_path=1, verbose=0, dry_run=0): if os.name == 'posix': _spawn_posix(cmd, search_path, dry_run=dry_run) elif os.name == 'nt': - _spawn_nt(cmd, search_path, dry_run=dry_run) + _spawn_nt(cmd, search_path, dry_run=dry_run, **kwargs) elif os.name == 'os2': _spawn_os2(cmd, search_path, dry_run=dry_run) else: @@ -60,7 +60,7 @@ def _nt_quote_args(args): args[i] = '"%s"' % arg return args -def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0): +def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0, **kwargs): executable = cmd[0] if search_path: # either we find one or it stays the same @@ -70,7 +70,7 @@ def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0): # spawn for NT requires a full path to the .exe try: import subprocess - rc = subprocess.call(cmd) + rc = subprocess.call(cmd, **kwargs) except OSError, exc: # this seems to happen when the command isn't found if not DEBUG: diff --git a/lib-python/2.7/distutils/tests/test_msvccompiler.py b/lib-python/2.7/distutils/tests/test_msvccompiler.py new file mode 100644 index 0000000000..46a51cd0a7 --- /dev/null +++ b/lib-python/2.7/distutils/tests/test_msvccompiler.py @@ -0,0 +1,138 @@ +"""Tests for distutils._msvccompiler.""" +import sys +import unittest +import os +import threading + +from distutils.errors import DistutilsPlatformError +from distutils.tests import support +from test.support import run_unittest + + +SKIP_MESSAGE = (None if sys.platform == "win32" else + "These tests are only for win32") + +@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE) +class msvccompilerTestCase(support.TempdirManager, + unittest.TestCase): + + def test_no_compiler(self): + import distutils._msvccompiler as _msvccompiler + # makes sure query_vcvarsall raises + # a DistutilsPlatformError if the compiler + # is not found + def _find_vcvarsall(plat_spec): + return None, None + + old_find_vcvarsall = _msvccompiler._find_vcvarsall + _msvccompiler._find_vcvarsall = _find_vcvarsall + try: + 
self.assertRaises(DistutilsPlatformError, + _msvccompiler._get_vc_env, + 'wont find this version') + finally: + _msvccompiler._find_vcvarsall = old_find_vcvarsall + + def test_get_vc_env_unicode(self): + import distutils._msvccompiler as _msvccompiler + + test_var = 'ṰḖṤṪ┅ṼẨṜ' + test_value = '₃⁴₅' + + # Ensure we don't early exit from _get_vc_env + old_distutils_use_sdk = os.environ.pop('DISTUTILS_USE_SDK', None) + os.environ[test_var] = test_value + try: + env = _msvccompiler._get_vc_env('x86') + self.assertIn(test_var.lower(), env) + self.assertEqual(test_value, env[test_var.lower()]) + finally: + os.environ.pop(test_var) + if old_distutils_use_sdk: + os.environ['DISTUTILS_USE_SDK'] = old_distutils_use_sdk + + def test_get_vc2017(self): + import distutils._msvccompiler as _msvccompiler + + # This function cannot be mocked, so pass it if we find VS 2017 + # and mark it skipped if we do not. + version, path = _msvccompiler._find_vc2017() + if version: + self.assertGreaterEqual(version, 15) + self.assertTrue(os.path.isdir(path)) + else: + raise unittest.SkipTest("VS 2017 is not installed") + + def test_get_vc2015(self): + import distutils._msvccompiler as _msvccompiler + + # This function cannot be mocked, so pass it if we find VS 2015 + # and mark it skipped if we do not. + version, path = _msvccompiler._find_vc2015() + if version: + self.assertGreaterEqual(version, 14) + self.assertTrue(os.path.isdir(path)) + else: + raise unittest.SkipTest("VS 2015 is not installed") + + +class CheckThread(threading.Thread): + exc_info = None + + def run(self): + try: + super().run() + except Exception: + self.exc_info = sys.exc_info() + + def __bool__(self): + return not self.exc_info + + +class TestSpawn(unittest.TestCase): + def test_concurrent_safe(self): + """ + Concurrent calls to spawn should have consistent results. 
+ """ + import distutils._msvccompiler as _msvccompiler + compiler = _msvccompiler.MSVCCompiler() + compiler._paths = "expected" + inner_cmd = 'import os; assert os.environ["PATH"] == "expected"' + command = ['python', '-c', inner_cmd] + + threads = [ + CheckThread(target=compiler.spawn, args=[command]) + for n in range(100) + ] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + assert all(threads) + + def test_concurrent_safe_fallback(self): + """ + If CCompiler.spawn has been monkey-patched without support + for an env, it should still execute. + """ + import distutils._msvccompiler as _msvccompiler + from distutils import ccompiler + compiler = _msvccompiler.MSVCCompiler() + compiler._paths = "expected" + + def CCompiler_spawn(self, cmd): + "A spawn without an env argument." + assert os.environ["PATH"] == "expected" + + with unittest.mock.patch.object( + ccompiler.CCompiler, 'spawn', CCompiler_spawn): + compiler.spawn(["n/a"]) + + assert os.environ.get("PATH") != "expected" + + +def test_suite(): + return unittest.makeSuite(msvccompilerTestCase) + +if __name__ == "__main__": + run_unittest(test_suite()) diff --git a/lib_pypy/_ctypes/__init__.py b/lib_pypy/_ctypes/__init__.py index da30f5f6d9..6a0de8d703 100644 --- a/lib_pypy/_ctypes/__init__.py +++ b/lib_pypy/_ctypes/__init__.py @@ -13,15 +13,15 @@ from _ctypes.builtin import ( _string_at_addr, _wstring_at_addr, set_conversion_mode) from _ctypes.union import Union +try: from __pypy__ import builtinify +except ImportError: builtinify = lambda f: f + import os as _os if _os.name in ("nt", "ce"): from _rawffi import FormatError from _rawffi import check_HRESULT as _check_HRESULT - try: from __pypy__ import builtinify - except ImportError: builtinify = lambda f: f - @builtinify def CopyComPointer(src, dst): from ctypes import c_void_p, cast @@ -32,8 +32,6 @@ if _os.name in ("nt", "ce"): dst[0] = cast(src, c_void_p).value return 0 - del builtinify - LoadLibrary = dlopen from _rawffi 
import FUNCFLAG_STDCALL, FUNCFLAG_CDECL, FUNCFLAG_PYTHONAPI @@ -43,6 +41,19 @@ from _ctypes.builtin import get_errno, set_errno if _os.name in ("nt", "ce"): from _ctypes.builtin import get_last_error, set_last_error +import sys as _sys +if _sys.platform == 'darwin': + try: + from ._ctypes_cffi import lib as _lib + if hasattr(_lib, 'dyld_shared_cache_contains_path'): + @builtinify + def _dyld_shared_cache_contains_path(path): + return _lib.dyld_shared_cache_contains_path(path.encode()) + except ImportError: + pass + +del builtinify + __version__ = '1.1.0' #XXX platform dependant? RTLD_LOCAL = 0 diff --git a/lib_pypy/_ctypes/_ctypes_build.py b/lib_pypy/_ctypes/_ctypes_build.py new file mode 100644 index 0000000000..7faeb5f80a --- /dev/null +++ b/lib_pypy/_ctypes/_ctypes_build.py @@ -0,0 +1,22 @@ +import os + +from cffi import FFI + +ffi = FFI() +ffi.cdef('bool dyld_shared_cache_contains_path(const char* path);') +ffi.set_source('_ctypes_cffi', r''' +#include <stdbool.h> +#include <mach-o/dyld.h> + +bool _dyld_shared_cache_contains_path(const char* path) __attribute__((weak_import)); +bool dyld_shared_cache_contains_path(const char* path) { + if (_dyld_shared_cache_contains_path == NULL) { + return false; + } + return _dyld_shared_cache_contains_path(path); +} +''') + +if __name__ == '__main__': + os.chdir(os.path.dirname(__file__)) + ffi.compile() diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py index 54e3ecd2a7..a4d6086104 100644 --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -286,9 +286,19 @@ class SimpleType(_CDataMeta): # other code may set their own restypes. We need out own # restype here. 
oleaut32 = WinDLL("oleaut32") + import ctypes SysAllocStringLen = oleaut32.SysAllocStringLen SysStringLen = oleaut32.SysStringLen SysFreeString = oleaut32.SysFreeString + if ctypes.sizeof(ctypes.c_void_p) == 4: + ptype = ctypes.c_int + else: + ptype = ctypes.c_longlong + SysAllocStringLen.argtypes=[ptype, ctypes.c_uint] + SysAllocStringLen.restype = ptype + SysStringLen.argtypes=[ptype] + SysStringLen.restype = ctypes.c_uint + SysFreeString.argtypes=[ptype] def _getvalue(self): addr = self._buffer[0] if addr == 0: diff --git a/lib_pypy/_hashlib/__init__.py b/lib_pypy/_hashlib/__init__.py index 05852ccc0b..5ebc521a57 100644 --- a/lib_pypy/_hashlib/__init__.py +++ b/lib_pypy/_hashlib/__init__.py @@ -9,11 +9,11 @@ except ImportError: builtinify = lambda f: f def new(name, string=b''): - h = Hash(name) + h = HASH(name) h.update(string) return h -class Hash(object): +class HASH(object): def __init__(self, name, copy_from=None): self.ctx = ffi.NULL @@ -67,7 +67,7 @@ class Hash(object): def copy(self): """Return a copy of the hash object.""" with self.lock: - return Hash(self.name, copy_from=self.ctx) + return HASH(self.name, copy_from=self.ctx) def digest(self): """Return the digest value as a string of binary data.""" diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py index 1375c24fcc..45da13b17f 100644 --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -36,8 +36,8 @@ def Tcl_AppInit(app): from os.path import join, dirname, exists, sep if sys.platform == 'win32': lib_path = join(dirname(dirname(dirname(__file__))), 'tcl') - tcl_path = join(lib_path, 'tcl8.5') - tk_path = join(lib_path, 'tk8.5') + tcl_path = join(lib_path, 'tcl8.6') + tk_path = join(lib_path, 'tk8.6') tcl_path = tcl_path.replace(sep, '/') tk_path = tk_path.replace(sep, '/') else: diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py index 81c3be37aa..95a44a5b71 100644 --- a/lib_pypy/_tkinter/tklib_build.py +++ b/lib_pypy/_tkinter/tklib_build.py @@ 
-15,7 +15,7 @@ elif sys.platform.startswith("freebsd"): libdirs = ['/usr/local/lib'] elif sys.platform == 'win32': incdirs = [] - linklibs = ['tcl85', 'tk85'] + linklibs = ['tcl86t', 'tk86t'] libdirs = [] elif sys.platform == 'darwin': # homebrew diff --git a/lib_pypy/msvcrt.py b/lib_pypy/msvcrt.py index 8d4b97a532..67c142745a 100644 --- a/lib_pypy/msvcrt.py +++ b/lib_pypy/msvcrt.py @@ -19,9 +19,7 @@ _lib = _ffi.dlopen(_rawffi.get_libc().name) import errno -try: from __pypy__ import builtinify, validate_fd -except ImportError: builtinify = validate_fd = lambda f: f - +from __pypy__ import builtinify, get_osfhandle as _get_osfhandle def _ioerr(): e = _ffi.errno @@ -47,11 +45,7 @@ def get_osfhandle(fd): Return the file handle for the file descriptor fd. Raises IOError if fd is not recognized.""" - try: - validate_fd(fd) - except OSError as e: - raise IOError(*e.args) - result = _lib._get_osfhandle(fd) + result = _get_osfhandle(fd) if result == -1: _ioerr() return result diff --git a/lib_pypy/pypy_tools/build_cffi_imports.py b/lib_pypy/pypy_tools/build_cffi_imports.py index 12e791d695..7bb96c85bd 100644 --- a/lib_pypy/pypy_tools/build_cffi_imports.py +++ b/lib_pypy/pypy_tools/build_cffi_imports.py @@ -1,5 +1,5 @@ from __future__ import print_function -import sys, shutil, os, tempfile, hashlib +import sys, shutil, os, tempfile, hashlib, collections import sysconfig from os.path import join @@ -22,18 +22,20 @@ class MissingDependenciesError(Exception): pass -cffi_build_scripts = { - "_ssl": "_ssl_build.py", - "sqlite3": "_sqlite3_build.py", - "audioop": "_audioop_build.py", - "_tkinter": "_tkinter/tklib_build.py", - "curses": "_curses_build.py" if sys.platform != "win32" else None, - "syslog": "_syslog_build.py" if sys.platform != "win32" else None, - "gdbm": "_gdbm_build.py" if sys.platform != "win32" else None, - "grp": "_pwdgrp_build.py" if sys.platform != "win32" else None, - "resource": "_resource_build.py" if sys.platform != "win32" else None, - "xx": None, # for 
testing: 'None' should be completely ignored - } +cffi_build_scripts = collections.OrderedDict({ + ("_ctypes._ctypes_cffi", + "_ctypes/_ctypes_build.py" if sys.platform == 'darwin' else None), + ("_ssl", "_ssl_build.py"), + ("sqlite3", "_sqlite3_build.py"), + ("audioop", "_audioop_build.py"), + ("_tkinter", "_tkinter/tklib_build.py"), + ("curses", "_curses_build.py" if sys.platform != "win32" else None), + ("syslog", "_syslog_build.py" if sys.platform != "win32" else None), + ("gdbm", "_gdbm_build.py" if sys.platform != "win32" else None), + ("grp", "_pwdgrp_build.py" if sys.platform != "win32" else None), + ("resource", "_resource_build.py" if sys.platform != "win32" else None), + ("xx", None), # for testing: 'None' should be completely ignored + }) # for distribution, we may want to fetch dependencies not provided by # the OS, such as a recent openssl/libressl. diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py index 781566b52d..bdb78832b0 100644 --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -75,7 +75,7 @@ master_doc = 'index' # General information about the project. project = u'PyPy' -copyright = u'2020, The PyPy Project' +copyright = u'2021, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst index 4d2dad06be..66ed78980d 100644 --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -365,14 +365,17 @@ Miscellaneous implementation detail that shows up because of internal C-level slots that PyPy does not have. -* on CPython, ``[].__add__`` is a ``method-wrapper``, and - ``list.__add__`` is a ``slot wrapper``. On PyPy these are normal - bound or unbound method objects. 
This can occasionally confuse some +* on CPython, ``[].__add__`` is a ``method-wrapper``, ``list.__add__`` + is a ``slot wrapper`` and ``list.extend`` is a (built-in) ``method`` + object. On PyPy these are all normal method or function objects (or + unbound method objects on PyPy2). This can occasionally confuse some tools that inspect built-in types. For example, the standard library ``inspect`` module has a function ``ismethod()`` that returns True on unbound method objects but False on method-wrappers or slot - wrappers. On PyPy we can't tell the difference, so - ``ismethod([].__add__) == ismethod(list.__add__) == True``. + wrappers. On PyPy we can't tell the difference. So on PyPy2 we + have ``ismethod([].__add__) == ismethod(list.extend) == True``; + on PyPy3 we have ``isfunction(list.extend) == True``. On CPython + all of these are False. * in CPython, the built-in types have attributes that can be implemented in various ways. Depending on the way, if you try to @@ -422,7 +425,8 @@ Miscellaneous probably be ignored by an implementation of ``sys.getsizeof()``, but their overhead is important in some cases if they are many instances with unique maps. Conversely, equal strings may share their internal - string data even if they are different objects---or empty containers + string data even if they are different objects---even a unicode string + and its utf8-encoded ``bytes`` version are shared---or empty containers may share parts of their internals as long as they are empty. Even stranger, some lists create objects as you read them; if you try to estimate the size in memory of ``range(10**6)`` as the sum of all diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst index 5eebce1f1b..3e2410d9b7 100644 --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -38,3 +38,39 @@ Add ``rffi.constcharpsize2str`` .. branch: document-win64 Refactor documentation of win64 from future plans to what was executed + +.. 
branch: sync-distutils + +Backport msvc detection from python3, which probably breaks using Visual Studio +2008 (MSVC9, or the version that used to be used to build CPython2.7 on +Windows) + +.. branch: py2.7-winreg + +Backport fixes to winreg adding reflection and fix for passing None (bpo +21151). + +.. branch: pymodule_new-const-charp + +Change parameter type of ``PyModule_New`` to ``const char*``, add +``PyModule_Check`` and ``PyModule_CheckExact`` + +.. branch: rpython-never-allocate + +Introduce a ``@never_allocate`` class decorator, which ensure that a certain +RPython class is never actually instantiated at runtime. Useful to ensure that +e.g. it's always constant-folded away + +.. branch: map-improvements + +Optimize instances with integer or float fields to have more efficent field +reads and writes. They also use less memory if they have at least two such +fields. + +.. branch: win-tcl8.6 + +Update the version of Tk/Tcl on windows to 8.6 + +.. branch: big-sur-dyld-cache + +Backport changes to ``_ctypes`` needed for maxos BigSur from py3.7 diff --git a/pypy/interpreter/astcompiler/test/test_ast.py b/pypy/interpreter/astcompiler/test/test_ast.py index 2affff7f62..c66e9950a9 100644 --- a/pypy/interpreter/astcompiler/test/test_ast.py +++ b/pypy/interpreter/astcompiler/test/test_ast.py @@ -8,7 +8,7 @@ class TestAstToObject: value = space.wrap(42) node = ast.Num(value, lineno=1, col_offset=1) w_node = node.to_object(space) - assert space.getattr(w_node, space.wrap("n")) is value + assert space.is_w(space.getattr(w_node, space.wrap("n")), value) def test_expr(self, space): value = space.wrap(42) @@ -16,8 +16,8 @@ class TestAstToObject: expr = ast.Expr(node, lineno=1, col_offset=1) w_node = expr.to_object(space) # node.value.n - assert space.getattr(space.getattr(w_node, space.wrap("value")), - space.wrap("n")) is value + assert space.is_w(space.getattr(space.getattr(w_node, space.wrap("value")), + space.wrap("n")), value) def test_operation(self, space): val1 = 
ast.Num(space.wrap(1), lineno=1, col_offset=1) @@ -35,7 +35,7 @@ class TestAstToObject: space.setattr(w_node, space.wrap('lineno'), space.wrap(1)) space.setattr(w_node, space.wrap('col_offset'), space.wrap(1)) node = ast.Num.from_object(space, w_node) - assert node.n is value + assert space.is_w(node.n, value) def test_fields(self, space): w_fields = space.getattr(ast.get(space).w_FunctionDef, diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py index c3b7773b3e..776507e7ee 100644 --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -10,6 +10,7 @@ from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc, rstack +from rpython.rtyper.lltypesystem import rffi def internal_repr(space, w_object): @@ -93,7 +94,7 @@ def do_what_I_mean(space, w_crash=None): def strategy(space, w_obj): - """ strategy(dict or list or set) + """ strategy(dict or list or set or instance) Return the underlying strategy currently used by a dict, list or set object """ @@ -104,7 +105,11 @@ def strategy(space, w_obj): elif isinstance(w_obj, W_BaseSetObject): name = w_obj.strategy.__class__.__name__ else: - raise oefmt(space.w_TypeError, "expecting dict or list or set object") + m = w_obj._get_mapdict_map() + if m is not None: + name = m.repr() + else: + raise oefmt(space.w_TypeError, "expecting dict or list or set object, or instance of some kind") return space.newtext(name) def get_console_cp(space): @@ -118,6 +123,19 @@ def get_console_cp(space): space.newtext('cp%d' % rwin32.GetConsoleOutputCP()), ]) +@unwrap_spec(fd=int) +def get_osfhandle(space, fd): + """get_osfhandle() + + Return the handle corresponding to the file descriptor (windows only) + """ + from rpython.rlib import rwin32 # Windows only + try: + ret = rwin32.get_osfhandle(fd) + return space.newint(rffi.cast(rffi.INT, ret)) 
+ except OSError as e: + raise wrap_oserror(space, e) + @unwrap_spec(sizehint=int) def resizelist_hint(space, w_list, sizehint): """ Reallocate the underlying storage of the argument list to sizehint """ diff --git a/pypy/module/__pypy__/moduledef.py b/pypy/module/__pypy__/moduledef.py index 22491d42ca..8a608d2868 100644 --- a/pypy/module/__pypy__/moduledef.py +++ b/pypy/module/__pypy__/moduledef.py @@ -122,6 +122,7 @@ class Module(MixedModule): } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' + interpleveldefs['get_osfhandle'] = 'interp_magic.get_osfhandle' submodules = { "builders": BuildersModule, diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py index e6a6a58e5f..c79b2c196f 100644 --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -1,8 +1,13 @@ # encoding: utf-8 +import pytest +import sys class AppTestMagic: spaceconfig = dict(usemodules=['__pypy__']) + def setup_class(cls): + cls.w_file = cls.space.wrap(__file__) + def test_save_module_content_for_future_reload(self): import sys, __pypy__ d = sys.dont_write_bytecode @@ -66,3 +71,10 @@ def f(): from __pypy__ import utf8content assert utf8content(u"a") == b"a" assert utf8content(u"\xe4") == b'\xc3\xa4' + + @pytest.mark.skipif(sys.platform != 'win32', reason="win32 only") + def test_get_osfhandle(self): + from __pypy__ import get_osfhandle + with open(self.file) as fid: + f = get_osfhandle(fid.fileno()) + raises(OSError, get_osfhandle, 2**30) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py index 1b8581fea7..6b84403e23 100644 --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -131,6 +131,15 @@ class AppTest(object): s = set([2, 3, 4]) assert strategy(s) == "IntegerSetStrategy" + def test_instance_strategy(self): + from __pypy__ import strategy + class A(object): + pass + a = A() + 
a.x = 1 + a.y = 2 + assert strategy(a).startswith("<UnboxedPlainAttribute y DICT 0 1 <UnboxedPlainAttribute x DICT 0 0 <DictTerminator w_cls=<W_TypeObject 'A'") + class AppTestJitFeatures(object): spaceconfig = {"translation.jit": True} diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py index 8ee4ef2abe..8dac984733 100644 --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -238,7 +238,7 @@ class JSONDecoder(W_Root): def decode_float(self, i): from rpython.rlib import rdtoa start = rffi.ptradd(self.ll_chars, i) - floatval = rdtoa.dg_strtod(start, self.end_ptr) + floatval = rdtoa.dg_strtod(rffi.cast(rffi.CONST_CCHARP, start), self.end_ptr) diff = rffi.cast(rffi.SIGNED, self.end_ptr[0]) - rffi.cast(rffi.SIGNED, start) self.pos = i + diff return self.space.newfloat(floatval) diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py index c7af44d6da..dc29cd844c 100644 --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -6,6 +6,61 @@ from pypy.interpreter.error import OperationError, oefmt, wrap_windowserror from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rwinreg, rwin32 from rpython.rlib.rarithmetic import r_uint, intmask +from rpython.translator.tool.cbuild import ExternalCompilationInfo + + +# wrappers needed to call the reflection functions loaded at runtime +# using WINAPI convention +eci = ExternalCompilationInfo( + includes=['windows.h'], + post_include_bits=[ + "RPY_EXTERN LONG\n" + "pypy_RegChangeReflectionKey(FARPROC address, HKEY key);\n" + "RPY_EXTERN LONG\n" + "pypy_RegQueryReflectionKey(FARPROC address, HKEY key, LPBOOL isDisabled);\n" + "RPY_EXTERN LONG\n" + "pypy_RegDeleteKeyExA(FARPROC address, HKEY key, LPCSTR subkey,\n" + " REGSAM sam, DWORD reserved);\n" + ], + separate_module_sources=[''' + LONG + pypy_RegChangeReflectionKey(FARPROC address, HKEY key) { + LONG (WINAPI 
*func)(HKEY); + *(FARPROC*)&func = address; + return func(key); + } + + LONG + pypy_RegQueryReflectionKey(FARPROC address, HKEY key, LPBOOL isDisabled) { + LONG (WINAPI *func)(HKEY, LPBOOL); + *(FARPROC*)&func = address; + return func(key, isDisabled); + } + + LONG + pypy_RegDeleteKeyExA(FARPROC address, HKEY key, LPCSTR subkey, + REGSAM sam, DWORD reserved) { + LONG (WINAPI *func)(HKEY, LPCSTR, REGSAM, DWORD); + *(FARPROC*)&func = address; + return func(key, subkey, sam, reserved); + } + '''], +) +pypy_RegChangeReflectionKey = rffi.llexternal( + 'pypy_RegChangeReflectionKey', + [rffi.VOIDP, rwinreg.HKEY], + rffi.LONG, compilation_info=eci) + +pypy_RegQueryReflectionKey = rffi.llexternal( + 'pypy_RegQueryReflectionKey', + [rffi.VOIDP, rwinreg.HKEY, rwin32.LPBOOL], + rffi.LONG, compilation_info=eci) + +pypy_RegDeleteKeyExA = rffi.llexternal( + 'pypy_RegDeleteKeyExA', + [rffi.VOIDP, rwinreg.HKEY, rffi.CCHARP, rwinreg.REGSAM, rwin32.DWORD], + rffi.LONG, compilation_info=eci) + def raiseWindowsError(space, errcode, context): message = rwin32.FormatError(errcode) @@ -258,6 +313,8 @@ But the underlying API call doesn't return the type, Lame Lame Lame, DONT USE TH if ret != 0: raiseWindowsError(space, ret, 'RegQueryValue') length = intmask(bufsize_p[0]) + if length == 0: + return space.w_None return space.newtext(rffi.charp2strn(buf, length - 1)) def convert_to_regdata(space, w_value, typ): @@ -328,8 +385,7 @@ def convert_to_regdata(space, w_value, typ): else: # REG_BINARY and ALL unknown data types. 
if space.is_w(w_value, space.w_None): buflen = 0 - buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw') - buf[0] = '\0' + buf = lltype.nullptr(rffi.CCHARP.TO) else: try: value = w_value.readbuf_w(space) @@ -385,7 +441,10 @@ def convert_from_regdata(space, buf, buflen, typ): return space.newlist(l) else: # REG_BINARY and all other types - return space.newbytes(rffi.charpsize2str(buf, buflen)) + if buflen == 0: + return space.w_None + else: + return space.newbytes(rffi.charpsize2str(buf, buflen)) @unwrap_spec(value_name="text", typ=int) def SetValueEx(space, w_hkey, value_name, w_reserved, typ, w_value): @@ -424,7 +483,8 @@ the configuration registry. This helps the registry perform efficiently.""" try: ret = rwinreg.RegSetValueExA(hkey, value_name, 0, typ, buf, buflen) finally: - lltype.free(buf, flavor='raw') + if buf != lltype.nullptr(rffi.CCHARP.TO): + lltype.free(buf, flavor='raw') if ret != 0: raiseWindowsError(space, ret, 'RegSetValueEx') @@ -707,30 +767,84 @@ def ExpandEnvironmentStrings(space, w_source): except WindowsError as e: raise wrap_windowserror(space, e) + +class ReflectionFunction(object): + def __init__(self, name, stdcall_wrapper): + self.name = name + self.handle = lltype.nullptr(rffi.VOIDP.TO) + self.wrapper = stdcall_wrapper + + def check(self): + if self.handle != lltype.nullptr(rffi.VOIDP.TO): + return True + from rpython.rlib.rdynload import GetModuleHandle, dlsym + lib = GetModuleHandle("advapi32.dll") + try: + handle = dlsym(lib, self.name) + except KeyError: + return False + self.handle = handle + return True + + def call(self, *args): + assert self.handle != lltype.nullptr(rffi.VOIDP.TO) + return self.wrapper(self.handle, *args) + + +_RegDisableReflectionKey = ReflectionFunction( + "RegDisableReflectionKey", pypy_RegChangeReflectionKey) +_RegEnableReflectionKey = ReflectionFunction( + "RegEnableReflectionKey", pypy_RegChangeReflectionKey) +_RegQueryReflectionKey = ReflectionFunction( + "RegQueryReflectionKey", 
pypy_RegQueryReflectionKey) +_RegDeleteKeyExA = ReflectionFunction("RegDeleteKeyExA", pypy_RegDeleteKeyExA) + + def DisableReflectionKey(space, w_key): """Disables registry reflection for 32-bit processes running on a 64-bit Operating System. Will generally raise NotImplemented if executed on a 32-bit Operating System. If the key is not on the reflection list, the function succeeds but has no effect. Disabling reflection for a key does not affect reflection of any subkeys.""" - raise oefmt(space.w_NotImplementedError, - "not implemented on this platform") + if not _RegDisableReflectionKey.check(): + raise oefmt(space.w_NotImplementedError, + "not implemented on this platform") + else: + hkey = hkey_w(w_key, space) + ret = _RegDisableReflectionKey.call(hkey) + if ret != 0: + raiseWindowsError(space, ret, 'RegDisableReflectionKey') def EnableReflectionKey(space, w_key): """Restores registry reflection for the specified disabled key. Will generally raise NotImplemented if executed on a 32-bit Operating System. Restoring reflection for a key does not affect reflection of any subkeys.""" - raise oefmt(space.w_NotImplementedError, - "not implemented on this platform") + if not _RegEnableReflectionKey.check(): + raise oefmt(space.w_NotImplementedError, + "not implemented on this platform") + else: + hkey = hkey_w(w_key, space) + ret = _RegEnableReflectionKey.call(hkey) + if ret != 0: + raiseWindowsError(space, ret, 'RegEnableReflectionKey') def QueryReflectionKey(space, w_key): """bool = QueryReflectionKey(hkey) - Determines the reflection state for the specified key. 
Will generally raise NotImplemented if executed on a 32-bit Operating System.""" - raise oefmt(space.w_NotImplementedError, - "not implemented on this platform") + if not _RegQueryReflectionKey.check(): + raise oefmt(space.w_NotImplementedError, + "not implemented on this platform") + else: + hkey = hkey_w(w_key, space) + with lltype.scoped_alloc(rwin32.LPBOOL.TO, 1) as isDisabled: + ret = _RegQueryReflectionKey.call(hkey, isDisabled) + if ret != 0: + raiseWindowsError(space, ret, 'RegQueryReflectionKey') + return space.newbool(intmask(isDisabled[0]) != 0) -@unwrap_spec(subkey="text") -def DeleteKeyEx(space, w_key, subkey): + +@unwrap_spec(sub_key="text", access=r_uint, reserved=int) +def DeleteKeyEx(space, w_key, sub_key, access=rwinreg.KEY_WOW64_64KEY, reserved=0): """DeleteKeyEx(key, sub_key, sam, res) - Deletes the specified key. key is an already open key, or any one of the predefined HKEY_* constants. @@ -744,5 +858,11 @@ def DeleteKeyEx(space, w_key, subkey): If the method succeeds, the entire key, including all of its values, is removed. If the method fails, a WindowsError exception is raised. 
On unsupported Windows versions, NotImplementedError is raised.""" - raise oefmt(space.w_NotImplementedError, - "not implemented on this platform") + if not _RegDeleteKeyExA.check(): + raise oefmt(space.w_NotImplementedError, + "not implemented on this platform") + else: + hkey = hkey_w(w_key, space) + ret = _RegDeleteKeyExA.call(hkey, sub_key, access, reserved) + if ret != 0: + raiseWindowsError(space, ret, 'RegDeleteKeyEx') diff --git a/pypy/module/_winreg/moduledef.py b/pypy/module/_winreg/moduledef.py index d865ca691e..b20bd15d62 100644 --- a/pypy/module/_winreg/moduledef.py +++ b/pypy/module/_winreg/moduledef.py @@ -72,3 +72,8 @@ to see what constants are used, and where.""" for name, value in constants.iteritems(): interpleveldefs[name] = "space.wrap(%s)" % (value,) + + import pypy.module.sys.version + if pypy.module.sys.version.CPYTHON_VERSION < (3, 6): + del interpleveldefs["REG_QWORD"] + del interpleveldefs["REG_QWORD_LITTLE_ENDIAN"] diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py index ae2d8ee227..1d25cbb900 100644 --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -31,6 +31,7 @@ class AppTestFfi: def setup_class(cls): import _winreg + from platform import machine space = cls.space cls.root_key = _winreg.HKEY_CURRENT_USER cls.test_key_name = "SOFTWARE\\Pypy Registry Test Key - Delete Me" @@ -38,6 +39,7 @@ class AppTestFfi: cls.w_test_key_name = space.wrap(cls.test_key_name) cls.w_canSaveKey = space.wrap(canSaveKey) cls.w_tmpfilename = space.wrap(str(udir.join('winreg-temp'))) + cls.w_win64_machine = space.wrap(machine() == "AMD64") test_data = [ ("Int Value", 0xFEDCBA98, _winreg.REG_DWORD), @@ -45,6 +47,7 @@ class AppTestFfi: ("Unicode Value", u"A unicode Value", _winreg.REG_SZ), ("Str Expand", "The path is %path%", _winreg.REG_EXPAND_SZ), ("Multi Str", ["Several", "string", u"values"], _winreg.REG_MULTI_SZ), + ("Raw None", None, _winreg.REG_BINARY), ("Raw data", 
"binary"+chr(0)+"data", _winreg.REG_BINARY), ] cls.w_test_data = space.wrap(test_data) @@ -175,14 +178,19 @@ class AppTestFfi: def test_delete(self): # must be run after test_SetValueEx - from _winreg import OpenKey, KEY_ALL_ACCESS, DeleteValue, DeleteKey + from _winreg import OpenKey, KEY_ALL_ACCESS, DeleteValue, DeleteKey, DeleteKeyEx key = OpenKey(self.root_key, self.test_key_name, 0, KEY_ALL_ACCESS) sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS) for name, value, type in self.test_data: DeleteValue(sub_key, name) - DeleteKey(key, "sub_key") + if self.win64_machine: + DeleteKeyEx(key, "sub_key", KEY_ALL_ACCESS, 0) + else: + DeleteKey(key, "sub_key") + + raises(OSError, OpenKey, key, "sub_key") def test_connect(self): from _winreg import ConnectRegistry, HKEY_LOCAL_MACHINE @@ -255,3 +263,18 @@ class AppTestFfi: raises(NotImplementedError, DeleteKeyEx, self.root_key, self.test_key_name) + def test_reflection(self): + import sys + from _winreg import DisableReflectionKey, EnableReflectionKey, \ + QueryReflectionKey, OpenKey, HKEY_LOCAL_MACHINE + # Adapted from lib-python test + if not self.win64_machine: + skip("Requires 64-bit host") + # Test that we can call the query, enable, and disable functions + # on a key which isn't on the reflection list with no consequences. 
+ with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key: + # HKLM\Software is redirected but not reflected in all OSes + assert QueryReflectionKey(key) + assert EnableReflectionKey(key) is None + assert DisableReflectionKey(key) is None + assert QueryReflectionKey(key) diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py index a9bb200094..5e16133b94 100644 --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -64,7 +64,7 @@ def PyEval_GetFrame(space): caller = space.getexecutioncontext().gettopframe_nohidden() return caller # borrowed ref, may be null -@cpython_api([PyCodeObject, PyObject, PyObject], PyObject) +@cpython_api([PyObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): """This is a simplified interface to PyEval_EvalCodeEx(), with just the code object, and the dictionaries of global and local variables. diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py index ea27269b5b..b5d212b81b 100644 --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, cpython_struct, \ METH_STATIC, METH_CLASS, METH_COEXIST, CANNOT_FAIL, CONST_STRING, \ - METH_NOARGS, METH_O, METH_VARARGS + METH_NOARGS, METH_O, METH_VARARGS, build_type_checkers from pypy.module.cpyext.pyobject import PyObject, as_pyobj from pypy.interpreter.module import Module from pypy.module.cpyext.methodobject import ( @@ -11,7 +11,9 @@ from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.module.cpyext.state import State from pypy.interpreter.error import oefmt -@cpython_api([rffi.CCHARP], PyObject) +PyModule_Check, PyModule_CheckExact = build_type_checkers("Module", Module) + +@cpython_api([CONST_STRING], PyObject) def PyModule_New(space, name): """ Return a new module object with the __name__ attribute set to name. 
@@ -116,13 +118,6 @@ def convert_method_defs(space, dict_w, methods, w_type, w_self=None, name=None): dict_w[methodname] = w_obj -@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyModule_Check(space, w_obj): - w_type = space.gettypeobject(Module.typedef) - w_obj_type = space.type(w_obj) - return int(space.is_w(w_type, w_obj_type) or - space.issubtype_w(w_obj_type, w_type)) - @cpython_api([PyObject], PyObject, result_borrowed=True) def PyModule_GetDict(space, w_mod): if PyModule_Check(space, w_mod): diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py index 1f70b3e7d4..85864c60ba 100644 --- a/pypy/module/cpyext/pystrtod.py +++ b/pypy/module/cpyext/pystrtod.py @@ -1,6 +1,6 @@ import errno from pypy.interpreter.error import oefmt -from pypy.module.cpyext.api import cpython_api, CONST_STRING, INTP_real +from pypy.module.cpyext.api import cpython_api, INTP_real from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat @@ -22,7 +22,7 @@ DOUBLE_TO_STRING_TYPES_MAP = { rfloat.DIST_NAN: Py_DTST_NAN } -@cpython_api([CONST_STRING, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) +@cpython_api([rffi.CONST_CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) @jit.dont_look_inside # direct use of _get_errno() def PyOS_string_to_double(space, s, endptr, w_overflow_exception): """Convert a string s to a double, raising a Python @@ -63,6 +63,45 @@ def PyOS_string_to_double(space, s, endptr, w_overflow_exception): endpos = (rffi.cast(rffi.LONG, endptr[0]) - rffi.cast(rffi.LONG, s)) if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'): + low = rffi.constcharp2str(s).lower() + sz = 0 + if len(low) < 3: + pass + elif low[0] == '-': + if low.startswith('-infinity'): + result = -rfloat.INFINITY + sz = len("-infinity") + elif low.startswith("-inf"): + result = -rfloat.INFINITY + sz = 4 + elif low.startswith("-nan"): + result = -rfloat.NAN + sz = 4 + elif low[0] == '+': + if 
low.startswith("+infinity"): + result = rfloat.INFINITY + sz = len("+infinity") + elif low.startswith("+inf"): + result = rfloat.INFINITY + sz = 4 + elif low.startswith("+nan"): + result = rfloat.NAN + sz = 4 + elif low.startswith("infinity"): + result = rfloat.INFINITY + sz = len("infinity") + elif low.startswith("inf"): + result = rfloat.INFINITY + sz = 3 + elif low.startswith("nan"): + result = rfloat.NAN + sz = 3 + # result is set to 0.0 for a parse_error in dtoa.c + # if it changed, we must have sucessfully converted + if result != 0.0: + if endptr: + endptr[0] = rffi.cast(rffi.CCHARP, rffi.ptradd(s, sz)) + return result raise oefmt(space.w_ValueError, "invalid input at position %d", endpos) err = rffi.cast(lltype.Signed, rposix._get_errno()) diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py index 3dba4ffa8a..23e818ee94 100644 --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -1203,13 +1203,6 @@ def PyMethod_ClearFreeList(space): """ raise NotImplementedError -@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyModule_CheckExact(space, p): - """Return true if p is a module object, but not a subtype of - PyModule_Type. 
- """ - raise NotImplementedError - @cpython_api([PyObject], rffi.CCHARP) def PyModule_GetFilename(space, module): """Return the name of the file from which module was loaded using module's diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py index 997357716a..3b5d41bf38 100644 --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -34,7 +34,9 @@ class TestApi: def test_signature(self): common_functions = api.FUNCTIONS_BY_HEADER[api.pypy_decl] assert 'PyModule_Check' in common_functions - assert common_functions['PyModule_Check'].argtypes == [api.PyObject] + assert common_functions['PyModule_Check'].argtypes == [cts.gettype("void *")] + assert 'PyModule_GetDict' in common_functions + assert common_functions['PyModule_GetDict'].argtypes == [api.PyObject] class SpaceCompiler(SystemCompilationInfo): diff --git a/pypy/module/cpyext/test/test_floatobject.py b/pypy/module/cpyext/test/test_floatobject.py index ea3789fd01..cfcfa04f51 100644 --- a/pypy/module/cpyext/test/test_floatobject.py +++ b/pypy/module/cpyext/test/test_floatobject.py @@ -185,12 +185,18 @@ class AppTestFloatMacros(AppTestCpythonExtensionBase): assert module.test() == float('inf') def test_Py_NAN(self): + import sys module = self.import_extension('foo', [ ("test", "METH_NOARGS", """ return PyFloat_FromDouble(Py_NAN); """), ]) - import struct - float_bits = struct.Struct('d').pack - assert float_bits(module.test()) == float_bits(float('nan')) + if sys.platform == 'win32': + # CPython does not enforce bit-compatibility between the NANs + import math + assert math.isnan(module.test()) + else: + import struct + float_bits = struct.Struct('d').pack + assert float_bits(module.test()) == float_bits(float('nan')) diff --git a/pypy/module/cpyext/test/test_pystrtod.py b/pypy/module/cpyext/test/test_pystrtod.py index 445c473d85..e4f1dd575b 100644 --- a/pypy/module/cpyext/test/test_pystrtod.py +++ 
b/pypy/module/cpyext/test/test_pystrtod.py @@ -10,28 +10,28 @@ from pypy.module.cpyext.pystrtod import PyOS_string_to_double, INTP_real class TestPyOS_string_to_double(BaseApiTest): def test_simple_float(self, space): - s = rffi.str2charp('0.4') + s = rffi.str2constcharp('0.4') null = lltype.nullptr(rffi.CCHARPP.TO) r = PyOS_string_to_double(space, s, null, None) assert r == 0.4 rffi.free_charp(s) def test_empty_string(self, space): - s = rffi.str2charp('') + s = rffi.str2constcharp('') null = lltype.nullptr(rffi.CCHARPP.TO) with raises_w(space, ValueError): PyOS_string_to_double(space, s, null, None) rffi.free_charp(s) def test_bad_string(self, space): - s = rffi.str2charp(' 0.4') + s = rffi.str2constcharp(' 0.4') null = lltype.nullptr(rffi.CCHARPP.TO) with raises_w(space, ValueError): PyOS_string_to_double(space, s, null, None) rffi.free_charp(s) def test_overflow_pos(self, space): - s = rffi.str2charp('1e500') + s = rffi.str2constcharp('1e500') null = lltype.nullptr(rffi.CCHARPP.TO) r = PyOS_string_to_double(space, s, null, None) assert math.isinf(r) @@ -39,7 +39,7 @@ class TestPyOS_string_to_double(BaseApiTest): rffi.free_charp(s) def test_overflow_neg(self, space): - s = rffi.str2charp('-1e500') + s = rffi.str2constcharp('-1e500') null = lltype.nullptr(rffi.CCHARPP.TO) r = PyOS_string_to_double(space, s, null, None) assert math.isinf(r) @@ -47,14 +47,14 @@ class TestPyOS_string_to_double(BaseApiTest): rffi.free_charp(s) def test_overflow_exc(self, space): - s = rffi.str2charp('1e500') + s = rffi.str2constcharp('1e500') null = lltype.nullptr(rffi.CCHARPP.TO) with raises_w(space, ValueError): PyOS_string_to_double(space, s, null, space.w_ValueError) rffi.free_charp(s) def test_endptr_number(self, space): - s = rffi.str2charp('0.4') + s = rffi.str2constcharp('0.4') endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') r = PyOS_string_to_double(space, s, endp, None) assert r == 0.4 @@ -65,7 +65,7 @@ class TestPyOS_string_to_double(BaseApiTest): lltype.free(endp, 
flavor='raw') def test_endptr_tail(self, space): - s = rffi.str2charp('0.4 foo') + s = rffi.str2constcharp('0.4 foo') endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') r = PyOS_string_to_double(space, s, endp, None) assert r == 0.4 @@ -76,7 +76,7 @@ class TestPyOS_string_to_double(BaseApiTest): lltype.free(endp, flavor='raw') def test_endptr_no_conversion(self, space): - s = rffi.str2charp('foo') + s = rffi.str2constcharp('foo') endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') with raises_w(space, ValueError): PyOS_string_to_double(space, s, endp, None) @@ -86,6 +86,25 @@ class TestPyOS_string_to_double(BaseApiTest): rffi.free_charp(s) lltype.free(endp, flavor='raw') + def test_endptr_inf(self, space): + endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + for test in ('inf', '+infinity', 'INF'): + s = rffi.str2constcharp(test) + r = PyOS_string_to_double(space, s, endp, None) + assert r == float('inf') + endp_addr = rffi.cast(rffi.LONG, endp[0]) + s_addr = rffi.cast(rffi.LONG, s) + assert endp_addr == s_addr + len(test) + rffi.free_charp(s) + s = rffi.str2constcharp('inf aaa') + r = PyOS_string_to_double(space, s, endp, None) + assert r == float('inf') + endp_addr = rffi.cast(rffi.LONG, endp[0]) + s_addr = rffi.cast(rffi.LONG, s) + # CPython returns 3 + assert endp_addr == s_addr + 3 + rffi.free_charp(s) + lltype.free(endp, flavor='raw') class TestPyOS_double_to_string(BaseApiTest): diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py index 8346818c9c..e0cb75222a 100644 --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -114,6 +114,8 @@ class TestCall(BaseTestPyPyC): self.a = a def f(self, i): return self.a + i + a = A("a") # stop field unboxing + i = 0 a = A(1) while i < n: diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py index 4e619a3e90..2a42915a8a 100644 --- 
a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -9,6 +9,7 @@ class TestDicts(BaseTestPyPyC): class A(object): pass a = A() + a.x = "x" # stop field unboxing a.x = 1 for s in sys.modules.keys() * 1000: d.get(s) # force pending setfields etc. diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py index 6b4b2bb92e..1aa66bd586 100644 --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -234,6 +234,7 @@ class TestInstance(BaseTestPyPyC): self.x = x i = 0 + B("abc") # prevent field unboxing b = B(1) while i < 100: v = b.x # ID: loadattr1 @@ -321,3 +322,57 @@ class TestInstance(BaseTestPyPyC): --TICK-- jump(..., descr=...) """) + + def test_float_instance_field_read(self): + def main(): + class A(object): + def __init__(self, x, y): + self.x = float(x) + self.y = float(y) + + l = [A(i, i * 5) for i in range(2000)] + + res = 0 + for x in l: + res += x.x + x.y # ID: get + return res + log = self.run(main, []) + listcomp, loop, = log.loops_by_filename(self.filepath) + loop.match_by_id('get', """ + p67 = getfield_gc_r(p63, descr=...) # map + guard_value(p67, ConstPtr(ptr68), descr=...) # promote map + guard_not_invalidated(descr=...) + p69 = getfield_gc_r(p63, descr=...) # value0 + f71 = getarrayitem_gc_f(p69, 0, descr=...) # x + f73 = getarrayitem_gc_f(p69, 1, descr=...) # y + f74 = float_add(f71, f73) # add them + f75 = float_add(f57, f74) + --TICK-- +""") + + def test_float_instance_field_write(self): + def main(): + class A(object): + def __init__(self, x): + self.x = float(x) + + l = [A(i) for i in range(2000)] + + for a in l: + a.x += 3.4 # ID: set + log = self.run(main, []) + listcomp, loop, = log.loops_by_filename(self.filepath) + loop.match_by_id('set', """ + p60 = getfield_gc_r(p56, descr=...) # map + guard_value(p60, ConstPtr(ptr61), descr=...) + guard_not_invalidated(descr=...) 
+ p62 = getfield_gc_r(p56, descr=...) # value + f64 = getarrayitem_gc_f(p62, 0, descr=...) # x + f66 = float_add(f64, 3.400000) + i68 = getfield_raw_i(..., descr=...) + setarrayitem_gc(p62, 0, f66, descr=...) # store x + i71 = int_lt(i68, 0) + guard_false(i71, descr=...) +""") + + diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py index 9bddf12a67..1f613387ef 100644 --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -96,6 +96,7 @@ class TestMisc(BaseTestPyPyC): class A(object): def __init__(self, val): self.val1 = self.val2 = val + A("x") # prevent field unboxing a = A(1) b = A(1) sa = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py index 04c1c86fe2..0558808730 100644 --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -31,6 +31,7 @@ class TestThread(BaseTestPyPyC): def main(n): import thread local = thread._local() + local.x = "abc" # prevent type unboxing local.x = 1 i = 0 while i < n: diff --git a/pypy/module/time/test/test_time.py b/pypy/module/time/test/test_time.py index 306a804d6c..d8f817977c 100644 --- a/pypy/module/time/test/test_time.py +++ b/pypy/module/time/test/test_time.py @@ -126,10 +126,7 @@ class AppTestTime: assert time.mktime(time.localtime(-1)) == -1 res = time.mktime((2000, 1, 1, 0, 0, 0, -1, -1, -1)) - if os.name == 'nt': - assert time.ctime(res) == 'Sat Jan 01 00:00:00 2000' - else: - assert time.ctime(res) == 'Sat Jan 1 00:00:00 2000' + assert time.ctime(res) == 'Sat Jan 1 00:00:00 2000' def test_asctime(self): import time diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py index 98be5a10ea..245aedfb4e 100644 --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -1,7 +1,8 @@ import weakref, sys from rpython.rlib import jit, objectmodel, debug, rerased -from 
rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rarithmetic import intmask, r_uint, LONG_BIT +from rpython.rlib.longlong2float import longlong2float, float2longlong from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import ( @@ -12,10 +13,13 @@ from pypy.objspace.std.dictmultiobject import ( from pypy.objspace.std.typeobject import MutableCell + erase_item, unerase_item = rerased.new_erasing_pair("mapdict storage item") erase_map, unerase_map = rerased.new_erasing_pair("map") erase_list, unerase_list = rerased.new_erasing_pair("mapdict storage list") +erase_unboxed, unerase_unboxed = rerased.new_erasing_pair("mapdict unwrapped storage") +ALLOW_UNBOXING_INTS = LONG_BIT == 64 # ____________________________________________________________ # attribute shapes @@ -33,44 +37,39 @@ LIMIT_MAP_ATTRIBUTES = 80 class AbstractAttribute(object): _immutable_fields_ = ['terminator'] cache_attrs = None - _size_estimate = 0 def __init__(self, space, terminator): self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator - def read(self, obj, name, index): - attr = self.find_map_attr(name, index) + def read(self, obj, name, attrkind): + attr = self.find_map_attr(name, attrkind) if attr is None: - return self.terminator._read_terminator(obj, name, index) + return self.terminator._read_terminator(obj, name, attrkind) if ( - jit.isconstant(attr.storageindex) and + jit.isconstant(attr) and jit.isconstant(obj) and not attr.ever_mutated ): - return self._pure_mapdict_read_storage(obj, attr.storageindex) + return attr._pure_direct_read(obj) else: - return obj._mapdict_read_storage(attr.storageindex) - - @jit.elidable - def _pure_mapdict_read_storage(self, obj, storageindex): - return obj._mapdict_read_storage(storageindex) + return attr._direct_read(obj) - def write(self, obj, name, index, w_value): - attr = self.find_map_attr(name, index) + def write(self, obj, name, attrkind, w_value): + attr = 
self.find_map_attr(name, attrkind) if attr is None: - return self.terminator._write_terminator(obj, name, index, w_value) + return self.terminator._write_terminator(obj, name, attrkind, w_value) if not attr.ever_mutated: attr.ever_mutated = True - obj._mapdict_write_storage(attr.storageindex, w_value) + attr._direct_write(obj, w_value) return True - def delete(self, obj, name, index): - pass + def delete(self, obj, name, attrkind): + return None @jit.elidable - def find_map_attr(self, name, index): + def find_map_attr(self, name, attrkind): # attr cache space = self.space cache = space.fromcache(MapAttrCache) @@ -84,7 +83,7 @@ class AbstractAttribute(object): c1 = 0x345678 c2 = 1000003 hash_name = objectmodel.compute_hash(name) - hash_selector = intmask((c2 * ((c2 * c1) ^ hash_name)) ^ index) + hash_selector = intmask((c2 * ((c2 * c1) ^ hash_name)) ^ attrkind) product = intmask(attrs_as_int * hash_selector) attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too @@ -92,24 +91,24 @@ class AbstractAttribute(object): if cached_attr is self: cached_name = cache.names[attr_hash] cached_index = cache.indexes[attr_hash] - if cached_name == name and cached_index == index: + if cached_name == name and cached_index == attrkind: attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: cache.hits[name] = cache.hits.get(name, 0) + 1 return attr - attr = self._find_map_attr(name, index) + attr = self._find_map_attr(name, attrkind) if space._side_effects_ok(): cache.attrs[attr_hash] = self cache.names[attr_hash] = name - cache.indexes[attr_hash] = index + cache.indexes[attr_hash] = attrkind cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: cache.misses[name] = cache.misses.get(name, 0) + 1 return attr - def _find_map_attr(self, name, index): + def _find_map_attr(self, name, attrkind): while isinstance(self, PlainAttribute): - if index == self.index and name == 
self.name: + if attrkind == self.attrkind and name == self.name: return self self = self.back return None @@ -117,7 +116,13 @@ class AbstractAttribute(object): def copy(self, obj): raise NotImplementedError("abstract base class") - def length(self): + def storage_needed(self): + """ number of storage slots needed to represent the content of an + instance that uses self as its map """ + raise NotImplementedError("abstract base class") + + def num_attributes(self): + """ number of attributes represented by self. """ raise NotImplementedError("abstract base class") def get_terminator(self): @@ -126,83 +131,54 @@ class AbstractAttribute(object): def set_terminator(self, obj, terminator): raise NotImplementedError("abstract base class") - @jit.elidable - def size_estimate(self): - return self._size_estimate >> NUM_DIGITS - def search(self, attrtype): return None @jit.elidable - def _get_new_attr(self, name, index): + def _get_new_attr(self, name, attrkind, unbox_type): cache = self.cache_attrs if cache is None: cache = self.cache_attrs = {} - attr = cache.get((name, index), None) - if attr is None: - attr = PlainAttribute(name, index, self) - cache[name, index] = attr - return attr - - def add_attr(self, obj, name, index, w_value): - self._reorder_and_add(obj, name, index, w_value) - if not jit.we_are_jitted(): - oldattr = self - attr = obj._get_mapdict_map() - size_est = (oldattr._size_estimate + attr.size_estimate() - - oldattr.size_estimate()) - assert size_est >= (oldattr.length() * NUM_DIGITS_POW2) - oldattr._size_estimate = size_est - - def _add_attr_without_reordering(self, obj, name, index, w_value): - attr = self._get_new_attr(name, index) - attr._switch_map_and_write_storage(obj, w_value) - - @jit.unroll_safe - def _switch_map_and_write_storage(self, obj, w_value): - if self.length() > obj._mapdict_storage_length(): - # note that self.size_estimate() is always at least self.length() - new_storage = [None] * self.size_estimate() - for i in 
range(obj._mapdict_storage_length()): - new_storage[i] = obj._mapdict_read_storage(i) - obj._set_mapdict_storage_and_map(new_storage, self) - - # the order is important here: first change the map, then the storage, - # for the benefit of the special subclasses - obj._set_mapdict_map(self) - obj._mapdict_write_storage(self.storageindex, w_value) + key = (name, attrkind) + holder = cache.get(key, None) + if holder is None: + holder = cache[key] = CachedAttributeHolder(name, attrkind, self, unbox_type) + return holder + def add_attr(self, obj, name, attrkind, w_value): + space = self.space + self._reorder_and_add(obj, name, attrkind, w_value) @jit.elidable - def _find_branch_to_move_into(self, name, index): + def _find_branch_to_move_into(self, name, attrkind, unbox_type): # walk up the map chain to find an ancestor with lower order that # already has the current name as a child inserted current_order = sys.maxint number_to_readd = 0 current = self - key = (name, index) + key = (name, attrkind) while True: - attr = None + holder = None if current.cache_attrs is not None: - attr = current.cache_attrs.get(key, None) - if attr is None or attr.order > current_order: + holder = current.cache_attrs.get(key, None) + if holder is None or holder.order > current_order: # we reached the top, so we didn't find it anywhere, # just add it to the top attribute if not isinstance(current, PlainAttribute): - return 0, self._get_new_attr(name, index) + return 0, self._get_new_attr(name, attrkind, unbox_type) else: - return number_to_readd, attr + return number_to_readd, holder # if not found try parent number_to_readd += 1 current_order = current.order current = current.back - @jit.look_inside_iff(lambda self, obj, name, index, w_value: + @jit.look_inside_iff(lambda self, obj, name, attrkind, w_value: jit.isconstant(self) and jit.isconstant(name) and - jit.isconstant(index)) - def _reorder_and_add(self, obj, name, index, w_value): + jit.isconstant(attrkind)) + def _reorder_and_add(self, 
obj, name, attrkind, w_value): # the idea is as follows: the subtrees of any map are ordered by # insertion. the invariant is that subtrees that are inserted later # must not contain the name of the attribute of any earlier inserted @@ -226,17 +202,23 @@ class AbstractAttribute(object): stack_index = 0 while True: current = self - number_to_readd, attr = self._find_branch_to_move_into(name, index) + unbox_type = None + if self.terminator.allow_unboxing: + if ALLOW_UNBOXING_INTS and type(w_value) is self.space.IntObjectCls: + unbox_type = self.space.IntObjectCls + elif type(w_value) is self.space.FloatObjectCls: + unbox_type = self.space.FloatObjectCls + number_to_readd, holder = self._find_branch_to_move_into(name, attrkind, unbox_type) + attr = holder.pick_attr(unbox_type) # we found the attributes further up, need to save the # previous values of the attributes we passed if number_to_readd: if stack is None: - stack = [erase_map(None)] * (self.length() * 2) + stack = [erase_map(None)] * (self.num_attributes() * 2) current = self for i in range(number_to_readd): assert isinstance(current, PlainAttribute) - w_self_value = obj._mapdict_read_storage( - current.storageindex) + w_self_value = current._prim_direct_read(obj) stack[stack_index] = erase_map(current) stack[stack_index + 1] = erase_item(w_self_value) stack_index += 2 @@ -251,7 +233,7 @@ class AbstractAttribute(object): next_map = unerase_map(stack[stack_index]) w_value = unerase_item(stack[stack_index + 1]) name = next_map.name - index = next_map.index + attrkind = next_map.attrkind self = obj._get_mapdict_map() def materialize_r_dict(self, space, obj, dict_w): @@ -263,23 +245,27 @@ class AbstractAttribute(object): def remove_dict_entries(self, obj): raise NotImplementedError("abstract base class") - def __repr__(self): + def repr(self): return "<%s>" % (self.__class__.__name__,) + def __repr__(self): + return self.repr() + class Terminator(AbstractAttribute): - _immutable_fields_ = ['w_cls'] + 
_immutable_fields_ = ['w_cls', 'allow_unboxing?'] def __init__(self, space, w_cls): AbstractAttribute.__init__(self, space, self) self.w_cls = w_cls + self.allow_unboxing = True - def _read_terminator(self, obj, name, index): + def _read_terminator(self, obj, name, attrkind): return None - def _write_terminator(self, obj, name, index, w_value): - obj._get_mapdict_map().add_attr(obj, name, index, w_value) - if index == DICT and obj._get_mapdict_map().length() >= LIMIT_MAP_ATTRIBUTES: + def _write_terminator(self, obj, name, attrkind, w_value): + obj._get_mapdict_map().add_attr(obj, name, attrkind, w_value) + if attrkind == DICT and obj._get_mapdict_map().num_attributes() >= LIMIT_MAP_ATTRIBUTES: space = self.space w_dict = obj.getdict(space) assert isinstance(w_dict, W_DictMultiObject) @@ -294,7 +280,10 @@ class Terminator(AbstractAttribute): result._mapdict_init_empty(self) return result - def length(self): + def storage_needed(self): + return 0 + + def num_attributes(self): return 0 def set_terminator(self, obj, terminator): @@ -306,7 +295,7 @@ class Terminator(AbstractAttribute): def remove_dict_entries(self, obj): return self.copy(obj) - def __repr__(self): + def repr(self): return "<%s w_cls=%s>" % (self.__class__.__name__, self.w_cls) class DictTerminator(Terminator): @@ -329,31 +318,31 @@ class DictTerminator(Terminator): class NoDictTerminator(Terminator): - def _write_terminator(self, obj, name, index, w_value): - if index == DICT: + def _write_terminator(self, obj, name, attrkind, w_value): + if attrkind == DICT: return False - return Terminator._write_terminator(self, obj, name, index, w_value) + return Terminator._write_terminator(self, obj, name, attrkind, w_value) class DevolvedDictTerminator(Terminator): - def _read_terminator(self, obj, name, index): - if index == DICT: + def _read_terminator(self, obj, name, attrkind): + if attrkind == DICT: space = self.space w_dict = obj.getdict(space) return space.finditem_str(w_dict, name) - return 
Terminator._read_terminator(self, obj, name, index) + return Terminator._read_terminator(self, obj, name, attrkind) - def _write_terminator(self, obj, name, index, w_value): - if index == DICT: + def _write_terminator(self, obj, name, attrkind, w_value): + if attrkind == DICT: space = self.space w_dict = obj.getdict(space) space.setitem_str(w_dict, name, w_value) return True - return Terminator._write_terminator(self, obj, name, index, w_value) + return Terminator._write_terminator(self, obj, name, attrkind, w_value) - def delete(self, obj, name, index): + def delete(self, obj, name, attrkind): from pypy.interpreter.error import OperationError - if index == DICT: + if attrkind == DICT: space = self.space w_dict = obj.getdict(space) try: @@ -362,7 +351,7 @@ class DevolvedDictTerminator(Terminator): if not ex.match(space, space.w_KeyError): raise return Terminator.copy(self, obj) - return Terminator.delete(self, obj, name, index) + return Terminator.delete(self, obj, name, attrkind) def remove_dict_entries(self, obj): assert 0, "should be unreachable" @@ -374,29 +363,51 @@ class DevolvedDictTerminator(Terminator): return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['name', 'index', 'storageindex', 'back', 'ever_mutated?', 'order'] + _immutable_fields_ = ['name', 'attrkind', 'storageindex', '_num_attributes', 'back', 'ever_mutated?', 'order'] - def __init__(self, name, index, back): + def __init__(self, name, attrkind, back, order): AbstractAttribute.__init__(self, back.space, back.terminator) self.name = name - self.index = index - self.storageindex = back.length() + self.attrkind = attrkind + self.storageindex = back.storage_needed() + self._num_attributes = back.num_attributes() + 1 self.back = back - self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False - self.order = len(back.cache_attrs) if back.cache_attrs else 0 + self.order = order def _copy_attr(self, obj, 
new_obj): - w_value = self.read(obj, self.name, self.index) - new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value) + w_value = self._prim_direct_read(obj) + new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.attrkind, w_value) + + def _direct_read(self, obj): + return unerase_item(obj._mapdict_read_storage(self.storageindex)) + + _prim_direct_read = _direct_read + + @jit.elidable + def _pure_direct_read(self, obj): + return unerase_item(obj._mapdict_read_storage(self.storageindex)) + + def _direct_write(self, obj, w_value): + obj._mapdict_write_storage(self.storageindex, erase_item(w_value)) + + def _switch_map_and_write_storage(self, obj, w_value): + if self.storage_needed() > obj._mapdict_storage_length(): + obj._set_mapdict_increase_storage(self, erase_item(w_value)) + return - def delete(self, obj, name, index): - if index == self.index and name == self.name: + # the order is important here: first change the map, then the storage, + # for the benefit of the special subclasses + obj._set_mapdict_map(self) + self._direct_write(obj, w_value) + + def delete(self, obj, name, attrkind): + if attrkind == self.attrkind and name == self.name: # ok, attribute is deleted if not self.ever_mutated: self.ever_mutated = True return self.back.copy(obj) - new_obj = self.back.delete(obj, name, index) + new_obj = self.back.delete(obj, name, attrkind) if new_obj is not None: self._copy_attr(obj, new_obj) return new_obj @@ -406,44 +417,203 @@ class PlainAttribute(AbstractAttribute): self._copy_attr(obj, new_obj) return new_obj - def length(self): + def storage_needed(self): return self.storageindex + 1 + def num_attributes(self): + return self._num_attributes + def set_terminator(self, obj, terminator): new_obj = self.back.set_terminator(obj, terminator) self._copy_attr(obj, new_obj) return new_obj def search(self, attrtype): - if self.index == attrtype: + if self.attrkind == attrtype: return self return self.back.search(attrtype) def 
materialize_r_dict(self, space, obj, dict_w): new_obj = self.back.materialize_r_dict(space, obj, dict_w) - if self.index == DICT: + if self.attrkind == DICT: w_attr = space.newtext(self.name) - dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) + dict_w[w_attr] = self._prim_direct_read(obj) else: self._copy_attr(obj, new_obj) return new_obj def materialize_str_dict(self, space, obj, str_dict): new_obj = self.back.materialize_str_dict(space, obj, str_dict) - if self.index == DICT: - str_dict[self.name] = obj._mapdict_read_storage(self.storageindex) + if self.attrkind == DICT: + str_dict[self.name] = self._prim_direct_read(obj) else: self._copy_attr(obj, new_obj) return new_obj def remove_dict_entries(self, obj): new_obj = self.back.remove_dict_entries(obj) - if self.index != DICT: + if self.attrkind != DICT: self._copy_attr(obj, new_obj) return new_obj - def __repr__(self): - return "<PlainAttribute %s %s %s %r>" % (self.name, self.index, self.storageindex, self.back) + def repr(self): + return "<PlainAttribute %s %s %s %s>" % ( + self.name, attrkind_name(self.attrkind), self.storageindex, + self.back.repr()) + + +class UnboxedPlainAttribute(PlainAttribute): + _immutable_fields_ = ["listindex", "firstunwrapped", "typ"] + def __init__(self, name, attrkind, back, order, typ): + AbstractAttribute.__init__(self, back.space, back.terminator) + # don't call PlainAttribute.__init__, that runs into weird problems + self.name = name + self.attrkind = attrkind + self.back = back + self.ever_mutated = False + self.order = order + # here, storageindex is where the list of floats is stored + # and listindex is where in the list the actual value goes + self.firstunwrapped = False + self._compute_storageindex_listindex() + self._num_attributes = back.num_attributes() + 1 + self.typ = typ + + def _compute_storageindex_listindex(self): + attr = self.back + storageindex = -1 + while isinstance(attr, PlainAttribute): + if isinstance(attr, UnboxedPlainAttribute): + 
storageindex = attr.storageindex + listindex = attr.listindex + 1 + break + attr = attr.back + else: + storageindex = self.back.storage_needed() + listindex = 0 + self.firstunwrapped = True + self.storageindex = storageindex + self.listindex = listindex + + def storage_needed(self): + if self.firstunwrapped: + return self.storageindex + 1 + return self.back.storage_needed() + + + def _unbox(self, w_value): + space = self.space + assert type(w_value) is self.typ + if type(w_value) is space.IntObjectCls: + return longlong2float(space.int_w(w_value)) + else: + return space.float_w(w_value) + + def _box(self, val): + space = self.space + if self.typ is space.IntObjectCls: + return space.newint(float2longlong(val)) + else: + return space.newfloat(val) + + def _convert_to_boxed(self, obj): + new_obj = obj._get_mapdict_map().copy(obj) + map = new_obj.map + obj._set_mapdict_storage_and_map(new_obj.storage, map) + return map + + def _direct_read(self, obj): + w_res = self._prim_direct_read(obj) + if self.terminator.allow_unboxing == False: + # oops, some other object using the same class isn't type stable! + # stop using unboxing altogether to not get too many variants of maps + self._convert_to_boxed(obj) + return w_res + + def _prim_direct_read(self, obj): + return self._box(unerase_unboxed(obj._mapdict_read_storage(self.storageindex))[self.listindex]) + + def _pure_direct_read(self, obj): + # somewhat tricky! 
note that _direct_read isn't really elidable (it has + # potential side effects, and the boxes aren't always the same) + # but _pure_unboxed_read is elidable, and we can let the jit see the + # boxing + return self._box(self._pure_unboxed_read(obj)) + + @jit.elidable + def _pure_unboxed_read(self, obj): + return unerase_unboxed(obj._mapdict_read_storage(self.storageindex))[self.listindex] + + def _direct_write(self, obj, w_value): + if type(w_value) is self.typ: + val = self._unbox(w_value) + unboxed = unerase_unboxed(obj._mapdict_read_storage(self.storageindex)) + unboxed[self.listindex] = val + return + # type change not supposed to happen. according to the principle + # of type freezing, we just give up, and will never unbox anything + # from that class again + self.terminator.allow_unboxing = False + map = self._convert_to_boxed(obj) + # now obj won't have any UnboxedPlainAttribute in its chain any + # more, because allow_unboxing is False + map.write(obj, self.name, self.attrkind, w_value) + + def _switch_map_and_write_storage(self, obj, w_value): + from rpython.rlib.debug import make_sure_not_resized + val = self._unbox(w_value) + if self.firstunwrapped: + unboxed = erase_unboxed(make_sure_not_resized([val])) + if self.storage_needed() > obj._mapdict_storage_length(): + obj._set_mapdict_increase_storage(self, unboxed) + return + + obj._set_mapdict_map(self) + obj._mapdict_write_storage(self.storageindex, unboxed) + else: + unboxed = unerase_unboxed(obj._mapdict_read_storage(self.storageindex)) + + obj._set_mapdict_map(self) + if len(unboxed) <= self.listindex: + # size can only increase by 1 + assert len(unboxed) == self.listindex + unboxed = unboxed + [val] + obj._mapdict_write_storage(self.storageindex, erase_unboxed(unboxed)) + else: + # the unboxed list is already large enough, due to reordering + unboxed[self.listindex] = val + + def repr(self): + return "<UnboxedPlainAttribute %s %s %s %s %s>" % ( + self.name, attrkind_name(self.attrkind), 
self.storageindex, + self.listindex, self.back.repr()) + + +class CachedAttributeHolder(object): + _immutable_fields_ = ['attr?', 'typ?'] + + def __init__(self, name, attrkind, back, unbox_type): + self.order = len(back.cache_attrs) if back.cache_attrs else 0 + if unbox_type is None: + attr = PlainAttribute(name, attrkind, back, self.order) + else: + attr = UnboxedPlainAttribute(name, attrkind, back, self.order, unbox_type) + self.attr = attr + self.typ = unbox_type + + def pick_attr(self, unbox_type): + if self.typ is None or self.typ is unbox_type: + return self.attr + self.typ = None + # this will never be traced, because the previous assignment + # invalidates a quasi-immutable field + self.attr.terminator.allow_unboxing = False + name = self.attr.name + attrkind = self.attr.attrkind + back = self.attr.back + attr = self.attr = PlainAttribute(name, attrkind, back, self.order) + return attr + class MapAttrCache(object): def __init__(self, space): @@ -476,6 +646,15 @@ SPECIAL = 1 INVALID = 2 SLOTS_STARTING_FROM = 3 +def attrkind_name(attrkind): + if attrkind == DICT: + return "DICT" + if attrkind == SPECIAL: + return "SPECIAL" + if attrkind == INVALID: + return "INVALID" + return str(attrkind) + # a little bit of a mess of mixin classes that implement various pieces of # objspace user object functionality in terms of mapdict @@ -511,16 +690,16 @@ class BaseUserClassMapdict: # methods needed for slots def getslotvalue(self, slotindex): - index = SLOTS_STARTING_FROM + slotindex - return self._get_mapdict_map().read(self, "slot", index) + attrkind = SLOTS_STARTING_FROM + slotindex + return self._get_mapdict_map().read(self, "slot", attrkind) def setslotvalue(self, slotindex, w_value): - index = SLOTS_STARTING_FROM + slotindex - self._get_mapdict_map().write(self, "slot", index, w_value) + attrkind = SLOTS_STARTING_FROM + slotindex + self._get_mapdict_map().write(self, "slot", attrkind, w_value) def delslotvalue(self, slotindex): - index = SLOTS_STARTING_FROM + 
slotindex - new_obj = self._get_mapdict_map().delete(self, "slot", index) + attrkind = SLOTS_STARTING_FROM + slotindex + new_obj = self._get_mapdict_map().delete(self, "slot", attrkind) if new_obj is None: return False self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) @@ -620,8 +799,8 @@ class MapdictStorageMixin(object): def _mapdict_init_empty(self, map): from rpython.rlib.debug import make_sure_not_resized - self.map = map - self.storage = make_sure_not_resized([None] * map.size_estimate()) + self._set_mapdict_map(map) + self.storage = make_sure_not_resized([]) def _mapdict_read_storage(self, storageindex): assert storageindex >= 0 @@ -631,11 +810,22 @@ class MapdictStorageMixin(object): self.storage[storageindex] = value def _mapdict_storage_length(self): + """ return the size of the storage (which should be longer or equal in + size to self.map.storage_needed() due to overallocation). """ return len(self.storage) + def _set_mapdict_increase_storage(self, map, value): + """ increase storage size, adding value """ + len_storage = len(self.storage) + new_storage = self.storage + [erase_item(None)] * (map.storage_needed() - len_storage) + new_storage[len_storage] = value + self._set_mapdict_map(map) + self.storage = new_storage + def _set_mapdict_storage_and_map(self, storage, map): + """ store a new complete storage list, and also a new map """ self.storage = storage - self.map = map + self._set_mapdict_map(map) class ObjectWithoutDict(W_Root): # mainly for tests @@ -666,15 +856,18 @@ def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): def _get_mapdict_map(self): return jit.promote(self.map) def _set_mapdict_map(self, map): + if self._has_storage_list() and map.storage_needed() <= n: + # weird corner case interacting with unboxing, see test_unbox_reorder_bug3 + if map.storage_needed() == n: + setattr(self, valnmin1, self._mapdict_get_storage_list()[0]) self.map = map def _mapdict_init_empty(self, map): - for i in rangenmin1: - setattr(self, 
"_value%s" % i, None) - setattr(self, valnmin1, erase_item(None)) self.map = map + for i in rangen: + setattr(self, "_value%s" % i, erase_item(None)) def _has_storage_list(self): - return self.map.length() > n + return self.map.storage_needed() > n def _mapdict_get_storage_list(self): erased = getattr(self, valnmin1) @@ -688,8 +881,8 @@ def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): return getattr(self, "_value%s" % i) if self._has_storage_list(): return self._mapdict_get_storage_list()[storageindex - nmin1] - erased = getattr(self, "_value%s" % nmin1) - return unerase_item(erased) + erased = getattr(self, valnmin1) + return erased def _mapdict_write_storage(self, storageindex, value): assert storageindex >= 0 @@ -701,7 +894,7 @@ def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): if self._has_storage_list(): self._mapdict_get_storage_list()[storageindex - nmin1] = value return - setattr(self, "_value%s" % nmin1, erase_item(value)) + setattr(self, valnmin1, value) def _mapdict_storage_length(self): if self._has_storage_list(): @@ -709,13 +902,13 @@ def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): return n def _set_mapdict_storage_and_map(self, storage, map): - self.map = map + self._set_mapdict_map(map) len_storage = len(storage) for i in rangenmin1: if i < len_storage: erased = storage[i] else: - erased = None + erased = erase_item(None) setattr(self, "_value%s" % i, erased) has_storage_list = self._has_storage_list() if len_storage < n: @@ -723,19 +916,32 @@ def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS): erased = erase_item(None) elif len_storage == n: assert not has_storage_list - erased = erase_item(storage[nmin1]) + erased = storage[nmin1] elif not has_storage_list: - # storage is longer than self.map.length() only due to + # storage is longer than self.map.storage_needed() only due to # overallocation - erased = erase_item(storage[nmin1]) + erased = storage[nmin1] # in theory, we should be ultra-paranoid and check all entries, 
# but checking just one should catch most problems anyway: - assert storage[n] is None + assert unerase_item(storage[n]) is None else: storage_list = storage[nmin1:] erased = erase_list(storage_list) setattr(self, "_value%s" % nmin1, erased) + def _set_mapdict_increase_storage(self, map, value): + storage_needed = map.storage_needed() + if self.map.storage_needed() == n: + erased = getattr(self, "_value%s" % nmin1) + new_storage = [erased, value] + else: + new_storage = [erase_item(None)] * (storage_needed - self._mapdict_storage_length()) + new_storage = self._mapdict_get_storage_list() + new_storage + new_storage[storage_needed - n] = value + self._set_mapdict_map(map) + erased = erase_list(new_storage) + setattr(self, "_value%s" % nmin1, erased) + subcls.__name__ = "Size%s" % n return subcls @@ -890,7 +1096,7 @@ class IteratorMixin(object): def _init(self, strategy, w_dict): w_obj = strategy.unerase(w_dict.dstorage) self.w_obj = w_obj - self.orig_map = curr_map = w_obj._get_mapdict_map() + curr_map = w_obj._get_mapdict_map() # We enumerate non-lazily the attributes, and store them in the # 'attrs' list. We then walk that list in opposite order. 
This # gives an ordering that is more natural (roughly corresponding @@ -917,8 +1123,6 @@ class MapDictIteratorKeys(BaseKeyIterator): def next_key_entry(self): assert isinstance(self.w_dict.get_strategy(), MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None attrs = self.attrs if len(attrs) > 0: attr = attrs.pop() @@ -936,8 +1140,6 @@ class MapDictIteratorValues(BaseValueIterator): def next_value_entry(self): assert isinstance(self.w_dict.get_strategy(), MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None attrs = self.attrs if len(attrs) > 0: attr = attrs.pop() @@ -954,8 +1156,6 @@ class MapDictIteratorItems(BaseItemIterator): def next_item_entry(self): assert isinstance(self.w_dict.get_strategy(), MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None, None attrs = self.attrs if len(attrs) > 0: attr = attrs.pop() @@ -969,7 +1169,6 @@ class MapDictIteratorItems(BaseItemIterator): class CacheEntry(object): version_tag = None - storageindex = 0 w_method = None # for callmethod success_counter = 0 failure_counter = 0 @@ -1002,7 +1201,7 @@ def init_mapdict_cache(pycode): pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries @jit.dont_look_inside -def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None): +def _fill_cache(pycode, nameindex, map, version_tag, attr, w_method=None): if not pycode.space._side_effects_ok(): return entry = pycode._mapdict_caches[nameindex] @@ -1010,8 +1209,11 @@ def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None entry = CacheEntry() pycode._mapdict_caches[nameindex] = entry entry.map_wref = weakref.ref(map) + if attr: + entry.attr_wref = weakref.ref(attr) + else: + entry.attr_wref = None entry.version_tag = version_tag - entry.storageindex = storageindex entry.w_method = w_method if pycode.space.config.objspace.std.withmethodcachecounter: entry.failure_counter += 1 @@ -1023,7 
+1225,9 @@ def LOAD_ATTR_caching(pycode, w_obj, nameindex): map = w_obj._get_mapdict_map() if entry.is_valid_for_map(map) and entry.w_method is None: # everything matches, it's incredibly fast - return w_obj._mapdict_read_storage(entry.storageindex) + attr = entry.attr_wref() + if attr is not None: + return attr._direct_read(w_obj) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True @@ -1043,9 +1247,9 @@ def LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map): _, w_descr = w_type._pure_lookup_where_with_method_cache( name, version_tag) # - attrname, index = ("", INVALID) + attrname, attrkind = ("", INVALID) if w_descr is None: - attrname, index = (name, DICT) # common case: no such attr in the class + attrname, attrkind = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, MutableCell): pass # we have a MutableCell in the class: give up elif space.is_data_descr(w_descr): @@ -1053,7 +1257,7 @@ def LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map): # (if any) has no relevance. from pypy.interpreter.typedef import Member if isinstance(w_descr, Member): # it is a slot -- easy case - attrname, index = ("slot", SLOTS_STARTING_FROM + w_descr.index) + attrname, attrkind = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is # also a dict attribute, use the latter, caching its storageindex. @@ -1061,16 +1265,16 @@ def LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map): # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. 
attrname = name - index = DICT + attrkind = DICT # - if index != INVALID: - attr = map.find_map_attr(attrname, index) + if attrkind != INVALID: + attr = map.find_map_attr(attrname, attrkind) if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator # or the class provides its own dict, not using mapdict, then: - # map.find_map_attr will always return None if index==DICT. - _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) - return w_obj._mapdict_read_storage(attr.storageindex) + # map.find_map_attr will always return None if attrkind==DICT. + _fill_cache(pycode, nameindex, map, version_tag, attr) + return attr._direct_read(w_obj) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) @@ -1103,7 +1307,7 @@ def LOOKUP_METHOD_mapdict_fill_cache_method(space, pycode, name, nameindex, map = w_obj._get_mapdict_map() if map is None or isinstance(map.terminator, DevolvedDictTerminator): return - _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) + _fill_cache(pycode, nameindex, map, version_tag, None, w_method) # XXX fix me: if a function contains a loop with both LOAD_ATTR and # XXX LOOKUP_METHOD on the same attribute name, it keeps trashing and diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py index 3555194e23..5b40323c47 100644 --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -51,6 +51,8 @@ class StdObjSpace(ObjSpace): self.FrameClass = frame.build_frame(self) self.StringObjectCls = W_BytesObject self.UnicodeObjectCls = W_UnicodeObject + self.IntObjectCls = W_IntObject + self.FloatObjectCls = W_FloatObject # singletons self.w_None = W_NoneObject.w_None diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py index 141eb4512a..12d8654354 100644 --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ 
b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1253,9 +1253,13 @@ class FakeSpace: assert isinstance(integer, int) return integer + def float_w(self, fl, allow_conversion=True): + assert isinstance(fl, float) + return fl + def wrap(self, obj): return obj - newtext = newbytes = wrap + newtext = newbytes = newint = newfloat = wrap def isinstance_w(self, obj, klass): return isinstance(obj, klass) @@ -1297,6 +1301,8 @@ class FakeSpace: w_float = float StringObjectCls = FakeString UnicodeObjectCls = FakeUnicode + IntObjectCls = int + FloatObjectCls = float w_dict = W_DictObject iter = iter fixedview = list diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py index 6e631f7c2d..8c61b7ee3f 100644 --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -1,6 +1,10 @@ +import pytest from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.mapdict import * + +skip_if_no_int_unboxing = pytest.mark.skipif(not ALLOW_UNBOXING_INTS, reason="int unboxing disabled on 32bit") + class Config: class objspace: class std: @@ -11,12 +15,14 @@ space = FakeSpace() space.config = Config class Class(object): - def __init__(self, hasdict=True): + def __init__(self, hasdict=True, allow_unboxing=False): self.hasdict = hasdict if hasdict: self.terminator = DictTerminator(space, self) + self.terminator.devolved_dict_terminator.allow_unboxing = allow_unboxing else: self.terminator = NoDictTerminator(space, self) + self.terminator.allow_unboxing = allow_unboxing def instantiate(self, sp=None): if sp is None: @@ -32,51 +38,77 @@ class ObjectWithoutDict(ObjectWithoutDict): class typedef: hasdict = False + @property + def checkstorage(self): + return [unerase_item(x) for x in self.storage] + + @checkstorage.setter + def checkstorage(self, value): + self.storage = [erase_item(x) for x in value] + + class Object(Object): class typedef: hasdict = False + @property + def 
checkstorage(self): + return [unerase_item(x) for x in self.storage] + + @checkstorage.setter + def checkstorage(self, value): + self.storage = [erase_item(x) for x in value] + + def _check_unboxed_storage_consistency(self): + curr = self._get_mapdict_map() + while not isinstance(curr, UnboxedPlainAttribute): + if isinstance(curr, Terminator): + return + curr = curr.back + assert len(unerase_unboxed(self._mapdict_read_storage(curr.storageindex))) == curr.listindex + 1 + + def test_plain_attribute(): w_cls = "class" aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, - Terminator(space, w_cls))) + Terminator(space, w_cls), 0), 0) assert aa.space is space assert aa.terminator.w_cls is w_cls assert aa.get_terminator() is aa.terminator obj = Object() - obj.map, obj.storage = aa, [10, 20] + obj.map, obj.checkstorage = aa, [10, 20] assert obj.getdictvalue(space, "a") == 10 assert obj.getdictvalue(space, "b") == 20 assert obj.getdictvalue(space, "c") is None obj = Object() - obj.map, obj.storage = aa, [30, 40] + obj.map, obj.checkstorage = aa, [30, 40] obj.setdictvalue(space, "a", 50) - assert obj.storage == [50, 40] + assert obj.checkstorage == [50, 40] assert obj.getdictvalue(space, "a") == 50 obj.setdictvalue(space, "b", 60) - assert obj.storage == [50, 60] + assert obj.checkstorage == [50, 60] assert obj.getdictvalue(space, "b") == 60 - assert aa.length() == 2 + assert aa.storage_needed() == 2 assert aa.get_terminator() is aa.back.back def test_huge_chain(): current = Terminator(space, "cls") for i in range(20000): - current = PlainAttribute(str(i), DICT, current) + current = PlainAttribute(str(i), DICT, current, 0) assert current.find_map_attr("0", DICT).storageindex == 0 def test_search(): - aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, None))) + aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, None), 0), 0) assert aa.search(DICT) is aa assert aa.search(SLOTS_STARTING_FROM) is None assert aa.search(SPECIAL) 
is None - bb = PlainAttribute("C", SPECIAL, PlainAttribute("A", SLOTS_STARTING_FROM, aa)) + bb = PlainAttribute("C", SPECIAL, PlainAttribute("A", SLOTS_STARTING_FROM, aa, 0), 0) assert bb.search(DICT) is aa assert bb.search(SLOTS_STARTING_FROM) is bb.back assert bb.search(SPECIAL) is bb @@ -85,7 +117,7 @@ def test_add_attribute(): cls = Class() obj = cls.instantiate() obj.setdictvalue(space, "a", 10) - assert obj.storage == [10] + assert obj.checkstorage == [10] assert obj.getdictvalue(space, "a") == 10 assert obj.getdictvalue(space, "b") is None assert obj.getdictvalue(space, "c") is None @@ -95,7 +127,7 @@ def test_add_attribute(): assert obj.getdictvalue(space, "c") is None obj.setdictvalue(space, "b", 30) - assert obj.storage == [20, 30] + assert obj.checkstorage == [20, 30] assert obj.getdictvalue(space, "a") == 20 assert obj.getdictvalue(space, "b") == 30 assert obj.getdictvalue(space, "c") is None @@ -121,7 +153,7 @@ def test_add_attribute_limit(): for i in range(1000): obj.setdictvalue(space, str(i), i) # moved to dict (which is the remaining non-slot item) - assert len(obj.storage) == 1 + numslots + assert len(obj.checkstorage) == 1 + numslots for i in range(1000): assert obj.getdictvalue(space, str(i)) == i @@ -133,7 +165,7 @@ def test_add_attribute_limit(): obj = cls.instantiate() for i in range(1000): obj.setslotvalue(i, i) - assert len(obj.storage) == 1000 + assert len(obj.checkstorage) == 1000 for i in range(1000): assert obj.getslotvalue(i) == i @@ -291,18 +323,17 @@ def test_attr_immutability(monkeypatch): obj.setdictvalue(space, "a", 10) obj.setdictvalue(space, "b", 20) obj.setdictvalue(space, "b", 30) - assert obj.storage == [10, 30] + assert obj.checkstorage == [10, 30] assert obj.map.ever_mutated == True assert obj.map.back.ever_mutated == False indices = [] - def _pure_mapdict_read_storage(obj, storageindex): - assert storageindex == 0 - indices.append(storageindex) - return obj._mapdict_read_storage(storageindex) + def _pure_direct_read(obj): 
+ indices.append(0) + return unerase_item(obj._mapdict_read_storage(0)) - obj.map._pure_mapdict_read_storage = _pure_mapdict_read_storage + obj.map.back._pure_direct_read = _pure_direct_read monkeypatch.setattr(jit, "isconstant", lambda c: True) assert obj.getdictvalue(space, "a") == 10 @@ -339,12 +370,12 @@ def test_delete(): obj.setdictvalue(space, "a", 50) obj.setdictvalue(space, "b", 60) obj.setdictvalue(space, "c", 70) - assert obj.storage == [50, 60, 70] + assert obj.checkstorage == [50, 60, 70] res = obj.deldictvalue(space, dattr) assert res s = [50, 60, 70] del s[i] - assert obj.storage == s + assert obj.checkstorage == s obj = c.instantiate() obj.setdictvalue(space, "a", 50) @@ -367,7 +398,7 @@ def test_class(): c2 = Class() obj.setclass(space, c2) assert obj.getclass(space) is c2 - assert obj.storage == [50, 60, 70] + assert obj.checkstorage == [50, 60, 70] def test_special(): from pypy.module._weakref.interp__weakref import WeakrefLifeline @@ -383,7 +414,7 @@ def test_special(): assert obj.getdictvalue(space, "a") == 50 assert obj.getdictvalue(space, "b") == 60 assert obj.getdictvalue(space, "c") == 70 - assert obj.storage == [50, 60, 70, lifeline1] + assert obj.checkstorage == [50, 60, 70, lifeline1] assert obj.getweakref() is lifeline1 obj2 = c.instantiate() @@ -391,7 +422,7 @@ def test_special(): obj2.setdictvalue(space, "b", 160) obj2.setdictvalue(space, "c", 170) obj2.setweakref(space, lifeline2) - assert obj2.storage == [150, 160, 170, lifeline2] + assert obj2.checkstorage == [150, 160, 170, lifeline2] assert obj2.getweakref() is lifeline2 assert obj2.map is obj.map @@ -421,7 +452,7 @@ def test_slots(): assert obj.getslotvalue(a) == 50 assert obj.getslotvalue(b) == 60 assert obj.getslotvalue(c) == 70 - assert obj.storage == [50, 60, 70] + assert obj.checkstorage == [50, 60, 70] obj.setdictvalue(space, "a", 5) obj.setdictvalue(space, "b", 6) @@ -432,7 +463,7 @@ def test_slots(): assert obj.getslotvalue(a) == 50 assert obj.getslotvalue(b) == 60 
assert obj.getslotvalue(c) == 70 - assert obj.storage == [50, 60, 70, 5, 6, 7] + assert obj.checkstorage == [50, 60, 70, 5, 6, 7] obj2 = cls.instantiate() obj2.setslotvalue(a, 501) @@ -441,13 +472,13 @@ def test_slots(): obj2.setdictvalue(space, "a", 51) obj2.setdictvalue(space, "b", 61) obj2.setdictvalue(space, "c", 71) - assert obj2.storage == [501, 601, 701, 51, 61, 71] + assert obj2.checkstorage == [501, 601, 701, 51, 61, 71] assert obj.map is obj2.map assert obj2.getslotvalue(b) == 601 assert obj2.delslotvalue(b) assert obj2.getslotvalue(b) is None - assert obj2.storage == [501, 701, 51, 61, 71] + assert obj2.checkstorage == [501, 701, 51, 61, 71] assert not obj2.delslotvalue(b) @@ -460,7 +491,7 @@ def test_slots_no_dict(): obj.setslotvalue(b, 60) assert obj.getslotvalue(a) == 50 assert obj.getslotvalue(b) == 60 - assert obj.storage == [50, 60] + assert obj.checkstorage == [50, 60] assert not obj.setdictvalue(space, "a", 70) assert obj.getdict(space) is None assert obj.getdictvalue(space, "a") is None @@ -488,7 +519,7 @@ def test_materialize_r_dict(): obj.setdictvalue(space, "a", 5) obj.setdictvalue(space, "b", 6) obj.setdictvalue(space, "c", 7) - assert obj.storage == [50, 60, 70, 5, 6, 7] + assert obj.checkstorage == [50, 60, 70, 5, 6, 7] class FakeDict(W_DictObject): def __init__(self, d): @@ -505,29 +536,271 @@ def test_materialize_r_dict(): assert flag materialize_r_dict(space, obj, d) assert d == {"a": 5, "b": 6, "c": 7} - assert obj.storage == [50, 60, 70, w_d] + assert obj.checkstorage == [50, 60, 70, w_d] -def test_size_prediction(): - for i in range(10): - c = Class() - assert c.terminator.size_estimate() == 0 - for j in range(1000): - obj = c.instantiate() - for a in "abcdefghij"[:i]: - obj.setdictvalue(space, a, 50) - assert c.terminator.size_estimate() == i - for i in range(1, 10): - c = Class() - assert c.terminator.size_estimate() == 0 - for j in range(1000): - obj = c.instantiate() - for a in "abcdefghij"[:i]: - obj.setdictvalue(space, a, 50) - 
obj = c.instantiate() - for a in "klmnopqars": - obj.setdictvalue(space, a, 50) - assert c.terminator.size_estimate() in [(i + 10) // 2, (i + 11) // 2] +# ___________________________________________________________ +# unboxed tests + +def test_unboxed_compute_indices(): + w_cls = "class" + aa = UnboxedPlainAttribute("b", DICT, + PlainAttribute("a", DICT, + Terminator(space, w_cls), 0), 0, + int) + assert aa.storageindex == 1 + assert aa.firstunwrapped + assert aa.listindex == 0 + + c = UnboxedPlainAttribute("c", DICT, aa, 0, int) + assert c.storageindex == 1 + assert c.listindex == 1 + assert not c.firstunwrapped + +def test_unboxed_storage_needed(): + w_cls = "class" + bb = UnboxedPlainAttribute("c", DICT, + Terminator(space, w_cls), 0, + int) + assert bb.storage_needed() == 1 + aa = UnboxedPlainAttribute("b", DICT, + PlainAttribute("a", DICT, + UnboxedPlainAttribute("c", DICT, + Terminator(space, w_cls), 0, + int), 0), 0, + int) + assert aa.storage_needed() == 2 + +@skip_if_no_int_unboxing +def test_unboxed_write_int(): + cls = Class(allow_unboxing=True) + w_obj = cls.instantiate(space) + w_obj.setdictvalue(space, "a", 15) + w_obj.getdictvalue(space, "a") == 15 + assert isinstance(w_obj.map, UnboxedPlainAttribute) + + w_obj.setdictvalue(space, "b", 20) + w_obj.getdictvalue(space, "b") == 20 + w_obj.getdictvalue(space, "a") == 15 + assert isinstance(w_obj.map, UnboxedPlainAttribute) + assert isinstance(w_obj.map.back, UnboxedPlainAttribute) + assert unerase_unboxed(w_obj.storage[0]) == [longlong2float(15), longlong2float(20)] + +def test_unboxed_write_float(): + cls = Class(allow_unboxing=True) + w_obj = cls.instantiate(space) + w_obj.setdictvalue(space, "a", 15.0) + w_obj.getdictvalue(space, "a") == 15.0 + assert isinstance(w_obj.map, UnboxedPlainAttribute) + + w_obj.setdictvalue(space, "b", 20.0) + w_obj.getdictvalue(space, "b") == 20.0 + w_obj.getdictvalue(space, "a") == 15.0 + assert isinstance(w_obj.map, UnboxedPlainAttribute) + assert 
isinstance(w_obj.map.back, UnboxedPlainAttribute) + assert unerase_unboxed(w_obj.storage[0]) == [15.0, 20.0] + +@skip_if_no_int_unboxing +def test_unboxed_write_mixed(): + cls = Class(allow_unboxing=True) + w_obj = cls.instantiate(space) + w_obj.setdictvalue(space, "a", None) + w_obj.setdictvalue(space, "b", 15) + w_obj.setdictvalue(space, "c", 20.1) + w_obj.setdictvalue(space, "d", None) + w_obj.getdictvalue(space, "a") is None + w_obj.getdictvalue(space, "b") == 15 + w_obj.getdictvalue(space, "c") == 20.1 + w_obj.setdictvalue(space, "d", None) + +@skip_if_no_int_unboxing +def test_no_int_unboxing(monkeypatch): + from pypy.objspace.std import mapdict + monkeypatch.setattr(mapdict, "ALLOW_UNBOXING_INTS", False) + cls = Class(allow_unboxing=True) + w_obj = cls.instantiate(space) + w_obj.setdictvalue(space, "a", 15) + assert type(w_obj.map) is PlainAttribute + w_obj.setdictvalue(space, "b", 15.0) + assert type(w_obj.map) is UnboxedPlainAttribute + +def test_unboxed_type_change(): + cls = Class(allow_unboxing=True) + w_obj = cls.instantiate(space) + w_obj.setdictvalue(space, "b", 15.12) + w_obj.setdictvalue(space, "b", "woopsie") + assert w_obj.getdictvalue(space, "b") == "woopsie" + assert type(w_obj.map) is PlainAttribute + assert w_obj.map.terminator.allow_unboxing == False + + w_obj = cls.instantiate(space) + w_obj.setdictvalue(space, "b", 15.12) + # next time we won't unbox + assert type(w_obj.map) is PlainAttribute + +def test_unboxed_type_change_other_object(): + cls = Class(allow_unboxing=True) + w_obj1 = cls.instantiate(space) + w_obj1.setdictvalue(space, "b", 15.12) + w_obj2 = cls.instantiate(space) + w_obj2.setdictvalue(space, "b", 16.12) + assert w_obj1.map is w_obj2.map + assert type(w_obj1.map) is UnboxedPlainAttribute + + # type change + w_obj1.setdictvalue(space, "b", "woopsie") + assert w_obj1.getdictvalue(space, "b") == "woopsie" + assert type(w_obj1.map) is PlainAttribute + assert w_obj1.map.terminator.allow_unboxing == False + + # w_obj2 is 
unaffected so far + assert type(w_obj2.map) is UnboxedPlainAttribute + assert w_obj2.getdictvalue(space, "b") == 16.12 + # now it's switched + assert type(w_obj2.map) is PlainAttribute + # but the value stays of course + assert w_obj2.getdictvalue(space, "b") == 16.12 + +def test_unboxed_mixed_two_different_instances(): + cls = Class(allow_unboxing=True) + w_obj1 = cls.instantiate(space) + w_obj1.setdictvalue(space, "b", 15.12) + + w_obj2 = cls.instantiate(space) + w_obj2.setdictvalue(space, "b", "abc") + + assert w_obj2.map.terminator.allow_unboxing == False + +def test_unboxed_attr_immutability(monkeypatch): + cls = Class(allow_unboxing=True) + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10.12) + obj.setdictvalue(space, "b", 20.12) + obj.setdictvalue(space, "b", 30.12) + assert obj.map.ever_mutated == True + assert obj.map.back.ever_mutated == False + + indices = [] + + def _pure_unboxed_read(obj): + indices.append(0) + return 10.12 + + obj.map.back._pure_unboxed_read = _pure_unboxed_read + monkeypatch.setattr(jit, "isconstant", lambda c: True) + + assert obj.getdictvalue(space, "a") == 10.12 + assert obj.getdictvalue(space, "b") == 30.12 + assert obj.getdictvalue(space, "a") == 10.12 + assert indices == [0, 0] + + obj2 = cls.instantiate() + obj2.setdictvalue(space, "a", 15.12) + obj2.setdictvalue(space, "b", 25.12) + assert obj2.map is obj.map + assert obj2.map.ever_mutated == True + assert obj2.map.back.ever_mutated == False + + # mutating obj2 changes the map + obj2.setdictvalue(space, "a", 50.12) + assert obj2.map.back.ever_mutated == True + assert obj2.map is obj.map + + +def test_unboxed_bug(): + cls = Class(allow_unboxing=True) + w_obj = cls.instantiate(space) + w_obj.setdictvalue(space, "flags", 0.0) + w_obj.setdictvalue(space, "open", []) + w_obj.setdictvalue(space, "groups", 1.0) + w_obj.setdictvalue(space, "groupdict", {}) + w_obj.setdictvalue(space, "lookbehind", 0.0) + + assert w_obj.getdictvalue(space, "flags") == 0.0 + assert 
w_obj.getdictvalue(space, "open") == [] + assert w_obj.getdictvalue(space, "groups") == 1.0 + assert w_obj.getdictvalue(space, "groupdict") == {} + assert w_obj.getdictvalue(space, "lookbehind") == 0.0 + + +def test_unboxed_reorder_add_bug(): + cls = Class(allow_unboxing=True) + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10.0) + obj.setdictvalue(space, "b", 20.0) + obj.setdictvalue(space, "c", 20.0) + + obj2 = cls.instantiate() + obj2.setdictvalue(space, "b", 30.0) + obj2.setdictvalue(space, "c", 40.0) + obj2.setdictvalue(space, "a", 23.0) + + assert obj.map is obj2.map + +def test_unboxed_reorder_add_bug2(): + cls = Class(allow_unboxing=True) + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10.0) + obj.setdictvalue(space, "b", "20") + obj.setdictvalue(space, "c", "20") + + obj2 = cls.instantiate() + obj2.setdictvalue(space, "b", "30") + obj2.setdictvalue(space, "c", "40") + obj2.setdictvalue(space, "a", 23.0) + + assert obj.map is obj2.map + +def test_unbox_reorder_bug3(): + from pypy.objspace.std.mapdict import _make_storage_mixin_size_n + from pypy.objspace.std.objectobject import W_ObjectObject + class objectcls(W_ObjectObject): + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictDictSupport) + objectmodel.import_from_mixin(_make_storage_mixin_size_n(5)) + cls = Class(allow_unboxing=True) + obj = objectcls() + obj.user_setup(space, cls) + obj.setdictvalue(space, "_frame", "frame") # plain 0 + obj.setdictvalue(space, "_is_started", 0.0) # unboxed 1 0 + obj.setdictvalue(space, "func", "func") # plain 2 + obj.setdictvalue(space, "alive", "alive") # plain 3 + obj.setdictvalue(space, "blocked", "blocked") # plain 4 + obj.setdictvalue(space, "_task_id", 1.0) # unboxed 1 1 + obj.setdictvalue(space, "label", "label") # plain 5 + + obj2 = objectcls() + obj2.user_setup(space, cls) + obj2.setdictvalue(space, "_frame", "frame2") # plain 0 + obj2.setdictvalue(space, "_is_started", 5.0) # unboxed 1 0 + 
obj2.setdictvalue(space, "func", "func2") # plain 2 + obj2.setdictvalue(space, "alive", "alive2") # plain 3 + obj2.setdictvalue(space, "blocked", "blocked2") # plain 4 + obj2.setdictvalue(space, "label", "label2") # plain 5 + obj2.setdictvalue(space, "_task_id", 6.0) # reorder + assert obj2.getdictvalue(space, "blocked") == "blocked2" + + +def test_unboxed_insert_different_orders_perm(): + from itertools import permutations + cls = Class(allow_unboxing=True) + seen_maps = {} + for preexisting in ['', 'x', 'xy']: + for i, attributes in enumerate(permutations("abcdef")): + obj = cls.instantiate() + for i, attr in enumerate(preexisting): + obj.setdictvalue(space, attr, str(i*1000)) + key = preexisting + for j, attr in enumerate(attributes): + obj.setdictvalue(space, attr, i*10.0+j) + obj._check_unboxed_storage_consistency() + key = "".join(sorted(key+attr)) + if key in seen_maps: + assert obj.map is seen_maps[key] + else: + seen_maps[key] = obj.map + + print len(seen_maps) # ___________________________________________________________ # dict tests @@ -643,12 +916,12 @@ def test_specialized_class(): obj = objectcls() obj.user_setup(space, cls) obj.setdictvalue(space, "a", w1) - assert obj._value0 is w1 + assert unerase_item(obj._value0) is w1 assert obj.getdictvalue(space, "a") is w1 assert obj.getdictvalue(space, "b") is None assert obj.getdictvalue(space, "c") is None obj.setdictvalue(space, "a", w2) - assert obj._value0 is w2 + assert unerase_item(obj._value0) is w2 assert obj.getdictvalue(space, "a") == w2 assert obj.getdictvalue(space, "b") is None assert obj.getdictvalue(space, "c") is None @@ -666,7 +939,7 @@ def test_specialized_class(): res = obj.deldictvalue(space, "a") assert res - assert obj._value0 is w4 + assert unerase_item(obj._value0) is w4 assert obj.getdictvalue(space, "a") is None assert obj.getdictvalue(space, "b") is w4 assert obj.getdictvalue(space, "c") is None @@ -679,10 +952,45 @@ def test_specialized_class(): assert obj2.getdictvalue(space, 
"b") is w6 assert obj2.map is abmap + +def test_specialized_class_overflow(): + from pypy.objspace.std.mapdict import _make_storage_mixin_size_n + from pypy.objspace.std.objectobject import W_ObjectObject + classes = [_make_storage_mixin_size_n(i) for i in range(2, 10)] + w1 = W_Root() + w2 = W_Root() + w3 = W_Root() + w4 = W_Root() + w5 = W_Root() + w6 = W_Root() + objs = [w1, w2, 4, w3, w4, w5, w6, 6, 12.6] + class objectcls(W_ObjectObject): + objectmodel.import_from_mixin(BaseUserClassMapdict) + objectmodel.import_from_mixin(MapdictDictSupport) + objectmodel.import_from_mixin(_make_storage_mixin_size_n(5)) + cls = Class() + obj = objectcls() + obj.user_setup(space, cls) + for i in range(20): + obj.setdictvalue(space, str(i), objs[i % len(objs)]) + for i in range(20): + assert obj.getdictvalue(space, str(i)) is objs[i % len(objs)] + for i in range(20): + obj.setdictvalue(space, str(i), objs[(i + 1) % len(objs)]) + for i in range(20): + assert obj.getdictvalue(space, str(i)) is objs[(i + 1) % len(objs)] + assert obj._has_storage_list() + for i in range(20): + assert obj.deldictvalue(space, str(i)) + for j in range(i + 1): + assert obj.getdictvalue(space, str(j)) is None + for j in range(i + 1, 20): + assert obj.getdictvalue(space, str(j)) is objs[(j + 1) % len(objs)] + + # ___________________________________________________________ # integration tests -# XXX write more class AppTestWithMapDict(object): @@ -907,6 +1215,29 @@ class AppTestWithMapDict(object): for key in d: assert d[key] == int(key) + def test_bug_iter_checks_map_is_wrong(self): + # obvious in hindsight, but this test shows that checking that the map + # stays the same during a.__dict__ iterations is too strict now + class A(object): + pass + + # an instance with unboxed storage + a = A() + a.x = "a" + a.y = 1 + a.z = "b" + + a1 = A() + a1.x = "a" + a1.y = 1 + a1.z = "b" + a1.y = None # mark the terminator as allow_unboxing = False + + d = a.__dict__ + # reading a.y during iteration changes the map! 
now that the iterators + # store all the attrs anyway, just remove the check + res = list(d.iteritems()) + assert res == [('x', 'a'), ('y', 1), ('z', 'b')] class AppTestWithMapDictAndCounters(object): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py index cb732e5e99..458adf645e 100755 --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -154,7 +154,7 @@ def create_package(basedir, options, _fake=False): ('sqlite3.dll', lib_pypy)] if not options.no__tkinter: tkinter_dir = lib_pypy.join('_tkinter') - win_extras += [('tcl85.dll', tkinter_dir), ('tk85.dll', tkinter_dir)] + win_extras += [('tcl86t.dll', tkinter_dir), ('tk86t.dll', tkinter_dir)] for extra,target_dir in win_extras: p = pypy_c.dirpath().join(extra) @@ -181,15 +181,15 @@ def create_package(basedir, options, _fake=False): # library was created? if not options.no__tkinter: try: - p = pypy_c.dirpath().join('tcl85.dll') + p = pypy_c.dirpath().join('tcl86t.dll') if not p.check(): - p = py.path.local.sysfind('tcl85.dll') + p = py.path.local.sysfind('tcl86t.dll') if p is None: - raise WindowsError("tcl85.dll not found") + raise WindowsError("tcl86t.dll not found") tktcldir = p.dirpath().join('..').join('lib') shutil.copytree(str(tktcldir), str(pypydir.join('tcl'))) except WindowsError: - print("Packaging Tk runtime failed. tk85.dll and tcl85.dll " + print("Packaging Tk runtime failed. tk86t.dll and tcl86t.dll " "found in %s, expecting to find runtime in %s directory " "next to the dlls, as per build " "instructions." 
%(p, tktcldir), file=sys.stderr) diff --git a/requirements.txt b/requirements.txt index 652db52373..a1282d0eb2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ cffi>=1.4.0 # parse log files in rvmprof tests -# vmprof>=0.4.10; 'x86' in platform.machine #skip arm, s390x +vmprof>=0.4.10; 'x86' in platform.machine #skip arm, s390x # hypothesis is used for test generation on untranslated tests hypothesis<4.40 diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py index c8c845e0cb..9216d2a41e 100644 --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -73,16 +73,16 @@ master_doc = 'index' # General information about the project. project = u'RPython' -copyright = u'2016, The PyPy Project' +copyright = u'2021, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '4.0' +version = '7.3' # The full version, including alpha/beta/rc tags. -release = '4.0.0' +release = '7.3.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py index e63b268beb..80a6be7c50 100644 --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -437,10 +437,12 @@ class HeapCache(object): return self._set_flag(box, HF_KNOWN_NULLITY) - def is_nonstandard_virtualizable(self, box): + def is_known_nonstandard_virtualizable(self, box): return self._check_flag(box, HF_NONSTD_VABLE) or self._check_flag(box, HF_SEEN_ALLOCATION) def nonstandard_virtualizables_now_known(self, box): + if isinstance(box, Const): + return self._set_flag(box, HF_NONSTD_VABLE) def is_unescaped(self, box): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py index c87d545f3d..a9445a5f8e 100644 --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -155,6 +155,12 @@ class OptIntBounds(Optimization): b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) b = b1.add_bound(b2) + # NB: the result only gets its bound updated if b has an upper and a + # lower bound. This is important, to do the right thing in the presence + # of overflow. 
Example: + # y = x + 1 where x >= 0 + # here it's tempting to give a bound of y >= 1, but that would be + # wrong, due to wraparound if b.bounded(): r.intersect(b) diff --git a/rpython/jit/metainterp/optimizeopt/intutils.py b/rpython/jit/metainterp/optimizeopt/intutils.py index a63af3df3c..2590aad376 100644 --- a/rpython/jit/metainterp/optimizeopt/intutils.py +++ b/rpython/jit/metainterp/optimizeopt/intutils.py @@ -42,28 +42,46 @@ class IntBound(AbstractInfo): # Returns True if the bound was updated def make_le(self, other): if other.has_upper: - if not self.has_upper or other.upper < self.upper: - self.has_upper = True - self.upper = other.upper - return True + return self.make_le_const(other.upper) + return False + + def make_le_const(self, other): + if not self.has_upper or other < self.upper: + self.has_upper = True + self.upper = other + return True return False def make_lt(self, other): - return self.make_le(other.add(-1)) + if other.has_upper: + return self.make_lt_const(other.upper) + return False + + def make_lt_const(self, other): + try: + other = ovfcheck(other - 1) + except OverflowError: + return False + return self.make_le_const(other) def make_ge(self, other): if other.has_lower: - if not self.has_lower or other.lower > self.lower: - self.has_lower = True - self.lower = other.lower - return True + return self.make_ge_const(other.lower) return False def make_ge_const(self, other): - return self.make_ge(ConstIntBound(other)) + if not self.has_lower or other > self.lower: + self.has_lower = True + self.lower = other + return True + return False def make_gt_const(self, other): - return self.make_gt(ConstIntBound(other)) + try: + other = ovfcheck(other + 1) + except OverflowError: + return False + return self.make_ge_const(other) def make_eq_const(self, intval): self.has_upper = True @@ -72,7 +90,9 @@ class IntBound(AbstractInfo): self.lower = intval def make_gt(self, other): - return self.make_ge(other.add(1)) + if other.has_lower: + return 
self.make_gt_const(other.lower) + return False def is_constant(self): return self.has_upper and self.has_lower and self.lower == self.upper @@ -213,11 +233,11 @@ class IntBound(AbstractInfo): if other.is_constant(): val = other.getint() if val >= 0: # with Python's modulo: 0 <= (x % pos) < pos - r.make_ge(IntBound(0, 0)) - r.make_lt(IntBound(val, val)) + r.make_ge_const(0) + r.make_lt_const(val) else: # with Python's modulo: neg < (x % neg) <= 0 - r.make_gt(IntBound(val, val)) - r.make_le(IntBound(0, 0)) + r.make_gt_const(val) + r.make_le_const(0) return r def lshift_bound(self, other): @@ -254,7 +274,7 @@ class IntBound(AbstractInfo): pos2 = other.known_nonnegative() r = IntUnbounded() if pos1 or pos2: - r.make_ge(IntBound(0, 0)) + r.make_ge_const(0) if pos1: r.make_le(self) if pos2: @@ -269,7 +289,7 @@ class IntBound(AbstractInfo): mostsignificant = self.upper | other.upper r.intersect(IntBound(0, next_pow2_m1(mostsignificant))) else: - r.make_ge(IntBound(0, 0)) + r.make_ge_const(0) return r def contains(self, val): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py index fc4cb8fa37..47b88dbfdf 100644 --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -841,8 +841,6 @@ class OptRewrite(Optimization): self.make_constant_int(op, 0) self.last_emitted_operation = REMOVED return True - # This is Python's integer division: 'x // (2**shift)' can always - # be replaced with 'x >> shift', even for negative values of x if not b2.is_constant(): return False val = b2.getint() @@ -917,6 +915,14 @@ class OptRewrite(Optimization): self.optimizer.pure_from_args(rop.CAST_PTR_TO_INT, [op], op.getarg(0)) return self.emit(op) + def optimize_CONVERT_FLOAT_BYTES_TO_LONGLONG(self, op): + self.optimizer.pure_from_args(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT, [op], op.getarg(0)) + return self.emit(op) + + def optimize_CONVERT_LONGLONG_BYTES_TO_FLOAT(self, op): + 
self.optimizer.pure_from_args(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG, [op], op.getarg(0)) + return self.emit(op) + def optimize_SAME_AS_I(self, op): self.make_equal_to(op, op.getarg(0)) optimize_SAME_AS_R = optimize_SAME_AS_I diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py index ea243168e4..45b1635136 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_intbound.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbound.py @@ -48,13 +48,6 @@ def build_bound_with_contained_number(a, b, c): assert r.contains(b) return r, b -bound_with_contained_number = strategies.builds( - build_bound_with_contained_number, - ints_or_none, - ints_or_none, - ints -) - unbounded = strategies.builds( lambda x: (bound(None, None), int(x)), ints @@ -380,6 +373,21 @@ def test_next_pow2_m1(): @given(bound_with_contained_number, bound_with_contained_number) +def test_make_random(t1, t2): + def d(b): + return b.has_lower, b.lower, b.has_upper, b.upper + b1, n1 = t1 + b2, n2 = t2 + + for meth in [IntBound.make_le, IntBound.make_lt, IntBound.make_ge, IntBound.make_gt]: + b = b1.clone() + meth(b, b2) + data = d(b) + assert not meth(b, b2) + assert data == d(b) # idempotent + + +@given(bound_with_contained_number, bound_with_contained_number) def test_add_bound_random(t1, t2): b1, n1 = t1 b2, n2 = t2 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py index 8f1ce2bfb0..4daf55065d 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -6303,3 +6303,25 @@ class TestOptimizeBasic(BaseTestBasic): i57 = int_or(i51, i52) """ self.optimize_loop(ops, expected) + + def test_convert_float_bytes_to_longlong(self): + ops = """ + [f0, i0] + i1 = convert_float_bytes_to_longlong(f0) + f1 = convert_longlong_bytes_to_float(i1) + escape_f(f1) + + f2 = 
convert_longlong_bytes_to_float(i0) + i2 = convert_float_bytes_to_longlong(f2) + escape_i(i2) + """ + + expected = """ + [f0, i0] + i1 = convert_float_bytes_to_longlong(f0) + escape_f(f0) + + f2 = convert_longlong_bytes_to_float(i0) + escape_i(i0) + """ + self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py index eb402805a3..ead579c98f 100644 --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -917,7 +917,7 @@ class MIFrame(object): def _nonstandard_virtualizable(self, pc, box, fielddescr): # returns True if 'box' is actually not the "standard" virtualizable # that is stored in metainterp.virtualizable_boxes[-1] - if self.metainterp.heapcache.is_nonstandard_virtualizable(box): + if self.metainterp.heapcache.is_known_nonstandard_virtualizable(box): self.metainterp.staticdata.profiler.count_ops(rop.PTR_EQ, Counters.HEAPCACHED_OPS) return True if box is self.metainterp.forced_virtualizable: @@ -1833,7 +1833,7 @@ class MIFrame(object): standard_box = self.metainterp.virtualizable_boxes[-1] if standard_box is vref_box: return vref_box - if self.metainterp.heapcache.is_nonstandard_virtualizable(vref_box): + if self.metainterp.heapcache.is_known_nonstandard_virtualizable(vref_box): self.metainterp.staticdata.profiler.count_ops(rop.PTR_EQ, Counters.HEAPCACHED_OPS) return None eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None, vref_box, standard_box) diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py index c6cd32d6d1..344c7baf4f 100644 --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -103,25 +103,33 @@ class TestHeapCache(object): h = HeapCache() box1 = RefFrontendOp(1) box2 = RefFrontendOp(2) - assert not h.is_nonstandard_virtualizable(box1) - assert not h.is_nonstandard_virtualizable(box2) + assert not h.is_known_nonstandard_virtualizable(box1) + assert not 
h.is_known_nonstandard_virtualizable(box2) h.nonstandard_virtualizables_now_known(box1) - assert h.is_nonstandard_virtualizable(box1) - assert not h.is_nonstandard_virtualizable(box2) + assert h.is_known_nonstandard_virtualizable(box1) + assert not h.is_known_nonstandard_virtualizable(box2) h.reset() - assert not h.is_nonstandard_virtualizable(box1) - assert not h.is_nonstandard_virtualizable(box2) + assert not h.is_known_nonstandard_virtualizable(box1) + assert not h.is_known_nonstandard_virtualizable(box2) + + def test_nonstandard_virtualizable_const(self): + h = HeapCache() + # rare but not impossible situation for some interpreters: we have a + # *constant* nonstandard virtualizable + c_box = ConstPtr(ConstPtr.value) + h.nonstandard_virtualizables_now_known(c_box) # should not crash + assert not h.is_known_nonstandard_virtualizable(c_box) def test_nonstandard_virtualizable_allocation(self): h = HeapCache() box1 = RefFrontendOp(1) h.new(box1) # we've seen the allocation, so it's not the virtualizable - assert h.is_nonstandard_virtualizable(box1) + assert h.is_known_nonstandard_virtualizable(box1) h.reset() - assert not h.is_nonstandard_virtualizable(box1) + assert not h.is_known_nonstandard_virtualizable(box1) def test_heapcache_fields(self): h = HeapCache() diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py index d793f91ef2..d3dd64702f 100644 --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -19,6 +19,8 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.translator.simplify import cleanup_graph from rpython.memory.gctransform.log import log +class GCTransformError(Exception): + pass class GcHighLevelOp(object): def __init__(self, gct, op, index, llops): @@ -236,8 +238,12 @@ class BaseGCTransformer(object): inserted_empty_startblock = True is_borrowed = self.compute_borrowed_vars(graph) - for block in graph.iterblocks(): - self.transform_block(block, 
is_borrowed) + try: + for block in graph.iterblocks(): + self.transform_block(block, is_borrowed) + except GCTransformError as e: + e.args = ('[function %s]: %s' % (graph.name, e.message),) + raise for link, livecounts in self.links_to_split.iteritems(): llops = LowLevelOpList() @@ -519,6 +525,12 @@ class GCTransformer(BaseGCTransformer): def gct_malloc(self, hop, add_flags=None): TYPE = hop.spaceop.result.concretetype.TO + if TYPE._hints.get('never_allocate'): + raise GCTransformError( + "struct %s was marked as @never_allocate but a call to malloc() " + "was found. This probably means that the corresponding class is " + "supposed to be constant-folded away, but for some reason it was not." + % TYPE._name) assert not TYPE._is_varsize() flags = hop.spaceop.args[1].value flavor = flags['flavor'] diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py index 5faa6850fd..37ccdf9cb1 100644 --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -1067,3 +1067,13 @@ def import_from_mixin(M, special_methods=['__init__', '__del__']): target[key] = value if immutable_fields: target['_immutable_fields_'] = target.get('_immutable_fields_', []) + immutable_fields + +def never_allocate(cls): + """ + Class decorator to ensure that a class is NEVER instantiated at runtime. + + Useful e.g for context manager which are expected to be constant-folded + away. 
+ """ + cls._rpython_never_allocate_ = True + return cls diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py index cf88a26ffb..9f83a01ced 100644 --- a/rpython/rlib/rdtoa.py +++ b/rpython/rlib/rdtoa.py @@ -39,7 +39,7 @@ eci = ExternalCompilationInfo( _INT_LIMIT = 0x7ffff000 dg_strtod = rffi.llexternal( - '_PyPy_dg_strtod', [rffi.CCHARP, rffi.CCHARPP], rffi.DOUBLE, + '_PyPy_dg_strtod', [rffi.CONST_CCHARP, rffi.CCHARPP], rffi.DOUBLE, compilation_info=eci, sandboxsafe=True) dg_dtoa = rffi.llexternal( @@ -62,7 +62,7 @@ def strtod(input): # break some tests because this function is used by the GC ll_input, llobj, flag = rffi.get_nonmovingbuffer_ll_final_null(input) try: - result = dg_strtod(ll_input, end_ptr) + result = dg_strtod(rffi.cast(rffi.CONST_CCHARP, ll_input), end_ptr) endpos = (rffi.cast(lltype.Signed, end_ptr[0]) - rffi.cast(lltype.Signed, ll_input)) diff --git a/rpython/rlib/rerased.py b/rpython/rlib/rerased.py index 66666505bd..b27dc3c8ff 100644 --- a/rpython/rlib/rerased.py +++ b/rpython/rlib/rerased.py @@ -81,6 +81,7 @@ def new_erasing_pair(name): identity = ErasingPairIdentity(name) def erase(x): + assert not isinstance(x, Erased) return Erased(x, identity) def unerase(y): diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py index 6bcb9d60c6..5f14528556 100644 --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -7,7 +7,8 @@ from rpython.rlib.objectmodel import ( resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin, - fetch_translated_config, try_inline, delitem_if_value_is, move_to_end) + fetch_translated_config, try_inline, delitem_if_value_is, move_to_end, + never_allocate, dont_inline) from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.test.tool import BaseRtypingTest from 
rpython.rtyper.test.test_llinterp import interpret @@ -851,3 +852,37 @@ def test_import_from_mixin_immutable_fields(): import_from_mixin(C) assert BA._immutable_fields_ == ['c', 'a'] + + +def test_never_allocate(): + from rpython.translator.c.test.test_genc import compile as c_compile + from rpython.memory.gctransform.transform import GCTransformError + + @never_allocate + class MyClass(object): + def __init__(self, x): + self.x = x + 1 + + @dont_inline + def allocate_MyClass(x): + return MyClass(x) + + def f(x): + # this fails because the allocation of MyClass can't be + # constant-folded (because it's inside a @dont_inline function) + return allocate_MyClass(x).x + + def g(x): + # this works because MyClass is constant folded, so the GC transformer + # never sees a malloc(MyClass) + return MyClass(x).x + + # test what happens if MyClass escapes + with py.test.raises(GCTransformError) as exc: + c_compile(f, [int]) + assert '[function allocate_MyClass]' in str(exc) + assert 'was marked as @never_allocate' in str(exc) + + # test that it works in the "normal" case + compiled_g = c_compile(g, [int]) + assert compiled_g(41) == 42 diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py index d9fdd322d7..efa6f11a2f 100644 --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -374,9 +374,9 @@ def build_new_ctypes_type(T, delayed_builders): elif isinstance(T, lltype.OpaqueType): if T is lltype.RuntimeTypeInfo: return ctypes.c_char * 2 - if T.hints.get('external', None) != 'C': + if T._hints.get('external', None) != 'C': raise TypeError("%s is not external" % T) - return ctypes.c_char * T.hints['getsize']() + return ctypes.c_char * T._hints['getsize']() else: _setup_ctypes_cache() if T in _ctypes_cache: @@ -934,7 +934,7 @@ def lltype2ctypes(llobj, normalize=True): convert_array(container) elif isinstance(T.TO, lltype.OpaqueType): if T.TO != lltype.RuntimeTypeInfo: - cbuf = 
ctypes.create_string_buffer(T.TO.hints['getsize']()) + cbuf = ctypes.create_string_buffer(T.TO._hints['getsize']()) else: cbuf = ctypes.create_string_buffer("\x00") cbuf = ctypes.cast(cbuf, ctypes.c_void_p) diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py index eaa51f1edc..c75fd90e7a 100644 --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -584,7 +584,7 @@ class OpaqueType(ContainerType): """ self.tag = tag self.__name__ = tag - self.hints = frozendict(hints) + self._hints = frozendict(hints) def __str__(self): return "%s (opaque)" % self.tag diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py index 59d977c3ee..8196f19dff 100644 --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -1049,6 +1049,13 @@ def constcharpsize2str(cp, size): return charpsize2str(cp, size) constcharpsize2str._annenforceargs_ = [lltype.SomePtr(CONST_CCHARP), int] +def str2constcharp(s): + """ + Like str2charp, but returns a CONST_CCHARP instead + """ + cp = str2charp(s) + return cast(CONST_CCHARP, cp) +str2constcharp._annenforceargs_ = [str] @not_rpython def _deprecated_get_nonmovingbuffer(*args): diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py index 0d09123617..a98a04649b 100644 --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -527,6 +527,9 @@ class InstanceRepr(Repr): if hints is None: hints = {} hints = self._check_for_immutable_hints(hints) + if self.classdef.classdesc.get_param('_rpython_never_allocate_'): + hints['never_allocate'] = True + kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py index f21f661ba3..bf0ced6418 100644 --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -84,7 +84,7 @@ class LowLevelDatabase(object): node = BareBoneArrayDefNode(self, T, varlength) else: node = 
ArrayDefNode(self, T, varlength) - elif isinstance(T, OpaqueType) and T.hints.get("render_structure", False): + elif isinstance(T, OpaqueType) and T._hints.get("render_structure", False): node = ExtTypeOpaqueDefNode(self, T) elif T == WeakRef: REALT = self.gcpolicy.get_real_weakref_type() @@ -102,8 +102,8 @@ class LowLevelDatabase(object): return '%s @' % T.c_name elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and - T.TO.hints.get('c_pointer_typedef') is not None): - return '%s @' % T.TO.hints['c_pointer_typedef'] + T.TO._hints.get('c_pointer_typedef') is not None): + return '%s @' % T.TO._hints['c_pointer_typedef'] try: node = self.gettypedefnode(T.TO) except NoCorrespondingNode: @@ -134,13 +134,13 @@ class LowLevelDatabase(object): elif isinstance(T, OpaqueType): if T == RuntimeTypeInfo: return self.gcpolicy.rtti_type() - elif T.hints.get("render_structure", False): + elif T._hints.get("render_structure", False): node = self.gettypedefnode(T, varlength=varlength) if who_asks is not None: who_asks.dependencies.add(node) return 'struct %s @' % node.name - elif T.hints.get('external', None) == 'C': - return '%s @' % T.hints['c_name'] + elif T._hints.get('external', None) == 'C': + return '%s @' % T._hints['c_name'] else: #raise Exception("don't know about opaque type %r" % (T,)) return 'struct %s @' % ( @@ -182,7 +182,7 @@ class LowLevelDatabase(object): return PrimitiveName[T](obj, self) elif isinstance(T, Ptr): if (isinstance(T.TO, OpaqueType) and - T.TO.hints.get('c_pointer_typedef') is not None): + T.TO._hints.get('c_pointer_typedef') is not None): if obj._obj is not None: value = rffi.cast(rffi.SSIZE_T, obj) return '((%s) %s)' % (cdecl(self.gettype(T), ''), diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py index 91458b643c..7898be2bdb 100644 --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -947,7 +947,7 @@ class ExtType_OpaqueNode(ContainerNode): def opaquenode_factory(db, T, obj): if T == 
RuntimeTypeInfo: return db.gcpolicy.rtti_node_factory()(db, T, obj) - if T.hints.get("render_structure", False): + if T._hints.get("render_structure", False): return ExtType_OpaqueNode(db, T, obj) raise Exception("don't know about %r" % (T,)) |