diff --git a/Android/android.py b/Android/android.py index 25bb4ca70b581f..d1a10be776ed16 100755 --- a/Android/android.py +++ b/Android/android.py @@ -29,6 +29,7 @@ ANDROID_DIR.name == "Android" and (PYTHON_DIR / "pyconfig.h.in").exists() ) +ENV_SCRIPT = ANDROID_DIR / "android-env.sh" TESTBED_DIR = ANDROID_DIR / "testbed" CROSS_BUILD_DIR = PYTHON_DIR / "cross-build" @@ -129,12 +130,11 @@ def android_env(host): sysconfig_filename = next(sysconfig_files).name host = re.fullmatch(r"_sysconfigdata__android_(.+).py", sysconfig_filename)[1] - env_script = ANDROID_DIR / "android-env.sh" env_output = subprocess.run( f"set -eu; " f"HOST={host}; " f"PREFIX={prefix}; " - f". {env_script}; " + f". {ENV_SCRIPT}; " f"export", check=True, shell=True, capture_output=True, encoding='utf-8', ).stdout @@ -151,7 +151,7 @@ def android_env(host): env[key] = value if not env: - raise ValueError(f"Found no variables in {env_script.name} output:\n" + raise ValueError(f"Found no variables in {ENV_SCRIPT.name} output:\n" + env_output) return env @@ -281,15 +281,30 @@ def clean_all(context): def setup_ci(): - # https://github.blog/changelog/2024-04-02-github-actions-hardware-accelerated-android-virtualization-now-available/ - if "GITHUB_ACTIONS" in os.environ and platform.system() == "Linux": - run( - ["sudo", "tee", "/etc/udev/rules.d/99-kvm4all.rules"], - input='KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"\n', - text=True, - ) - run(["sudo", "udevadm", "control", "--reload-rules"]) - run(["sudo", "udevadm", "trigger", "--name-match=kvm"]) + if "GITHUB_ACTIONS" in os.environ: + # Enable emulator hardware acceleration + # (https://github.blog/changelog/2024-04-02-github-actions-hardware-accelerated-android-virtualization-now-available/). + if platform.system() == "Linux": + run( + ["sudo", "tee", "/etc/udev/rules.d/99-kvm4all.rules"], + input='KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"\n', + text=True, + ) + run(["sudo", "udevadm", "control", "--reload-rules"]) + run(["sudo", "udevadm", "trigger", "--name-match=kvm"]) + + # Free up disk space by deleting unused versions of the NDK + # (https://github.com/freakboy3742/pyspamsum/pull/108). + for line in ENV_SCRIPT.read_text().splitlines(): + if match := re.fullmatch(r"ndk_version=(.+)", line): + ndk_version = match[1] + break + else: + raise ValueError(f"Failed to find NDK version in {ENV_SCRIPT.name}") + + for item in (android_home / "ndk").iterdir(): + if item.name[0].isdigit() and item.name != ndk_version: + delete_glob(item) def setup_sdk(): diff --git a/Android/testbed/app/build.gradle.kts b/Android/testbed/app/build.gradle.kts index 4de628a279ca3f..14d43d8c4d5c42 100644 --- a/Android/testbed/app/build.gradle.kts +++ b/Android/testbed/app/build.gradle.kts @@ -79,7 +79,7 @@ android { val androidEnvFile = file("../../android-env.sh").absoluteFile namespace = "org.python.testbed" - compileSdk = 34 + compileSdk = 35 defaultConfig { applicationId = "org.python.testbed" @@ -92,7 +92,7 @@ android { } throw GradleException("Failed to find API level in $androidEnvFile") } - targetSdk = 34 + targetSdk = 35 versionCode = 1 versionName = "1.0" diff --git a/Doc/c-api/set.rst b/Doc/c-api/set.rst index cba823aa027bd6..09c0fb6b9c5f23 100644 --- a/Doc/c-api/set.rst +++ b/Doc/c-api/set.rst @@ -147,7 +147,7 @@ subtypes but not for instances of :class:`frozenset` or its subtypes. Return ``1`` if found and removed, ``0`` if not found (no action taken), and ``-1`` if an error is encountered. 
Does not raise :exc:`KeyError` for missing keys. Raise a - :exc:`TypeError` if the *key* is unhashable. Unlike the Python :meth:`~frozenset.discard` + :exc:`TypeError` if the *key* is unhashable. Unlike the Python :meth:`~set.discard` method, this function does not automatically convert unhashable sets into temporary frozensets. Raise :exc:`SystemError` if *set* is not an instance of :class:`set` or its subtype. diff --git a/Doc/library/readline.rst b/Doc/library/readline.rst index 75db832c546b64..780cc77340366a 100644 --- a/Doc/library/readline.rst +++ b/Doc/library/readline.rst @@ -246,6 +246,15 @@ Startup hooks if Python was compiled for a version of the library that supports it. +.. function:: get_pre_input_hook() + + Get the current pre-input hook function, or ``None`` if no pre-input hook + function has been set. This function only exists if Python was compiled + for a version of the library that supports it. + + .. versionadded:: next + + .. _readline-completion: Completion diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst index 086da1a705c30f..3899e5b59d8852 100644 --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -4826,7 +4826,7 @@ other sequence-like behavior. There are currently two built-in set types, :class:`set` and :class:`frozenset`. The :class:`set` type is mutable --- the contents can be changed using methods -like :meth:`add ` and :meth:`remove `. +like :meth:`~set.add` and :meth:`~set.remove`. Since it is mutable, it has no hash value and cannot be used as either a dictionary key or as an element of another set. The :class:`frozenset` type is immutable and :term:`hashable` --- @@ -4848,164 +4848,172 @@ The constructors for both classes work the same: objects. If *iterable* is not specified, a new empty set is returned. - Sets can be created by several means: +Sets can be created by several means: - * Use a comma-separated list of elements within braces: ``{'jack', 'sjoerd'}`` - * Use a set comprehension: ``{c for c in 'abracadabra' if c not in 'abc'}`` - * Use the type constructor: ``set()``, ``set('foobar')``, ``set(['a', 'b', 'foo'])`` +* Use a comma-separated list of elements within braces: ``{'jack', 'sjoerd'}`` +* Use a set comprehension: ``{c for c in 'abracadabra' if c not in 'abc'}`` +* Use the type constructor: ``set()``, ``set('foobar')``, ``set(['a', 'b', 'foo'])`` - Instances of :class:`set` and :class:`frozenset` provide the following - operations: +Instances of :class:`set` and :class:`frozenset` provide the following +operations: - .. describe:: len(s) +.. describe:: len(s) - Return the number of elements in set *s* (cardinality of *s*). + Return the number of elements in set *s* (cardinality of *s*). - .. describe:: x in s +.. describe:: x in s - Test *x* for membership in *s*. + Test *x* for membership in *s*. - .. describe:: x not in s +.. describe:: x not in s - Test *x* for non-membership in *s*. + Test *x* for non-membership in *s*. - .. method:: isdisjoint(other, /) +.. method:: frozenset.isdisjoint(other, /) + set.isdisjoint(other, /) - Return ``True`` if the set has no elements in common with *other*. Sets are - disjoint if and only if their intersection is the empty set. + Return ``True`` if the set has no elements in common with *other*. Sets are + disjoint if and only if their intersection is the empty set. - .. method:: issubset(other, /) - set <= other +.. method:: frozenset.issubset(other, /) + set.issubset(other, /) +.. describe:: set <= other - Test whether every element in the set is in *other*. 
+ Test whether every element in the set is in *other*. - .. method:: set < other +.. describe:: set < other - Test whether the set is a proper subset of *other*, that is, - ``set <= other and set != other``. + Test whether the set is a proper subset of *other*, that is, + ``set <= other and set != other``. - .. method:: issuperset(other, /) - set >= other +.. method:: frozenset.issuperset(other, /) + set.issuperset(other, /) +.. describe:: set >= other - Test whether every element in *other* is in the set. + Test whether every element in *other* is in the set. - .. method:: set > other +.. describe:: set > other - Test whether the set is a proper superset of *other*, that is, ``set >= - other and set != other``. + Test whether the set is a proper superset of *other*, that is, ``set >= + other and set != other``. - .. method:: union(*others) - set | other | ... +.. method:: frozenset.union(*others) + set.union(*others) +.. describe:: set | other | ... - Return a new set with elements from the set and all others. + Return a new set with elements from the set and all others. - .. method:: intersection(*others) - set & other & ... +.. method:: frozenset.intersection(*others) + set.intersection(*others) +.. describe:: set & other & ... - Return a new set with elements common to the set and all others. + Return a new set with elements common to the set and all others. - .. method:: difference(*others) - set - other - ... +.. method:: frozenset.difference(*others) + set.difference(*others) +.. describe:: set - other - ... - Return a new set with elements in the set that are not in the others. + Return a new set with elements in the set that are not in the others. - .. method:: symmetric_difference(other, /) - set ^ other +.. method:: frozenset.symmetric_difference(other, /) + set.symmetric_difference(other, /) +.. describe:: set ^ other - Return a new set with elements in either the set or *other* but not both. + Return a new set with elements in either the set or *other* but not both. - .. method:: copy() +.. method:: frozenset.copy() + set.copy() - Return a shallow copy of the set. + Return a shallow copy of the set. - Note, the non-operator versions of :meth:`union`, :meth:`intersection`, - :meth:`difference`, :meth:`symmetric_difference`, :meth:`issubset`, and - :meth:`issuperset` methods will accept any iterable as an argument. In - contrast, their operator based counterparts require their arguments to be - sets. This precludes error-prone constructions like ``set('abc') & 'cbs'`` - in favor of the more readable ``set('abc').intersection('cbs')``. +Note, the non-operator versions of :meth:`~frozenset.union`, +:meth:`~frozenset.intersection`, :meth:`~frozenset.difference`, :meth:`~frozenset.symmetric_difference`, :meth:`~frozenset.issubset`, and +:meth:`~frozenset.issuperset` methods will accept any iterable as an argument. In +contrast, their operator based counterparts require their arguments to be +sets. This precludes error-prone constructions like ``set('abc') & 'cbs'`` +in favor of the more readable ``set('abc').intersection('cbs')``. - Both :class:`set` and :class:`frozenset` support set to set comparisons. Two - sets are equal if and only if every element of each set is contained in the - other (each is a subset of the other). A set is less than another set if and - only if the first set is a proper subset of the second set (is a subset, but - is not equal). 
A set is greater than another set if and only if the first set - is a proper superset of the second set (is a superset, but is not equal). +Both :class:`set` and :class:`frozenset` support set to set comparisons. Two +sets are equal if and only if every element of each set is contained in the +other (each is a subset of the other). A set is less than another set if and +only if the first set is a proper subset of the second set (is a subset, but +is not equal). A set is greater than another set if and only if the first set +is a proper superset of the second set (is a superset, but is not equal). - Instances of :class:`set` are compared to instances of :class:`frozenset` - based on their members. For example, ``set('abc') == frozenset('abc')`` - returns ``True`` and so does ``set('abc') in set([frozenset('abc')])``. +Instances of :class:`set` are compared to instances of :class:`frozenset` +based on their members. For example, ``set('abc') == frozenset('abc')`` +returns ``True`` and so does ``set('abc') in set([frozenset('abc')])``. - The subset and equality comparisons do not generalize to a total ordering - function. For example, any two nonempty disjoint sets are not equal and are not - subsets of each other, so *all* of the following return ``False``: ``ab``. +The subset and equality comparisons do not generalize to a total ordering +function. For example, any two nonempty disjoint sets are not equal and are not +subsets of each other, so *all* of the following return ``False``: ``ab``. - Since sets only define partial ordering (subset relationships), the output of - the :meth:`list.sort` method is undefined for lists of sets. +Since sets only define partial ordering (subset relationships), the output of +the :meth:`list.sort` method is undefined for lists of sets. - Set elements, like dictionary keys, must be :term:`hashable`. +Set elements, like dictionary keys, must be :term:`hashable`. - Binary operations that mix :class:`set` instances with :class:`frozenset` - return the type of the first operand. For example: ``frozenset('ab') | - set('bc')`` returns an instance of :class:`frozenset`. +Binary operations that mix :class:`set` instances with :class:`frozenset` +return the type of the first operand. For example: ``frozenset('ab') | +set('bc')`` returns an instance of :class:`frozenset`. - The following table lists operations available for :class:`set` that do not - apply to immutable instances of :class:`frozenset`: +The following table lists operations available for :class:`set` that do not +apply to immutable instances of :class:`frozenset`: - .. method:: update(*others) - set |= other | ... +.. method:: set.update(*others) +.. describe:: set |= other | ... - Update the set, adding elements from all others. + Update the set, adding elements from all others. - .. method:: intersection_update(*others) - set &= other & ... +.. method:: set.intersection_update(*others) +.. describe:: set &= other & ... - Update the set, keeping only elements found in it and all others. + Update the set, keeping only elements found in it and all others. - .. method:: difference_update(*others) - set -= other | ... +.. method:: set.difference_update(*others) +.. describe:: set -= other | ... - Update the set, removing elements found in others. + Update the set, removing elements found in others. - .. method:: symmetric_difference_update(other, /) - set ^= other +.. method:: set.symmetric_difference_update(other, /) +.. 
describe:: set ^= other - Update the set, keeping only elements found in either set, but not in both. + Update the set, keeping only elements found in either set, but not in both. - .. method:: add(elem, /) +.. method:: set.add(elem, /) - Add element *elem* to the set. + Add element *elem* to the set. - .. method:: remove(elem, /) +.. method:: set.remove(elem, /) - Remove element *elem* from the set. Raises :exc:`KeyError` if *elem* is - not contained in the set. + Remove element *elem* from the set. Raises :exc:`KeyError` if *elem* is + not contained in the set. - .. method:: discard(elem, /) +.. method:: set.discard(elem, /) - Remove element *elem* from the set if it is present. + Remove element *elem* from the set if it is present. - .. method:: pop() +.. method:: set.pop() - Remove and return an arbitrary element from the set. Raises - :exc:`KeyError` if the set is empty. + Remove and return an arbitrary element from the set. Raises + :exc:`KeyError` if the set is empty. - .. method:: clear() +.. method:: set.clear() - Remove all elements from the set. + Remove all elements from the set. - Note, the non-operator versions of the :meth:`update`, - :meth:`intersection_update`, :meth:`difference_update`, and - :meth:`symmetric_difference_update` methods will accept any iterable as an - argument. +Note, the non-operator versions of the :meth:`~set.update`, +:meth:`~set.intersection_update`, :meth:`~set.difference_update`, and +:meth:`~set.symmetric_difference_update` methods will accept any iterable as an +argument. - Note, the *elem* argument to the :meth:`~object.__contains__`, - :meth:`remove`, and - :meth:`discard` methods may be a set. To support searching for an equivalent - frozenset, a temporary one is created from *elem*. +Note, the *elem* argument to the :meth:`~object.__contains__`, +:meth:`~set.remove`, and +:meth:`~set.discard` methods may be a set. To support searching for an equivalent +frozenset, a temporary one is created from *elem*. .. _typesmapping: diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst index ebadbc215a0eed..5f79c6fe8f50ff 100644 --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -449,7 +449,7 @@ Sets These represent a mutable set. They are created by the built-in :func:`set` constructor and can be modified afterwards by several methods, such as - :meth:`add `. + :meth:`~set.add`. Frozen sets diff --git a/Doc/whatsnew/2.3.rst b/Doc/whatsnew/2.3.rst index b7e4e73f4ce4aa..f43692b3dce9e8 100644 --- a/Doc/whatsnew/2.3.rst +++ b/Doc/whatsnew/2.3.rst @@ -66,7 +66,7 @@ Here's a simple example:: The union and intersection of sets can be computed with the :meth:`~frozenset.union` and :meth:`~frozenset.intersection` methods; an alternative notation uses the bitwise operators ``&`` and ``|``. Mutable sets also have in-place versions of these methods, -:meth:`!union_update` and :meth:`~frozenset.intersection_update`. :: +:meth:`!union_update` and :meth:`~set.intersection_update`. :: >>> S1 = sets.Set([1,2,3]) >>> S2 = sets.Set([4,5,6]) @@ -87,7 +87,7 @@ It's also possible to take the symmetric difference of two sets. This is the set of all elements in the union that aren't in the intersection. Another way of putting it is that the symmetric difference contains all elements that are in exactly one set. Again, there's an alternative notation (``^``), and an -in-place version with the ungainly name :meth:`~frozenset.symmetric_difference_update`. :: +in-place version with the ungainly name :meth:`~set.symmetric_difference_update`. 
:: >>> S1 = sets.Set([1,2,3,4]) >>> S2 = sets.Set([3,4,5,6]) diff --git a/Include/pymacro.h b/Include/pymacro.h index 857cdf12db9bf2..7ecce44a0d2a42 100644 --- a/Include/pymacro.h +++ b/Include/pymacro.h @@ -116,6 +116,12 @@ /* Absolute value of the number x */ #define Py_ABS(x) ((x) < 0 ? -(x) : (x)) +/* Safer implementation that avoids an undefined behavior for the minimal + value of the signed integer type if its absolute value is larger than + the maximal value of the signed integer type (in the two's complement + representations, which is common). + */ +#define _Py_ABS_CAST(T, x) ((x) >= 0 ? ((T) (x)) : ((T) (((T) -((x) + 1)) + 1u))) #define _Py_XSTRINGIFY(x) #x diff --git a/Lib/argparse.py b/Lib/argparse.py index 55ecdadd8c9398..10393b6a02b0be 100644 --- a/Lib/argparse.py +++ b/Lib/argparse.py @@ -166,7 +166,6 @@ def __init__( indent_increment=2, max_help_position=24, width=None, - color=True, ): # default setting for width if width is None: @@ -174,7 +173,6 @@ def __init__( width = shutil.get_terminal_size().columns width -= 2 - self._set_color(color) self._prog = prog self._indent_increment = indent_increment self._max_help_position = min(max_help_position, @@ -1570,8 +1568,8 @@ def add_argument(self, *args, **kwargs): f'instance of it must be passed') # raise an error if the metavar does not match the type - if hasattr(self, "_get_formatter"): - formatter = self._get_formatter() + if hasattr(self, "_get_validation_formatter"): + formatter = self._get_validation_formatter() try: formatter._format_args(action, None) except TypeError: @@ -1765,8 +1763,8 @@ def _handle_conflict_resolve(self, action, conflicting_actions): action.container._remove_action(action) def _check_help(self, action): - if action.help and hasattr(self, "_get_formatter"): - formatter = self._get_formatter() + if action.help and hasattr(self, "_get_validation_formatter"): + formatter = self._get_validation_formatter() try: formatter._expand_help(action) except (ValueError, TypeError, KeyError) as exc: @@ -1921,6 +1919,9 @@ def __init__(self, self.suggest_on_error = suggest_on_error self.color = color + # Cached formatter for validation (avoids repeated _set_color calls) + self._cached_formatter = None + add_group = self.add_argument_group self._positionals = add_group(_('positional arguments')) self._optionals = add_group(_('options')) @@ -2752,6 +2753,13 @@ def _get_formatter(self): formatter._set_color(self.color) return formatter + def _get_validation_formatter(self): + # Return cached formatter for read-only validation operations + # (_expand_help and _format_args). Avoids repeated slow _set_color calls. + if self._cached_formatter is None: + self._cached_formatter = self._get_formatter() + return self._cached_formatter + # ===================== # Help-printing methods # ===================== diff --git a/Lib/pickle.py b/Lib/pickle.py index 729c215514ad24..f3025776623d2c 100644 --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -189,6 +189,11 @@ def __init__(self, value): __all__.extend(x for x in dir() if x.isupper() and not x.startswith('_')) +# Data larger than this will be read in chunks, to prevent extreme +# overallocation. 
+_MIN_READ_BUF_SIZE = (1 << 20) + + class _Framer: _FRAME_SIZE_MIN = 4 @@ -287,7 +292,7 @@ def read(self, n): "pickle exhausted before end of frame") return data else: - return self.file_read(n) + return self._chunked_file_read(n) def readline(self): if self.current_frame: @@ -302,11 +307,23 @@ def readline(self): else: return self.file_readline() + def _chunked_file_read(self, size): + cursize = min(size, _MIN_READ_BUF_SIZE) + b = self.file_read(cursize) + while cursize < size and len(b) == cursize: + delta = min(cursize, size - cursize) + b += self.file_read(delta) + cursize += delta + return b + def load_frame(self, frame_size): if self.current_frame and self.current_frame.read() != b'': raise UnpicklingError( "beginning of a new frame before end of current frame") - self.current_frame = io.BytesIO(self.file_read(frame_size)) + data = self._chunked_file_read(frame_size) + if len(data) < frame_size: + raise EOFError + self.current_frame = io.BytesIO(data) # Tools used for pickling. @@ -1496,12 +1513,17 @@ def load_binbytes8(self): dispatch[BINBYTES8[0]] = load_binbytes8 def load_bytearray8(self): - len, = unpack(' maxsize: + size, = unpack(' maxsize: raise UnpicklingError("BYTEARRAY8 exceeds system's maximum size " "of %d bytes" % maxsize) - b = bytearray(len) - self.readinto(b) + cursize = min(size, _MIN_READ_BUF_SIZE) + b = bytearray(cursize) + if self.readinto(b) == cursize: + while cursize < size and len(b) == cursize: + delta = min(cursize, size - cursize) + b += self.read(delta) + cursize += delta self.append(b) dispatch[BYTEARRAY8[0]] = load_bytearray8 diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index e3663e44546ded..4e3468bfcde9c3 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -74,6 +74,15 @@ def count_opcode(code, pickle): def identity(x): return x +def itersize(start, stop): + # Produce geometrical increasing sequence from start to stop + # (inclusively) for tests. + size = start + while size < stop: + yield size + size <<= 1 + yield stop + class UnseekableIO(io.BytesIO): def peek(self, *args): @@ -853,9 +862,8 @@ def assert_is_copy(self, obj, objcopy, msg=None): self.assertEqual(getattr(obj, slot, None), getattr(objcopy, slot, None), msg=msg) - def check_unpickling_error(self, errors, data): - with self.subTest(data=data), \ - self.assertRaises(errors): + def check_unpickling_error_strict(self, errors, data): + with self.assertRaises(errors): try: self.loads(data) except BaseException as exc: @@ -864,6 +872,10 @@ def check_unpickling_error(self, errors, data): (data, exc.__class__.__name__, exc)) raise + def check_unpickling_error(self, errors, data): + with self.subTest(data=data): + self.check_unpickling_error_strict(errors, data) + def test_load_from_data0(self): self.assert_is_copy(self._testdata, self.loads(DATA0)) @@ -1150,6 +1162,155 @@ def test_negative_32b_binput(self): dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.' self.check_unpickling_error(ValueError, dumped) + def test_too_large_put(self): + # Test that PUT with large id does not cause allocation of + # too large memo table. The C implementation uses a dict-based memo + # for sparse indices (when idx > memo_len * 2) instead of allocating + # a massive array. This test verifies large sparse indices work without + # causing memory exhaustion. + # + # The following simple pickle creates an empty list, memoizes it + # using a large index, then loads it back on the stack, builds + # a tuple containing 2 identical empty lists and returns it. 
+ data = lambda n: (b'((lp' + str(n).encode() + b'\n' + + b'g' + str(n).encode() + b'\nt.') + # 0: ( MARK + # 1: ( MARK + # 2: l LIST (MARK at 1) + # 3: p PUT 1000000000000 + # 18: g GET 1000000000000 + # 33: t TUPLE (MARK at 0) + # 34: . STOP + for idx in [10**6, 10**9, 10**12]: + if idx > sys.maxsize: + continue + self.assertEqual(self.loads(data(idx)), ([],)*2) + + def test_too_large_long_binput(self): + # Test that LONG_BINPUT with large id does not cause allocation of + # too large memo table. The C implementation uses a dict-based memo + # for sparse indices (when idx > memo_len * 2) instead of allocating + # a massive array. This test verifies large sparse indices work without + # causing memory exhaustion. + # + # The following simple pickle creates an empty list, memoizes it + # using a large index, then loads it back on the stack, builds + # a tuple containing 2 identical empty lists and returns it. + data = lambda n: (b'(]r' + struct.pack(' sys.maxsize')) + + def test_truncated_large_binunicode8(self): + data = lambda size: b'\x8d' + struct.pack('> 1 + n = 1 << 100 + while n: + for expected in (-n, -n+1, n-1, n): + self.helper(expected) + n = n >> 1 def test_int64(self): # Simulate int marshaling with TYPE_INT64. diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py index 64f440f180bbf0..1bd58eb6408833 100644 --- a/Lib/test/test_memoryview.py +++ b/Lib/test/test_memoryview.py @@ -600,6 +600,25 @@ def test_memoryview_hex(self): m2 = m1[::-1] self.assertEqual(m2.hex(), '30' * 200000) + def test_memoryview_hex_separator(self): + x = bytes(range(97, 102)) + m1 = memoryview(x) + m2 = m1[::-1] + self.assertEqual(m2.hex(':'), '65:64:63:62:61') + self.assertEqual(m2.hex(':', 2), '65:6463:6261') + self.assertEqual(m2.hex(':', -2), '6564:6362:61') + self.assertEqual(m2.hex(sep=':', bytes_per_sep=2), '65:6463:6261') + self.assertEqual(m2.hex(sep=':', bytes_per_sep=-2), '6564:6362:61') + for bytes_per_sep in 5, -5, 2**31-1, -(2**31-1): + with self.subTest(bytes_per_sep=bytes_per_sep): + self.assertEqual(m2.hex(':', bytes_per_sep), '6564636261') + for bytes_per_sep in 2**31, -2**31, 2**1000, -2**1000: + with self.subTest(bytes_per_sep=bytes_per_sep): + try: + self.assertEqual(m2.hex(':', bytes_per_sep), '6564636261') + except OverflowError: + pass + def test_copy(self): m = memoryview(b'abc') with self.assertRaises(TypeError): diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py index e2384b33345a45..22c70327fb061d 100644 --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -59,6 +59,8 @@ class PyUnpicklerTests(AbstractUnpickleTests, unittest.TestCase): truncated_errors = (pickle.UnpicklingError, EOFError, AttributeError, ValueError, struct.error, IndexError, ImportError) + truncated_data_error = (EOFError, '') + size_overflow_error = (pickle.UnpicklingError, 'exceeds') def loads(self, buf, **kwds): f = io.BytesIO(buf) @@ -103,6 +105,8 @@ class InMemoryPickleTests(AbstractPickleTests, AbstractUnpickleTests, truncated_errors = (pickle.UnpicklingError, EOFError, AttributeError, ValueError, struct.error, IndexError, ImportError) + truncated_data_error = ((pickle.UnpicklingError, EOFError), '') + size_overflow_error = ((OverflowError, pickle.UnpicklingError), 'exceeds') def dumps(self, arg, protocol=None, **kwargs): return pickle.dumps(arg, protocol, **kwargs) @@ -375,6 +379,8 @@ class CUnpicklerTests(PyUnpicklerTests): unpickler = _pickle.Unpickler bad_stack_errors = (pickle.UnpicklingError,) truncated_errors = (pickle.UnpicklingError,) + 
truncated_data_error = (pickle.UnpicklingError, 'truncated') + size_overflow_error = (OverflowError, 'exceeds') class CPicklingErrorTests(PyPicklingErrorTests): pickler = _pickle.Pickler @@ -478,7 +484,7 @@ def test_pickler(self): 0) # Write buffer is cleared after every dump(). def test_unpickler(self): - basesize = support.calcobjsize('2P2n2P 2P2n2i5P 2P3n8P2n2i') + basesize = support.calcobjsize('2P2n3P 2P2n2i5P 2P3n8P2n2i') unpickler = _pickle.Unpickler P = struct.calcsize('P') # Size of memo table entry. n = struct.calcsize('n') # Size of mark table entry. diff --git a/Lib/test/test_readline.py b/Lib/test/test_readline.py index 45192fe508270d..3982686dd10aec 100644 --- a/Lib/test/test_readline.py +++ b/Lib/test/test_readline.py @@ -413,6 +413,24 @@ def test_write_read_limited_history(self): # So, we've only tested that the read did not fail. # See TestHistoryManipulation for the full test. + @unittest.skipUnless(hasattr(readline, "get_pre_input_hook"), + "get_pre_input_hook not available") + def test_get_pre_input_hook(self): + # Save and restore the original hook to avoid side effects + original_hook = readline.get_pre_input_hook() + self.addCleanup(readline.set_pre_input_hook, original_hook) + + # Test that get_pre_input_hook returns None when no hook is set + readline.set_pre_input_hook(None) + self.assertIsNone(readline.get_pre_input_hook()) + + # Set a hook and verify we can retrieve it + def my_hook(): + pass + + readline.set_pre_input_hook(my_hook) + self.assertIs(readline.get_pre_input_hook(), my_hook) + @unittest.skipUnless(support.Py_GIL_DISABLED, 'these tests can only possibly fail with GIL disabled') class FreeThreadingTest(unittest.TestCase): diff --git a/Lib/test/test_zipfile/test_core.py b/Lib/test/test_zipfile/test_core.py index 1edb5dde998658..6887a5e5cc4d18 100644 --- a/Lib/test/test_zipfile/test_core.py +++ b/Lib/test/test_zipfile/test_core.py @@ -2531,6 +2531,10 @@ def test_decompress_without_3rd_party_library(self): @requires_zlib() def test_full_overlap_different_names(self): + # The ZIP file contains two central directory entries with + # different names which refer to the same local header. + # The name of the local header matches the name of the first + # central directory entry. data = ( b'PK\x03\x04\x14\x00\x00\x00\x08\x00\xa0lH\x05\xe2\x1e' b'8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00b\xed' @@ -2560,6 +2564,10 @@ def test_full_overlap_different_names(self): @requires_zlib() def test_full_overlap_different_names2(self): + # The ZIP file contains two central directory entries with + # different names which refer to the same local header. + # The name of the local header matches the name of the second + # central directory entry. data = ( b'PK\x03\x04\x14\x00\x00\x00\x08\x00\xa0lH\x05\xe2\x1e' b'8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00a\xed' @@ -2591,6 +2599,8 @@ def test_full_overlap_different_names2(self): @requires_zlib() def test_full_overlap_same_name(self): + # The ZIP file contains two central directory entries with + # the same name which refer to the same local header. data = ( b'PK\x03\x04\x14\x00\x00\x00\x08\x00\xa0lH\x05\xe2\x1e' b'8\xbb\x10\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00a\xed' @@ -2623,6 +2633,8 @@ def test_full_overlap_same_name(self): @requires_zlib() def test_quoted_overlap(self): + # The ZIP file contains two files. The second local header + # is contained in the range of the first file. 
data = ( b'PK\x03\x04\x14\x00\x00\x00\x08\x00\xa0lH\x05Y\xfc' b'8\x044\x00\x00\x00(\x04\x00\x00\x01\x00\x00\x00a\x00' @@ -2654,6 +2666,7 @@ def test_quoted_overlap(self): @requires_zlib() def test_overlap_with_central_dir(self): + # The local header offset is equal to the central directory offset. data = ( b'PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00G_|Z' b'\xe2\x1e8\xbb\x0b\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00' @@ -2668,11 +2681,15 @@ def test_overlap_with_central_dir(self): self.assertEqual(zi.header_offset, 0) self.assertEqual(zi.compress_size, 11) self.assertEqual(zi.file_size, 1033) + # Found central directory signature PK\x01\x02 instead of + # local header signature PK\x03\x04. with self.assertRaisesRegex(zipfile.BadZipFile, 'Bad magic number'): zipf.read('a') @requires_zlib() def test_overlap_with_archive_comment(self): + # The local header is written after the central directory, + # in the archive comment. data = ( b'PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00G_|Z' b'\xe2\x1e8\xbb\x0b\x00\x00\x00\t\x04\x00\x00\x01\x00\x00\x00' diff --git a/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst new file mode 100644 index 00000000000000..4c4c65d45d78a4 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-05-20-12-35-52.gh-issue-115952.J6n_Kf.rst @@ -0,0 +1,7 @@ +Fix a potential memory denial of service in the :mod:`pickle` module. +When reading a pickled data received from untrusted source, it could cause +an arbitrary amount of memory to be allocated, even if the code that is +allowed to execute is restricted by overriding the +:meth:`~pickle.Unpickler.find_class` method. +This could have led to symptoms including a :exc:`MemoryError`, swapping, out +of memory (OOM) killed processes or containers, or even system crashes. diff --git a/Misc/NEWS.d/next/Library/2025-11-15-11-10-16.gh-issue-48752.aB3xYz.rst b/Misc/NEWS.d/next/Library/2025-11-15-11-10-16.gh-issue-48752.aB3xYz.rst new file mode 100644 index 00000000000000..37b91196658589 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-11-15-11-10-16.gh-issue-48752.aB3xYz.rst @@ -0,0 +1,3 @@ +Add :func:`readline.get_pre_input_hook` function to retrieve the current +pre-input hook. This allows applications to save and restore the hook +without overwriting user settings. Patch by Sanyam Khurana. diff --git a/Misc/NEWS.d/next/Library/2025-12-04-23-24-24.gh-issue-139862.NBfsD4.rst b/Misc/NEWS.d/next/Library/2025-12-04-23-24-24.gh-issue-139862.NBfsD4.rst new file mode 100644 index 00000000000000..2bee8881a75749 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-12-04-23-24-24.gh-issue-139862.NBfsD4.rst @@ -0,0 +1 @@ +Remove ``color`` parameter from :class:`!argparse.HelpFormatter` constructor. Color is controlled by :class:`~argparse.ArgumentParser`. diff --git a/Misc/NEWS.d/next/Library/2025-12-04-23-26-12.gh-issue-142267.yOM6fP.rst b/Misc/NEWS.d/next/Library/2025-12-04-23-26-12.gh-issue-142267.yOM6fP.rst new file mode 100644 index 00000000000000..f46e82105fc2f5 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-12-04-23-26-12.gh-issue-142267.yOM6fP.rst @@ -0,0 +1 @@ +Improve :mod:`argparse` performance by caching the formatter used for argument validation. 
diff --git a/Modules/_pickle.c b/Modules/_pickle.c index bfb2830f3893d6..608598eb5a536c 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -155,6 +155,9 @@ enum { /* Prefetch size when unpickling (disabled on unpeekable streams) */ PREFETCH = 8192 * 16, + /* Data larger that this will be read in chunks, to prevent extreme + overallocation. */ + MIN_READ_BUF_SIZE = 1 << 20, FRAME_SIZE_MIN = 4, FRAME_SIZE_TARGET = 64 * 1024, @@ -647,10 +650,11 @@ typedef struct UnpicklerObject { Pdata *stack; /* Pickle data stack, store unpickled objects. */ /* The unpickler memo is just an array of PyObject *s. Using a dict - is unnecessary, since the keys are contiguous ints. */ + is unnecessary, since the keys usually are contiguous ints. */ PyObject **memo; size_t memo_size; /* Capacity of the memo array */ size_t memo_len; /* Number of objects in the memo */ + PyObject *memo_dict; /* The backup memo dict for non-continuous keys. */ PyObject *persistent_load; /* persistent_load() method, can be NULL. */ PyObject *persistent_load_attr; /* instance attribute, can be NULL. */ @@ -1247,6 +1251,66 @@ _Unpickler_SkipConsumed(UnpicklerObject *self) static const Py_ssize_t READ_WHOLE_LINE = -1; +/* Don't call it directly: use _Unpickler_ReadInto() */ +static Py_ssize_t +_Unpickler_ReadIntoFromFile(PickleState *state, UnpicklerObject *self, char *buf, + Py_ssize_t n) +{ + assert(n != READ_WHOLE_LINE); + + if (!self->readinto) { + /* readinto() not supported on file-like object, fall back to read() + * and copy into destination buffer (bpo-39681) */ + PyObject* len = PyLong_FromSsize_t(n); + if (len == NULL) { + return -1; + } + PyObject* data = _Pickle_FastCall(self->read, len); + if (data == NULL) { + return -1; + } + if (!PyBytes_Check(data)) { + PyErr_Format(PyExc_ValueError, + "read() returned non-bytes object (%R)", + Py_TYPE(data)); + Py_DECREF(data); + return -1; + } + Py_ssize_t read_size = PyBytes_GET_SIZE(data); + if (read_size < n) { + Py_DECREF(data); + return bad_readline(state); + } + memcpy(buf, PyBytes_AS_STRING(data), n); + Py_DECREF(data); + return n; + } + + /* Call readinto() into user buffer */ + PyObject *buf_obj = PyMemoryView_FromMemory(buf, n, PyBUF_WRITE); + if (buf_obj == NULL) { + return -1; + } + PyObject *read_size_obj = _Pickle_FastCall(self->readinto, buf_obj); + if (read_size_obj == NULL) { + return -1; + } + Py_ssize_t read_size = PyLong_AsSsize_t(read_size_obj); + Py_DECREF(read_size_obj); + + if (read_size < 0) { + if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ValueError, + "readinto() returned negative size"); + } + return -1; + } + if (read_size < n) { + return bad_readline(state); + } + return n; +} + /* If reading from a file, we need to only pull the bytes we need, since there may be multiple pickle objects arranged contiguously in the same input buffer. @@ -1262,7 +1326,7 @@ static const Py_ssize_t READ_WHOLE_LINE = -1; causing the Unpickler to go back to the file for more data. Use the returned size to tell you how much data you can process. 
*/ static Py_ssize_t -_Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n) +_Unpickler_ReadFromFile(PickleState *state, UnpicklerObject *self, Py_ssize_t n) { PyObject *data; Py_ssize_t read_size; @@ -1274,6 +1338,9 @@ _Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n) if (n == READ_WHOLE_LINE) { data = PyObject_CallNoArgs(self->readline); + if (data == NULL) { + return -1; + } } else { PyObject *len; @@ -1302,13 +1369,29 @@ _Unpickler_ReadFromFile(UnpicklerObject *self, Py_ssize_t n) return n; } } - len = PyLong_FromSsize_t(n); + Py_ssize_t cursize = Py_MIN(n, MIN_READ_BUF_SIZE); + len = PyLong_FromSsize_t(cursize); if (len == NULL) return -1; data = _Pickle_FastCall(self->read, len); + if (data == NULL) { + return -1; + } + while (cursize < n) { + Py_ssize_t prevsize = cursize; + // geometrically double the chunk size to avoid CPU DoS + cursize += Py_MIN(cursize, n - cursize); + if (_PyBytes_Resize(&data, cursize) < 0) { + return -1; + } + if (_Unpickler_ReadIntoFromFile(state, self, + PyBytes_AS_STRING(data) + prevsize, cursize - prevsize) < 0) + { + Py_DECREF(data); + return -1; + } + } } - if (data == NULL) - return -1; read_size = _Unpickler_SetStringInput(self, data); Py_DECREF(data); @@ -1335,7 +1418,7 @@ _Unpickler_ReadImpl(UnpicklerObject *self, PickleState *st, char **s, Py_ssize_t return bad_readline(st); /* Extend the buffer to satisfy desired size */ - num_read = _Unpickler_ReadFromFile(self, n); + num_read = _Unpickler_ReadFromFile(st, self, n); if (num_read < 0) return -1; if (num_read < n) @@ -1382,57 +1465,7 @@ _Unpickler_ReadInto(PickleState *state, UnpicklerObject *self, char *buf, return -1; } - if (!self->readinto) { - /* readinto() not supported on file-like object, fall back to read() - * and copy into destination buffer (bpo-39681) */ - PyObject* len = PyLong_FromSsize_t(n); - if (len == NULL) { - return -1; - } - PyObject* data = _Pickle_FastCall(self->read, len); - if (data == NULL) { - return -1; - } - if (!PyBytes_Check(data)) { - PyErr_Format(PyExc_ValueError, - "read() returned non-bytes object (%R)", - Py_TYPE(data)); - Py_DECREF(data); - return -1; - } - Py_ssize_t read_size = PyBytes_GET_SIZE(data); - if (read_size < n) { - Py_DECREF(data); - return bad_readline(state); - } - memcpy(buf, PyBytes_AS_STRING(data), n); - Py_DECREF(data); - return n; - } - - /* Call readinto() into user buffer */ - PyObject *buf_obj = PyMemoryView_FromMemory(buf, n, PyBUF_WRITE); - if (buf_obj == NULL) { - return -1; - } - PyObject *read_size_obj = _Pickle_FastCall(self->readinto, buf_obj); - if (read_size_obj == NULL) { - return -1; - } - Py_ssize_t read_size = PyLong_AsSsize_t(read_size_obj); - Py_DECREF(read_size_obj); - - if (read_size < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "readinto() returned negative size"); - } - return -1; - } - if (read_size < n) { - return bad_readline(state); - } - return n; + return _Unpickler_ReadIntoFromFile(state, self, buf, n); } /* Read `n` bytes from the unpickler's data source, storing the result in `*s`. 
@@ -1492,7 +1525,7 @@ _Unpickler_Readline(PickleState *state, UnpicklerObject *self, char **result) if (!self->read) return bad_readline(state); - num_read = _Unpickler_ReadFromFile(self, READ_WHOLE_LINE); + num_read = _Unpickler_ReadFromFile(state, self, READ_WHOLE_LINE); if (num_read < 0) return -1; if (num_read == 0 || self->input_buffer[num_read - 1] != '\n') @@ -1525,12 +1558,35 @@ _Unpickler_ResizeMemoList(UnpicklerObject *self, size_t new_size) /* Returns NULL if idx is out of bounds. */ static PyObject * -_Unpickler_MemoGet(UnpicklerObject *self, size_t idx) +_Unpickler_MemoGet(PickleState *st, UnpicklerObject *self, size_t idx) { - if (idx >= self->memo_size) - return NULL; - - return self->memo[idx]; + PyObject *value; + if (idx < self->memo_size) { + value = self->memo[idx]; + if (value != NULL) { + return value; + } + } + if (self->memo_dict != NULL) { + PyObject *key = PyLong_FromSize_t(idx); + if (key == NULL) { + return NULL; + } + if (idx < self->memo_size) { + (void)PyDict_Pop(self->memo_dict, key, &value); + // Migrate dict entry to array for faster future access + self->memo[idx] = value; + } + else { + value = PyDict_GetItemWithError(self->memo_dict, key); + } + Py_DECREF(key); + if (value != NULL || PyErr_Occurred()) { + return value; + } + } + PyErr_Format(st->UnpicklingError, "Memo value not found at index %zd", idx); + return NULL; } /* Returns -1 (with an exception set) on failure, 0 on success. @@ -1541,6 +1597,27 @@ _Unpickler_MemoPut(UnpicklerObject *self, size_t idx, PyObject *value) PyObject *old_item; if (idx >= self->memo_size) { + if (idx > self->memo_len * 2) { + /* The memo keys are too sparse. Use a dict instead of + * a continuous array for the memo. */ + if (self->memo_dict == NULL) { + self->memo_dict = PyDict_New(); + if (self->memo_dict == NULL) { + return -1; + } + } + PyObject *key = PyLong_FromSize_t(idx); + if (key == NULL) { + return -1; + } + + if (PyDict_SetItem(self->memo_dict, key, value) < 0) { + Py_DECREF(key); + return -1; + } + Py_DECREF(key); + return 0; + } if (_Unpickler_ResizeMemoList(self, idx * 2) < 0) return -1; assert(idx < self->memo_size); @@ -1610,6 +1687,7 @@ _Unpickler_New(PyObject *module) self->memo = memo; self->memo_size = MEMO_SIZE; self->memo_len = 0; + self->memo_dict = NULL; self->persistent_load = NULL; self->persistent_load_attr = NULL; memset(&self->buffer, 0, sizeof(Py_buffer)); @@ -5582,13 +5660,28 @@ load_counted_binbytes(PickleState *state, UnpicklerObject *self, int nbytes) return -1; } - bytes = PyBytes_FromStringAndSize(NULL, size); - if (bytes == NULL) - return -1; - if (_Unpickler_ReadInto(state, self, PyBytes_AS_STRING(bytes), size) < 0) { - Py_DECREF(bytes); + Py_ssize_t cursize = Py_MIN(size, MIN_READ_BUF_SIZE); + Py_ssize_t prevsize = 0; + bytes = PyBytes_FromStringAndSize(NULL, cursize); + if (bytes == NULL) { return -1; } + while (1) { + if (_Unpickler_ReadInto(state, self, + PyBytes_AS_STRING(bytes) + prevsize, cursize - prevsize) < 0) + { + Py_DECREF(bytes); + return -1; + } + if (cursize >= size) { + break; + } + prevsize = cursize; + cursize += Py_MIN(cursize, size - cursize); + if (_PyBytes_Resize(&bytes, cursize) < 0) { + return -1; + } + } PDATA_PUSH(self->stack, bytes, -1); return 0; @@ -5613,14 +5706,27 @@ load_counted_bytearray(PickleState *state, UnpicklerObject *self) return -1; } - bytearray = PyByteArray_FromStringAndSize(NULL, size); + Py_ssize_t cursize = Py_MIN(size, MIN_READ_BUF_SIZE); + Py_ssize_t prevsize = 0; + bytearray = PyByteArray_FromStringAndSize(NULL, cursize); if 
(bytearray == NULL) { return -1; } - char *str = PyByteArray_AS_STRING(bytearray); - if (_Unpickler_ReadInto(state, self, str, size) < 0) { - Py_DECREF(bytearray); - return -1; + while (1) { + if (_Unpickler_ReadInto(state, self, + PyByteArray_AS_STRING(bytearray) + prevsize, + cursize - prevsize) < 0) { + Py_DECREF(bytearray); + return -1; + } + if (cursize >= size) { + break; + } + prevsize = cursize; + cursize += Py_MIN(cursize, size - cursize); + if (PyByteArray_Resize(bytearray, cursize) < 0) { + return -1; + } } PDATA_PUSH(self->stack, bytearray, -1); @@ -6222,20 +6328,15 @@ load_get(PickleState *st, UnpicklerObject *self) if (key == NULL) return -1; idx = PyLong_AsSsize_t(key); + Py_DECREF(key); if (idx == -1 && PyErr_Occurred()) { - Py_DECREF(key); return -1; } - value = _Unpickler_MemoGet(self, idx); + value = _Unpickler_MemoGet(st, self, idx); if (value == NULL) { - if (!PyErr_Occurred()) { - PyErr_Format(st->UnpicklingError, "Memo value not found at index %ld", idx); - } - Py_DECREF(key); return -1; } - Py_DECREF(key); PDATA_APPEND(self->stack, value, -1); return 0; @@ -6253,13 +6354,8 @@ load_binget(PickleState *st, UnpicklerObject *self) idx = Py_CHARMASK(s[0]); - value = _Unpickler_MemoGet(self, idx); + value = _Unpickler_MemoGet(st, self, idx); if (value == NULL) { - PyObject *key = PyLong_FromSsize_t(idx); - if (key != NULL) { - PyErr_Format(st->UnpicklingError, "Memo value not found at index %ld", idx); - Py_DECREF(key); - } return -1; } @@ -6279,13 +6375,8 @@ load_long_binget(PickleState *st, UnpicklerObject *self) idx = calc_binsize(s, 4); - value = _Unpickler_MemoGet(self, idx); + value = _Unpickler_MemoGet(st, self, idx); if (value == NULL) { - PyObject *key = PyLong_FromSsize_t(idx); - if (key != NULL) { - PyErr_Format(st->UnpicklingError, "Memo value not found at index %ld", idx); - Py_DECREF(key); - } return -1; } @@ -7250,6 +7341,7 @@ Unpickler_clear(PyObject *op) self->buffer.buf = NULL; } + Py_CLEAR(self->memo_dict); _Unpickler_MemoCleanup(self); PyMem_Free(self->marks); self->marks = NULL; @@ -7286,6 +7378,7 @@ Unpickler_traverse(PyObject *op, visitproc visit, void *arg) Py_VISIT(self->persistent_load); Py_VISIT(self->persistent_load_attr); Py_VISIT(self->buffers); + Py_VISIT(self->memo_dict); PyObject **memo = self->memo; if (memo) { Py_ssize_t i = self->memo_size; diff --git a/Modules/clinic/readline.c.h b/Modules/clinic/readline.c.h index 696475f7d00f5b..dc9381e4b976ac 100644 --- a/Modules/clinic/readline.c.h +++ b/Modules/clinic/readline.c.h @@ -349,6 +349,28 @@ readline_set_pre_input_hook(PyObject *module, PyObject *const *args, Py_ssize_t #endif /* defined(HAVE_RL_PRE_INPUT_HOOK) */ +#if defined(HAVE_RL_PRE_INPUT_HOOK) + +PyDoc_STRVAR(readline_get_pre_input_hook__doc__, +"get_pre_input_hook($module, /)\n" +"--\n" +"\n" +"Get the current pre-input hook function."); + +#define READLINE_GET_PRE_INPUT_HOOK_METHODDEF \ + {"get_pre_input_hook", (PyCFunction)readline_get_pre_input_hook, METH_NOARGS, readline_get_pre_input_hook__doc__}, + +static PyObject * +readline_get_pre_input_hook_impl(PyObject *module); + +static PyObject * +readline_get_pre_input_hook(PyObject *module, PyObject *Py_UNUSED(ignored)) +{ + return readline_get_pre_input_hook_impl(module); +} + +#endif /* defined(HAVE_RL_PRE_INPUT_HOOK) */ + PyDoc_STRVAR(readline_get_completion_type__doc__, "get_completion_type($module, /)\n" "--\n" @@ -794,7 +816,11 @@ readline_redisplay(PyObject *module, PyObject *Py_UNUSED(ignored)) #define READLINE_SET_PRE_INPUT_HOOK_METHODDEF #endif /* 
!defined(READLINE_SET_PRE_INPUT_HOOK_METHODDEF) */ +#ifndef READLINE_GET_PRE_INPUT_HOOK_METHODDEF + #define READLINE_GET_PRE_INPUT_HOOK_METHODDEF +#endif /* !defined(READLINE_GET_PRE_INPUT_HOOK_METHODDEF) */ + #ifndef READLINE_CLEAR_HISTORY_METHODDEF #define READLINE_CLEAR_HISTORY_METHODDEF #endif /* !defined(READLINE_CLEAR_HISTORY_METHODDEF) */ -/*[clinic end generated code: output=88d9812b6caa2102 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=4bd95070973cd0e2 input=a9049054013a1b77]*/ diff --git a/Modules/readline.c b/Modules/readline.c index e89755b0cb4b2a..cc84eb6229e66d 100644 --- a/Modules/readline.c +++ b/Modules/readline.c @@ -572,6 +572,26 @@ readline_set_pre_input_hook_impl(PyObject *module, PyObject *function) return set_hook("pre_input_hook", &state->pre_input_hook, function); } + +/* Get pre-input hook */ + +/*[clinic input] +readline.get_pre_input_hook + +Get the current pre-input hook function. +[clinic start generated code]*/ + +static PyObject * +readline_get_pre_input_hook_impl(PyObject *module) +/*[clinic end generated code: output=ad56b77a8e8981ca input=fb1e1b1fbd94e4e5]*/ +{ + readlinestate *state = get_readline_state(module); + if (state->pre_input_hook == NULL) { + Py_RETURN_NONE; + } + return Py_NewRef(state->pre_input_hook); +} + #endif @@ -1074,6 +1094,7 @@ static struct PyMethodDef readline_methods[] = READLINE_SET_STARTUP_HOOK_METHODDEF #ifdef HAVE_RL_PRE_INPUT_HOOK READLINE_SET_PRE_INPUT_HOOK_METHODDEF + READLINE_GET_PRE_INPUT_HOOK_METHODDEF #endif #ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER READLINE_CLEAR_HISTORY_METHODDEF diff --git a/Python/marshal.c b/Python/marshal.c index 8b56de6575559c..69d6dd7cf0f802 100644 --- a/Python/marshal.c +++ b/Python/marshal.c @@ -310,7 +310,7 @@ w_PyLong(const PyLongObject *ob, char flag, WFILE *p) } if (!long_export.digits) { int8_t sign = long_export.value < 0 ? -1 : 1; - uint64_t abs_value = Py_ABS(long_export.value); + uint64_t abs_value = _Py_ABS_CAST(uint64_t, long_export.value); uint64_t d = abs_value; long l = 0; diff --git a/Python/pystrhex.c b/Python/pystrhex.c index 38484f5a7d4227..af2f5c5dce5fca 100644 --- a/Python/pystrhex.c +++ b/Python/pystrhex.c @@ -42,8 +42,7 @@ static PyObject *_Py_strhex_impl(const char* argbuf, const Py_ssize_t arglen, else { bytes_per_sep_group = 0; } - - unsigned int abs_bytes_per_sep = Py_ABS(bytes_per_sep_group); + unsigned int abs_bytes_per_sep = _Py_ABS_CAST(unsigned int, bytes_per_sep_group); Py_ssize_t resultlen = 0; if (bytes_per_sep_group && arglen > 0) { /* How many sep characters we'll be inserting. */ diff --git a/Tools/picklebench/README.md b/Tools/picklebench/README.md new file mode 100644 index 00000000000000..7d52485c386350 --- /dev/null +++ b/Tools/picklebench/README.md @@ -0,0 +1,232 @@ +# Pickle Chunked Reading Benchmark + +This benchmark measures the performance impact of the chunked reading optimization in GH PR #119204 for the pickle module. + +## What This Tests + +The PR adds chunked reading (1MB chunks) to prevent memory exhaustion when unpickling large objects: +- **BINBYTES8** - Large bytes objects (protocol 4+) +- **BINUNICODE8** - Large strings (protocol 4+) +- **BYTEARRAY8** - Large bytearrays (protocol 5) +- **FRAME** - Large frames +- **LONG4** - Large integers +- An antagonistic mode that tests using memory denial of service inducing malicious pickles. 
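+Conceptually, the protection never trusts the claimed size up front: reads
+start at 1MiB and grow geometrically, stopping as soon as the stream runs
+dry. The following simplified sketch (illustrative names, not the exact code
+added to `Lib/pickle.py` by this PR) shows the strategy:
+
+```python
+_MIN_READ_BUF_SIZE = 1 << 20  # 1 MiB, matches pickle's chunk threshold
+
+def chunked_read(read, size):
+    """Read up to `size` bytes via `read()` without pre-allocating `size`."""
+    # Start with at most 1 MiB, then roughly double the chunk size each round.
+    cursize = min(size, _MIN_READ_BUF_SIZE)
+    data = read(cursize)
+    # Stop early if the stream returns less than requested (truncated input).
+    while cursize < size and len(data) == cursize:
+        delta = min(cursize, size - cursize)
+        data += read(delta)
+        cursize += delta
+    return data
+```
+
+With this pattern, a malicious pickle that claims 100MB but ships only 1KB
+costs at most about one 1MiB read request before the unpickler notices the
+truncation and fails, instead of allocating the full claimed size.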
+ +## Quick Start + +```bash +# Run full benchmark suite (1MiB → 200MiB, takes several minutes) +build/python Tools/picklebench/memory_dos_impact.py + +# Test just a few sizes (quick test: 1, 10, 50 MiB) +build/python Tools/picklebench/memory_dos_impact.py --sizes 1 10 50 + +# Test smaller range for faster results +build/python Tools/picklebench/memory_dos_impact.py --sizes 1 5 10 + +# Output as markdown for reports +build/python Tools/picklebench/memory_dos_impact.py --format markdown > results.md + +# Test with protocol 4 instead of 5 +build/python Tools/picklebench/memory_dos_impact.py --protocol 4 +``` + +**Note:** Sizes are specified in MiB. Use `--sizes 1 2 5` for 1MiB, 2MiB, 5MiB objects. + +## Antagonistic Mode (DoS Protection Test) + +The `--antagonistic` flag tests **malicious pickles** that demonstrate the memory DoS protection: + +```bash +# Quick DoS protection test (claims 10, 50, 100 MB but provides 1KB) +build/python Tools/picklebench/memory_dos_impact.py --antagonistic --sizes 10 50 100 + +# Full DoS test (default: 10, 50, 100, 500, 1000, 5000 MB claimed) +build/python Tools/picklebench/memory_dos_impact.py --antagonistic +``` + +### What This Tests + +Unlike normal benchmarks that test **legitimate pickles**, antagonistic mode tests: +- **Truncated BINBYTES8**: Claims 100MB but provides only 1KB (will fail to unpickle) +- **Truncated BINUNICODE8**: Same for strings +- **Truncated BYTEARRAY8**: Same for bytearrays +- **Sparse memo attacks**: PUT at index 1 billion (would allocate huge array before PR) + +**Key difference:** +- **Normal mode**: Tests real data, shows ~5% time overhead +- **Antagonistic mode**: Tests malicious data, shows ~99% memory savings + +### Expected Results + +``` +100MB Claimed (actual: 1KB) + binbytes8_100MB_claim + Peak memory: 1.00 MB (claimed: 100 MB, saved: 99.00 MB, 99.0%) + Error: UnpicklingError ← Expected! + +Summary: + Average claimed: 126.2 MB + Average peak: 0.54 MB + Average saved: 125.7 MB (99.6% reduction) +Protection Status: ✓ Memory DoS attacks mitigated by chunked reading +``` + +**Before PR**: Would allocate full claimed size (100MB+), potentially crash +**After PR**: Allocates 1MB chunks, fails fast with minimal memory + +This demonstrates the **security improvement** - protection against memory exhaustion attacks. + +## Before/After Comparison + +The benchmark includes an automatic comparison feature that runs the same tests on both a baseline and current Python build. + +### Option 1: Automatic Comparison (Recommended) + +Build both versions, then use `--baseline` to automatically compare: + +```bash +# Build the baseline (main branch without PR) +git checkout main +mkdir -p build-main +cd build-main && ../configure && make -j $(nproc) && cd .. + +# Build the current version (with PR) +git checkout unpickle-overallocate +mkdir -p build +cd build && ../configure && make -j $(nproc) && cd .. 
+ +# Run automatic comparison (quick test with a few sizes) +build/python Tools/picklebench/memory_dos_impact.py \ + --baseline build-main/python \ + --sizes 1 10 50 + +# Full comparison (all default sizes) +build/python Tools/picklebench/memory_dos_impact.py \ + --baseline build-main/python +``` + +The comparison output shows: +- Side-by-side metrics (Current vs Baseline) +- Percentage change for time and memory +- Overall summary statistics + +### Interpreting Comparison Results + +- **Time change**: Small positive % is expected (chunking adds overhead, typically 5-10%) +- **Memory change**: Negative % is good (chunking saves memory, especially for large objects) +- **Trade-off**: Slightly slower but much safer against memory exhaustion attacks + +### Option 2: Manual Comparison + +Save results separately and compare manually: + +```bash +# Baseline results +build-main/python Tools/picklebench/memory_dos_impact.py --format json > baseline.json + +# Current results +build/python Tools/picklebench/memory_dos_impact.py --format json > current.json + +# Manual comparison +diff -y <(jq '.' baseline.json) <(jq '.' current.json) +``` + +## Understanding the Results + +### Critical Sizes + +The default test suite includes: +- **< 1MiB (999,000 bytes)**: No chunking, allocates full size upfront +- **= 1MiB (1,048,576 bytes)**: Threshold, chunking just starts +- **> 1MiB (1,048,577 bytes)**: Chunked reading engaged +- **1, 2, 5, 10MiB**: Show scaling behavior with chunking +- **20, 50, 100, 200MiB**: Stress test large object handling + +**Note:** The full suite may require more than 16GiB of RAM. + +### Key Metrics + +- **Time (mean)**: Average unpickling time - should be similar before/after +- **Time (stdev)**: Consistency - lower is better +- **Peak Memory**: Maximum memory during unpickling - **expected to be LOWER after PR** +- **Pickle Size**: Size of the serialized data on disk + +### Test Types + +| Test | What It Stresses | +|------|------------------| +| `bytes_*` | BINBYTES8 opcode, raw binary data | +| `string_ascii_*` | BINUNICODE8 with simple ASCII | +| `string_utf8_*` | BINUNICODE8 with multibyte UTF-8 (€ chars) | +| `bytearray_*` | BYTEARRAY8 opcode (protocol 5) | +| `list_large_items_*` | Multiple chunked reads in sequence | +| `dict_large_values_*` | Chunking in dict deserialization | +| `nested_*` | Realistic mixed data structures | +| `tuple_*` | Immutable structures | + +## Expected Results + +### Before PR (main branch) +- Single large allocation per object +- Risk of memory exhaustion with malicious pickles + +### After PR (unpickle-overallocate branch) +- Chunked allocation (1MB at a time) +- **Slightly higher CPU time** (multiple allocations + resizing) +- **Significantly lower peak memory** (no large pre-allocation) +- Protection against DoS via memory exhaustion + +## Advanced Usage + +### Test Specific Sizes + +```bash +# Test only 5MiB and 10MiB objects +build/python Tools/picklebench/memory_dos_impact.py --sizes 5 10 + +# Test large objects: 50, 100, 200 MiB +build/python Tools/picklebench/memory_dos_impact.py --sizes 50 100 200 +``` + +### More Iterations for Stable Timing + +```bash +# Run 10 iterations per test for better statistics +build/python Tools/picklebench/memory_dos_impact.py --iterations 10 --sizes 1 10 +``` + +### JSON Output for Analysis + +```bash +# Generate JSON for programmatic analysis +build/python Tools/picklebench/memory_dos_impact.py --format json | python -m json.tool +``` + +## Interpreting Memory Results + +The **peak memory** metric shows 
the maximum memory allocated during unpickling:
+
+- **Without chunking**: Allocates full size immediately
+  - 10MB object → 10MB allocation upfront
+
+- **With chunking**: Allocates in 1MB chunks, grows geometrically
+  - 10MB object → starts with 1MB, grows: 2MB, 4MB, 8MB (final: ~10MB total)
+  - Peak is lower because allocation is incremental
+
+## Typical Results
+
+On a system with the PR applied, you should see:
+
+```
+1.00MiB Test Results
+  bytes_1.00MiB: ~0.3ms, 1.00MiB peak (just at threshold)
+
+2.00MiB Test Results
+  bytes_2.00MiB: ~0.8ms, 2.00MiB peak (chunked: 1MiB → 2MiB)
+
+10.00MiB Test Results
+  bytes_10.00MiB: ~3-5ms, 10.00MiB peak (chunked: 1→2→4→8→10 MiB)
+```
+
+Time overhead is minimal (~10-20% for very large objects), but memory safety is significantly improved.
diff --git a/Tools/picklebench/memory_dos_impact.py b/Tools/picklebench/memory_dos_impact.py
new file mode 100755
index 00000000000000..3bad6586c46943
--- /dev/null
+++ b/Tools/picklebench/memory_dos_impact.py
@@ -0,0 +1,1069 @@
+#!/usr/bin/env python3
+#
+# Author: Claude Sonnet 4.5 as driven by gpshead
+#
+"""
+Microbenchmark for pickle module chunked reading performance (GH PR #119204).
+
+This script generates Python data structures that act as antagonistic load
+tests for the chunked reading code introduced to prevent memory exhaustion when
+unpickling large objects.
+
+The PR adds chunked reading (1MB chunks) for:
+- BINBYTES8 (large bytes)
+- BINUNICODE8 (large strings)
+- BYTEARRAY8 (large bytearrays)
+- FRAME (large frames)
+- LONG4 (large integers)
+
+It also includes an antagonistic mode that exercises memory denial-of-service pickles.
+
+Usage:
+    python memory_dos_impact.py --help
+"""
+
+import argparse
+import gc
+import io
+import json
+import os
+import pickle
+import statistics
+import struct
+import subprocess
+import sys
+import tempfile
+import tracemalloc
+from pathlib import Path
+from time import perf_counter
+from typing import Any, Dict, List, Tuple, Optional
+
+
+# Configuration
+MIN_READ_BUF_SIZE = 1 << 20  # 1MB - matches pickle.py _MIN_READ_BUF_SIZE
+
+# Test sizes in MiB
+DEFAULT_SIZES_MIB = [1, 2, 5, 10, 20, 50, 100, 200]
+
+# Convert to bytes, plus threshold boundary tests
+DEFAULT_SIZES = (
+    [999_000]  # Below 1MiB (no chunking)
+    + [size * (1 << 20) for size in DEFAULT_SIZES_MIB]  # MiB to bytes
+    + [1_048_577]  # Just above 1MiB (minimal chunking overhead)
+)
+DEFAULT_SIZES.sort()
+
+# Baseline benchmark configuration
+BASELINE_BENCHMARK_TIMEOUT_SECONDS = 600  # 10 minutes
+
+# Sparse memo attack test configuration
+# Format: test_name -> (memo_index, baseline_memory_note)
+SPARSE_MEMO_TESTS = {
+    "sparse_memo_1M": (1_000_000, "~8 MB array"),
+    "sparse_memo_100M": (100_000_000, "~800 MB array"),
+    "sparse_memo_1B": (1_000_000_000, "~8 GB array"),
+}
+
+
+# Utility functions
+
+def _extract_size_mb(size_key: str) -> float:
+    """Extract the numeric size in MiB from a size_key like '10.00MB' or '1.00MiB'.
+
+    Returns a large sentinel value for non-numeric keys so they sort last.
+    """
+    try:
+        return float(size_key.replace('MiB', '').replace('MB', ''))
+    except ValueError:
+        return 999999.0  # Put non-numeric keys last
+
+
+def _format_output(results: Dict[str, Dict[str, Any]], format_type: str, is_antagonistic: bool) -> str:
+    """Format benchmark results according to requested format.
+ + Args: + results: Benchmark results dictionary + format_type: Output format ('text', 'markdown', or 'json') + is_antagonistic: Whether these are antagonistic (DoS) test results + + Returns: + Formatted output string + """ + if format_type == 'json': + return Reporter.format_json(results) + elif is_antagonistic: + # Antagonistic mode uses specialized formatter for text/markdown + return Reporter.format_antagonistic(results) + elif format_type == 'text': + return Reporter.format_text(results) + elif format_type == 'markdown': + return Reporter.format_markdown(results) + else: + # Default to text format + return Reporter.format_text(results) + + +class AntagonisticGenerator: + """Generate malicious/truncated pickles for DoS protection testing. + + These pickles claim large sizes but provide minimal data, causing them to fail + during unpickling. They demonstrate the memory protection of chunked reading. + """ + + @staticmethod + def truncated_binbytes8(claimed_size: int, actual_size: int = 1024) -> bytes: + """BINBYTES8 claiming `claimed_size` but providing only `actual_size` bytes. + + This will fail with UnpicklingError but demonstrates peak memory usage. + Before PR: Allocates full claimed_size + After PR: Allocates in 1MB chunks, fails fast + """ + return b'\x8e' + struct.pack(' bytes: + """BINUNICODE8 claiming `claimed_size` but providing only `actual_size` bytes.""" + return b'\x8d' + struct.pack(' bytes: + """BYTEARRAY8 claiming `claimed_size` but providing only `actual_size` bytes.""" + return b'\x96' + struct.pack(' bytes: + """FRAME claiming `claimed_size` but providing minimal data.""" + return b'\x95' + struct.pack(' bytes: + """LONG_BINPUT with huge sparse index. + + Before PR: Tries to allocate array with `index` slots (OOM) + After PR: Uses dict-based memo for sparse indices + """ + return (b'(]r' + struct.pack(' bytes: + """Multiple BINBYTES8 claims in sequence. + + Tests that multiple large claims don't accumulate memory. 
+ """ + data = b'(' # MARK + for _ in range(count): + data += b'\x8e' + struct.pack(' bytes: + """Generate random bytes of specified size.""" + return os.urandom(size) + + @staticmethod + def large_string_ascii(size: int) -> str: + """Generate ASCII string of specified size.""" + return 'x' * size + + @staticmethod + def large_string_multibyte(size: int) -> str: + """Generate multibyte UTF-8 string (3 bytes per char for €).""" + # Each € is 3 bytes in UTF-8 + return '€' * (size // 3) + + @staticmethod + def large_bytearray(size: int) -> bytearray: + """Generate bytearray of specified size.""" + return bytearray(os.urandom(size)) + + @staticmethod + def list_of_large_bytes(item_size: int, count: int) -> List[bytes]: + """Generate list containing multiple large bytes objects.""" + return [os.urandom(item_size) for _ in range(count)] + + @staticmethod + def dict_with_large_values(value_size: int, count: int) -> Dict[str, bytes]: + """Generate dict with large bytes values.""" + return { + f'key_{i}': os.urandom(value_size) + for i in range(count) + } + + @staticmethod + def nested_structure(size: int) -> Dict[str, Any]: + """Generate nested structure with various large objects.""" + chunk_size = size // 4 + return { + 'name': 'test_object', + 'data': { + 'bytes': os.urandom(chunk_size), + 'string': 's' * chunk_size, + 'bytearray': bytearray(b'b' * chunk_size), + }, + 'items': [os.urandom(chunk_size // 4) for _ in range(4)], + 'metadata': { + 'size': size, + 'type': 'nested', + }, + } + + @staticmethod + def tuple_of_large_objects(size: int) -> Tuple[bytes, str, bytearray]: + """Generate tuple with large objects (immutable, different pickle path).""" + chunk_size = size // 3 + return ( + os.urandom(chunk_size), + 'x' * chunk_size, + bytearray(b'y' * chunk_size), + ) + + +class PickleBenchmark: + """Benchmark pickle unpickling performance and memory usage.""" + + def __init__(self, obj: Any, protocol: int = 5, iterations: int = 3): + self.obj = obj + self.protocol = protocol + self.iterations = iterations + self.pickle_data = pickle.dumps(obj, protocol=protocol) + self.pickle_size = len(self.pickle_data) + + def benchmark_time(self) -> Dict[str, float]: + """Measure unpickling time over multiple iterations.""" + times = [] + + for _ in range(self.iterations): + start = perf_counter() + result = pickle.loads(self.pickle_data) + elapsed = perf_counter() - start + times.append(elapsed) + + # Verify correctness (first iteration only) + if len(times) == 1: + if result != self.obj: + raise ValueError("Unpickled object doesn't match original!") + + return { + 'mean': statistics.mean(times), + 'median': statistics.median(times), + 'stdev': statistics.stdev(times) if len(times) > 1 else 0.0, + 'min': min(times), + 'max': max(times), + } + + def benchmark_memory(self) -> int: + """Measure peak memory usage during unpickling.""" + tracemalloc.start() + + # Warmup + pickle.loads(self.pickle_data) + + # Actual measurement + gc.collect() + tracemalloc.reset_peak() + result = pickle.loads(self.pickle_data) + current, peak = tracemalloc.get_traced_memory() + + tracemalloc.stop() + + # Verify correctness + if result != self.obj: + raise ValueError("Unpickled object doesn't match original!") + + return peak + + def run_all(self) -> Dict[str, Any]: + """Run all benchmarks and return comprehensive results.""" + time_stats = self.benchmark_time() + peak_memory = self.benchmark_memory() + + return { + 'pickle_size_bytes': self.pickle_size, + 'pickle_size_mb': self.pickle_size / (1 << 20), + 'protocol': self.protocol, 
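+            # Timing stats come from benchmark_time(); peak memory from a
+            # separate tracemalloc-instrumented run in benchmark_memory().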
+ 'time': time_stats, + 'memory_peak_bytes': peak_memory, + 'memory_peak_mb': peak_memory / (1 << 20), + 'iterations': self.iterations, + } + + +class AntagonisticBenchmark: + """Benchmark antagonistic/malicious pickles that demonstrate DoS protection. + + These pickles are designed to FAIL unpickling, but we measure peak memory + usage before the failure to demonstrate the memory protection. + """ + + def __init__(self, pickle_data: bytes, name: str): + self.pickle_data = pickle_data + self.name = name + + def measure_peak_memory(self, expect_success: bool = False) -> Dict[str, Any]: + """Measure peak memory when attempting to unpickle antagonistic data. + + Args: + expect_success: If True, test expects successful unpickling (e.g., sparse memo). + If False, test expects failure (e.g., truncated data). + """ + tracemalloc.start() + gc.collect() + tracemalloc.reset_peak() + + error_type = None + error_msg = None + succeeded = False + + try: + result = pickle.loads(self.pickle_data) + succeeded = True + if expect_success: + error_type = "Success (expected)" + else: + error_type = "WARNING: Expected failure but succeeded" + except (pickle.UnpicklingError, EOFError, ValueError, OverflowError) as e: + if expect_success: + error_type = f"UNEXPECTED FAILURE: {type(e).__name__}" + error_msg = str(e)[:100] + else: + # Expected failure for truncated data tests + error_type = type(e).__name__ + error_msg = str(e)[:100] + + current, peak = tracemalloc.get_traced_memory() + tracemalloc.stop() + + return { + 'test_name': self.name, + 'peak_memory_bytes': peak, + 'peak_memory_mb': peak / (1 << 20), + 'error_type': error_type, + 'error_msg': error_msg, + 'pickle_size_bytes': len(self.pickle_data), + 'expected_outcome': 'success' if expect_success else 'failure', + 'succeeded': succeeded, + } + + +class AntagonisticTestSuite: + """Manage a suite of antagonistic (DoS protection) tests.""" + + # Default sizes in MB to claim (will provide only 1KB actual data) + DEFAULT_ANTAGONISTIC_SIZES_MB = [10, 50, 100, 500, 1000, 5000] + + def __init__(self, claimed_sizes_mb: List[int]): + self.claimed_sizes_mb = claimed_sizes_mb + + def _run_truncated_test( + self, + test_type: str, + generator_func, + claimed_bytes: int, + claimed_mb: int, + size_key: str, + all_results: Dict[str, Dict[str, Any]] + ) -> None: + """Run a single truncated data test and store results. 
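+        The generated pickle is expected to fail to load; only the peak
+        memory observed before the failure is recorded in all_results.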
+ + Args: + test_type: Type identifier (e.g., 'binbytes8', 'binunicode8') + generator_func: Function to generate malicious pickle data + claimed_bytes: Size claimed in the pickle (bytes) + claimed_mb: Size claimed in the pickle (MB) + size_key: Result key for this size (e.g., '10MB') + all_results: Dictionary to store results in + """ + test_name = f"{test_type}_{size_key}_claim" + data = generator_func(claimed_bytes) + bench = AntagonisticBenchmark(data, test_name) + result = bench.measure_peak_memory(expect_success=False) + result['claimed_mb'] = claimed_mb + all_results[size_key][test_name] = result + + def run_all_tests(self) -> Dict[str, Dict[str, Any]]: + """Run comprehensive antagonistic test suite.""" + all_results = {} + + for claimed_mb in self.claimed_sizes_mb: + claimed_bytes = claimed_mb << 20 + size_key = f"{claimed_mb}MB" + all_results[size_key] = {} + + # Run truncated data tests (expect failure) + self._run_truncated_test('binbytes8', AntagonisticGenerator.truncated_binbytes8, + claimed_bytes, claimed_mb, size_key, all_results) + self._run_truncated_test('binunicode8', AntagonisticGenerator.truncated_binunicode8, + claimed_bytes, claimed_mb, size_key, all_results) + self._run_truncated_test('bytearray8', AntagonisticGenerator.truncated_bytearray8, + claimed_bytes, claimed_mb, size_key, all_results) + self._run_truncated_test('frame', AntagonisticGenerator.truncated_frame, + claimed_bytes, claimed_mb, size_key, all_results) + + # Test 5: Sparse memo (expect success - dict-based memo works!) + all_results["Sparse Memo (Success Expected)"] = {} + for test_name, (index, baseline_note) in SPARSE_MEMO_TESTS.items(): + data = AntagonisticGenerator.sparse_memo_attack(index) + bench = AntagonisticBenchmark(data, test_name) + result = bench.measure_peak_memory(expect_success=True) + result['claimed_mb'] = "N/A" + result['baseline_note'] = f"Without PR: {baseline_note}" + all_results["Sparse Memo (Success Expected)"][test_name] = result + + # Test 6: Multi-claim attack (expect failure) + test_name = "multi_claim_10x100MB" + data = AntagonisticGenerator.multi_claim_attack(10, 100 << 20) + bench = AntagonisticBenchmark(data, test_name) + result = bench.measure_peak_memory(expect_success=False) + result['claimed_mb'] = 1000 # 10 * 100MB + all_results["Multi-Claim (Failure Expected)"] = {test_name: result} + + return all_results + + +class TestSuite: + """Manage a suite of benchmark tests.""" + + def __init__(self, sizes: List[int], protocol: int = 5, iterations: int = 3): + self.sizes = sizes + self.protocol = protocol + self.iterations = iterations + self.results = {} + + def run_test(self, name: str, obj: Any) -> Dict[str, Any]: + """Run benchmark for a single test object.""" + bench = PickleBenchmark(obj, self.protocol, self.iterations) + results = bench.run_all() + results['test_name'] = name + results['object_type'] = type(obj).__name__ + return results + + def run_all_tests(self) -> Dict[str, Dict[str, Any]]: + """Run comprehensive test suite across all sizes and types.""" + all_results = {} + + for size in self.sizes: + size_key = f"{size / (1 << 20):.2f}MB" + all_results[size_key] = {} + + # Test 1: Large bytes object (BINBYTES8) + test_name = f"bytes_{size_key}" + obj = DataGenerator.large_bytes(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 2: Large ASCII string (BINUNICODE8) + test_name = f"string_ascii_{size_key}" + obj = DataGenerator.large_string_ascii(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 
3: Large multibyte UTF-8 string + if size >= 3: + test_name = f"string_utf8_{size_key}" + obj = DataGenerator.large_string_multibyte(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 4: Large bytearray (BYTEARRAY8, protocol 5) + if self.protocol >= 5: + test_name = f"bytearray_{size_key}" + obj = DataGenerator.large_bytearray(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 5: List of large objects (repeated chunking) + if size >= MIN_READ_BUF_SIZE * 2: + test_name = f"list_large_items_{size_key}" + item_size = size // 5 + obj = DataGenerator.list_of_large_bytes(item_size, 5) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 6: Dict with large values + if size >= MIN_READ_BUF_SIZE * 2: + test_name = f"dict_large_values_{size_key}" + value_size = size // 3 + obj = DataGenerator.dict_with_large_values(value_size, 3) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 7: Nested structure + if size >= MIN_READ_BUF_SIZE: + test_name = f"nested_{size_key}" + obj = DataGenerator.nested_structure(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + # Test 8: Tuple (immutable) + if size >= 3: + test_name = f"tuple_{size_key}" + obj = DataGenerator.tuple_of_large_objects(size) + all_results[size_key][test_name] = self.run_test(test_name, obj) + + return all_results + + +class Comparator: + """Compare benchmark results between current and baseline interpreters.""" + + @staticmethod + def _extract_json_from_output(output: str) -> Dict[str, Dict[str, Any]]: + """Extract JSON data from subprocess output. + + Skips any print statements before the JSON output and parses the JSON. + + Args: + output: Raw stdout from subprocess + + Returns: + Parsed JSON as dictionary + + Raises: + SystemExit: If JSON cannot be found or parsed + """ + output_lines = output.strip().split('\n') + json_start = -1 + for i, line in enumerate(output_lines): + if line.strip().startswith('{'): + json_start = i + break + + if json_start == -1: + print("Error: Could not find JSON output from baseline", file=sys.stderr) + sys.exit(1) + + json_output = '\n'.join(output_lines[json_start:]) + try: + return json.loads(json_output) + except json.JSONDecodeError as e: + print(f"Error: Could not parse baseline JSON output: {e}", file=sys.stderr) + sys.exit(1) + + @staticmethod + def run_baseline_benchmark(baseline_python: str, args: argparse.Namespace) -> Dict[str, Dict[str, Any]]: + """Run the benchmark using the baseline Python interpreter.""" + # Build command to run this script with baseline Python + cmd = [ + baseline_python, + __file__, + '--format', 'json', + '--protocol', str(args.protocol), + '--iterations', str(args.iterations), + ] + + if args.sizes is not None: + cmd.extend(['--sizes'] + [str(s) for s in args.sizes]) + + if args.antagonistic: + cmd.append('--antagonistic') + + print(f"\nRunning baseline benchmark with: {baseline_python}") + print(f"Command: {' '.join(cmd)}\n") + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=BASELINE_BENCHMARK_TIMEOUT_SECONDS, + ) + + if result.returncode != 0: + print(f"Error running baseline benchmark:", file=sys.stderr) + print(result.stderr, file=sys.stderr) + sys.exit(1) + + # Extract and parse JSON from output + return Comparator._extract_json_from_output(result.stdout) + + except subprocess.TimeoutExpired: + print("Error: Baseline benchmark timed out", file=sys.stderr) + sys.exit(1) + + @staticmethod + def 
calculate_change(baseline_value: float, current_value: float) -> float: + """Calculate percentage change from baseline to current.""" + if baseline_value == 0: + return 0.0 + return ((current_value - baseline_value) / baseline_value) * 100 + + @staticmethod + def format_comparison( + current_results: Dict[str, Dict[str, Any]], + baseline_results: Dict[str, Dict[str, Any]] + ) -> str: + """Format comparison results as readable text.""" + lines = [] + lines.append("=" * 100) + lines.append("Pickle Unpickling Benchmark Comparison") + lines.append("=" * 100) + lines.append("") + lines.append("Legend: Current vs Baseline | % Change (+ is slower/more memory, - is faster/less memory)") + lines.append("") + + # Sort size keys numerically + for size_key in sorted(current_results.keys(), key=_extract_size_mb): + if size_key not in baseline_results: + continue + + lines.append(f"\n{size_key} Comparison") + lines.append("-" * 100) + + current_tests = current_results[size_key] + baseline_tests = baseline_results[size_key] + + for test_name in sorted(current_tests.keys()): + if test_name not in baseline_tests: + continue + + curr = current_tests[test_name] + base = baseline_tests[test_name] + + time_change = Comparator.calculate_change( + base['time']['mean'], curr['time']['mean'] + ) + mem_change = Comparator.calculate_change( + base['memory_peak_mb'], curr['memory_peak_mb'] + ) + + lines.append(f"\n {curr['test_name']}") + lines.append(f" Time: {curr['time']['mean']*1000:6.2f}ms vs {base['time']['mean']*1000:6.2f}ms | " + f"{time_change:+6.1f}%") + lines.append(f" Memory: {curr['memory_peak_mb']:6.2f}MB vs {base['memory_peak_mb']:6.2f}MB | " + f"{mem_change:+6.1f}%") + + lines.append("\n" + "=" * 100) + lines.append("\nSummary:") + + # Calculate overall statistics + time_changes = [] + mem_changes = [] + + for size_key in current_results.keys(): + if size_key not in baseline_results: + continue + for test_name in current_results[size_key].keys(): + if test_name not in baseline_results[size_key]: + continue + curr = current_results[size_key][test_name] + base = baseline_results[size_key][test_name] + + time_changes.append(Comparator.calculate_change( + base['time']['mean'], curr['time']['mean'] + )) + mem_changes.append(Comparator.calculate_change( + base['memory_peak_mb'], curr['memory_peak_mb'] + )) + + if time_changes: + lines.append(f" Time change: mean={statistics.mean(time_changes):+.1f}%, " + f"median={statistics.median(time_changes):+.1f}%") + if mem_changes: + lines.append(f" Memory change: mean={statistics.mean(mem_changes):+.1f}%, " + f"median={statistics.median(mem_changes):+.1f}%") + + lines.append("=" * 100) + return "\n".join(lines) + + @staticmethod + def format_antagonistic_comparison( + current_results: Dict[str, Dict[str, Any]], + baseline_results: Dict[str, Dict[str, Any]] + ) -> str: + """Format antagonistic benchmark comparison results.""" + lines = [] + lines.append("=" * 100) + lines.append("Antagonistic Pickle Benchmark Comparison (Memory DoS Protection)") + lines.append("=" * 100) + lines.append("") + lines.append("Legend: Current vs Baseline | Memory Change (- is better, shows memory saved)") + lines.append("") + lines.append("This compares TWO types of DoS protection:") + lines.append(" 1. Truncated data → Baseline allocates full claimed size, Current uses chunked reading") + lines.append(" 2. 
Sparse memo → Baseline uses huge arrays, Current uses dict-based memo") + lines.append("") + + # Track statistics + truncated_memory_changes = [] + sparse_memory_changes = [] + + # Sort size keys numerically + for size_key in sorted(current_results.keys(), key=_extract_size_mb): + if size_key not in baseline_results: + continue + + lines.append(f"\n{size_key} Comparison") + lines.append("-" * 100) + + current_tests = current_results[size_key] + baseline_tests = baseline_results[size_key] + + for test_name in sorted(current_tests.keys()): + if test_name not in baseline_tests: + continue + + curr = current_tests[test_name] + base = baseline_tests[test_name] + + curr_peak_mb = curr['peak_memory_mb'] + base_peak_mb = base['peak_memory_mb'] + expected_outcome = curr.get('expected_outcome', 'failure') + + mem_change = Comparator.calculate_change(base_peak_mb, curr_peak_mb) + mem_saved_mb = base_peak_mb - curr_peak_mb + + lines.append(f"\n {curr['test_name']}") + lines.append(f" Memory: {curr_peak_mb:6.2f}MB vs {base_peak_mb:6.2f}MB | " + f"{mem_change:+6.1f}% ({mem_saved_mb:+.2f}MB saved)") + + # Track based on test type + if expected_outcome == 'success': + sparse_memory_changes.append(mem_change) + if curr.get('baseline_note'): + lines.append(f" Note: {curr['baseline_note']}") + else: + truncated_memory_changes.append(mem_change) + claimed_mb = curr.get('claimed_mb', 'N/A') + if claimed_mb != 'N/A': + lines.append(f" Claimed: {claimed_mb:,}MB") + + # Show status + curr_status = curr.get('error_type', 'Unknown') + base_status = base.get('error_type', 'Unknown') + if curr_status != base_status: + lines.append(f" Status: {curr_status} (baseline: {base_status})") + else: + lines.append(f" Status: {curr_status}") + + lines.append("\n" + "=" * 100) + lines.append("\nSummary:") + lines.append("") + + if truncated_memory_changes: + lines.append(" Truncated Data Protection (chunked reading):") + lines.append(f" Mean memory change: {statistics.mean(truncated_memory_changes):+.1f}%") + lines.append(f" Median memory change: {statistics.median(truncated_memory_changes):+.1f}%") + avg_change = statistics.mean(truncated_memory_changes) + if avg_change < -50: + lines.append(f" Result: ✓ Dramatic memory reduction ({avg_change:.1f}%) - DoS protection working!") + elif avg_change < 0: + lines.append(f" Result: ✓ Memory reduced ({avg_change:.1f}%)") + else: + lines.append(f" Result: ⚠ Memory increased ({avg_change:.1f}%) - unexpected!") + lines.append("") + + if sparse_memory_changes: + lines.append(" Sparse Memo Protection (dict-based memo):") + lines.append(f" Mean memory change: {statistics.mean(sparse_memory_changes):+.1f}%") + lines.append(f" Median memory change: {statistics.median(sparse_memory_changes):+.1f}%") + avg_change = statistics.mean(sparse_memory_changes) + if avg_change < -50: + lines.append(f" Result: ✓ Dramatic memory reduction ({avg_change:.1f}%) - Dict optimization working!") + elif avg_change < 0: + lines.append(f" Result: ✓ Memory reduced ({avg_change:.1f}%)") + else: + lines.append(f" Result: ⚠ Memory increased ({avg_change:.1f}%) - unexpected!") + + lines.append("") + lines.append("=" * 100) + return "\n".join(lines) + + +class Reporter: + """Format and display benchmark results.""" + + @staticmethod + def format_text(results: Dict[str, Dict[str, Any]]) -> str: + """Format results as readable text.""" + lines = [] + lines.append("=" * 80) + lines.append("Pickle Unpickling Benchmark Results") + lines.append("=" * 80) + lines.append("") + + for size_key, tests in results.items(): + 
lines.append(f"\n{size_key} Test Results") + lines.append("-" * 80) + + for test_name, data in tests.items(): + lines.append(f"\n Test: {data['test_name']}") + lines.append(f" Type: {data['object_type']}") + lines.append(f" Pickle size: {data['pickle_size_mb']:.2f} MB") + lines.append(f" Time (mean): {data['time']['mean']*1000:.2f} ms") + lines.append(f" Time (stdev): {data['time']['stdev']*1000:.2f} ms") + lines.append(f" Peak memory: {data['memory_peak_mb']:.2f} MB") + lines.append(f" Protocol: {data['protocol']}") + + lines.append("\n" + "=" * 80) + return "\n".join(lines) + + @staticmethod + def format_markdown(results: Dict[str, Dict[str, Any]]) -> str: + """Format results as markdown table.""" + lines = [] + lines.append("# Pickle Unpickling Benchmark Results\n") + + for size_key, tests in results.items(): + lines.append(f"## {size_key}\n") + lines.append("| Test | Type | Pickle Size (MB) | Time (ms) | Stdev (ms) | Peak Memory (MB) |") + lines.append("|------|------|------------------|-----------|------------|------------------|") + + for test_name, data in tests.items(): + lines.append( + f"| {data['test_name']} | " + f"{data['object_type']} | " + f"{data['pickle_size_mb']:.2f} | " + f"{data['time']['mean']*1000:.2f} | " + f"{data['time']['stdev']*1000:.2f} | " + f"{data['memory_peak_mb']:.2f} |" + ) + lines.append("") + + return "\n".join(lines) + + @staticmethod + def format_json(results: Dict[str, Dict[str, Any]]) -> str: + """Format results as JSON.""" + import json + return json.dumps(results, indent=2) + + @staticmethod + def format_antagonistic(results: Dict[str, Dict[str, Any]]) -> str: + """Format antagonistic benchmark results.""" + lines = [] + lines.append("=" * 100) + lines.append("Antagonistic Pickle Benchmark (Memory DoS Protection Test)") + lines.append("=" * 100) + lines.append("") + lines.append("This benchmark tests TWO types of DoS protection:") + lines.append(" 1. Truncated data attacks → Expect FAILURE with minimal memory before failure") + lines.append(" 2. 
Sparse memo attacks → Expect SUCCESS with dict-based memo (vs huge array)") + lines.append("") + + # Sort size keys numerically + for size_key in sorted(results.keys(), key=_extract_size_mb): + tests = results[size_key] + + # Determine test type from first test + if tests: + first_test = next(iter(tests.values())) + expected_outcome = first_test.get('expected_outcome', 'failure') + claimed_mb = first_test.get('claimed_mb', 'N/A') + + # Header varies by test type + if "Sparse Memo" in size_key: + lines.append(f"\n{size_key}") + lines.append("-" * 100) + elif "Multi-Claim" in size_key: + lines.append(f"\n{size_key}") + lines.append("-" * 100) + elif claimed_mb != 'N/A': + lines.append(f"\n{size_key} Claimed (actual: 1KB) - Expect Failure") + lines.append("-" * 100) + else: + lines.append(f"\n{size_key}") + lines.append("-" * 100) + + for test_name, data in tests.items(): + peak_mb = data['peak_memory_mb'] + claimed = data.get('claimed_mb', 'N/A') + expected_outcome = data.get('expected_outcome', 'failure') + succeeded = data.get('succeeded', False) + baseline_note = data.get('baseline_note', '') + + lines.append(f" {data['test_name']}") + + # Format output based on test type + if expected_outcome == 'success': + # Sparse memo test - show success with dict + status_icon = "✓" if succeeded else "✗" + lines.append(f" Peak memory: {peak_mb:8.2f} MB {status_icon}") + lines.append(f" Status: {data['error_type']}") + if baseline_note: + lines.append(f" {baseline_note}") + else: + # Truncated data test - show savings before failure + if claimed != 'N/A': + saved_mb = claimed - peak_mb + savings_pct = (saved_mb / claimed * 100) if claimed > 0 else 0 + lines.append(f" Peak memory: {peak_mb:8.2f} MB (claimed: {claimed:,} MB, saved: {saved_mb:.2f} MB, {savings_pct:.1f}%)") + else: + lines.append(f" Peak memory: {peak_mb:8.2f} MB") + lines.append(f" Status: {data['error_type']}") + + lines.append("\n" + "=" * 100) + + # Calculate statistics by test type + truncated_claimed = 0 + truncated_peak = 0 + truncated_count = 0 + + sparse_peak_total = 0 + sparse_count = 0 + + for size_key, tests in results.items(): + for test_name, data in tests.items(): + expected_outcome = data.get('expected_outcome', 'failure') + + if expected_outcome == 'failure': + # Truncated data test + claimed = data.get('claimed_mb', 0) + if claimed != 'N/A' and claimed > 0: + truncated_claimed += claimed + truncated_peak += data['peak_memory_mb'] + truncated_count += 1 + else: + # Sparse memo test + sparse_peak_total += data['peak_memory_mb'] + sparse_count += 1 + + lines.append("\nSummary:") + lines.append("") + + if truncated_count > 0: + avg_claimed = truncated_claimed / truncated_count + avg_peak = truncated_peak / truncated_count + avg_saved = avg_claimed - avg_peak + avg_savings_pct = (avg_saved / avg_claimed * 100) if avg_claimed > 0 else 0 + + lines.append(" Truncated Data Protection (chunked reading):") + lines.append(f" Average claimed: {avg_claimed:,.1f} MB") + lines.append(f" Average peak: {avg_peak:,.2f} MB") + lines.append(f" Average saved: {avg_saved:,.2f} MB ({avg_savings_pct:.1f}% reduction)") + lines.append(f" Status: ✓ Fails fast with minimal memory") + lines.append("") + + if sparse_count > 0: + avg_sparse_peak = sparse_peak_total / sparse_count + lines.append(" Sparse Memo Protection (dict-based memo):") + lines.append(f" Average peak: {avg_sparse_peak:,.2f} MB") + lines.append(f" Status: ✓ Succeeds with dict (vs GB-sized arrays without PR)") + lines.append(f" Note: Compare with --baseline to see actual memory 
savings") + + lines.append("") + lines.append("=" * 100) + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description="Benchmark pickle unpickling performance for large objects" + ) + parser.add_argument( + '--sizes', + type=int, + nargs='+', + default=None, + metavar='MiB', + help=f'Object sizes to test in MiB (default: {DEFAULT_SIZES_MIB})' + ) + parser.add_argument( + '--protocol', + type=int, + default=5, + choices=[0, 1, 2, 3, 4, 5], + help='Pickle protocol version (default: 5)' + ) + parser.add_argument( + '--iterations', + type=int, + default=3, + help='Number of benchmark iterations (default: 3)' + ) + parser.add_argument( + '--format', + choices=['text', 'markdown', 'json'], + default='text', + help='Output format (default: text)' + ) + parser.add_argument( + '--baseline', + type=str, + metavar='PYTHON', + help='Path to baseline Python interpreter for comparison (e.g., ../main-build/python)' + ) + parser.add_argument( + '--antagonistic', + action='store_true', + help='Run antagonistic/malicious pickle tests (DoS protection benchmark)' + ) + + args = parser.parse_args() + + # Handle antagonistic mode + if args.antagonistic: + # Antagonistic mode uses claimed sizes in MB, not actual data sizes + if args.sizes is None: + claimed_sizes_mb = AntagonisticTestSuite.DEFAULT_ANTAGONISTIC_SIZES_MB + else: + claimed_sizes_mb = args.sizes + + print(f"Running ANTAGONISTIC pickle benchmark (DoS protection test)...") + print(f"Claimed sizes: {claimed_sizes_mb} MiB (actual data: 1KB each)") + print(f"NOTE: These pickles will FAIL to unpickle (expected)") + print() + + # Run antagonistic benchmark suite + suite = AntagonisticTestSuite(claimed_sizes_mb) + results = suite.run_all_tests() + + # Format and display results + if args.baseline: + # Verify baseline Python exists + baseline_path = Path(args.baseline) + if not baseline_path.exists(): + print(f"Error: Baseline Python not found: {args.baseline}", file=sys.stderr) + return 1 + + # Run baseline benchmark + baseline_results = Comparator.run_baseline_benchmark(args.baseline, args) + + # Show comparison + comparison_output = Comparator.format_antagonistic_comparison(results, baseline_results) + print(comparison_output) + else: + # Format and display results + output = _format_output(results, args.format, is_antagonistic=True) + print(output) + + else: + # Normal mode: legitimate pickle benchmarks + # Convert sizes from MiB to bytes + if args.sizes is None: + sizes_bytes = DEFAULT_SIZES + else: + sizes_bytes = [size * (1 << 20) for size in args.sizes] + + print(f"Running pickle benchmark with protocol {args.protocol}...") + print(f"Test sizes: {[f'{s/(1<<20):.2f}MiB' for s in sizes_bytes]}") + print(f"Iterations per test: {args.iterations}") + print() + + # Run benchmark suite + suite = TestSuite(sizes_bytes, args.protocol, args.iterations) + results = suite.run_all_tests() + + # If baseline comparison requested, run baseline and compare + if args.baseline: + # Verify baseline Python exists + baseline_path = Path(args.baseline) + if not baseline_path.exists(): + print(f"Error: Baseline Python not found: {args.baseline}", file=sys.stderr) + return 1 + + # Run baseline benchmark + baseline_results = Comparator.run_baseline_benchmark(args.baseline, args) + + # Show comparison + comparison_output = Comparator.format_comparison(results, baseline_results) + print(comparison_output) + + else: + # Format and display results + output = _format_output(results, args.format, is_antagonistic=False) + print(output) + + return 0 + 
+ +if __name__ == '__main__': + sys.exit(main())