diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst index dccf78ef8c0128..3a931e25de91e5 100644 --- a/Doc/library/socket.rst +++ b/Doc/library/socket.rst @@ -445,6 +445,11 @@ Constants Added ``IP_PKTINFO``, ``IP_UNBLOCK_SOURCE``, ``IP_BLOCK_SOURCE``, ``IP_ADD_SOURCE_MEMBERSHIP``, ``IP_DROP_SOURCE_MEMBERSHIP``. + .. versionchanged:: 3.13 + Added ``SO_BINDTOIFINDEX``. On Linux this constant can be used in the + same way that ``SO_BINDTODEVICE`` is used, but with the index of a + network interface instead of its name. + .. data:: AF_CAN PF_CAN SOL_CAN_* diff --git a/Include/internal/pycore_time.h b/Include/internal/pycore_time.h index abef52e3f9f0fc..9692bbc89711a5 100644 --- a/Include/internal/pycore_time.h +++ b/Include/internal/pycore_time.h @@ -62,7 +62,6 @@ extern "C" { struct timeval; #endif -typedef PyTime_t PyTime_t; #define _SIZEOF_PYTIME_T 8 typedef enum { @@ -253,37 +252,28 @@ typedef struct { double resolution; } _Py_clock_info_t; -// Get the current time from the system clock. -// -// If the internal clock fails, silently ignore the error and return 0. -// On integer overflow, silently ignore the overflow and clamp the clock to -// [_PyTime_MIN; _PyTime_MAX]. +// Similar to PyTime_Time() but silently ignore the error and return 0 if the +// internal clock fails. // -// Use _PyTime_GetSystemClockWithInfo or the public PyTime_Time() to check +// Use _PyTime_TimeWithInfo() or the public PyTime_Time() to check // for failure. // Export for '_random' shared extension. -PyAPI_FUNC(PyTime_t) _PyTime_GetSystemClock(void); +PyAPI_FUNC(PyTime_t) _PyTime_TimeUnchecked(void); // Get the current time from the system clock. // On success, set *t and *info (if not NULL), and return 0. // On error, raise an exception and return -1. -extern int _PyTime_GetSystemClockWithInfo( +extern int _PyTime_TimeWithInfo( PyTime_t *t, _Py_clock_info_t *info); -// Get the time of a monotonic clock, i.e. a clock that cannot go backwards. -// The clock is not affected by system clock updates. The reference point of -// the returned value is undefined, so that only the difference between the -// results of consecutive calls is valid. +// Similar to PyTime_Monotonic() but silently ignore the error and return 0 if +// the internal clock fails. // -// If the internal clock fails, silently ignore the error and return 0. -// On integer overflow, silently ignore the overflow and clamp the clock to -// [_PyTime_MIN; _PyTime_MAX]. -// -// Use _PyTime_GetMonotonicClockWithInfo or the public PyTime_Monotonic() +// Use _PyTime_MonotonicWithInfo() or the public PyTime_Monotonic() // to check for failure. // Export for '_random' shared extension. -PyAPI_FUNC(PyTime_t) _PyTime_GetMonotonicClock(void); +PyAPI_FUNC(PyTime_t) _PyTime_MonotonicUnchecked(void); // Get the time of a monotonic clock, i.e. a clock that cannot go backwards. // The clock is not affected by system clock updates. The reference point of @@ -294,7 +284,7 @@ PyAPI_FUNC(PyTime_t) _PyTime_GetMonotonicClock(void); // // Return 0 on success, raise an exception and return -1 on error. // Export for '_testsinglephase' shared extension. -PyAPI_FUNC(int) _PyTime_GetMonotonicClockWithInfo( +PyAPI_FUNC(int) _PyTime_MonotonicWithInfo( PyTime_t *t, _Py_clock_info_t *info); @@ -309,17 +299,13 @@ PyAPI_FUNC(int) _PyTime_localtime(time_t t, struct tm *tm); // Export for '_datetime' shared extension. PyAPI_FUNC(int) _PyTime_gmtime(time_t t, struct tm *tm); -// Get the performance counter: clock with the highest available resolution to -// measure a short duration. 
+// Similar to PyTime_PerfCounter() but silently ignore the error and return 0 +// if the internal clock fails. // -// If the internal clock fails, silently ignore the error and return 0. -// On integer overflow, silently ignore the overflow and clamp the clock to -// [_PyTime_MIN; _PyTime_MAX]. -// -// Use _PyTime_GetPerfCounterWithInfo() or the public PyTime_PerfCounter -// to check for failure. +// Use _PyTime_PerfCounterWithInfo() or the public PyTime_PerfCounter() to +// check for failure. // Export for '_lsprof' shared extension. -PyAPI_FUNC(PyTime_t) _PyTime_GetPerfCounter(void); +PyAPI_FUNC(PyTime_t) _PyTime_PerfCounterUnchecked(void); // Get the performance counter: clock with the highest available resolution to @@ -328,7 +314,7 @@ PyAPI_FUNC(PyTime_t) _PyTime_GetPerfCounter(void); // Fill info (if set) with information of the function used to get the time. // // Return 0 on success, raise an exception and return -1 on error. -extern int _PyTime_GetPerfCounterWithInfo( +extern int _PyTime_PerfCounterWithInfo( PyTime_t *t, _Py_clock_info_t *info); @@ -341,12 +327,12 @@ extern int _PyTime_GetPerfCounterWithInfo( // --- _PyDeadline ----------------------------------------------------------- // Create a deadline. -// Pseudo code: _PyTime_GetMonotonicClock() + timeout. +// Pseudo code: _PyTime_MonotonicUnchecked() + timeout. // Export for '_ssl' shared extension. PyAPI_FUNC(PyTime_t) _PyDeadline_Init(PyTime_t timeout); // Get remaining time from a deadline. -// Pseudo code: deadline - _PyTime_GetMonotonicClock(). +// Pseudo code: deadline - _PyTime_MonotonicUnchecked(). // Export for '_ssl' shared extension. PyAPI_FUNC(PyTime_t) _PyDeadline_Get(PyTime_t deadline); diff --git a/Lib/test/list_tests.py b/Lib/test/list_tests.py index d9ab21d4941cdb..26118e14bb97e0 100644 --- a/Lib/test/list_tests.py +++ b/Lib/test/list_tests.py @@ -562,3 +562,8 @@ def test_exhausted_iterator(self): self.assertEqual(list(exhit), []) self.assertEqual(list(empit), [9]) self.assertEqual(a, self.type2test([1, 2, 3, 9])) + + # gh-115733: Crash when iterating over exhausted iterator + exhit = iter(self.type2test([1, 2, 3])) + for _ in exhit: + next(exhit, 1) diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py index d53d6a057aa872..38c6fa4b47d0c9 100644 --- a/Lib/test/test_capi/test_opt.py +++ b/Lib/test/test_capi/test_opt.py @@ -32,16 +32,16 @@ def clear_executors(func): class TestOptimizerAPI(unittest.TestCase): - def test_get_counter_optimizer_dealloc(self): + def test_new_counter_optimizer_dealloc(self): # See gh-108727 def f(): - _testinternalcapi.get_counter_optimizer() + _testinternalcapi.new_counter_optimizer() f() def test_get_set_optimizer(self): old = _testinternalcapi.get_optimizer() - opt = _testinternalcapi.get_counter_optimizer() + opt = _testinternalcapi.new_counter_optimizer() try: _testinternalcapi.set_optimizer(opt) self.assertEqual(_testinternalcapi.get_optimizer(), opt) @@ -62,7 +62,7 @@ def loop(): loop = ns['loop'] for repeat in range(5): - opt = _testinternalcapi.get_counter_optimizer() + opt = _testinternalcapi.new_counter_optimizer() with temporary_optimizer(opt): self.assertEqual(opt.get_count(), 0) with clear_executors(loop): @@ -90,7 +90,7 @@ def long_loop(): """), ns, ns) long_loop = ns['long_loop'] - opt = _testinternalcapi.get_counter_optimizer() + opt = _testinternalcapi.new_counter_optimizer() with temporary_optimizer(opt): self.assertEqual(opt.get_count(), 0) long_loop() @@ -102,7 +102,7 @@ def testfunc(x): while i < x: i += 1 - opt = 
_testinternalcapi.get_counter_optimizer() + opt = _testinternalcapi.new_counter_optimizer() with temporary_optimizer(opt): testfunc(1000) code, replace_code = testfunc.__code__, testfunc.__code__.replace() @@ -123,11 +123,20 @@ def get_first_executor(func): return None +def iter_opnames(ex): + for item in ex: + yield item[0] + + +def get_opnames(ex): + return set(iter_opnames(ex)) + + class TestExecutorInvalidation(unittest.TestCase): def setUp(self): self.old = _testinternalcapi.get_optimizer() - self.opt = _testinternalcapi.get_counter_optimizer() + self.opt = _testinternalcapi.new_counter_optimizer() _testinternalcapi.set_optimizer(self.opt) def tearDown(self): @@ -176,7 +185,7 @@ def f(): pass """), ns, ns) f = ns['f'] - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): f() exe = get_first_executor(f) @@ -189,7 +198,7 @@ def test_sys__clear_internal_caches(self): def f(): for _ in range(1000): pass - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): f() exe = get_first_executor(f) @@ -208,13 +217,13 @@ def testfunc(x): while i < x: i += 1 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(1000) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_SET_IP", uops) self.assertIn("_LOAD_FAST_0", uops) @@ -255,7 +264,7 @@ def many_vars(): """), ns, ns) many_vars = ns["many_vars"] - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): ex = get_first_executor(many_vars) self.assertIsNone(ex) @@ -263,7 +272,8 @@ def many_vars(): ex = get_first_executor(many_vars) self.assertIsNotNone(ex) - self.assertIn(("_LOAD_FAST", 259, 0), list(ex)) + self.assertTrue(any((opcode, oparg, operand) == ("_LOAD_FAST", 259, 0) + for opcode, oparg, _, operand in list(ex))) def test_unspecialized_unpack(self): # An example of an unspecialized opcode @@ -277,14 +287,14 @@ def testfunc(x): while i < x: i += 1 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(20) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_UNPACK_SEQUENCE", uops) def test_pop_jump_if_false(self): @@ -293,13 +303,13 @@ def testfunc(n): while i < n: i += 1 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(20) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_IS_TRUE_POP", uops) def test_pop_jump_if_none(self): @@ -308,13 +318,13 @@ def testfunc(a): if x is None: x = 0 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(range(20)) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_IS_NOT_NONE_POP", uops) def test_pop_jump_if_not_none(self): @@ -324,13 +334,13 @@ def testfunc(a): if x is not None: x = 0 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(range(20)) ex = 
get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_IS_NONE_POP", uops) def test_pop_jump_if_true(self): @@ -339,13 +349,13 @@ def testfunc(n): while not i >= n: i += 1 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(20) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_IS_FALSE_POP", uops) def test_jump_backward(self): @@ -354,13 +364,13 @@ def testfunc(n): while i < n: i += 1 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(20) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_JUMP_TO_TOP", uops) def test_jump_forward(self): @@ -374,13 +384,13 @@ def testfunc(n): a += 1 return a - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(20) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) # Since there is no JUMP_FORWARD instruction, # look for indirect evidence: the += operator self.assertIn("_BINARY_OP_ADD_INT", uops) @@ -392,7 +402,7 @@ def testfunc(n): total += i return total - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): total = testfunc(20) self.assertEqual(total, 190) @@ -401,7 +411,7 @@ def testfunc(n): self.assertIsNotNone(ex) # for i, (opname, oparg) in enumerate(ex): # print(f"{i:4d}: {opname:<20s} {oparg:3d}") - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_NOT_EXHAUSTED_RANGE", uops) # Verification that the jump goes past END_FOR # is done by manual inspection of the output @@ -413,7 +423,7 @@ def testfunc(a): total += i return total - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): a = list(range(20)) total = testfunc(a) @@ -423,7 +433,7 @@ def testfunc(a): self.assertIsNotNone(ex) # for i, (opname, oparg) in enumerate(ex): # print(f"{i:4d}: {opname:<20s} {oparg:3d}") - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_NOT_EXHAUSTED_LIST", uops) # Verification that the jump goes past END_FOR # is done by manual inspection of the output @@ -435,7 +445,7 @@ def testfunc(a): total += i return total - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): a = tuple(range(20)) total = testfunc(a) @@ -445,7 +455,7 @@ def testfunc(a): self.assertIsNotNone(ex) # for i, (opname, oparg) in enumerate(ex): # print(f"{i:4d}: {opname:<20s} {oparg:3d}") - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_NOT_EXHAUSTED_TUPLE", uops) # Verification that the jump goes past END_FOR # is done by manual inspection of the output @@ -455,7 +465,7 @@ def testfunc(it): for x in it: pass - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): a = [1, 2, 3] it = iter(a) @@ -471,13 +481,13 @@ def dummy(x): for i in range(n): dummy(i) - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with 
temporary_optimizer(opt): testfunc(20) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_PUSH_FRAME", uops) self.assertIn("_BINARY_OP_ADD_INT", uops) @@ -489,13 +499,13 @@ def testfunc(n): else: i = 1 - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): testfunc(20) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_IS_FALSE_POP", uops) def test_for_iter_tier_two(self): @@ -517,7 +527,7 @@ def testfunc(n, m): x += 1000*i + j return x - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): x = testfunc(10, 10) @@ -525,7 +535,7 @@ def testfunc(n, m): ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_FOR_ITER_TIER_TWO", uops) def test_confidence_score(self): @@ -546,14 +556,14 @@ def testfunc(n): bits += 1 return bits - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): x = testfunc(20) self.assertEqual(x, 40) ex = get_first_executor(testfunc) self.assertIsNotNone(ex) - ops = [opname for opname, _, _ in ex] + ops = list(iter_opnames(ex)) #Since branch is 50/50 the trace could go either way. count = ops.count("_GUARD_IS_TRUE_POP") + ops.count("_GUARD_IS_FALSE_POP") self.assertLessEqual(count, 2) @@ -562,7 +572,7 @@ class TestUopsOptimization(unittest.TestCase): def _run_with_optimizer(self, testfunc, arg): res = None - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() with temporary_optimizer(opt): res = testfunc(arg) @@ -582,8 +592,8 @@ def testfunc(loops): res, ex = self._run_with_optimizer(testfunc, 32) self.assertIsNotNone(ex) self.assertEqual(res, 63) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] - guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + binop_count = [opname for opname in iter_opnames(ex) if opname == "_BINARY_OP_ADD_INT"] + guard_both_int_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_INT"] self.assertGreaterEqual(len(binop_count), 3) self.assertLessEqual(len(guard_both_int_count), 1) @@ -598,7 +608,7 @@ def testfunc(loops): num += 1 return a - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() res = None with temporary_optimizer(opt): res = testfunc(32) @@ -606,8 +616,8 @@ def testfunc(loops): ex = get_first_executor(testfunc) self.assertIsNotNone(ex) self.assertEqual(res, 124) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] - guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + binop_count = [opname for opname in iter_opnames(ex) if opname == "_BINARY_OP_ADD_INT"] + guard_both_int_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_INT"] self.assertGreaterEqual(len(binop_count), 3) self.assertLessEqual(len(guard_both_int_count), 1) @@ -622,7 +632,7 @@ def testfunc(loops): num += 1 return x - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() res = None with temporary_optimizer(opt): res = testfunc(32) @@ -630,8 +640,8 @@ def testfunc(loops): ex = get_first_executor(testfunc) 
self.assertIsNotNone(ex) self.assertEqual(res, 124) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] - guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + binop_count = [opname for opname in iter_opnames(ex) if opname == "_BINARY_OP_ADD_INT"] + guard_both_int_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_INT"] self.assertGreaterEqual(len(binop_count), 3) self.assertLessEqual(len(guard_both_int_count), 1) @@ -648,7 +658,7 @@ def testfunc(loops): res, ex = self._run_with_optimizer(testfunc, 64) self.assertIsNotNone(ex) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] + binop_count = [opname for opname in iter_opnames(ex) if opname == "_BINARY_OP_ADD_INT"] self.assertGreaterEqual(len(binop_count), 3) def test_call_py_exact_args(self): @@ -660,7 +670,7 @@ def dummy(x): res, ex = self._run_with_optimizer(testfunc, 32) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_PUSH_FRAME", uops) self.assertIn("_BINARY_OP_ADD_INT", uops) self.assertNotIn("_CHECK_PEP_523", uops) @@ -675,7 +685,7 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertEqual(res, 62) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertNotIn("_GUARD_BOTH_INT", uops) def test_int_value_numbering(self): @@ -693,9 +703,9 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertEqual(res, 4) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertIn("_GUARD_BOTH_INT", uops) - guard_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + guard_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_INT"] self.assertEqual(len(guard_count), 1) def test_comprehension(self): @@ -706,7 +716,7 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertEqual(res, list(range(32))) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) self.assertNotIn("_BINARY_OP_ADD_INT", uops) def test_call_py_exact_args_disappearing(self): @@ -717,7 +727,7 @@ def testfunc(n): for i in range(n): dummy(i) - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() # Trigger specialization testfunc(8) with temporary_optimizer(opt): @@ -752,18 +762,21 @@ def get_first_executor(func): pass return None + def get_opnames(ex): + return {item[0] for item in ex} + def testfunc(n): for i in range(n): x = range(i) return x - opt = _testinternalcapi.get_uop_optimizer() + opt = _testinternalcapi.new_uop_optimizer() _testinternalcapi.set_optimizer(opt) testfunc(64) ex = get_first_executor(testfunc) assert ex is not None - uops = {opname for opname, _, _ in ex} + uops = get_opnames(ex) assert "_LOAD_GLOBAL_BUILTINS" not in uops assert "_LOAD_CONST_INLINE_BORROW_WITH_NULL" in uops """)) @@ -779,8 +792,8 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertAlmostEqual(res, 4.2) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - guard_both_float_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_FLOAT"] + uops = get_opnames(ex) + guard_both_float_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_FLOAT"] self.assertLessEqual(len(guard_both_float_count), 1) # TODO gh-115506: this assertion may change after propagating constants. 
# We'll also need to verify that propagation actually occurs. @@ -796,8 +809,8 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertAlmostEqual(res, -2.2) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - guard_both_float_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_FLOAT"] + uops = get_opnames(ex) + guard_both_float_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_FLOAT"] self.assertLessEqual(len(guard_both_float_count), 1) # TODO gh-115506: this assertion may change after propagating constants. # We'll also need to verify that propagation actually occurs. @@ -813,8 +826,8 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertAlmostEqual(res, 2 ** 32) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - guard_both_float_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_FLOAT"] + uops = get_opnames(ex) + guard_both_float_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_FLOAT"] self.assertLessEqual(len(guard_both_float_count), 1) # TODO gh-115506: this assertion may change after propagating constants. # We'll also need to verify that propagation actually occurs. @@ -833,8 +846,8 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertTrue(res) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - guard_both_float_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_FLOAT"] + uops = get_opnames(ex) + guard_both_float_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_FLOAT"] self.assertLessEqual(len(guard_both_float_count), 1) self.assertIn("_COMPARE_OP_FLOAT", uops) @@ -851,8 +864,8 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertTrue(res) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - guard_both_float_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + uops = get_opnames(ex) + guard_both_float_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_INT"] self.assertLessEqual(len(guard_both_float_count), 1) self.assertIn("_COMPARE_OP_INT", uops) @@ -869,8 +882,8 @@ def testfunc(n): res, ex = self._run_with_optimizer(testfunc, 32) self.assertTrue(res) self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - guard_both_float_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_UNICODE"] + uops = get_opnames(ex) + guard_both_float_count = [opname for opname in iter_opnames(ex) if opname == "_GUARD_BOTH_UNICODE"] self.assertLessEqual(len(guard_both_float_count), 1) self.assertIn("_COMPARE_OP_STR", uops) diff --git a/Lib/test/test_monitoring.py b/Lib/test/test_monitoring.py index 60b6326bfbad5e..58aa4bca7534b1 100644 --- a/Lib/test/test_monitoring.py +++ b/Lib/test/test_monitoring.py @@ -1799,7 +1799,7 @@ class TestOptimizer(MonitoringTestBase, unittest.TestCase): def setUp(self): import _testinternalcapi self.old_opt = _testinternalcapi.get_optimizer() - opt = _testinternalcapi.get_counter_optimizer() + opt = _testinternalcapi.new_counter_optimizer() _testinternalcapi.set_optimizer(opt) super(TestOptimizer, self).setUp() diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-02-20-18-49-02.gh-issue-115733.51Zb85.rst b/Misc/NEWS.d/next/Core and Builtins/2024-02-20-18-49-02.gh-issue-115733.51Zb85.rst new file mode 100644 index 00000000000000..5cbb292065b5da --- /dev/null +++ b/Misc/NEWS.d/next/Core and 
Builtins/2024-02-20-18-49-02.gh-issue-115733.51Zb85.rst @@ -0,0 +1 @@ +Fix crash when calling ``next()`` on exhausted list iterators. diff --git a/Misc/NEWS.d/next/Library/2023-05-01-22-28-57.gh-issue-104061.vxfBXf.rst b/Misc/NEWS.d/next/Library/2023-05-01-22-28-57.gh-issue-104061.vxfBXf.rst new file mode 100644 index 00000000000000..e15a811f904352 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-05-01-22-28-57.gh-issue-104061.vxfBXf.rst @@ -0,0 +1 @@ +Add :data:`socket.SO_BINDTOIFINDEX` constant. diff --git a/Modules/_datetimemodule.c b/Modules/_datetimemodule.c index 3ae95a8c9a87a7..9fd59c34de6cef 100644 --- a/Modules/_datetimemodule.c +++ b/Modules/_datetimemodule.c @@ -5133,7 +5133,7 @@ datetime_from_timestamp(PyObject *cls, TM_FUNC f, PyObject *timestamp, static PyObject * datetime_best_possible(PyObject *cls, TM_FUNC f, PyObject *tzinfo) { - PyTime_t ts = _PyTime_GetSystemClock(); + PyTime_t ts = _PyTime_TimeUnchecked(); time_t secs; int us; diff --git a/Modules/_lsprof.c b/Modules/_lsprof.c index 928baf034f62a3..29a80c70c0db6a 100644 --- a/Modules/_lsprof.c +++ b/Modules/_lsprof.c @@ -121,7 +121,7 @@ call_timer(ProfilerObject *pObj) return CallExternalTimer(pObj); } else { - return _PyTime_GetPerfCounter(); + return _PyTime_PerfCounterUnchecked(); } } diff --git a/Modules/_randommodule.c b/Modules/_randommodule.c index 62f1acaf887296..920645b453536a 100644 --- a/Modules/_randommodule.c +++ b/Modules/_randommodule.c @@ -75,7 +75,7 @@ #include "pycore_modsupport.h" // _PyArg_NoKeywords() #include "pycore_moduleobject.h" // _PyModule_GetState() #include "pycore_pylifecycle.h" // _PyOS_URandomNonblock() -#include "pycore_time.h" // _PyTime_GetSystemClock() +#include "pycore_time.h" // _PyTime_TimeUnchecked() #ifdef HAVE_UNISTD_H # include // getpid() @@ -266,7 +266,7 @@ random_seed_time_pid(RandomObject *self) PyTime_t now; uint32_t key[5]; - now = _PyTime_GetSystemClock(); + now = _PyTime_TimeUnchecked(); key[0] = (uint32_t)(now & 0xffffffffU); key[1] = (uint32_t)(now >> 32); @@ -278,7 +278,7 @@ random_seed_time_pid(RandomObject *self) key[2] = 0; #endif - now = _PyTime_GetMonotonicClock(); + now = _PyTime_MonotonicUnchecked(); key[3] = (uint32_t)(now & 0xffffffffU); key[4] = (uint32_t)(now >> 32); diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c index bcc431a27001f2..0d23b1899f22e4 100644 --- a/Modules/_testinternalcapi.c +++ b/Modules/_testinternalcapi.c @@ -960,13 +960,13 @@ iframe_getlasti(PyObject *self, PyObject *frame) } static PyObject * -get_counter_optimizer(PyObject *self, PyObject *arg) +new_counter_optimizer(PyObject *self, PyObject *arg) { return PyUnstable_Optimizer_NewCounter(); } static PyObject * -get_uop_optimizer(PyObject *self, PyObject *arg) +new_uop_optimizer(PyObject *self, PyObject *arg) { return PyUnstable_Optimizer_NewUOpOptimizer(); } @@ -1711,8 +1711,8 @@ static PyMethodDef module_functions[] = { {"get_optimizer", get_optimizer, METH_NOARGS, NULL}, {"set_optimizer", set_optimizer, METH_O, NULL}, {"get_executor", _PyCFunction_CAST(get_executor), METH_FASTCALL, NULL}, - {"get_counter_optimizer", get_counter_optimizer, METH_NOARGS, NULL}, - {"get_uop_optimizer", get_uop_optimizer, METH_NOARGS, NULL}, + {"new_counter_optimizer", new_counter_optimizer, METH_NOARGS, NULL}, + {"new_uop_optimizer", new_uop_optimizer, METH_NOARGS, NULL}, {"add_executor_dependency", add_executor_dependency, METH_VARARGS, NULL}, {"invalidate_executors", invalidate_executors, METH_O, NULL}, {"pending_threadfunc", _PyCFunction_CAST(pending_threadfunc), diff --git 
a/Modules/_testinternalcapi/test_lock.c b/Modules/_testinternalcapi/test_lock.c index 724bbd0e8f0c9d..1c5048170e9f2e 100644 --- a/Modules/_testinternalcapi/test_lock.c +++ b/Modules/_testinternalcapi/test_lock.c @@ -2,7 +2,7 @@ #include "parts.h" #include "pycore_lock.h" -#include "pycore_time.h" // _PyTime_GetMonotonicClock() +#include "pycore_time.h" // _PyTime_MonotonicUnchecked() #include "clinic/test_lock.c.h" @@ -290,7 +290,7 @@ _testinternalcapi_benchmark_locks_impl(PyObject *module, goto exit; } - PyTime_t start = _PyTime_GetMonotonicClock(); + PyTime_t start = _PyTime_MonotonicUnchecked(); for (Py_ssize_t i = 0; i < num_threads; i++) { thread_data[i].bench_data = &bench_data; @@ -307,7 +307,7 @@ _testinternalcapi_benchmark_locks_impl(PyObject *module, } Py_ssize_t total_iters = bench_data.total_iters; - PyTime_t end = _PyTime_GetMonotonicClock(); + PyTime_t end = _PyTime_MonotonicUnchecked(); // Return the total number of acquisitions and the number of acquisitions // for each thread. diff --git a/Modules/_testsinglephase.c b/Modules/_testsinglephase.c index dccac2852a567a..58d22e2d5dbe56 100644 --- a/Modules/_testsinglephase.c +++ b/Modules/_testsinglephase.c @@ -71,13 +71,13 @@ _set_initialized(PyTime_t *initialized) { /* We go strictly monotonic to ensure each time is unique. */ PyTime_t prev; - if (_PyTime_GetMonotonicClockWithInfo(&prev, NULL) != 0) { + if (_PyTime_MonotonicWithInfo(&prev, NULL) != 0) { return -1; } /* We do a busy sleep since the interval should be super short. */ PyTime_t t; do { - if (_PyTime_GetMonotonicClockWithInfo(&t, NULL) != 0) { + if (_PyTime_MonotonicWithInfo(&t, NULL) != 0) { return -1; } } while (t == prev); diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c index 298c0e29d0d9b8..836cf6c05b3196 100644 --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -7927,6 +7927,9 @@ socket_exec(PyObject *m) #ifdef SO_BINDTODEVICE ADD_INT_MACRO(m, SO_BINDTODEVICE); #endif +#ifdef SO_BINDTOIFINDEX + ADD_INT_MACRO(m, SO_BINDTOIFINDEX); +#endif #ifdef SO_PRIORITY ADD_INT_MACRO(m, SO_PRIORITY); #endif diff --git a/Modules/timemodule.c b/Modules/timemodule.c index 73b9fc067af6ff..28dba903d2b9e8 100644 --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -106,8 +106,8 @@ _PyFloat_FromPyTime(PyTime_t t) static int get_system_time(PyTime_t *t) { - // Avoid _PyTime_GetSystemClock() which silently ignores errors. - return _PyTime_GetSystemClockWithInfo(t, NULL); + // Avoid _PyTime_TimeUnchecked() which silently ignores errors. + return _PyTime_TimeWithInfo(t, NULL); } @@ -1159,8 +1159,8 @@ should not be relied on."); static int get_monotonic(PyTime_t *t) { - // Avoid _PyTime_GetMonotonicClock() which silently ignores errors. - return _PyTime_GetMonotonicClockWithInfo(t, NULL); + // Avoid _PyTime_MonotonicUnchecked() which silently ignores errors. + return _PyTime_MonotonicWithInfo(t, NULL); } @@ -1198,8 +1198,8 @@ Monotonic clock, cannot go backward, as nanoseconds."); static int get_perf_counter(PyTime_t *t) { - // Avoid _PyTime_GetPerfCounter() which silently ignores errors. - return _PyTime_GetPerfCounterWithInfo(t, NULL); + // Avoid _PyTime_PerfCounterUnchecked() which silently ignores errors. 
+ return _PyTime_PerfCounterWithInfo(t, NULL); } @@ -1615,17 +1615,17 @@ time_get_clock_info(PyObject *module, PyObject *args) #endif if (strcmp(name, "time") == 0) { - if (_PyTime_GetSystemClockWithInfo(&t, &info) < 0) { + if (_PyTime_TimeWithInfo(&t, &info) < 0) { return NULL; } } else if (strcmp(name, "monotonic") == 0) { - if (_PyTime_GetMonotonicClockWithInfo(&t, &info) < 0) { + if (_PyTime_MonotonicWithInfo(&t, &info) < 0) { return NULL; } } else if (strcmp(name, "perf_counter") == 0) { - if (_PyTime_GetPerfCounterWithInfo(&t, &info) < 0) { + if (_PyTime_PerfCounterWithInfo(&t, &info) < 0) { return NULL; } } diff --git a/Objects/listobject.c b/Objects/listobject.c index eb466260318ec1..b07970298b8a00 100644 --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -3537,13 +3537,13 @@ listreviter_next(PyObject *self) { listreviterobject *it = (listreviterobject *)self; assert(it != NULL); - PyListObject *seq = it->it_seq; - assert(PyList_Check(seq)); - Py_ssize_t index = LOAD_SSIZE(it->it_index); if (index < 0) { return NULL; } + + PyListObject *seq = it->it_seq; + assert(PyList_Check(seq)); PyObject *item = list_get_item_ref(seq, index); if (item != NULL) { STORE_SSIZE(it->it_index, index - 1); diff --git a/Python/bytecodes.c b/Python/bytecodes.c index 9d790a9d3e6577..5835b80582b3bc 100644 --- a/Python/bytecodes.c +++ b/Python/bytecodes.c @@ -2612,7 +2612,7 @@ dummy_func( assert(Py_TYPE(iter) == &PyListIter_Type); STAT_INC(FOR_ITER, hit); PyListObject *seq = it->it_seq; - if ((size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) { + if (seq == NULL || (size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) { it->it_index = -1; #ifndef Py_GIL_DISABLED if (seq != NULL) { @@ -2633,6 +2633,7 @@ dummy_func( _PyListIterObject *it = (_PyListIterObject *)iter; assert(Py_TYPE(iter) == &PyListIter_Type); PyListObject *seq = it->it_seq; + DEOPT_IF(seq == NULL); DEOPT_IF((size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)); } diff --git a/Python/ceval.c b/Python/ceval.c index b7a5d629c9466b..06c136aeb252c9 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -649,7 +649,10 @@ static const _Py_CODEUNIT _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS[] = { extern const struct _PyCode_DEF(8) _Py_InitCleanup; -extern const char *_PyUOpName(int index); +#ifdef Py_DEBUG +extern void _PyUOpPrint(const _PyUOpInstruction *uop); +#endif + /* Disable unused label warnings. They are handy for debugging, even if computed gotos aren't used. */ @@ -1006,14 +1009,14 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int assert(next_uop->opcode == _START_EXECUTOR || next_uop->opcode == _COLD_EXIT); for (;;) { uopcode = next_uop->opcode; - DPRINTF(3, - "%4d: uop %s, oparg %d, operand %" PRIu64 ", target %d, stack_level %d\n", - (int)(next_uop - (current_executor == NULL ? next_uop : current_executor->trace)), - _PyUOpName(uopcode), - next_uop->oparg, - next_uop->operand, - next_uop->target, +#ifdef Py_DEBUG + if (lltrace >= 3) { + printf("%4d uop: ", (int)(next_uop - (current_executor == NULL ? 
next_uop : current_executor->trace))); + _PyUOpPrint(next_uop); + printf(" stack_level=%d\n", (int)(stack_pointer - _PyFrame_Stackbase(frame))); + } +#endif next_uop++; OPT_STAT_INC(uops_executed); UOP_STAT_INC(uopcode, execution_count); @@ -1028,9 +1031,9 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int default: #ifdef Py_DEBUG { - fprintf(stderr, "Unknown uop %d, oparg %d, operand %" PRIu64 " @ %d\n", - next_uop[-1].opcode, next_uop[-1].oparg, next_uop[-1].operand, - (int)(next_uop - (current_executor == NULL ? next_uop : current_executor->trace) - 1)); + printf("Unknown uop: "); + _PyUOpPrint(&next_uop[-1]); + printf(" @ %d\n", (int)(next_uop - current_executor->trace - 1)); Py_FatalError("Unknown uop"); } #else @@ -1058,10 +1061,15 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int pop_1_error_tier_two: STACK_SHRINK(1); error_tier_two: - DPRINTF(2, "Error: [UOp %d (%s), oparg %d, operand %" PRIu64 ", target %d @ %d -> %s]\n", - uopcode, _PyUOpName(uopcode), next_uop[-1].oparg, next_uop[-1].operand, next_uop[-1].target, - (int)(next_uop - current_executor->trace - 1), - _PyOpcode_OpName[frame->instr_ptr->op.code]); +#ifdef Py_DEBUG + if (lltrace >= 2) { + printf("Error: [UOp "); + _PyUOpPrint(&next_uop[-1]); + printf(" @ %d -> %s]\n", + (int)(next_uop - current_executor->trace - 1), + _PyOpcode_OpName[frame->instr_ptr->op.code]); + } +#endif OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); frame->return_offset = 0; // Don't leave this random _PyFrame_SetStackPointer(frame, stack_pointer); @@ -1072,9 +1080,14 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int // Jump here from DEOPT_IF() deoptimize: next_instr = next_uop[-1].target + _PyCode_CODE(_PyFrame_GetCode(frame)); - DPRINTF(2, "DEOPT: [UOp %d (%s), oparg %d, operand %" PRIu64 ", target %d -> %s]\n", - uopcode, _PyUOpName(uopcode), next_uop[-1].oparg, next_uop[-1].operand, next_uop[-1].target, - _PyOpcode_OpName[next_instr->op.code]); +#ifdef Py_DEBUG + if (lltrace >= 2) { + printf("DEOPT: [UOp "); + _PyUOpPrint(&next_uop[-1]); + printf(" -> %s]\n", + _PyOpcode_OpName[frame->instr_ptr->op.code]); + } +#endif OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); UOP_STAT_INC(uopcode, miss); Py_DECREF(current_executor); @@ -1088,9 +1101,15 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int uint32_t exit_index = next_uop[-1].exit_index; assert(exit_index < current_executor->exit_count); _PyExitData *exit = ¤t_executor->exits[exit_index]; - DPRINTF(2, "SIDE EXIT: [UOp %d (%s), oparg %d, operand %" PRIu64 ", exit %u, temp %d, target %d -> %s]\n", - uopcode, _PyUOpName(uopcode), next_uop[-1].oparg, next_uop[-1].operand, exit_index, exit->temperature, - exit->target, _PyOpcode_OpName[_PyCode_CODE(_PyFrame_GetCode(frame))[exit->target].op.code]); +#ifdef Py_DEBUG + if (lltrace >= 2) { + printf("SIDE EXIT: [UOp "); + _PyUOpPrint(&next_uop[-1]); + printf(", exit %u, temp %d, target %d -> %s]\n", + exit_index, exit->temperature, exit->target, + _PyOpcode_OpName[frame->instr_ptr->op.code]); + } +#endif Py_INCREF(exit->executor); tstate->previous_executor = (PyObject *)current_executor; GOTO_TIER_TWO(exit->executor); diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h index 2ca54b6fe9cd38..974555cbba9dd6 100644 --- a/Python/executor_cases.c.h +++ b/Python/executor_cases.c.h @@ -2427,6 +2427,7 @@ _PyListIterObject *it = (_PyListIterObject *)iter; assert(Py_TYPE(iter) == 
&PyListIter_Type); PyListObject *seq = it->it_seq; + if (seq == NULL) goto deoptimize; if ((size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) goto deoptimize; break; } diff --git a/Python/gc.c b/Python/gc.c index 907f29baa3777a..a031897d235dea 100644 --- a/Python/gc.c +++ b/Python/gc.c @@ -12,7 +12,7 @@ #include "pycore_object_alloc.h" // _PyObject_MallocWithType() #include "pycore_pyerrors.h" #include "pycore_pystate.h" // _PyThreadState_GET() -#include "pycore_time.h" // _PyTime_GetPerfCounter() +#include "pycore_time.h" // _PyTime_PerfCounterUnchecked() #include "pycore_weakref.h" // _PyWeakref_ClearRef() #include "pydtrace.h" @@ -1327,7 +1327,7 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) if (gcstate->debug & _PyGC_DEBUG_STATS) { PySys_WriteStderr("gc: collecting generation %d...\n", generation); show_stats_each_generations(gcstate); - t1 = _PyTime_GetPerfCounter(); + t1 = _PyTime_PerfCounterUnchecked(); } if (PyDTrace_GC_START_ENABLED()) { @@ -1428,7 +1428,7 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) debug_cycle("uncollectable", FROM_GC(gc)); } if (gcstate->debug & _PyGC_DEBUG_STATS) { - double d = _PyTime_AsSecondsDouble(_PyTime_GetPerfCounter() - t1); + double d = _PyTime_AsSecondsDouble(_PyTime_PerfCounterUnchecked() - t1); PySys_WriteStderr( "gc: done, %zd unreachable, %zd uncollectable, %.4fs elapsed\n", n+m, n, d); diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index 88c9c4ae5d77b9..4d886ee369db11 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -1108,7 +1108,7 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) if (gcstate->debug & _PyGC_DEBUG_STATS) { PySys_WriteStderr("gc: collecting generation %d...\n", generation); show_stats_each_generations(gcstate); - t1 = _PyTime_GetPerfCounter(); + t1 = _PyTime_PerfCounterUnchecked(); } if (PyDTrace_GC_START_ENABLED()) { @@ -1136,7 +1136,7 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) n = state.uncollectable; if (gcstate->debug & _PyGC_DEBUG_STATS) { - double d = _PyTime_AsSecondsDouble(_PyTime_GetPerfCounter() - t1); + double d = _PyTime_AsSecondsDouble(_PyTime_PerfCounterUnchecked() - t1); PySys_WriteStderr( "gc: done, %zd unreachable, %zd uncollectable, %.4fs elapsed\n", n+m, n, d); diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h index 01e67acdc5c0e5..7f46bc8916c8d8 100644 --- a/Python/generated_cases.c.h +++ b/Python/generated_cases.c.h @@ -2560,7 +2560,7 @@ assert(Py_TYPE(iter) == &PyListIter_Type); STAT_INC(FOR_ITER, hit); PyListObject *seq = it->it_seq; - if ((size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) { + if (seq == NULL || (size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) { it->it_index = -1; #ifndef Py_GIL_DISABLED if (seq != NULL) { diff --git a/Python/import.c b/Python/import.c index a8fed67755256e..dc92708c8b6ea0 100644 --- a/Python/import.c +++ b/Python/import.c @@ -13,7 +13,7 @@ #include "pycore_pymem.h" // _PyMem_SetDefaultAllocator() #include "pycore_pystate.h" // _PyInterpreterState_GET() #include "pycore_sysmodule.h" // _PySys_Audit() -#include "pycore_time.h" // _PyTime_GetPerfCounter() +#include "pycore_time.h" // _PyTime_PerfCounterUnchecked() #include "pycore_weakref.h" // _PyWeakref_GET_REF() #include "marshal.h" // PyMarshal_ReadObjectFromString() @@ -2748,7 +2748,7 @@ import_find_and_load(PyThreadState *tstate, PyObject *abs_name) #undef header import_level++; - t1 = _PyTime_GetPerfCounter(); + t1 
= _PyTime_PerfCounterUnchecked(); accumulated = 0; } @@ -2763,7 +2763,7 @@ import_find_and_load(PyThreadState *tstate, PyObject *abs_name) mod != NULL); if (import_time) { - PyTime_t cum = _PyTime_GetPerfCounter() - t1; + PyTime_t cum = _PyTime_PerfCounterUnchecked() - t1; import_level--; fprintf(stderr, "import time: %9ld | %10ld | %*s%s\n", diff --git a/Python/lock.c b/Python/lock.c index a4b044ecff0d70..5fa8bf78da2380 100644 --- a/Python/lock.c +++ b/Python/lock.c @@ -5,7 +5,7 @@ #include "pycore_lock.h" #include "pycore_parking_lot.h" #include "pycore_semaphore.h" -#include "pycore_time.h" // _PyTime_GetMonotonicClock() +#include "pycore_time.h" // _PyTime_MonotonicUnchecked() #ifdef MS_WINDOWS # define WIN32_LEAN_AND_MEAN @@ -66,7 +66,7 @@ _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags) return PY_LOCK_FAILURE; } - PyTime_t now = _PyTime_GetMonotonicClock(); + PyTime_t now = _PyTime_MonotonicUnchecked(); PyTime_t endtime = 0; if (timeout > 0) { endtime = _PyTime_Add(now, timeout); @@ -143,7 +143,7 @@ mutex_unpark(PyMutex *m, struct mutex_entry *entry, int has_more_waiters) { uint8_t v = 0; if (entry) { - PyTime_t now = _PyTime_GetMonotonicClock(); + PyTime_t now = _PyTime_MonotonicUnchecked(); int should_be_fair = now > entry->time_to_be_fair; entry->handed_off = should_be_fair; diff --git a/Python/optimizer.c b/Python/optimizer.c index df8f0ed234b59d..74708beea7a53d 100644 --- a/Python/optimizer.c +++ b/Python/optimizer.c @@ -262,8 +262,22 @@ is_valid(PyObject *self, PyObject *Py_UNUSED(ignored)) return PyBool_FromLong(((_PyExecutorObject *)self)->vm_data.valid); } +static PyObject * +get_opcode(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.opcode); +} + +static PyObject * +get_oparg(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + return PyLong_FromUnsignedLong(((_PyExecutorObject *)self)->vm_data.oparg); +} + static PyMethodDef executor_methods[] = { { "is_valid", is_valid, METH_NOARGS, NULL }, + { "get_opcode", get_opcode, METH_NOARGS, NULL }, + { "get_oparg", get_oparg, METH_NOARGS, NULL }, { NULL, NULL }, }; @@ -282,9 +296,30 @@ uop_dealloc(_PyExecutorObject *self) { const char * _PyUOpName(int index) { + if (index < 0 || index > MAX_UOP_ID) { + return NULL; + } return _PyOpcode_uop_name[index]; } +#ifdef Py_DEBUG +void +_PyUOpPrint(const _PyUOpInstruction *uop) +{ + const char *name = _PyUOpName(uop->opcode); + if (name == NULL) { + printf("", uop->opcode); + } + else { + printf("%s", name); + } + printf(" (%d, target=%d, operand=%" PRIx64 ")", + uop->oparg, + uop->target, + (uint64_t)uop->operand); +} +#endif + static Py_ssize_t uop_len(_PyExecutorObject *self) { @@ -312,14 +347,21 @@ uop_item(_PyExecutorObject *self, Py_ssize_t index) Py_DECREF(oname); return NULL; } + PyObject *target = PyLong_FromUnsignedLong(self->trace[index].target); + if (oparg == NULL) { + Py_DECREF(oparg); + Py_DECREF(oname); + return NULL; + } PyObject *operand = PyLong_FromUnsignedLongLong(self->trace[index].operand); if (operand == NULL) { + Py_DECREF(target); Py_DECREF(oparg); Py_DECREF(oname); return NULL; } - PyObject *args[3] = { oname, oparg, operand }; - return _PyTuple_FromArraySteal(args, 3); + PyObject *args[4] = { oname, oparg, target, operand }; + return _PyTuple_FromArraySteal(args, 4); } PySequenceMethods uop_as_sequence = { @@ -390,19 +432,29 @@ BRANCH_TO_GUARD[4][2] = { #endif +// Beware: Macro arg order differs from struct member order +#ifdef Py_DEBUG #define ADD_TO_TRACE(OPCODE, OPARG, 
OPERAND, TARGET) \ - DPRINTF(2, \ - " ADD_TO_TRACE(%s, %d, %" PRIu64 ", %d)\n", \ - _PyUOpName(OPCODE), \ - (OPARG), \ - (uint64_t)(OPERAND), \ - TARGET); \ assert(trace_length < max_length); \ trace[trace_length].opcode = (OPCODE); \ trace[trace_length].oparg = (OPARG); \ + trace[trace_length].target = (TARGET); \ trace[trace_length].operand = (OPERAND); \ + if (lltrace >= 2) { \ + printf("%4d ADD_TO_TRACE: ", trace_length); \ + _PyUOpPrint(&trace[trace_length]); \ + printf("\n"); \ + } \ + trace_length++; +#else +#define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ + assert(trace_length < max_length); \ + trace[trace_length].opcode = (OPCODE); \ + trace[trace_length].oparg = (OPARG); \ trace[trace_length].target = (TARGET); \ + trace[trace_length].operand = (OPERAND); \ trace_length++; +#endif #define INSTR_IP(INSTR, CODE) \ ((uint32_t)((INSTR) - ((_Py_CODEUNIT *)(CODE)->co_code_adaptive))) @@ -890,12 +942,9 @@ make_executor_from_uops(_PyUOpInstruction *buffer, const _PyBloomFilter *depende if (lltrace >= 2) { printf("Optimized executor (length %d):\n", length); for (int i = 0; i < length; i++) { - printf("%4d %s(%d, %d, %" PRIu64 ")\n", - i, - _PyUOpName(executor->trace[i].opcode), - executor->trace[i].oparg, - executor->trace[i].target, - executor->trace[i].operand); + printf("%4d OPTIMIZED: ", i); + _PyUOpPrint(&executor->trace[i]); + printf("\n"); } } #endif diff --git a/Python/optimizer_analysis.c b/Python/optimizer_analysis.c index b104d2fa7baec9..e7fb1e38c0dfd7 100644 --- a/Python/optimizer_analysis.c +++ b/Python/optimizer_analysis.c @@ -44,6 +44,7 @@ #define MAX_ABSTRACT_FRAME_DEPTH (TRACE_STACK_SIZE + 2) #ifdef Py_DEBUG + extern const char *_PyUOpName(int index); static const char *const DEBUG_ENV = "PYTHON_OPT_DEBUG"; static inline int get_lltrace(void) { char *uop_debug = Py_GETENV(DEBUG_ENV); @@ -632,7 +633,7 @@ uop_redundancy_eliminator( _Py_UOpsSymType **stack_pointer = ctx->frame->stack_pointer; DPRINTF(3, "Abstract interpreting %s:%d ", - _PyOpcode_uop_name[opcode], + _PyUOpName(opcode), oparg); switch (opcode) { #include "tier2_redundancy_eliminator_cases.c.h" diff --git a/Python/parking_lot.c b/Python/parking_lot.c index 9bf8376e485ea4..0a897f9952f648 100644 --- a/Python/parking_lot.c +++ b/Python/parking_lot.c @@ -6,7 +6,7 @@ #include "pycore_pyerrors.h" // _Py_FatalErrorFormat #include "pycore_pystate.h" // _PyThreadState_GET #include "pycore_semaphore.h" // _PySemaphore -#include "pycore_time.h" //_PyTime_GetMonotonicClock() +#include "pycore_time.h" //_PyTime_MonotonicUnchecked() #include @@ -120,13 +120,13 @@ _PySemaphore_PlatformWait(_PySemaphore *sema, PyTime_t timeout) struct timespec ts; #if defined(CLOCK_MONOTONIC) && defined(HAVE_SEM_CLOCKWAIT) - PyTime_t deadline = _PyTime_Add(_PyTime_GetMonotonicClock(), timeout); + PyTime_t deadline = _PyTime_Add(_PyTime_MonotonicUnchecked(), timeout); _PyTime_AsTimespec_clamp(deadline, &ts); err = sem_clockwait(&sema->platform_sem, CLOCK_MONOTONIC, &ts); #else - PyTime_t deadline = _PyTime_Add(_PyTime_GetSystemClock(), timeout); + PyTime_t deadline = _PyTime_Add(_PyTime_TimeUnchecked(), timeout); _PyTime_AsTimespec_clamp(deadline, &ts); @@ -163,7 +163,7 @@ _PySemaphore_PlatformWait(_PySemaphore *sema, PyTime_t timeout) _PyTime_AsTimespec_clamp(timeout, &ts); err = pthread_cond_timedwait_relative_np(&sema->cond, &sema->mutex, &ts); #else - PyTime_t deadline = _PyTime_Add(_PyTime_GetSystemClock(), timeout); + PyTime_t deadline = _PyTime_Add(_PyTime_TimeUnchecked(), timeout); _PyTime_AsTimespec_clamp(deadline, &ts); err = 
pthread_cond_timedwait(&sema->cond, &sema->mutex, &ts); diff --git a/Python/pytime.c b/Python/pytime.c index f29337eb536409..c3534d9a1ca44b 100644 --- a/Python/pytime.c +++ b/Python/pytime.c @@ -1032,7 +1032,7 @@ py_get_system_clock(PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) PyTime_t -_PyTime_GetSystemClock(void) +_PyTime_TimeUnchecked(void) { PyTime_t t; if (py_get_system_clock(&t, NULL, 0) < 0) { @@ -1048,8 +1048,6 @@ int PyTime_Time(PyTime_t *result) { if (py_get_system_clock(result, NULL, 1) < 0) { - // If clock_gettime(CLOCK_REALTIME) or gettimeofday() fails: - // silently ignore the failure and return 0. *result = 0; return -1; } @@ -1057,7 +1055,7 @@ PyTime_Time(PyTime_t *result) } int -_PyTime_GetSystemClockWithInfo(PyTime_t *t, _Py_clock_info_t *info) +_PyTime_TimeWithInfo(PyTime_t *t, _Py_clock_info_t *info) { return py_get_system_clock(t, info, 1); } @@ -1224,7 +1222,7 @@ py_get_monotonic_clock(PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) PyTime_t -_PyTime_GetMonotonicClock(void) +_PyTime_MonotonicUnchecked(void) { PyTime_t t; if (py_get_monotonic_clock(&t, NULL, 0) < 0) { @@ -1248,7 +1246,7 @@ PyTime_Monotonic(PyTime_t *result) int -_PyTime_GetMonotonicClockWithInfo(PyTime_t *tp, _Py_clock_info_t *info) +_PyTime_MonotonicWithInfo(PyTime_t *tp, _Py_clock_info_t *info) { return py_get_monotonic_clock(tp, info, 1); } @@ -1325,18 +1323,18 @@ py_get_win_perf_counter(PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) int -_PyTime_GetPerfCounterWithInfo(PyTime_t *t, _Py_clock_info_t *info) +_PyTime_PerfCounterWithInfo(PyTime_t *t, _Py_clock_info_t *info) { #ifdef MS_WINDOWS return py_get_win_perf_counter(t, info, 1); #else - return _PyTime_GetMonotonicClockWithInfo(t, info); + return _PyTime_MonotonicWithInfo(t, info); #endif } PyTime_t -_PyTime_GetPerfCounter(void) +_PyTime_PerfCounterUnchecked(void) { PyTime_t t; int res; @@ -1443,7 +1441,7 @@ _PyTime_gmtime(time_t t, struct tm *tm) PyTime_t _PyDeadline_Init(PyTime_t timeout) { - PyTime_t now = _PyTime_GetMonotonicClock(); + PyTime_t now = _PyTime_MonotonicUnchecked(); return _PyTime_Add(now, timeout); } @@ -1451,6 +1449,6 @@ _PyDeadline_Init(PyTime_t timeout) PyTime_t _PyDeadline_Get(PyTime_t deadline) { - PyTime_t now = _PyTime_GetMonotonicClock(); + PyTime_t now = _PyTime_MonotonicUnchecked(); return deadline - now; } diff --git a/Python/specialize.c b/Python/specialize.c index 2256d79b387c56..871979d92298b6 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -17,6 +17,7 @@ #include // rand() +extern const char *_PyUOpName(int index); /* For guidance on adding or extending families of instructions see * ./adaptive.md @@ -246,17 +247,12 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) stats->optimizer_failure_reason_no_memory); const char* const* names; - for (int i = 0; i < 512; i++) { - if (i < 256) { - names = _PyOpcode_OpName; - } else { - names = _PyOpcode_uop_name; - } + for (int i = 0; i <= MAX_UOP_ID; i++) { if (stats->opcode[i].execution_count) { - fprintf(out, "uops[%s].execution_count : %" PRIu64 "\n", names[i], stats->opcode[i].execution_count); + fprintf(out, "uops[%s].execution_count : %" PRIu64 "\n", _PyUOpName(i), stats->opcode[i].execution_count); } if (stats->opcode[i].miss) { - fprintf(out, "uops[%s].specialization.miss : %" PRIu64 "\n", names[i], stats->opcode[i].miss); + fprintf(out, "uops[%s].specialization.miss : %" PRIu64 "\n", _PyUOpName(i), stats->opcode[i].miss); } } diff --git a/Python/thread_nt.h b/Python/thread_nt.h index 307352f592e70e..e7591600c6416a 
100644 --- a/Python/thread_nt.h +++ b/Python/thread_nt.h @@ -78,7 +78,7 @@ EnterNonRecursiveMutex(PNRMUTEX mutex, DWORD milliseconds) } else if (milliseconds != 0) { /* wait at least until the deadline */ PyTime_t nanoseconds = _PyTime_FromNanoseconds((PyTime_t)milliseconds * 1000000); - PyTime_t deadline = _PyTime_Add(_PyTime_GetPerfCounter(), nanoseconds); + PyTime_t deadline = _PyTime_Add(_PyTime_PerfCounterUnchecked(), nanoseconds); while (mutex->locked) { PyTime_t microseconds = _PyTime_AsMicroseconds(nanoseconds, _PyTime_ROUND_TIMEOUT); @@ -86,7 +86,7 @@ EnterNonRecursiveMutex(PNRMUTEX mutex, DWORD milliseconds) result = WAIT_FAILED; break; } - nanoseconds = deadline - _PyTime_GetPerfCounter(); + nanoseconds = deadline - _PyTime_PerfCounterUnchecked(); if (nanoseconds <= 0) { break; } diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h index 9db6a4666f510c..17f6ae7eb70553 100644 --- a/Python/thread_pthread.h +++ b/Python/thread_pthread.h @@ -154,12 +154,12 @@ _PyThread_cond_after(long long us, struct timespec *abs) PyTime_t t; #ifdef CONDATTR_MONOTONIC if (condattr_monotonic) { - t = _PyTime_GetMonotonicClock(); + t = _PyTime_MonotonicUnchecked(); } else #endif { - t = _PyTime_GetSystemClock(); + t = _PyTime_TimeUnchecked(); } t = _PyTime_Add(t, timeout); _PyTime_AsTimespec_clamp(t, abs); @@ -502,7 +502,7 @@ PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds, struct timespec abs_timeout; // Local scope for deadline { - PyTime_t deadline = _PyTime_Add(_PyTime_GetMonotonicClock(), timeout); + PyTime_t deadline = _PyTime_Add(_PyTime_MonotonicUnchecked(), timeout); _PyTime_AsTimespec_clamp(deadline, &abs_timeout); } #else @@ -518,7 +518,7 @@ PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds, status = fix_status(sem_clockwait(thelock, CLOCK_MONOTONIC, &abs_timeout)); #else - PyTime_t abs_time = _PyTime_Add(_PyTime_GetSystemClock(), + PyTime_t abs_time = _PyTime_Add(_PyTime_TimeUnchecked(), timeout); struct timespec ts; _PyTime_AsTimespec_clamp(abs_time, &ts); diff --git a/Tools/c-analyzer/cpython/ignored.tsv b/Tools/c-analyzer/cpython/ignored.tsv index 94be3375849597..e9b81cf4c7d653 100644 --- a/Tools/c-analyzer/cpython/ignored.tsv +++ b/Tools/c-analyzer/cpython/ignored.tsv @@ -361,6 +361,7 @@ Python/import.c - _PyImport_Inittab - Python/import.c - _PySys_ImplCacheTag - Python/intrinsics.c - _PyIntrinsics_UnaryFunctions - Python/intrinsics.c - _PyIntrinsics_BinaryFunctions - +Python/lock.c - TIME_TO_BE_FAIR_NS - Python/opcode_targets.h - opcode_targets - Python/perf_trampoline.c - _Py_perfmap_callbacks - Python/pyhash.c - PyHash_Func - diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 5bc39fceb4b2a1..6b60b59b3b0e79 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -102,6 +102,10 @@ def load_raw_data(input: Path) -> RawData: file=sys.stderr, ) continue + # Hack to handle older data files where some uops + # are missing an underscore prefix in their name + if key.startswith("uops[") and key[5:6] != "_": + key = "uops[_" + key[5:] stats[key.strip()] += int(value) stats["__nfiles__"] += 1
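
Doc note: the new socket.rst entry describes ``SO_BINDTOIFINDEX`` only by analogy to ``SO_BINDTODEVICE``. A minimal usage sketch, not part of the patch, assuming a Linux build where the constant is defined and using the placeholder interface name "eth0":

    import socket

    # Bind a UDP socket to a network interface by index rather than by name.
    # SO_BINDTOIFINDEX is Linux-specific and, like SO_BINDTODEVICE, may
    # require elevated privileges; "eth0" is a placeholder interface name.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        if hasattr(socket, "SO_BINDTOIFINDEX"):
            ifindex = socket.if_nametoindex("eth0")
            s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTOIFINDEX, ifindex)
        else:
            # Older kernels/interpreters: fall back to the name-based option.
            s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, b"eth0")
        s.sendto(b"ping", ("192.0.2.1", 9))  # placeholder TEST-NET-1 address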
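
The list_tests.py addition above exercises gh-115733 from inside the test suite. The same scenario as a standalone, purely illustrative sketch (values mirror the new test): calling ``next()`` with a default on an exhausted list iterator must return the default instead of crashing, even while a ``for`` loop is driving the same iterator.

    # Driving an exhausted list iterator both from a for loop and via next()
    # with a default must return the default rather than crash (gh-115733).
    exhit = iter([1, 2, 3])
    seen = []
    for value in exhit:                       # the loop consumes 1, then 3
        seen.append(next(exhit, "default"))   # returns 2, then "default"
    assert seen == [2, "default"]

    empit = iter([])
    assert next(empit, "default") == "default"   # exhausted from the start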
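
After this change, items of a uop executor are 4-tuples ``(opname, oparg, target, operand)`` rather than 3-tuples, and the tests switch to the ``iter_opnames()``/``get_opnames()`` helpers. A sketch of inspecting a trace outside the test classes; it assumes a debug/test build that ships ``_testinternalcapi`` and mirrors the ``get_first_executor()`` helper from test_opt.py, including the assumption that ``get_executor()`` raises ``ValueError`` when no executor is attached at an offset:

    import _testinternalcapi

    def testfunc(n):
        total = 0
        for i in range(n):
            total += i
        return total

    old = _testinternalcapi.get_optimizer()
    _testinternalcapi.set_optimizer(_testinternalcapi.new_uop_optimizer())
    try:
        testfunc(64)                  # loop enough times to attach an executor
    finally:
        _testinternalcapi.set_optimizer(old)

    code = testfunc.__code__
    ex = None
    for offset in range(0, len(code.co_code), 2):
        try:
            ex = _testinternalcapi.get_executor(code, offset)
            break
        except ValueError:            # assumed: no executor at this offset
            pass

    if ex is not None:
        # Each executor item is now (opname, oparg, target, operand).
        for opname, oparg, target, operand in ex:
            print(opname, oparg, target, operand)
        # New accessors added in optimizer.c for the attached instruction.
        print("get_opcode/get_oparg:", ex.get_opcode(), ex.get_oparg())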