Diffstat (limited to 'Python')
-rw-r--r--  Python/Python-ast.c                  6
-rw-r--r--  Python/_warnings.c                   3
-rw-r--r--  Python/asm_trampoline.S             22
-rw-r--r--  Python/bltinmodule.c                11
-rw-r--r--  Python/bytecodes.c                 285
-rw-r--r--  Python/ceval.c                      62
-rw-r--r--  Python/codegen.c                    41
-rw-r--r--  Python/context.c                    10
-rw-r--r--  Python/crossinterp.c               456
-rw-r--r--  Python/crossinterp_data_lookup.h     8
-rw-r--r--  Python/errors.c                      5
-rw-r--r--  Python/executor_cases.c.h          206
-rw-r--r--  Python/flowgraph.c                  37
-rw-r--r--  Python/gc_free_threading.c           5
-rw-r--r--  Python/generated_cases.c.h         238
-rw-r--r--  Python/hamt.c                        6
-rw-r--r--  Python/import.c                     16
-rw-r--r--  Python/index_pool.c                  4
-rw-r--r--  Python/initconfig.c                  8
-rw-r--r--  Python/intrinsics.c                  3
-rw-r--r--  Python/lock.c                        3
-rw-r--r--  Python/modsupport.c                  2
-rw-r--r--  Python/optimizer_bytecodes.c        43
-rw-r--r--  Python/optimizer_cases.c.h          47
-rw-r--r--  Python/parking_lot.c                22
-rw-r--r--  Python/perf_jit_trampoline.c      1418
-rw-r--r--  Python/pylifecycle.c                 8
-rw-r--r--  Python/pystate.c                    10
-rw-r--r--  Python/pythonrun.c                  18
-rw-r--r--  Python/remote_debug.h              462
-rw-r--r--  Python/remote_debugging.c           39
-rw-r--r--  Python/specialize.c                 78
-rw-r--r--  Python/stackrefs.c                  12
-rw-r--r--  Python/sysmodule.c                  36
-rw-r--r--  Python/thread.c                     82
-rw-r--r--  Python/thread_nt.h                  92
-rw-r--r--  Python/thread_pthread.h            392
-rw-r--r--  Python/traceback.c                   5
38 files changed, 2608 insertions(+), 1593 deletions(-)
diff --git a/Python/Python-ast.c b/Python/Python-ast.c
index f7625ab1205..660bc598a48 100644
--- a/Python/Python-ast.c
+++ b/Python/Python-ast.c
@@ -5796,7 +5796,7 @@ ast_repr_list(PyObject *list, int depth)
for (Py_ssize_t i = 0; i < Py_MIN(length, 2); i++) {
if (i > 0) {
- if (PyUnicodeWriter_WriteUTF8(writer, ", ", 2) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, ", ", 2) < 0) {
goto error;
}
}
@@ -5820,7 +5820,7 @@ ast_repr_list(PyObject *list, int depth)
}
if (i == 0 && length > 2) {
- if (PyUnicodeWriter_WriteUTF8(writer, ", ...", 5) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, ", ...", 5) < 0) {
goto error;
}
}
@@ -5924,7 +5924,7 @@ ast_repr_max_depth(AST_object *self, int depth)
}
if (i > 0) {
- if (PyUnicodeWriter_WriteUTF8(writer, ", ", 2) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, ", ", 2) < 0) {
Py_DECREF(name);
Py_DECREF(value_repr);
goto error;
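
The switch from PyUnicodeWriter_WriteUTF8() to PyUnicodeWriter_WriteASCII() is safe here because every literal written is pure ASCII, and the ASCII variant skips UTF-8 decoding. A minimal sketch of the writer API as used above (the helper name repr_pair is hypothetical; error handling trimmed):

    #include <Python.h>

    /* Hypothetical helper: build "repr(a), repr(b)" with a PyUnicodeWriter. */
    static PyObject *
    repr_pair(PyObject *a, PyObject *b)
    {
        PyUnicodeWriter *writer = PyUnicodeWriter_Create(0);
        if (writer == NULL) {
            return NULL;
        }
        if (PyUnicodeWriter_WriteRepr(writer, a) < 0
            || PyUnicodeWriter_WriteASCII(writer, ", ", 2) < 0  /* ASCII-only literal */
            || PyUnicodeWriter_WriteRepr(writer, b) < 0)
        {
            PyUnicodeWriter_Discard(writer);
            return NULL;
        }
        return PyUnicodeWriter_Finish(writer);
    }
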
diff --git a/Python/_warnings.c b/Python/_warnings.c
index 39bf1b225cc..12e6172b0cf 100644
--- a/Python/_warnings.c
+++ b/Python/_warnings.c
@@ -6,7 +6,6 @@
#include "pycore_long.h" // _PyLong_GetZero()
#include "pycore_pylifecycle.h" // _Py_IsInterpreterFinalizing()
#include "pycore_pystate.h" // _PyThreadState_GET()
-#include "pycore_sysmodule.h" // _PySys_GetOptionalAttr()
#include "pycore_traceback.h" // _Py_DisplaySourceLine()
#include "pycore_unicodeobject.h" // _PyUnicode_EqualToASCIIString()
@@ -678,7 +677,7 @@ show_warning(PyThreadState *tstate, PyObject *filename, int lineno,
goto error;
}
- if (_PySys_GetOptionalAttr(&_Py_ID(stderr), &f_stderr) <= 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stderr), &f_stderr) <= 0) {
fprintf(stderr, "lost sys.stderr\n");
goto error;
}
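
This hunk migrates from the private _PySys_GetOptionalAttr() to the public PySys_GetOptionalAttr(), with the same contract: it stores a strong reference to the attribute (or NULL if missing) and returns -1 with an exception set, 0 if the attribute is absent, or 1 on success. A hedged fragment showing the caller pattern, mirroring show_warning() above:

    PyObject *stderr_obj;
    int rc = PySys_GetOptionalAttrString("stderr", &stderr_obj);
    if (rc < 0) {
        return -1;                             /* error while fetching sys.stderr */
    }
    if (rc == 0 || stderr_obj == Py_None) {
        Py_XDECREF(stderr_obj);
        fprintf(stderr, "lost sys.stderr\n");  /* fall back to the C stream */
        return 0;
    }
    /* ... write the warning to stderr_obj ... */
    Py_DECREF(stderr_obj);
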
diff --git a/Python/asm_trampoline.S b/Python/asm_trampoline.S
index 0a3265dfeee..616752459ba 100644
--- a/Python/asm_trampoline.S
+++ b/Python/asm_trampoline.S
@@ -9,6 +9,9 @@
# }
_Py_trampoline_func_start:
#ifdef __x86_64__
+#if defined(__CET__) && (__CET__ & 1)
+ endbr64
+#endif
sub $8, %rsp
call *%rcx
add $8, %rsp
@@ -34,3 +37,22 @@ _Py_trampoline_func_start:
.globl _Py_trampoline_func_end
_Py_trampoline_func_end:
.section .note.GNU-stack,"",@progbits
+# Note for indicating the assembly code supports CET
+#if defined(__x86_64__) && defined(__CET__) && (__CET__ & 1)
+ .section .note.gnu.property,"a"
+ .align 8
+ .long 1f - 0f
+ .long 4f - 1f
+ .long 5
+0:
+ .string "GNU"
+1:
+ .align 8
+ .long 0xc0000002
+ .long 3f - 2f
+2:
+ .long 0x3
+3:
+ .align 8
+4:
+#endif // __x86_64__
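
The .note.gnu.property payload above is the standard ELF marker for Intel CET support. The magic numbers match the glibc <elf.h> constants, shown here for reference (a sketch; the asserts merely document the values):

    #include <elf.h>

    /* .long 0xc0000002 is the property type; .long 0x3 is the feature
       mask IBT | SHSTK, telling the loader the trampoline is CET-safe. */
    _Static_assert(GNU_PROPERTY_X86_FEATURE_1_AND == 0xc0000002,
                   "property type");
    _Static_assert((GNU_PROPERTY_X86_FEATURE_1_IBT
                    | GNU_PROPERTY_X86_FEATURE_1_SHSTK) == 0x3,
                   "feature bits");

The endbr64 at the function entry is what makes the trampoline a legal indirect-branch target when IBT is enforced.
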
diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c
index 3d0295ee388..e08c63924ca 100644
--- a/Python/bltinmodule.c
+++ b/Python/bltinmodule.c
@@ -14,7 +14,6 @@
#include "pycore_pyerrors.h" // _PyErr_NoMemory()
#include "pycore_pystate.h" // _PyThreadState_GET()
#include "pycore_pythonrun.h" // _Py_SourceAsString()
-#include "pycore_sysmodule.h" // _PySys_GetRequiredAttr()
#include "pycore_tuple.h" // _PyTuple_FromArray()
#include "pycore_cell.h" // PyCell_GetRef()
@@ -465,7 +464,7 @@ builtin_callable(PyObject *module, PyObject *obj)
static PyObject *
builtin_breakpoint(PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *keywords)
{
- PyObject *hook = _PySys_GetRequiredAttrString("breakpointhook");
+ PyObject *hook = PySys_GetAttrString("breakpointhook");
if (hook == NULL) {
return NULL;
}
@@ -2164,7 +2163,7 @@ builtin_print_impl(PyObject *module, PyObject * const *args,
int i, err;
if (file == Py_None) {
- file = _PySys_GetRequiredAttr(&_Py_ID(stdout));
+ file = PySys_GetAttr(&_Py_ID(stdout));
if (file == NULL) {
return NULL;
}
@@ -2270,7 +2269,7 @@ builtin_input_impl(PyObject *module, PyObject *prompt)
int tty;
/* Check that stdin/out/err are intact */
- fin = _PySys_GetRequiredAttr(&_Py_ID(stdin));
+ fin = PySys_GetAttr(&_Py_ID(stdin));
if (fin == NULL) {
goto error;
}
@@ -2278,7 +2277,7 @@ builtin_input_impl(PyObject *module, PyObject *prompt)
PyErr_SetString(PyExc_RuntimeError, "lost sys.stdin");
goto error;
}
- fout = _PySys_GetRequiredAttr(&_Py_ID(stdout));
+ fout = PySys_GetAttr(&_Py_ID(stdout));
if (fout == NULL) {
goto error;
}
@@ -2286,7 +2285,7 @@ builtin_input_impl(PyObject *module, PyObject *prompt)
PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
goto error;
}
- ferr = _PySys_GetRequiredAttr(&_Py_ID(stderr));
+ ferr = PySys_GetAttr(&_Py_ID(stderr));
if (ferr == NULL) {
goto error;
}
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index be22c7446f5..c4b13da5db4 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -341,7 +341,7 @@ dummy_func(
}
pure inst(POP_TOP, (value --)) {
- PyStackRef_CLOSE(value);
+ PyStackRef_XCLOSE(value);
}
tier2 op(_POP_TWO, (nos, tos --)) {
@@ -362,9 +362,14 @@ dummy_func(
PyStackRef_CLOSE(value);
}
- macro(POP_ITER) = POP_TOP;
- no_save_ip tier1 inst(INSTRUMENTED_END_FOR, (receiver, value -- receiver)) {
+ inst(POP_ITER, (iter, index_or_null -- )) {
+ (void)index_or_null;
+ DEAD(index_or_null);
+ PyStackRef_CLOSE(iter);
+ }
+
+ no_save_ip tier1 inst(INSTRUMENTED_END_FOR, (receiver, index_or_null, value -- receiver, index_or_null)) {
/* Need to create a fake StopIteration error here,
* to conform to PEP 380 */
if (PyStackRef_GenCheck(receiver)) {
@@ -376,7 +381,9 @@ dummy_func(
PyStackRef_CLOSE(value);
}
- tier1 inst(INSTRUMENTED_POP_ITER, (iter -- )) {
+ tier1 inst(INSTRUMENTED_POP_ITER, (iter, index_or_null -- )) {
+ (void)index_or_null;
+ DEAD(index_or_null);
INSTRUMENTED_JUMP(prev_instr, this_instr+1, PY_MONITORING_EVENT_BRANCH_RIGHT);
PyStackRef_CLOSE(iter);
}
@@ -3041,15 +3048,24 @@ dummy_func(
values_or_none = PyStackRef_FromPyObjectSteal(values_or_none_o);
}
- inst(GET_ITER, (iterable -- iter)) {
+ inst(GET_ITER, (iterable -- iter, index_or_null)) {
#ifdef Py_STATS
_Py_GatherStats_GetIter(iterable);
#endif
/* before: [obj]; after [getiter(obj)] */
- PyObject *iter_o = PyObject_GetIter(PyStackRef_AsPyObjectBorrow(iterable));
- PyStackRef_CLOSE(iterable);
- ERROR_IF(iter_o == NULL);
- iter = PyStackRef_FromPyObjectSteal(iter_o);
+ PyTypeObject *tp = PyStackRef_TYPE(iterable);
+ if (tp == &PyTuple_Type || tp == &PyList_Type) {
+ iter = iterable;
+ DEAD(iterable);
+ index_or_null = PyStackRef_TagInt(0);
+ }
+ else {
+ PyObject *iter_o = PyObject_GetIter(PyStackRef_AsPyObjectBorrow(iterable));
+ PyStackRef_CLOSE(iterable);
+ ERROR_IF(iter_o == NULL);
+ iter = PyStackRef_FromPyObjectSteal(iter_o);
+ index_or_null = PyStackRef_NULL;
+ }
}
inst(GET_YIELD_FROM_ITER, (iterable -- iter)) {
@@ -3096,11 +3112,11 @@ dummy_func(
FOR_ITER_GEN,
};
- specializing op(_SPECIALIZE_FOR_ITER, (counter/1, iter -- iter)) {
+ specializing op(_SPECIALIZE_FOR_ITER, (counter/1, iter, null_or_index -- iter, null_or_index)) {
#if ENABLE_SPECIALIZATION_FT
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
- _Py_Specialize_ForIter(iter, next_instr, oparg);
+ _Py_Specialize_ForIter(iter, null_or_index, next_instr, oparg);
DISPATCH_SAME_OPARG();
}
OPCODE_DEFERRED_INC(FOR_ITER);
@@ -3108,111 +3124,71 @@ dummy_func(
#endif /* ENABLE_SPECIALIZATION_FT */
}
- replaced op(_FOR_ITER, (iter -- iter, next)) {
- /* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) */
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- if (!matches) {
- ERROR_NO_POP();
- }
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
+ replaced op(_FOR_ITER, (iter, null_or_index -- iter, null_or_index, next)) {
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ ERROR_NO_POP();
}
- /* iterator ended normally */
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
- /* Jump forward oparg, then skip following END_FOR */
+ // Jump forward by oparg and skip the following END_FOR
JUMPBY(oparg + 1);
DISPATCH();
}
- next = PyStackRef_FromPyObjectSteal(next_o);
- // Common case: no jump, leave it to the code generator
+ next = item;
}
- op(_FOR_ITER_TIER_TWO, (iter -- iter, next)) {
- /* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) */
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- if (!matches) {
- ERROR_NO_POP();
- }
- _PyEval_MonitorRaise(tstate, frame, frame->instr_ptr);
- _PyErr_Clear(tstate);
+ op(_FOR_ITER_TIER_TWO, (iter, null_or_index -- iter, null_or_index, next)) {
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ ERROR_NO_POP();
}
/* iterator ended normally */
/* The translator sets the deopt target just past the matching END_FOR */
EXIT_IF(true);
}
- next = PyStackRef_FromPyObjectSteal(next_o);
- // Common case: no jump, leave it to the code generator
+ next = item;
}
+
macro(FOR_ITER) = _SPECIALIZE_FOR_ITER + _FOR_ITER;
- inst(INSTRUMENTED_FOR_ITER, (unused/1, iter -- iter, next)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- if (next_o != NULL) {
- next = PyStackRef_FromPyObjectSteal(next_o);
- INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
- }
- else {
- if (_PyErr_Occurred(tstate)) {
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- if (!matches) {
- ERROR_NO_POP();
- }
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
+ inst(INSTRUMENTED_FOR_ITER, (unused/1, iter, null_or_index -- iter, null_or_index, next)) {
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ ERROR_NO_POP();
}
- /* iterator ended normally */
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
- /* Skip END_FOR */
+ // Jump forward by oparg and skip the following END_FOR
JUMPBY(oparg + 1);
DISPATCH();
}
+ next = item;
+ INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
}
-
- op(_ITER_CHECK_LIST, (iter -- iter)) {
+ op(_ITER_CHECK_LIST, (iter, null_or_index -- iter, null_or_index)) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- EXIT_IF(Py_TYPE(iter_o) != &PyListIter_Type);
+ EXIT_IF(Py_TYPE(iter_o) != &PyList_Type);
+ assert(PyStackRef_IsTaggedInt(null_or_index));
#ifdef Py_GIL_DISABLED
- EXIT_IF(!_PyObject_IsUniquelyReferenced(iter_o));
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- EXIT_IF(!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) ||
- !_PyObject_GC_IS_SHARED(it->it_seq));
+ EXIT_IF(!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o));
#endif
}
- replaced op(_ITER_JUMP_LIST, (iter -- iter)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
-// For free-threaded Python, the loop exit can happen at any point during
-// item retrieval, so it doesn't make much sense to check and jump
-// separately before item retrieval. Any length check we do here can be
-// invalid by the time we actually try to fetch the item.
+ replaced op(_ITER_JUMP_LIST, (iter, null_or_index -- iter, null_or_index)) {
#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- (void)iter_o;
+ // For free-threaded Python, the loop exit can happen at any point during
+ // item retrieval, so it doesn't make much sense to check and jump
+ // separately before item retrieval. Any length check we do here can be
+ // invalid by the time we actually try to fetch the item.
#else
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(list_o) == &PyList_Type);
STAT_INC(FOR_ITER, hit);
- PyListObject *seq = it->it_seq;
- if (seq == NULL || (size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) {
- it->it_index = -1;
- if (seq != NULL) {
- it->it_seq = NULL;
- Py_DECREF(seq);
- }
+ if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o)) {
+ null_or_index = PyStackRef_TagInt(-1);
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
@@ -3221,73 +3197,54 @@ dummy_func(
}
// Only used by Tier 2
- op(_GUARD_NOT_EXHAUSTED_LIST, (iter -- iter)) {
+ op(_GUARD_NOT_EXHAUSTED_LIST, (iter, null_or_index -- iter, null_or_index)) {
#ifndef Py_GIL_DISABLED
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
- PyListObject *seq = it->it_seq;
- EXIT_IF(seq == NULL);
- if ((size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) {
- it->it_index = -1;
- EXIT_IF(1);
- }
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(list_o) == &PyList_Type);
+ EXIT_IF((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o));
#endif
}
- replaced op(_ITER_NEXT_LIST, (iter -- iter, next)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
- PyListObject *seq = it->it_seq;
- assert(seq);
+ replaced op(_ITER_NEXT_LIST, (iter, null_or_index -- iter, null_or_index, next)) {
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(PyList_CheckExact(list_o));
#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
- _PyObject_GC_IS_SHARED(seq));
+ assert(_Py_IsOwnedByCurrentThread(list_o) ||
+ _PyObject_GC_IS_SHARED(list_o));
STAT_INC(FOR_ITER, hit);
- int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
+ int result = _PyList_GetItemRefNoLock((PyListObject *)list_o, PyStackRef_UntagInt(null_or_index), &next);
// A negative result means we lost a race with another thread
// and we need to take the slow path.
DEOPT_IF(result < 0);
if (result == 0) {
- it->it_index = -1;
+ null_or_index = PyStackRef_TagInt(-1);
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
}
- it->it_index++;
#else
- assert(it->it_index < PyList_GET_SIZE(seq));
- next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
+ next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(list_o, PyStackRef_UntagInt(null_or_index)));
#endif
+ null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
}
// Only used by Tier 2
- op(_ITER_NEXT_LIST_TIER_TWO, (iter -- iter, next)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
- PyListObject *seq = it->it_seq;
- assert(seq);
+ op(_ITER_NEXT_LIST_TIER_TWO, (iter, null_or_index -- iter, null_or_index, next)) {
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(PyList_CheckExact(list_o));
#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
- _PyObject_GC_IS_SHARED(seq));
+ assert(_Py_IsOwnedByCurrentThread((PyObject *)list_o) ||
+ _PyObject_GC_IS_SHARED(list_o));
STAT_INC(FOR_ITER, hit);
- int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
+ int result = _PyList_GetItemRefNoLock((PyListObject *)list_o, PyStackRef_UntagInt(null_or_index), &next);
// A negative result means we lost a race with another thread
// and we need to take the slow path.
- EXIT_IF(result < 0);
- if (result == 0) {
- it->it_index = -1;
- EXIT_IF(1);
- }
- it->it_index++;
+ DEOPT_IF(result <= 0);
#else
- assert(it->it_index < PyList_GET_SIZE(seq));
- next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
+ assert(PyStackRef_UntagInt(null_or_index) < PyList_GET_SIZE(list_o));
+ next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(list_o, PyStackRef_UntagInt(null_or_index)));
#endif
+ null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
}
macro(FOR_ITER_LIST) =
@@ -3296,31 +3253,19 @@ dummy_func(
_ITER_JUMP_LIST +
_ITER_NEXT_LIST;
- op(_ITER_CHECK_TUPLE, (iter -- iter)) {
+ op(_ITER_CHECK_TUPLE, (iter, null_or_index -- iter, null_or_index)) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- EXIT_IF(Py_TYPE(iter_o) != &PyTupleIter_Type);
-#ifdef Py_GIL_DISABLED
- EXIT_IF(!_PyObject_IsUniquelyReferenced(iter_o));
-#endif
+ EXIT_IF(Py_TYPE(iter_o) != &PyTuple_Type);
+ assert(PyStackRef_IsTaggedInt(null_or_index));
}
- replaced op(_ITER_JUMP_TUPLE, (iter -- iter)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- (void)iter_o;
- assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
-#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
-#endif
- _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
+ replaced op(_ITER_JUMP_TUPLE, (iter, null_or_index -- iter, null_or_index)) {
+ PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
+ (void)tuple_o;
+ assert(Py_TYPE(tuple_o) == &PyTuple_Type);
STAT_INC(FOR_ITER, hit);
- PyTupleObject *seq = it->it_seq;
- if (seq == NULL || (size_t)it->it_index >= (size_t)PyTuple_GET_SIZE(seq)) {
-#ifndef Py_GIL_DISABLED
- if (seq != NULL) {
- it->it_seq = NULL;
- Py_DECREF(seq);
- }
-#endif
+ if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o)) {
+ null_or_index = PyStackRef_TagInt(-1);
/* Jump forward oparg, then skip following END_FOR instruction */
JUMPBY(oparg + 1);
DISPATCH();
@@ -3328,29 +3273,19 @@ dummy_func(
}
// Only used by Tier 2
- op(_GUARD_NOT_EXHAUSTED_TUPLE, (iter -- iter)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
-#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
-#endif
- PyTupleObject *seq = it->it_seq;
- EXIT_IF(seq == NULL);
- EXIT_IF(it->it_index >= PyTuple_GET_SIZE(seq));
+ op(_GUARD_NOT_EXHAUSTED_TUPLE, (iter, null_or_index -- iter, null_or_index)) {
+ PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(tuple_o) == &PyTuple_Type);
+ EXIT_IF((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o));
}
- op(_ITER_NEXT_TUPLE, (iter -- iter, next)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
- PyTupleObject *seq = it->it_seq;
-#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
-#endif
- assert(seq);
- assert(it->it_index < PyTuple_GET_SIZE(seq));
- next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, it->it_index++));
+ op(_ITER_NEXT_TUPLE, (iter, null_or_index -- iter, null_or_index, next)) {
+ PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(tuple_o) == &PyTuple_Type);
+ uintptr_t i = PyStackRef_UntagInt(null_or_index);
+ assert((size_t)i < (size_t)PyTuple_GET_SIZE(tuple_o));
+ next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(tuple_o, i));
+ null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
}
macro(FOR_ITER_TUPLE) =
@@ -3359,7 +3294,7 @@ dummy_func(
_ITER_JUMP_TUPLE +
_ITER_NEXT_TUPLE;
- op(_ITER_CHECK_RANGE, (iter -- iter)) {
+ op(_ITER_CHECK_RANGE, (iter, null_or_index -- iter, null_or_index)) {
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
EXIT_IF(Py_TYPE(r) != &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
@@ -3367,7 +3302,7 @@ dummy_func(
#endif
}
- replaced op(_ITER_JUMP_RANGE, (iter -- iter)) {
+ replaced op(_ITER_JUMP_RANGE, (iter, null_or_index -- iter, null_or_index)) {
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
@@ -3382,13 +3317,13 @@ dummy_func(
}
// Only used by Tier 2
- op(_GUARD_NOT_EXHAUSTED_RANGE, (iter -- iter)) {
+ op(_GUARD_NOT_EXHAUSTED_RANGE, (iter, null_or_index -- iter, null_or_index)) {
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
EXIT_IF(r->len <= 0);
}
- op(_ITER_NEXT_RANGE, (iter -- iter, next)) {
+ op(_ITER_NEXT_RANGE, (iter, null_or_index -- iter, null_or_index, next)) {
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
@@ -3409,7 +3344,7 @@ dummy_func(
_ITER_JUMP_RANGE +
_ITER_NEXT_RANGE;
- op(_FOR_ITER_GEN_FRAME, (iter -- iter, gen_frame: _PyInterpreterFrame*)) {
+ op(_FOR_ITER_GEN_FRAME, (iter, null -- iter, null, gen_frame: _PyInterpreterFrame*)) {
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter);
DEOPT_IF(Py_TYPE(gen) != &PyGen_Type);
#ifdef Py_GIL_DISABLED
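
The net effect of the bytecodes.c changes: for exact lists and tuples, GET_ITER no longer allocates an iterator object. The sequence itself stays on the stack next to a tagged integer index, and the FOR_ITER family indexes into it directly, falling back to tp_iternext for everything else. A rough model of the fast path (a sketch, not the interpreter's actual stackref code; the free-threaded build uses _PyList_GetItemRef-style lookups instead of PyList_GET_ITEM):

    #include <Python.h>

    /* Returns a new reference to the next item, or NULL when exhausted. */
    static PyObject *
    virtual_iter_next(PyObject *seq, Py_ssize_t *index)
    {
        if (PyTuple_CheckExact(seq)) {
            if (*index >= PyTuple_GET_SIZE(seq)) {
                return NULL;                       /* jump past END_FOR */
            }
            return Py_NewRef(PyTuple_GET_ITEM(seq, (*index)++));
        }
        assert(PyList_CheckExact(seq));
        if (*index >= PyList_GET_SIZE(seq)) {
            return NULL;
        }
        return Py_NewRef(PyList_GET_ITEM(seq, (*index)++));
    }
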
diff --git a/Python/ceval.c b/Python/ceval.c
index d1b6b9f1a1a..4cfe4bb88f4 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -139,6 +139,19 @@
#endif
+static void
+check_invalid_reentrancy(void)
+{
+#if defined(Py_DEBUG) && defined(Py_GIL_DISABLED)
+ // In the free-threaded build, the interpreter must not be re-entered if
+ // the world-is-stopped. If so, that's a bug somewhere (quite likely in
+ // the painfully complex typeobject code).
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ assert(!interp->stoptheworld.world_stopped);
+#endif
+}
+
+
#ifdef Py_DEBUG
static void
dump_item(_PyStackRef item)
@@ -996,6 +1009,7 @@ PyObject* _Py_HOT_FUNCTION DONT_SLP_VECTORIZE
_PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag)
{
_Py_EnsureTstateNotNULL(tstate);
+ check_invalid_reentrancy();
CALL_STAT_INC(pyeval_calls);
#if USE_COMPUTED_GOTOS && !Py_TAIL_CALL_INTERP
@@ -2965,7 +2979,7 @@ _PyEval_ImportFrom(PyThreadState *tstate, PyObject *v, PyObject *name)
int is_possibly_shadowing_stdlib = 0;
if (is_possibly_shadowing) {
PyObject *stdlib_modules;
- if (_PySys_GetOptionalAttrString("stdlib_module_names", &stdlib_modules) < 0) {
+ if (PySys_GetOptionalAttrString("stdlib_module_names", &stdlib_modules) < 0) {
goto done;
}
if (stdlib_modules && PyAnySet_Check(stdlib_modules)) {
@@ -3176,7 +3190,7 @@ _PyEval_FormatKwargsError(PyThreadState *tstate, PyObject *func, PyObject *kwarg
else if (_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) {
PyObject *exc = _PyErr_GetRaisedException(tstate);
PyObject *args = PyException_GetArgs(exc);
- if (exc && PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1) {
+ if (PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1) {
_PyErr_Clear(tstate);
PyObject *funcstr = _PyObject_FunctionStr(func);
if (funcstr != NULL) {
@@ -3425,6 +3439,50 @@ _PyEval_LoadName(PyThreadState *tstate, _PyInterpreterFrame *frame, PyObject *na
return value;
}
+static _PyStackRef
+foriter_next(PyObject *seq, _PyStackRef index)
+{
+ assert(PyStackRef_IsTaggedInt(index));
+ assert(PyTuple_CheckExact(seq) || PyList_CheckExact(seq));
+ intptr_t i = PyStackRef_UntagInt(index);
+ if (PyTuple_CheckExact(seq)) {
+ size_t size = PyTuple_GET_SIZE(seq);
+ if ((size_t)i >= size) {
+ return PyStackRef_NULL;
+ }
+ return PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, i));
+ }
+ PyObject *item = _PyList_GetItemRef((PyListObject *)seq, i);
+ if (item == NULL) {
+ return PyStackRef_NULL;
+ }
+ return PyStackRef_FromPyObjectSteal(item);
+}
+
+_PyStackRef _PyForIter_VirtualIteratorNext(PyThreadState* tstate, _PyInterpreterFrame* frame, _PyStackRef iter, _PyStackRef* index_ptr)
+{
+ PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
+ _PyStackRef index = *index_ptr;
+ if (PyStackRef_IsTaggedInt(index)) {
+ *index_ptr = PyStackRef_IncrementTaggedIntNoOverflow(index);
+ return foriter_next(iter_o, index);
+ }
+ PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
+ if (next_o == NULL) {
+ if (_PyErr_Occurred(tstate)) {
+ if (_PyErr_ExceptionMatches(tstate, PyExc_StopIteration)) {
+ _PyEval_MonitorRaise(tstate, frame, frame->instr_ptr);
+ _PyErr_Clear(tstate);
+ }
+ else {
+ return PyStackRef_ERROR;
+ }
+ }
+ return PyStackRef_NULL;
+ }
+ return PyStackRef_FromPyObjectSteal(next_o);
+}
+
/* Check if a 'cls' provides the given special method. */
static inline int
type_has_special_method(PyTypeObject *cls, PyObject *name)
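
The helper added above folds the old tp_iternext / StopIteration dance into a single call; its return value encodes three states that every FOR_ITER variant now checks the same way. The caller pattern, as it appears in the bytecodes.c hunks (a fragment in the interpreter DSL, shown for orientation):

    _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
    if (!PyStackRef_IsValid(item)) {
        if (PyStackRef_IsError(item)) {
            ERROR_NO_POP();          /* exception already set */
        }
        JUMPBY(oparg + 1);           /* exhausted: skip body and END_FOR */
        DISPATCH();
    }
    next = item;                     /* valid item: continue the loop */
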
diff --git a/Python/codegen.c b/Python/codegen.c
index 683601103ec..0023d72cd5e 100644
--- a/Python/codegen.c
+++ b/Python/codegen.c
@@ -527,6 +527,15 @@ codegen_unwind_fblock(compiler *c, location *ploc,
case COMPILE_FBLOCK_FOR_LOOP:
/* Pop the iterator */
if (preserve_tos) {
+ ADDOP_I(c, *ploc, SWAP, 3);
+ }
+ ADDOP(c, *ploc, POP_TOP);
+ ADDOP(c, *ploc, POP_TOP);
+ return SUCCESS;
+
+ case COMPILE_FBLOCK_ASYNC_FOR_LOOP:
+ /* Pop the iterator */
+ if (preserve_tos) {
ADDOP_I(c, *ploc, SWAP, 2);
}
ADDOP(c, *ploc, POP_TOP);
@@ -629,7 +638,8 @@ codegen_unwind_fblock_stack(compiler *c, location *ploc,
c, *ploc, "'break', 'continue' and 'return' cannot appear in an except* block");
}
if (loop != NULL && (top->fb_type == COMPILE_FBLOCK_WHILE_LOOP ||
- top->fb_type == COMPILE_FBLOCK_FOR_LOOP)) {
+ top->fb_type == COMPILE_FBLOCK_FOR_LOOP ||
+ top->fb_type == COMPILE_FBLOCK_ASYNC_FOR_LOOP)) {
*loop = top;
return SUCCESS;
}
@@ -2125,7 +2135,7 @@ codegen_async_for(compiler *c, stmt_ty s)
ADDOP(c, LOC(s->v.AsyncFor.iter), GET_AITER);
USE_LABEL(c, start);
- RETURN_IF_ERROR(_PyCompile_PushFBlock(c, loc, COMPILE_FBLOCK_FOR_LOOP, start, end, NULL));
+ RETURN_IF_ERROR(_PyCompile_PushFBlock(c, loc, COMPILE_FBLOCK_ASYNC_FOR_LOOP, start, end, NULL));
/* SETUP_FINALLY to guard the __anext__ call */
ADDOP_JUMP(c, loc, SETUP_FINALLY, except);
@@ -2142,7 +2152,7 @@ codegen_async_for(compiler *c, stmt_ty s)
/* Mark jump as artificial */
ADDOP_JUMP(c, NO_LOCATION, JUMP, start);
- _PyCompile_PopFBlock(c, COMPILE_FBLOCK_FOR_LOOP, start);
+ _PyCompile_PopFBlock(c, COMPILE_FBLOCK_ASYNC_FOR_LOOP, start);
/* Except block for __anext__ */
USE_LABEL(c, except);
@@ -3895,10 +3905,11 @@ maybe_optimize_function_call(compiler *c, expr_ty e, jump_target_label end)
NEW_JUMP_TARGET_LABEL(c, loop);
NEW_JUMP_TARGET_LABEL(c, cleanup);
+ ADDOP(c, loc, PUSH_NULL); // Push NULL index for loop
USE_LABEL(c, loop);
ADDOP_JUMP(c, loc, FOR_ITER, cleanup);
if (const_oparg == CONSTANT_BUILTIN_TUPLE) {
- ADDOP_I(c, loc, LIST_APPEND, 2);
+ ADDOP_I(c, loc, LIST_APPEND, 3);
ADDOP_JUMP(c, loc, JUMP, loop);
}
else {
@@ -4442,13 +4453,12 @@ codegen_sync_comprehension_generator(compiler *c, location loc,
}
if (IS_JUMP_TARGET_LABEL(start)) {
VISIT(c, expr, gen->iter);
- ADDOP(c, LOC(gen->iter), GET_ITER);
}
}
}
if (IS_JUMP_TARGET_LABEL(start)) {
- depth++;
+ depth += 2;
ADDOP(c, LOC(gen->iter), GET_ITER);
USE_LABEL(c, start);
ADDOP_JUMP(c, LOC(gen->iter), FOR_ITER, anchor);
@@ -4543,9 +4553,9 @@ codegen_async_comprehension_generator(compiler *c, location loc,
else {
/* Sub-iter - calculate on the fly */
VISIT(c, expr, gen->iter);
- ADDOP(c, LOC(gen->iter), GET_AITER);
}
}
+ ADDOP(c, LOC(gen->iter), GET_AITER);
USE_LABEL(c, start);
/* Runtime will push a block here, so we need to account for that */
@@ -4757,19 +4767,6 @@ pop_inlined_comprehension_state(compiler *c, location loc,
return SUCCESS;
}
-static inline int
-codegen_comprehension_iter(compiler *c, comprehension_ty comp)
-{
- VISIT(c, expr, comp->iter);
- if (comp->is_async) {
- ADDOP(c, LOC(comp->iter), GET_AITER);
- }
- else {
- ADDOP(c, LOC(comp->iter), GET_ITER);
- }
- return SUCCESS;
-}
-
static int
codegen_comprehension(compiler *c, expr_ty e, int type,
identifier name, asdl_comprehension_seq *generators, expr_ty elt,
@@ -4789,9 +4786,7 @@ codegen_comprehension(compiler *c, expr_ty e, int type,
outermost = (comprehension_ty) asdl_seq_GET(generators, 0);
if (is_inlined) {
- if (codegen_comprehension_iter(c, outermost)) {
- goto error;
- }
+ VISIT(c, expr, outermost->iter);
if (push_inlined_comprehension_state(c, loc, entry, &inline_state)) {
goto error;
}
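
With the extra index slot, a synchronous for-loop now owns two stack entries, which is why the sync unwind pops twice and SWAPs the preserved value three deep, while the new COMPILE_FBLOCK_ASYNC_FOR_LOOP keeps the old single-slot shape. A sketch of the stack during unwinding, assuming a return inside the loop body:

    /* sync for-loop (preserve_tos):            async for-loop (preserve_tos):
     *   [iter, index_or_null, retval]            [aiter, retval]
     *   SWAP 3  -> [retval, index_or_null, iter] SWAP 2  -> [retval, aiter]
     *   POP_TOP -> [retval, index_or_null]       POP_TOP -> [retval]
     *   POP_TOP -> [retval]
     */
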
diff --git a/Python/context.c b/Python/context.c
index dceaae9b429..9927cab915c 100644
--- a/Python/context.c
+++ b/Python/context.c
@@ -979,7 +979,7 @@ contextvar_tp_repr(PyObject *op)
return NULL;
}
- if (PyUnicodeWriter_WriteUTF8(writer, "<ContextVar name=", 17) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, "<ContextVar name=", 17) < 0) {
goto error;
}
if (PyUnicodeWriter_WriteRepr(writer, self->var_name) < 0) {
@@ -987,7 +987,7 @@ contextvar_tp_repr(PyObject *op)
}
if (self->var_default != NULL) {
- if (PyUnicodeWriter_WriteUTF8(writer, " default=", 9) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, " default=", 9) < 0) {
goto error;
}
if (PyUnicodeWriter_WriteRepr(writer, self->var_default) < 0) {
@@ -1182,15 +1182,15 @@ token_tp_repr(PyObject *op)
if (writer == NULL) {
return NULL;
}
- if (PyUnicodeWriter_WriteUTF8(writer, "<Token", 6) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, "<Token", 6) < 0) {
goto error;
}
if (self->tok_used) {
- if (PyUnicodeWriter_WriteUTF8(writer, " used", 5) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, " used", 5) < 0) {
goto error;
}
}
- if (PyUnicodeWriter_WriteUTF8(writer, " var=", 5) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, " var=", 5) < 0) {
goto error;
}
if (PyUnicodeWriter_WriteRepr(writer, (PyObject *)self->tok_var) < 0) {
diff --git a/Python/crossinterp.c b/Python/crossinterp.c
index 26eecdddf4b..5e73ab28f2b 100644
--- a/Python/crossinterp.c
+++ b/Python/crossinterp.c
@@ -70,6 +70,17 @@ runpy_run_path(const char *filename, const char *modname)
}
+static void
+set_exc_with_cause(PyObject *exctype, const char *msg)
+{
+ PyObject *cause = PyErr_GetRaisedException();
+ PyErr_SetString(exctype, msg);
+ PyObject *exc = PyErr_GetRaisedException();
+ PyException_SetCause(exc, cause);
+ PyErr_SetRaisedException(exc);
+}
+
+
static PyObject *
pyerr_get_message(PyObject *exc)
{
@@ -908,8 +919,15 @@ get_script_xidata(PyThreadState *tstate, PyObject *obj, int pure,
}
goto error;
}
+#ifdef Py_GIL_DISABLED
+ // Don't immortalize code constants to avoid memory leaks.
+ ((_PyThreadStateImpl *)tstate)->suppress_co_const_immortalization++;
+#endif
code = Py_CompileStringExFlags(
script, filename, Py_file_input, &cf, optimize);
+#ifdef Py_GIL_DISABLED
+ ((_PyThreadStateImpl *)tstate)->suppress_co_const_immortalization--;
+#endif
Py_XDECREF(ref);
if (code == NULL) {
goto error;
@@ -1307,7 +1325,7 @@ _excinfo_normalize_type(struct _excinfo_type *info,
}
static void
-_PyXI_excinfo_Clear(_PyXI_excinfo *info)
+_PyXI_excinfo_clear(_PyXI_excinfo *info)
{
_excinfo_clear_type(&info->type);
if (info->msg != NULL) {
@@ -1357,7 +1375,7 @@ _PyXI_excinfo_InitFromException(_PyXI_excinfo *info, PyObject *exc)
assert(exc != NULL);
if (PyErr_GivenExceptionMatches(exc, PyExc_MemoryError)) {
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
return NULL;
}
const char *failure = NULL;
@@ -1403,7 +1421,7 @@ _PyXI_excinfo_InitFromException(_PyXI_excinfo *info, PyObject *exc)
error:
assert(failure != NULL);
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
return failure;
}
@@ -1454,7 +1472,7 @@ _PyXI_excinfo_InitFromObject(_PyXI_excinfo *info, PyObject *obj)
error:
assert(failure != NULL);
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
return failure;
}
@@ -1649,7 +1667,7 @@ _PyXI_ExcInfoAsObject(_PyXI_excinfo *info)
void
_PyXI_ClearExcInfo(_PyXI_excinfo *info)
{
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
}
@@ -1687,6 +1705,14 @@ _PyXI_ApplyErrorCode(_PyXI_errcode code, PyInterpreterState *interp)
PyErr_SetString(PyExc_InterpreterError,
"failed to apply namespace to __main__");
break;
+ case _PyXI_ERR_PRESERVE_FAILURE:
+ PyErr_SetString(PyExc_InterpreterError,
+ "failed to preserve objects across session");
+ break;
+ case _PyXI_ERR_EXC_PROPAGATION_FAILURE:
+ PyErr_SetString(PyExc_InterpreterError,
+ "failed to transfer exception between interpreters");
+ break;
case _PyXI_ERR_NOT_SHAREABLE:
_set_xid_lookup_failure(tstate, NULL, NULL, NULL);
break;
@@ -1736,7 +1762,7 @@ _PyXI_InitError(_PyXI_error *error, PyObject *excobj, _PyXI_errcode code)
assert(excobj == NULL);
assert(code != _PyXI_ERR_NO_ERROR);
error->code = code;
- _PyXI_excinfo_Clear(&error->uncaught);
+ _PyXI_excinfo_clear(&error->uncaught);
}
return failure;
}
@@ -1746,7 +1772,7 @@ _PyXI_ApplyError(_PyXI_error *error)
{
PyThreadState *tstate = PyThreadState_Get();
if (error->code == _PyXI_ERR_UNCAUGHT_EXCEPTION) {
- // Raise an exception that proxies the propagated exception.
+ // We will raise an exception that proxies the propagated exception.
return _PyXI_excinfo_AsObject(&error->uncaught);
}
else if (error->code == _PyXI_ERR_NOT_SHAREABLE) {
@@ -1832,7 +1858,8 @@ _sharednsitem_has_value(_PyXI_namespace_item *item, int64_t *p_interpid)
}
static int
-_sharednsitem_set_value(_PyXI_namespace_item *item, PyObject *value)
+_sharednsitem_set_value(_PyXI_namespace_item *item, PyObject *value,
+ xidata_fallback_t fallback)
{
assert(_sharednsitem_is_initialized(item));
assert(item->xidata == NULL);
@@ -1841,8 +1868,7 @@ _sharednsitem_set_value(_PyXI_namespace_item *item, PyObject *value)
return -1;
}
PyThreadState *tstate = PyThreadState_Get();
- // XXX Use _PyObject_GetXIDataWithFallback()?
- if (_PyObject_GetXIDataNoFallback(tstate, value, item->xidata) != 0) {
+ if (_PyObject_GetXIData(tstate, value, fallback, item->xidata) < 0) {
PyMem_RawFree(item->xidata);
item->xidata = NULL;
// The caller may want to propagate PyExc_NotShareableError
@@ -1874,7 +1900,8 @@ _sharednsitem_clear(_PyXI_namespace_item *item)
}
static int
-_sharednsitem_copy_from_ns(struct _sharednsitem *item, PyObject *ns)
+_sharednsitem_copy_from_ns(struct _sharednsitem *item, PyObject *ns,
+ xidata_fallback_t fallback)
{
assert(item->name != NULL);
assert(item->xidata == NULL);
@@ -1886,7 +1913,7 @@ _sharednsitem_copy_from_ns(struct _sharednsitem *item, PyObject *ns)
// When applied, this item will be set to the default (or fail).
return 0;
}
- if (_sharednsitem_set_value(item, value) < 0) {
+ if (_sharednsitem_set_value(item, value, fallback) < 0) {
return -1;
}
return 0;
@@ -2137,18 +2164,21 @@ error:
return NULL;
}
-static void _propagate_not_shareable_error(_PyXI_session *);
+static void _propagate_not_shareable_error(_PyXI_errcode *);
static int
-_fill_sharedns(_PyXI_namespace *ns, PyObject *nsobj, _PyXI_session *session)
+_fill_sharedns(_PyXI_namespace *ns, PyObject *nsobj,
+ xidata_fallback_t fallback, _PyXI_errcode *p_errcode)
{
// All items are expected to be shareable.
assert(_sharedns_check_counts(ns));
assert(ns->numnames == ns->maxitems);
assert(ns->numvalues == 0);
for (Py_ssize_t i=0; i < ns->maxitems; i++) {
- if (_sharednsitem_copy_from_ns(&ns->items[i], nsobj) < 0) {
- _propagate_not_shareable_error(session);
+ if (_sharednsitem_copy_from_ns(&ns->items[i], nsobj, fallback) < 0) {
+ if (p_errcode != NULL) {
+ _propagate_not_shareable_error(p_errcode);
+ }
// Clear out the ones we set so far.
for (Py_ssize_t j=0; j < i; j++) {
_sharednsitem_clear_value(&ns->items[j]);
@@ -2214,6 +2244,18 @@ _apply_sharedns(_PyXI_namespace *ns, PyObject *nsobj, PyObject *dflt)
/* switched-interpreter sessions */
/*********************************/
+struct xi_session_error {
+ // This is set if the interpreter is entered and raised an exception
+ // that needs to be handled in some special way during exit.
+ _PyXI_errcode *override;
+ // This is set if exit captured an exception to propagate.
+ _PyXI_error *info;
+
+ // -- pre-allocated memory --
+ _PyXI_error _info;
+ _PyXI_errcode _override;
+};
+
struct xi_session {
#define SESSION_UNUSED 0
#define SESSION_ACTIVE 1
@@ -2242,18 +2284,14 @@ struct xi_session {
// beginning of the session as a convenience.
PyObject *main_ns;
- // This is set if the interpreter is entered and raised an exception
- // that needs to be handled in some special way during exit.
- _PyXI_errcode *error_override;
- // This is set if exit captured an exception to propagate.
- _PyXI_error *error;
+ // This is a dict of objects that will be available (via sharing)
+ // once the session exits. Do not access this directly; use
+ // _PyXI_Preserve() and _PyXI_GetPreserved() instead;
+ PyObject *_preserved;
- // -- pre-allocated memory --
- _PyXI_error _error;
- _PyXI_errcode _error_override;
+ struct xi_session_error error;
};
-
_PyXI_session *
_PyXI_NewSession(void)
{
@@ -2279,9 +2317,25 @@ _session_is_active(_PyXI_session *session)
return session->status == SESSION_ACTIVE;
}
-static int _ensure_main_ns(_PyXI_session *);
+static int
+_session_pop_error(_PyXI_session *session, struct xi_session_error *err)
+{
+ if (session->error.info == NULL) {
+ assert(session->error.override == NULL);
+ *err = (struct xi_session_error){0};
+ return 0;
+ }
+ *err = session->error;
+ err->info = &err->_info;
+ if (err->override != NULL) {
+ err->override = &err->_override;
+ }
+ session->error = (struct xi_session_error){0};
+ return 1;
+}
+
+static int _ensure_main_ns(_PyXI_session *, _PyXI_errcode *);
static inline void _session_set_error(_PyXI_session *, _PyXI_errcode);
-static void _capture_current_exception(_PyXI_session *);
/* enter/exit a cross-interpreter session */
@@ -2298,9 +2352,9 @@ _enter_session(_PyXI_session *session, PyInterpreterState *interp)
assert(!session->running);
assert(session->main_ns == NULL);
// Set elsewhere and cleared in _capture_current_exception().
- assert(session->error_override == NULL);
- // Set elsewhere and cleared in _PyXI_ApplyCapturedException().
- assert(session->error == NULL);
+ assert(session->error.override == NULL);
+ // Set elsewhere and cleared in _PyXI_Exit().
+ assert(session->error.info == NULL);
// Switch to interpreter.
PyThreadState *tstate = PyThreadState_Get();
@@ -2329,14 +2383,16 @@ _exit_session(_PyXI_session *session)
PyThreadState *tstate = session->init_tstate;
assert(tstate != NULL);
assert(PyThreadState_Get() == tstate);
+ assert(!_PyErr_Occurred(tstate));
// Release any of the entered interpreters resources.
Py_CLEAR(session->main_ns);
+ Py_CLEAR(session->_preserved);
// Ensure this thread no longer owns __main__.
if (session->running) {
_PyInterpreterState_SetNotRunningMain(tstate->interp);
- assert(!PyErr_Occurred());
+ assert(!_PyErr_Occurred(tstate));
session->running = 0;
}
@@ -2353,21 +2409,16 @@ _exit_session(_PyXI_session *session)
assert(!session->own_init_tstate);
}
- // For now the error data persists past the exit.
- *session = (_PyXI_session){
- .error_override = session->error_override,
- .error = session->error,
- ._error = session->_error,
- ._error_override = session->_error_override,
- };
+ assert(session->error.info == NULL);
+ assert(session->error.override == _PyXI_ERR_NO_ERROR);
+
+ *session = (_PyXI_session){0};
}
static void
-_propagate_not_shareable_error(_PyXI_session *session)
+_propagate_not_shareable_error(_PyXI_errcode *p_errcode)
{
- if (session == NULL) {
- return;
- }
+ assert(p_errcode != NULL);
PyThreadState *tstate = PyThreadState_Get();
PyObject *exctype = get_notshareableerror_type(tstate);
if (exctype == NULL) {
@@ -2377,46 +2428,46 @@ _propagate_not_shareable_error(_PyXI_session *session)
}
if (PyErr_ExceptionMatches(exctype)) {
// We want to propagate the exception directly.
- _session_set_error(session, _PyXI_ERR_NOT_SHAREABLE);
+ *p_errcode = _PyXI_ERR_NOT_SHAREABLE;
}
}
-PyObject *
-_PyXI_ApplyCapturedException(_PyXI_session *session)
-{
- assert(!PyErr_Occurred());
- assert(session->error != NULL);
- PyObject *res = _PyXI_ApplyError(session->error);
- assert((res == NULL) != (PyErr_Occurred() == NULL));
- session->error = NULL;
- return res;
-}
-
-int
-_PyXI_HasCapturedException(_PyXI_session *session)
-{
- return session->error != NULL;
-}
-
int
_PyXI_Enter(_PyXI_session *session,
- PyInterpreterState *interp, PyObject *nsupdates)
+ PyInterpreterState *interp, PyObject *nsupdates,
+ _PyXI_session_result *result)
{
// Convert the attrs for cross-interpreter use.
_PyXI_namespace *sharedns = NULL;
if (nsupdates != NULL) {
Py_ssize_t len = PyDict_Size(nsupdates);
if (len < 0) {
+ if (result != NULL) {
+ result->errcode = _PyXI_ERR_APPLY_NS_FAILURE;
+ }
return -1;
}
if (len > 0) {
sharedns = _create_sharedns(nsupdates);
if (sharedns == NULL) {
+ if (result != NULL) {
+ result->errcode = _PyXI_ERR_APPLY_NS_FAILURE;
+ }
return -1;
}
- if (_fill_sharedns(sharedns, nsupdates, NULL) < 0) {
- assert(session->error == NULL);
+ // For now we limit it to shareable objects.
+ xidata_fallback_t fallback = _PyXIDATA_XIDATA_ONLY;
+ _PyXI_errcode errcode = _PyXI_ERR_NO_ERROR;
+ if (_fill_sharedns(sharedns, nsupdates, fallback, &errcode) < 0) {
+ assert(PyErr_Occurred());
+ assert(session->error.info == NULL);
+ if (errcode == _PyXI_ERR_NO_ERROR) {
+ errcode = _PyXI_ERR_UNCAUGHT_EXCEPTION;
+ }
_destroy_sharedns(sharedns);
+ if (result != NULL) {
+ result->errcode = errcode;
+ }
return -1;
}
}
@@ -2438,8 +2489,7 @@ _PyXI_Enter(_PyXI_session *session,
// Apply the cross-interpreter data.
if (sharedns != NULL) {
- if (_ensure_main_ns(session) < 0) {
- errcode = _PyXI_ERR_MAIN_NS_FAILURE;
+ if (_ensure_main_ns(session, &errcode) < 0) {
goto error;
}
if (_apply_sharedns(sharedns, session->main_ns, NULL) < 0) {
@@ -2455,19 +2505,124 @@ _PyXI_Enter(_PyXI_session *session,
error:
// We want to propagate all exceptions here directly (best effort).
+ assert(errcode != _PyXI_ERR_NO_ERROR);
_session_set_error(session, errcode);
+ assert(!PyErr_Occurred());
+
+ // Exit the session.
+ struct xi_session_error err;
+ (void)_session_pop_error(session, &err);
_exit_session(session);
+
if (sharedns != NULL) {
_destroy_sharedns(sharedns);
}
+
+ // Apply the error from the other interpreter.
+ PyObject *excinfo = _PyXI_ApplyError(err.info);
+ _PyXI_excinfo_clear(&err.info->uncaught);
+ if (excinfo != NULL) {
+ if (result != NULL) {
+ result->excinfo = excinfo;
+ }
+ else {
+#ifdef Py_DEBUG
+ fprintf(stderr, "_PyXI_Enter(): uncaught exception discarded");
+#endif
+ }
+ }
+ assert(PyErr_Occurred());
+
return -1;
}
-void
-_PyXI_Exit(_PyXI_session *session)
+static int _pop_preserved(_PyXI_session *, _PyXI_namespace **, PyObject **,
+ _PyXI_errcode *);
+static int _finish_preserved(_PyXI_namespace *, PyObject **);
+
+int
+_PyXI_Exit(_PyXI_session *session, _PyXI_errcode errcode,
+ _PyXI_session_result *result)
{
- _capture_current_exception(session);
+ int res = 0;
+
+ // Capture the raised exception, if any.
+ assert(session->error.info == NULL);
+ if (PyErr_Occurred()) {
+ _session_set_error(session, errcode);
+ assert(!PyErr_Occurred());
+ }
+ else {
+ assert(errcode == _PyXI_ERR_NO_ERROR);
+ assert(session->error.override == NULL);
+ }
+
+ // Capture the preserved namespace.
+ _PyXI_namespace *preserved = NULL;
+ PyObject *preservedobj = NULL;
+ if (result != NULL) {
+ errcode = _PyXI_ERR_NO_ERROR;
+ if (_pop_preserved(session, &preserved, &preservedobj, &errcode) < 0) {
+ if (session->error.info != NULL) {
+ // XXX Chain the exception (i.e. set __context__)?
+ PyErr_FormatUnraisable(
+ "Exception ignored while capturing preserved objects");
+ }
+ else {
+ _session_set_error(session, errcode);
+ }
+ }
+ }
+
+ // Exit the session.
+ struct xi_session_error err;
+ (void)_session_pop_error(session, &err);
_exit_session(session);
+
+ // Restore the preserved namespace.
+ assert(preserved == NULL || preservedobj == NULL);
+ if (_finish_preserved(preserved, &preservedobj) < 0) {
+ assert(preservedobj == NULL);
+ if (err.info != NULL) {
+ // XXX Chain the exception (i.e. set __context__)?
+ PyErr_FormatUnraisable(
+ "Exception ignored while capturing preserved objects");
+ }
+ else {
+ errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ _propagate_not_shareable_error(&errcode);
+ }
+ }
+ if (result != NULL) {
+ result->preserved = preservedobj;
+ result->errcode = errcode;
+ }
+
+ // Apply the error from the other interpreter, if any.
+ if (err.info != NULL) {
+ res = -1;
+ assert(!PyErr_Occurred());
+ PyObject *excinfo = _PyXI_ApplyError(err.info);
+ _PyXI_excinfo_clear(&err.info->uncaught);
+ if (excinfo == NULL) {
+ assert(PyErr_Occurred());
+ if (result != NULL) {
+ _PyXI_ClearResult(result);
+ *result = (_PyXI_session_result){
+ .errcode = _PyXI_ERR_EXC_PROPAGATION_FAILURE,
+ };
+ }
+ }
+ else if (result != NULL) {
+ result->excinfo = excinfo;
+ }
+ else {
+#ifdef Py_DEBUG
+ fprintf(stderr, "_PyXI_Exit(): uncaught exception discarded");
+#endif
+ }
+ }
+ return res;
}
@@ -2476,15 +2631,15 @@ _PyXI_Exit(_PyXI_session *session)
static void
_capture_current_exception(_PyXI_session *session)
{
- assert(session->error == NULL);
+ assert(session->error.info == NULL);
if (!PyErr_Occurred()) {
- assert(session->error_override == NULL);
+ assert(session->error.override == NULL);
return;
}
// Handle the exception override.
- _PyXI_errcode *override = session->error_override;
- session->error_override = NULL;
+ _PyXI_errcode *override = session->error.override;
+ session->error.override = NULL;
_PyXI_errcode errcode = override != NULL
? *override
: _PyXI_ERR_UNCAUGHT_EXCEPTION;
@@ -2507,7 +2662,7 @@ _capture_current_exception(_PyXI_session *session)
}
// Capture the exception.
- _PyXI_error *err = &session->_error;
+ _PyXI_error *err = &session->error._info;
*err = (_PyXI_error){
.interp = session->init_tstate->interp,
};
@@ -2534,7 +2689,7 @@ _capture_current_exception(_PyXI_session *session)
// Finished!
assert(!PyErr_Occurred());
- session->error = err;
+ session->error.info = err;
}
static inline void
@@ -2542,15 +2697,19 @@ _session_set_error(_PyXI_session *session, _PyXI_errcode errcode)
{
assert(_session_is_active(session));
assert(PyErr_Occurred());
+ if (errcode == _PyXI_ERR_NO_ERROR) {
+ // We're a bit forgiving here.
+ errcode = _PyXI_ERR_UNCAUGHT_EXCEPTION;
+ }
if (errcode != _PyXI_ERR_UNCAUGHT_EXCEPTION) {
- session->_error_override = errcode;
- session->error_override = &session->_error_override;
+ session->error._override = errcode;
+ session->error.override = &session->error._override;
}
_capture_current_exception(session);
}
static int
-_ensure_main_ns(_PyXI_session *session)
+_ensure_main_ns(_PyXI_session *session, _PyXI_errcode *p_errcode)
{
assert(_session_is_active(session));
if (session->main_ns != NULL) {
@@ -2559,11 +2718,17 @@ _ensure_main_ns(_PyXI_session *session)
// Cache __main__.__dict__.
PyObject *main_mod = _Py_GetMainModule(session->init_tstate);
if (_Py_CheckMainModule(main_mod) < 0) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_MAIN_NS_FAILURE;
+ }
return -1;
}
PyObject *ns = PyModule_GetDict(main_mod); // borrowed
Py_DECREF(main_mod);
if (ns == NULL) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_MAIN_NS_FAILURE;
+ }
return -1;
}
session->main_ns = Py_NewRef(ns);
@@ -2571,21 +2736,150 @@ _ensure_main_ns(_PyXI_session *session)
}
PyObject *
-_PyXI_GetMainNamespace(_PyXI_session *session)
+_PyXI_GetMainNamespace(_PyXI_session *session, _PyXI_errcode *p_errcode)
{
if (!_session_is_active(session)) {
PyErr_SetString(PyExc_RuntimeError, "session not active");
return NULL;
}
- if (_ensure_main_ns(session) < 0) {
- _session_set_error(session, _PyXI_ERR_MAIN_NS_FAILURE);
- _capture_current_exception(session);
+ if (_ensure_main_ns(session, p_errcode) < 0) {
return NULL;
}
return session->main_ns;
}
+static int
+_pop_preserved(_PyXI_session *session,
+ _PyXI_namespace **p_xidata, PyObject **p_obj,
+ _PyXI_errcode *p_errcode)
+{
+ assert(_PyThreadState_GET() == session->init_tstate); // active session
+ if (session->_preserved == NULL) {
+ *p_xidata = NULL;
+ *p_obj = NULL;
+ return 0;
+ }
+ if (session->init_tstate == session->prev_tstate) {
+ // We did not switch interpreters.
+ *p_xidata = NULL;
+ *p_obj = session->_preserved;
+ session->_preserved = NULL;
+ return 0;
+ }
+ *p_obj = NULL;
+
+ // We did switch interpreters.
+ Py_ssize_t len = PyDict_Size(session->_preserved);
+ if (len < 0) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ else if (len == 0) {
+ *p_xidata = NULL;
+ }
+ else {
+ _PyXI_namespace *xidata = _create_sharedns(session->_preserved);
+ if (xidata == NULL) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ _PyXI_errcode errcode = _PyXI_ERR_NO_ERROR;
+ if (_fill_sharedns(xidata, session->_preserved,
+ _PyXIDATA_FULL_FALLBACK, &errcode) < 0)
+ {
+ assert(session->error.info == NULL);
+ if (errcode != _PyXI_ERR_NOT_SHAREABLE) {
+ errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ if (p_errcode != NULL) {
+ *p_errcode = errcode;
+ }
+ _destroy_sharedns(xidata);
+ return -1;
+ }
+ *p_xidata = xidata;
+ }
+ Py_CLEAR(session->_preserved);
+ return 0;
+}
+
+static int
+_finish_preserved(_PyXI_namespace *xidata, PyObject **p_preserved)
+{
+ if (xidata == NULL) {
+ return 0;
+ }
+ int res = -1;
+ if (p_preserved != NULL) {
+ PyObject *ns = PyDict_New();
+ if (ns == NULL) {
+ goto finally;
+ }
+ if (_apply_sharedns(xidata, ns, NULL) < 0) {
+ Py_CLEAR(ns);
+ goto finally;
+ }
+ *p_preserved = ns;
+ }
+ res = 0;
+
+finally:
+ _destroy_sharedns(xidata);
+ return res;
+}
+
+int
+_PyXI_Preserve(_PyXI_session *session, const char *name, PyObject *value,
+ _PyXI_errcode *p_errcode)
+{
+ if (!_session_is_active(session)) {
+ PyErr_SetString(PyExc_RuntimeError, "session not active");
+ return -1;
+ }
+ if (session->_preserved == NULL) {
+ session->_preserved = PyDict_New();
+ if (session->_preserved == NULL) {
+ set_exc_with_cause(PyExc_RuntimeError,
+ "failed to initialize preserved objects");
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ }
+ if (PyDict_SetItemString(session->_preserved, name, value) < 0) {
+ set_exc_with_cause(PyExc_RuntimeError, "failed to preserve object");
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ return 0;
+}
+
+PyObject *
+_PyXI_GetPreserved(_PyXI_session_result *result, const char *name)
+{
+ PyObject *value = NULL;
+ if (result->preserved != NULL) {
+ (void)PyDict_GetItemStringRef(result->preserved, name, &value);
+ }
+ return value;
+}
+
+void
+_PyXI_ClearResult(_PyXI_session_result *result)
+{
+ Py_CLEAR(result->preserved);
+ Py_CLEAR(result->excinfo);
+}
+
+
/*********************/
/* runtime lifecycle */
/*********************/
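
Taken together, the crossinterp.c changes move error and result reporting out of long-lived session fields into a caller-owned _PyXI_session_result. A hedged sketch of the new calling convention (names taken from the hunks above; the setup of interp and nsupdates, and _PyXI_FreeSession as the counterpart of _PyXI_NewSession, are assumed):

    _PyXI_session *session = _PyXI_NewSession();
    if (session == NULL) {
        return -1;
    }
    _PyXI_session_result result = {0};
    if (_PyXI_Enter(session, interp, nsupdates, &result) < 0) {
        /* result.errcode / result.excinfo describe the failure */
        _PyXI_FreeSession(session);
        _PyXI_ClearResult(&result);
        return -1;
    }
    /* ... run code in the entered interpreter, possibly calling
       _PyXI_Preserve(session, "spam", obj, &errcode) along the way ... */
    int res = _PyXI_Exit(session, _PyXI_ERR_NO_ERROR, &result);
    _PyXI_FreeSession(session);
    PyObject *spam = _PyXI_GetPreserved(&result, "spam");  /* new ref or NULL */
    _PyXI_ClearResult(&result);
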
diff --git a/Python/crossinterp_data_lookup.h b/Python/crossinterp_data_lookup.h
index 88eb41da89e..b16f38b847f 100644
--- a/Python/crossinterp_data_lookup.h
+++ b/Python/crossinterp_data_lookup.h
@@ -701,6 +701,14 @@ _PyFunction_FromXIData(_PyXIData_t *xidata)
Py_DECREF(code);
return NULL;
}
+ PyThreadState *tstate = _PyThreadState_GET();
+ if (PyDict_SetItem(globals, &_Py_ID(__builtins__),
+ tstate->interp->builtins) < 0)
+ {
+ Py_DECREF(code);
+ Py_DECREF(globals);
+ return NULL;
+ }
PyObject *func = PyFunction_New(code, globals);
Py_DECREF(code);
Py_DECREF(globals);
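
The added PyDict_SetItem guards against functions rebuilt in another interpreter running without builtins: a bare globals dict would give every name lookup a NameError. A minimal standalone sketch of the same fix (code is assumed to be a valid code object):

    /* Rebuild a function from a code object with fresh globals; without
       __builtins__ the function body could not resolve names like len(). */
    PyObject *globals = PyDict_New();
    if (globals == NULL) {
        return NULL;
    }
    if (PyDict_SetItemString(globals, "__builtins__",
                             PyEval_GetBuiltins()) < 0)  /* borrowed ref is fine */
    {
        Py_DECREF(globals);
        return NULL;
    }
    PyObject *func = PyFunction_New(code, globals);
    Py_DECREF(globals);
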
diff --git a/Python/errors.c b/Python/errors.c
index 81f267b043a..a3122f76bdd 100644
--- a/Python/errors.c
+++ b/Python/errors.c
@@ -10,7 +10,6 @@
#include "pycore_pystate.h" // _PyThreadState_GET()
#include "pycore_runtime.h" // _Py_ID()
#include "pycore_structseq.h" // _PyStructSequence_FiniBuiltin()
-#include "pycore_sysmodule.h" // _PySys_GetOptionalAttr()
#include "pycore_traceback.h" // _PyTraceBack_FromFrame()
#include "pycore_unicodeobject.h" // _PyUnicode_Equal()
@@ -1570,7 +1569,7 @@ write_unraisable_exc(PyThreadState *tstate, PyObject *exc_type,
PyObject *obj)
{
PyObject *file;
- if (_PySys_GetOptionalAttr(&_Py_ID(stderr), &file) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stderr), &file) < 0) {
return -1;
}
if (file == NULL || file == Py_None) {
@@ -1677,7 +1676,7 @@ format_unraisable_v(const char *format, va_list va, PyObject *obj)
}
PyObject *hook;
- if (_PySys_GetOptionalAttr(&_Py_ID(unraisablehook), &hook) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(unraisablehook), &hook) < 0) {
Py_DECREF(hook_args);
err_msg_str = NULL;
obj = NULL;
diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h
index 40090e692e4..d19605169d5 100644
--- a/Python/executor_cases.c.h
+++ b/Python/executor_cases.c.h
@@ -534,7 +534,7 @@
stack_pointer += -1;
assert(WITHIN_STACK_BOUNDS());
_PyFrame_SetStackPointer(frame, stack_pointer);
- PyStackRef_CLOSE(value);
+ PyStackRef_XCLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
break;
}
@@ -577,6 +577,20 @@
break;
}
+ case _POP_ITER: {
+ _PyStackRef index_or_null;
+ _PyStackRef iter;
+ index_or_null = stack_pointer[-1];
+ iter = stack_pointer[-2];
+ (void)index_or_null;
+ stack_pointer += -2;
+ assert(WITHIN_STACK_BOUNDS());
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ PyStackRef_CLOSE(iter);
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ break;
+ }
+
case _END_SEND: {
_PyStackRef value;
_PyStackRef receiver;
@@ -4172,25 +4186,37 @@
case _GET_ITER: {
_PyStackRef iterable;
_PyStackRef iter;
+ _PyStackRef index_or_null;
iterable = stack_pointer[-1];
#ifdef Py_STATS
_PyFrame_SetStackPointer(frame, stack_pointer);
_Py_GatherStats_GetIter(iterable);
stack_pointer = _PyFrame_GetStackPointer(frame);
#endif
- _PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *iter_o = PyObject_GetIter(PyStackRef_AsPyObjectBorrow(iterable));
- stack_pointer = _PyFrame_GetStackPointer(frame);
- stack_pointer += -1;
- assert(WITHIN_STACK_BOUNDS());
- _PyFrame_SetStackPointer(frame, stack_pointer);
- PyStackRef_CLOSE(iterable);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (iter_o == NULL) {
- JUMP_TO_ERROR();
+
+ PyTypeObject *tp = PyStackRef_TYPE(iterable);
+ if (tp == &PyTuple_Type || tp == &PyList_Type) {
+ iter = iterable;
+ index_or_null = PyStackRef_TagInt(0);
}
- iter = PyStackRef_FromPyObjectSteal(iter_o);
- stack_pointer[0] = iter;
+ else {
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ PyObject *iter_o = PyObject_GetIter(PyStackRef_AsPyObjectBorrow(iterable));
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ stack_pointer += -1;
+ assert(WITHIN_STACK_BOUNDS());
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ PyStackRef_CLOSE(iterable);
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (iter_o == NULL) {
+ JUMP_TO_ERROR();
+ }
+ iter = PyStackRef_FromPyObjectSteal(iter_o);
+ index_or_null = PyStackRef_NULL;
+ stack_pointer += 1;
+ }
+ stack_pointer[-1] = iter;
+ stack_pointer[0] = index_or_null;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
break;
@@ -4237,32 +4263,25 @@
/* _FOR_ITER is not a viable micro-op for tier 2 because it is replaced */
case _FOR_ITER_TIER_TWO: {
+ _PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
- iter = stack_pointer[-1];
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
_PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (!matches) {
- JUMP_TO_ERROR();
- }
- _PyFrame_SetStackPointer(frame, stack_pointer);
- _PyEval_MonitorRaise(tstate, frame, frame->instr_ptr);
- _PyErr_Clear(tstate);
- stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ JUMP_TO_ERROR();
}
if (true) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
}
- next = PyStackRef_FromPyObjectSteal(next_o);
+ next = item;
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
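
_PyForIter_VirtualIteratorNext folds both shapes into one call: a tagged index means "step the sequence directly", a NULL slot means "call the real iterator". A hedged sketch of the expected behaviour, using a plain PyObject* and a signed index in place of the real _PyStackRef error/exhausted sentinels:

    #include "Python.h"

    /* Sketch only: returns a new reference to the next item, or NULL.
     * NULL with no exception set means the iteration is exhausted. */
    static PyObject *
    virtual_iter_next_sketch(PyObject *iter, Py_ssize_t *index)
    {
        if (*index >= 0) {   // tagged-index mode: iter is a list or tuple
            Py_ssize_t n = PyTuple_CheckExact(iter)
                               ? PyTuple_GET_SIZE(iter)
                               : PyList_GET_SIZE(iter);
            if (*index >= n) {
                return NULL;                       // exhausted
            }
            PyObject *item = PyTuple_CheckExact(iter)
                                 ? PyTuple_GET_ITEM(iter, *index)
                                 : PyList_GET_ITEM(iter, *index);
            (*index)++;
            return Py_NewRef(item);
        }
        return PyIter_Next(iter);                  // may set an exception
    }

Note that unlocked PyList_GET_ITEM is not safe under free threading, which is why the tier-two list path above goes through _PyList_GetItemRefNoLock instead.
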
@@ -4272,21 +4291,18 @@
/* _INSTRUMENTED_FOR_ITER is not a viable micro-op for tier 2 because it is instrumented */
case _ITER_CHECK_LIST: {
+ _PyStackRef null_or_index;
_PyStackRef iter;
- iter = stack_pointer[-1];
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (Py_TYPE(iter_o) != &PyListIter_Type) {
+ if (Py_TYPE(iter_o) != &PyList_Type) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
+ assert(PyStackRef_IsTaggedInt(null_or_index));
#ifdef Py_GIL_DISABLED
- if (!_PyObject_IsUniquelyReferenced(iter_o)) {
- UOP_STAT_INC(uopcode, miss);
- JUMP_TO_JUMP_TARGET();
- }
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) ||
- !_PyObject_GC_IS_SHARED(it->it_seq)) {
+ if (!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
@@ -4297,24 +4313,17 @@
/* _ITER_JUMP_LIST is not a viable micro-op for tier 2 because it is replaced */
case _GUARD_NOT_EXHAUSTED_LIST: {
+ _PyStackRef null_or_index;
_PyStackRef iter;
- iter = stack_pointer[-1];
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
#ifndef Py_GIL_DISABLED
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
- PyListObject *seq = it->it_seq;
- if (seq == NULL) {
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(list_o) == &PyList_Type);
+ if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
- if ((size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) {
- it->it_index = -1;
- if (1) {
- UOP_STAT_INC(uopcode, miss);
- JUMP_TO_JUMP_TARGET();
- }
- }
#endif
break;
}
@@ -4322,38 +4331,30 @@
/* _ITER_NEXT_LIST is not a viable micro-op for tier 2 because it is replaced */
case _ITER_NEXT_LIST_TIER_TWO: {
+ _PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
- iter = stack_pointer[-1];
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
- PyListObject *seq = it->it_seq;
- assert(seq);
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(PyList_CheckExact(list_o));
#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
- _PyObject_GC_IS_SHARED(seq));
+ assert(_Py_IsOwnedByCurrentThread((PyObject *)list_o) ||
+ _PyObject_GC_IS_SHARED(list_o));
STAT_INC(FOR_ITER, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
- int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
+ int result = _PyList_GetItemRefNoLock((PyListObject *)list_o, PyStackRef_UntagInt(null_or_index), &next);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (result < 0) {
+ if (result <= 0) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
- if (result == 0) {
- it->it_index = -1;
- if (1) {
- UOP_STAT_INC(uopcode, miss);
- JUMP_TO_JUMP_TARGET();
- }
- }
- it->it_index++;
#else
- assert(it->it_index < PyList_GET_SIZE(seq));
- next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
+ assert(PyStackRef_UntagInt(null_or_index) < PyList_GET_SIZE(list_o));
+ next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(list_o, PyStackRef_UntagInt(null_or_index)));
#endif
+ null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
@@ -4361,39 +4362,29 @@
}
case _ITER_CHECK_TUPLE: {
+ _PyStackRef null_or_index;
_PyStackRef iter;
- iter = stack_pointer[-1];
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (Py_TYPE(iter_o) != &PyTupleIter_Type) {
+ if (Py_TYPE(iter_o) != &PyTuple_Type) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
- #ifdef Py_GIL_DISABLED
- if (!_PyObject_IsUniquelyReferenced(iter_o)) {
- UOP_STAT_INC(uopcode, miss);
- JUMP_TO_JUMP_TARGET();
- }
- #endif
+ assert(PyStackRef_IsTaggedInt(null_or_index));
break;
}
/* _ITER_JUMP_TUPLE is not a viable micro-op for tier 2 because it is replaced */
case _GUARD_NOT_EXHAUSTED_TUPLE: {
+ _PyStackRef null_or_index;
_PyStackRef iter;
- iter = stack_pointer[-1];
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
- #ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- #endif
- PyTupleObject *seq = it->it_seq;
- if (seq == NULL) {
- UOP_STAT_INC(uopcode, miss);
- JUMP_TO_JUMP_TARGET();
- }
- if (it->it_index >= PyTuple_GET_SIZE(seq)) {
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
+ PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(tuple_o) == &PyTuple_Type);
+ if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o)) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
@@ -4401,19 +4392,18 @@
}
case _ITER_NEXT_TUPLE: {
+ _PyStackRef null_or_index;
_PyStackRef iter;
_PyStackRef next;
- iter = stack_pointer[-1];
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
- PyTupleObject *seq = it->it_seq;
- #ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- #endif
- assert(seq);
- assert(it->it_index < PyTuple_GET_SIZE(seq));
- next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, it->it_index++));
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
+ PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(tuple_o) == &PyTuple_Type);
+ uintptr_t i = PyStackRef_UntagInt(null_or_index);
+ assert((size_t)i < (size_t)PyTuple_GET_SIZE(tuple_o));
+ next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(tuple_o, i));
+ null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
@@ -4422,7 +4412,7 @@
case _ITER_CHECK_RANGE: {
_PyStackRef iter;
- iter = stack_pointer[-1];
+ iter = stack_pointer[-2];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(r) != &PyRangeIter_Type) {
UOP_STAT_INC(uopcode, miss);
@@ -4441,7 +4431,7 @@
case _GUARD_NOT_EXHAUSTED_RANGE: {
_PyStackRef iter;
- iter = stack_pointer[-1];
+ iter = stack_pointer[-2];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
if (r->len <= 0) {
@@ -4454,7 +4444,7 @@
case _ITER_NEXT_RANGE: {
_PyStackRef iter;
_PyStackRef next;
- iter = stack_pointer[-1];
+ iter = stack_pointer[-2];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
assert(Py_TYPE(r) == &PyRangeIter_Type);
#ifdef Py_GIL_DISABLED
@@ -4479,7 +4469,7 @@
_PyStackRef iter;
_PyInterpreterFrame *gen_frame;
oparg = CURRENT_OPARG();
- iter = stack_pointer[-1];
+ iter = stack_pointer[-2];
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(gen) != &PyGen_Type) {
UOP_STAT_INC(uopcode, miss);
diff --git a/Python/flowgraph.c b/Python/flowgraph.c
index 78ef02a911a..2adc8c84d83 100644
--- a/Python/flowgraph.c
+++ b/Python/flowgraph.c
@@ -299,26 +299,34 @@ basicblock_returns(const basicblock *b) {
}
static void
-dump_basicblock(const basicblock *b)
+dump_basicblock(const basicblock *b, bool highlight)
{
const char *b_return = basicblock_returns(b) ? "return " : "";
+ if (highlight) {
+ fprintf(stderr, ">>> ");
+ }
fprintf(stderr, "%d: [EH=%d CLD=%d WRM=%d NO_FT=%d %p] used: %d, depth: %d, preds: %d %s\n",
b->b_label.id, b->b_except_handler, b->b_cold, b->b_warm, BB_NO_FALLTHROUGH(b), b, b->b_iused,
b->b_startdepth, b->b_predecessors, b_return);
+ int depth = b->b_startdepth;
if (b->b_instr) {
int i;
for (i = 0; i < b->b_iused; i++) {
- fprintf(stderr, " [%02d] ", i);
+ fprintf(stderr, " [%02d] depth: %d ", i, depth);
dump_instr(b->b_instr + i);
+
+ int popped = _PyOpcode_num_popped(b->b_instr[i].i_opcode, b->b_instr[i].i_oparg);
+ int pushed = _PyOpcode_num_pushed(b->b_instr[i].i_opcode, b->b_instr[i].i_oparg);
+ depth += (pushed - popped);
}
}
}
void
-_PyCfgBuilder_DumpGraph(const basicblock *entryblock)
+_PyCfgBuilder_DumpGraph(const basicblock *entryblock, const basicblock *mark)
{
for (const basicblock *b = entryblock; b != NULL; b = b->b_next) {
- dump_basicblock(b);
+ dump_basicblock(b, b == mark);
}
}
@@ -2862,8 +2870,11 @@ optimize_load_fast(cfg_builder *g)
// how many inputs should be left on the stack.
// Opcodes that consume no inputs
+ case FORMAT_SIMPLE:
case GET_ANEXT:
+ case GET_ITER:
case GET_LEN:
+ case GET_YIELD_FROM_ITER:
case IMPORT_FROM:
case MATCH_KEYS:
case MATCH_MAPPING:
@@ -2898,6 +2909,16 @@ optimize_load_fast(cfg_builder *g)
break;
}
+ case END_SEND:
+ case SET_FUNCTION_ATTRIBUTE: {
+ assert(_PyOpcode_num_popped(opcode, oparg) == 2);
+ assert(_PyOpcode_num_pushed(opcode, oparg) == 1);
+ ref tos = ref_stack_pop(&refs);
+ ref_stack_pop(&refs);
+ PUSH_REF(tos.instr, tos.local);
+ break;
+ }
+
// Opcodes that consume some inputs and push new values
case CHECK_EXC_MATCH: {
ref_stack_pop(&refs);
@@ -2927,6 +2948,14 @@ optimize_load_fast(cfg_builder *g)
break;
}
+ case LOAD_SPECIAL:
+ case PUSH_EXC_INFO: {
+ ref tos = ref_stack_pop(&refs);
+ PUSH_REF(i, NOT_LOCAL);
+ PUSH_REF(tos.instr, tos.local);
+ break;
+ }
+
case SEND: {
load_fast_push_block(&sp, instr->i_target, refs.size);
ref_stack_pop(&refs);
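
The new cases in optimize_load_fast model stack effects abstractly: END_SEND and SET_FUNCTION_ATTRIBUTE consume two refs but the surviving value is the old top-of-stack, while LOAD_SPECIAL and PUSH_EXC_INFO slide a fresh value underneath it. A tiny sketch of the first transform over a plain array stack (hypothetical ref type, not the real helpers):

    typedef struct { int instr; int local; } ref;

    /* ... a b  ->  ... b : drop the value under the top, but keep the
     * top's provenance so later LOAD_FAST reasoning still sees it. */
    static void pop_under_top(ref *stack, int *sp)
    {
        ref tos = stack[--(*sp)];   // pop the surviving value
        --(*sp);                    // discard the value underneath
        stack[(*sp)++] = tos;       // push the survivor back
    }
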
diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c
index d2ea5b5e06b..5aaa68c5b51 100644
--- a/Python/gc_free_threading.c
+++ b/Python/gc_free_threading.c
@@ -2062,7 +2062,7 @@ gc_should_collect_mem_usage(GCState *gcstate)
// 70,000 new container objects.
return true;
}
- Py_ssize_t last_mem = gcstate->last_mem;
+ Py_ssize_t last_mem = _Py_atomic_load_ssize_relaxed(&gcstate->last_mem);
Py_ssize_t mem_threshold = Py_MAX(last_mem / 10, 128);
if ((mem - last_mem) > mem_threshold) {
// The process memory usage has increased too much, do a collection.
@@ -2245,7 +2245,8 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state,
// Store the current memory usage, can be smaller now if breaking cycles
// freed some memory.
- state->gcstate->last_mem = get_process_mem_usage();
+ Py_ssize_t last_mem = get_process_mem_usage();
+ _Py_atomic_store_ssize_relaxed(&state->gcstate->last_mem, last_mem);
// Append objects with legacy finalizers to the "gc.garbage" list.
handle_legacy_finalizers(state);
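
last_mem is now accessed with relaxed atomics because, with the GIL disabled, gc_should_collect_mem_usage can race with a collection storing the new figure. A minimal sketch of the pattern, using C11 atomics in place of the pycore wrappers:

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic ptrdiff_t last_mem;   // stands in for gcstate->last_mem

    ptrdiff_t load_last_mem(void) {
        /* Relaxed is enough: we need a tear-free value, not ordering. */
        return atomic_load_explicit(&last_mem, memory_order_relaxed);
    }

    void store_last_mem(ptrdiff_t v) {
        atomic_store_explicit(&last_mem, v, memory_order_relaxed);
    }
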
diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h
index 1c98e1ce4fc..c8825df3ade 100644
--- a/Python/generated_cases.c.h
+++ b/Python/generated_cases.c.h
@@ -5731,17 +5731,19 @@
_Py_CODEUNIT* const this_instr = next_instr - 2;
(void)this_instr;
_PyStackRef iter;
+ _PyStackRef null_or_index;
_PyStackRef next;
// _SPECIALIZE_FOR_ITER
{
- iter = stack_pointer[-1];
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
uint16_t counter = read_u16(&this_instr[1].cache);
(void)counter;
#if ENABLE_SPECIALIZATION_FT
if (ADAPTIVE_COUNTER_TRIGGERS(counter)) {
next_instr = this_instr;
_PyFrame_SetStackPointer(frame, stack_pointer);
- _Py_Specialize_ForIter(iter, next_instr, oparg);
+ _Py_Specialize_ForIter(iter, null_or_index, next_instr, oparg);
stack_pointer = _PyFrame_GetStackPointer(frame);
DISPATCH_SAME_OPARG();
}
@@ -5751,30 +5753,20 @@
}
// _FOR_ITER
{
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
_PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (!matches) {
- JUMP_TO_LABEL(error);
- }
- _PyFrame_SetStackPointer(frame, stack_pointer);
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
- stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ JUMP_TO_LABEL(error);
}
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
JUMPBY(oparg + 1);
+ stack_pointer[-1] = null_or_index;
DISPATCH();
}
- next = PyStackRef_FromPyObjectSteal(next_o);
+ next = item;
}
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
@@ -5806,7 +5798,7 @@
}
// _FOR_ITER_GEN_FRAME
{
- iter = stack_pointer[-1];
+ iter = stack_pointer[-2];
PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(gen) != &PyGen_Type) {
UPDATE_MISS_STATS(FOR_ITER);
@@ -5863,26 +5855,22 @@
INSTRUCTION_STATS(FOR_ITER_LIST);
static_assert(INLINE_CACHE_ENTRIES_FOR_ITER == 1, "incorrect cache size");
_PyStackRef iter;
+ _PyStackRef null_or_index;
_PyStackRef next;
/* Skip 1 cache entry */
// _ITER_CHECK_LIST
{
- iter = stack_pointer[-1];
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (Py_TYPE(iter_o) != &PyListIter_Type) {
+ if (Py_TYPE(iter_o) != &PyList_Type) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
+ assert(PyStackRef_IsTaggedInt(null_or_index));
#ifdef Py_GIL_DISABLED
- if (!_PyObject_IsUniquelyReferenced(iter_o)) {
- UPDATE_MISS_STATS(FOR_ITER);
- assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
- JUMP_TO_PREDICTED(FOR_ITER);
- }
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) ||
- !_PyObject_GC_IS_SHARED(it->it_seq)) {
+ if (!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o)) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
@@ -5891,42 +5879,30 @@
}
// _ITER_JUMP_LIST
{
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- (void)iter_o;
+
#else
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(list_o) == &PyList_Type);
STAT_INC(FOR_ITER, hit);
- PyListObject *seq = it->it_seq;
- if (seq == NULL || (size_t)it->it_index >= (size_t)PyList_GET_SIZE(seq)) {
- it->it_index = -1;
- if (seq != NULL) {
- it->it_seq = NULL;
- _PyFrame_SetStackPointer(frame, stack_pointer);
- Py_DECREF(seq);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- }
+ if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyList_GET_SIZE(list_o)) {
+ null_or_index = PyStackRef_TagInt(-1);
JUMPBY(oparg + 1);
+ stack_pointer[-1] = null_or_index;
DISPATCH();
}
#endif
}
// _ITER_NEXT_LIST
{
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyListIter_Type);
- PyListObject *seq = it->it_seq;
- assert(seq);
+ PyObject *list_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(PyList_CheckExact(list_o));
#ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- assert(_Py_IsOwnedByCurrentThread((PyObject *)seq) ||
- _PyObject_GC_IS_SHARED(seq));
+ assert(_Py_IsOwnedByCurrentThread(list_o) ||
+ _PyObject_GC_IS_SHARED(list_o));
STAT_INC(FOR_ITER, hit);
_PyFrame_SetStackPointer(frame, stack_pointer);
- int result = _PyList_GetItemRefNoLock(seq, it->it_index, &next);
+ int result = _PyList_GetItemRefNoLock((PyListObject *)list_o, PyStackRef_UntagInt(null_or_index), &next);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (result < 0) {
UPDATE_MISS_STATS(FOR_ITER);
@@ -5934,16 +5910,17 @@
JUMP_TO_PREDICTED(FOR_ITER);
}
if (result == 0) {
- it->it_index = -1;
+ null_or_index = PyStackRef_TagInt(-1);
JUMPBY(oparg + 1);
+ stack_pointer[-1] = null_or_index;
DISPATCH();
}
- it->it_index++;
#else
- assert(it->it_index < PyList_GET_SIZE(seq));
- next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(seq, it->it_index++));
+ next = PyStackRef_FromPyObjectNew(PyList_GET_ITEM(list_o, PyStackRef_UntagInt(null_or_index)));
#endif
+ null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
}
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
@@ -5966,7 +5943,7 @@
/* Skip 1 cache entry */
// _ITER_CHECK_RANGE
{
- iter = stack_pointer[-1];
+ iter = stack_pointer[-2];
_PyRangeIterObject *r = (_PyRangeIterObject *)PyStackRef_AsPyObjectBorrow(iter);
if (Py_TYPE(r) != &PyRangeIter_Type) {
UPDATE_MISS_STATS(FOR_ITER);
@@ -6029,63 +6006,44 @@
INSTRUCTION_STATS(FOR_ITER_TUPLE);
static_assert(INLINE_CACHE_ENTRIES_FOR_ITER == 1, "incorrect cache size");
_PyStackRef iter;
+ _PyStackRef null_or_index;
_PyStackRef next;
/* Skip 1 cache entry */
// _ITER_CHECK_TUPLE
{
- iter = stack_pointer[-1];
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (Py_TYPE(iter_o) != &PyTupleIter_Type) {
+ if (Py_TYPE(iter_o) != &PyTuple_Type) {
UPDATE_MISS_STATS(FOR_ITER);
assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
JUMP_TO_PREDICTED(FOR_ITER);
}
- #ifdef Py_GIL_DISABLED
- if (!_PyObject_IsUniquelyReferenced(iter_o)) {
- UPDATE_MISS_STATS(FOR_ITER);
- assert(_PyOpcode_Deopt[opcode] == (FOR_ITER));
- JUMP_TO_PREDICTED(FOR_ITER);
- }
- #endif
+ assert(PyStackRef_IsTaggedInt(null_or_index));
}
// _ITER_JUMP_TUPLE
{
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- (void)iter_o;
- assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
- #ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- #endif
- _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
+ PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
+ (void)tuple_o;
+ assert(Py_TYPE(tuple_o) == &PyTuple_Type);
STAT_INC(FOR_ITER, hit);
- PyTupleObject *seq = it->it_seq;
- if (seq == NULL || (size_t)it->it_index >= (size_t)PyTuple_GET_SIZE(seq)) {
- #ifndef Py_GIL_DISABLED
- if (seq != NULL) {
- it->it_seq = NULL;
- _PyFrame_SetStackPointer(frame, stack_pointer);
- Py_DECREF(seq);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- }
- #endif
-
+ if ((size_t)PyStackRef_UntagInt(null_or_index) >= (size_t)PyTuple_GET_SIZE(tuple_o)) {
+ null_or_index = PyStackRef_TagInt(-1);
JUMPBY(oparg + 1);
+ stack_pointer[-1] = null_or_index;
DISPATCH();
}
}
// _ITER_NEXT_TUPLE
{
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- _PyTupleIterObject *it = (_PyTupleIterObject *)iter_o;
- assert(Py_TYPE(iter_o) == &PyTupleIter_Type);
- PyTupleObject *seq = it->it_seq;
- #ifdef Py_GIL_DISABLED
- assert(_PyObject_IsUniquelyReferenced(iter_o));
- #endif
- assert(seq);
- assert(it->it_index < PyTuple_GET_SIZE(seq));
- next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(seq, it->it_index++));
+ PyObject *tuple_o = PyStackRef_AsPyObjectBorrow(iter);
+ assert(Py_TYPE(tuple_o) == &PyTuple_Type);
+ uintptr_t i = PyStackRef_UntagInt(null_or_index);
+ assert((size_t)i < (size_t)PyTuple_GET_SIZE(tuple_o));
+ next = PyStackRef_FromPyObjectNew(PyTuple_GET_ITEM(tuple_o, i));
+ null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
}
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
@@ -6216,25 +6174,37 @@
INSTRUCTION_STATS(GET_ITER);
_PyStackRef iterable;
_PyStackRef iter;
+ _PyStackRef index_or_null;
iterable = stack_pointer[-1];
#ifdef Py_STATS
_PyFrame_SetStackPointer(frame, stack_pointer);
_Py_GatherStats_GetIter(iterable);
stack_pointer = _PyFrame_GetStackPointer(frame);
#endif
- _PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *iter_o = PyObject_GetIter(PyStackRef_AsPyObjectBorrow(iterable));
- stack_pointer = _PyFrame_GetStackPointer(frame);
- stack_pointer += -1;
- assert(WITHIN_STACK_BOUNDS());
- _PyFrame_SetStackPointer(frame, stack_pointer);
- PyStackRef_CLOSE(iterable);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (iter_o == NULL) {
- JUMP_TO_LABEL(error);
+
+ PyTypeObject *tp = PyStackRef_TYPE(iterable);
+ if (tp == &PyTuple_Type || tp == &PyList_Type) {
+ iter = iterable;
+ index_or_null = PyStackRef_TagInt(0);
}
- iter = PyStackRef_FromPyObjectSteal(iter_o);
- stack_pointer[0] = iter;
+ else {
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ PyObject *iter_o = PyObject_GetIter(PyStackRef_AsPyObjectBorrow(iterable));
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ stack_pointer += -1;
+ assert(WITHIN_STACK_BOUNDS());
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ PyStackRef_CLOSE(iterable);
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (iter_o == NULL) {
+ JUMP_TO_LABEL(error);
+ }
+ iter = PyStackRef_FromPyObjectSteal(iter_o);
+ index_or_null = PyStackRef_NULL;
+ stack_pointer += 1;
+ }
+ stack_pointer[-1] = iter;
+ stack_pointer[0] = index_or_null;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
@@ -6999,7 +6969,7 @@
_PyStackRef receiver;
_PyStackRef value;
value = stack_pointer[-1];
- receiver = stack_pointer[-2];
+ receiver = stack_pointer[-3];
if (PyStackRef_GenCheck(receiver)) {
_PyFrame_SetStackPointer(frame, stack_pointer);
int err = monitor_stop_iteration(tstate, frame, this_instr, PyStackRef_AsPyObjectBorrow(value));
@@ -7061,35 +7031,25 @@
next_instr += 2;
INSTRUCTION_STATS(INSTRUMENTED_FOR_ITER);
_PyStackRef iter;
+ _PyStackRef null_or_index;
_PyStackRef next;
/* Skip 1 cache entry */
- iter = stack_pointer[-1];
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
+ null_or_index = stack_pointer[-1];
+ iter = stack_pointer[-2];
_PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (next_o != NULL) {
- next = PyStackRef_FromPyObjectSteal(next_o);
- INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
- }
- else {
- if (_PyErr_Occurred(tstate)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (!matches) {
- JUMP_TO_LABEL(error);
- }
- _PyFrame_SetStackPointer(frame, stack_pointer);
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
- stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ JUMP_TO_LABEL(error);
}
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
JUMPBY(oparg + 1);
+ stack_pointer[-1] = null_or_index;
DISPATCH();
}
+ next = item;
+ INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
@@ -7356,9 +7316,12 @@
next_instr += 1;
INSTRUCTION_STATS(INSTRUMENTED_POP_ITER);
_PyStackRef iter;
- iter = stack_pointer[-1];
+ _PyStackRef index_or_null;
+ index_or_null = stack_pointer[-1];
+ iter = stack_pointer[-2];
+ (void)index_or_null;
INSTRUMENTED_JUMP(prev_instr, this_instr+1, PY_MONITORING_EVENT_BRANCH_RIGHT);
- stack_pointer += -1;
+ stack_pointer += -2;
assert(WITHIN_STACK_BOUNDS());
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(iter);
@@ -10122,12 +10085,15 @@
frame->instr_ptr = next_instr;
next_instr += 1;
INSTRUCTION_STATS(POP_ITER);
- _PyStackRef value;
- value = stack_pointer[-1];
- stack_pointer += -1;
+ _PyStackRef iter;
+ _PyStackRef index_or_null;
+ index_or_null = stack_pointer[-1];
+ iter = stack_pointer[-2];
+ (void)index_or_null;
+ stack_pointer += -2;
assert(WITHIN_STACK_BOUNDS());
_PyFrame_SetStackPointer(frame, stack_pointer);
- PyStackRef_CLOSE(value);
+ PyStackRef_CLOSE(iter);
stack_pointer = _PyFrame_GetStackPointer(frame);
DISPATCH();
}
@@ -10275,7 +10241,7 @@
stack_pointer += -1;
assert(WITHIN_STACK_BOUNDS());
_PyFrame_SetStackPointer(frame, stack_pointer);
- PyStackRef_CLOSE(value);
+ PyStackRef_XCLOSE(value);
stack_pointer = _PyFrame_GetStackPointer(frame);
DISPATCH();
}
diff --git a/Python/hamt.c b/Python/hamt.c
index f9bbf63961d..906149cc6cd 100644
--- a/Python/hamt.c
+++ b/Python/hamt.c
@@ -1176,7 +1176,7 @@ hamt_node_bitmap_dump(PyHamtNode_Bitmap *node,
}
if (key_or_null == NULL) {
- if (PyUnicodeWriter_WriteUTF8(writer, "NULL:\n", -1) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, "NULL:\n", 6) < 0) {
goto error;
}
@@ -1194,7 +1194,7 @@ hamt_node_bitmap_dump(PyHamtNode_Bitmap *node,
}
}
- if (PyUnicodeWriter_WriteUTF8(writer, "\n", 1) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, "\n", 1) < 0) {
goto error;
}
}
@@ -1915,7 +1915,7 @@ hamt_node_array_dump(PyHamtNode_Array *node,
goto error;
}
- if (PyUnicodeWriter_WriteUTF8(writer, "\n", 1) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, "\n", 1) < 0) {
goto error;
}
}
diff --git a/Python/import.c b/Python/import.c
index e7be1b90751..184dede335d 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -3369,11 +3369,11 @@ PyObject *
PyImport_GetImporter(PyObject *path)
{
PyThreadState *tstate = _PyThreadState_GET();
- PyObject *path_importer_cache = _PySys_GetRequiredAttrString("path_importer_cache");
+ PyObject *path_importer_cache = PySys_GetAttrString("path_importer_cache");
if (path_importer_cache == NULL) {
return NULL;
}
- PyObject *path_hooks = _PySys_GetRequiredAttrString("path_hooks");
+ PyObject *path_hooks = PySys_GetAttrString("path_hooks");
if (path_hooks == NULL) {
Py_DECREF(path_importer_cache);
return NULL;
@@ -3682,14 +3682,14 @@ import_find_and_load(PyThreadState *tstate, PyObject *abs_name)
PyTime_t t1 = 0, accumulated_copy = accumulated;
PyObject *sys_path, *sys_meta_path, *sys_path_hooks;
- if (_PySys_GetOptionalAttrString("path", &sys_path) < 0) {
+ if (PySys_GetOptionalAttrString("path", &sys_path) < 0) {
return NULL;
}
- if (_PySys_GetOptionalAttrString("meta_path", &sys_meta_path) < 0) {
+ if (PySys_GetOptionalAttrString("meta_path", &sys_meta_path) < 0) {
Py_XDECREF(sys_path);
return NULL;
}
- if (_PySys_GetOptionalAttrString("path_hooks", &sys_path_hooks) < 0) {
+ if (PySys_GetOptionalAttrString("path_hooks", &sys_path_hooks) < 0) {
Py_XDECREF(sys_meta_path);
Py_XDECREF(sys_path);
return NULL;
@@ -3964,8 +3964,10 @@ PyImport_Import(PyObject *module_name)
if (globals != NULL) {
Py_INCREF(globals);
builtins = PyObject_GetItem(globals, &_Py_ID(__builtins__));
- if (builtins == NULL)
+ if (builtins == NULL) {
+ // XXX Fall back to interp->builtins or sys.modules['builtins']?
goto err;
+ }
}
else {
/* No globals -- use standard builtins, and fake globals */
@@ -4127,7 +4129,7 @@ _PyImport_FiniCore(PyInterpreterState *interp)
static int
init_zipimport(PyThreadState *tstate, int verbose)
{
- PyObject *path_hooks = _PySys_GetRequiredAttrString("path_hooks");
+ PyObject *path_hooks = PySys_GetAttrString("path_hooks");
if (path_hooks == NULL) {
return -1;
}
diff --git a/Python/index_pool.c b/Python/index_pool.c
index 007c81a0fc1..520a65938ec 100644
--- a/Python/index_pool.c
+++ b/Python/index_pool.c
@@ -172,6 +172,9 @@ _PyIndexPool_AllocIndex(_PyIndexPool *pool)
else {
index = heap_pop(free_indices);
}
+
+ pool->tlbc_generation++;
+
UNLOCK_POOL(pool);
return index;
}
@@ -180,6 +183,7 @@ void
_PyIndexPool_FreeIndex(_PyIndexPool *pool, int32_t index)
{
LOCK_POOL(pool);
+ pool->tlbc_generation++;
heap_add(&pool->free_indices, index);
UNLOCK_POOL(pool);
}
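
Both alloc and free now bump tlbc_generation under the pool lock, presumably so that readers of the thread-local bytecode index space can detect that indices were recycled. The generic generation-counter pattern, with hypothetical names:

    #include <stdint.h>

    typedef struct {
        uint32_t generation;   // bumped under the pool lock on alloc/free
    } pool_sketch;

    /* Reader: snapshot, do speculative work, then revalidate. */
    static uint32_t pool_snapshot(const pool_sketch *p) {
        return p->generation;
    }
    static int pool_still_valid(const pool_sketch *p, uint32_t seen) {
        return p->generation == seen;   // stale if any alloc/free happened
    }
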
diff --git a/Python/initconfig.c b/Python/initconfig.c
index 25e30aa648e..71d7cfed5c4 100644
--- a/Python/initconfig.c
+++ b/Python/initconfig.c
@@ -3647,7 +3647,7 @@ _Py_DumpPathConfig(PyThreadState *tstate)
#define DUMP_SYS(NAME) \
do { \
PySys_FormatStderr(" sys.%s = ", #NAME); \
- if (_PySys_GetOptionalAttrString(#NAME, &obj) < 0) { \
+ if (PySys_GetOptionalAttrString(#NAME, &obj) < 0) { \
PyErr_Clear(); \
} \
if (obj != NULL) { \
@@ -3671,7 +3671,7 @@ _Py_DumpPathConfig(PyThreadState *tstate)
#undef DUMP_SYS
PyObject *sys_path;
- (void) _PySys_GetOptionalAttrString("path", &sys_path);
+ (void) PySys_GetOptionalAttrString("path", &sys_path);
if (sys_path != NULL && PyList_Check(sys_path)) {
PySys_WriteStderr(" sys.path = [\n");
Py_ssize_t len = PyList_GET_SIZE(sys_path);
@@ -4294,7 +4294,7 @@ _PyConfig_CreateXOptionsDict(const PyConfig *config)
static int
config_get_sys_write_bytecode(const PyConfig *config, int *value)
{
- PyObject *attr = _PySys_GetRequiredAttrString("dont_write_bytecode");
+ PyObject *attr = PySys_GetAttrString("dont_write_bytecode");
if (attr == NULL) {
return -1;
}
@@ -4315,7 +4315,7 @@ config_get(const PyConfig *config, const PyConfigSpec *spec,
{
if (use_sys) {
if (spec->sys.attr != NULL) {
- return _PySys_GetRequiredAttrString(spec->sys.attr);
+ return PySys_GetAttrString(spec->sys.attr);
}
if (strcmp(spec->name, "write_bytecode") == 0) {
diff --git a/Python/intrinsics.c b/Python/intrinsics.c
index ff44ba0ee64..8ea920e690c 100644
--- a/Python/intrinsics.c
+++ b/Python/intrinsics.c
@@ -9,7 +9,6 @@
#include "pycore_intrinsics.h" // INTRINSIC_PRINT
#include "pycore_pyerrors.h" // _PyErr_SetString()
#include "pycore_runtime.h" // _Py_ID()
-#include "pycore_sysmodule.h" // _PySys_GetRequiredAttr()
#include "pycore_tuple.h" // _PyTuple_FromArray()
#include "pycore_typevarobject.h" // _Py_make_typevar()
#include "pycore_unicodeobject.h" // _PyUnicode_FromASCII()
@@ -27,7 +26,7 @@ no_intrinsic1(PyThreadState* tstate, PyObject *unused)
static PyObject *
print_expr(PyThreadState* Py_UNUSED(ignored), PyObject *value)
{
- PyObject *hook = _PySys_GetRequiredAttr(&_Py_ID(displayhook));
+ PyObject *hook = PySys_GetAttr(&_Py_ID(displayhook));
if (hook == NULL) {
return NULL;
}
diff --git a/Python/lock.c b/Python/lock.c
index 28a12ad1835..b125ad0c9e3 100644
--- a/Python/lock.c
+++ b/Python/lock.c
@@ -119,6 +119,9 @@ _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
return PY_LOCK_INTR;
}
}
+ else if (ret == Py_PARK_INTR && (flags & _PY_FAIL_IF_INTERRUPTED)) {
+ return PY_LOCK_INTR;
+ }
else if (ret == Py_PARK_TIMEOUT) {
assert(timeout >= 0);
return PY_LOCK_FAILURE;
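
The added branch makes _PY_FAIL_IF_INTERRUPTED effective when the park returns early for a signal rather than a timeout. A hedged usage sketch, assuming the internal pycore_lock.h declarations visible in the hunk above:

    #include <assert.h>

    static void example_timed_lock(PyMutex *m)
    {
        PyLockStatus st = _PyMutex_LockTimed(m, 1000000 /* 1 ms */,
                                             _PY_FAIL_IF_INTERRUPTED);
        if (st == PY_LOCK_INTR) {
            /* interrupted while parked: let the caller run signal handlers */
        }
        else if (st == PY_LOCK_FAILURE) {
            /* timed out */
        }
        else {
            assert(st == PY_LOCK_ACQUIRED);
            PyMutex_Unlock(m);
        }
    }
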
diff --git a/Python/modsupport.c b/Python/modsupport.c
index 2caf595949d..437ad412027 100644
--- a/Python/modsupport.c
+++ b/Python/modsupport.c
@@ -669,5 +669,5 @@ Py_PACK_FULL_VERSION(int x, int y, int z, int level, int serial)
uint32_t
Py_PACK_VERSION(int x, int y)
{
- return Py_PACK_FULL_VERSION(x, y, 0, 0, 0);
+ return _Py_PACK_VERSION(x, y);
}
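
Both helpers produce the PY_VERSION_HEX layout: one byte each for major, minor, and micro, then a nibble for the release level and one for the serial. For example:

    #include <assert.h>

    static void check_packing(void)
    {
        /* 3.4.1a2 packs to 0x030401a2 (the documented example). */
        assert(Py_PACK_FULL_VERSION(3, 4, 1, 0xA, 2) == 0x030401a2);
        /* Py_PACK_VERSION is the (x, y, 0, 0, 0) special case. */
        assert(Py_PACK_VERSION(3, 14) == Py_PACK_FULL_VERSION(3, 14, 0, 0, 0));
    }
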
diff --git a/Python/optimizer_bytecodes.c b/Python/optimizer_bytecodes.c
index 49c6bfb6c1b..fbf4dfd3db6 100644
--- a/Python/optimizer_bytecodes.c
+++ b/Python/optimizer_bytecodes.c
@@ -840,7 +840,18 @@ dummy_func(void) {
value = sym_new_unknown(ctx);
}
- op(_FOR_ITER_GEN_FRAME, (unused -- unused, gen_frame: _Py_UOpsAbstractFrame*)) {
+ op(_GET_ITER, (iterable -- iter, index_or_null)) {
+ if (sym_matches_type(iterable, &PyTuple_Type) || sym_matches_type(iterable, &PyList_Type)) {
+ iter = iterable;
+ index_or_null = sym_new_not_null(ctx);
+ }
+ else {
+ iter = sym_new_not_null(ctx);
+ index_or_null = sym_new_unknown(ctx);
+ }
+ }
+
+ op(_FOR_ITER_GEN_FRAME, (unused, unused -- unused, unused, gen_frame: _Py_UOpsAbstractFrame*)) {
gen_frame = NULL;
/* We are about to hit the end of the trace */
ctx->done = true;
@@ -914,13 +925,23 @@ dummy_func(void) {
}
}
- op(_ITER_NEXT_RANGE, (iter -- iter, next)) {
+ op(_ITER_CHECK_TUPLE, (iter, null_or_index -- iter, null_or_index)) {
+ if (sym_matches_type(iter, &PyTuple_Type)) {
+ REPLACE_OP(this_instr, _NOP, 0, 0);
+ }
+ sym_set_type(iter, &PyTuple_Type);
+ }
+
+ op(_ITER_NEXT_RANGE, (iter, null_or_index -- iter, null_or_index, next)) {
next = sym_new_type(ctx, &PyLong_Type);
}
op(_CALL_TYPE_1, (unused, unused, arg -- res)) {
- if (sym_has_type(arg)) {
- res = sym_new_const(ctx, (PyObject *)sym_get_type(arg));
+ PyObject* type = (PyObject *)sym_get_type(arg);
+ if (type) {
+ res = sym_new_const(ctx, type);
+ REPLACE_OP(this_instr, _POP_CALL_ONE_LOAD_CONST_INLINE_BORROW, 0,
+ (uintptr_t)type);
}
else {
res = sym_new_not_null(ctx);
@@ -1216,6 +1237,20 @@ dummy_func(void) {
sym_set_const(callable, list_append);
}
+ op(_BINARY_SLICE, (container, start, stop -- res)) {
+ // Slicing a string/list/tuple always returns the same type.
+ PyTypeObject *type = sym_get_type(container);
+ if (type == &PyUnicode_Type ||
+ type == &PyList_Type ||
+ type == &PyTuple_Type)
+ {
+ res = sym_new_type(ctx, type);
+ }
+ else {
+ res = sym_new_not_null(ctx);
+ }
+ }
+
// END BYTECODES //
}
diff --git a/Python/optimizer_cases.c.h b/Python/optimizer_cases.c.h
index bf7ac72d457..b42f47c75ea 100644
--- a/Python/optimizer_cases.c.h
+++ b/Python/optimizer_cases.c.h
@@ -126,6 +126,12 @@
break;
}
+ case _POP_ITER: {
+ stack_pointer += -2;
+ assert(WITHIN_STACK_BOUNDS());
+ break;
+ }
+
case _END_SEND: {
JitOptSymbol *val;
val = sym_new_not_null(ctx);
@@ -562,8 +568,19 @@
}
case _BINARY_SLICE: {
+ JitOptSymbol *container;
JitOptSymbol *res;
- res = sym_new_not_null(ctx);
+ container = stack_pointer[-3];
+ PyTypeObject *type = sym_get_type(container);
+ if (type == &PyUnicode_Type ||
+ type == &PyList_Type ||
+ type == &PyTuple_Type)
+ {
+ res = sym_new_type(ctx, type);
+ }
+ else {
+ res = sym_new_not_null(ctx);
+ }
stack_pointer[-3] = res;
stack_pointer += -2;
assert(WITHIN_STACK_BOUNDS());
@@ -1556,9 +1573,22 @@
}
case _GET_ITER: {
+ JitOptSymbol *iterable;
JitOptSymbol *iter;
- iter = sym_new_not_null(ctx);
+ JitOptSymbol *index_or_null;
+ iterable = stack_pointer[-1];
+ if (sym_matches_type(iterable, &PyTuple_Type) || sym_matches_type(iterable, &PyList_Type)) {
+ iter = iterable;
+ index_or_null = sym_new_not_null(ctx);
+ }
+ else {
+ iter = sym_new_not_null(ctx);
+ index_or_null = sym_new_unknown(ctx);
+ }
stack_pointer[-1] = iter;
+ stack_pointer[0] = index_or_null;
+ stack_pointer += 1;
+ assert(WITHIN_STACK_BOUNDS());
break;
}
@@ -1604,6 +1634,12 @@
}
case _ITER_CHECK_TUPLE: {
+ JitOptSymbol *iter;
+ iter = stack_pointer[-2];
+ if (sym_matches_type(iter, &PyTuple_Type)) {
+ REPLACE_OP(this_instr, _NOP, 0, 0);
+ }
+ sym_set_type(iter, &PyTuple_Type);
break;
}
@@ -2031,8 +2067,11 @@
JitOptSymbol *arg;
JitOptSymbol *res;
arg = stack_pointer[-1];
- if (sym_has_type(arg)) {
- res = sym_new_const(ctx, (PyObject *)sym_get_type(arg));
+ PyObject* type = (PyObject *)sym_get_type(arg);
+ if (type) {
+ res = sym_new_const(ctx, type);
+ REPLACE_OP(this_instr, _POP_CALL_ONE_LOAD_CONST_INLINE_BORROW, 0,
+ (uintptr_t)type);
}
else {
res = sym_new_not_null(ctx);
diff --git a/Python/parking_lot.c b/Python/parking_lot.c
index 8edf4323594..e896dea0271 100644
--- a/Python/parking_lot.c
+++ b/Python/parking_lot.c
@@ -112,17 +112,27 @@ _PySemaphore_PlatformWait(_PySemaphore *sema, PyTime_t timeout)
}
}
- // NOTE: we wait on the sigint event even in non-main threads to match the
- // behavior of the other platforms. Non-main threads will ignore the
- // Py_PARK_INTR result.
- HANDLE sigint_event = _PyOS_SigintEvent();
- HANDLE handles[2] = { sema->platform_sem, sigint_event };
- DWORD count = sigint_event != NULL ? 2 : 1;
+ HANDLE handles[2] = { sema->platform_sem, NULL };
+ HANDLE sigint_event = NULL;
+ DWORD count = 1;
+ if (_Py_IsMainThread()) {
+ // gh-135099: Wait on the SIGINT event only in the main thread. Other
+        // threads would ignore the result anyway, and accessing
+ // `_PyOS_SigintEvent()` from non-main threads may race with
+ // interpreter shutdown, which closes the event handle. Note that
+ // non-main interpreters will ignore the result.
+ sigint_event = _PyOS_SigintEvent();
+ if (sigint_event != NULL) {
+ handles[1] = sigint_event;
+ count = 2;
+ }
+ }
wait = WaitForMultipleObjects(count, handles, FALSE, millis);
if (wait == WAIT_OBJECT_0) {
res = Py_PARK_OK;
}
else if (wait == WAIT_OBJECT_0 + 1) {
+ assert(sigint_event != NULL);
ResetEvent(sigint_event);
res = Py_PARK_INTR;
}
diff --git a/Python/perf_jit_trampoline.c b/Python/perf_jit_trampoline.c
index 1211e0e9f11..2ca18c23593 100644
--- a/Python/perf_jit_trampoline.c
+++ b/Python/perf_jit_trampoline.c
@@ -1,241 +1,354 @@
+/*
+ * Python Perf Trampoline Support - JIT Dump Implementation
+ *
+ * This file implements the perf jitdump API for Python's performance profiling
+ * integration. It allows perf (Linux performance analysis tool) to understand
+ * and profile dynamically generated Python bytecode by creating JIT dump files
+ * that perf can inject into its analysis.
+ *
+ *
+ * IMPORTANT: This file exports specific callback functions that are part of
+ * Python's internal API. Do not modify the function signatures or behavior
+ * of exported functions without coordinating with the Python core team.
+ *
+ * Usually the binary and libraries are mapped in separate regions, like below:
+ *
+ * address ->
+ * --+---------------------+--//--+---------------------+--
+ * | .text | .data | ... | | .text | .data | ... |
+ * --+---------------------+--//--+---------------------+--
+ * myprog libc.so
+ *
+ * So it'd be easy and straightforward to find a mapped binary or library from an
+ * address.
+ *
+ * But for JIT code, the code arena only cares about the code section, while the
+ * resulting DSOs (which are generated by perf inject -j) contain ELF headers and
+ * unwind info too. It'd therefore generate the following address space with
+ * synthesized MMAP events. Let's say it has a sample between address B and C.
+ *
+ * sample
+ * |
+ * address -> A B v C
+ * ---------------------------------------------------------------------------------------------------
+ * /tmp/jitted-PID-0.so | (headers) | .text | unwind info |
+ * /tmp/jitted-PID-1.so | (headers) | .text | unwind info |
+ * /tmp/jitted-PID-2.so | (headers) | .text | unwind info |
+ * ...
+ * ---------------------------------------------------------------------------------------------------
+ *
+ * If it only maps the .text section, it'd find jitted-PID-1.so but couldn't see
+ * the unwind info. If it maps both the .text and unwind sections, the sample
+ * could be mapped to either jitted-PID-0.so or jitted-PID-1.so, and it's ambiguous
+ * which one is right. So, to make perf happy, we use non-overlapping ranges for each
+ * DSO:
+ *
+ * address ->
+ * -------------------------------------------------------------------------------------------------------
+ * /tmp/jitted-PID-0.so | (headers) | .text | unwind info |
+ * /tmp/jitted-PID-1.so | (headers) | .text | unwind info |
+ * /tmp/jitted-PID-2.so | (headers) | .text | unwind info |
+ * ...
+ * -------------------------------------------------------------------------------------------------------
+ *
+ * As the trampolines are constant, we add constant padding; in general the padding
+ * needs to be at least the size of the unwind info rounded up to 16 bytes. For our
+ * trampolines this is 0x50.
+ */
+
+
+
#include "Python.h"
#include "pycore_ceval.h" // _PyPerf_Callbacks
#include "pycore_frame.h"
#include "pycore_interp.h"
#include "pycore_runtime.h" // _PyRuntime
-
#ifdef PY_HAVE_PERF_TRAMPOLINE
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/mman.h> // mmap()
-#include <sys/types.h>
-#include <unistd.h> // sysconf()
-#include <sys/time.h> // gettimeofday()
-#include <sys/syscall.h>
-
-// ----------------------------------
-// Perf jitdump API
-// ----------------------------------
-
-typedef struct {
- FILE* perf_map;
- PyThread_type_lock map_lock;
- void* mapped_buffer;
- size_t mapped_size;
- int code_id;
-} PerfMapJitState;
-
-static PerfMapJitState perf_jit_map_state;
+/* Standard library includes for perf jitdump implementation */
+#include <elf.h> // ELF architecture constants
+#include <fcntl.h> // File control operations
+#include <stdio.h> // Standard I/O operations
+#include <stdlib.h> // Standard library functions
+#include <sys/mman.h> // Memory mapping functions (mmap)
+#include <sys/types.h> // System data types
+#include <unistd.h> // System calls (sysconf, getpid)
+#include <sys/time.h> // Time functions (gettimeofday)
+#include <sys/syscall.h> // System call interface
+
+// =============================================================================
+// CONSTANTS AND CONFIGURATION
+// =============================================================================
/*
-Usually the binary and libraries are mapped in separate region like below:
-
- address ->
- --+---------------------+--//--+---------------------+--
- | .text | .data | ... | | .text | .data | ... |
- --+---------------------+--//--+---------------------+--
- myprog libc.so
-
-So it'd be easy and straight-forward to find a mapped binary or library from an
-address.
-
-But for JIT code, the code arena only cares about the code section. But the
-resulting DSOs (which is generated by perf inject -j) contain ELF headers and
-unwind info too. Then it'd generate following address space with synthesized
-MMAP events. Let's say it has a sample between address B and C.
-
- sample
- |
- address -> A B v C
- ---------------------------------------------------------------------------------------------------
- /tmp/jitted-PID-0.so | (headers) | .text | unwind info |
- /tmp/jitted-PID-1.so | (headers) | .text | unwind info |
- /tmp/jitted-PID-2.so | (headers) | .text | unwind info |
- ...
- ---------------------------------------------------------------------------------------------------
-
-If it only maps the .text section, it'd find the jitted-PID-1.so but cannot see
-the unwind info. If it maps both .text section and unwind sections, the sample
-could be mapped to either jitted-PID-0.so or jitted-PID-1.so and it's confusing
-which one is right. So to make perf happy we have non-overlapping ranges for each
-DSO:
-
- address ->
- -------------------------------------------------------------------------------------------------------
- /tmp/jitted-PID-0.so | (headers) | .text | unwind info |
- /tmp/jitted-PID-1.so | (headers) | .text | unwind info |
- /tmp/jitted-PID-2.so | (headers) | .text | unwind info |
- ...
- -------------------------------------------------------------------------------------------------------
-
-As the trampolines are constant, we add a constant padding but in general the padding needs to have the
-size of the unwind info rounded to 16 bytes. In general, for our trampolines this is 0x50
+ * Memory layout considerations for perf jitdump:
+ *
+ * Perf expects non-overlapping memory regions for each JIT-compiled function.
+ * When perf processes the jitdump file, it creates synthetic DSO (Dynamic
+ * Shared Object) files that contain:
+ * - ELF headers
+ * - .text section (actual machine code)
+ * - Unwind information (for stack traces)
+ *
+ * To ensure proper address space layout, we add padding between code regions.
+ * This prevents address conflicts when perf maps the synthesized DSOs.
+ *
+ * Memory layout example:
+ * /tmp/jitted-PID-0.so: [headers][.text][unwind_info][padding]
+ * /tmp/jitted-PID-1.so: [headers][.text][unwind_info][padding]
+ *
+ * The padding size (0x100) is chosen to accommodate typical unwind info sizes
+ * while maintaining 16-byte alignment requirements.
*/
-
#define PERF_JIT_CODE_PADDING 0x100
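
In practice the advertised mapped size for each DSO is the unwind payload rounded up to 16 bytes plus this fixed pad. A sketch of the computation (hypothetical parameters; round_up is defined later in this file):

    static size_t example_mapped_size(size_t eh_frame_size,
                                      size_t eh_frame_hdr_size)
    {
        /* Unwind payload rounded to 16 bytes, plus the fixed pad that keeps
         * successive /tmp/jitted-PID-N.so ranges from overlapping. */
        return round_up(eh_frame_size + eh_frame_hdr_size, 16)
               + PERF_JIT_CODE_PADDING;
    }
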
-#define trampoline_api _PyRuntime.ceval.perf.trampoline_api
-
-typedef uint64_t uword;
-typedef const char* CodeComments;
-#define Pd "d"
-#define MB (1024 * 1024)
-
-#define EM_386 3
-#define EM_X86_64 62
-#define EM_ARM 40
-#define EM_AARCH64 183
-#define EM_RISCV 243
+/* Convenient access to the global trampoline API state */
+#define trampoline_api _PyRuntime.ceval.perf.trampoline_api
-#define TARGET_ARCH_IA32 0
-#define TARGET_ARCH_X64 0
-#define TARGET_ARCH_ARM 0
-#define TARGET_ARCH_ARM64 0
-#define TARGET_ARCH_RISCV32 0
-#define TARGET_ARCH_RISCV64 0
+/* Type aliases for clarity and portability */
+typedef uint64_t uword; // Word-sized unsigned integer
+typedef const char* CodeComments; // Code comment strings
-#define FLAG_generate_perf_jitdump 0
-#define FLAG_write_protect_code 0
-#define FLAG_write_protect_vm_isolate 0
-#define FLAG_code_comments 0
+/* Memory size constants */
+#define MB (1024 * 1024) // 1 Megabyte for buffer sizing
-#define UNREACHABLE()
+// =============================================================================
+// ARCHITECTURE-SPECIFIC DEFINITIONS
+// =============================================================================
-static uword GetElfMachineArchitecture(void) {
-#if TARGET_ARCH_IA32
- return EM_386;
-#elif TARGET_ARCH_X64
+/*
+ * Returns the ELF machine architecture constant for the current platform.
+ * This is required for the jitdump header to correctly identify the target
+ * architecture for perf processing.
+ *
+ */
+static uint64_t GetElfMachineArchitecture(void) {
+#if defined(__x86_64__) || defined(_M_X64)
return EM_X86_64;
-#elif TARGET_ARCH_ARM
- return EM_ARM;
-#elif TARGET_ARCH_ARM64
+#elif defined(__i386__) || defined(_M_IX86)
+ return EM_386;
+#elif defined(__aarch64__)
return EM_AARCH64;
-#elif TARGET_ARCH_RISCV32 || TARGET_ARCH_RISCV64
+#elif defined(__arm__) || defined(_M_ARM)
+ return EM_ARM;
+#elif defined(__riscv)
return EM_RISCV;
#else
- UNREACHABLE();
+ Py_UNREACHABLE(); // Unsupported architecture - should never reach here
return 0;
#endif
}
+// =============================================================================
+// PERF JITDUMP DATA STRUCTURES
+// =============================================================================
+
+/*
+ * Perf jitdump file format structures
+ *
+ * These structures define the binary format that perf expects for JIT dump files.
+ * The format is documented in the Linux perf tools source code and must match
+ * exactly for proper perf integration.
+ */
+
+/*
+ * Jitdump file header - written once at the beginning of each jitdump file
+ * Contains metadata about the process and jitdump format version
+ */
typedef struct {
- uint32_t magic;
- uint32_t version;
- uint32_t size;
- uint32_t elf_mach_target;
- uint32_t reserved;
- uint32_t process_id;
- uint64_t time_stamp;
- uint64_t flags;
+ uint32_t magic; // Magic number (0x4A695444 = "JiTD")
+ uint32_t version; // Jitdump format version (currently 1)
+ uint32_t size; // Size of this header structure
+ uint32_t elf_mach_target; // Target architecture (from GetElfMachineArchitecture)
+ uint32_t reserved; // Reserved field (must be 0)
+ uint32_t process_id; // Process ID of the JIT compiler
+ uint64_t time_stamp; // Timestamp when jitdump was created
+ uint64_t flags; // Feature flags (currently unused)
} Header;
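
The magic doubles as an endianness probe: 0x4A695444 is "JiTD" read as a big-endian integer, so a byte-swapped magic tells perf the file was produced on the other endianness. Checking the bytes:

    #include <assert.h>
    #include <stdint.h>

    static void check_magic(void)
    {
        uint32_t magic = 0x4A695444;
        assert(((magic >> 24) & 0xff) == 'J');
        assert(((magic >> 16) & 0xff) == 'i');
        assert(((magic >>  8) & 0xff) == 'T');
        assert(( magic        & 0xff) == 'D');
    }
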
- enum PerfEvent {
- PerfLoad = 0,
- PerfMove = 1,
- PerfDebugInfo = 2,
- PerfClose = 3,
- PerfUnwindingInfo = 4
+/*
+ * Perf event types supported by the jitdump format
+ * Each event type has a corresponding structure format
+ */
+enum PerfEvent {
+ PerfLoad = 0, // Code load event (new JIT function)
+ PerfMove = 1, // Code move event (function relocated)
+ PerfDebugInfo = 2, // Debug information event
+ PerfClose = 3, // JIT session close event
+ PerfUnwindingInfo = 4 // Stack unwinding information event
};
+/*
+ * Base event structure - common header for all perf events
+ * Every event in the jitdump file starts with this structure
+ */
struct BaseEvent {
- uint32_t event;
- uint32_t size;
- uint64_t time_stamp;
- };
+ uint32_t event; // Event type (from PerfEvent enum)
+ uint32_t size; // Total size of this event including payload
+ uint64_t time_stamp; // Timestamp when event occurred
+};
+/*
+ * Code load event - indicates a new JIT-compiled function is available
+ * This is the most important event type for Python profiling
+ */
typedef struct {
- struct BaseEvent base;
- uint32_t process_id;
- uint32_t thread_id;
- uint64_t vma;
- uint64_t code_address;
- uint64_t code_size;
- uint64_t code_id;
+ struct BaseEvent base; // Common event header
+ uint32_t process_id; // Process ID where code was generated
+ uint32_t thread_id; // Thread ID where code was generated
+ uint64_t vma; // Virtual memory address where code is loaded
+ uint64_t code_address; // Address of the actual machine code
+ uint64_t code_size; // Size of the machine code in bytes
+ uint64_t code_id; // Unique identifier for this code region
+ /* Followed by:
+ * - null-terminated function name string
+ * - raw machine code bytes
+ */
} CodeLoadEvent;
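
A load event is followed directly by its variable-length payload, so the size field must cover all three parts. A simplified writer sketch (names from this file; not the exact implementation; assumes <string.h> in addition to the includes above):

    #include <string.h>

    static void emit_load_sketch(FILE *f, const char *name,
                                 const void *code, uint64_t code_size)
    {
        CodeLoadEvent ev;
        ev.base.event = PerfLoad;
        ev.base.size = (uint32_t)(sizeof(ev) + strlen(name) + 1 + code_size);
        ev.base.time_stamp = get_current_monotonic_ticks();
        ev.process_id = getpid();
        ev.thread_id = (uint32_t)syscall(SYS_gettid);   // Linux-specific
        ev.vma = ev.code_address = (uint64_t)(uintptr_t)code;
        ev.code_size = code_size;
        ev.code_id = perf_jit_map_state.code_id++;
        fwrite(&ev, sizeof(ev), 1, f);                  // fixed header
        fwrite(name, 1, strlen(name) + 1, f);           // NUL-terminated name
        fwrite(code, 1, code_size, f);                  // machine code bytes
    }
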
+/*
+ * Code unwinding information event - provides DWARF data for stack traces
+ * Essential for proper stack unwinding during profiling
+ */
typedef struct {
- struct BaseEvent base;
- uint64_t unwind_data_size;
- uint64_t eh_frame_hdr_size;
- uint64_t mapped_size;
+ struct BaseEvent base; // Common event header
+ uint64_t unwind_data_size; // Size of the unwinding data
+ uint64_t eh_frame_hdr_size; // Size of the EH frame header
+ uint64_t mapped_size; // Total mapped size (with padding)
+ /* Followed by:
+ * - EH frame header
+ * - DWARF unwinding information
+ * - Padding to alignment boundary
+ */
} CodeUnwindingInfoEvent;
-static const intptr_t nanoseconds_per_second = 1000000000;
-
-// Dwarf encoding constants
+// =============================================================================
+// GLOBAL STATE MANAGEMENT
+// =============================================================================
-static const uint8_t DwarfUData4 = 0x03;
-static const uint8_t DwarfSData4 = 0x0b;
-static const uint8_t DwarfPcRel = 0x10;
-static const uint8_t DwarfDataRel = 0x30;
-// static uint8_t DwarfOmit = 0xff;
+/*
+ * Global state for the perf jitdump implementation
+ *
+ * This structure maintains all the state needed for generating jitdump files.
+ * It's designed as a singleton since there's typically only one jitdump file
+ * per Python process.
+ */
typedef struct {
- unsigned char version;
- unsigned char eh_frame_ptr_enc;
- unsigned char fde_count_enc;
- unsigned char table_enc;
- int32_t eh_frame_ptr;
- int32_t eh_fde_count;
- int32_t from;
- int32_t to;
-} EhFrameHeader;
+ FILE* perf_map; // File handle for the jitdump file
+ PyThread_type_lock map_lock; // Thread synchronization lock
+ void* mapped_buffer; // Memory-mapped region (signals perf we're active)
+ size_t mapped_size; // Size of the mapped region
+ int code_id; // Counter for unique code region identifiers
+} PerfMapJitState;
+
+/* Global singleton instance */
+static PerfMapJitState perf_jit_map_state;
+
+// =============================================================================
+// TIME UTILITIES
+// =============================================================================
+/* Time conversion constant */
+static const intptr_t nanoseconds_per_second = 1000000000;
+
+/*
+ * Get current monotonic time in nanoseconds
+ *
+ * Monotonic time is preferred for event timestamps because it's not affected
+ * by system clock adjustments. This ensures consistent timing relationships
+ * between events even if the system clock is changed.
+ *
+ * Returns: Current monotonic time in nanoseconds since an arbitrary epoch
+ */
static int64_t get_current_monotonic_ticks(void) {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
- UNREACHABLE();
+ Py_UNREACHABLE(); // Should never fail on supported systems
return 0;
}
- // Convert to nanoseconds.
+
+ /* Convert to nanoseconds for maximum precision */
int64_t result = ts.tv_sec;
result *= nanoseconds_per_second;
result += ts.tv_nsec;
return result;
}
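
Usage is interval-style: timestamps from this clock are comparable with each other but not with wall time.

    #include <assert.h>

    static void example_interval(void)
    {
        int64_t t0 = get_current_monotonic_ticks();
        /* ... emit some jitdump events ... */
        int64_t t1 = get_current_monotonic_ticks();
        assert(t1 >= t0);   // monotonic time never steps backwards
    }
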
+/*
+ * Get current wall clock time in microseconds
+ *
+ * Used for the jitdump file header timestamp. Unlike monotonic time,
+ * this represents actual wall clock time that can be correlated with
+ * other system events.
+ *
+ * Returns: Current time in microseconds since Unix epoch
+ */
static int64_t get_current_time_microseconds(void) {
- // gettimeofday has microsecond resolution.
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0) {
- UNREACHABLE();
- return 0;
- }
- return ((int64_t)(tv.tv_sec) * 1000000) + tv.tv_usec;
+ struct timeval tv;
+ if (gettimeofday(&tv, NULL) < 0) {
+ Py_UNREACHABLE(); // Should never fail on supported systems
+ return 0;
+ }
+ return ((int64_t)(tv.tv_sec) * 1000000) + tv.tv_usec;
}
+// =============================================================================
+// UTILITY FUNCTIONS
+// =============================================================================
+/*
+ * Round up a value to the next multiple of a given number
+ *
+ * This is essential for maintaining proper alignment requirements in the
+ * jitdump format. Many structures need to be aligned to specific boundaries
+ * (typically 8 or 16 bytes) for efficient processing by perf.
+ *
+ * Args:
+ * value: The value to round up
+ * multiple: The multiple to round up to
+ *
+ * Returns: The smallest value >= input that is a multiple of 'multiple'
+ */
static size_t round_up(int64_t value, int64_t multiple) {
if (multiple == 0) {
- // Avoid division by zero
- return value;
+ return value; // Avoid division by zero
}
int64_t remainder = value % multiple;
if (remainder == 0) {
- // Value is already a multiple of 'multiple'
- return value;
+ return value; // Already aligned
}
- // Calculate the difference to the next multiple
+ /* Calculate how much to add to reach the next multiple */
int64_t difference = multiple - remainder;
-
- // Add the difference to the value
int64_t rounded_up_value = value + difference;
return rounded_up_value;
}
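
Worked examples, including the 0x50 trampoline case mentioned in the header comment:

    #include <assert.h>

    static void check_round_up(void)
    {
        assert(round_up(0x43, 16) == 0x50);   // 67 -> 80
        assert(round_up(0x50, 16) == 0x50);   // already aligned
        assert(round_up(1, 8) == 8);
    }
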
+// =============================================================================
+// FILE I/O UTILITIES
+// =============================================================================
+/*
+ * Write data to the jitdump file with error handling
+ *
+ * This function ensures that all data is written to the file, handling
+ * partial writes that can occur with large buffers or when the system
+ * is under load.
+ *
+ * Args:
+ * buffer: Pointer to data to write
+ * size: Number of bytes to write
+ */
static void perf_map_jit_write_fully(const void* buffer, size_t size) {
FILE* out_file = perf_jit_map_state.perf_map;
const char* ptr = (const char*)(buffer);
+
while (size > 0) {
const size_t written = fwrite(ptr, 1, size, out_file);
if (written == 0) {
- UNREACHABLE();
+ Py_UNREACHABLE(); // Write failure - should be very rare
break;
}
size -= written;
@@ -243,284 +356,724 @@ static void perf_map_jit_write_fully(const void* buffer, size_t size) {
}
}
+/*
+ * Write the jitdump file header
+ *
+ * The header must be written exactly once at the beginning of each jitdump
+ * file. It provides metadata that perf uses to parse the rest of the file.
+ *
+ * Args:
+ * pid: Process ID to include in the header
+ * out_file: File handle to write to (currently unused, uses global state)
+ */
static void perf_map_jit_write_header(int pid, FILE* out_file) {
Header header;
- header.magic = 0x4A695444;
- header.version = 1;
- header.size = sizeof(Header);
- header.elf_mach_target = GetElfMachineArchitecture();
- header.process_id = pid;
- header.time_stamp = get_current_time_microseconds();
- header.flags = 0;
- perf_map_jit_write_fully(&header, sizeof(header));
-}
-static void* perf_map_jit_init(void) {
- char filename[100];
- int pid = getpid();
- snprintf(filename, sizeof(filename) - 1, "/tmp/jit-%d.dump", pid);
- const int fd = open(filename, O_CREAT | O_TRUNC | O_RDWR, 0666);
- if (fd == -1) {
- return NULL;
- }
+ /* Initialize header with required values */
+ header.magic = 0x4A695444; // "JiTD" magic number
+ header.version = 1; // Current jitdump version
+ header.size = sizeof(Header); // Header size for validation
+ header.elf_mach_target = GetElfMachineArchitecture(); // Target architecture
+ header.process_id = pid; // Process identifier
+ header.time_stamp = get_current_time_microseconds(); // Creation time
+ header.flags = 0; // No special flags currently used
- const long page_size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
- if (page_size == -1) {
- close(fd);
- return NULL;
- }
-
- // The perf jit interface forces us to map the first page of the file
- // to signal that we are using the interface.
- perf_jit_map_state.mapped_buffer = mmap(NULL, page_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
- if (perf_jit_map_state.mapped_buffer == NULL) {
- close(fd);
- return NULL;
- }
- perf_jit_map_state.mapped_size = page_size;
- perf_jit_map_state.perf_map = fdopen(fd, "w+");
- if (perf_jit_map_state.perf_map == NULL) {
- close(fd);
- return NULL;
- }
- setvbuf(perf_jit_map_state.perf_map, NULL, _IOFBF, 2 * MB);
- perf_map_jit_write_header(pid, perf_jit_map_state.perf_map);
-
- perf_jit_map_state.map_lock = PyThread_allocate_lock();
- if (perf_jit_map_state.map_lock == NULL) {
- fclose(perf_jit_map_state.perf_map);
- return NULL;
- }
- perf_jit_map_state.code_id = 0;
-
- trampoline_api.code_padding = PERF_JIT_CODE_PADDING;
- return &perf_jit_map_state;
+ perf_map_jit_write_fully(&header, sizeof(header));
}
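+/*
+ * Per the perf jitdump specification, the magic value doubles as a
+ * byte-order marker: it is written in the producer's native endianness, so
+ * a consumer that reads the bytes reversed knows the file came from a
+ * machine with the opposite byte order. A minimal consumer-side check:
+ *
+ *     uint32_t magic;
+ *     fread(&magic, sizeof(magic), 1, f);
+ *     if (magic == 0x4A695444)
+ *         ;   // "JiTD": same byte order as the reader
+ *     else if (magic == 0x4454694A)
+ *         ;   // byte-swapped: producer had the opposite endianness
+ *     else
+ *         ;   // not a jitdump file
+ */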
-/* DWARF definitions. */
+// =============================================================================
+// DWARF CONSTANTS AND UTILITIES
+// =============================================================================
+
+/*
+ * DWARF (Debugging With Attributed Record Formats) constants
+ *
+ * DWARF is a debugging data format used to provide stack unwinding information.
+ * These constants define the various encoding types and opcodes used in
+ * DWARF Call Frame Information (CFI) records.
+ */
+/* DWARF Call Frame Information version */
#define DWRF_CIE_VERSION 1
+/* DWARF CFA (Call Frame Address) opcodes */
enum {
- DWRF_CFA_nop = 0x0,
- DWRF_CFA_offset_extended = 0x5,
- DWRF_CFA_def_cfa = 0xc,
- DWRF_CFA_def_cfa_offset = 0xe,
- DWRF_CFA_offset_extended_sf = 0x11,
- DWRF_CFA_advance_loc = 0x40,
- DWRF_CFA_offset = 0x80
+ DWRF_CFA_nop = 0x0, // No operation
+ DWRF_CFA_offset_extended = 0x5, // Extended offset instruction
+ DWRF_CFA_def_cfa = 0xc, // Define CFA rule
+ DWRF_CFA_def_cfa_offset = 0xe, // Define CFA offset
+ DWRF_CFA_offset_extended_sf = 0x11, // Extended signed offset
+ DWRF_CFA_advance_loc = 0x40, // Advance location counter
+ DWRF_CFA_offset = 0x80 // Simple offset instruction
};
-enum
- {
- DWRF_EH_PE_absptr = 0x00,
- DWRF_EH_PE_omit = 0xff,
-
- /* FDE data encoding. */
- DWRF_EH_PE_uleb128 = 0x01,
- DWRF_EH_PE_udata2 = 0x02,
- DWRF_EH_PE_udata4 = 0x03,
- DWRF_EH_PE_udata8 = 0x04,
- DWRF_EH_PE_sleb128 = 0x09,
- DWRF_EH_PE_sdata2 = 0x0a,
- DWRF_EH_PE_sdata4 = 0x0b,
- DWRF_EH_PE_sdata8 = 0x0c,
- DWRF_EH_PE_signed = 0x08,
-
- /* FDE flags. */
- DWRF_EH_PE_pcrel = 0x10,
- DWRF_EH_PE_textrel = 0x20,
- DWRF_EH_PE_datarel = 0x30,
- DWRF_EH_PE_funcrel = 0x40,
- DWRF_EH_PE_aligned = 0x50,
-
- DWRF_EH_PE_indirect = 0x80
- };
+/* DWARF Exception Handling pointer encodings */
+enum {
+ DWRF_EH_PE_absptr = 0x00, // Absolute pointer
+ DWRF_EH_PE_omit = 0xff, // Omitted value
+
+ /* Data type encodings */
+ DWRF_EH_PE_uleb128 = 0x01, // Unsigned LEB128
+ DWRF_EH_PE_udata2 = 0x02, // Unsigned 2-byte
+ DWRF_EH_PE_udata4 = 0x03, // Unsigned 4-byte
+ DWRF_EH_PE_udata8 = 0x04, // Unsigned 8-byte
+ DWRF_EH_PE_sleb128 = 0x09, // Signed LEB128
+ DWRF_EH_PE_sdata2 = 0x0a, // Signed 2-byte
+ DWRF_EH_PE_sdata4 = 0x0b, // Signed 4-byte
+ DWRF_EH_PE_sdata8 = 0x0c, // Signed 8-byte
+ DWRF_EH_PE_signed = 0x08, // Signed flag
+
+ /* Reference type encodings */
+ DWRF_EH_PE_pcrel = 0x10, // PC-relative
+ DWRF_EH_PE_textrel = 0x20, // Text-relative
+ DWRF_EH_PE_datarel = 0x30, // Data-relative
+ DWRF_EH_PE_funcrel = 0x40, // Function-relative
+ DWRF_EH_PE_aligned = 0x50, // Aligned
+ DWRF_EH_PE_indirect = 0x80 // Indirect
+};
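+/*
+ * These encodings compose: the low nibble selects the value format and the
+ * high nibble selects what the value is relative to. The EH frame header
+ * emitted later in this file combines them exactly this way:
+ *
+ *     DWRF_EH_PE_pcrel   | DWRF_EH_PE_sdata4   // 0x1b: PC-relative int32
+ *     DWRF_EH_PE_datarel | DWRF_EH_PE_sdata4   // 0x3b: data-relative int32
+ */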
+/* Additional DWARF constants for debug information */
enum { DWRF_TAG_compile_unit = 0x11 };
-
enum { DWRF_children_no = 0, DWRF_children_yes = 1 };
+enum {
+ DWRF_AT_name = 0x03, // Name attribute
+ DWRF_AT_stmt_list = 0x10, // Statement list
+ DWRF_AT_low_pc = 0x11, // Low PC address
+ DWRF_AT_high_pc = 0x12 // High PC address
+};
+enum {
+ DWRF_FORM_addr = 0x01, // Address form
+ DWRF_FORM_data4 = 0x06, // 4-byte data
+ DWRF_FORM_string = 0x08 // String form
+};
-enum { DWRF_AT_name = 0x03, DWRF_AT_stmt_list = 0x10, DWRF_AT_low_pc = 0x11, DWRF_AT_high_pc = 0x12 };
-
-enum { DWRF_FORM_addr = 0x01, DWRF_FORM_data4 = 0x06, DWRF_FORM_string = 0x08 };
-
-enum { DWRF_LNS_extended_op = 0, DWRF_LNS_copy = 1, DWRF_LNS_advance_pc = 2, DWRF_LNS_advance_line = 3 };
+/* Line number program opcodes */
+enum {
+ DWRF_LNS_extended_op = 0, // Extended opcode
+ DWRF_LNS_copy = 1, // Copy operation
+ DWRF_LNS_advance_pc = 2, // Advance program counter
+ DWRF_LNS_advance_line = 3 // Advance line number
+};
-enum { DWRF_LNE_end_sequence = 1, DWRF_LNE_set_address = 2 };
+/* Line number extended opcodes */
+enum {
+ DWRF_LNE_end_sequence = 1, // End of sequence
+ DWRF_LNE_set_address = 2 // Set address
+};
+/*
+ * Architecture-specific DWARF register numbers
+ *
+ * These constants define the register numbering scheme used by DWARF
+ * for each supported architecture. The numbers must match the ABI
+ * specification for proper stack unwinding.
+ */
enum {
#ifdef __x86_64__
- /* Yes, the order is strange, but correct. */
- DWRF_REG_AX,
- DWRF_REG_DX,
- DWRF_REG_CX,
- DWRF_REG_BX,
- DWRF_REG_SI,
- DWRF_REG_DI,
- DWRF_REG_BP,
- DWRF_REG_SP,
- DWRF_REG_8,
- DWRF_REG_9,
- DWRF_REG_10,
- DWRF_REG_11,
- DWRF_REG_12,
- DWRF_REG_13,
- DWRF_REG_14,
- DWRF_REG_15,
- DWRF_REG_RA,
+ /* x86_64 register numbering (note: order is defined by x86_64 ABI) */
+ DWRF_REG_AX, // RAX
+ DWRF_REG_DX, // RDX
+ DWRF_REG_CX, // RCX
+ DWRF_REG_BX, // RBX
+ DWRF_REG_SI, // RSI
+ DWRF_REG_DI, // RDI
+ DWRF_REG_BP, // RBP
+ DWRF_REG_SP, // RSP
+ DWRF_REG_8, // R8
+ DWRF_REG_9, // R9
+ DWRF_REG_10, // R10
+ DWRF_REG_11, // R11
+ DWRF_REG_12, // R12
+ DWRF_REG_13, // R13
+ DWRF_REG_14, // R14
+ DWRF_REG_15, // R15
+ DWRF_REG_RA, // Return address (RIP)
#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
- DWRF_REG_SP = 31,
- DWRF_REG_RA = 30,
+ /* AArch64 register numbering */
+ DWRF_REG_FP = 29, // Frame Pointer
+ DWRF_REG_RA = 30, // Link register (return address)
+ DWRF_REG_SP = 31, // Stack pointer
#else
# error "Unsupported target architecture"
#endif
};
-typedef struct ELFObjectContext
-{
- uint8_t* p; /* Pointer to next address in obj.space. */
- uint8_t* startp; /* Pointer to start address in obj.space. */
- uint8_t* eh_frame_p; /* Pointer to start address in obj.space. */
- uint32_t code_size; /* Size of machine code. */
+/* DWARF encoding constants used in EH frame headers */
+static const uint8_t DwarfUData4 = 0x03; // Unsigned 4-byte data
+static const uint8_t DwarfSData4 = 0x0b; // Signed 4-byte data
+static const uint8_t DwarfPcRel = 0x10; // PC-relative encoding
+static const uint8_t DwarfDataRel = 0x30; // Data-relative encoding
+
+// =============================================================================
+// ELF OBJECT CONTEXT
+// =============================================================================
+
+/*
+ * Context for building ELF/DWARF structures
+ *
+ * This structure maintains state while constructing DWARF unwind information.
+ * It acts as a simple buffer manager with pointers to track current position
+ * and important landmarks within the buffer.
+ */
+typedef struct ELFObjectContext {
+ uint8_t* p; // Current write position in buffer
+ uint8_t* startp; // Start of buffer (for offset calculations)
+ uint8_t* eh_frame_p; // Start of EH frame data (for relative offsets)
+ uint32_t code_size; // Size of the code being described
} ELFObjectContext;
-/* Append a null-terminated string. */
-static uint32_t
-elfctx_append_string(ELFObjectContext* ctx, const char* str)
-{
+/*
+ * EH Frame Header structure for DWARF unwinding
+ *
+ * This structure provides metadata about the DWARF unwinding information
+ * that follows. It's required by the perf jitdump format to enable proper
+ * stack unwinding during profiling.
+ */
+typedef struct {
+ unsigned char version; // EH frame version (always 1)
+ unsigned char eh_frame_ptr_enc; // Encoding of EH frame pointer
+ unsigned char fde_count_enc; // Encoding of FDE count
+ unsigned char table_enc; // Encoding of table entries
+ int32_t eh_frame_ptr; // Pointer to EH frame data
+ int32_t eh_fde_count; // Number of FDEs (Frame Description Entries)
+ int32_t from; // Start address of code range
+ int32_t to; // End address of code range
+} EhFrameHeader;
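+/*
+ * The signed fields above are self-relative offsets, which is easiest to
+ * see against the payload layout that perf_map_jit_write_entry() produces:
+ *
+ *     +----------------------------+
+ *     | CodeUnwindingInfoEvent     |
+ *     +----------------------------+
+ *     | EH frame data (CIE + FDE)  |  <-- eh_frame_ptr points back here
+ *     +----------------------------+
+ *     | EhFrameHeader              |
+ *     +----------------------------+
+ *     | padding to 8-byte boundary |
+ *     +----------------------------+
+ *
+ * This is why eh_frame_ptr, from and to are computed as negative offsets
+ * later in this file.
+ */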
+
+// =============================================================================
+// DWARF GENERATION UTILITIES
+// =============================================================================
+
+/*
+ * Append a null-terminated string to the ELF context buffer
+ *
+ * Args:
+ * ctx: ELF object context
+ * str: String to append (must be null-terminated)
+ *
+ * Returns: Offset from start of buffer where string was written
+ */
+static uint32_t elfctx_append_string(ELFObjectContext* ctx, const char* str) {
uint8_t* p = ctx->p;
uint32_t ofs = (uint32_t)(p - ctx->startp);
+
+ /* Copy string including null terminator */
do {
*p++ = (uint8_t)*str;
} while (*str++);
+
ctx->p = p;
return ofs;
}
-/* Append a SLEB128 value. */
-static void
-elfctx_append_sleb128(ELFObjectContext* ctx, int32_t v)
-{
+/*
+ * Append a SLEB128 (Signed Little Endian Base 128) value
+ *
+ * SLEB128 is a variable-length encoding used extensively in DWARF.
+ * It efficiently encodes small numbers in fewer bytes.
+ *
+ * Args:
+ * ctx: ELF object context
+ * v: Signed value to encode
+ */
+static void elfctx_append_sleb128(ELFObjectContext* ctx, int32_t v) {
uint8_t* p = ctx->p;
+
+ /* Encode 7 bits at a time, with continuation bit in MSB */
for (; (uint32_t)(v + 0x40) >= 0x80; v >>= 7) {
- *p++ = (uint8_t)((v & 0x7f) | 0x80);
+ *p++ = (uint8_t)((v & 0x7f) | 0x80); // Set continuation bit
}
- *p++ = (uint8_t)(v & 0x7f);
+ *p++ = (uint8_t)(v & 0x7f); // Final byte without continuation bit
+
ctx->p = p;
}
-/* Append a ULEB128 to buffer. */
-static void
-elfctx_append_uleb128(ELFObjectContext* ctx, uint32_t v)
-{
+/*
+ * Append a ULEB128 (Unsigned Little Endian Base 128) value
+ *
+ * Similar to SLEB128 but for unsigned values.
+ *
+ * Args:
+ * ctx: ELF object context
+ * v: Unsigned value to encode
+ */
+static void elfctx_append_uleb128(ELFObjectContext* ctx, uint32_t v) {
uint8_t* p = ctx->p;
+
+ /* Encode 7 bits at a time, with continuation bit in MSB */
for (; v >= 0x80; v >>= 7) {
- *p++ = (char)((v & 0x7f) | 0x80);
+ *p++ = (char)((v & 0x7f) | 0x80); // Set continuation bit
}
- *p++ = (char)v;
+ *p++ = (char)v; // Final byte without continuation bit
+
ctx->p = p;
}
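+/*
+ * A worked ULEB128 example (the classic one from the DWARF spec):
+ * 624485 (0x98765) splits into 7-bit groups, least significant group
+ * emitted first,
+ *
+ *     624485 = 0b0100110_0001110_1100101    // groups 0x26, 0x0e, 0x65
+ *     bytes:   0xE5 0x8E 0x26               // 0x65|0x80, 0x0e|0x80, 0x26
+ *
+ * SLEB128 carries the sign in bit 6 of the final byte, so small negative
+ * values stay compact: -2 encodes as the single byte 0x7E, and -129 as
+ * 0xFF 0x7E.
+ */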
-/* Shortcuts to generate DWARF structures. */
-#define DWRF_U8(x) (*p++ = (x))
-#define DWRF_I8(x) (*(int8_t*)p = (x), p++)
-#define DWRF_U16(x) (*(uint16_t*)p = (x), p += 2)
-#define DWRF_U32(x) (*(uint32_t*)p = (x), p += 4)
-#define DWRF_ADDR(x) (*(uintptr_t*)p = (x), p += sizeof(uintptr_t))
-#define DWRF_UV(x) (ctx->p = p, elfctx_append_uleb128(ctx, (x)), p = ctx->p)
-#define DWRF_SV(x) (ctx->p = p, elfctx_append_sleb128(ctx, (x)), p = ctx->p)
-#define DWRF_STR(str) (ctx->p = p, elfctx_append_string(ctx, (str)), p = ctx->p)
-#define DWRF_ALIGNNOP(s) \
- while ((uintptr_t)p & ((s)-1)) { \
- *p++ = DWRF_CFA_nop; \
+/*
+ * Macros for generating DWARF structures
+ *
+ * These macros provide a convenient way to write various data types
+ * to the DWARF buffer while automatically advancing the pointer.
+ */
+#define DWRF_U8(x) (*p++ = (x)) // Write unsigned 8-bit
+#define DWRF_I8(x) (*(int8_t*)p = (x), p++) // Write signed 8-bit
+#define DWRF_U16(x) (*(uint16_t*)p = (x), p += 2) // Write unsigned 16-bit
+#define DWRF_U32(x) (*(uint32_t*)p = (x), p += 4) // Write unsigned 32-bit
+#define DWRF_ADDR(x) (*(uintptr_t*)p = (x), p += sizeof(uintptr_t)) // Write address
+#define DWRF_UV(x) (ctx->p = p, elfctx_append_uleb128(ctx, (x)), p = ctx->p) // Write ULEB128
+#define DWRF_SV(x) (ctx->p = p, elfctx_append_sleb128(ctx, (x)), p = ctx->p) // Write SLEB128
+#define DWRF_STR(str) (ctx->p = p, elfctx_append_string(ctx, (str)), p = ctx->p) // Write string
+
+/* Align to specified boundary with NOP instructions */
+#define DWRF_ALIGNNOP(s) \
+ while ((uintptr_t)p & ((s)-1)) { \
+ *p++ = DWRF_CFA_nop; \
}
-#define DWRF_SECTION(name, stmt) \
- { \
- uint32_t* szp_##name = (uint32_t*)p; \
- p += 4; \
- stmt; \
- *szp_##name = (uint32_t)((p - (uint8_t*)szp_##name) - 4); \
+
+/* Write a DWARF section with automatic size calculation */
+#define DWRF_SECTION(name, stmt) \
+ { \
+ uint32_t* szp_##name = (uint32_t*)p; \
+ p += 4; \
+ stmt; \
+ *szp_##name = (uint32_t)((p - (uint8_t*)szp_##name) - 4); \
}
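+/*
+ * DWRF_SECTION implements the standard DWARF length-prefix pattern:
+ * reserve a 4-byte slot, emit the body, then backpatch the slot with the
+ * number of bytes the body produced. Expanded by hand for the CIE:
+ *
+ *     uint32_t* szp_CIE = (uint32_t*)p;  // remember where the length goes
+ *     p += 4;                            // skip it for now
+ *     ... emit CIE contents ...          // the body advances p
+ *     *szp_CIE = (uint32_t)((p - (uint8_t*)szp_CIE) - 4);  // length excludes
+ *                                                          // the field itself
+ */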
-/* Initialize .eh_frame section. */
-static void
-elf_init_ehframe(ELFObjectContext* ctx)
-{
+// =============================================================================
+// DWARF EH FRAME GENERATION
+// =============================================================================
+
+/*
+ * Initialize DWARF .eh_frame section for a code region
+ *
+ * The .eh_frame section contains Call Frame Information (CFI) that describes
+ * how to unwind the stack at any point in the code. This is essential for
+ * proper profiling as it allows perf to generate accurate call graphs.
+ *
+ * The function generates two main components:
+ * 1. CIE (Common Information Entry) - describes calling conventions
+ * 2. FDE (Frame Description Entry) - describes specific function unwinding
+ *
+ * Args:
+ * ctx: ELF object context containing code size and buffer pointers
+ */
+static void elf_init_ehframe(ELFObjectContext* ctx) {
uint8_t* p = ctx->p;
- uint8_t* framep = p;
-
- /* Emit DWARF EH CIE. */
- DWRF_SECTION(CIE, DWRF_U32(0); /* Offset to CIE itself. */
- DWRF_U8(DWRF_CIE_VERSION);
- DWRF_STR("zR"); /* Augmentation. */
- DWRF_UV(1); /* Code alignment factor. */
- DWRF_SV(-(int64_t)sizeof(uintptr_t)); /* Data alignment factor. */
- DWRF_U8(DWRF_REG_RA); /* Return address register. */
- DWRF_UV(1);
- DWRF_U8(DWRF_EH_PE_pcrel | DWRF_EH_PE_sdata4); /* Augmentation data. */
- DWRF_U8(DWRF_CFA_def_cfa); DWRF_UV(DWRF_REG_SP); DWRF_UV(sizeof(uintptr_t));
- DWRF_U8(DWRF_CFA_offset|DWRF_REG_RA); DWRF_UV(1);
- DWRF_ALIGNNOP(sizeof(uintptr_t));
+ uint8_t* framep = p; // Remember start of frame data
+
+ /*
+ * DWARF Unwind Table for Trampoline Function
+ *
+ * This section defines DWARF Call Frame Information (CFI) using encoded macros
+ * like `DWRF_U8`, `DWRF_UV`, and `DWRF_SECTION` to describe how the trampoline function
+ * preserves and restores registers. This is used by profiling tools (e.g., `perf`)
+ * and debuggers for stack unwinding in JIT-compiled code.
+ *
+ * -------------------------------------------------
+ * TO REGENERATE THIS TABLE FROM GCC OBJECTS:
+ * -------------------------------------------------
+ *
+ * 1. Create a trampoline source file (e.g., `trampoline.c`):
+ *
+ * #include <Python.h>
+ * typedef PyObject* (*py_evaluator)(void*, void*, int);
+ * PyObject* trampoline(void *ts, void *f, int throwflag, py_evaluator evaluator) {
+ * return evaluator(ts, f, throwflag);
+ * }
+ *
+ * 2. Compile to an object file with frame pointer preservation:
+ *
+ * gcc trampoline.c -I. -I./Include -O2 -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -c
+ *
+ * 3. Extract DWARF unwind info from the object file:
+ *
+ * readelf -w trampoline.o
+ *
+ * Example output from `.eh_frame`:
+ *
+ * 00000000 CIE
+ * Version: 1
+ * Augmentation: "zR"
+ * Code alignment factor: 4
+ * Data alignment factor: -8
+ * Return address column: 30
+ * DW_CFA_def_cfa: r31 (sp) ofs 0
+ *
+ * 00000014 FDE cie=00000000 pc=0..14
+ * DW_CFA_advance_loc: 4
+ * DW_CFA_def_cfa_offset: 16
+ * DW_CFA_offset: r29 at cfa-16
+ * DW_CFA_offset: r30 at cfa-8
+ * DW_CFA_advance_loc: 12
+ * DW_CFA_restore: r30
+ * DW_CFA_restore: r29
+ * DW_CFA_def_cfa_offset: 0
+ *
+ * -- These values can be verified by comparing with `readelf -w` or `llvm-dwarfdump --eh-frame`.
+ *
+ * ----------------------------------
+ * HOW TO TRANSLATE TO DWRF_* MACROS:
+ * ----------------------------------
+ *
+ * After compiling your trampoline with:
+ *
+ * gcc trampoline.c -I. -I./Include -O2 -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -c
+ *
+ * run:
+ *
+ * readelf -w trampoline.o
+ *
+ * to inspect the generated `.eh_frame` data. You will see two main components:
+ *
+ * 1. A CIE (Common Information Entry): shared configuration used by all FDEs.
+ * 2. An FDE (Frame Description Entry): function-specific unwind instructions.
+ *
+ * ---------------------
+ * Translating the CIE:
+ * ---------------------
+ * From `readelf -w`, you might see:
+ *
+ * 00000000 0000000000000010 00000000 CIE
+ * Version: 1
+ * Augmentation: "zR"
+ * Code alignment factor: 4
+ * Data alignment factor: -8
+ * Return address column: 30
+ * Augmentation data: 1b
+ * DW_CFA_def_cfa: r31 (sp) ofs 0
+ *
+ * Map this to:
+ *
+ * DWRF_SECTION(CIE,
+ * DWRF_U32(0); // CIE ID (always 0 for CIEs)
+ * DWRF_U8(DWRF_CIE_VERSION); // Version: 1
+ * DWRF_STR("zR"); // Augmentation string "zR"
+ * DWRF_UV(4); // Code alignment factor = 4
+ * DWRF_SV(-8); // Data alignment factor = -8
+ * DWRF_U8(DWRF_REG_RA); // Return address register (e.g., x30 = 30)
+ * DWRF_UV(1); // Augmentation data length = 1
+ * DWRF_U8(DWRF_EH_PE_pcrel | DWRF_EH_PE_sdata4); // Encoding for FDE pointers
+ *
+ * DWRF_U8(DWRF_CFA_def_cfa); // DW_CFA_def_cfa
+ * DWRF_UV(DWRF_REG_SP); // Register: SP (r31)
+ * DWRF_UV(0); // Offset = 0
+ *
+ * DWRF_ALIGNNOP(sizeof(uintptr_t)); // Align to pointer size boundary
+ * )
+ *
+ * Notes:
+ * - Use `DWRF_UV` for unsigned LEB128, `DWRF_SV` for signed LEB128.
+ * - `DWRF_REG_RA` and `DWRF_REG_SP` are architecture-defined constants.
+ *
+ * ---------------------
+ * Translating the FDE:
+ * ---------------------
+ * From `readelf -w`:
+ *
+ * 00000014 0000000000000020 00000018 FDE cie=00000000 pc=0000000000000000..0000000000000014
+ * DW_CFA_advance_loc: 4
+ * DW_CFA_def_cfa_offset: 16
+ * DW_CFA_offset: r29 at cfa-16
+ * DW_CFA_offset: r30 at cfa-8
+ * DW_CFA_advance_loc: 12
+ * DW_CFA_restore: r30
+ * DW_CFA_restore: r29
+ * DW_CFA_def_cfa_offset: 0
+ *
+ * Map the FDE header and instructions to:
+ *
+ * DWRF_SECTION(FDE,
+ * DWRF_U32((uint32_t)(p - framep)); // Offset to CIE (relative from here)
+ * DWRF_U32(-0x30); // Initial PC-relative location of the code
+ * DWRF_U32(ctx->code_size); // Code range covered by this FDE
+ * DWRF_U8(0); // Augmentation data length (none)
+ *
+ * DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance location by 1 unit (1 * 4 = 4 bytes)
+ * DWRF_U8(DWRF_CFA_def_cfa_offset); // CFA = SP + 16
+ * DWRF_UV(16);
+ *
+ * DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Save x29 (frame pointer)
+ * DWRF_UV(2); // At offset 2 * 8 = 16 bytes
+ *
+ * DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Save x30 (return address)
+ * DWRF_UV(1); // At offset 1 * 8 = 8 bytes
+ *
+ * DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance location by 3 units (3 * 4 = 12 bytes)
+ *
+ * DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Restore x30
+ * DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Restore x29
+ *
+ * DWRF_U8(DWRF_CFA_def_cfa_offset); // CFA = SP
+ * DWRF_UV(0);
+ * )
+ *
+ * To regenerate:
+ * 1. Get the `code alignment factor`, `data alignment factor`, and `RA column` from the CIE.
+ *   2. Note the range of the function from the FDE's `pc=...` line and map it onto the JIT code,
+ *      since the JIT code lands at a different address in every run.
+ * 3. For each `DW_CFA_*` entry, use the corresponding `DWRF_*` macro:
+ * - `DW_CFA_def_cfa_offset` → DWRF_U8(DWRF_CFA_def_cfa_offset), DWRF_UV(value)
+ * - `DW_CFA_offset: rX` → DWRF_U8(DWRF_CFA_offset | reg), DWRF_UV(offset)
+ *      - `DW_CFA_restore: rX`        → DWRF_U8(DWRF_CFA_offset | reg) // restore is expressed by re-emitting the offset rule
+ * - `DW_CFA_advance_loc: N` → DWRF_U8(DWRF_CFA_advance_loc | (N / code_alignment_factor))
+ * 4. Use `DWRF_REG_FP`, `DWRF_REG_RA`, etc., for register numbers.
+ * 5. Use `sizeof(uintptr_t)` (typically 8) for pointer size calculations and alignment.
+ */
+
+ /*
+ * Emit DWARF EH CIE (Common Information Entry)
+ *
+ * The CIE describes the calling conventions and basic unwinding rules
+ * that apply to all functions in this compilation unit.
+ */
+ DWRF_SECTION(CIE,
+ DWRF_U32(0); // CIE ID (0 indicates this is a CIE)
+ DWRF_U8(DWRF_CIE_VERSION); // CIE version (1)
+        DWRF_STR("zR");                        // Augmentation: 'z' = has augmentation data, 'R' = FDE pointer encoding follows
+ DWRF_UV(1); // Code alignment factor
+ DWRF_SV(-(int64_t)sizeof(uintptr_t)); // Data alignment factor (negative)
+ DWRF_U8(DWRF_REG_RA); // Return address register number
+ DWRF_UV(1); // Augmentation data length
+ DWRF_U8(DWRF_EH_PE_pcrel | DWRF_EH_PE_sdata4); // FDE pointer encoding
+
+ /* Initial CFI instructions - describe default calling convention */
+ DWRF_U8(DWRF_CFA_def_cfa); // Define CFA (Call Frame Address)
+ DWRF_UV(DWRF_REG_SP); // CFA = SP register
+ DWRF_UV(sizeof(uintptr_t)); // CFA = SP + pointer_size
+ DWRF_U8(DWRF_CFA_offset|DWRF_REG_RA); // Return address is saved
+ DWRF_UV(1); // At offset 1 from CFA
+
+ DWRF_ALIGNNOP(sizeof(uintptr_t)); // Align to pointer boundary
)
- ctx->eh_frame_p = p;
-
- /* Emit DWARF EH FDE. */
- DWRF_SECTION(FDE, DWRF_U32((uint32_t)(p - framep)); /* Offset to CIE. */
- DWRF_U32(-0x30); /* Machine code offset relative to .text. */
- DWRF_U32(ctx->code_size); /* Machine code length. */
- DWRF_U8(0); /* Augmentation data. */
- /* Registers saved in CFRAME. */
+ ctx->eh_frame_p = p; // Remember start of FDE data
+
+ /*
+ * Emit DWARF EH FDE (Frame Description Entry)
+ *
+ * The FDE describes unwinding information specific to this function.
+ * It references the CIE and provides function-specific CFI instructions.
+ */
+ DWRF_SECTION(FDE,
+ DWRF_U32((uint32_t)(p - framep)); // Offset to CIE (backwards reference)
+ DWRF_U32(-0x30); // Machine code offset relative to .text
+        DWRF_U32(ctx->code_size);              // Address range covered by this FDE (code length)
+ DWRF_U8(0); // Augmentation data length (none)
+
+ /*
+ * Architecture-specific CFI instructions
+ *
+ * These instructions describe how registers are saved and restored
+ * during function calls. Each architecture has different calling
+ * conventions and register usage patterns.
+ */
#ifdef __x86_64__
- DWRF_U8(DWRF_CFA_advance_loc | 4);
- DWRF_U8(DWRF_CFA_def_cfa_offset); DWRF_UV(16);
- DWRF_U8(DWRF_CFA_advance_loc | 6);
- DWRF_U8(DWRF_CFA_def_cfa_offset); DWRF_UV(8);
- /* Extra registers saved for JIT-compiled code. */
+ /* x86_64 calling convention unwinding rules */
+# if defined(__CET__) && (__CET__ & 1)
+ DWRF_U8(DWRF_CFA_advance_loc | 8); // Advance location by 8 bytes when CET protection is enabled
+# else
+ DWRF_U8(DWRF_CFA_advance_loc | 4); // Advance location by 4 bytes
+# endif
+ DWRF_U8(DWRF_CFA_def_cfa_offset); // Redefine CFA offset
+ DWRF_UV(16); // New offset: SP + 16
+ DWRF_U8(DWRF_CFA_advance_loc | 6); // Advance location by 6 bytes
+ DWRF_U8(DWRF_CFA_def_cfa_offset); // Redefine CFA offset
+ DWRF_UV(8); // New offset: SP + 8
#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
- DWRF_U8(DWRF_CFA_advance_loc | 1);
- DWRF_U8(DWRF_CFA_def_cfa_offset); DWRF_UV(16);
- DWRF_U8(DWRF_CFA_offset | 29); DWRF_UV(2);
- DWRF_U8(DWRF_CFA_offset | 30); DWRF_UV(1);
- DWRF_U8(DWRF_CFA_advance_loc | 3);
- DWRF_U8(DWRF_CFA_offset | -(64 - 29));
- DWRF_U8(DWRF_CFA_offset | -(64 - 30));
- DWRF_U8(DWRF_CFA_def_cfa_offset);
- DWRF_UV(0);
+ /* AArch64 calling convention unwinding rules */
+ DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance location by 1 instruction (stp x29, x30)
+ DWRF_U8(DWRF_CFA_def_cfa_offset); // Redefine CFA offset
+ DWRF_UV(16); // CFA = SP + 16 (stack pointer after push)
+ DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Frame pointer (x29) saved
+ DWRF_UV(2); // At offset 2 from CFA (2 * 8 = 16 bytes)
+ DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Link register (x30) saved
+ DWRF_UV(1); // At offset 1 from CFA (1 * 8 = 8 bytes)
+ DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance by 3 instructions (mov x16, x3; mov x29, sp; ldp...)
+ DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Restore frame pointer (x29)
+ DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Restore link register (x30)
+ DWRF_U8(DWRF_CFA_def_cfa_offset); // Final CFA adjustment
+ DWRF_UV(0); // CFA = SP + 0 (stack restored)
+
#else
# error "Unsupported target architecture"
#endif
- DWRF_ALIGNNOP(sizeof(uintptr_t));)
- ctx->p = p;
+ DWRF_ALIGNNOP(sizeof(uintptr_t)); // Align to pointer boundary
+ )
+
+ ctx->p = p; // Update context pointer to end of generated data
+}
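+/*
+ * Typical use, matching perf_map_jit_write_entry() below: point the context
+ * at a scratch buffer, record the code size, and generate:
+ *
+ *     ELFObjectContext ctx;
+ *     char buffer[1024];
+ *     ctx.code_size = code_size;
+ *     ctx.startp = ctx.p = (uint8_t*)buffer;
+ *     elf_init_ehframe(&ctx);
+ *     int eh_frame_size = ctx.p - ctx.startp;
+ */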
+
+// =============================================================================
+// JITDUMP INITIALIZATION
+// =============================================================================
+
+/*
+ * Initialize the perf jitdump interface
+ *
+ * This function sets up everything needed to generate jitdump files:
+ * 1. Creates the jitdump file with a unique name
+ * 2. Maps the first page to signal perf that we're using the interface
+ * 3. Writes the jitdump header
+ * 4. Initializes synchronization primitives
+ *
+ * The memory mapping is crucial - perf detects jitdump files by scanning
+ * for processes that have mapped files matching the pattern /tmp/jit-*.dump
+ *
+ * Returns: Pointer to initialized state, or NULL on failure
+ */
+static void* perf_map_jit_init(void) {
+ char filename[100];
+ int pid = getpid();
+
+ /* Create unique filename based on process ID */
+ snprintf(filename, sizeof(filename) - 1, "/tmp/jit-%d.dump", pid);
+
+ /* Create/open the jitdump file with appropriate permissions */
+ const int fd = open(filename, O_CREAT | O_TRUNC | O_RDWR, 0666);
+ if (fd == -1) {
+ return NULL; // Failed to create file
+ }
+
+ /* Get system page size for memory mapping */
+ const long page_size = sysconf(_SC_PAGESIZE);
+ if (page_size == -1) {
+ close(fd);
+ return NULL; // Failed to get page size
+ }
+
+ /*
+ * Map the first page of the jitdump file
+ *
+ * This memory mapping serves as a signal to perf that this process
+ * is generating JIT code. Perf scans /proc/.../maps looking for mapped
+ * files that match the jitdump naming pattern.
+ *
+ * The mapping must be PROT_READ | PROT_EXEC to be detected by perf.
+ */
+ perf_jit_map_state.mapped_buffer = mmap(
+ NULL, // Let kernel choose address
+ page_size, // Map one page
+ PROT_READ | PROT_EXEC, // Read and execute permissions (required by perf)
+ MAP_PRIVATE, // Private mapping
+ fd, // File descriptor
+ 0 // Offset 0 (first page)
+ );
+
+    if (perf_jit_map_state.mapped_buffer == MAP_FAILED) {  // mmap() reports failure as MAP_FAILED, not NULL
+ close(fd);
+ return NULL; // Memory mapping failed
+ }
+
+ perf_jit_map_state.mapped_size = page_size;
+
+ /* Convert file descriptor to FILE* for easier I/O operations */
+ perf_jit_map_state.perf_map = fdopen(fd, "w+");
+ if (perf_jit_map_state.perf_map == NULL) {
+ close(fd);
+ return NULL; // Failed to create FILE*
+ }
+
+ /*
+ * Set up file buffering for better performance
+ *
+ * We use a large buffer (2MB) because jitdump files can be written
+ * frequently during program execution. Buffering reduces system call
+ * overhead and improves overall performance.
+ */
+ setvbuf(perf_jit_map_state.perf_map, NULL, _IOFBF, 2 * MB);
+
+ /* Write the jitdump file header */
+ perf_map_jit_write_header(pid, perf_jit_map_state.perf_map);
+
+ /*
+ * Initialize thread synchronization lock
+ *
+ * Multiple threads may attempt to write to the jitdump file
+ * simultaneously. This lock ensures thread-safe access to the
+ * global jitdump state.
+ */
+ perf_jit_map_state.map_lock = PyThread_allocate_lock();
+ if (perf_jit_map_state.map_lock == NULL) {
+ fclose(perf_jit_map_state.perf_map);
+ return NULL; // Failed to create lock
+ }
+
+ /* Initialize code ID counter */
+ perf_jit_map_state.code_id = 0;
+
+ /* Configure trampoline API with padding information */
+ trampoline_api.code_padding = PERF_JIT_CODE_PADDING;
+
+ return &perf_jit_map_state;
}
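+/*
+ * A typical profiling session that consumes the file created here
+ * (assuming a build in which the perf jit trampoline is enabled;
+ * my_script.py is a placeholder):
+ *
+ *     perf record -k 1 -- python my_script.py    # -k 1: monotonic clock
+ *     perf inject --jit -i perf.data -o perf.jit.data
+ *     perf report -i perf.jit.data
+ *
+ * `perf inject --jit` pairs the /tmp/jit-<pid>.dump file with the recorded
+ * samples and synthesizes ELF images for each JIT-compiled region.
+ */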
+// =============================================================================
+// MAIN JITDUMP ENTRY WRITING
+// =============================================================================
+
+/*
+ * Write a complete jitdump entry for a Python function
+ *
+ * This is the main function called by Python's trampoline system whenever
+ * a new piece of JIT-compiled code needs to be recorded. It writes both
+ * the unwinding information and the code load event to the jitdump file.
+ *
+ * The function performs these steps:
+ * 1. Initialize jitdump system if not already done
+ * 2. Extract function name and filename from Python code object
+ * 3. Generate DWARF unwinding information
+ * 4. Write unwinding info event to jitdump file
+ * 5. Write code load event to jitdump file
+ *
+ * Args:
+ * state: Jitdump state (currently unused, uses global state)
+ * code_addr: Address where the compiled code resides
+ * code_size: Size of the compiled code in bytes
+ * co: Python code object containing metadata
+ *
+ * IMPORTANT: This function signature is part of Python's internal API
+ * and must not be changed without coordinating with core Python development.
+ */
static void perf_map_jit_write_entry(void *state, const void *code_addr,
- unsigned int code_size, PyCodeObject *co)
+ unsigned int code_size, PyCodeObject *co)
{
-
+ /* Initialize jitdump system on first use */
if (perf_jit_map_state.perf_map == NULL) {
void* ret = perf_map_jit_init();
if(ret == NULL){
- return;
+ return; // Initialization failed, silently abort
}
}
+ /*
+ * Extract function information from Python code object
+ *
+ * We create a human-readable function name by combining the qualified
+ * name (includes class/module context) with the filename. This helps
+ * developers identify functions in perf reports.
+ */
const char *entry = "";
if (co->co_qualname != NULL) {
entry = PyUnicode_AsUTF8(co->co_qualname);
}
+
const char *filename = "";
if (co->co_filename != NULL) {
filename = PyUnicode_AsUTF8(co->co_filename);
}
-
+ /*
+ * Create formatted function name for perf display
+ *
+ * Format: "py::<function_name>:<filename>"
+ * The "py::" prefix helps identify Python functions in mixed-language
+ * profiles (e.g., when profiling C extensions alongside Python code).
+ */
size_t perf_map_entry_size = snprintf(NULL, 0, "py::%s:%s", entry, filename) + 1;
char* perf_map_entry = (char*) PyMem_RawMalloc(perf_map_entry_size);
if (perf_map_entry == NULL) {
- return;
+ return; // Memory allocation failed
}
snprintf(perf_map_entry, perf_map_entry_size, "py::%s:%s", entry, filename);
@@ -528,90 +1081,185 @@ static void perf_map_jit_write_entry(void *state, const void *code_addr,
uword base = (uword)code_addr;
uword size = code_size;
- // Write the code unwinding info event.
-
- // Create unwinding information (eh frame)
+ /*
+ * Generate DWARF unwinding information
+ *
+ * DWARF data is essential for proper stack unwinding during profiling.
+ * Without it, perf cannot generate accurate call graphs, especially
+ * in optimized code where frame pointers may be omitted.
+ */
ELFObjectContext ctx;
- char buffer[1024];
+ char buffer[1024]; // Buffer for DWARF data (1KB should be sufficient)
ctx.code_size = code_size;
ctx.startp = ctx.p = (uint8_t*)buffer;
+
+ /* Generate EH frame (Exception Handling frame) data */
elf_init_ehframe(&ctx);
int eh_frame_size = ctx.p - ctx.startp;
- // Populate the unwind info event for perf
+ /*
+ * Write Code Unwinding Information Event
+ *
+ * This event must be written before the code load event to ensure
+ * perf has the unwinding information available when it processes
+ * the code region.
+ */
CodeUnwindingInfoEvent ev2;
ev2.base.event = PerfUnwindingInfo;
ev2.base.time_stamp = get_current_monotonic_ticks();
ev2.unwind_data_size = sizeof(EhFrameHeader) + eh_frame_size;
- // Ensure we have enough space between DSOs when perf maps them
+
+ /* Verify we don't exceed our padding budget */
assert(ev2.unwind_data_size <= PERF_JIT_CODE_PADDING);
+
ev2.eh_frame_hdr_size = sizeof(EhFrameHeader);
- ev2.mapped_size = round_up(ev2.unwind_data_size, 16);
+ ev2.mapped_size = round_up(ev2.unwind_data_size, 16); // 16-byte alignment
+
+ /* Calculate total event size with padding */
int content_size = sizeof(ev2) + sizeof(EhFrameHeader) + eh_frame_size;
- int padding_size = round_up(content_size, 8) - content_size;
+ int padding_size = round_up(content_size, 8) - content_size; // 8-byte align
ev2.base.size = content_size + padding_size;
- perf_map_jit_write_fully(&ev2, sizeof(ev2));
+ /* Write the unwinding info event header */
+ perf_map_jit_write_fully(&ev2, sizeof(ev2));
- // Populate the eh Frame header
+ /*
+ * Write EH Frame Header
+ *
+ * The EH frame header provides metadata about the DWARF unwinding
+ * information that follows. It includes pointers and counts that
+ * help perf navigate the unwinding data efficiently.
+ */
EhFrameHeader f;
f.version = 1;
- f.eh_frame_ptr_enc = DwarfSData4 | DwarfPcRel;
- f.fde_count_enc = DwarfUData4;
- f.table_enc = DwarfSData4 | DwarfDataRel;
+ f.eh_frame_ptr_enc = DwarfSData4 | DwarfPcRel; // PC-relative signed 4-byte
+ f.fde_count_enc = DwarfUData4; // Unsigned 4-byte count
+ f.table_enc = DwarfSData4 | DwarfDataRel; // Data-relative signed 4-byte
+
+ /* Calculate relative offsets for EH frame navigation */
f.eh_frame_ptr = -(eh_frame_size + 4 * sizeof(unsigned char));
- f.eh_fde_count = 1;
+ f.eh_fde_count = 1; // We generate exactly one FDE per function
f.from = -(round_up(code_size, 8) + eh_frame_size);
+
int cie_size = ctx.eh_frame_p - ctx.startp;
f.to = -(eh_frame_size - cie_size);
+ /* Write EH frame data and header */
perf_map_jit_write_fully(ctx.startp, eh_frame_size);
perf_map_jit_write_fully(&f, sizeof(f));
+ /* Write padding to maintain alignment */
char padding_bytes[] = "\0\0\0\0\0\0\0\0";
perf_map_jit_write_fully(&padding_bytes, padding_size);
- // Write the code load event.
+ /*
+ * Write Code Load Event
+ *
+ * This event tells perf about the new code region. It includes:
+ * - Memory addresses and sizes
+ * - Process and thread identification
+ * - Function name for symbol resolution
+ * - The actual machine code bytes
+ */
CodeLoadEvent ev;
ev.base.event = PerfLoad;
ev.base.size = sizeof(ev) + (name_length+1) + size;
ev.base.time_stamp = get_current_monotonic_ticks();
ev.process_id = getpid();
- ev.thread_id = syscall(SYS_gettid);
- ev.vma = base;
- ev.code_address = base;
+ ev.thread_id = syscall(SYS_gettid); // Get thread ID via system call
+ ev.vma = base; // Virtual memory address
+ ev.code_address = base; // Same as VMA for our use case
ev.code_size = size;
+
+ /* Assign unique code ID and increment counter */
perf_jit_map_state.code_id += 1;
ev.code_id = perf_jit_map_state.code_id;
+ /* Write code load event and associated data */
perf_map_jit_write_fully(&ev, sizeof(ev));
- perf_map_jit_write_fully(perf_map_entry, name_length+1);
- perf_map_jit_write_fully((void*)(base), size);
- return;
+ perf_map_jit_write_fully(perf_map_entry, name_length+1); // Include null terminator
+ perf_map_jit_write_fully((void*)(base), size); // Copy actual machine code
+
+ /* Clean up allocated memory */
+ PyMem_RawFree(perf_map_entry);
}
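+/*
+ * Net effect on the jitdump stream, per compiled function:
+ *
+ *     [CodeUnwindingInfoEvent][EH frame][EhFrameHeader][padding]
+ *     [CodeLoadEvent]["py::<qualname>:<filename>\0"][machine code bytes]
+ *
+ * perf replays these records in order, so the unwinding data is always
+ * registered before the code region it describes.
+ */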
+// =============================================================================
+// CLEANUP AND FINALIZATION
+// =============================================================================
+
+/*
+ * Finalize and cleanup the perf jitdump system
+ *
+ * This function is called when Python is shutting down or when the
+ * perf trampoline system is being disabled. It ensures all resources
+ * are properly released and all buffered data is flushed to disk.
+ *
+ * Args:
+ * state: Jitdump state (currently unused, uses global state)
+ *
+ * Returns: 0 on success
+ *
+ * IMPORTANT: This function signature is part of Python's internal API
+ * and must not be changed without coordinating with core Python development.
+ */
static int perf_map_jit_fini(void* state) {
+ /*
+ * Close jitdump file with proper synchronization
+ *
+ * We need to acquire the lock to ensure no other threads are
+ * writing to the file when we close it. This prevents corruption
+ * and ensures all data is properly flushed.
+ */
if (perf_jit_map_state.perf_map != NULL) {
- // close the file
PyThread_acquire_lock(perf_jit_map_state.map_lock, 1);
- fclose(perf_jit_map_state.perf_map);
+ fclose(perf_jit_map_state.perf_map); // This also flushes buffers
PyThread_release_lock(perf_jit_map_state.map_lock);
- // clean up the lock and state
+ /* Clean up synchronization primitive */
PyThread_free_lock(perf_jit_map_state.map_lock);
perf_jit_map_state.perf_map = NULL;
}
+
+ /*
+ * Unmap the memory region
+ *
+ * This removes the signal to perf that we were generating JIT code.
+ * After this point, perf will no longer detect this process as
+ * having JIT capabilities.
+ */
if (perf_jit_map_state.mapped_buffer != NULL) {
munmap(perf_jit_map_state.mapped_buffer, perf_jit_map_state.mapped_size);
+ perf_jit_map_state.mapped_buffer = NULL;
}
+
+ /* Clear global state reference */
trampoline_api.state = NULL;
- return 0;
+
+ return 0; // Success
}
+// =============================================================================
+// PUBLIC API EXPORT
+// =============================================================================
+
+/*
+ * Python Perf Callbacks Structure
+ *
+ * This structure defines the callback interface that Python's trampoline
+ * system uses to integrate with perf profiling. It contains function
+ * pointers for initialization, event writing, and cleanup.
+ *
+ * CRITICAL: This structure and its contents are part of Python's internal
+ * API. The function signatures and behavior must remain stable to maintain
+ * compatibility with the Python interpreter's perf integration system.
+ *
+ * Used by: Python's _PyPerf_Callbacks system in pycore_ceval.h
+ */
_PyPerf_Callbacks _Py_perfmap_jit_callbacks = {
- &perf_map_jit_init,
- &perf_map_jit_write_entry,
- &perf_map_jit_fini,
+ &perf_map_jit_init, // Initialization function
+ &perf_map_jit_write_entry, // Event writing function
+ &perf_map_jit_fini, // Cleanup function
};
-#endif
+#endif /* PY_HAVE_PERF_TRAMPOLINE */
\ No newline at end of file
diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c
index 8394245d373..724fda63511 100644
--- a/Python/pylifecycle.c
+++ b/Python/pylifecycle.c
@@ -1283,7 +1283,7 @@ init_interp_main(PyThreadState *tstate)
if (is_main_interp) {
/* Initialize warnings. */
PyObject *warnoptions;
- if (_PySys_GetOptionalAttrString("warnoptions", &warnoptions) < 0) {
+ if (PySys_GetOptionalAttrString("warnoptions", &warnoptions) < 0) {
return _PyStatus_ERR("can't initialize warnings");
}
if (warnoptions != NULL && PyList_Check(warnoptions) &&
@@ -1806,7 +1806,7 @@ flush_std_files(void)
PyObject *file;
int status = 0;
- if (_PySys_GetOptionalAttr(&_Py_ID(stdout), &file) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stdout), &file) < 0) {
status = -1;
}
else if (file != NULL && file != Py_None && !file_is_closed(file)) {
@@ -1819,7 +1819,7 @@ flush_std_files(void)
}
Py_XDECREF(file);
- if (_PySys_GetOptionalAttr(&_Py_ID(stderr), &file) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stderr), &file) < 0) {
PyErr_Clear();
status = -1;
}
@@ -3046,7 +3046,7 @@ _Py_FatalError_PrintExc(PyThreadState *tstate)
}
PyObject *ferr;
- if (_PySys_GetOptionalAttr(&_Py_ID(stderr), &ferr) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stderr), &ferr) < 0) {
_PyErr_Clear(tstate);
}
if (ferr == NULL || ferr == Py_None) {
diff --git a/Python/pystate.c b/Python/pystate.c
index 4144e6edefc..0544b15aad1 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -567,6 +567,7 @@ init_interpreter(PyInterpreterState *interp,
}
interp->sys_profile_initialized = false;
interp->sys_trace_initialized = false;
+ interp->_code_object_generation = 0;
interp->jit = false;
interp->executor_list_head = NULL;
interp->executor_deletion_list_head = NULL;
@@ -777,6 +778,10 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
for (int t = 0; t < PY_MONITORING_TOOL_IDS; t++) {
Py_CLEAR(interp->monitoring_tool_names[t]);
}
+ interp->_code_object_generation = 0;
+#ifdef Py_GIL_DISABLED
+ interp->tlbc_indices.tlbc_generation = 0;
+#endif
PyConfig_Clear(&interp->config);
_PyCodec_Fini(interp);
@@ -1346,9 +1351,6 @@ tstate_is_alive(PyThreadState *tstate)
// lifecycle
//----------
-/* Minimum size of data stack chunk */
-#define DATA_STACK_CHUNK_SIZE (16*1024)
-
static _PyStackChunk*
allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
{
@@ -2897,7 +2899,7 @@ _PyInterpreterState_HasFeature(PyInterpreterState *interp, unsigned long feature
static PyObject **
push_chunk(PyThreadState *tstate, int size)
{
- int allocate_size = DATA_STACK_CHUNK_SIZE;
+ int allocate_size = _PY_DATA_STACK_CHUNK_SIZE;
while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
allocate_size *= 2;
}
diff --git a/Python/pythonrun.c b/Python/pythonrun.c
index f67b72aa91f..8f1c78bf831 100644
--- a/Python/pythonrun.c
+++ b/Python/pythonrun.c
@@ -114,7 +114,7 @@ _PyRun_InteractiveLoopObject(FILE *fp, PyObject *filename, PyCompilerFlags *flag
}
PyObject *v;
- if (_PySys_GetOptionalAttr(&_Py_ID(ps1), &v) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(ps1), &v) < 0) {
PyErr_Print();
return -1;
}
@@ -128,7 +128,7 @@ _PyRun_InteractiveLoopObject(FILE *fp, PyObject *filename, PyCompilerFlags *flag
}
}
Py_XDECREF(v);
- if (_PySys_GetOptionalAttr(&_Py_ID(ps2), &v) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(ps2), &v) < 0) {
PyErr_Print();
return -1;
}
@@ -206,7 +206,7 @@ pyrun_one_parse_ast(FILE *fp, PyObject *filename,
PyObject *encoding_obj = NULL;
const char *encoding = NULL;
if (fp == stdin) {
- if (_PySys_GetOptionalAttr(&_Py_ID(stdin), &attr) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stdin), &attr) < 0) {
PyErr_Clear();
}
else if (attr != NULL && attr != Py_None) {
@@ -226,7 +226,7 @@ pyrun_one_parse_ast(FILE *fp, PyObject *filename,
// Get sys.ps1 (as UTF-8)
PyObject *ps1_obj = NULL;
const char *ps1 = "";
- if (_PySys_GetOptionalAttr(&_Py_ID(ps1), &attr) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(ps1), &attr) < 0) {
PyErr_Clear();
}
else if (attr != NULL) {
@@ -247,7 +247,7 @@ pyrun_one_parse_ast(FILE *fp, PyObject *filename,
// Get sys.ps2 (as UTF-8)
PyObject *ps2_obj = NULL;
const char *ps2 = "";
- if (_PySys_GetOptionalAttr(&_Py_ID(ps2), &attr) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(ps2), &attr) < 0) {
PyErr_Clear();
}
else if (attr != NULL) {
@@ -658,7 +658,7 @@ _Py_HandleSystemExitAndKeyboardInterrupt(int *exitcode_p)
}
PyObject *sys_stderr;
- if (_PySys_GetOptionalAttr(&_Py_ID(stderr), &sys_stderr) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stderr), &sys_stderr) < 0) {
PyErr_Clear();
}
else if (sys_stderr != NULL && sys_stderr != Py_None) {
@@ -722,7 +722,7 @@ _PyErr_PrintEx(PyThreadState *tstate, int set_sys_last_vars)
_PyErr_Clear(tstate);
}
}
- if (_PySys_GetOptionalAttr(&_Py_ID(excepthook), &hook) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(excepthook), &hook) < 0) {
PyErr_Clear();
}
if (_PySys_Audit(tstate, "sys.excepthook", "OOOO", hook ? hook : Py_None,
@@ -1197,7 +1197,7 @@ void
PyErr_Display(PyObject *unused, PyObject *value, PyObject *tb)
{
PyObject *file;
- if (_PySys_GetOptionalAttr(&_Py_ID(stderr), &file) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(stderr), &file) < 0) {
PyObject *exc = PyErr_GetRaisedException();
_PyObject_Dump(value);
fprintf(stderr, "lost sys.stderr\n");
@@ -1321,7 +1321,7 @@ static void
flush_io_stream(PyThreadState *tstate, PyObject *name)
{
PyObject *f;
- if (_PySys_GetOptionalAttr(name, &f) < 0) {
+ if (PySys_GetOptionalAttr(name, &f) < 0) {
PyErr_Clear();
}
if (f != NULL) {
diff --git a/Python/remote_debug.h b/Python/remote_debug.h
index edc77c30291..0a817bdbd48 100644
--- a/Python/remote_debug.h
+++ b/Python/remote_debug.h
@@ -35,7 +35,7 @@ extern "C" {
# include <sys/mman.h>
#endif
-#if defined(__APPLE__) && TARGET_OS_OSX
+#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
# include <libproc.h>
# include <mach-o/fat.h>
# include <mach-o/loader.h>
@@ -73,27 +73,105 @@ extern "C" {
# define HAVE_PROCESS_VM_READV 0
#endif
+#define _set_debug_exception_cause(exception, format, ...) \
+ do { \
+ if (!PyErr_ExceptionMatches(PyExc_PermissionError)) { \
+ PyThreadState *tstate = _PyThreadState_GET(); \
+ if (!_PyErr_Occurred(tstate)) { \
+ _PyErr_Format(tstate, exception, format, ##__VA_ARGS__); \
+ } else { \
+ _PyErr_FormatFromCause(exception, format, ##__VA_ARGS__); \
+ } \
+ } \
+ } while (0)
+
+static inline size_t
+get_page_size(void) {
+    static size_t page_size = 0;   // cached so the page size is queried only once
+ if (page_size == 0) {
+#ifdef MS_WINDOWS
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ page_size = si.dwPageSize;
+#else
+ page_size = (size_t)getpagesize();
+#endif
+ }
+ return page_size;
+}
+
+typedef struct page_cache_entry {
+ uintptr_t page_addr; // page-aligned base address
+ char *data;
+ int valid;
+ struct page_cache_entry *next;
+} page_cache_entry_t;
+
+#define MAX_PAGES 1024
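+/*
+ * Sketch of the lookup pattern this cache is shaped for (the actual read
+ * path appears further down in this header; the direct-mapped indexing
+ * below is illustrative, not a guarantee of the real scheme):
+ *
+ *     uintptr_t page_addr = addr & ~(uintptr_t)(handle->page_size - 1);
+ *     page_cache_entry_t *e =
+ *         &handle->pages[(page_addr / handle->page_size) % MAX_PAGES];
+ *     if (e->valid && e->page_addr == page_addr) {
+ *         // hit: serve the read from e->data instead of the remote process
+ *     }
+ */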
+
// Define a platform-independent process handle structure
typedef struct {
pid_t pid;
-#ifdef MS_WINDOWS
+#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
+ mach_port_t task;
+#elif defined(MS_WINDOWS)
HANDLE hProcess;
+#elif defined(__linux__)
+ int memfd;
#endif
+ page_cache_entry_t pages[MAX_PAGES];
+ Py_ssize_t page_size;
} proc_handle_t;
+static void
+_Py_RemoteDebug_FreePageCache(proc_handle_t *handle)
+{
+ for (int i = 0; i < MAX_PAGES; i++) {
+ PyMem_RawFree(handle->pages[i].data);
+ handle->pages[i].data = NULL;
+ handle->pages[i].valid = 0;
+ }
+}
+
+void
+_Py_RemoteDebug_ClearCache(proc_handle_t *handle)
+{
+ for (int i = 0; i < MAX_PAGES; i++) {
+ handle->pages[i].valid = 0;
+ }
+}
+
+#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
+static mach_port_t pid_to_task(pid_t pid);
+#endif
+
// Initialize the process handle
static int
_Py_RemoteDebug_InitProcHandle(proc_handle_t *handle, pid_t pid) {
handle->pid = pid;
-#ifdef MS_WINDOWS
+#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
+ handle->task = pid_to_task(handle->pid);
+ if (handle->task == 0) {
+ _set_debug_exception_cause(PyExc_RuntimeError, "Failed to initialize macOS process handle");
+ return -1;
+ }
+#elif defined(MS_WINDOWS)
handle->hProcess = OpenProcess(
PROCESS_VM_READ | PROCESS_VM_WRITE | PROCESS_VM_OPERATION | PROCESS_QUERY_INFORMATION,
FALSE, pid);
if (handle->hProcess == NULL) {
PyErr_SetFromWindowsErr(0);
+ _set_debug_exception_cause(PyExc_RuntimeError, "Failed to initialize Windows process handle");
return -1;
}
+#elif defined(__linux__)
+ handle->memfd = -1;
#endif
+ handle->page_size = get_page_size();
+ for (int i = 0; i < MAX_PAGES; i++) {
+ handle->pages[i].data = NULL;
+ handle->pages[i].valid = 0;
+ }
return 0;
}
@@ -105,11 +183,17 @@ _Py_RemoteDebug_CleanupProcHandle(proc_handle_t *handle) {
CloseHandle(handle->hProcess);
handle->hProcess = NULL;
}
+#elif defined(__linux__)
+ if (handle->memfd != -1) {
+ close(handle->memfd);
+ handle->memfd = -1;
+ }
#endif
handle->pid = 0;
+ _Py_RemoteDebug_FreePageCache(handle);
}
-#if defined(__APPLE__) && TARGET_OS_OSX
+#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
static uintptr_t
return_section_address64(
@@ -148,8 +232,10 @@ return_section_address64(
&object_name
);
if (ret != KERN_SUCCESS) {
- PyErr_SetString(
- PyExc_RuntimeError, "Cannot get any more VM maps.\n");
+ PyErr_Format(PyExc_RuntimeError,
+ "mach_vm_region failed while parsing 64-bit Mach-O binary "
+ "at base address 0x%lx (kern_return_t: %d)",
+ base, ret);
return 0;
}
}
@@ -169,9 +255,6 @@ return_section_address64(
cmd = (struct segment_command_64*)((void*)cmd + cmd->cmdsize);
}
- // We should not be here, but if we are there, we should say about this
- PyErr_SetString(
- PyExc_RuntimeError, "Cannot find section address.\n");
return 0;
}
@@ -212,8 +295,10 @@ return_section_address32(
&object_name
);
if (ret != KERN_SUCCESS) {
- PyErr_SetString(
- PyExc_RuntimeError, "Cannot get any more VM maps.\n");
+ PyErr_Format(PyExc_RuntimeError,
+ "mach_vm_region failed while parsing 32-bit Mach-O binary "
+ "at base address 0x%lx (kern_return_t: %d)",
+ base, ret);
return 0;
}
}
@@ -233,9 +318,6 @@ return_section_address32(
cmd = (struct segment_command*)((void*)cmd + cmd->cmdsize);
}
- // We should not be here, but if we are there, we should say about this
- PyErr_SetString(
- PyExc_RuntimeError, "Cannot find section address.\n");
return 0;
}
@@ -253,8 +335,20 @@ return_section_address_fat(
int is_abi64;
size_t cpu_size = sizeof(cpu), abi64_size = sizeof(is_abi64);
- sysctlbyname("hw.cputype", &cpu, &cpu_size, NULL, 0);
- sysctlbyname("hw.cpu64bit_capable", &is_abi64, &abi64_size, NULL, 0);
+ if (sysctlbyname("hw.cputype", &cpu, &cpu_size, NULL, 0) != 0) {
+ PyErr_Format(PyExc_OSError,
+ "Failed to determine CPU type via sysctlbyname "
+ "for fat binary analysis at 0x%lx: %s",
+ base, strerror(errno));
+ return 0;
+ }
+ if (sysctlbyname("hw.cpu64bit_capable", &is_abi64, &abi64_size, NULL, 0) != 0) {
+ PyErr_Format(PyExc_OSError,
+ "Failed to determine CPU ABI capability via sysctlbyname "
+ "for fat binary analysis at 0x%lx: %s",
+ base, strerror(errno));
+ return 0;
+ }
cpu |= is_abi64 * CPU_ARCH_ABI64;
@@ -285,13 +379,18 @@ return_section_address_fat(
return return_section_address64(section, proc_ref, base, (void*)hdr);
default:
- PyErr_SetString(PyExc_RuntimeError, "Unknown Mach-O magic in fat binary.\n");
+ PyErr_Format(PyExc_RuntimeError,
+ "Unknown Mach-O magic number 0x%x in fat binary architecture %u at base 0x%lx",
+ hdr->magic, i, base);
return 0;
}
}
}
- PyErr_SetString(PyExc_RuntimeError, "No matching architecture found in fat binary.\n");
+ PyErr_Format(PyExc_RuntimeError,
+ "No matching architecture found for CPU type 0x%x "
+ "in fat binary at base 0x%lx (%u architectures examined)",
+ cpu, base, nfat_arch);
return 0;
}
@@ -300,20 +399,26 @@ search_section_in_file(const char* secname, char* path, uintptr_t base, mach_vm_
{
int fd = open(path, O_RDONLY);
if (fd == -1) {
- PyErr_Format(PyExc_RuntimeError, "Cannot open binary %s\n", path);
+ PyErr_Format(PyExc_OSError,
+ "Cannot open binary file '%s' for section '%s' search: %s",
+ path, secname, strerror(errno));
return 0;
}
struct stat fs;
if (fstat(fd, &fs) == -1) {
- PyErr_Format(PyExc_RuntimeError, "Cannot get size of binary %s\n", path);
+ PyErr_Format(PyExc_OSError,
+ "Cannot get file size for binary '%s' during section '%s' search: %s",
+ path, secname, strerror(errno));
close(fd);
return 0;
}
void* map = mmap(0, fs.st_size, PROT_READ, MAP_SHARED, fd, 0);
if (map == MAP_FAILED) {
- PyErr_Format(PyExc_RuntimeError, "Cannot map binary %s\n", path);
+ PyErr_Format(PyExc_OSError,
+ "Cannot memory map binary file '%s' (size: %lld bytes) for section '%s' search: %s",
+ path, (long long)fs.st_size, secname, strerror(errno));
close(fd);
return 0;
}
@@ -335,13 +440,22 @@ search_section_in_file(const char* secname, char* path, uintptr_t base, mach_vm_
result = return_section_address_fat(secname, proc_ref, base, map);
break;
default:
- PyErr_SetString(PyExc_RuntimeError, "Unknown Mach-O magic");
+ PyErr_Format(PyExc_RuntimeError,
+ "Unrecognized Mach-O magic number 0x%x in binary file '%s' for section '%s' search",
+ magic, path, secname);
break;
}
- munmap(map, fs.st_size);
+ if (munmap(map, fs.st_size) != 0) {
+ PyErr_Format(PyExc_OSError,
+ "Failed to unmap binary file '%s' (size: %lld bytes): %s",
+ path, (long long)fs.st_size, strerror(errno));
+ result = 0;
+ }
if (close(fd) != 0) {
- PyErr_SetFromErrno(PyExc_OSError);
+ PyErr_Format(PyExc_OSError,
+ "Failed to close binary file '%s': %s",
+ path, strerror(errno));
result = 0;
}
return result;
@@ -356,7 +470,10 @@ pid_to_task(pid_t pid)
result = task_for_pid(mach_task_self(), pid, &task);
if (result != KERN_SUCCESS) {
- PyErr_Format(PyExc_PermissionError, "Cannot get task for PID %d", pid);
+ PyErr_Format(PyExc_PermissionError,
+ "Cannot get task port for PID %d (kern_return_t: %d). "
+ "This typically requires running as root or having the 'com.apple.system-task-ports' entitlement.",
+ pid, result);
return 0;
}
return task;
@@ -373,13 +490,15 @@ search_map_for_section(proc_handle_t *handle, const char* secname, const char* s
mach_port_t proc_ref = pid_to_task(handle->pid);
if (proc_ref == 0) {
if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_PermissionError, "Cannot get task for PID");
+ PyErr_Format(PyExc_PermissionError,
+ "Cannot get task port for PID %d during section search",
+ handle->pid);
}
return 0;
}
- int match_found = 0;
char map_filename[MAXPATHLEN + 1];
+
while (mach_vm_region(
proc_ref,
&address,
@@ -389,6 +508,7 @@ search_map_for_section(proc_handle_t *handle, const char* secname, const char* s
&count,
&object_name) == KERN_SUCCESS)
{
+
if ((region_info.protection & VM_PROT_READ) == 0
|| (region_info.protection & VM_PROT_EXECUTE) == 0) {
address += size;
@@ -409,21 +529,21 @@ search_map_for_section(proc_handle_t *handle, const char* secname, const char* s
filename = map_filename; // No path, use the whole string
}
- if (!match_found && strncmp(filename, substr, strlen(substr)) == 0) {
- match_found = 1;
- return search_section_in_file(
+ if (strncmp(filename, substr, strlen(substr)) == 0) {
+ uintptr_t result = search_section_in_file(
secname, map_filename, address, size, proc_ref);
+ if (result != 0) {
+ return result;
+ }
}
address += size;
}
- PyErr_SetString(PyExc_RuntimeError,
- "mach_vm_region failed to find the section");
return 0;
}
-#endif // (__APPLE__ && TARGET_OS_OSX)
+#endif // (__APPLE__ && defined(TARGET_OS_OSX) && TARGET_OS_OSX)
#if defined(__linux__) && HAVE_PROCESS_VM_READV
static uintptr_t
@@ -442,24 +562,38 @@ search_elf_file_for_section(
int fd = open(elf_file, O_RDONLY);
if (fd < 0) {
- PyErr_SetFromErrno(PyExc_OSError);
+ PyErr_Format(PyExc_OSError,
+ "Cannot open ELF file '%s' for section '%s' search: %s",
+ elf_file, secname, strerror(errno));
goto exit;
}
struct stat file_stats;
if (fstat(fd, &file_stats) != 0) {
- PyErr_SetFromErrno(PyExc_OSError);
+ PyErr_Format(PyExc_OSError,
+ "Cannot get file size for ELF file '%s' during section '%s' search: %s",
+ elf_file, secname, strerror(errno));
goto exit;
}
file_memory = mmap(NULL, file_stats.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (file_memory == MAP_FAILED) {
- PyErr_SetFromErrno(PyExc_OSError);
+ PyErr_Format(PyExc_OSError,
+ "Cannot memory map ELF file '%s' (size: %lld bytes) for section '%s' search: %s",
+ elf_file, (long long)file_stats.st_size, secname, strerror(errno));
goto exit;
}
Elf_Ehdr* elf_header = (Elf_Ehdr*)file_memory;
+ // Validate ELF header
+ if (elf_header->e_shstrndx >= elf_header->e_shnum) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Invalid ELF file '%s': string table index %u >= section count %u",
+ elf_file, elf_header->e_shstrndx, elf_header->e_shnum);
+ goto exit;
+ }
+
Elf_Shdr* section_header_table = (Elf_Shdr*)(file_memory + elf_header->e_shoff);
Elf_Shdr* shstrtab_section = &section_header_table[elf_header->e_shstrndx];
@@ -476,6 +610,10 @@ search_elf_file_for_section(
}
}
+ if (section == NULL) {
+ goto exit;
+ }
+
Elf_Phdr* program_header_table = (Elf_Phdr*)(file_memory + elf_header->e_phoff);
// Find the first PT_LOAD segment
Elf_Phdr* first_load_segment = NULL;
@@ -486,18 +624,25 @@ search_elf_file_for_section(
}
}
- if (section != NULL && first_load_segment != NULL) {
- uintptr_t elf_load_addr = first_load_segment->p_vaddr
- - (first_load_segment->p_vaddr % first_load_segment->p_align);
- result = start_address + (uintptr_t)section->sh_addr - elf_load_addr;
+ if (first_load_segment == NULL) {
+ PyErr_Format(PyExc_RuntimeError,
+ "No PT_LOAD segment found in ELF file '%s' (%u program headers examined)",
+ elf_file, elf_header->e_phnum);
+ goto exit;
}
+ uintptr_t elf_load_addr = first_load_segment->p_vaddr
+ - (first_load_segment->p_vaddr % first_load_segment->p_align);
+ result = start_address + (uintptr_t)section->sh_addr - elf_load_addr;
+
exit:
if (file_memory != NULL) {
munmap(file_memory, file_stats.st_size);
}
if (fd >= 0 && close(fd) != 0) {
- PyErr_SetFromErrno(PyExc_OSError);
+ PyErr_Format(PyExc_OSError,
+ "Failed to close ELF file '%s': %s",
+ elf_file, strerror(errno));
result = 0;
}
return result;
@@ -511,7 +656,9 @@ search_linux_map_for_section(proc_handle_t *handle, const char* secname, const c
FILE* maps_file = fopen(maps_file_path, "r");
if (maps_file == NULL) {
- PyErr_SetFromErrno(PyExc_OSError);
+ PyErr_Format(PyExc_OSError,
+ "Cannot open process memory map file '%s' for PID %d section search: %s",
+ maps_file_path, handle->pid, strerror(errno));
return 0;
}
@@ -520,11 +667,16 @@ search_linux_map_for_section(proc_handle_t *handle, const char* secname, const c
char *line = PyMem_Malloc(linesz);
if (!line) {
fclose(maps_file);
- PyErr_NoMemory();
+ _set_debug_exception_cause(PyExc_MemoryError,
+ "Cannot allocate memory for reading process map file '%s'",
+ maps_file_path);
return 0;
}
uintptr_t retval = 0;
+ int lines_processed = 0;
+ int matches_found = 0;
+
while (fgets(line + linelen, linesz - linelen, maps_file) != NULL) {
linelen = strlen(line);
if (line[linelen - 1] != '\n') {
@@ -535,7 +687,9 @@ search_linux_map_for_section(proc_handle_t *handle, const char* secname, const c
if (!biggerline) {
PyMem_Free(line);
fclose(maps_file);
- PyErr_NoMemory();
+ _set_debug_exception_cause(PyExc_MemoryError,
+ "Cannot reallocate memory while reading process map file '%s' (attempted size: %zu)",
+ maps_file_path, linesz);
return 0;
}
line = biggerline;
@@ -546,6 +700,7 @@ search_linux_map_for_section(proc_handle_t *handle, const char* secname, const c
line[linelen - 1] = '\0';
// and prepare to read the next line into the start of the buffer.
linelen = 0;
+ lines_processed++;
unsigned long start = 0;
unsigned long path_pos = 0;
@@ -566,6 +721,7 @@ search_linux_map_for_section(proc_handle_t *handle, const char* secname, const c
}
if (strstr(filename, substr)) {
+ matches_found++;
retval = search_elf_file_for_section(handle, secname, start, path);
if (retval) {
break;
@@ -575,7 +731,9 @@ search_linux_map_for_section(proc_handle_t *handle, const char* secname, const c
PyMem_Free(line);
if (fclose(maps_file) != 0) {
- PyErr_SetFromErrno(PyExc_OSError);
+ PyErr_Format(PyExc_OSError,
+ "Failed to close process map file '%s': %s",
+ maps_file_path, strerror(errno));
retval = 0;
}
@@ -591,11 +749,20 @@ static void* analyze_pe(const wchar_t* mod_path, BYTE* remote_base, const char*
HANDLE hFile = CreateFileW(mod_path, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
if (hFile == INVALID_HANDLE_VALUE) {
+        DWORD error = GetLastError();  // capture before PyErr_* calls can reset it
         PyErr_SetFromWindowsErr(0);
+ PyErr_Format(PyExc_OSError,
+ "Cannot open PE file for section '%s' analysis (error %lu)",
+ secname, error);
return NULL;
}
+
HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_READONLY, 0, 0, 0);
if (!hMap) {
+        DWORD error = GetLastError();  // capture before PyErr_* calls can reset it
         PyErr_SetFromWindowsErr(0);
+ PyErr_Format(PyExc_OSError,
+ "Cannot create file mapping for PE file section '%s' analysis (error %lu)",
+ secname, error);
CloseHandle(hFile);
return NULL;
}
@@ -603,6 +770,10 @@ static void* analyze_pe(const wchar_t* mod_path, BYTE* remote_base, const char*
BYTE* mapView = (BYTE*)MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);
if (!mapView) {
+        DWORD error = GetLastError();  // capture before PyErr_* calls can reset it
         PyErr_SetFromWindowsErr(0);
+ PyErr_Format(PyExc_OSError,
+ "Cannot map view of PE file for section '%s' analysis (error %lu)",
+ secname, error);
CloseHandle(hMap);
CloseHandle(hFile);
return NULL;
@@ -610,7 +781,9 @@ static void* analyze_pe(const wchar_t* mod_path, BYTE* remote_base, const char*
IMAGE_DOS_HEADER* pDOSHeader = (IMAGE_DOS_HEADER*)mapView;
if (pDOSHeader->e_magic != IMAGE_DOS_SIGNATURE) {
- PyErr_SetString(PyExc_RuntimeError, "Invalid DOS signature.");
+ PyErr_Format(PyExc_RuntimeError,
+ "Invalid DOS signature (0x%x) in PE file for section '%s' analysis (expected 0x%x)",
+ pDOSHeader->e_magic, secname, IMAGE_DOS_SIGNATURE);
UnmapViewOfFile(mapView);
CloseHandle(hMap);
CloseHandle(hFile);
@@ -619,7 +792,9 @@ static void* analyze_pe(const wchar_t* mod_path, BYTE* remote_base, const char*
IMAGE_NT_HEADERS* pNTHeaders = (IMAGE_NT_HEADERS*)(mapView + pDOSHeader->e_lfanew);
if (pNTHeaders->Signature != IMAGE_NT_SIGNATURE) {
- PyErr_SetString(PyExc_RuntimeError, "Invalid NT signature.");
+ PyErr_Format(PyExc_RuntimeError,
+ "Invalid NT signature (0x%lx) in PE file for section '%s' analysis (expected 0x%lx)",
+ pNTHeaders->Signature, secname, IMAGE_NT_SIGNATURE);
UnmapViewOfFile(mapView);
CloseHandle(hMap);
CloseHandle(hFile);
@@ -653,17 +828,27 @@ search_windows_map_for_section(proc_handle_t* handle, const char* secname, const
} while (hProcSnap == INVALID_HANDLE_VALUE && GetLastError() == ERROR_BAD_LENGTH);
if (hProcSnap == INVALID_HANDLE_VALUE) {
- PyErr_SetString(PyExc_PermissionError, "Unable to create module snapshot. Check permissions or PID.");
+        DWORD error = GetLastError();  // capture before PyErr_* calls can reset it
+        PyErr_SetFromWindowsErr(error);
+ PyErr_Format(PyExc_PermissionError,
+ "Unable to create module snapshot for PID %d section '%s' "
+ "search (error %lu). Check permissions or PID validity",
+ handle->pid, secname, error);
return 0;
}
MODULEENTRY32W moduleEntry;
moduleEntry.dwSize = sizeof(moduleEntry);
void* runtime_addr = NULL;
+ int modules_examined = 0;
+ int matches_found = 0;
for (BOOL hasModule = Module32FirstW(hProcSnap, &moduleEntry); hasModule; hasModule = Module32NextW(hProcSnap, &moduleEntry)) {
+ modules_examined++;
+
// Look for either python executable or DLL
if (wcsstr(moduleEntry.szModule, substr)) {
+ matches_found++;
runtime_addr = analyze_pe(moduleEntry.szExePath, moduleEntry.modBaseAddr, secname);
if (runtime_addr != NULL) {
break;
@@ -672,6 +857,7 @@ search_windows_map_for_section(proc_handle_t* handle, const char* secname, const
}
CloseHandle(hProcSnap);
+
return (uintptr_t)runtime_addr;
}
@@ -689,7 +875,9 @@ _Py_RemoteDebug_GetPyRuntimeAddress(proc_handle_t* handle)
if (address == 0) {
// Error out: 'python' substring covers both executable and DLL
PyObject *exc = PyErr_GetRaisedException();
- PyErr_SetString(PyExc_RuntimeError, "Failed to find the PyRuntime section in the process.");
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to find the PyRuntime section in process %d on Windows platform",
+ handle->pid);
_PyErr_ChainExceptions1(exc);
}
#elif defined(__linux__)
@@ -698,16 +886,28 @@ _Py_RemoteDebug_GetPyRuntimeAddress(proc_handle_t* handle)
if (address == 0) {
// Error out: 'python' substring covers both executable and DLL
PyObject *exc = PyErr_GetRaisedException();
- PyErr_SetString(PyExc_RuntimeError, "Failed to find the PyRuntime section in the process.");
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to find the PyRuntime section in process %d on Linux platform",
+ handle->pid);
_PyErr_ChainExceptions1(exc);
}
-#elif defined(__APPLE__) && TARGET_OS_OSX
+#elif defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
-    // On macOS, try libpython first, then fall back to python
+    // On macOS, try each candidate in turn: libpython, then python/Python
- address = search_map_for_section(handle, "PyRuntime", "libpython");
- if (address == 0) {
- // TODO: Differentiate between not found and error
+ const char* candidates[] = {"libpython", "python", "Python", NULL};
+ for (const char** candidate = candidates; *candidate; candidate++) {
PyErr_Clear();
- address = search_map_for_section(handle, "PyRuntime", "python");
+ address = search_map_for_section(handle, "PyRuntime", *candidate);
+ if (address != 0) {
+ break;
+ }
+ }
+ if (address == 0) {
+ PyObject *exc = PyErr_GetRaisedException();
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to find the PyRuntime section in process %d "
+ "on macOS platform (tried both libpython and python)",
+ handle->pid);
+ _PyErr_ChainExceptions1(exc);
}
#else
Py_UNREACHABLE();
@@ -716,6 +916,61 @@ _Py_RemoteDebug_GetPyRuntimeAddress(proc_handle_t* handle)
return address;
}
+#if defined(__linux__) && HAVE_PROCESS_VM_READV
+
+static int
+open_proc_mem_fd(proc_handle_t *handle)
+{
+ char mem_file_path[64];
+    snprintf(mem_file_path, sizeof(mem_file_path), "/proc/%d/mem", handle->pid);
+
+ handle->memfd = open(mem_file_path, O_RDWR);
+ if (handle->memfd == -1) {
+        int saved_errno = errno;  // PyErr_SetFromErrno() may clobber errno
+        PyErr_SetFromErrno(PyExc_OSError);
+        _set_debug_exception_cause(PyExc_OSError,
+            "failed to open file %s: %s", mem_file_path, strerror(saved_errno));
+ return -1;
+ }
+ return 0;
+}
+
+// Why is preadv not guarded? Except on Android API level 23 (no longer
+// supported), HAVE_PROCESS_VM_READV is sufficient.
+static int
+read_remote_memory_fallback(proc_handle_t *handle, uintptr_t remote_address, size_t len, void* dst)
+{
+ if (handle->memfd == -1) {
+ if (open_proc_mem_fd(handle) < 0) {
+ return -1;
+ }
+ }
+
+ struct iovec local[1];
+ Py_ssize_t result = 0;
+ Py_ssize_t read_bytes = 0;
+
+ do {
+ local[0].iov_base = (char*)dst + result;
+ local[0].iov_len = len - result;
+ off_t offset = remote_address + result;
+
+ read_bytes = preadv(handle->memfd, local, 1, offset);
+ if (read_bytes < 0) {
+            int saved_errno = errno;  // PyErr_SetFromErrno() may clobber errno
+            PyErr_SetFromErrno(PyExc_OSError);
+            _set_debug_exception_cause(PyExc_OSError,
+                "preadv failed for PID %d at address 0x%lx "
+                "(size %zu, partial read %zd bytes): %s",
+                handle->pid, remote_address + result, len - result, result, strerror(saved_errno));
+ return -1;
+ }
+
+ result += read_bytes;
+ } while ((size_t)read_bytes != local[0].iov_len);
+ return 0;
+}
+
+#endif // __linux__
+
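For context, the /proc/<pid>/mem technique this fallback relies on can be
exercised standalone. A minimal sketch, not code from this patch: it assumes
a target PID you are permitted to ptrace, and uses pread() rather than
preadv() since a single destination buffer suffices:

/* Standalone sketch of reading another process via /proc/<pid>/mem.
 * pid and addr are assumptions supplied by the caller. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int
read_process_mem(pid_t pid, uintptr_t addr, void *dst, size_t len)
{
    char path[64];
    snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
    int fd = open(path, O_RDONLY);  /* requires ptrace permission on the target */
    if (fd < 0) {
        return -1;
    }
    size_t done = 0;
    while (done < len) {
        ssize_t n = pread(fd, (char *)dst + done, len - done,
                          (off_t)(addr + done));
        if (n <= 0) {  /* error, or the mapping ended early */
            close(fd);
            return -1;
        }
        done += (size_t)n;
    }
    close(fd);
    return 0;
}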
// Platform-independent memory read function
static int
_Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address, size_t len, void* dst)
@@ -726,12 +981,20 @@ _Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address
do {
if (!ReadProcessMemory(handle->hProcess, (LPCVOID)(remote_address + result), (char*)dst + result, len - result, &read_bytes)) {
PyErr_SetFromWindowsErr(0);
+ DWORD error = GetLastError();
+ _set_debug_exception_cause(PyExc_OSError,
+ "ReadProcessMemory failed for PID %d at address 0x%lx "
+ "(size %zu, partial read %zu bytes): Windows error %lu",
+ handle->pid, remote_address + result, len - result, result, error);
return -1;
}
result += read_bytes;
} while (result < len);
return 0;
#elif defined(__linux__) && HAVE_PROCESS_VM_READV
+ if (handle->memfd != -1) {
+ return read_remote_memory_fallback(handle, remote_address, len, dst);
+ }
struct iovec local[1];
struct iovec remote[1];
Py_ssize_t result = 0;
@@ -745,17 +1008,24 @@ _Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address
read_bytes = process_vm_readv(handle->pid, local, 1, remote, 1, 0);
if (read_bytes < 0) {
+ if (errno == ENOSYS) {
+ return read_remote_memory_fallback(handle, remote_address, len, dst);
+            }
+            int saved_errno = errno;  // PyErr_SetFromErrno() may clobber errno
             PyErr_SetFromErrno(PyExc_OSError);
+            _set_debug_exception_cause(PyExc_OSError,
+                "process_vm_readv failed for PID %d at address 0x%lx "
+                "(size %zu, partial read %zd bytes): %s",
+                handle->pid, remote_address + result, len - result, result, strerror(saved_errno));
return -1;
}
result += read_bytes;
} while ((size_t)read_bytes != local[0].iov_len);
return 0;
-#elif defined(__APPLE__) && TARGET_OS_OSX
+#elif defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
Py_ssize_t result = -1;
kern_return_t kr = mach_vm_read_overwrite(
- pid_to_task(handle->pid),
+ handle->task,
(mach_vm_address_t)remote_address,
len,
(mach_vm_address_t)dst,
@@ -764,13 +1034,22 @@ _Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address
if (kr != KERN_SUCCESS) {
switch (kr) {
case KERN_PROTECTION_FAILURE:
- PyErr_SetString(PyExc_PermissionError, "Not enough permissions to read memory");
+ PyErr_Format(PyExc_PermissionError,
+ "Memory protection failure reading from PID %d at address "
+ "0x%lx (size %zu): insufficient permissions",
+ handle->pid, remote_address, len);
break;
case KERN_INVALID_ARGUMENT:
- PyErr_SetString(PyExc_PermissionError, "Invalid argument to mach_vm_read_overwrite");
+ PyErr_Format(PyExc_ValueError,
+ "Invalid argument to mach_vm_read_overwrite for PID %d at "
+ "address 0x%lx (size %zu)",
+ handle->pid, remote_address, len);
break;
default:
- PyErr_SetString(PyExc_RuntimeError, "Unknown error reading memory");
+ PyErr_Format(PyExc_RuntimeError,
+ "mach_vm_read_overwrite failed for PID %d at address 0x%lx "
+ "(size %zu): kern_return_t %d",
+ handle->pid, remote_address, len, kr);
}
return -1;
}
@@ -780,6 +1059,62 @@ _Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address
#endif
}
+int
+_Py_RemoteDebug_PagedReadRemoteMemory(proc_handle_t *handle,
+ uintptr_t addr,
+ size_t size,
+ void *out)
+{
+ size_t page_size = handle->page_size;
+ uintptr_t page_base = addr & ~(page_size - 1);
+ size_t offset_in_page = addr - page_base;
+
+ if (offset_in_page + size > page_size) {
+ return _Py_RemoteDebug_ReadRemoteMemory(handle, addr, size, out);
+ }
+
+ // Search for valid cached page
+ for (int i = 0; i < MAX_PAGES; i++) {
+ page_cache_entry_t *entry = &handle->pages[i];
+ if (entry->valid && entry->page_addr == page_base) {
+ memcpy(out, entry->data + offset_in_page, size);
+ return 0;
+ }
+ }
+
+ // Find reusable slot
+ for (int i = 0; i < MAX_PAGES; i++) {
+ page_cache_entry_t *entry = &handle->pages[i];
+ if (!entry->valid) {
+ if (entry->data == NULL) {
+ entry->data = PyMem_RawMalloc(page_size);
+ if (entry->data == NULL) {
+ _set_debug_exception_cause(PyExc_MemoryError,
+ "Cannot allocate %zu bytes for page cache entry "
+ "during read from PID %d at address 0x%lx",
+ page_size, handle->pid, addr);
+ return -1;
+ }
+ }
+
+ if (_Py_RemoteDebug_ReadRemoteMemory(handle, page_base, page_size, entry->data) < 0) {
+            // Fall back to reading just the requested amount
+ PyErr_Clear();
+ goto fallback;
+ }
+
+ entry->page_addr = page_base;
+ entry->valid = 1;
+ memcpy(out, entry->data + offset_in_page, size);
+ return 0;
+ }
+ }
+
+fallback:
+    // Cache is full: fall back to an uncached read
+ return _Py_RemoteDebug_ReadRemoteMemory(handle, addr, size, out);
+}
+
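The cache keys pages by addr & ~(page_size - 1), which is only a valid page
mask when page_size is a power of two. The addressing arithmetic, checked
with assumed example values:

/* Page-cache addressing from above, verified with assumed values. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
    size_t page_size = 0x1000;  /* must be a power of two for the mask */
    uintptr_t addr = 0x7f12345678;
    uintptr_t page_base = addr & ~(uintptr_t)(page_size - 1);
    size_t offset_in_page = addr - page_base;

    assert(page_base == 0x7f12345000);
    assert(offset_in_page == 0x678);
    /* A request is served from the cache only when it fits in one page
     * (offset_in_page + size <= page_size); otherwise it falls through to
     * the uncached read, exactly as the guard at the top of the function. */
    return 0;
}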
static int
_Py_RemoteDebug_ReadDebugOffsets(
proc_handle_t *handle,
@@ -789,13 +1124,16 @@ _Py_RemoteDebug_ReadDebugOffsets(
*runtime_start_address = _Py_RemoteDebug_GetPyRuntimeAddress(handle);
if (!*runtime_start_address) {
if (!PyErr_Occurred()) {
- PyErr_SetString(
- PyExc_RuntimeError, "Failed to get PyRuntime address");
+ PyErr_Format(PyExc_RuntimeError,
+ "Failed to locate PyRuntime address for PID %d",
+ handle->pid);
}
+        _set_debug_exception_cause(PyExc_RuntimeError,
+            "PyRuntime address lookup failed during debug offsets initialization");
return -1;
}
size_t size = sizeof(struct _Py_DebugOffsets);
if (0 != _Py_RemoteDebug_ReadRemoteMemory(handle, *runtime_start_address, size, debug_offsets)) {
+        _set_debug_exception_cause(PyExc_RuntimeError,
+            "Failed to read debug offsets structure from remote process");
return -1;
}
return 0;
diff --git a/Python/remote_debugging.c b/Python/remote_debugging.c
index dd55b7812d4..7aee87ef05a 100644
--- a/Python/remote_debugging.c
+++ b/Python/remote_debugging.c
@@ -24,6 +24,39 @@ read_memory(proc_handle_t *handle, uint64_t remote_address, size_t len, void* ds
return _Py_RemoteDebug_ReadRemoteMemory(handle, remote_address, len, dst);
}
+// Why is pwritev not guarded? Except on Android API level 23 (no longer
+// supported), HAVE_PROCESS_VM_READV is sufficient.
+#if defined(__linux__) && HAVE_PROCESS_VM_READV
+static int
+write_memory_fallback(proc_handle_t *handle, uintptr_t remote_address, size_t len, const void* src)
+{
+ if (handle->memfd == -1) {
+ if (open_proc_mem_fd(handle) < 0) {
+ return -1;
+ }
+ }
+
+ struct iovec local[1];
+ Py_ssize_t result = 0;
+ Py_ssize_t written = 0;
+
+ do {
+ local[0].iov_base = (char*)src + result;
+ local[0].iov_len = len - result;
+ off_t offset = remote_address + result;
+
+ written = pwritev(handle->memfd, local, 1, offset);
+ if (written < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return -1;
+ }
+
+ result += written;
+ } while ((size_t)written != local[0].iov_len);
+ return 0;
+}
+#endif // __linux__
+
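The ENOSYS branches exist because some kernels and container seccomp profiles
lack the process_vm_* syscalls; once the fallback fires, memfd stays open and
later reads and writes take the /proc/<pid>/mem path directly. A
self-contained probe (illustrative, not from this patch) that exercises the
same detection by reading one byte from the current process:

/* Probe whether process_vm_readv() works here; ENOSYS is the case the
 * fallback above switches on. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
    char src = 'x', dst = 0;
    struct iovec local  = { .iov_base = &dst, .iov_len = 1 };
    struct iovec remote = { .iov_base = &src, .iov_len = 1 };
    if (process_vm_readv(getpid(), &local, 1, &remote, 1, 0) != 1) {
        if (errno == ENOSYS) {
            puts("no process_vm_readv: would switch to /proc/<pid>/mem");
        }
        else {
            perror("process_vm_readv");
        }
        return 1;
    }
    printf("read back '%c' via process_vm_readv\n", dst);
    return 0;
}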
static int
write_memory(proc_handle_t *handle, uintptr_t remote_address, size_t len, const void* src)
{
@@ -39,6 +72,9 @@ write_memory(proc_handle_t *handle, uintptr_t remote_address, size_t len, const
} while (result < len);
return 0;
#elif defined(__linux__) && HAVE_PROCESS_VM_READV
+ if (handle->memfd != -1) {
+ return write_memory_fallback(handle, remote_address, len, src);
+ }
struct iovec local[1];
struct iovec remote[1];
Py_ssize_t result = 0;
@@ -52,6 +88,9 @@ write_memory(proc_handle_t *handle, uintptr_t remote_address, size_t len, const
written = process_vm_writev(handle->pid, local, 1, remote, 1, 0);
if (written < 0) {
+ if (errno == ENOSYS) {
+ return write_memory_fallback(handle, remote_address, len, src);
+ }
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
diff --git a/Python/specialize.c b/Python/specialize.c
index 06995d46d8b..92f79d39d55 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -2904,53 +2904,57 @@ int
#endif // Py_STATS
Py_NO_INLINE void
-_Py_Specialize_ForIter(_PyStackRef iter, _Py_CODEUNIT *instr, int oparg)
+_Py_Specialize_ForIter(_PyStackRef iter, _PyStackRef null_or_index, _Py_CODEUNIT *instr, int oparg)
{
assert(ENABLE_SPECIALIZATION_FT);
assert(_PyOpcode_Caches[FOR_ITER] == INLINE_CACHE_ENTRIES_FOR_ITER);
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
PyTypeObject *tp = Py_TYPE(iter_o);
+
+ if (PyStackRef_IsNull(null_or_index)) {
#ifdef Py_GIL_DISABLED
- // Only specialize for uniquely referenced iterators, so that we know
- // they're only referenced by this one thread. This is more limiting
- // than we need (even `it = iter(mylist); for item in it:` won't get
- // specialized) but we don't have a way to check whether we're the only
- // _thread_ who has access to the object.
- if (!_PyObject_IsUniquelyReferenced(iter_o))
- goto failure;
-#endif
- if (tp == &PyListIter_Type) {
-#ifdef Py_GIL_DISABLED
- _PyListIterObject *it = (_PyListIterObject *)iter_o;
- if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
- !_PyObject_GC_IS_SHARED(it->it_seq)) {
- // Maybe this should just set GC_IS_SHARED in a critical
- // section, instead of leaving it to the first iteration?
+ // Only specialize for uniquely referenced iterators, so that we know
+ // they're only referenced by this one thread. This is more limiting
+ // than we need (even `it = iter(mylist); for item in it:` won't get
+ // specialized) but we don't have a way to check whether we're the only
+ // _thread_ who has access to the object.
+ if (!_PyObject_IsUniquelyReferenced(iter_o)) {
goto failure;
}
#endif
- specialize(instr, FOR_ITER_LIST);
- return;
- }
- else if (tp == &PyTupleIter_Type) {
- specialize(instr, FOR_ITER_TUPLE);
- return;
- }
- else if (tp == &PyRangeIter_Type) {
- specialize(instr, FOR_ITER_RANGE);
- return;
+ if (tp == &PyRangeIter_Type) {
+ specialize(instr, FOR_ITER_RANGE);
+ return;
+ }
+ else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
+ // Generators are very much not thread-safe, so don't worry about
+ // the specialization not being thread-safe.
+ assert(instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == END_FOR ||
+ instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == INSTRUMENTED_END_FOR
+ );
+ /* Don't specialize if PEP 523 is active */
+ if (_PyInterpreterState_GET()->eval_frame) {
+ goto failure;
+ }
+ specialize(instr, FOR_ITER_GEN);
+ return;
+ }
}
- else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
- // Generators are very much not thread-safe, so don't worry about
- // the specialization not being thread-safe.
- assert(instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == END_FOR ||
- instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == INSTRUMENTED_END_FOR
- );
- /* Don't specialize if PEP 523 is active */
- if (_PyInterpreterState_GET()->eval_frame)
- goto failure;
- specialize(instr, FOR_ITER_GEN);
- return;
+ else {
+ if (tp == &PyList_Type) {
+#ifdef Py_GIL_DISABLED
+ // Only specialize for lists owned by this thread or shared
+ if (!_Py_IsOwnedByCurrentThread(iter_o) && !_PyObject_GC_IS_SHARED(iter_o)) {
+ goto failure;
+ }
+#endif
+ specialize(instr, FOR_ITER_LIST);
+ return;
+ }
+ else if (tp == &PyTuple_Type) {
+ specialize(instr, FOR_ITER_TUPLE);
+ return;
+ }
}
failure:
SPECIALIZATION_FAIL(FOR_ITER,
diff --git a/Python/stackrefs.c b/Python/stackrefs.c
index 69d4e8b9431..ecc0012ef17 100644
--- a/Python/stackrefs.c
+++ b/Python/stackrefs.c
@@ -40,6 +40,7 @@ make_table_entry(PyObject *obj, const char *filename, int linenumber)
PyObject *
_Py_stackref_get_object(_PyStackRef ref)
{
+ assert(!PyStackRef_IsError(ref));
if (ref.index == 0) {
return NULL;
}
@@ -64,6 +65,7 @@ PyStackRef_Is(_PyStackRef a, _PyStackRef b)
PyObject *
_Py_stackref_close(_PyStackRef ref, const char *filename, int linenumber)
{
+ assert(!PyStackRef_IsError(ref));
PyInterpreterState *interp = PyInterpreterState_Get();
if (ref.index >= interp->next_stackref) {
_Py_FatalErrorFormat(__func__, "Invalid StackRef with ID %" PRIu64 " at %s:%d\n", (void *)ref.index, filename, linenumber);
@@ -128,6 +130,7 @@ _Py_stackref_create(PyObject *obj, const char *filename, int linenumber)
void
_Py_stackref_record_borrow(_PyStackRef ref, const char *filename, int linenumber)
{
+ assert(!PyStackRef_IsError(ref));
if (ref.index < INITIAL_STACKREF_INDEX) {
return;
}
@@ -152,6 +155,7 @@ _Py_stackref_record_borrow(_PyStackRef ref, const char *filename, int linenumber
void
_Py_stackref_associate(PyInterpreterState *interp, PyObject *obj, _PyStackRef ref)
{
+ assert(!PyStackRef_IsError(ref));
assert(ref.index < INITIAL_STACKREF_INDEX);
TableEntry *entry = make_table_entry(obj, "builtin-object", 0);
if (entry == NULL) {
@@ -216,4 +220,12 @@ PyStackRef_IsNullOrInt(_PyStackRef ref)
return PyStackRef_IsNull(ref) || PyStackRef_IsTaggedInt(ref);
}
+_PyStackRef
+PyStackRef_IncrementTaggedIntNoOverflow(_PyStackRef ref)
+{
+ assert(ref.index <= INT_MAX - 2); // No overflow
+ return (_PyStackRef){ .index = ref.index + 2 };
+}
+
+
#endif
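The "+ 2" rests on the tagging scheme: assuming tagged integers store their
payload as (value << 1) | 1, with the low bit as the tag, adding 2 to the raw
word increments the payload by one while preserving the tag. A self-check
under that assumption:

/* Low-bit tagging arithmetic behind the increment above (assumed scheme). */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t raw = ((uint64_t)41 << 1) | 1;  /* tagged 41 */
    raw += 2;                                /* the "+ 2" increment */
    assert((raw & 1) == 1);                  /* tag bit preserved */
    assert((raw >> 1) == 42);                /* payload incremented by one */
    return 0;
}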
diff --git a/Python/sysmodule.c b/Python/sysmodule.c
index 4ed045e3297..e5ae841d195 100644
--- a/Python/sysmodule.c
+++ b/Python/sysmodule.c
@@ -76,12 +76,12 @@ module sys
PyObject *
-_PySys_GetRequiredAttr(PyObject *name)
+PySys_GetAttr(PyObject *name)
{
if (!PyUnicode_Check(name)) {
PyErr_Format(PyExc_TypeError,
- "attribute name must be string, not '%.200s'",
- Py_TYPE(name)->tp_name);
+ "attribute name must be string, not '%T'",
+ name);
return NULL;
}
PyThreadState *tstate = _PyThreadState_GET();
@@ -98,7 +98,7 @@ _PySys_GetRequiredAttr(PyObject *name)
}
PyObject *
-_PySys_GetRequiredAttrString(const char *name)
+PySys_GetAttrString(const char *name)
{
PyThreadState *tstate = _PyThreadState_GET();
PyObject *sysdict = tstate->interp->sysdict;
@@ -114,12 +114,12 @@ _PySys_GetRequiredAttrString(const char *name)
}
int
-_PySys_GetOptionalAttr(PyObject *name, PyObject **value)
+PySys_GetOptionalAttr(PyObject *name, PyObject **value)
{
if (!PyUnicode_Check(name)) {
PyErr_Format(PyExc_TypeError,
- "attribute name must be string, not '%.200s'",
- Py_TYPE(name)->tp_name);
+ "attribute name must be string, not '%T'",
+ name);
*value = NULL;
return -1;
}
@@ -133,7 +133,7 @@ _PySys_GetOptionalAttr(PyObject *name, PyObject **value)
}
int
-_PySys_GetOptionalAttrString(const char *name, PyObject **value)
+PySys_GetOptionalAttrString(const char *name, PyObject **value)
{
PyThreadState *tstate = _PyThreadState_GET();
PyObject *sysdict = tstate->interp->sysdict;
@@ -773,7 +773,7 @@ sys_displayhook(PyObject *module, PyObject *o)
}
if (PyObject_SetAttr(builtins, _Py_LATIN1_CHR('_'), Py_None) != 0)
return NULL;
- outf = _PySys_GetRequiredAttr(&_Py_ID(stdout));
+ outf = PySys_GetAttr(&_Py_ID(stdout));
if (outf == NULL) {
return NULL;
}
@@ -3005,7 +3005,7 @@ static PyObject *
get_warnoptions(PyThreadState *tstate)
{
PyObject *warnoptions;
- if (_PySys_GetOptionalAttr(&_Py_ID(warnoptions), &warnoptions) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(warnoptions), &warnoptions) < 0) {
return NULL;
}
if (warnoptions == NULL || !PyList_Check(warnoptions)) {
@@ -3042,7 +3042,7 @@ PySys_ResetWarnOptions(void)
}
PyObject *warnoptions;
- if (_PySys_GetOptionalAttr(&_Py_ID(warnoptions), &warnoptions) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(warnoptions), &warnoptions) < 0) {
PyErr_Clear();
return;
}
@@ -3106,7 +3106,7 @@ PyAPI_FUNC(int)
PySys_HasWarnOptions(void)
{
PyObject *warnoptions;
- if (_PySys_GetOptionalAttr(&_Py_ID(warnoptions), &warnoptions) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(warnoptions), &warnoptions) < 0) {
PyErr_Clear();
return 0;
}
@@ -3120,7 +3120,7 @@ static PyObject *
get_xoptions(PyThreadState *tstate)
{
PyObject *xoptions;
- if (_PySys_GetOptionalAttr(&_Py_ID(_xoptions), &xoptions) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(_xoptions), &xoptions) < 0) {
return NULL;
}
if (xoptions == NULL || !PyDict_Check(xoptions)) {
@@ -3373,7 +3373,7 @@ sys_set_flag(PyObject *flags, Py_ssize_t pos, PyObject *value)
int
_PySys_SetFlagObj(Py_ssize_t pos, PyObject *value)
{
- PyObject *flags = _PySys_GetRequiredAttrString("flags");
+ PyObject *flags = PySys_GetAttrString("flags");
if (flags == NULL) {
return -1;
}
@@ -3935,7 +3935,7 @@ _PySys_UpdateConfig(PyThreadState *tstate)
#undef COPY_WSTR
// sys.flags
- PyObject *flags = _PySys_GetRequiredAttrString("flags");
+ PyObject *flags = PySys_GetAttrString("flags");
if (flags == NULL) {
return -1;
}
@@ -4251,7 +4251,7 @@ PySys_SetArgvEx(int argc, wchar_t **argv, int updatepath)
}
PyObject *sys_path;
- if (_PySys_GetOptionalAttr(&_Py_ID(path), &sys_path) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(path), &sys_path) < 0) {
Py_FatalError("can't get sys.path");
}
else if (sys_path != NULL) {
@@ -4347,7 +4347,7 @@ sys_write(PyObject *key, FILE *fp, const char *format, va_list va)
PyObject *exc = _PyErr_GetRaisedException(tstate);
written = PyOS_vsnprintf(buffer, sizeof(buffer), format, va);
- file = _PySys_GetRequiredAttr(key);
+ file = PySys_GetAttr(key);
if (sys_pyfile_write(buffer, file) != 0) {
_PyErr_Clear(tstate);
fputs(buffer, fp);
@@ -4391,7 +4391,7 @@ sys_format(PyObject *key, FILE *fp, const char *format, va_list va)
PyObject *exc = _PyErr_GetRaisedException(tstate);
message = PyUnicode_FromFormatV(format, va);
if (message != NULL) {
- file = _PySys_GetRequiredAttr(key);
+ file = PySys_GetAttr(key);
if (sys_pyfile_write_unicode(message, file) != 0) {
_PyErr_Clear(tstate);
utf8 = PyUnicode_AsUTF8(message);
diff --git a/Python/thread.c b/Python/thread.c
index 4ff5f11a348..18c4af7f634 100644
--- a/Python/thread.c
+++ b/Python/thread.c
@@ -39,7 +39,8 @@
const long long PY_TIMEOUT_MAX = PY_TIMEOUT_MAX_VALUE;
-static void PyThread__init_thread(void); /* Forward */
+/* Forward declaration */
+static void PyThread__init_thread(void);
#define initialized _PyRuntime.threads.initialized
@@ -71,6 +72,79 @@ PyThread_init_thread(void)
#endif
+/*
+ * Lock support.
+ */
+
+PyThread_type_lock
+PyThread_allocate_lock(void)
+{
+ if (!initialized) {
+ PyThread_init_thread();
+ }
+
+ PyMutex *lock = (PyMutex *)PyMem_RawMalloc(sizeof(PyMutex));
+ if (lock) {
+ *lock = (PyMutex){0};
+ }
+
+ return (PyThread_type_lock)lock;
+}
+
+void
+PyThread_free_lock(PyThread_type_lock lock)
+{
+ PyMem_RawFree(lock);
+}
+
+PyLockStatus
+PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
+ int intr_flag)
+{
+ PyTime_t timeout; // relative timeout
+ if (microseconds >= 0) {
+ // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
+ // overflow to the caller, so clamp the timeout to
+ // [PyTime_MIN, PyTime_MAX].
+ //
+ // PyTime_MAX nanoseconds is around 292.3 years.
+ //
+ // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
+ // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
+ timeout = _PyTime_FromMicrosecondsClamp(microseconds);
+ }
+ else {
+ timeout = -1;
+ }
+
+ _PyLockFlags flags = _Py_LOCK_DONT_DETACH;
+ if (intr_flag) {
+ flags |= _PY_FAIL_IF_INTERRUPTED;
+ }
+
+ return _PyMutex_LockTimed((PyMutex *)lock, timeout, flags);
+}
+
+void
+PyThread_release_lock(PyThread_type_lock lock)
+{
+ PyMutex_Unlock((PyMutex *)lock);
+}
+
+int
+_PyThread_at_fork_reinit(PyThread_type_lock *lock)
+{
+    _PyMutex_at_fork_reinit((PyMutex *)*lock);  // reinit the mutex *lock points to
+ return 0;
+}
+
+int
+PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
+{
+ return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
+}
+
+
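These entry points keep their historical semantics (PyThread_acquire_lock()
returns 1 on success; the timed variant returns a PyLockStatus) while
delegating to PyMutex, so callers need no changes. A short usage sketch:

/* Unchanged usage of the lock API, now backed by PyMutex underneath. */
#include <Python.h>

void
example_critical_section(void)
{
    PyThread_type_lock lock = PyThread_allocate_lock();
    if (lock == NULL) {
        return;  /* allocation failed */
    }
    if (PyThread_acquire_lock(lock, WAIT_LOCK)) {  /* blocks until acquired */
        /* ... critical section ... */
        PyThread_release_lock(lock);
    }
    /* Timed variant: wait at most 50 ms, not interruptible. */
    if (PyThread_acquire_lock_timed(lock, 50000, 0) == PY_LOCK_ACQUIRED) {
        PyThread_release_lock(lock);
    }
    PyThread_free_lock(lock);
}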
/* return the current thread stack size */
size_t
PyThread_get_stacksize(void)
@@ -261,11 +335,7 @@ PyThread_GetInfo(void)
#ifdef HAVE_PTHREAD_STUBS
value = Py_NewRef(Py_None);
#elif defined(_POSIX_THREADS)
-#ifdef USE_SEMAPHORES
- value = PyUnicode_FromString("semaphore");
-#else
- value = PyUnicode_FromString("mutex+cond");
-#endif
+ value = PyUnicode_FromString("pymutex");
if (value == NULL) {
Py_DECREF(threadinfo);
return NULL;
diff --git a/Python/thread_nt.h b/Python/thread_nt.h
index e078b98be3c..9a29d14ef67 100644
--- a/Python/thread_nt.h
+++ b/Python/thread_nt.h
@@ -300,98 +300,6 @@ PyThread_hang_thread(void)
}
}
-/*
- * Lock support. It has to be implemented as semaphores.
- * I [Dag] tried to implement it with mutex but I could find a way to
- * tell whether a thread already own the lock or not.
- */
-PyThread_type_lock
-PyThread_allocate_lock(void)
-{
- PNRMUTEX mutex;
-
- if (!initialized)
- PyThread_init_thread();
-
- mutex = AllocNonRecursiveMutex() ;
-
- PyThread_type_lock aLock = (PyThread_type_lock) mutex;
- assert(aLock);
-
- return aLock;
-}
-
-void
-PyThread_free_lock(PyThread_type_lock aLock)
-{
- FreeNonRecursiveMutex(aLock) ;
-}
-
-// WaitForSingleObject() accepts timeout in milliseconds in the range
-// [0; 0xFFFFFFFE] (DWORD type). INFINITE value (0xFFFFFFFF) means no
-// timeout. 0xFFFFFFFE milliseconds is around 49.7 days.
-const DWORD TIMEOUT_MS_MAX = 0xFFFFFFFE;
-
-/*
- * Return 1 on success if the lock was acquired
- *
- * and 0 if the lock was not acquired. This means a 0 is returned
- * if the lock has already been acquired by this thread!
- */
-PyLockStatus
-PyThread_acquire_lock_timed(PyThread_type_lock aLock,
- PY_TIMEOUT_T microseconds, int intr_flag)
-{
- assert(aLock);
-
- /* Fow now, intr_flag does nothing on Windows, and lock acquires are
- * uninterruptible. */
- PyLockStatus success;
- PY_TIMEOUT_T milliseconds;
-
- if (microseconds >= 0) {
- milliseconds = microseconds / 1000;
- // Round milliseconds away from zero
- if (microseconds % 1000 > 0) {
- milliseconds++;
- }
- if (milliseconds > (PY_TIMEOUT_T)TIMEOUT_MS_MAX) {
- // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
- // overflow to the caller, so clamp the timeout to
- // [0, TIMEOUT_MS_MAX] milliseconds.
- //
- // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
- // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
- milliseconds = TIMEOUT_MS_MAX;
- }
- assert(milliseconds != INFINITE);
- }
- else {
- milliseconds = INFINITE;
- }
-
- if (EnterNonRecursiveMutex((PNRMUTEX)aLock,
- (DWORD)milliseconds) == WAIT_OBJECT_0) {
- success = PY_LOCK_ACQUIRED;
- }
- else {
- success = PY_LOCK_FAILURE;
- }
-
- return success;
-}
-int
-PyThread_acquire_lock(PyThread_type_lock aLock, int waitflag)
-{
- return PyThread_acquire_lock_timed(aLock, waitflag ? -1 : 0, 0);
-}
-
-void
-PyThread_release_lock(PyThread_type_lock aLock)
-{
- assert(aLock);
- (void)LeaveNonRecursiveMutex((PNRMUTEX) aLock);
-}
/* minimum/maximum thread stack sizes supported */
#define THREAD_MIN_STACKSIZE 0x8000 /* 32 KiB */
diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h
index da405824244..13992f95723 100644
--- a/Python/thread_pthread.h
+++ b/Python/thread_pthread.h
@@ -99,16 +99,6 @@
#undef HAVE_SEM_CLOCKWAIT
#endif
-/* Whether or not to use semaphores directly rather than emulating them with
- * mutexes and condition variables:
- */
-#if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
- (defined(HAVE_SEM_TIMEDWAIT) || defined(HAVE_SEM_CLOCKWAIT)))
-# define USE_SEMAPHORES
-#else
-# undef USE_SEMAPHORES
-#endif
-
/* On platforms that don't use standard POSIX threads pthread_sigmask()
* isn't present. DEC threads uses sigprocmask() instead as do most
@@ -442,388 +432,6 @@ PyThread_hang_thread(void)
}
}
-#ifdef USE_SEMAPHORES
-
-/*
- * Lock support.
- */
-
-PyThread_type_lock
-PyThread_allocate_lock(void)
-{
- sem_t *lock;
- int status, error = 0;
-
- if (!initialized)
- PyThread_init_thread();
-
- lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));
-
- if (lock) {
- status = sem_init(lock,0,1);
- CHECK_STATUS("sem_init");
-
- if (error) {
- PyMem_RawFree((void *)lock);
- lock = NULL;
- }
- }
-
- return (PyThread_type_lock)lock;
-}
-
-void
-PyThread_free_lock(PyThread_type_lock lock)
-{
- sem_t *thelock = (sem_t *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- if (!thelock)
- return;
-
- status = sem_destroy(thelock);
- CHECK_STATUS("sem_destroy");
-
- PyMem_RawFree((void *)thelock);
-}
-
-/*
- * As of February 2002, Cygwin thread implementations mistakenly report error
- * codes in the return value of the sem_ calls (like the pthread_ functions).
- * Correct implementations return -1 and put the code in errno. This supports
- * either.
- */
-static int
-fix_status(int status)
-{
- return (status == -1) ? errno : status;
-}
-
-PyLockStatus
-PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
- int intr_flag)
-{
- PyLockStatus success;
- sem_t *thelock = (sem_t *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- PyTime_t timeout; // relative timeout
- if (microseconds >= 0) {
- // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
- // overflow to the caller, so clamp the timeout to
- // [PyTime_MIN, PyTime_MAX].
- //
- // PyTime_MAX nanoseconds is around 292.3 years.
- //
- // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
- // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
- timeout = _PyTime_FromMicrosecondsClamp(microseconds);
- }
- else {
- timeout = -1;
- }
-
-#ifdef HAVE_SEM_CLOCKWAIT
- struct timespec abs_timeout;
- // Local scope for deadline
- {
- PyTime_t now;
- // silently ignore error: cannot report error to the caller
- (void)PyTime_MonotonicRaw(&now);
- PyTime_t deadline = _PyTime_Add(now, timeout);
- _PyTime_AsTimespec_clamp(deadline, &abs_timeout);
- }
-#else
- PyTime_t deadline = 0;
- if (timeout > 0 && !intr_flag) {
- deadline = _PyDeadline_Init(timeout);
- }
-#endif
-
- while (1) {
- if (timeout > 0) {
-#ifdef HAVE_SEM_CLOCKWAIT
- status = fix_status(sem_clockwait(thelock, CLOCK_MONOTONIC,
- &abs_timeout));
-#else
- PyTime_t now;
- // silently ignore error: cannot report error to the caller
- (void)PyTime_TimeRaw(&now);
- PyTime_t abs_time = _PyTime_Add(now, timeout);
-
- struct timespec ts;
- _PyTime_AsTimespec_clamp(abs_time, &ts);
- status = fix_status(sem_timedwait(thelock, &ts));
-#endif
- }
- else if (timeout == 0) {
- status = fix_status(sem_trywait(thelock));
- }
- else {
- status = fix_status(sem_wait(thelock));
- }
-
- /* Retry if interrupted by a signal, unless the caller wants to be
- notified. */
- if (intr_flag || status != EINTR) {
- break;
- }
-
- // sem_clockwait() uses an absolute timeout, there is no need
- // to recompute the relative timeout.
-#ifndef HAVE_SEM_CLOCKWAIT
- if (timeout > 0) {
- /* wait interrupted by a signal (EINTR): recompute the timeout */
- timeout = _PyDeadline_Get(deadline);
- if (timeout < 0) {
- status = ETIMEDOUT;
- break;
- }
- }
-#endif
- }
-
- /* Don't check the status if we're stopping because of an interrupt. */
- if (!(intr_flag && status == EINTR)) {
- if (timeout > 0) {
- if (status != ETIMEDOUT) {
-#ifdef HAVE_SEM_CLOCKWAIT
- CHECK_STATUS("sem_clockwait");
-#else
- CHECK_STATUS("sem_timedwait");
-#endif
- }
- }
- else if (timeout == 0) {
- if (status != EAGAIN) {
- CHECK_STATUS("sem_trywait");
- }
- }
- else {
- CHECK_STATUS("sem_wait");
- }
- }
-
- if (status == 0) {
- success = PY_LOCK_ACQUIRED;
- } else if (intr_flag && status == EINTR) {
- success = PY_LOCK_INTR;
- } else {
- success = PY_LOCK_FAILURE;
- }
-
- return success;
-}
-
-void
-PyThread_release_lock(PyThread_type_lock lock)
-{
- sem_t *thelock = (sem_t *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- status = sem_post(thelock);
- CHECK_STATUS("sem_post");
-}
-
-#else /* USE_SEMAPHORES */
-
-/*
- * Lock support.
- */
-PyThread_type_lock
-PyThread_allocate_lock(void)
-{
- pthread_lock *lock;
- int status, error = 0;
-
- if (!initialized)
- PyThread_init_thread();
-
- lock = (pthread_lock *) PyMem_RawCalloc(1, sizeof(pthread_lock));
- if (lock) {
- lock->locked = 0;
-
- status = pthread_mutex_init(&lock->mut, NULL);
- CHECK_STATUS_PTHREAD("pthread_mutex_init");
- /* Mark the pthread mutex underlying a Python mutex as
- pure happens-before. We can't simply mark the
- Python-level mutex as a mutex because it can be
- acquired and released in different threads, which
- will cause errors. */
- _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);
-
- status = _PyThread_cond_init(&lock->lock_released);
- CHECK_STATUS_PTHREAD("pthread_cond_init");
-
- if (error) {
- PyMem_RawFree((void *)lock);
- lock = 0;
- }
- }
-
- return (PyThread_type_lock) lock;
-}
-
-void
-PyThread_free_lock(PyThread_type_lock lock)
-{
- pthread_lock *thelock = (pthread_lock *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- /* some pthread-like implementations tie the mutex to the cond
- * and must have the cond destroyed first.
- */
- status = pthread_cond_destroy( &thelock->lock_released );
- CHECK_STATUS_PTHREAD("pthread_cond_destroy");
-
- status = pthread_mutex_destroy( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_destroy");
-
- PyMem_RawFree((void *)thelock);
-}
-
-PyLockStatus
-PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
- int intr_flag)
-{
- PyLockStatus success = PY_LOCK_FAILURE;
- pthread_lock *thelock = (pthread_lock *)lock;
- int status, error = 0;
-
- if (microseconds == 0) {
- status = pthread_mutex_trylock( &thelock->mut );
- if (status != EBUSY) {
- CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
- }
- }
- else {
- status = pthread_mutex_lock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
- }
- if (status != 0) {
- goto done;
- }
-
- if (thelock->locked == 0) {
- success = PY_LOCK_ACQUIRED;
- goto unlock;
- }
- if (microseconds == 0) {
- goto unlock;
- }
-
- struct timespec abs_timeout;
- if (microseconds > 0) {
- _PyThread_cond_after(microseconds, &abs_timeout);
- }
- // Continue trying until we get the lock
-
- // mut must be locked by me -- part of the condition protocol
- while (1) {
- if (microseconds > 0) {
- status = pthread_cond_timedwait(&thelock->lock_released,
- &thelock->mut, &abs_timeout);
- if (status == 1) {
- break;
- }
- if (status == ETIMEDOUT) {
- break;
- }
- CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
- }
- else {
- status = pthread_cond_wait(
- &thelock->lock_released,
- &thelock->mut);
- CHECK_STATUS_PTHREAD("pthread_cond_wait");
- }
-
- if (intr_flag && status == 0 && thelock->locked) {
- // We were woken up, but didn't get the lock. We probably received
- // a signal. Return PY_LOCK_INTR to allow the caller to handle
- // it and retry.
- success = PY_LOCK_INTR;
- break;
- }
-
- if (status == 0 && !thelock->locked) {
- success = PY_LOCK_ACQUIRED;
- break;
- }
-
- // Wait got interrupted by a signal: retry
- }
-
-unlock:
- if (success == PY_LOCK_ACQUIRED) {
- thelock->locked = 1;
- }
- status = pthread_mutex_unlock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");
-
-done:
- if (error) {
- success = PY_LOCK_FAILURE;
- }
- return success;
-}
-
-void
-PyThread_release_lock(PyThread_type_lock lock)
-{
- pthread_lock *thelock = (pthread_lock *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- status = pthread_mutex_lock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");
-
- thelock->locked = 0;
-
- /* wake up someone (anyone, if any) waiting on the lock */
- status = pthread_cond_signal( &thelock->lock_released );
- CHECK_STATUS_PTHREAD("pthread_cond_signal");
-
- status = pthread_mutex_unlock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
-}
-
-#endif /* USE_SEMAPHORES */
-
-int
-_PyThread_at_fork_reinit(PyThread_type_lock *lock)
-{
- PyThread_type_lock new_lock = PyThread_allocate_lock();
- if (new_lock == NULL) {
- return -1;
- }
-
- /* bpo-6721, bpo-40089: The old lock can be in an inconsistent state.
- fork() can be called in the middle of an operation on the lock done by
- another thread. So don't call PyThread_free_lock(*lock).
-
- Leak memory on purpose. Don't release the memory either since the
- address of a mutex is relevant. Putting two mutexes at the same address
- can lead to problems. */
-
- *lock = new_lock;
- return 0;
-}
-
-int
-PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
-{
- return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
-}
/* set the thread stack size.
* Return 0 if size is valid, -1 if size is invalid,
diff --git a/Python/traceback.c b/Python/traceback.c
index c06cb1a5908..4f674eaf557 100644
--- a/Python/traceback.c
+++ b/Python/traceback.c
@@ -9,7 +9,6 @@
#include "pycore_interpframe.h" // _PyFrame_GetCode()
#include "pycore_pyerrors.h" // _PyErr_GetRaisedException()
#include "pycore_pystate.h" // _PyThreadState_GET()
-#include "pycore_sysmodule.h" // _PySys_GetOptionalAttr()
#include "pycore_traceback.h" // EXCEPTION_TB_HEADER
#include "frameobject.h" // PyFrame_New()
@@ -399,7 +398,7 @@ _Py_FindSourceFile(PyObject *filename, char* namebuf, size_t namelen, PyObject *
taillen = strlen(tail);
PyThreadState *tstate = _PyThreadState_GET();
- if (_PySys_GetOptionalAttr(&_Py_ID(path), &syspath) < 0) {
+ if (PySys_GetOptionalAttr(&_Py_ID(path), &syspath) < 0) {
PyErr_Clear();
goto error;
}
@@ -777,7 +776,7 @@ _PyTraceBack_Print(PyObject *v, const char *header, PyObject *f)
PyErr_BadInternalCall();
return -1;
}
- if (_PySys_GetOptionalAttrString("tracebacklimit", &limitv) < 0) {
+ if (PySys_GetOptionalAttrString("tracebacklimit", &limitv) < 0) {
return -1;
}
else if (limitv != NULL && PyLong_Check(limitv)) {