Diffstat (limited to 'Python')
-rw-r--r--  Python/asm_trampoline.S       22
-rw-r--r--  Python/bytecodes.c            97
-rw-r--r--  Python/ceval.c                30
-rw-r--r--  Python/crossinterp.c         449
-rw-r--r--  Python/executor_cases.c.h     25
-rw-r--r--  Python/flowgraph.c            20
-rw-r--r--  Python/generated_cases.c.h    93
-rw-r--r--  Python/import.c                4
-rw-r--r--  Python/lock.c                  3
-rw-r--r--  Python/optimizer_bytecodes.c  32
-rw-r--r--  Python/optimizer_cases.c.h    32
-rw-r--r--  Python/parking_lot.c          22
-rw-r--r--  Python/perf_jit_trampoline.c   4
-rw-r--r--  Python/remote_debug.h         70
-rw-r--r--  Python/remote_debugging.c     39
-rw-r--r--  Python/stackrefs.c             4
-rw-r--r--  Python/thread.c               82
-rw-r--r--  Python/thread_nt.h            92
-rw-r--r--  Python/thread_pthread.h      392
19 files changed, 760 insertions, 752 deletions
diff --git a/Python/asm_trampoline.S b/Python/asm_trampoline.S
index 0a3265dfeee..616752459ba 100644
--- a/Python/asm_trampoline.S
+++ b/Python/asm_trampoline.S
@@ -9,6 +9,9 @@
# }
_Py_trampoline_func_start:
#ifdef __x86_64__
+#if defined(__CET__) && (__CET__ & 1)
+ endbr64
+#endif
sub $8, %rsp
call *%rcx
add $8, %rsp
@@ -34,3 +37,22 @@ _Py_trampoline_func_start:
.globl _Py_trampoline_func_end
_Py_trampoline_func_end:
.section .note.GNU-stack,"",@progbits
+# GNU property note indicating that this assembly code supports CET
+#if defined(__x86_64__) && defined(__CET__) && (__CET__ & 1)
+ .section .note.gnu.property,"a"
+ .align 8
+ .long 1f - 0f
+ .long 4f - 1f
+ .long 5
+0:
+ .string "GNU"
+1:
+ .align 8
+ .long 0xc0000002
+ .long 3f - 2f
+2:
+ .long 0x3
+3:
+ .align 8
+4:
+#endif // __x86_64__
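
For context: when GCC or Clang is invoked with -fcf-protection, the __CET__ macro encodes which Control-flow Enforcement Technology features are requested. A minimal, purely illustrative sketch of how those bits are conventionally read (not part of the patch):

#if defined(__CET__)
#  if __CET__ & 1
     /* Indirect-branch tracking (IBT): every indirect call/jump target must
        begin with endbr64, otherwise the CPU raises a control-protection
        fault.  This is why the trampoline entry above gains endbr64. */
#  endif
#  if __CET__ & 2
     /* Shadow stack: return addresses are verified on ret. */
#  endif
#endif

The .note.gnu.property section added above advertises that support to the linker, so linking this object does not downgrade the final binary's CET marking.
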
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index a1f8d360528..c4b13da5db4 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -3125,100 +3125,49 @@ dummy_func(
}
replaced op(_FOR_ITER, (iter, null_or_index -- iter, null_or_index, next)) {
- /* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) */
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (PyStackRef_IsTaggedInt(null_or_index)) {
- next = _PyForIter_NextWithIndex(iter_o, null_or_index);
- if (PyStackRef_IsNull(next)) {
- null_or_index = PyStackRef_TagInt(-1);
- JUMPBY(oparg + 1);
- DISPATCH();
- }
- null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
- }
- else {
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- if (!matches) {
- ERROR_NO_POP();
- }
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
- }
- /* iterator ended normally */
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
- /* Jump forward oparg, then skip following END_FOR */
- JUMPBY(oparg + 1);
- DISPATCH();
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ ERROR_NO_POP();
}
- next = PyStackRef_FromPyObjectSteal(next_o);
+ // Jump forward by oparg and skip the following END_FOR
+ JUMPBY(oparg + 1);
+ DISPATCH();
}
+ next = item;
}
op(_FOR_ITER_TIER_TWO, (iter, null_or_index -- iter, null_or_index, next)) {
- /* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) */
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- EXIT_IF(!PyStackRef_IsNull(null_or_index));
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- if (!matches) {
- ERROR_NO_POP();
- }
- _PyEval_MonitorRaise(tstate, frame, frame->instr_ptr);
- _PyErr_Clear(tstate);
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ ERROR_NO_POP();
}
/* iterator ended normally */
/* The translator sets the deopt target just past the matching END_FOR */
EXIT_IF(true);
}
- next = PyStackRef_FromPyObjectSteal(next_o);
- // Common case: no jump, leave it to the code generator
+ next = item;
}
+
macro(FOR_ITER) = _SPECIALIZE_FOR_ITER + _FOR_ITER;
inst(INSTRUMENTED_FOR_ITER, (unused/1, iter, null_or_index -- iter, null_or_index, next)) {
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (PyStackRef_IsTaggedInt(null_or_index)) {
- next = _PyForIter_NextWithIndex(iter_o, null_or_index);
- if (PyStackRef_IsNull(next)) {
- JUMPBY(oparg + 1);
- DISPATCH();
- }
- INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
- }
- else {
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- if (next_o != NULL) {
- next = PyStackRef_FromPyObjectSteal(next_o);
- INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
- }
- else {
- if (_PyErr_Occurred(tstate)) {
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- if (!matches) {
- ERROR_NO_POP();
- }
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
- }
- /* iterator ended normally */
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
- /* Skip END_FOR */
- JUMPBY(oparg + 1);
- DISPATCH();
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ ERROR_NO_POP();
}
+ // Jump forward by oparg and skip the following END_FOR
+ JUMPBY(oparg + 1);
+ DISPATCH();
}
+ next = item;
+ INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
}
-
op(_ITER_CHECK_LIST, (iter, null_or_index -- iter, null_or_index)) {
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
EXIT_IF(Py_TYPE(iter_o) != &PyList_Type);
diff --git a/Python/ceval.c b/Python/ceval.c
index 7aec196cb85..4cfe4bb88f4 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -3190,7 +3190,7 @@ _PyEval_FormatKwargsError(PyThreadState *tstate, PyObject *func, PyObject *kwarg
else if (_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) {
PyObject *exc = _PyErr_GetRaisedException(tstate);
PyObject *args = PyException_GetArgs(exc);
- if (exc && PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1) {
+ if (PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1) {
_PyErr_Clear(tstate);
PyObject *funcstr = _PyObject_FunctionStr(func);
if (funcstr != NULL) {
@@ -3439,8 +3439,8 @@ _PyEval_LoadName(PyThreadState *tstate, _PyInterpreterFrame *frame, PyObject *na
return value;
}
-_PyStackRef
-_PyForIter_NextWithIndex(PyObject *seq, _PyStackRef index)
+static _PyStackRef
+foriter_next(PyObject *seq, _PyStackRef index)
{
assert(PyStackRef_IsTaggedInt(index));
assert(PyTuple_CheckExact(seq) || PyList_CheckExact(seq));
@@ -3459,6 +3459,30 @@ _PyForIter_NextWithIndex(PyObject *seq, _PyStackRef index)
return PyStackRef_FromPyObjectSteal(item);
}
+_PyStackRef _PyForIter_VirtualIteratorNext(PyThreadState* tstate, _PyInterpreterFrame* frame, _PyStackRef iter, _PyStackRef* index_ptr)
+{
+ PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
+ _PyStackRef index = *index_ptr;
+ if (PyStackRef_IsTaggedInt(index)) {
+ *index_ptr = PyStackRef_IncrementTaggedIntNoOverflow(index);
+ return foriter_next(iter_o, index);
+ }
+ PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
+ if (next_o == NULL) {
+ if (_PyErr_Occurred(tstate)) {
+ if (_PyErr_ExceptionMatches(tstate, PyExc_StopIteration)) {
+ _PyEval_MonitorRaise(tstate, frame, frame->instr_ptr);
+ _PyErr_Clear(tstate);
+ }
+ else {
+ return PyStackRef_ERROR;
+ }
+ }
+ return PyStackRef_NULL;
+ }
+ return PyStackRef_FromPyObjectSteal(next_o);
+}
+
/* Check if a 'cls' provides the given special method. */
static inline int
type_has_special_method(PyTypeObject *cls, PyObject *name)
diff --git a/Python/crossinterp.c b/Python/crossinterp.c
index 13d91c508c4..5e73ab28f2b 100644
--- a/Python/crossinterp.c
+++ b/Python/crossinterp.c
@@ -70,6 +70,17 @@ runpy_run_path(const char *filename, const char *modname)
}
+static void
+set_exc_with_cause(PyObject *exctype, const char *msg)
+{
+ PyObject *cause = PyErr_GetRaisedException();
+ PyErr_SetString(exctype, msg);
+ PyObject *exc = PyErr_GetRaisedException();
+ PyException_SetCause(exc, cause);
+ PyErr_SetRaisedException(exc);
+}
+
+
static PyObject *
pyerr_get_message(PyObject *exc)
{
@@ -1314,7 +1325,7 @@ _excinfo_normalize_type(struct _excinfo_type *info,
}
static void
-_PyXI_excinfo_Clear(_PyXI_excinfo *info)
+_PyXI_excinfo_clear(_PyXI_excinfo *info)
{
_excinfo_clear_type(&info->type);
if (info->msg != NULL) {
@@ -1364,7 +1375,7 @@ _PyXI_excinfo_InitFromException(_PyXI_excinfo *info, PyObject *exc)
assert(exc != NULL);
if (PyErr_GivenExceptionMatches(exc, PyExc_MemoryError)) {
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
return NULL;
}
const char *failure = NULL;
@@ -1410,7 +1421,7 @@ _PyXI_excinfo_InitFromException(_PyXI_excinfo *info, PyObject *exc)
error:
assert(failure != NULL);
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
return failure;
}
@@ -1461,7 +1472,7 @@ _PyXI_excinfo_InitFromObject(_PyXI_excinfo *info, PyObject *obj)
error:
assert(failure != NULL);
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
return failure;
}
@@ -1656,7 +1667,7 @@ _PyXI_ExcInfoAsObject(_PyXI_excinfo *info)
void
_PyXI_ClearExcInfo(_PyXI_excinfo *info)
{
- _PyXI_excinfo_Clear(info);
+ _PyXI_excinfo_clear(info);
}
@@ -1694,6 +1705,14 @@ _PyXI_ApplyErrorCode(_PyXI_errcode code, PyInterpreterState *interp)
PyErr_SetString(PyExc_InterpreterError,
"failed to apply namespace to __main__");
break;
+ case _PyXI_ERR_PRESERVE_FAILURE:
+ PyErr_SetString(PyExc_InterpreterError,
+ "failed to preserve objects across session");
+ break;
+ case _PyXI_ERR_EXC_PROPAGATION_FAILURE:
+ PyErr_SetString(PyExc_InterpreterError,
+ "failed to transfer exception between interpreters");
+ break;
case _PyXI_ERR_NOT_SHAREABLE:
_set_xid_lookup_failure(tstate, NULL, NULL, NULL);
break;
@@ -1743,7 +1762,7 @@ _PyXI_InitError(_PyXI_error *error, PyObject *excobj, _PyXI_errcode code)
assert(excobj == NULL);
assert(code != _PyXI_ERR_NO_ERROR);
error->code = code;
- _PyXI_excinfo_Clear(&error->uncaught);
+ _PyXI_excinfo_clear(&error->uncaught);
}
return failure;
}
@@ -1753,7 +1772,7 @@ _PyXI_ApplyError(_PyXI_error *error)
{
PyThreadState *tstate = PyThreadState_Get();
if (error->code == _PyXI_ERR_UNCAUGHT_EXCEPTION) {
- // Raise an exception that proxies the propagated exception.
+ // We will raise an exception that proxies the propagated exception.
return _PyXI_excinfo_AsObject(&error->uncaught);
}
else if (error->code == _PyXI_ERR_NOT_SHAREABLE) {
@@ -1839,7 +1858,8 @@ _sharednsitem_has_value(_PyXI_namespace_item *item, int64_t *p_interpid)
}
static int
-_sharednsitem_set_value(_PyXI_namespace_item *item, PyObject *value)
+_sharednsitem_set_value(_PyXI_namespace_item *item, PyObject *value,
+ xidata_fallback_t fallback)
{
assert(_sharednsitem_is_initialized(item));
assert(item->xidata == NULL);
@@ -1848,8 +1868,7 @@ _sharednsitem_set_value(_PyXI_namespace_item *item, PyObject *value)
return -1;
}
PyThreadState *tstate = PyThreadState_Get();
- // XXX Use _PyObject_GetXIDataWithFallback()?
- if (_PyObject_GetXIDataNoFallback(tstate, value, item->xidata) != 0) {
+ if (_PyObject_GetXIData(tstate, value, fallback, item->xidata) < 0) {
PyMem_RawFree(item->xidata);
item->xidata = NULL;
// The caller may want to propagate PyExc_NotShareableError
@@ -1881,7 +1900,8 @@ _sharednsitem_clear(_PyXI_namespace_item *item)
}
static int
-_sharednsitem_copy_from_ns(struct _sharednsitem *item, PyObject *ns)
+_sharednsitem_copy_from_ns(struct _sharednsitem *item, PyObject *ns,
+ xidata_fallback_t fallback)
{
assert(item->name != NULL);
assert(item->xidata == NULL);
@@ -1893,7 +1913,7 @@ _sharednsitem_copy_from_ns(struct _sharednsitem *item, PyObject *ns)
// When applied, this item will be set to the default (or fail).
return 0;
}
- if (_sharednsitem_set_value(item, value) < 0) {
+ if (_sharednsitem_set_value(item, value, fallback) < 0) {
return -1;
}
return 0;
@@ -2144,18 +2164,21 @@ error:
return NULL;
}
-static void _propagate_not_shareable_error(_PyXI_session *);
+static void _propagate_not_shareable_error(_PyXI_errcode *);
static int
-_fill_sharedns(_PyXI_namespace *ns, PyObject *nsobj, _PyXI_session *session)
+_fill_sharedns(_PyXI_namespace *ns, PyObject *nsobj,
+ xidata_fallback_t fallback, _PyXI_errcode *p_errcode)
{
// All items are expected to be shareable.
assert(_sharedns_check_counts(ns));
assert(ns->numnames == ns->maxitems);
assert(ns->numvalues == 0);
for (Py_ssize_t i=0; i < ns->maxitems; i++) {
- if (_sharednsitem_copy_from_ns(&ns->items[i], nsobj) < 0) {
- _propagate_not_shareable_error(session);
+ if (_sharednsitem_copy_from_ns(&ns->items[i], nsobj, fallback) < 0) {
+ if (p_errcode != NULL) {
+ _propagate_not_shareable_error(p_errcode);
+ }
// Clear out the ones we set so far.
for (Py_ssize_t j=0; j < i; j++) {
_sharednsitem_clear_value(&ns->items[j]);
@@ -2221,6 +2244,18 @@ _apply_sharedns(_PyXI_namespace *ns, PyObject *nsobj, PyObject *dflt)
/* switched-interpreter sessions */
/*********************************/
+struct xi_session_error {
+ // This is set if the interpreter is entered and raised an exception
+ // that needs to be handled in some special way during exit.
+ _PyXI_errcode *override;
+ // This is set if exit captured an exception to propagate.
+ _PyXI_error *info;
+
+ // -- pre-allocated memory --
+ _PyXI_error _info;
+ _PyXI_errcode _override;
+};
+
struct xi_session {
#define SESSION_UNUSED 0
#define SESSION_ACTIVE 1
@@ -2249,18 +2284,14 @@ struct xi_session {
// beginning of the session as a convenience.
PyObject *main_ns;
- // This is set if the interpreter is entered and raised an exception
- // that needs to be handled in some special way during exit.
- _PyXI_errcode *error_override;
- // This is set if exit captured an exception to propagate.
- _PyXI_error *error;
+ // This is a dict of objects that will be available (via sharing)
+ // once the session exits. Do not access this directly; use
+    // _PyXI_Preserve() and _PyXI_GetPreserved() instead.
+ PyObject *_preserved;
- // -- pre-allocated memory --
- _PyXI_error _error;
- _PyXI_errcode _error_override;
+ struct xi_session_error error;
};
-
_PyXI_session *
_PyXI_NewSession(void)
{
@@ -2286,9 +2317,25 @@ _session_is_active(_PyXI_session *session)
return session->status == SESSION_ACTIVE;
}
-static int _ensure_main_ns(_PyXI_session *);
+static int
+_session_pop_error(_PyXI_session *session, struct xi_session_error *err)
+{
+ if (session->error.info == NULL) {
+ assert(session->error.override == NULL);
+ *err = (struct xi_session_error){0};
+ return 0;
+ }
+ *err = session->error;
+ err->info = &err->_info;
+ if (err->override != NULL) {
+ err->override = &err->_override;
+ }
+ session->error = (struct xi_session_error){0};
+ return 1;
+}
+
+static int _ensure_main_ns(_PyXI_session *, _PyXI_errcode *);
static inline void _session_set_error(_PyXI_session *, _PyXI_errcode);
-static void _capture_current_exception(_PyXI_session *);
/* enter/exit a cross-interpreter session */
@@ -2305,9 +2352,9 @@ _enter_session(_PyXI_session *session, PyInterpreterState *interp)
assert(!session->running);
assert(session->main_ns == NULL);
// Set elsewhere and cleared in _capture_current_exception().
- assert(session->error_override == NULL);
- // Set elsewhere and cleared in _PyXI_ApplyCapturedException().
- assert(session->error == NULL);
+ assert(session->error.override == NULL);
+ // Set elsewhere and cleared in _PyXI_Exit().
+ assert(session->error.info == NULL);
// Switch to interpreter.
PyThreadState *tstate = PyThreadState_Get();
@@ -2336,14 +2383,16 @@ _exit_session(_PyXI_session *session)
PyThreadState *tstate = session->init_tstate;
assert(tstate != NULL);
assert(PyThreadState_Get() == tstate);
+ assert(!_PyErr_Occurred(tstate));
// Release any of the entered interpreters resources.
Py_CLEAR(session->main_ns);
+ Py_CLEAR(session->_preserved);
// Ensure this thread no longer owns __main__.
if (session->running) {
_PyInterpreterState_SetNotRunningMain(tstate->interp);
- assert(!PyErr_Occurred());
+ assert(!_PyErr_Occurred(tstate));
session->running = 0;
}
@@ -2360,21 +2409,16 @@ _exit_session(_PyXI_session *session)
assert(!session->own_init_tstate);
}
- // For now the error data persists past the exit.
- *session = (_PyXI_session){
- .error_override = session->error_override,
- .error = session->error,
- ._error = session->_error,
- ._error_override = session->_error_override,
- };
+ assert(session->error.info == NULL);
+ assert(session->error.override == _PyXI_ERR_NO_ERROR);
+
+ *session = (_PyXI_session){0};
}
static void
-_propagate_not_shareable_error(_PyXI_session *session)
+_propagate_not_shareable_error(_PyXI_errcode *p_errcode)
{
- if (session == NULL) {
- return;
- }
+ assert(p_errcode != NULL);
PyThreadState *tstate = PyThreadState_Get();
PyObject *exctype = get_notshareableerror_type(tstate);
if (exctype == NULL) {
@@ -2384,46 +2428,46 @@ _propagate_not_shareable_error(_PyXI_session *session)
}
if (PyErr_ExceptionMatches(exctype)) {
// We want to propagate the exception directly.
- _session_set_error(session, _PyXI_ERR_NOT_SHAREABLE);
+ *p_errcode = _PyXI_ERR_NOT_SHAREABLE;
}
}
-PyObject *
-_PyXI_ApplyCapturedException(_PyXI_session *session)
-{
- assert(!PyErr_Occurred());
- assert(session->error != NULL);
- PyObject *res = _PyXI_ApplyError(session->error);
- assert((res == NULL) != (PyErr_Occurred() == NULL));
- session->error = NULL;
- return res;
-}
-
-int
-_PyXI_HasCapturedException(_PyXI_session *session)
-{
- return session->error != NULL;
-}
-
int
_PyXI_Enter(_PyXI_session *session,
- PyInterpreterState *interp, PyObject *nsupdates)
+ PyInterpreterState *interp, PyObject *nsupdates,
+ _PyXI_session_result *result)
{
// Convert the attrs for cross-interpreter use.
_PyXI_namespace *sharedns = NULL;
if (nsupdates != NULL) {
Py_ssize_t len = PyDict_Size(nsupdates);
if (len < 0) {
+ if (result != NULL) {
+ result->errcode = _PyXI_ERR_APPLY_NS_FAILURE;
+ }
return -1;
}
if (len > 0) {
sharedns = _create_sharedns(nsupdates);
if (sharedns == NULL) {
+ if (result != NULL) {
+ result->errcode = _PyXI_ERR_APPLY_NS_FAILURE;
+ }
return -1;
}
- if (_fill_sharedns(sharedns, nsupdates, NULL) < 0) {
- assert(session->error == NULL);
+ // For now we limit it to shareable objects.
+ xidata_fallback_t fallback = _PyXIDATA_XIDATA_ONLY;
+ _PyXI_errcode errcode = _PyXI_ERR_NO_ERROR;
+ if (_fill_sharedns(sharedns, nsupdates, fallback, &errcode) < 0) {
+ assert(PyErr_Occurred());
+ assert(session->error.info == NULL);
+ if (errcode == _PyXI_ERR_NO_ERROR) {
+ errcode = _PyXI_ERR_UNCAUGHT_EXCEPTION;
+ }
_destroy_sharedns(sharedns);
+ if (result != NULL) {
+ result->errcode = errcode;
+ }
return -1;
}
}
@@ -2445,8 +2489,7 @@ _PyXI_Enter(_PyXI_session *session,
// Apply the cross-interpreter data.
if (sharedns != NULL) {
- if (_ensure_main_ns(session) < 0) {
- errcode = _PyXI_ERR_MAIN_NS_FAILURE;
+ if (_ensure_main_ns(session, &errcode) < 0) {
goto error;
}
if (_apply_sharedns(sharedns, session->main_ns, NULL) < 0) {
@@ -2462,19 +2505,124 @@ _PyXI_Enter(_PyXI_session *session,
error:
// We want to propagate all exceptions here directly (best effort).
+ assert(errcode != _PyXI_ERR_NO_ERROR);
_session_set_error(session, errcode);
+ assert(!PyErr_Occurred());
+
+ // Exit the session.
+ struct xi_session_error err;
+ (void)_session_pop_error(session, &err);
_exit_session(session);
+
if (sharedns != NULL) {
_destroy_sharedns(sharedns);
}
+
+ // Apply the error from the other interpreter.
+ PyObject *excinfo = _PyXI_ApplyError(err.info);
+ _PyXI_excinfo_clear(&err.info->uncaught);
+ if (excinfo != NULL) {
+ if (result != NULL) {
+ result->excinfo = excinfo;
+ }
+ else {
+#ifdef Py_DEBUG
+ fprintf(stderr, "_PyXI_Enter(): uncaught exception discarded");
+#endif
+ }
+ }
+ assert(PyErr_Occurred());
+
return -1;
}
-void
-_PyXI_Exit(_PyXI_session *session)
+static int _pop_preserved(_PyXI_session *, _PyXI_namespace **, PyObject **,
+ _PyXI_errcode *);
+static int _finish_preserved(_PyXI_namespace *, PyObject **);
+
+int
+_PyXI_Exit(_PyXI_session *session, _PyXI_errcode errcode,
+ _PyXI_session_result *result)
{
- _capture_current_exception(session);
+ int res = 0;
+
+ // Capture the raised exception, if any.
+ assert(session->error.info == NULL);
+ if (PyErr_Occurred()) {
+ _session_set_error(session, errcode);
+ assert(!PyErr_Occurred());
+ }
+ else {
+ assert(errcode == _PyXI_ERR_NO_ERROR);
+ assert(session->error.override == NULL);
+ }
+
+ // Capture the preserved namespace.
+ _PyXI_namespace *preserved = NULL;
+ PyObject *preservedobj = NULL;
+ if (result != NULL) {
+ errcode = _PyXI_ERR_NO_ERROR;
+ if (_pop_preserved(session, &preserved, &preservedobj, &errcode) < 0) {
+ if (session->error.info != NULL) {
+ // XXX Chain the exception (i.e. set __context__)?
+ PyErr_FormatUnraisable(
+ "Exception ignored while capturing preserved objects");
+ }
+ else {
+ _session_set_error(session, errcode);
+ }
+ }
+ }
+
+ // Exit the session.
+ struct xi_session_error err;
+ (void)_session_pop_error(session, &err);
_exit_session(session);
+
+ // Restore the preserved namespace.
+ assert(preserved == NULL || preservedobj == NULL);
+ if (_finish_preserved(preserved, &preservedobj) < 0) {
+ assert(preservedobj == NULL);
+ if (err.info != NULL) {
+ // XXX Chain the exception (i.e. set __context__)?
+ PyErr_FormatUnraisable(
+ "Exception ignored while capturing preserved objects");
+ }
+ else {
+ errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ _propagate_not_shareable_error(&errcode);
+ }
+ }
+ if (result != NULL) {
+ result->preserved = preservedobj;
+ result->errcode = errcode;
+ }
+
+ // Apply the error from the other interpreter, if any.
+ if (err.info != NULL) {
+ res = -1;
+ assert(!PyErr_Occurred());
+ PyObject *excinfo = _PyXI_ApplyError(err.info);
+ _PyXI_excinfo_clear(&err.info->uncaught);
+ if (excinfo == NULL) {
+ assert(PyErr_Occurred());
+ if (result != NULL) {
+ _PyXI_ClearResult(result);
+ *result = (_PyXI_session_result){
+ .errcode = _PyXI_ERR_EXC_PROPAGATION_FAILURE,
+ };
+ }
+ }
+ else if (result != NULL) {
+ result->excinfo = excinfo;
+ }
+ else {
+#ifdef Py_DEBUG
+ fprintf(stderr, "_PyXI_Exit(): uncaught exception discarded");
+#endif
+ }
+ }
+ return res;
}
@@ -2483,15 +2631,15 @@ _PyXI_Exit(_PyXI_session *session)
static void
_capture_current_exception(_PyXI_session *session)
{
- assert(session->error == NULL);
+ assert(session->error.info == NULL);
if (!PyErr_Occurred()) {
- assert(session->error_override == NULL);
+ assert(session->error.override == NULL);
return;
}
// Handle the exception override.
- _PyXI_errcode *override = session->error_override;
- session->error_override = NULL;
+ _PyXI_errcode *override = session->error.override;
+ session->error.override = NULL;
_PyXI_errcode errcode = override != NULL
? *override
: _PyXI_ERR_UNCAUGHT_EXCEPTION;
@@ -2514,7 +2662,7 @@ _capture_current_exception(_PyXI_session *session)
}
// Capture the exception.
- _PyXI_error *err = &session->_error;
+ _PyXI_error *err = &session->error._info;
*err = (_PyXI_error){
.interp = session->init_tstate->interp,
};
@@ -2541,7 +2689,7 @@ _capture_current_exception(_PyXI_session *session)
// Finished!
assert(!PyErr_Occurred());
- session->error = err;
+ session->error.info = err;
}
static inline void
@@ -2549,15 +2697,19 @@ _session_set_error(_PyXI_session *session, _PyXI_errcode errcode)
{
assert(_session_is_active(session));
assert(PyErr_Occurred());
+ if (errcode == _PyXI_ERR_NO_ERROR) {
+ // We're a bit forgiving here.
+ errcode = _PyXI_ERR_UNCAUGHT_EXCEPTION;
+ }
if (errcode != _PyXI_ERR_UNCAUGHT_EXCEPTION) {
- session->_error_override = errcode;
- session->error_override = &session->_error_override;
+ session->error._override = errcode;
+ session->error.override = &session->error._override;
}
_capture_current_exception(session);
}
static int
-_ensure_main_ns(_PyXI_session *session)
+_ensure_main_ns(_PyXI_session *session, _PyXI_errcode *p_errcode)
{
assert(_session_is_active(session));
if (session->main_ns != NULL) {
@@ -2566,11 +2718,17 @@ _ensure_main_ns(_PyXI_session *session)
// Cache __main__.__dict__.
PyObject *main_mod = _Py_GetMainModule(session->init_tstate);
if (_Py_CheckMainModule(main_mod) < 0) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_MAIN_NS_FAILURE;
+ }
return -1;
}
PyObject *ns = PyModule_GetDict(main_mod); // borrowed
Py_DECREF(main_mod);
if (ns == NULL) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_MAIN_NS_FAILURE;
+ }
return -1;
}
session->main_ns = Py_NewRef(ns);
@@ -2578,21 +2736,150 @@ _ensure_main_ns(_PyXI_session *session)
}
PyObject *
-_PyXI_GetMainNamespace(_PyXI_session *session)
+_PyXI_GetMainNamespace(_PyXI_session *session, _PyXI_errcode *p_errcode)
{
if (!_session_is_active(session)) {
PyErr_SetString(PyExc_RuntimeError, "session not active");
return NULL;
}
- if (_ensure_main_ns(session) < 0) {
- _session_set_error(session, _PyXI_ERR_MAIN_NS_FAILURE);
- _capture_current_exception(session);
+ if (_ensure_main_ns(session, p_errcode) < 0) {
return NULL;
}
return session->main_ns;
}
+static int
+_pop_preserved(_PyXI_session *session,
+ _PyXI_namespace **p_xidata, PyObject **p_obj,
+ _PyXI_errcode *p_errcode)
+{
+ assert(_PyThreadState_GET() == session->init_tstate); // active session
+ if (session->_preserved == NULL) {
+ *p_xidata = NULL;
+ *p_obj = NULL;
+ return 0;
+ }
+ if (session->init_tstate == session->prev_tstate) {
+ // We did not switch interpreters.
+ *p_xidata = NULL;
+ *p_obj = session->_preserved;
+ session->_preserved = NULL;
+ return 0;
+ }
+ *p_obj = NULL;
+
+ // We did switch interpreters.
+ Py_ssize_t len = PyDict_Size(session->_preserved);
+ if (len < 0) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ else if (len == 0) {
+ *p_xidata = NULL;
+ }
+ else {
+ _PyXI_namespace *xidata = _create_sharedns(session->_preserved);
+ if (xidata == NULL) {
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ _PyXI_errcode errcode = _PyXI_ERR_NO_ERROR;
+ if (_fill_sharedns(xidata, session->_preserved,
+ _PyXIDATA_FULL_FALLBACK, &errcode) < 0)
+ {
+ assert(session->error.info == NULL);
+ if (errcode != _PyXI_ERR_NOT_SHAREABLE) {
+ errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ if (p_errcode != NULL) {
+ *p_errcode = errcode;
+ }
+ _destroy_sharedns(xidata);
+ return -1;
+ }
+ *p_xidata = xidata;
+ }
+ Py_CLEAR(session->_preserved);
+ return 0;
+}
+
+static int
+_finish_preserved(_PyXI_namespace *xidata, PyObject **p_preserved)
+{
+ if (xidata == NULL) {
+ return 0;
+ }
+ int res = -1;
+ if (p_preserved != NULL) {
+ PyObject *ns = PyDict_New();
+ if (ns == NULL) {
+ goto finally;
+ }
+ if (_apply_sharedns(xidata, ns, NULL) < 0) {
+ Py_CLEAR(ns);
+ goto finally;
+ }
+ *p_preserved = ns;
+ }
+ res = 0;
+
+finally:
+ _destroy_sharedns(xidata);
+ return res;
+}
+
+int
+_PyXI_Preserve(_PyXI_session *session, const char *name, PyObject *value,
+ _PyXI_errcode *p_errcode)
+{
+ if (!_session_is_active(session)) {
+ PyErr_SetString(PyExc_RuntimeError, "session not active");
+ return -1;
+ }
+ if (session->_preserved == NULL) {
+ session->_preserved = PyDict_New();
+ if (session->_preserved == NULL) {
+ set_exc_with_cause(PyExc_RuntimeError,
+ "failed to initialize preserved objects");
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ }
+ if (PyDict_SetItemString(session->_preserved, name, value) < 0) {
+ set_exc_with_cause(PyExc_RuntimeError, "failed to preserve object");
+ if (p_errcode != NULL) {
+ *p_errcode = _PyXI_ERR_PRESERVE_FAILURE;
+ }
+ return -1;
+ }
+ return 0;
+}
+
+PyObject *
+_PyXI_GetPreserved(_PyXI_session_result *result, const char *name)
+{
+ PyObject *value = NULL;
+ if (result->preserved != NULL) {
+ (void)PyDict_GetItemStringRef(result->preserved, name, &value);
+ }
+ return value;
+}
+
+void
+_PyXI_ClearResult(_PyXI_session_result *result)
+{
+ Py_CLEAR(result->preserved);
+ Py_CLEAR(result->excinfo);
+}
+
+
/*********************/
/* runtime lifecycle */
/*********************/
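
Taken together, the reworked session API moves error and result reporting into _PyXI_session_result. A hedged caller-side sketch of the intended flow (the internal headers, the interp and value arguments, and session teardown are assumed; error handling is abbreviated):

/* Sketch only: run work in another interpreter, preserve one object for
 * the calling interpreter, and fetch it from the session result. */
static int
run_in_interp_sketch(PyInterpreterState *interp, PyObject *value,
                     PyObject **p_retval)
{
    _PyXI_session *session = _PyXI_NewSession();
    if (session == NULL) {
        return -1;
    }
    _PyXI_session_result result = {0};
    if (_PyXI_Enter(session, interp, NULL, &result) < 0) {
        /* result.errcode and result.excinfo describe what went wrong */
        _PyXI_ClearResult(&result);
        return -1;
    }
    _PyXI_errcode errcode = _PyXI_ERR_NO_ERROR;
    /* ... run code in the entered interpreter ... */
    (void)_PyXI_Preserve(session, "retval", value, &errcode);
    int res = _PyXI_Exit(session, errcode, &result);
    if (res == 0) {
        /* new reference, or NULL if nothing was preserved under that name */
        *p_retval = _PyXI_GetPreserved(&result, "retval");
    }
    _PyXI_ClearResult(&result);
    return res;
}
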
diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h
index 35b29940cb4..d19605169d5 100644
--- a/Python/executor_cases.c.h
+++ b/Python/executor_cases.c.h
@@ -4268,33 +4268,20 @@
_PyStackRef next;
null_or_index = stack_pointer[-1];
iter = stack_pointer[-2];
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (!PyStackRef_IsNull(null_or_index)) {
- UOP_STAT_INC(uopcode, miss);
- JUMP_TO_JUMP_TARGET();
- }
_PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (!matches) {
- JUMP_TO_ERROR();
- }
- _PyFrame_SetStackPointer(frame, stack_pointer);
- _PyEval_MonitorRaise(tstate, frame, frame->instr_ptr);
- _PyErr_Clear(tstate);
- stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ JUMP_TO_ERROR();
}
if (true) {
UOP_STAT_INC(uopcode, miss);
JUMP_TO_JUMP_TARGET();
}
}
- next = PyStackRef_FromPyObjectSteal(next_o);
+ next = item;
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
diff --git a/Python/flowgraph.c b/Python/flowgraph.c
index 67ccf350b72..2adc8c84d83 100644
--- a/Python/flowgraph.c
+++ b/Python/flowgraph.c
@@ -2870,9 +2870,11 @@ optimize_load_fast(cfg_builder *g)
// how many inputs should be left on the stack.
// Opcodes that consume no inputs
+ case FORMAT_SIMPLE:
case GET_ANEXT:
case GET_ITER:
case GET_LEN:
+ case GET_YIELD_FROM_ITER:
case IMPORT_FROM:
case MATCH_KEYS:
case MATCH_MAPPING:
@@ -2907,6 +2909,16 @@ optimize_load_fast(cfg_builder *g)
break;
}
+ case END_SEND:
+ case SET_FUNCTION_ATTRIBUTE: {
+ assert(_PyOpcode_num_popped(opcode, oparg) == 2);
+ assert(_PyOpcode_num_pushed(opcode, oparg) == 1);
+ ref tos = ref_stack_pop(&refs);
+ ref_stack_pop(&refs);
+ PUSH_REF(tos.instr, tos.local);
+ break;
+ }
+
// Opcodes that consume some inputs and push new values
case CHECK_EXC_MATCH: {
ref_stack_pop(&refs);
@@ -2936,6 +2948,14 @@ optimize_load_fast(cfg_builder *g)
break;
}
+ case LOAD_SPECIAL:
+ case PUSH_EXC_INFO: {
+ ref tos = ref_stack_pop(&refs);
+ PUSH_REF(i, NOT_LOCAL);
+ PUSH_REF(tos.instr, tos.local);
+ break;
+ }
+
case SEND: {
load_fast_push_block(&sp, instr->i_target, refs.size);
ref_stack_pop(&refs);
diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h
index 5be2671700a..c8825df3ade 100644
--- a/Python/generated_cases.c.h
+++ b/Python/generated_cases.c.h
@@ -5753,43 +5753,18 @@
}
// _FOR_ITER
{
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (PyStackRef_IsTaggedInt(null_or_index)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- next = _PyForIter_NextWithIndex(iter_o, null_or_index);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (PyStackRef_IsNull(next)) {
- null_or_index = PyStackRef_TagInt(-1);
- JUMPBY(oparg + 1);
- stack_pointer[-1] = null_or_index;
- DISPATCH();
- }
- null_or_index = PyStackRef_IncrementTaggedIntNoOverflow(null_or_index);
- }
- else {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (next_o == NULL) {
- if (_PyErr_Occurred(tstate)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (!matches) {
- JUMP_TO_LABEL(error);
- }
- _PyFrame_SetStackPointer(frame, stack_pointer);
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- }
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
- JUMPBY(oparg + 1);
- DISPATCH();
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ JUMP_TO_LABEL(error);
}
- next = PyStackRef_FromPyObjectSteal(next_o);
+ JUMPBY(oparg + 1);
+ stack_pointer[-1] = null_or_index;
+ DISPATCH();
}
+ next = item;
}
stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
@@ -7061,44 +7036,20 @@
/* Skip 1 cache entry */
null_or_index = stack_pointer[-1];
iter = stack_pointer[-2];
- PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
- if (PyStackRef_IsTaggedInt(null_or_index)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- next = _PyForIter_NextWithIndex(iter_o, null_or_index);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (PyStackRef_IsNull(next)) {
- JUMPBY(oparg + 1);
- DISPATCH();
- }
- INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
- }
- else {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- PyObject *next_o = (*Py_TYPE(iter_o)->tp_iternext)(iter_o);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (next_o != NULL) {
- next = PyStackRef_FromPyObjectSteal(next_o);
- INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
- }
- else {
- if (_PyErr_Occurred(tstate)) {
- _PyFrame_SetStackPointer(frame, stack_pointer);
- int matches = _PyErr_ExceptionMatches(tstate, PyExc_StopIteration);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- if (!matches) {
- JUMP_TO_LABEL(error);
- }
- _PyFrame_SetStackPointer(frame, stack_pointer);
- _PyEval_MonitorRaise(tstate, frame, this_instr);
- _PyErr_Clear(tstate);
- stack_pointer = _PyFrame_GetStackPointer(frame);
- }
- assert(next_instr[oparg].op.code == END_FOR ||
- next_instr[oparg].op.code == INSTRUMENTED_END_FOR);
- JUMPBY(oparg + 1);
- DISPATCH();
+ _PyFrame_SetStackPointer(frame, stack_pointer);
+ _PyStackRef item = _PyForIter_VirtualIteratorNext(tstate, frame, iter, &null_or_index);
+ stack_pointer = _PyFrame_GetStackPointer(frame);
+ if (!PyStackRef_IsValid(item)) {
+ if (PyStackRef_IsError(item)) {
+ JUMP_TO_LABEL(error);
}
+ JUMPBY(oparg + 1);
+ stack_pointer[-1] = null_or_index;
+ DISPATCH();
}
+ next = item;
+ INSTRUMENTED_JUMP(this_instr, next_instr, PY_MONITORING_EVENT_BRANCH_LEFT);
+ stack_pointer[-1] = null_or_index;
stack_pointer[0] = next;
stack_pointer += 1;
assert(WITHIN_STACK_BOUNDS());
diff --git a/Python/import.c b/Python/import.c
index 98557991378..184dede335d 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -3964,8 +3964,10 @@ PyImport_Import(PyObject *module_name)
if (globals != NULL) {
Py_INCREF(globals);
builtins = PyObject_GetItem(globals, &_Py_ID(__builtins__));
- if (builtins == NULL)
+ if (builtins == NULL) {
+ // XXX Fall back to interp->builtins or sys.modules['builtins']?
goto err;
+ }
}
else {
/* No globals -- use standard builtins, and fake globals */
diff --git a/Python/lock.c b/Python/lock.c
index 28a12ad1835..b125ad0c9e3 100644
--- a/Python/lock.c
+++ b/Python/lock.c
@@ -119,6 +119,9 @@ _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
return PY_LOCK_INTR;
}
}
+ else if (ret == Py_PARK_INTR && (flags & _PY_FAIL_IF_INTERRUPTED)) {
+ return PY_LOCK_INTR;
+ }
else if (ret == Py_PARK_TIMEOUT) {
assert(timeout >= 0);
return PY_LOCK_FAILURE;
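
The new branch makes an interrupted park visible to callers that opt in via _PY_FAIL_IF_INTERRUPTED instead of being silently retried. A hedged sketch of the caller-side handling (the internal pycore_lock.h declarations are assumed; the infinite timeout and retry policy are illustrative):

PyMutex m = {0};
PyLockStatus st = _PyMutex_LockTimed(&m, -1, _PY_FAIL_IF_INTERRUPTED);
if (st == PY_LOCK_INTR) {
    /* The park was interrupted (e.g. by SIGINT) before the lock was taken:
       let the caller run pending signal handlers and decide whether to retry. */
}
else if (st == PY_LOCK_ACQUIRED) {
    /* ... critical section ... */
    PyMutex_Unlock(&m);
}
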
diff --git a/Python/optimizer_bytecodes.c b/Python/optimizer_bytecodes.c
index e1209209660..fbf4dfd3db6 100644
--- a/Python/optimizer_bytecodes.c
+++ b/Python/optimizer_bytecodes.c
@@ -840,6 +840,17 @@ dummy_func(void) {
value = sym_new_unknown(ctx);
}
+ op(_GET_ITER, (iterable -- iter, index_or_null)) {
+ if (sym_matches_type(iterable, &PyTuple_Type) || sym_matches_type(iterable, &PyList_Type)) {
+ iter = iterable;
+ index_or_null = sym_new_not_null(ctx);
+ }
+ else {
+ iter = sym_new_not_null(ctx);
+ index_or_null = sym_new_unknown(ctx);
+ }
+ }
+
op(_FOR_ITER_GEN_FRAME, (unused, unused -- unused, unused, gen_frame: _Py_UOpsAbstractFrame*)) {
gen_frame = NULL;
/* We are about to hit the end of the trace */
@@ -926,8 +937,11 @@ dummy_func(void) {
}
op(_CALL_TYPE_1, (unused, unused, arg -- res)) {
- if (sym_has_type(arg)) {
- res = sym_new_const(ctx, (PyObject *)sym_get_type(arg));
+ PyObject* type = (PyObject *)sym_get_type(arg);
+ if (type) {
+ res = sym_new_const(ctx, type);
+ REPLACE_OP(this_instr, _POP_CALL_ONE_LOAD_CONST_INLINE_BORROW, 0,
+ (uintptr_t)type);
}
else {
res = sym_new_not_null(ctx);
@@ -1223,6 +1237,20 @@ dummy_func(void) {
sym_set_const(callable, list_append);
}
+ op(_BINARY_SLICE, (container, start, stop -- res)) {
+ // Slicing a string/list/tuple always returns the same type.
+ PyTypeObject *type = sym_get_type(container);
+ if (type == &PyUnicode_Type ||
+ type == &PyList_Type ||
+ type == &PyTuple_Type)
+ {
+ res = sym_new_type(ctx, type);
+ }
+ else {
+ res = sym_new_not_null(ctx);
+ }
+ }
+
// END BYTECODES //
}
diff --git a/Python/optimizer_cases.c.h b/Python/optimizer_cases.c.h
index db86edcc785..b42f47c75ea 100644
--- a/Python/optimizer_cases.c.h
+++ b/Python/optimizer_cases.c.h
@@ -568,8 +568,19 @@
}
case _BINARY_SLICE: {
+ JitOptSymbol *container;
JitOptSymbol *res;
- res = sym_new_not_null(ctx);
+ container = stack_pointer[-3];
+ PyTypeObject *type = sym_get_type(container);
+ if (type == &PyUnicode_Type ||
+ type == &PyList_Type ||
+ type == &PyTuple_Type)
+ {
+ res = sym_new_type(ctx, type);
+ }
+ else {
+ res = sym_new_not_null(ctx);
+ }
stack_pointer[-3] = res;
stack_pointer += -2;
assert(WITHIN_STACK_BOUNDS());
@@ -1562,10 +1573,18 @@
}
case _GET_ITER: {
+ JitOptSymbol *iterable;
JitOptSymbol *iter;
JitOptSymbol *index_or_null;
- iter = sym_new_not_null(ctx);
- index_or_null = sym_new_not_null(ctx);
+ iterable = stack_pointer[-1];
+ if (sym_matches_type(iterable, &PyTuple_Type) || sym_matches_type(iterable, &PyList_Type)) {
+ iter = iterable;
+ index_or_null = sym_new_not_null(ctx);
+ }
+ else {
+ iter = sym_new_not_null(ctx);
+ index_or_null = sym_new_unknown(ctx);
+ }
stack_pointer[-1] = iter;
stack_pointer[0] = index_or_null;
stack_pointer += 1;
@@ -2048,8 +2067,11 @@
JitOptSymbol *arg;
JitOptSymbol *res;
arg = stack_pointer[-1];
- if (sym_has_type(arg)) {
- res = sym_new_const(ctx, (PyObject *)sym_get_type(arg));
+ PyObject* type = (PyObject *)sym_get_type(arg);
+ if (type) {
+ res = sym_new_const(ctx, type);
+ REPLACE_OP(this_instr, _POP_CALL_ONE_LOAD_CONST_INLINE_BORROW, 0,
+ (uintptr_t)type);
}
else {
res = sym_new_not_null(ctx);
diff --git a/Python/parking_lot.c b/Python/parking_lot.c
index 8edf4323594..e896dea0271 100644
--- a/Python/parking_lot.c
+++ b/Python/parking_lot.c
@@ -112,17 +112,27 @@ _PySemaphore_PlatformWait(_PySemaphore *sema, PyTime_t timeout)
}
}
- // NOTE: we wait on the sigint event even in non-main threads to match the
- // behavior of the other platforms. Non-main threads will ignore the
- // Py_PARK_INTR result.
- HANDLE sigint_event = _PyOS_SigintEvent();
- HANDLE handles[2] = { sema->platform_sem, sigint_event };
- DWORD count = sigint_event != NULL ? 2 : 1;
+ HANDLE handles[2] = { sema->platform_sem, NULL };
+ HANDLE sigint_event = NULL;
+ DWORD count = 1;
+ if (_Py_IsMainThread()) {
+ // gh-135099: Wait on the SIGINT event only in the main thread. Other
+        // threads would ignore the result anyway, and accessing
+ // `_PyOS_SigintEvent()` from non-main threads may race with
+ // interpreter shutdown, which closes the event handle. Note that
+ // non-main interpreters will ignore the result.
+ sigint_event = _PyOS_SigintEvent();
+ if (sigint_event != NULL) {
+ handles[1] = sigint_event;
+ count = 2;
+ }
+ }
wait = WaitForMultipleObjects(count, handles, FALSE, millis);
if (wait == WAIT_OBJECT_0) {
res = Py_PARK_OK;
}
else if (wait == WAIT_OBJECT_0 + 1) {
+ assert(sigint_event != NULL);
ResetEvent(sigint_event);
res = Py_PARK_INTR;
}
diff --git a/Python/perf_jit_trampoline.c b/Python/perf_jit_trampoline.c
index 5c7cb5b0a99..2ca18c23593 100644
--- a/Python/perf_jit_trampoline.c
+++ b/Python/perf_jit_trampoline.c
@@ -869,7 +869,11 @@ static void elf_init_ehframe(ELFObjectContext* ctx) {
*/
#ifdef __x86_64__
/* x86_64 calling convention unwinding rules */
+# if defined(__CET__) && (__CET__ & 1)
+ DWRF_U8(DWRF_CFA_advance_loc | 8); // Advance location by 8 bytes when CET protection is enabled
+# else
DWRF_U8(DWRF_CFA_advance_loc | 4); // Advance location by 4 bytes
+# endif
DWRF_U8(DWRF_CFA_def_cfa_offset); // Redefine CFA offset
DWRF_UV(16); // New offset: SP + 16
DWRF_U8(DWRF_CFA_advance_loc | 6); // Advance location by 6 bytes
diff --git a/Python/remote_debug.h b/Python/remote_debug.h
index 6cbf1c8deaa..0a817bdbd48 100644
--- a/Python/remote_debug.h
+++ b/Python/remote_debug.h
@@ -116,6 +116,8 @@ typedef struct {
mach_port_t task;
#elif defined(MS_WINDOWS)
HANDLE hProcess;
+#elif defined(__linux__)
+ int memfd;
#endif
page_cache_entry_t pages[MAX_PAGES];
Py_ssize_t page_size;
@@ -162,6 +164,8 @@ _Py_RemoteDebug_InitProcHandle(proc_handle_t *handle, pid_t pid) {
_set_debug_exception_cause(PyExc_RuntimeError, "Failed to initialize Windows process handle");
return -1;
}
+#elif defined(__linux__)
+ handle->memfd = -1;
#endif
handle->page_size = get_page_size();
for (int i = 0; i < MAX_PAGES; i++) {
@@ -179,6 +183,11 @@ _Py_RemoteDebug_CleanupProcHandle(proc_handle_t *handle) {
CloseHandle(handle->hProcess);
handle->hProcess = NULL;
}
+#elif defined(__linux__)
+ if (handle->memfd != -1) {
+ close(handle->memfd);
+ handle->memfd = -1;
+ }
#endif
handle->pid = 0;
_Py_RemoteDebug_FreePageCache(handle);
@@ -907,6 +916,61 @@ _Py_RemoteDebug_GetPyRuntimeAddress(proc_handle_t* handle)
return address;
}
+#if defined(__linux__) && HAVE_PROCESS_VM_READV
+
+static int
+open_proc_mem_fd(proc_handle_t *handle)
+{
+ char mem_file_path[64];
+ sprintf(mem_file_path, "/proc/%d/mem", handle->pid);
+
+ handle->memfd = open(mem_file_path, O_RDWR);
+ if (handle->memfd == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ _set_debug_exception_cause(PyExc_OSError,
+ "failed to open file %s: %s", mem_file_path, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+// Why is pwritev not guarded? Except on Android API level 23 (no longer
+// supported), HAVE_PROCESS_VM_READV is sufficient.
+static int
+read_remote_memory_fallback(proc_handle_t *handle, uintptr_t remote_address, size_t len, void* dst)
+{
+ if (handle->memfd == -1) {
+ if (open_proc_mem_fd(handle) < 0) {
+ return -1;
+ }
+ }
+
+ struct iovec local[1];
+ Py_ssize_t result = 0;
+ Py_ssize_t read_bytes = 0;
+
+ do {
+ local[0].iov_base = (char*)dst + result;
+ local[0].iov_len = len - result;
+ off_t offset = remote_address + result;
+
+ read_bytes = preadv(handle->memfd, local, 1, offset);
+ if (read_bytes < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ _set_debug_exception_cause(PyExc_OSError,
+ "preadv failed for PID %d at address 0x%lx "
+ "(size %zu, partial read %zd bytes): %s",
+ handle->pid, remote_address + result, len - result, result, strerror(errno));
+ return -1;
+ }
+
+ result += read_bytes;
+ } while ((size_t)read_bytes != local[0].iov_len);
+ return 0;
+}
+
+#endif // __linux__
+
// Platform-independent memory read function
static int
_Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address, size_t len, void* dst)
@@ -928,6 +992,9 @@ _Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address
} while (result < len);
return 0;
#elif defined(__linux__) && HAVE_PROCESS_VM_READV
+ if (handle->memfd != -1) {
+ return read_remote_memory_fallback(handle, remote_address, len, dst);
+ }
struct iovec local[1];
struct iovec remote[1];
Py_ssize_t result = 0;
@@ -941,6 +1008,9 @@ _Py_RemoteDebug_ReadRemoteMemory(proc_handle_t *handle, uintptr_t remote_address
read_bytes = process_vm_readv(handle->pid, local, 1, remote, 1, 0);
if (read_bytes < 0) {
+ if (errno == ENOSYS) {
+ return read_remote_memory_fallback(handle, remote_address, len, dst);
+ }
PyErr_SetFromErrno(PyExc_OSError);
_set_debug_exception_cause(PyExc_OSError,
"process_vm_readv failed for PID %d at address 0x%lx "
diff --git a/Python/remote_debugging.c b/Python/remote_debugging.c
index dd55b7812d4..7aee87ef05a 100644
--- a/Python/remote_debugging.c
+++ b/Python/remote_debugging.c
@@ -24,6 +24,39 @@ read_memory(proc_handle_t *handle, uint64_t remote_address, size_t len, void* ds
return _Py_RemoteDebug_ReadRemoteMemory(handle, remote_address, len, dst);
}
+// Why is pwritev not guarded? Except on Android API level 23 (no longer
+// supported), HAVE_PROCESS_VM_READV is sufficient.
+#if defined(__linux__) && HAVE_PROCESS_VM_READV
+static int
+write_memory_fallback(proc_handle_t *handle, uintptr_t remote_address, size_t len, const void* src)
+{
+ if (handle->memfd == -1) {
+ if (open_proc_mem_fd(handle) < 0) {
+ return -1;
+ }
+ }
+
+ struct iovec local[1];
+ Py_ssize_t result = 0;
+ Py_ssize_t written = 0;
+
+ do {
+ local[0].iov_base = (char*)src + result;
+ local[0].iov_len = len - result;
+ off_t offset = remote_address + result;
+
+ written = pwritev(handle->memfd, local, 1, offset);
+ if (written < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return -1;
+ }
+
+ result += written;
+ } while ((size_t)written != local[0].iov_len);
+ return 0;
+}
+#endif // __linux__
+
static int
write_memory(proc_handle_t *handle, uintptr_t remote_address, size_t len, const void* src)
{
@@ -39,6 +72,9 @@ write_memory(proc_handle_t *handle, uintptr_t remote_address, size_t len, const
} while (result < len);
return 0;
#elif defined(__linux__) && HAVE_PROCESS_VM_READV
+ if (handle->memfd != -1) {
+ return write_memory_fallback(handle, remote_address, len, src);
+ }
struct iovec local[1];
struct iovec remote[1];
Py_ssize_t result = 0;
@@ -52,6 +88,9 @@ write_memory(proc_handle_t *handle, uintptr_t remote_address, size_t len, const
written = process_vm_writev(handle->pid, local, 1, remote, 1, 0);
if (written < 0) {
+ if (errno == ENOSYS) {
+ return write_memory_fallback(handle, remote_address, len, src);
+ }
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
diff --git a/Python/stackrefs.c b/Python/stackrefs.c
index b2a1369031a..ecc0012ef17 100644
--- a/Python/stackrefs.c
+++ b/Python/stackrefs.c
@@ -40,6 +40,7 @@ make_table_entry(PyObject *obj, const char *filename, int linenumber)
PyObject *
_Py_stackref_get_object(_PyStackRef ref)
{
+ assert(!PyStackRef_IsError(ref));
if (ref.index == 0) {
return NULL;
}
@@ -64,6 +65,7 @@ PyStackRef_Is(_PyStackRef a, _PyStackRef b)
PyObject *
_Py_stackref_close(_PyStackRef ref, const char *filename, int linenumber)
{
+ assert(!PyStackRef_IsError(ref));
PyInterpreterState *interp = PyInterpreterState_Get();
if (ref.index >= interp->next_stackref) {
_Py_FatalErrorFormat(__func__, "Invalid StackRef with ID %" PRIu64 " at %s:%d\n", (void *)ref.index, filename, linenumber);
@@ -128,6 +130,7 @@ _Py_stackref_create(PyObject *obj, const char *filename, int linenumber)
void
_Py_stackref_record_borrow(_PyStackRef ref, const char *filename, int linenumber)
{
+ assert(!PyStackRef_IsError(ref));
if (ref.index < INITIAL_STACKREF_INDEX) {
return;
}
@@ -152,6 +155,7 @@ _Py_stackref_record_borrow(_PyStackRef ref, const char *filename, int linenumber
void
_Py_stackref_associate(PyInterpreterState *interp, PyObject *obj, _PyStackRef ref)
{
+ assert(!PyStackRef_IsError(ref));
assert(ref.index < INITIAL_STACKREF_INDEX);
TableEntry *entry = make_table_entry(obj, "builtin-object", 0);
if (entry == NULL) {
diff --git a/Python/thread.c b/Python/thread.c
index 4ff5f11a348..18c4af7f634 100644
--- a/Python/thread.c
+++ b/Python/thread.c
@@ -39,7 +39,8 @@
const long long PY_TIMEOUT_MAX = PY_TIMEOUT_MAX_VALUE;
-static void PyThread__init_thread(void); /* Forward */
+/* Forward declaration */
+static void PyThread__init_thread(void);
#define initialized _PyRuntime.threads.initialized
@@ -71,6 +72,79 @@ PyThread_init_thread(void)
#endif
+/*
+ * Lock support.
+ */
+
+PyThread_type_lock
+PyThread_allocate_lock(void)
+{
+ if (!initialized) {
+ PyThread_init_thread();
+ }
+
+ PyMutex *lock = (PyMutex *)PyMem_RawMalloc(sizeof(PyMutex));
+ if (lock) {
+ *lock = (PyMutex){0};
+ }
+
+ return (PyThread_type_lock)lock;
+}
+
+void
+PyThread_free_lock(PyThread_type_lock lock)
+{
+ PyMem_RawFree(lock);
+}
+
+PyLockStatus
+PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
+ int intr_flag)
+{
+ PyTime_t timeout; // relative timeout
+ if (microseconds >= 0) {
+ // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
+ // overflow to the caller, so clamp the timeout to
+ // [PyTime_MIN, PyTime_MAX].
+ //
+ // PyTime_MAX nanoseconds is around 292.3 years.
+ //
+ // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
+ // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
+ timeout = _PyTime_FromMicrosecondsClamp(microseconds);
+ }
+ else {
+ timeout = -1;
+ }
+
+ _PyLockFlags flags = _Py_LOCK_DONT_DETACH;
+ if (intr_flag) {
+ flags |= _PY_FAIL_IF_INTERRUPTED;
+ }
+
+ return _PyMutex_LockTimed((PyMutex *)lock, timeout, flags);
+}
+
+void
+PyThread_release_lock(PyThread_type_lock lock)
+{
+ PyMutex_Unlock((PyMutex *)lock);
+}
+
+int
+_PyThread_at_fork_reinit(PyThread_type_lock *lock)
+{
+ _PyMutex_at_fork_reinit((PyMutex *)lock);
+ return 0;
+}
+
+int
+PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
+{
+ return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
+}
+
+
/* return the current thread stack size */
size_t
PyThread_get_stacksize(void)
@@ -261,11 +335,7 @@ PyThread_GetInfo(void)
#ifdef HAVE_PTHREAD_STUBS
value = Py_NewRef(Py_None);
#elif defined(_POSIX_THREADS)
-#ifdef USE_SEMAPHORES
- value = PyUnicode_FromString("semaphore");
-#else
- value = PyUnicode_FromString("mutex+cond");
-#endif
+ value = PyUnicode_FromString("pymutex");
if (value == NULL) {
Py_DECREF(threadinfo);
return NULL;
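
With the lock primitives consolidated here on top of PyMutex, the PyThread_* calling convention is unchanged for existing callers; a minimal usage sketch (the one-second timeout is arbitrary):

PyThread_type_lock lock = PyThread_allocate_lock();
if (lock == NULL) {
    /* allocation failed */
}
/* Wait up to one second (the timeout is in microseconds) and allow the
   wait to be interrupted by a signal. */
PyLockStatus st = PyThread_acquire_lock_timed(lock, 1000000, /*intr_flag=*/1);
if (st == PY_LOCK_ACQUIRED) {
    /* ... critical section ... */
    PyThread_release_lock(lock);
}
else if (st == PY_LOCK_INTR) {
    /* interrupted before acquiring; the caller may retry */
}
PyThread_free_lock(lock);
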
diff --git a/Python/thread_nt.h b/Python/thread_nt.h
index e078b98be3c..9a29d14ef67 100644
--- a/Python/thread_nt.h
+++ b/Python/thread_nt.h
@@ -300,98 +300,6 @@ PyThread_hang_thread(void)
}
}
-/*
- * Lock support. It has to be implemented as semaphores.
- * I [Dag] tried to implement it with mutex but I could find a way to
- * tell whether a thread already own the lock or not.
- */
-PyThread_type_lock
-PyThread_allocate_lock(void)
-{
- PNRMUTEX mutex;
-
- if (!initialized)
- PyThread_init_thread();
-
- mutex = AllocNonRecursiveMutex() ;
-
- PyThread_type_lock aLock = (PyThread_type_lock) mutex;
- assert(aLock);
-
- return aLock;
-}
-
-void
-PyThread_free_lock(PyThread_type_lock aLock)
-{
- FreeNonRecursiveMutex(aLock) ;
-}
-
-// WaitForSingleObject() accepts timeout in milliseconds in the range
-// [0; 0xFFFFFFFE] (DWORD type). INFINITE value (0xFFFFFFFF) means no
-// timeout. 0xFFFFFFFE milliseconds is around 49.7 days.
-const DWORD TIMEOUT_MS_MAX = 0xFFFFFFFE;
-
-/*
- * Return 1 on success if the lock was acquired
- *
- * and 0 if the lock was not acquired. This means a 0 is returned
- * if the lock has already been acquired by this thread!
- */
-PyLockStatus
-PyThread_acquire_lock_timed(PyThread_type_lock aLock,
- PY_TIMEOUT_T microseconds, int intr_flag)
-{
- assert(aLock);
-
- /* Fow now, intr_flag does nothing on Windows, and lock acquires are
- * uninterruptible. */
- PyLockStatus success;
- PY_TIMEOUT_T milliseconds;
-
- if (microseconds >= 0) {
- milliseconds = microseconds / 1000;
- // Round milliseconds away from zero
- if (microseconds % 1000 > 0) {
- milliseconds++;
- }
- if (milliseconds > (PY_TIMEOUT_T)TIMEOUT_MS_MAX) {
- // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
- // overflow to the caller, so clamp the timeout to
- // [0, TIMEOUT_MS_MAX] milliseconds.
- //
- // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
- // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
- milliseconds = TIMEOUT_MS_MAX;
- }
- assert(milliseconds != INFINITE);
- }
- else {
- milliseconds = INFINITE;
- }
-
- if (EnterNonRecursiveMutex((PNRMUTEX)aLock,
- (DWORD)milliseconds) == WAIT_OBJECT_0) {
- success = PY_LOCK_ACQUIRED;
- }
- else {
- success = PY_LOCK_FAILURE;
- }
-
- return success;
-}
-int
-PyThread_acquire_lock(PyThread_type_lock aLock, int waitflag)
-{
- return PyThread_acquire_lock_timed(aLock, waitflag ? -1 : 0, 0);
-}
-
-void
-PyThread_release_lock(PyThread_type_lock aLock)
-{
- assert(aLock);
- (void)LeaveNonRecursiveMutex((PNRMUTEX) aLock);
-}
/* minimum/maximum thread stack sizes supported */
#define THREAD_MIN_STACKSIZE 0x8000 /* 32 KiB */
diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h
index da405824244..13992f95723 100644
--- a/Python/thread_pthread.h
+++ b/Python/thread_pthread.h
@@ -99,16 +99,6 @@
#undef HAVE_SEM_CLOCKWAIT
#endif
-/* Whether or not to use semaphores directly rather than emulating them with
- * mutexes and condition variables:
- */
-#if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
- (defined(HAVE_SEM_TIMEDWAIT) || defined(HAVE_SEM_CLOCKWAIT)))
-# define USE_SEMAPHORES
-#else
-# undef USE_SEMAPHORES
-#endif
-
/* On platforms that don't use standard POSIX threads pthread_sigmask()
* isn't present. DEC threads uses sigprocmask() instead as do most
@@ -442,388 +432,6 @@ PyThread_hang_thread(void)
}
}
-#ifdef USE_SEMAPHORES
-
-/*
- * Lock support.
- */
-
-PyThread_type_lock
-PyThread_allocate_lock(void)
-{
- sem_t *lock;
- int status, error = 0;
-
- if (!initialized)
- PyThread_init_thread();
-
- lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));
-
- if (lock) {
- status = sem_init(lock,0,1);
- CHECK_STATUS("sem_init");
-
- if (error) {
- PyMem_RawFree((void *)lock);
- lock = NULL;
- }
- }
-
- return (PyThread_type_lock)lock;
-}
-
-void
-PyThread_free_lock(PyThread_type_lock lock)
-{
- sem_t *thelock = (sem_t *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- if (!thelock)
- return;
-
- status = sem_destroy(thelock);
- CHECK_STATUS("sem_destroy");
-
- PyMem_RawFree((void *)thelock);
-}
-
-/*
- * As of February 2002, Cygwin thread implementations mistakenly report error
- * codes in the return value of the sem_ calls (like the pthread_ functions).
- * Correct implementations return -1 and put the code in errno. This supports
- * either.
- */
-static int
-fix_status(int status)
-{
- return (status == -1) ? errno : status;
-}
-
-PyLockStatus
-PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
- int intr_flag)
-{
- PyLockStatus success;
- sem_t *thelock = (sem_t *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- PyTime_t timeout; // relative timeout
- if (microseconds >= 0) {
- // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
- // overflow to the caller, so clamp the timeout to
- // [PyTime_MIN, PyTime_MAX].
- //
- // PyTime_MAX nanoseconds is around 292.3 years.
- //
- // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
- // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
- timeout = _PyTime_FromMicrosecondsClamp(microseconds);
- }
- else {
- timeout = -1;
- }
-
-#ifdef HAVE_SEM_CLOCKWAIT
- struct timespec abs_timeout;
- // Local scope for deadline
- {
- PyTime_t now;
- // silently ignore error: cannot report error to the caller
- (void)PyTime_MonotonicRaw(&now);
- PyTime_t deadline = _PyTime_Add(now, timeout);
- _PyTime_AsTimespec_clamp(deadline, &abs_timeout);
- }
-#else
- PyTime_t deadline = 0;
- if (timeout > 0 && !intr_flag) {
- deadline = _PyDeadline_Init(timeout);
- }
-#endif
-
- while (1) {
- if (timeout > 0) {
-#ifdef HAVE_SEM_CLOCKWAIT
- status = fix_status(sem_clockwait(thelock, CLOCK_MONOTONIC,
- &abs_timeout));
-#else
- PyTime_t now;
- // silently ignore error: cannot report error to the caller
- (void)PyTime_TimeRaw(&now);
- PyTime_t abs_time = _PyTime_Add(now, timeout);
-
- struct timespec ts;
- _PyTime_AsTimespec_clamp(abs_time, &ts);
- status = fix_status(sem_timedwait(thelock, &ts));
-#endif
- }
- else if (timeout == 0) {
- status = fix_status(sem_trywait(thelock));
- }
- else {
- status = fix_status(sem_wait(thelock));
- }
-
- /* Retry if interrupted by a signal, unless the caller wants to be
- notified. */
- if (intr_flag || status != EINTR) {
- break;
- }
-
- // sem_clockwait() uses an absolute timeout, there is no need
- // to recompute the relative timeout.
-#ifndef HAVE_SEM_CLOCKWAIT
- if (timeout > 0) {
- /* wait interrupted by a signal (EINTR): recompute the timeout */
- timeout = _PyDeadline_Get(deadline);
- if (timeout < 0) {
- status = ETIMEDOUT;
- break;
- }
- }
-#endif
- }
-
- /* Don't check the status if we're stopping because of an interrupt. */
- if (!(intr_flag && status == EINTR)) {
- if (timeout > 0) {
- if (status != ETIMEDOUT) {
-#ifdef HAVE_SEM_CLOCKWAIT
- CHECK_STATUS("sem_clockwait");
-#else
- CHECK_STATUS("sem_timedwait");
-#endif
- }
- }
- else if (timeout == 0) {
- if (status != EAGAIN) {
- CHECK_STATUS("sem_trywait");
- }
- }
- else {
- CHECK_STATUS("sem_wait");
- }
- }
-
- if (status == 0) {
- success = PY_LOCK_ACQUIRED;
- } else if (intr_flag && status == EINTR) {
- success = PY_LOCK_INTR;
- } else {
- success = PY_LOCK_FAILURE;
- }
-
- return success;
-}
-
-void
-PyThread_release_lock(PyThread_type_lock lock)
-{
- sem_t *thelock = (sem_t *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- status = sem_post(thelock);
- CHECK_STATUS("sem_post");
-}
-
-#else /* USE_SEMAPHORES */
-
-/*
- * Lock support.
- */
-PyThread_type_lock
-PyThread_allocate_lock(void)
-{
- pthread_lock *lock;
- int status, error = 0;
-
- if (!initialized)
- PyThread_init_thread();
-
- lock = (pthread_lock *) PyMem_RawCalloc(1, sizeof(pthread_lock));
- if (lock) {
- lock->locked = 0;
-
- status = pthread_mutex_init(&lock->mut, NULL);
- CHECK_STATUS_PTHREAD("pthread_mutex_init");
- /* Mark the pthread mutex underlying a Python mutex as
- pure happens-before. We can't simply mark the
- Python-level mutex as a mutex because it can be
- acquired and released in different threads, which
- will cause errors. */
- _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);
-
- status = _PyThread_cond_init(&lock->lock_released);
- CHECK_STATUS_PTHREAD("pthread_cond_init");
-
- if (error) {
- PyMem_RawFree((void *)lock);
- lock = 0;
- }
- }
-
- return (PyThread_type_lock) lock;
-}
-
-void
-PyThread_free_lock(PyThread_type_lock lock)
-{
- pthread_lock *thelock = (pthread_lock *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- /* some pthread-like implementations tie the mutex to the cond
- * and must have the cond destroyed first.
- */
- status = pthread_cond_destroy( &thelock->lock_released );
- CHECK_STATUS_PTHREAD("pthread_cond_destroy");
-
- status = pthread_mutex_destroy( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_destroy");
-
- PyMem_RawFree((void *)thelock);
-}
-
-PyLockStatus
-PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
- int intr_flag)
-{
- PyLockStatus success = PY_LOCK_FAILURE;
- pthread_lock *thelock = (pthread_lock *)lock;
- int status, error = 0;
-
- if (microseconds == 0) {
- status = pthread_mutex_trylock( &thelock->mut );
- if (status != EBUSY) {
- CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
- }
- }
- else {
- status = pthread_mutex_lock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
- }
- if (status != 0) {
- goto done;
- }
-
- if (thelock->locked == 0) {
- success = PY_LOCK_ACQUIRED;
- goto unlock;
- }
- if (microseconds == 0) {
- goto unlock;
- }
-
- struct timespec abs_timeout;
- if (microseconds > 0) {
- _PyThread_cond_after(microseconds, &abs_timeout);
- }
- // Continue trying until we get the lock
-
- // mut must be locked by me -- part of the condition protocol
- while (1) {
- if (microseconds > 0) {
- status = pthread_cond_timedwait(&thelock->lock_released,
- &thelock->mut, &abs_timeout);
- if (status == 1) {
- break;
- }
- if (status == ETIMEDOUT) {
- break;
- }
- CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
- }
- else {
- status = pthread_cond_wait(
- &thelock->lock_released,
- &thelock->mut);
- CHECK_STATUS_PTHREAD("pthread_cond_wait");
- }
-
- if (intr_flag && status == 0 && thelock->locked) {
- // We were woken up, but didn't get the lock. We probably received
- // a signal. Return PY_LOCK_INTR to allow the caller to handle
- // it and retry.
- success = PY_LOCK_INTR;
- break;
- }
-
- if (status == 0 && !thelock->locked) {
- success = PY_LOCK_ACQUIRED;
- break;
- }
-
- // Wait got interrupted by a signal: retry
- }
-
-unlock:
- if (success == PY_LOCK_ACQUIRED) {
- thelock->locked = 1;
- }
- status = pthread_mutex_unlock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");
-
-done:
- if (error) {
- success = PY_LOCK_FAILURE;
- }
- return success;
-}
-
-void
-PyThread_release_lock(PyThread_type_lock lock)
-{
- pthread_lock *thelock = (pthread_lock *)lock;
- int status, error = 0;
-
- (void) error; /* silence unused-but-set-variable warning */
-
- status = pthread_mutex_lock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");
-
- thelock->locked = 0;
-
- /* wake up someone (anyone, if any) waiting on the lock */
- status = pthread_cond_signal( &thelock->lock_released );
- CHECK_STATUS_PTHREAD("pthread_cond_signal");
-
- status = pthread_mutex_unlock( &thelock->mut );
- CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
-}
-
-#endif /* USE_SEMAPHORES */
-
-int
-_PyThread_at_fork_reinit(PyThread_type_lock *lock)
-{
- PyThread_type_lock new_lock = PyThread_allocate_lock();
- if (new_lock == NULL) {
- return -1;
- }
-
- /* bpo-6721, bpo-40089: The old lock can be in an inconsistent state.
- fork() can be called in the middle of an operation on the lock done by
- another thread. So don't call PyThread_free_lock(*lock).
-
- Leak memory on purpose. Don't release the memory either since the
- address of a mutex is relevant. Putting two mutexes at the same address
- can lead to problems. */
-
- *lock = new_lock;
- return 0;
-}
-
-int
-PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
-{
- return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
-}
/* set the thread stack size.
* Return 0 if size is valid, -1 if size is invalid,