aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/Python
diff options
context:
space:
mode:
Diffstat (limited to 'Python')
-rw-r--r--Python/bytecodes.c15
-rw-r--r--Python/ceval.c14
-rw-r--r--Python/executor_cases.c.h17
-rw-r--r--Python/flowgraph.c4
-rw-r--r--Python/generated_cases.c.h17
-rw-r--r--Python/lock.c20
-rw-r--r--Python/marshal.c3
-rw-r--r--Python/optimizer_bytecodes.c4
-rw-r--r--Python/optimizer_cases.c.h6
-rw-r--r--Python/remote_debug.h78
-rw-r--r--Python/stdlib_module_names.h3
11 files changed, 62 insertions, 119 deletions
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index 1a5a9ff13a2..a5b74d88d7d 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -2327,19 +2327,18 @@ dummy_func(
#endif /* ENABLE_SPECIALIZATION_FT */
}
- op(_LOAD_ATTR, (owner -- attr, self_or_null[oparg&1])) {
+ op(_LOAD_ATTR, (owner -- attr[1], self_or_null[oparg&1])) {
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 1);
- PyObject *attr_o;
if (oparg & 1) {
/* Designed to work in tandem with CALL, pushes two values. */
- attr_o = NULL;
- int is_meth = _PyObject_GetMethod(PyStackRef_AsPyObjectBorrow(owner), name, &attr_o);
+ *attr = PyStackRef_NULL;
+ int is_meth = _PyObject_GetMethodStackRef(tstate, PyStackRef_AsPyObjectBorrow(owner), name, attr);
if (is_meth) {
/* We can bypass temporary bound method object.
meth is unbound method and obj is self.
meth | self | arg1 | ... | argN
*/
- assert(attr_o != NULL); // No errors on this branch
+ assert(!PyStackRef_IsNull(*attr)); // No errors on this branch
self_or_null[0] = owner; // Transfer ownership
DEAD(owner);
}
@@ -2351,17 +2350,17 @@ dummy_func(
meth | NULL | arg1 | ... | argN
*/
PyStackRef_CLOSE(owner);
- ERROR_IF(attr_o == NULL);
+ ERROR_IF(PyStackRef_IsNull(*attr));
self_or_null[0] = PyStackRef_NULL;
}
}
else {
/* Classic, pushes one value. */
- attr_o = PyObject_GetAttr(PyStackRef_AsPyObjectBorrow(owner), name);
+ PyObject *attr_o = PyObject_GetAttr(PyStackRef_AsPyObjectBorrow(owner), name);
PyStackRef_CLOSE(owner);
ERROR_IF(attr_o == NULL);
+ *attr = PyStackRef_FromPyObjectSteal(attr_o);
}
- attr = PyStackRef_FromPyObjectSteal(attr_o);
}
macro(LOAD_ATTR) =
diff --git a/Python/ceval.c b/Python/ceval.c
index d1de4875656..50665defd38 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -346,13 +346,13 @@ _Py_ReachedRecursionLimitWithMargin(PyThreadState *tstate, int margin_count)
{
uintptr_t here_addr = _Py_get_machine_stack_pointer();
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
- if (here_addr > _tstate->c_stack_soft_limit + margin_count * PYOS_STACK_MARGIN_BYTES) {
+ if (here_addr > _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES) {
return 0;
}
if (_tstate->c_stack_hard_limit == 0) {
_Py_InitializeRecursionLimits(tstate);
}
- return here_addr <= _tstate->c_stack_soft_limit + margin_count * PYOS_STACK_MARGIN_BYTES;
+ return here_addr <= _tstate->c_stack_soft_limit + margin_count * _PyOS_STACK_MARGIN_BYTES;
}
void
@@ -448,8 +448,8 @@ _Py_InitializeRecursionLimits(PyThreadState *tstate)
_tstate->c_stack_top = (uintptr_t)high;
ULONG guarantee = 0;
SetThreadStackGuarantee(&guarantee);
- _tstate->c_stack_hard_limit = ((uintptr_t)low) + guarantee + PYOS_STACK_MARGIN_BYTES;
- _tstate->c_stack_soft_limit = _tstate->c_stack_hard_limit + PYOS_STACK_MARGIN_BYTES;
+ _tstate->c_stack_hard_limit = ((uintptr_t)low) + guarantee + _PyOS_STACK_MARGIN_BYTES;
+ _tstate->c_stack_soft_limit = _tstate->c_stack_hard_limit + _PyOS_STACK_MARGIN_BYTES;
#else
uintptr_t here_addr = _Py_get_machine_stack_pointer();
# if defined(HAVE_PTHREAD_GETATTR_NP) && !defined(_AIX) && !defined(__NetBSD__)
@@ -469,9 +469,9 @@ _Py_InitializeRecursionLimits(PyThreadState *tstate)
// Thread sanitizer crashes if we use a bit more than half the stack.
_tstate->c_stack_soft_limit = base + (stack_size / 2);
#else
- _tstate->c_stack_soft_limit = base + PYOS_STACK_MARGIN_BYTES * 2;
+ _tstate->c_stack_soft_limit = base + _PyOS_STACK_MARGIN_BYTES * 2;
#endif
- _tstate->c_stack_hard_limit = base + PYOS_STACK_MARGIN_BYTES;
+ _tstate->c_stack_hard_limit = base + _PyOS_STACK_MARGIN_BYTES;
assert(_tstate->c_stack_soft_limit < here_addr);
assert(here_addr < _tstate->c_stack_top);
return;
@@ -479,7 +479,7 @@ _Py_InitializeRecursionLimits(PyThreadState *tstate)
# endif
_tstate->c_stack_top = _Py_SIZE_ROUND_UP(here_addr, 4096);
_tstate->c_stack_soft_limit = _tstate->c_stack_top - Py_C_STACK_SIZE;
- _tstate->c_stack_hard_limit = _tstate->c_stack_top - (Py_C_STACK_SIZE + PYOS_STACK_MARGIN_BYTES);
+ _tstate->c_stack_hard_limit = _tstate->c_stack_top - (Py_C_STACK_SIZE + _PyOS_STACK_MARGIN_BYTES);
#endif
}
diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h
index 46fc164a5b3..276c320c5f4 100644
--- a/Python/executor_cases.c.h
+++ b/Python/executor_cases.c.h
@@ -3301,20 +3301,20 @@
case _LOAD_ATTR: {
_PyStackRef owner;
- _PyStackRef attr;
+ _PyStackRef *attr;
_PyStackRef *self_or_null;
oparg = CURRENT_OPARG();
owner = stack_pointer[-1];
+ attr = &stack_pointer[-1];
self_or_null = &stack_pointer[0];
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 1);
- PyObject *attr_o;
if (oparg & 1) {
- attr_o = NULL;
+ *attr = PyStackRef_NULL;
_PyFrame_SetStackPointer(frame, stack_pointer);
- int is_meth = _PyObject_GetMethod(PyStackRef_AsPyObjectBorrow(owner), name, &attr_o);
+ int is_meth = _PyObject_GetMethodStackRef(tstate, PyStackRef_AsPyObjectBorrow(owner), name, attr);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (is_meth) {
- assert(attr_o != NULL);
+ assert(!PyStackRef_IsNull(*attr));
self_or_null[0] = owner;
}
else {
@@ -3323,7 +3323,7 @@
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (attr_o == NULL) {
+ if (PyStackRef_IsNull(*attr)) {
JUMP_TO_ERROR();
}
self_or_null[0] = PyStackRef_NULL;
@@ -3332,7 +3332,7 @@
}
else {
_PyFrame_SetStackPointer(frame, stack_pointer);
- attr_o = PyObject_GetAttr(PyStackRef_AsPyObjectBorrow(owner), name);
+ PyObject *attr_o = PyObject_GetAttr(PyStackRef_AsPyObjectBorrow(owner), name);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
assert(WITHIN_STACK_BOUNDS());
@@ -3342,10 +3342,9 @@
if (attr_o == NULL) {
JUMP_TO_ERROR();
}
+ *attr = PyStackRef_FromPyObjectSteal(attr_o);
stack_pointer += 1;
}
- attr = PyStackRef_FromPyObjectSteal(attr_o);
- stack_pointer[-1] = attr;
stack_pointer += (oparg&1);
assert(WITHIN_STACK_BOUNDS());
break;
diff --git a/Python/flowgraph.c b/Python/flowgraph.c
index 2adc8c84d83..1cb6f03169e 100644
--- a/Python/flowgraph.c
+++ b/Python/flowgraph.c
@@ -1892,6 +1892,10 @@ eval_const_unaryop(PyObject *operand, int opcode, int oparg)
result = PyNumber_Negative(operand);
break;
case UNARY_INVERT:
+ // XXX: This should be removed once the ~bool deprecation expires.
+ if (PyBool_Check(operand)) {
+ return NULL;
+ }
result = PyNumber_Invert(operand);
break;
case UNARY_NOT: {
diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h
index 8f7932f0033..bb153bc1c0e 100644
--- a/Python/generated_cases.c.h
+++ b/Python/generated_cases.c.h
@@ -7941,7 +7941,7 @@
_Py_CODEUNIT* const this_instr = next_instr - 10;
(void)this_instr;
_PyStackRef owner;
- _PyStackRef attr;
+ _PyStackRef *attr;
_PyStackRef *self_or_null;
// _SPECIALIZE_LOAD_ATTR
{
@@ -7964,16 +7964,16 @@
/* Skip 8 cache entries */
// _LOAD_ATTR
{
+ attr = &stack_pointer[-1];
self_or_null = &stack_pointer[0];
PyObject *name = GETITEM(FRAME_CO_NAMES, oparg >> 1);
- PyObject *attr_o;
if (oparg & 1) {
- attr_o = NULL;
+ *attr = PyStackRef_NULL;
_PyFrame_SetStackPointer(frame, stack_pointer);
- int is_meth = _PyObject_GetMethod(PyStackRef_AsPyObjectBorrow(owner), name, &attr_o);
+ int is_meth = _PyObject_GetMethodStackRef(tstate, PyStackRef_AsPyObjectBorrow(owner), name, attr);
stack_pointer = _PyFrame_GetStackPointer(frame);
if (is_meth) {
- assert(attr_o != NULL);
+ assert(!PyStackRef_IsNull(*attr));
self_or_null[0] = owner;
}
else {
@@ -7982,7 +7982,7 @@
_PyFrame_SetStackPointer(frame, stack_pointer);
PyStackRef_CLOSE(owner);
stack_pointer = _PyFrame_GetStackPointer(frame);
- if (attr_o == NULL) {
+ if (PyStackRef_IsNull(*attr)) {
JUMP_TO_LABEL(error);
}
self_or_null[0] = PyStackRef_NULL;
@@ -7991,7 +7991,7 @@
}
else {
_PyFrame_SetStackPointer(frame, stack_pointer);
- attr_o = PyObject_GetAttr(PyStackRef_AsPyObjectBorrow(owner), name);
+ PyObject *attr_o = PyObject_GetAttr(PyStackRef_AsPyObjectBorrow(owner), name);
stack_pointer = _PyFrame_GetStackPointer(frame);
stack_pointer += -1;
assert(WITHIN_STACK_BOUNDS());
@@ -8001,11 +8001,10 @@
if (attr_o == NULL) {
JUMP_TO_LABEL(error);
}
+ *attr = PyStackRef_FromPyObjectSteal(attr_o);
stack_pointer += 1;
}
- attr = PyStackRef_FromPyObjectSteal(attr_o);
}
- stack_pointer[-1] = attr;
stack_pointer += (oparg&1);
assert(WITHIN_STACK_BOUNDS());
DISPATCH();
diff --git a/Python/lock.c b/Python/lock.c
index ea6ac00bfec..a49d587a168 100644
--- a/Python/lock.c
+++ b/Python/lock.c
@@ -95,6 +95,18 @@ _PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
if (timeout == 0) {
return PY_LOCK_FAILURE;
}
+ if ((flags & _PY_LOCK_PYTHONLOCK) && Py_IsFinalizing()) {
+ // At this phase of runtime shutdown, only the finalization thread
+ // can have attached thread state; others hang if they try
+ // attaching. And since operations on this lock require attached
+ // thread state (_PY_LOCK_PYTHONLOCK), the finalization thread is
+ // running this code, and no other thread can unlock.
+ // Raise rather than hang. (_PY_LOCK_PYTHONLOCK allows raising
+ // exceptions.)
+ PyErr_SetString(PyExc_PythonFinalizationError,
+ "cannot acquire lock at interpreter finalization");
+ return PY_LOCK_FAILURE;
+ }
uint8_t newv = v;
if (!(v & _Py_HAS_PARKED)) {
@@ -622,3 +634,11 @@ PyMutex_Unlock(PyMutex *m)
Py_FatalError("unlocking mutex that is not locked");
}
}
+
+
+#undef PyMutex_IsLocked
+int
+PyMutex_IsLocked(PyMutex *m)
+{
+ return _PyMutex_IsLocked(m);
+}
diff --git a/Python/marshal.c b/Python/marshal.c
index afbef6ee679..15dd25d6268 100644
--- a/Python/marshal.c
+++ b/Python/marshal.c
@@ -1656,6 +1656,9 @@ r_object(RFILE *p)
case TYPE_SLICE:
{
Py_ssize_t idx = r_ref_reserve(flag, p);
+ if (idx < 0) {
+ break;
+ }
PyObject *stop = NULL;
PyObject *step = NULL;
PyObject *start = r_object(p);
diff --git a/Python/optimizer_bytecodes.c b/Python/optimizer_bytecodes.c
index 3182e8b3b70..aeff76affd8 100644
--- a/Python/optimizer_bytecodes.c
+++ b/Python/optimizer_bytecodes.c
@@ -590,9 +590,9 @@ dummy_func(void) {
}
}
- op(_LOAD_ATTR, (owner -- attr, self_or_null[oparg&1])) {
+ op(_LOAD_ATTR, (owner -- attr[1], self_or_null[oparg&1])) {
(void)owner;
- attr = sym_new_not_null(ctx);
+ *attr = sym_new_not_null(ctx);
if (oparg & 1) {
self_or_null[0] = sym_new_unknown(ctx);
}
diff --git a/Python/optimizer_cases.c.h b/Python/optimizer_cases.c.h
index 8d30df3aa7d..82660d02a4e 100644
--- a/Python/optimizer_cases.c.h
+++ b/Python/optimizer_cases.c.h
@@ -1414,16 +1414,16 @@
case _LOAD_ATTR: {
JitOptRef owner;
- JitOptRef attr;
+ JitOptRef *attr;
JitOptRef *self_or_null;
owner = stack_pointer[-1];
+ attr = &stack_pointer[-1];
self_or_null = &stack_pointer[0];
(void)owner;
- attr = sym_new_not_null(ctx);
+ *attr = sym_new_not_null(ctx);
if (oparg & 1) {
self_or_null[0] = sym_new_unknown(ctx);
}
- stack_pointer[-1] = attr;
stack_pointer += (oparg&1);
assert(WITHIN_STACK_BOUNDS());
break;
diff --git a/Python/remote_debug.h b/Python/remote_debug.h
index 8f9b6cd4c49..d1fcb478d2b 100644
--- a/Python/remote_debug.h
+++ b/Python/remote_debug.h
@@ -110,14 +110,6 @@ get_page_size(void) {
return page_size;
}
-typedef struct page_cache_entry {
- uintptr_t page_addr; // page-aligned base address
- char *data;
- int valid;
- struct page_cache_entry *next;
-} page_cache_entry_t;
-
-#define MAX_PAGES 1024
// Define a platform-independent process handle structure
typedef struct {
@@ -129,27 +121,9 @@ typedef struct {
#elif defined(__linux__)
int memfd;
#endif
- page_cache_entry_t pages[MAX_PAGES];
Py_ssize_t page_size;
} proc_handle_t;
-static void
-_Py_RemoteDebug_FreePageCache(proc_handle_t *handle)
-{
- for (int i = 0; i < MAX_PAGES; i++) {
- PyMem_RawFree(handle->pages[i].data);
- handle->pages[i].data = NULL;
- handle->pages[i].valid = 0;
- }
-}
-
-UNUSED static void
-_Py_RemoteDebug_ClearCache(proc_handle_t *handle)
-{
- for (int i = 0; i < MAX_PAGES; i++) {
- handle->pages[i].valid = 0;
- }
-}
#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
static mach_port_t pid_to_task(pid_t pid);
@@ -178,10 +152,6 @@ _Py_RemoteDebug_InitProcHandle(proc_handle_t *handle, pid_t pid) {
handle->memfd = -1;
#endif
handle->page_size = get_page_size();
- for (int i = 0; i < MAX_PAGES; i++) {
- handle->pages[i].data = NULL;
- handle->pages[i].valid = 0;
- }
return 0;
}
@@ -200,7 +170,6 @@ _Py_RemoteDebug_CleanupProcHandle(proc_handle_t *handle) {
}
#endif
handle->pid = 0;
- _Py_RemoteDebug_FreePageCache(handle);
}
#if defined(__APPLE__) && defined(TARGET_OS_OSX) && TARGET_OS_OSX
@@ -1066,53 +1035,6 @@ _Py_RemoteDebug_PagedReadRemoteMemory(proc_handle_t *handle,
size_t size,
void *out)
{
- size_t page_size = handle->page_size;
- uintptr_t page_base = addr & ~(page_size - 1);
- size_t offset_in_page = addr - page_base;
-
- if (offset_in_page + size > page_size) {
- return _Py_RemoteDebug_ReadRemoteMemory(handle, addr, size, out);
- }
-
- // Search for valid cached page
- for (int i = 0; i < MAX_PAGES; i++) {
- page_cache_entry_t *entry = &handle->pages[i];
- if (entry->valid && entry->page_addr == page_base) {
- memcpy(out, entry->data + offset_in_page, size);
- return 0;
- }
- }
-
- // Find reusable slot
- for (int i = 0; i < MAX_PAGES; i++) {
- page_cache_entry_t *entry = &handle->pages[i];
- if (!entry->valid) {
- if (entry->data == NULL) {
- entry->data = PyMem_RawMalloc(page_size);
- if (entry->data == NULL) {
- _set_debug_exception_cause(PyExc_MemoryError,
- "Cannot allocate %zu bytes for page cache entry "
- "during read from PID %d at address 0x%lx",
- page_size, handle->pid, addr);
- return -1;
- }
- }
-
- if (_Py_RemoteDebug_ReadRemoteMemory(handle, page_base, page_size, entry->data) < 0) {
- // Try to just copy the exact ammount as a fallback
- PyErr_Clear();
- goto fallback;
- }
-
- entry->page_addr = page_base;
- entry->valid = 1;
- memcpy(out, entry->data + offset_in_page, size);
- return 0;
- }
- }
-
-fallback:
- // Cache full — fallback to uncached read
return _Py_RemoteDebug_ReadRemoteMemory(handle, addr, size, out);
}
diff --git a/Python/stdlib_module_names.h b/Python/stdlib_module_names.h
index 56e349a544c..63e4599c31e 100644
--- a/Python/stdlib_module_names.h
+++ b/Python/stdlib_module_names.h
@@ -245,9 +245,6 @@ static const char* _Py_stdlib_module_names[] = {
"socket",
"socketserver",
"sqlite3",
-"sre_compile",
-"sre_constants",
-"sre_parse",
"ssl",
"stat",
"statistics",