about summary refs log tree commit diff stats homepage
path: root/Objects
diff options
context:
space:
mode:
Diffstat (limited to 'Objects')
-rw-r--r--  Objects/bytesobject.c          41
-rw-r--r--  Objects/call.c                 35
-rw-r--r--  Objects/codeobject.c            2
-rw-r--r--  Objects/dictobject.c            5
-rw-r--r--  Objects/frameobject.c           4
-rw-r--r--  Objects/funcobject.c           34
-rw-r--r--  Objects/genericaliasobject.c    6
-rw-r--r--  Objects/genobject.c            49
-rw-r--r--  Objects/interpolationobject.c   2
-rw-r--r--  Objects/longobject.c           13
-rw-r--r--  Objects/moduleobject.c          3
-rw-r--r--  Objects/object.c              135
-rw-r--r--  Objects/obmalloc.c             22
-rw-r--r--  Objects/templateobject.c        5
-rw-r--r--  Objects/typeobject.c          729
-rw-r--r--  Objects/typevarobject.c         4
-rw-r--r--  Objects/unicodeobject.c        73
-rw-r--r--  Objects/unionobject.c           8
18 files changed, 877 insertions(+), 293 deletions(-)
diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c
index fc407ec6bf9..87ea1162e03 100644
--- a/Objects/bytesobject.c
+++ b/Objects/bytesobject.c
@@ -1075,10 +1075,11 @@ _PyBytes_FormatEx(const char *format, Py_ssize_t format_len,
}
/* Unescape a backslash-escaped string. */
-PyObject *_PyBytes_DecodeEscape(const char *s,
+PyObject *_PyBytes_DecodeEscape2(const char *s,
Py_ssize_t len,
const char *errors,
- const char **first_invalid_escape)
+ int *first_invalid_escape_char,
+ const char **first_invalid_escape_ptr)
{
int c;
char *p;
@@ -1092,7 +1093,8 @@ PyObject *_PyBytes_DecodeEscape(const char *s,
return NULL;
writer.overallocate = 1;
- *first_invalid_escape = NULL;
+ *first_invalid_escape_char = -1;
+ *first_invalid_escape_ptr = NULL;
end = s + len;
while (s < end) {
@@ -1130,9 +1132,10 @@ PyObject *_PyBytes_DecodeEscape(const char *s,
c = (c<<3) + *s++ - '0';
}
if (c > 0377) {
- if (*first_invalid_escape == NULL) {
- *first_invalid_escape = s-3; /* Back up 3 chars, since we've
- already incremented s. */
+ if (*first_invalid_escape_char == -1) {
+ *first_invalid_escape_char = c;
+ /* Back up 3 chars, since we've already incremented s. */
+ *first_invalid_escape_ptr = s - 3;
}
}
*p++ = c;
@@ -1173,9 +1176,10 @@ PyObject *_PyBytes_DecodeEscape(const char *s,
break;
default:
- if (*first_invalid_escape == NULL) {
- *first_invalid_escape = s-1; /* Back up one char, since we've
- already incremented s. */
+ if (*first_invalid_escape_char == -1) {
+ *first_invalid_escape_char = (unsigned char)s[-1];
+ /* Back up one char, since we've already incremented s. */
+ *first_invalid_escape_ptr = s - 1;
}
*p++ = '\\';
s--;
@@ -1195,18 +1199,19 @@ PyObject *PyBytes_DecodeEscape(const char *s,
Py_ssize_t Py_UNUSED(unicode),
const char *Py_UNUSED(recode_encoding))
{
- const char* first_invalid_escape;
- PyObject *result = _PyBytes_DecodeEscape(s, len, errors,
- &first_invalid_escape);
+ int first_invalid_escape_char;
+ const char *first_invalid_escape_ptr;
+ PyObject *result = _PyBytes_DecodeEscape2(s, len, errors,
+ &first_invalid_escape_char,
+ &first_invalid_escape_ptr);
if (result == NULL)
return NULL;
- if (first_invalid_escape != NULL) {
- unsigned char c = *first_invalid_escape;
- if ('4' <= c && c <= '7') {
+ if (first_invalid_escape_char != -1) {
+ if (first_invalid_escape_char > 0xff) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
- "b\"\\%.3s\" is an invalid octal escape sequence. "
+ "b\"\\%o\" is an invalid octal escape sequence. "
"Such sequences will not work in the future. ",
- first_invalid_escape) < 0)
+ first_invalid_escape_char) < 0)
{
Py_DECREF(result);
return NULL;
@@ -1216,7 +1221,7 @@ PyObject *PyBytes_DecodeEscape(const char *s,
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"b\"\\%c\" is an invalid escape sequence. "
"Such sequences will not work in the future. ",
- c) < 0)
+ first_invalid_escape_char) < 0)
{
Py_DECREF(result);
return NULL;
diff --git a/Objects/call.c b/Objects/call.c
index b1610dababd..c9a18bcc3da 100644
--- a/Objects/call.c
+++ b/Objects/call.c
@@ -834,12 +834,15 @@ PyObject_VectorcallMethod(PyObject *name, PyObject *const *args,
assert(PyVectorcall_NARGS(nargsf) >= 1);
PyThreadState *tstate = _PyThreadState_GET();
- PyObject *callable = NULL;
+ _PyCStackRef method;
+ _PyThreadState_PushCStackRef(tstate, &method);
/* Use args[0] as "self" argument */
- int unbound = _PyObject_GetMethod(args[0], name, &callable);
- if (callable == NULL) {
+ int unbound = _PyObject_GetMethodStackRef(tstate, args[0], name, &method.ref);
+ if (PyStackRef_IsNull(method.ref)) {
+ _PyThreadState_PopCStackRef(tstate, &method);
return NULL;
}
+ PyObject *callable = PyStackRef_AsPyObjectBorrow(method.ref);
if (unbound) {
/* We must remove PY_VECTORCALL_ARGUMENTS_OFFSET since
@@ -855,7 +858,7 @@ PyObject_VectorcallMethod(PyObject *name, PyObject *const *args,
EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_METHOD, callable);
PyObject *result = _PyObject_VectorcallTstate(tstate, callable,
args, nargsf, kwnames);
- Py_DECREF(callable);
+ _PyThreadState_PopCStackRef(tstate, &method);
return result;
}
@@ -868,11 +871,14 @@ PyObject_CallMethodObjArgs(PyObject *obj, PyObject *name, ...)
return null_error(tstate);
}
- PyObject *callable = NULL;
- int is_method = _PyObject_GetMethod(obj, name, &callable);
- if (callable == NULL) {
+ _PyCStackRef method;
+ _PyThreadState_PushCStackRef(tstate, &method);
+ int is_method = _PyObject_GetMethodStackRef(tstate, obj, name, &method.ref);
+ if (PyStackRef_IsNull(method.ref)) {
+ _PyThreadState_PopCStackRef(tstate, &method);
return NULL;
}
+ PyObject *callable = PyStackRef_AsPyObjectBorrow(method.ref);
obj = is_method ? obj : NULL;
va_list vargs;
@@ -880,7 +886,7 @@ PyObject_CallMethodObjArgs(PyObject *obj, PyObject *name, ...)
PyObject *result = object_vacall(tstate, obj, callable, vargs);
va_end(vargs);
- Py_DECREF(callable);
+ _PyThreadState_PopCStackRef(tstate, &method);
return result;
}
@@ -897,12 +903,15 @@ _PyObject_CallMethodIdObjArgs(PyObject *obj, _Py_Identifier *name, ...)
if (!oname) {
return NULL;
}
-
- PyObject *callable = NULL;
- int is_method = _PyObject_GetMethod(obj, oname, &callable);
- if (callable == NULL) {
+ _PyCStackRef method;
+ _PyThreadState_PushCStackRef(tstate, &method);
+ int is_method = _PyObject_GetMethodStackRef(tstate, obj, oname, &method.ref);
+ if (PyStackRef_IsNull(method.ref)) {
+ _PyThreadState_PopCStackRef(tstate, &method);
return NULL;
}
+ PyObject *callable = PyStackRef_AsPyObjectBorrow(method.ref);
+
obj = is_method ? obj : NULL;
va_list vargs;
@@ -910,7 +919,7 @@ _PyObject_CallMethodIdObjArgs(PyObject *obj, _Py_Identifier *name, ...)
PyObject *result = object_vacall(tstate, obj, callable, vargs);
va_end(vargs);
- Py_DECREF(callable);
+ _PyThreadState_PopCStackRef(tstate, &method);
return result;
}
diff --git a/Objects/codeobject.c b/Objects/codeobject.c
index 4f06a36a130..ee869d991d9 100644
--- a/Objects/codeobject.c
+++ b/Objects/codeobject.c
@@ -2364,6 +2364,8 @@ free_monitoring_data(_PyCoMonitoringData *data)
static void
code_dealloc(PyObject *self)
{
+ PyThreadState *tstate = PyThreadState_GET();
+ _Py_atomic_add_uint64(&tstate->interp->_code_object_generation, 1);
PyCodeObject *co = _PyCodeObject_CAST(self);
_PyObject_ResurrectStart(self);
notify_code_watchers(PY_CODE_EVENT_DESTROY, co);
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index ce27e47dabf..fd8ccf56324 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -2916,6 +2916,11 @@ clear_lock_held(PyObject *op)
}
void
+_PyDict_Clear_LockHeld(PyObject *op) {
+ clear_lock_held(op);
+}
+
+void
PyDict_Clear(PyObject *op)
{
Py_BEGIN_CRITICAL_SECTION(op);
diff --git a/Objects/frameobject.c b/Objects/frameobject.c
index 76b52efccf8..601fc69c4b1 100644
--- a/Objects/frameobject.c
+++ b/Objects/frameobject.c
@@ -1386,6 +1386,10 @@ mark_stacks(PyCodeObject *code_obj, int len)
stacks[j] = next_stack;
break;
case GET_ITER:
+ next_stack = push_value(pop_value(next_stack), Iterator);
+ next_stack = push_value(next_stack, Iterator);
+ stacks[next_i] = next_stack;
+ break;
case GET_AITER:
next_stack = push_value(pop_value(next_stack), Iterator);
stacks[next_i] = next_stack;
diff --git a/Objects/funcobject.c b/Objects/funcobject.c
index 27214a129c2..f87b0e5d8f1 100644
--- a/Objects/funcobject.c
+++ b/Objects/funcobject.c
@@ -1264,26 +1264,32 @@ _PyFunction_VerifyStateless(PyThreadState *tstate, PyObject *func)
}
// Disallow __defaults__.
PyObject *defaults = PyFunction_GET_DEFAULTS(func);
- if (defaults != NULL && defaults != Py_None && PyDict_Size(defaults) > 0)
- {
- _PyErr_SetString(tstate, PyExc_ValueError, "defaults not supported");
- return -1;
+ if (defaults != NULL) {
+ assert(PyTuple_Check(defaults)); // per PyFunction_New()
+ if (PyTuple_GET_SIZE(defaults) > 0) {
+ _PyErr_SetString(tstate, PyExc_ValueError,
+ "defaults not supported");
+ return -1;
+ }
}
// Disallow __kwdefaults__.
PyObject *kwdefaults = PyFunction_GET_KW_DEFAULTS(func);
- if (kwdefaults != NULL && kwdefaults != Py_None
- && PyDict_Size(kwdefaults) > 0)
- {
- _PyErr_SetString(tstate, PyExc_ValueError,
- "keyword defaults not supported");
- return -1;
+ if (kwdefaults != NULL) {
+ assert(PyDict_Check(kwdefaults)); // per PyFunction_New()
+ if (PyDict_Size(kwdefaults) > 0) {
+ _PyErr_SetString(tstate, PyExc_ValueError,
+ "keyword defaults not supported");
+ return -1;
+ }
}
// Disallow __closure__.
PyObject *closure = PyFunction_GET_CLOSURE(func);
- if (closure != NULL && closure != Py_None && PyTuple_GET_SIZE(closure) > 0)
- {
- _PyErr_SetString(tstate, PyExc_ValueError, "closures not supported");
- return -1;
+ if (closure != NULL) {
+ assert(PyTuple_Check(closure)); // per PyFunction_New()
+ if (PyTuple_GET_SIZE(closure) > 0) {
+ _PyErr_SetString(tstate, PyExc_ValueError, "closures not supported");
+ return -1;
+ }
}
// Check the code.
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
diff --git a/Objects/genericaliasobject.c b/Objects/genericaliasobject.c
index ec3d01f00a3..07b57f0c552 100644
--- a/Objects/genericaliasobject.c
+++ b/Objects/genericaliasobject.c
@@ -65,7 +65,7 @@ ga_repr_items_list(PyUnicodeWriter *writer, PyObject *p)
for (Py_ssize_t i = 0; i < len; i++) {
if (i > 0) {
- if (PyUnicodeWriter_WriteUTF8(writer, ", ", 2) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, ", ", 2) < 0) {
return -1;
}
}
@@ -109,7 +109,7 @@ ga_repr(PyObject *self)
}
for (Py_ssize_t i = 0; i < len; i++) {
if (i > 0) {
- if (PyUnicodeWriter_WriteUTF8(writer, ", ", 2) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, ", ", 2) < 0) {
goto error;
}
}
@@ -126,7 +126,7 @@ ga_repr(PyObject *self)
}
if (len == 0) {
// for something like tuple[()] we should print a "()"
- if (PyUnicodeWriter_WriteUTF8(writer, "()", 2) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, "()", 2) < 0) {
goto error;
}
}
diff --git a/Objects/genobject.c b/Objects/genobject.c
index 98b2c5004df..da1462deaaa 100644
--- a/Objects/genobject.c
+++ b/Objects/genobject.c
@@ -1451,7 +1451,9 @@ typedef struct PyAsyncGenAThrow {
/* Can be NULL, when in the "aclose()" mode
(equivalent of "athrow(GeneratorExit)") */
- PyObject *agt_args;
+ PyObject *agt_typ;
+ PyObject *agt_tb;
+ PyObject *agt_val;
AwaitableState agt_state;
} PyAsyncGenAThrow;
@@ -2078,7 +2080,9 @@ async_gen_athrow_dealloc(PyObject *self)
_PyObject_GC_UNTRACK(self);
Py_CLEAR(agt->agt_gen);
- Py_CLEAR(agt->agt_args);
+ Py_XDECREF(agt->agt_typ);
+ Py_XDECREF(agt->agt_tb);
+ Py_XDECREF(agt->agt_val);
PyObject_GC_Del(self);
}
@@ -2088,7 +2092,9 @@ async_gen_athrow_traverse(PyObject *self, visitproc visit, void *arg)
{
PyAsyncGenAThrow *agt = _PyAsyncGenAThrow_CAST(self);
Py_VISIT(agt->agt_gen);
- Py_VISIT(agt->agt_args);
+ Py_VISIT(agt->agt_typ);
+ Py_VISIT(agt->agt_tb);
+ Py_VISIT(agt->agt_val);
return 0;
}
@@ -2116,7 +2122,7 @@ async_gen_athrow_send(PyObject *self, PyObject *arg)
if (o->agt_state == AWAITABLE_STATE_INIT) {
if (o->agt_gen->ag_running_async) {
o->agt_state = AWAITABLE_STATE_CLOSED;
- if (o->agt_args == NULL) {
+ if (o->agt_typ == NULL) {
PyErr_SetString(
PyExc_RuntimeError,
"aclose(): asynchronous generator is already running");
@@ -2143,7 +2149,7 @@ async_gen_athrow_send(PyObject *self, PyObject *arg)
o->agt_state = AWAITABLE_STATE_ITER;
o->agt_gen->ag_running_async = 1;
- if (o->agt_args == NULL) {
+ if (o->agt_typ == NULL) {
/* aclose() mode */
o->agt_gen->ag_closed = 1;
@@ -2157,19 +2163,10 @@ async_gen_athrow_send(PyObject *self, PyObject *arg)
goto yield_close;
}
} else {
- PyObject *typ;
- PyObject *tb = NULL;
- PyObject *val = NULL;
-
- if (!PyArg_UnpackTuple(o->agt_args, "athrow", 1, 3,
- &typ, &val, &tb)) {
- return NULL;
- }
-
retval = _gen_throw((PyGenObject *)gen,
0, /* Do not close generator when
PyExc_GeneratorExit is passed */
- typ, val, tb);
+ o->agt_typ, o->agt_val, o->agt_tb);
retval = async_gen_unwrap_value(o->agt_gen, retval);
}
if (retval == NULL) {
@@ -2181,7 +2178,7 @@ async_gen_athrow_send(PyObject *self, PyObject *arg)
assert(o->agt_state == AWAITABLE_STATE_ITER);
retval = gen_send((PyObject *)gen, arg);
- if (o->agt_args) {
+ if (o->agt_typ) {
return async_gen_unwrap_value(o->agt_gen, retval);
} else {
/* aclose() mode */
@@ -2212,7 +2209,7 @@ check_error:
if (PyErr_ExceptionMatches(PyExc_StopAsyncIteration) ||
PyErr_ExceptionMatches(PyExc_GeneratorExit))
{
- if (o->agt_args == NULL) {
+ if (o->agt_typ == NULL) {
/* when aclose() is called we don't want to propagate
StopAsyncIteration or GeneratorExit; just raise
StopIteration, signalling that this 'aclose()' await
@@ -2241,7 +2238,7 @@ async_gen_athrow_throw(PyObject *self, PyObject *const *args, Py_ssize_t nargs)
if (o->agt_state == AWAITABLE_STATE_INIT) {
if (o->agt_gen->ag_running_async) {
o->agt_state = AWAITABLE_STATE_CLOSED;
- if (o->agt_args == NULL) {
+ if (o->agt_typ == NULL) {
PyErr_SetString(
PyExc_RuntimeError,
"aclose(): asynchronous generator is already running");
@@ -2259,7 +2256,7 @@ async_gen_athrow_throw(PyObject *self, PyObject *const *args, Py_ssize_t nargs)
}
PyObject *retval = gen_throw((PyObject*)o->agt_gen, args, nargs);
- if (o->agt_args) {
+ if (o->agt_typ) {
retval = async_gen_unwrap_value(o->agt_gen, retval);
if (retval == NULL) {
o->agt_gen->ag_running_async = 0;
@@ -2334,7 +2331,7 @@ async_gen_athrow_finalize(PyObject *op)
{
PyAsyncGenAThrow *o = (PyAsyncGenAThrow*)op;
if (o->agt_state == AWAITABLE_STATE_INIT) {
- PyObject *method = o->agt_args ? &_Py_ID(athrow) : &_Py_ID(aclose);
+ PyObject *method = o->agt_typ ? &_Py_ID(athrow) : &_Py_ID(aclose);
_PyErr_WarnUnawaitedAgenMethod(o->agt_gen, method);
}
}
@@ -2403,13 +2400,23 @@ PyTypeObject _PyAsyncGenAThrow_Type = {
static PyObject *
async_gen_athrow_new(PyAsyncGenObject *gen, PyObject *args)
{
+ PyObject *typ = NULL;
+ PyObject *tb = NULL;
+ PyObject *val = NULL;
+ if (args && !PyArg_UnpackTuple(args, "athrow", 1, 3, &typ, &val, &tb)) {
+ return NULL;
+ }
+
PyAsyncGenAThrow *o;
o = PyObject_GC_New(PyAsyncGenAThrow, &_PyAsyncGenAThrow_Type);
if (o == NULL) {
return NULL;
}
o->agt_gen = (PyAsyncGenObject*)Py_NewRef(gen);
- o->agt_args = Py_XNewRef(args);
+ o->agt_typ = Py_XNewRef(typ);
+ o->agt_tb = Py_XNewRef(tb);
+ o->agt_val = Py_XNewRef(val);
+
o->agt_state = AWAITABLE_STATE_INIT;
_PyObject_GC_TRACK((PyObject*)o);
return (PyObject*)o;
diff --git a/Objects/interpolationobject.c b/Objects/interpolationobject.c
index aaea3b8c067..a5d407a7b0e 100644
--- a/Objects/interpolationobject.c
+++ b/Objects/interpolationobject.c
@@ -137,6 +137,8 @@ interpolation_reduce(PyObject *op, PyObject *Py_UNUSED(dummy))
static PyMethodDef interpolation_methods[] = {
{"__reduce__", interpolation_reduce, METH_NOARGS,
PyDoc_STR("__reduce__() -> (cls, state)")},
+ {"__class_getitem__", Py_GenericAlias,
+ METH_O|METH_CLASS, PyDoc_STR("See PEP 585")},
{NULL, NULL},
};
diff --git a/Objects/longobject.c b/Objects/longobject.c
index 0b2dfa003fa..2b533312fee 100644
--- a/Objects/longobject.c
+++ b/Objects/longobject.c
@@ -971,16 +971,9 @@ _PyLong_FromByteArray(const unsigned char* bytes, size_t n,
++numsignificantbytes;
}
- /* How many Python int digits do we need? We have
- 8*numsignificantbytes bits, and each Python int digit has
- PyLong_SHIFT bits, so it's the ceiling of the quotient. */
- /* catch overflow before it happens */
- if (numsignificantbytes > (PY_SSIZE_T_MAX - PyLong_SHIFT) / 8) {
- PyErr_SetString(PyExc_OverflowError,
- "byte array too long to convert to int");
- return NULL;
- }
- ndigits = (numsignificantbytes * 8 + PyLong_SHIFT - 1) / PyLong_SHIFT;
+ /* avoid integer overflow */
+ ndigits = numsignificantbytes / PyLong_SHIFT * 8
+ + (numsignificantbytes % PyLong_SHIFT * 8 + PyLong_SHIFT - 1) / PyLong_SHIFT;
v = long_alloc(ndigits);
if (v == NULL)
return NULL;
diff --git a/Objects/moduleobject.c b/Objects/moduleobject.c
index f363ef173cb..ba86b41e945 100644
--- a/Objects/moduleobject.c
+++ b/Objects/moduleobject.c
@@ -12,7 +12,6 @@
#include "pycore_object.h" // _PyType_AllocNoTrack
#include "pycore_pyerrors.h" // _PyErr_FormatFromCause()
#include "pycore_pystate.h" // _PyInterpreterState_GET()
-#include "pycore_sysmodule.h" // _PySys_GetOptionalAttrString()
#include "pycore_unicodeobject.h" // _PyUnicode_EqualToASCIIString()
#include "osdefs.h" // MAXPATHLEN
@@ -1058,7 +1057,7 @@ _Py_module_getattro_impl(PyModuleObject *m, PyObject *name, int suppress)
int is_possibly_shadowing_stdlib = 0;
if (is_possibly_shadowing) {
PyObject *stdlib_modules;
- if (_PySys_GetOptionalAttrString("stdlib_module_names", &stdlib_modules) < 0) {
+ if (PySys_GetOptionalAttrString("stdlib_module_names", &stdlib_modules) < 0) {
goto done;
}
if (stdlib_modules && PyAnySet_Check(stdlib_modules)) {
diff --git a/Objects/object.c b/Objects/object.c
index 723b0427e69..9fe61ba7f15 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -1664,6 +1664,116 @@ _PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method)
return 0;
}
+int
+_PyObject_GetMethodStackRef(PyThreadState *ts, PyObject *obj,
+ PyObject *name, _PyStackRef *method)
+{
+ int meth_found = 0;
+
+ assert(PyStackRef_IsNull(*method));
+
+ PyTypeObject *tp = Py_TYPE(obj);
+ if (!_PyType_IsReady(tp)) {
+ if (PyType_Ready(tp) < 0) {
+ return 0;
+ }
+ }
+
+ if (tp->tp_getattro != PyObject_GenericGetAttr || !PyUnicode_CheckExact(name)) {
+ PyObject *res = PyObject_GetAttr(obj, name);
+ if (res != NULL) {
+ *method = PyStackRef_FromPyObjectSteal(res);
+ }
+ return 0;
+ }
+
+ _PyType_LookupStackRefAndVersion(tp, name, method);
+ PyObject *descr = PyStackRef_AsPyObjectBorrow(*method);
+ descrgetfunc f = NULL;
+ if (descr != NULL) {
+ if (_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) {
+ meth_found = 1;
+ }
+ else {
+ f = Py_TYPE(descr)->tp_descr_get;
+ if (f != NULL && PyDescr_IsData(descr)) {
+ PyObject *value = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ PyStackRef_CLEAR(*method);
+ if (value != NULL) {
+ *method = PyStackRef_FromPyObjectSteal(value);
+ }
+ return 0;
+ }
+ }
+ }
+ PyObject *dict, *attr;
+ if ((tp->tp_flags & Py_TPFLAGS_INLINE_VALUES) &&
+ _PyObject_TryGetInstanceAttribute(obj, name, &attr)) {
+ if (attr != NULL) {
+ PyStackRef_CLEAR(*method);
+ *method = PyStackRef_FromPyObjectSteal(attr);
+ return 0;
+ }
+ dict = NULL;
+ }
+ else if ((tp->tp_flags & Py_TPFLAGS_MANAGED_DICT)) {
+ dict = (PyObject *)_PyObject_GetManagedDict(obj);
+ }
+ else {
+ PyObject **dictptr = _PyObject_ComputedDictPointer(obj);
+ if (dictptr != NULL) {
+ dict = FT_ATOMIC_LOAD_PTR_ACQUIRE(*dictptr);
+ }
+ else {
+ dict = NULL;
+ }
+ }
+ if (dict != NULL) {
+ // TODO: use _Py_dict_lookup_threadsafe_stackref
+ Py_INCREF(dict);
+ PyObject *value;
+ if (PyDict_GetItemRef(dict, name, &value) != 0) {
+ // found or error
+ Py_DECREF(dict);
+ PyStackRef_CLEAR(*method);
+ if (value != NULL) {
+ *method = PyStackRef_FromPyObjectSteal(value);
+ }
+ return 0;
+ }
+ // not found
+ Py_DECREF(dict);
+ }
+
+ if (meth_found) {
+ assert(!PyStackRef_IsNull(*method));
+ return 1;
+ }
+
+ if (f != NULL) {
+ PyObject *value = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ PyStackRef_CLEAR(*method);
+ if (value) {
+ *method = PyStackRef_FromPyObjectSteal(value);
+ }
+ return 0;
+ }
+
+ if (descr != NULL) {
+ assert(!PyStackRef_IsNull(*method));
+ return 0;
+ }
+
+ PyErr_Format(PyExc_AttributeError,
+ "'%.100s' object has no attribute '%U'",
+ tp->tp_name, name);
+
+ _PyObject_SetAttributeErrorContext(obj, name);
+ assert(PyStackRef_IsNull(*method));
+ return 0;
+}
+
+
/* Generic GetAttr functions - put these in your tp_[gs]etattro slot. */
PyObject *
@@ -1906,34 +2016,11 @@ PyObject_GenericSetAttr(PyObject *obj, PyObject *name, PyObject *value)
int
PyObject_GenericSetDict(PyObject *obj, PyObject *value, void *context)
{
- PyObject **dictptr = _PyObject_GetDictPtr(obj);
- if (dictptr == NULL) {
- if (_PyType_HasFeature(Py_TYPE(obj), Py_TPFLAGS_INLINE_VALUES) &&
- _PyObject_GetManagedDict(obj) == NULL
- ) {
- /* Was unable to convert to dict */
- PyErr_NoMemory();
- }
- else {
- PyErr_SetString(PyExc_AttributeError,
- "This object has no __dict__");
- }
- return -1;
- }
if (value == NULL) {
PyErr_SetString(PyExc_TypeError, "cannot delete __dict__");
return -1;
}
- if (!PyDict_Check(value)) {
- PyErr_Format(PyExc_TypeError,
- "__dict__ must be set to a dictionary, "
- "not a '%.200s'", Py_TYPE(value)->tp_name);
- return -1;
- }
- Py_BEGIN_CRITICAL_SECTION(obj);
- Py_XSETREF(*dictptr, Py_NewRef(value));
- Py_END_CRITICAL_SECTION();
- return 0;
+ return _PyObject_SetDict(obj, value);
}
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index b209808da90..d3931aab623 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -1238,7 +1238,7 @@ work_queue_first(struct llist_node *head)
}
static void
-process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
+process_queue(struct llist_node *head, _PyThreadStateImpl *tstate,
bool keep_empty, delayed_dealloc_cb cb, void *state)
{
while (!llist_empty(head)) {
@@ -1246,7 +1246,7 @@ process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
if (buf->rd_idx < buf->wr_idx) {
struct _mem_work_item *item = &buf->array[buf->rd_idx];
- if (!_Py_qsbr_poll(qsbr, item->qsbr_goal)) {
+ if (!_Py_qsbr_poll(tstate->qsbr, item->qsbr_goal)) {
return;
}
@@ -1270,11 +1270,11 @@ process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
static void
process_interp_queue(struct _Py_mem_interp_free_queue *queue,
- struct _qsbr_thread_state *qsbr, delayed_dealloc_cb cb,
+ _PyThreadStateImpl *tstate, delayed_dealloc_cb cb,
void *state)
{
assert(PyMutex_IsLocked(&queue->mutex));
- process_queue(&queue->head, qsbr, false, cb, state);
+ process_queue(&queue->head, tstate, false, cb, state);
int more_work = !llist_empty(&queue->head);
_Py_atomic_store_int_relaxed(&queue->has_work, more_work);
@@ -1282,7 +1282,7 @@ process_interp_queue(struct _Py_mem_interp_free_queue *queue,
static void
maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
- struct _qsbr_thread_state *qsbr, delayed_dealloc_cb cb,
+ _PyThreadStateImpl *tstate, delayed_dealloc_cb cb,
void *state)
{
if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
@@ -1291,7 +1291,7 @@ maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
// Try to acquire the lock, but don't block if it's already held.
if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
- process_interp_queue(queue, qsbr, cb, state);
+ process_interp_queue(queue, tstate, cb, state);
PyMutex_Unlock(&queue->mutex);
}
}
@@ -1303,10 +1303,10 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
_PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
// Process thread-local work
- process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true, NULL, NULL);
+ process_queue(&tstate_impl->mem_free_queue, tstate_impl, true, NULL, NULL);
// Process shared interpreter work
- maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, NULL, NULL);
+ maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl, NULL, NULL);
}
void
@@ -1316,10 +1316,10 @@ _PyMem_ProcessDelayedNoDealloc(PyThreadState *tstate, delayed_dealloc_cb cb, voi
_PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
// Process thread-local work
- process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true, cb, state);
+ process_queue(&tstate_impl->mem_free_queue, tstate_impl, true, cb, state);
// Process shared interpreter work
- maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, cb, state);
+ maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl, cb, state);
}
void
@@ -1348,7 +1348,7 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
// Process the merged queue now (see gh-130794).
_PyThreadStateImpl *this_tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
- process_interp_queue(&interp->mem_free_queue, this_tstate->qsbr, NULL, NULL);
+ process_interp_queue(&interp->mem_free_queue, this_tstate, NULL, NULL);
PyMutex_Unlock(&interp->mem_free_queue.mutex);
diff --git a/Objects/templateobject.c b/Objects/templateobject.c
index 7d356980b56..4293a311c44 100644
--- a/Objects/templateobject.c
+++ b/Objects/templateobject.c
@@ -23,6 +23,9 @@ templateiter_next(PyObject *op)
if (self->from_strings) {
item = PyIter_Next(self->stringsiter);
self->from_strings = 0;
+ if (item == NULL) {
+ return NULL;
+ }
if (PyUnicode_GET_LENGTH(item) == 0) {
Py_SETREF(item, PyIter_Next(self->interpolationsiter));
self->from_strings = 1;
@@ -444,6 +447,8 @@ template_reduce(PyObject *op, PyObject *Py_UNUSED(dummy))
static PyMethodDef template_methods[] = {
{"__reduce__", template_reduce, METH_NOARGS, NULL},
+ {"__class_getitem__", Py_GenericAlias,
+ METH_O|METH_CLASS, PyDoc_STR("See PEP 585")},
{NULL, NULL},
};
diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index a7ab69fef4c..db923c16477 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -48,7 +48,7 @@ class object "PyObject *" "&PyBaseObject_Type"
& ((1 << MCACHE_SIZE_EXP) - 1))
#define MCACHE_HASH_METHOD(type, name) \
- MCACHE_HASH(FT_ATOMIC_LOAD_UINT32_RELAXED((type)->tp_version_tag), \
+ MCACHE_HASH(FT_ATOMIC_LOAD_UINT_RELAXED((type)->tp_version_tag), \
((Py_ssize_t)(name)) >> 3)
#define MCACHE_CACHEABLE_NAME(name) \
PyUnicode_CheckExact(name) && \
@@ -60,11 +60,19 @@ class object "PyObject *" "&PyBaseObject_Type"
#ifdef Py_GIL_DISABLED
-// There's a global lock for mutation of types. This avoids having to take
-// additional locks while doing various subclass processing which may result
-// in odd behaviors w.r.t. running with the GIL as the outer type lock could
-// be released and reacquired during a subclass update if there's contention
-// on the subclass lock.
+// There's a global lock for types that ensures that tp_version_tag and
+// _spec_cache are correctly updated if the type is modified. It also protects
+// tp_mro, tp_bases, and tp_base. This avoids having to take additional locks
+// while doing various subclass processing which may result in odd behaviors
+// w.r.t. running with the GIL as the outer type lock could be released and
+// reacquired during a subclass update if there's contention on the subclass
+// lock.
+//
+// Note that this lock does not protect updates of other type slots or the
+// tp_flags member. Instead, we either ensure those updates are done before
+// the type has been revealed to other threads or we only do those updates
+// while the stop-the-world mechanism is active. The slots and flags are read
+// in many places without holding a lock and without atomics.
#define TYPE_LOCK &PyInterpreterState_Get()->types.mutex
#define BEGIN_TYPE_LOCK() Py_BEGIN_CRITICAL_SECTION_MUT(TYPE_LOCK)
#define END_TYPE_LOCK() Py_END_CRITICAL_SECTION()
@@ -74,8 +82,100 @@ class object "PyObject *" "&PyBaseObject_Type"
#define END_TYPE_DICT_LOCK() Py_END_CRITICAL_SECTION2()
+#ifdef Py_DEBUG
+// Return true if the world is currently stopped.
+static bool
+types_world_is_stopped(void)
+{
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ return interp->stoptheworld.world_stopped;
+}
+#endif
+
+// Checks that the type has not yet been revealed (exposed) to other
+// threads. The _Py_TYPE_REVEALED_FLAG flag is set by type_new() and
+// PyType_FromMetaclass() to indicate that a newly initialized type might be
+// revealed. We only have ob_flags on 64-bit platforms.
+#if SIZEOF_VOID_P > 4
+#define TYPE_IS_REVEALED(tp) ((((PyObject *)(tp))->ob_flags & _Py_TYPE_REVEALED_FLAG) != 0)
+#else
+#define TYPE_IS_REVEALED(tp) 0
+#endif
+
+#ifdef Py_DEBUG
#define ASSERT_TYPE_LOCK_HELD() \
- _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(TYPE_LOCK)
+ if (!types_world_is_stopped()) { _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(TYPE_LOCK); }
+
+// Checks if we can safely update type slots or tp_flags.
+#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp) \
+ assert(!TYPE_IS_REVEALED(tp) || types_world_is_stopped())
+
+#define ASSERT_NEW_TYPE_OR_LOCKED(tp) \
+ if (TYPE_IS_REVEALED(tp)) { ASSERT_TYPE_LOCK_HELD(); }
+#else
+#define ASSERT_TYPE_LOCK_HELD()
+#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp)
+#define ASSERT_NEW_TYPE_OR_LOCKED(tp)
+#endif
+
+static void
+types_stop_world(void)
+{
+ assert(!types_world_is_stopped());
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ _PyEval_StopTheWorld(interp);
+ assert(types_world_is_stopped());
+}
+
+static void
+types_start_world(void)
+{
+ assert(types_world_is_stopped());
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ _PyEval_StartTheWorld(interp);
+ assert(!types_world_is_stopped());
+}
+
+// This is used to temporarily prevent the TYPE_LOCK from being suspended
+// when held by the topmost critical section.
+static void
+type_lock_prevent_release(void)
+{
+ PyThreadState *tstate = _PyThreadState_GET();
+ uintptr_t *tagptr = &tstate->critical_section;
+ PyCriticalSection *c = (PyCriticalSection *)(*tagptr & ~_Py_CRITICAL_SECTION_MASK);
+ if (!(*tagptr & _Py_CRITICAL_SECTION_TWO_MUTEXES)) {
+ assert(c->_cs_mutex == TYPE_LOCK);
+ c->_cs_mutex = NULL;
+ }
+ else {
+ PyCriticalSection2 *c2 = (PyCriticalSection2 *)c;
+ if (c->_cs_mutex == TYPE_LOCK) {
+ c->_cs_mutex = c2->_cs_mutex2;
+ c2->_cs_mutex2 = NULL;
+ } else {
+ assert(c2->_cs_mutex2 == TYPE_LOCK);
+ c2->_cs_mutex2 = NULL;
+ }
+ }
+}
+
+static void
+type_lock_allow_release(void)
+{
+ PyThreadState *tstate = _PyThreadState_GET();
+ uintptr_t *tagptr = &tstate->critical_section;
+ PyCriticalSection *c = (PyCriticalSection *)(*tagptr & ~_Py_CRITICAL_SECTION_MASK);
+ if (!(*tagptr & _Py_CRITICAL_SECTION_TWO_MUTEXES)) {
+ assert(c->_cs_mutex == NULL);
+ c->_cs_mutex = TYPE_LOCK;
+ }
+ else {
+ PyCriticalSection2 *c2 = (PyCriticalSection2 *)c;
+ assert(c2->_cs_mutex2 == NULL);
+ c2->_cs_mutex2 = TYPE_LOCK;
+ }
+}
#else
@@ -84,6 +184,12 @@ class object "PyObject *" "&PyBaseObject_Type"
#define BEGIN_TYPE_DICT_LOCK(d)
#define END_TYPE_DICT_LOCK()
#define ASSERT_TYPE_LOCK_HELD()
+#define TYPE_IS_REVEALED(tp) 0
+#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp)
+#define ASSERT_NEW_TYPE_OR_LOCKED(tp)
+#define types_world_is_stopped() 1
+#define types_stop_world()
+#define types_start_world()
#endif
@@ -106,6 +212,9 @@ slot_tp_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
static int
slot_tp_setattro(PyObject *self, PyObject *name, PyObject *value);
+static PyObject *
+slot_tp_call(PyObject *self, PyObject *args, PyObject *kwds);
+
static inline PyTypeObject *
type_from_ref(PyObject *ref)
{
@@ -346,21 +455,14 @@ _PyStaticType_GetBuiltins(void)
static void
type_set_flags(PyTypeObject *tp, unsigned long flags)
{
- if (tp->tp_flags & Py_TPFLAGS_READY) {
- // It's possible the type object has been exposed to other threads
- // if it's been marked ready. In that case, the type lock should be
- // held when flags are modified.
- ASSERT_TYPE_LOCK_HELD();
- }
- // Since PyType_HasFeature() reads the flags without holding the type
- // lock, we need an atomic store here.
- FT_ATOMIC_STORE_ULONG_RELAXED(tp->tp_flags, flags);
+ ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp);
+ tp->tp_flags = flags;
}
static void
type_set_flags_with_mask(PyTypeObject *tp, unsigned long mask, unsigned long flags)
{
- ASSERT_TYPE_LOCK_HELD();
+ ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp);
unsigned long new_flags = (tp->tp_flags & ~mask) | flags;
type_set_flags(tp, new_flags);
}
@@ -498,6 +600,7 @@ static inline void
set_tp_bases(PyTypeObject *self, PyObject *bases, int initial)
{
assert(PyTuple_Check(bases));
+ ASSERT_NEW_TYPE_OR_LOCKED(self);
if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
// XXX tp_bases can probably be statically allocated for each
// static builtin type.
@@ -542,7 +645,7 @@ clear_tp_bases(PyTypeObject *self, int final)
static inline PyObject *
lookup_tp_mro(PyTypeObject *self)
{
- ASSERT_TYPE_LOCK_HELD();
+ ASSERT_NEW_TYPE_OR_LOCKED(self);
return self->tp_mro;
}
@@ -1027,7 +1130,6 @@ PyType_Unwatch(int watcher_id, PyObject* obj)
static void
set_version_unlocked(PyTypeObject *tp, unsigned int version)
{
- ASSERT_TYPE_LOCK_HELD();
assert(version == 0 || (tp->tp_versions_used != _Py_ATTR_CACHE_UNUSED));
#ifndef Py_GIL_DISABLED
PyInterpreterState *interp = _PyInterpreterState_GET();
@@ -1075,7 +1177,12 @@ type_modified_unlocked(PyTypeObject *type)
We don't assign new version tags eagerly, but only as
needed.
*/
- ASSERT_TYPE_LOCK_HELD();
+ ASSERT_NEW_TYPE_OR_LOCKED(type);
+#ifdef Py_GIL_DISABLED
+ // This function is re-entrant and it's not safe to call it
+ // with the world stopped.
+ assert(!types_world_is_stopped());
+#endif
if (type->tp_version_tag == 0) {
return;
}
@@ -1106,6 +1213,8 @@ type_modified_unlocked(PyTypeObject *type)
while (bits) {
assert(i < TYPE_MAX_WATCHERS);
if (bits & 1) {
+ // Note that PyErr_FormatUnraisable is potentially re-entrant
+ // and the watcher callback might be too.
PyType_WatchCallback cb = interp->type_watchers[i];
if (cb && (cb(type) < 0)) {
PyErr_FormatUnraisable(
@@ -1245,14 +1354,6 @@ _PyType_LookupByVersion(unsigned int version)
#endif
}
-unsigned int
-_PyType_GetVersionForCurrentState(PyTypeObject *tp)
-{
- return tp->tp_version_tag;
-}
-
-
-
#define MAX_VERSIONS_PER_CLASS 1000
#if _Py_ATTR_CACHE_UNUSED < MAX_VERSIONS_PER_CLASS
#error "_Py_ATTR_CACHE_UNUSED must be bigger than max"
@@ -1586,10 +1687,13 @@ type_set_abstractmethods(PyObject *tp, PyObject *value, void *Py_UNUSED(closure)
BEGIN_TYPE_LOCK();
type_modified_unlocked(type);
+ types_stop_world();
if (abstract)
type_add_flags(type, Py_TPFLAGS_IS_ABSTRACT);
else
type_clear_flags(type, Py_TPFLAGS_IS_ABSTRACT);
+ types_start_world();
+ ASSERT_TYPE_LOCK_HELD();
END_TYPE_LOCK();
return 0;
@@ -1624,15 +1728,15 @@ type_get_mro(PyObject *tp, void *Py_UNUSED(closure))
return mro;
}
-static PyTypeObject *best_base(PyObject *);
-static int mro_internal(PyTypeObject *, PyObject **);
+static PyTypeObject *find_best_base(PyObject *);
+static int mro_internal(PyTypeObject *, int, PyObject **);
static int type_is_subtype_base_chain(PyTypeObject *, PyTypeObject *);
static int compatible_for_assignment(PyTypeObject *, PyTypeObject *, const char *);
static int add_subclass(PyTypeObject*, PyTypeObject*);
static int add_all_subclasses(PyTypeObject *type, PyObject *bases);
static void remove_subclass(PyTypeObject *, PyTypeObject *);
static void remove_all_subclasses(PyTypeObject *type, PyObject *bases);
-static void update_all_slots(PyTypeObject *);
+static int update_all_slots(PyTypeObject *);
typedef int (*update_callback)(PyTypeObject *, void *);
static int update_subclasses(PyTypeObject *type, PyObject *attr_name,
@@ -1640,13 +1744,15 @@ static int update_subclasses(PyTypeObject *type, PyObject *attr_name,
static int recurse_down_subclasses(PyTypeObject *type, PyObject *name,
update_callback callback, void *data);
+// Compute tp_mro for this type and all of its subclasses. This
+// is called after __bases__ is assigned to an existing type.
static int
mro_hierarchy(PyTypeObject *type, PyObject *temp)
{
ASSERT_TYPE_LOCK_HELD();
PyObject *old_mro;
- int res = mro_internal(type, &old_mro);
+ int res = mro_internal(type, 0, &old_mro);
if (res <= 0) {
/* error / reentrance */
return res;
@@ -1708,9 +1814,9 @@ mro_hierarchy(PyTypeObject *type, PyObject *temp)
}
static int
-type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases)
+type_check_new_bases(PyTypeObject *type, PyObject *new_bases, PyTypeObject **best_base)
{
- // Check arguments
+ // Check arguments, this is re-entrant due to the PySys_Audit() call
if (!check_set_special_type_attr(type, new_bases, "__bases__")) {
return -1;
}
@@ -1759,20 +1865,29 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases)
}
// Compute the new MRO and the new base class
- PyTypeObject *new_base = best_base(new_bases);
- if (new_base == NULL)
+ *best_base = find_best_base(new_bases);
+ if (*best_base == NULL)
return -1;
- if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) {
+ if (!compatible_for_assignment(type->tp_base, *best_base, "__bases__")) {
return -1;
}
+ return 0;
+}
+
+static int
+type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases, PyTypeObject *best_base)
+{
+ ASSERT_TYPE_LOCK_HELD();
+
+ Py_ssize_t n;
PyObject *old_bases = lookup_tp_bases(type);
assert(old_bases != NULL);
PyTypeObject *old_base = type->tp_base;
set_tp_bases(type, Py_NewRef(new_bases), 0);
- type->tp_base = (PyTypeObject *)Py_NewRef(new_base);
+ type->tp_base = (PyTypeObject *)Py_NewRef(best_base);
PyObject *temp = PyList_New(0);
if (temp == NULL) {
@@ -1796,7 +1911,11 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases)
add to all new_bases */
remove_all_subclasses(type, old_bases);
res = add_all_subclasses(type, new_bases);
- update_all_slots(type);
+ if (update_all_slots(type) < 0) {
+ goto bail;
+ }
+ /* Clear the VALID_VERSION flag of 'type' and all its subclasses. */
+ type_modified_unlocked(type);
}
else {
res = 0;
@@ -1827,13 +1946,13 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases)
bail:
if (lookup_tp_bases(type) == new_bases) {
- assert(type->tp_base == new_base);
+ assert(type->tp_base == best_base);
set_tp_bases(type, old_bases, 0);
type->tp_base = old_base;
Py_DECREF(new_bases);
- Py_DECREF(new_base);
+ Py_DECREF(best_base);
}
else {
Py_DECREF(old_bases);
@@ -1848,9 +1967,13 @@ static int
type_set_bases(PyObject *tp, PyObject *new_bases, void *Py_UNUSED(closure))
{
PyTypeObject *type = PyTypeObject_CAST(tp);
+ PyTypeObject *best_base;
int res;
BEGIN_TYPE_LOCK();
- res = type_set_bases_unlocked(type, new_bases);
+ res = type_check_new_bases(type, new_bases, &best_base);
+ if (res == 0) {
+ res = type_set_bases_unlocked(type, new_bases, best_base);
+ }
END_TYPE_LOCK();
return res;
}
@@ -2065,19 +2188,46 @@ type_set_annotations(PyObject *tp, PyObject *value, void *Py_UNUSED(closure))
return -1;
}
- int result;
PyObject *dict = PyType_GetDict(type);
- if (value != NULL) {
- /* set */
- result = PyDict_SetItem(dict, &_Py_ID(__annotations_cache__), value);
- } else {
- /* delete */
- result = PyDict_Pop(dict, &_Py_ID(__annotations_cache__), NULL);
- if (result == 0) {
- PyErr_SetString(PyExc_AttributeError, "__annotations__");
+ int result = PyDict_ContainsString(dict, "__annotations__");
+ if (result < 0) {
+ Py_DECREF(dict);
+ return -1;
+ }
+ if (result) {
+ // If __annotations__ is currently in the dict, we update it,
+ if (value != NULL) {
+ result = PyDict_SetItem(dict, &_Py_ID(__annotations__), value);
+ } else {
+ result = PyDict_Pop(dict, &_Py_ID(__annotations__), NULL);
+ if (result == 0) {
+ // Somebody else just deleted it?
+ PyErr_SetString(PyExc_AttributeError, "__annotations__");
+ Py_DECREF(dict);
+ return -1;
+ }
+ }
+ if (result < 0) {
Py_DECREF(dict);
return -1;
}
+ // Also clear __annotations_cache__ just in case.
+ result = PyDict_Pop(dict, &_Py_ID(__annotations_cache__), NULL);
+ }
+ else {
+ // Else we update only __annotations_cache__.
+ if (value != NULL) {
+ /* set */
+ result = PyDict_SetItem(dict, &_Py_ID(__annotations_cache__), value);
+ } else {
+ /* delete */
+ result = PyDict_Pop(dict, &_Py_ID(__annotations_cache__), NULL);
+ if (result == 0) {
+ PyErr_SetString(PyExc_AttributeError, "__annotations__");
+ Py_DECREF(dict);
+ return -1;
+ }
+ }
}
if (result < 0) {
Py_DECREF(dict);
@@ -3051,6 +3201,7 @@ static PyObject *
class_name(PyObject *cls)
{
PyObject *name;
+ // Note that this is potentially re-entrant.
if (PyObject_GetOptionalAttr(cls, &_Py_ID(__name__), &name) == 0) {
name = PyObject_Repr(cls);
}
@@ -3387,9 +3538,13 @@ mro_invoke(PyTypeObject *type)
const int custom = !Py_IS_TYPE(type, &PyType_Type);
if (custom) {
+ // Custom mro() method on metaclass. This is potentially re-entrant.
+ // We are called either from type_ready() or from type_set_bases().
mro_result = call_method_noarg((PyObject *)type, &_Py_ID(mro));
}
else {
+ // In this case, the mro() method on the type object is being used and
+ // we know that these calls are not re-entrant.
mro_result = mro_implementation_unlocked(type);
}
if (mro_result == NULL)
@@ -3437,7 +3592,7 @@ mro_invoke(PyTypeObject *type)
- Returns -1 in case of an error.
*/
static int
-mro_internal_unlocked(PyTypeObject *type, int initial, PyObject **p_old_mro)
+mro_internal(PyTypeObject *type, int initial, PyObject **p_old_mro)
{
ASSERT_TYPE_LOCK_HELD();
@@ -3485,21 +3640,11 @@ mro_internal_unlocked(PyTypeObject *type, int initial, PyObject **p_old_mro)
return 1;
}
-static int
-mro_internal(PyTypeObject *type, PyObject **p_old_mro)
-{
- int res;
- BEGIN_TYPE_LOCK();
- res = mro_internal_unlocked(type, 0, p_old_mro);
- END_TYPE_LOCK();
- return res;
-}
-
/* Calculate the best base amongst multiple base classes.
This is the first one that's on the path to the "solid base". */
static PyTypeObject *
-best_base(PyObject *bases)
+find_best_base(PyObject *bases)
{
Py_ssize_t i, n;
PyTypeObject *base, *winner, *candidate;
@@ -3581,13 +3726,167 @@ solid_base(PyTypeObject *type)
}
}
+#ifdef Py_GIL_DISABLED
+
+// The structures and functions below are used in the free-threaded build
+// to safely make updates to type slots, on type_setattro() for a slot
+// or when __bases__ is re-assigned. Since the slots are read without atomic
+// operations and without locking, we can only safely update them while the
+// world is stopped. However, with the world stopped, we are very limited on
+// which APIs can be safely used. For example, calling _PyObject_HashFast()
+// or _PyDict_GetItemRef_KnownHash() are not safe and can potentially cause
+// deadlocks. Hashing can be re-entrant and _PyDict_GetItemRef_KnownHash can
+// acquire a lock if the dictionary is not owned by the current thread, to
+// mark it shared on reading.
+//
+// We do the slot updates in two steps. First, with TYPE_LOCK held, we lookup
+// the descriptor for each slot, for each subclass. We build a queue of
+// updates to perform but don't actually update the type structures. After we
+// have finished the lookups, we stop the world and apply all of the updates.
+// The apply_slot_updates() code is simple and easy to confirm that it is
+// safe.
+
+typedef struct {
+ PyTypeObject *type;
+ void **slot_ptr;
+ void *slot_value;
+} slot_update_item_t;
+
+// The number of slot updates performed is based on the number of changed
+// slots and the number of subclasses. It's possible there are many updates
+// required if there are many subclasses (potentially an unbounded amount).
+// Usually the number of slot updates is small, most often zero or one. When
+// running the unit tests, we don't exceed 20. The chunk size is set to
+// handle the common case with a single chunk and to not require too many
+// chunk allocations if there are many subclasses.
+#define SLOT_UPDATE_CHUNK_SIZE 30
+
+typedef struct _slot_update {
+ struct _slot_update *prev;
+ Py_ssize_t n;
+ slot_update_item_t updates[SLOT_UPDATE_CHUNK_SIZE];
+} slot_update_chunk_t;
+
+// a queue of updates to be performed
+typedef struct {
+ slot_update_chunk_t *head;
+} slot_update_t;
+
+static slot_update_chunk_t *
+slot_update_new_chunk(void)
+{
+ slot_update_chunk_t *chunk = PyMem_Malloc(sizeof(slot_update_chunk_t));
+ if (chunk == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ chunk->prev = NULL;
+ chunk->n = 0;
+ return chunk;
+}
+
+static void
+slot_update_free_chunks(slot_update_t *updates)
+{
+ slot_update_chunk_t *chunk = updates->head;
+ while (chunk != NULL) {
+ slot_update_chunk_t *prev = chunk->prev;
+ PyMem_Free(chunk);
+ chunk = prev;
+ }
+}
+
+static int
+queue_slot_update(slot_update_t *updates, PyTypeObject *type,
+ void **slot_ptr, void *slot_value)
+{
+ if (*slot_ptr == slot_value) {
+ return 0; // slot pointer not actually changed, don't queue update
+ }
+ if (updates->head == NULL || updates->head->n == SLOT_UPDATE_CHUNK_SIZE) {
+ slot_update_chunk_t *chunk = slot_update_new_chunk();
+ if (chunk == NULL) {
+ return -1; // out-of-memory
+ }
+ chunk->prev = updates->head;
+ updates->head = chunk;
+ }
+ slot_update_item_t *item = &updates->head->updates[updates->head->n];
+ item->type = type;
+ item->slot_ptr = slot_ptr;
+ item->slot_value = slot_value;
+ updates->head->n++;
+ assert(updates->head->n <= SLOT_UPDATE_CHUNK_SIZE);
+ return 0;
+}
+
+static void
+apply_slot_updates(slot_update_t *updates)
+{
+ assert(types_world_is_stopped());
+ slot_update_chunk_t *chunk = updates->head;
+ while (chunk != NULL) {
+ for (Py_ssize_t i = 0; i < chunk->n; i++) {
+ slot_update_item_t *item = &chunk->updates[i];
+ *(item->slot_ptr) = item->slot_value;
+ if (item->slot_value == slot_tp_call) {
+ /* A generic __call__ is incompatible with vectorcall */
+ type_clear_flags(item->type, Py_TPFLAGS_HAVE_VECTORCALL);
+ }
+ }
+ chunk = chunk->prev;
+ }
+}
+
+static void
+apply_type_slot_updates(slot_update_t *updates)
+{
+ // This must be done carefully to avoid data races and deadlocks. We
+ // have just updated the type __dict__, while holding TYPE_LOCK. We have
+ // collected all of the required type slot updates into the 'updates'
+ // queue. Note that those updates can apply to multiple types since
+ // subclasses might also be affected by the dict change.
+ //
+ // We need to prevent other threads from writing to the dict before we can
+ // finish updating the slots. The actual stores to the slots are done
+ // with the world stopped. If we block on the stop-the-world mutex then
+ // we could release TYPE_LOCK mutex and potentially allow other threads
+ // to update the dict. That's because TYPE_LOCK was acquired using a
+ // critical section.
+ //
+ // The type_lock_prevent_release() call prevents the TYPE_LOCK mutex from
+ // being released even if we block on the STM mutex. We need to take care
+ // that we do not deadlock because of that. It is safe because we always
+ // acquire locks in the same order: first the TYPE_LOCK mutex and then the
+ // STM mutex.
+ type_lock_prevent_release();
+ types_stop_world();
+ apply_slot_updates(updates);
+ types_start_world();
+ type_lock_allow_release();
+}
+
+#else
+
+// dummy definition, this parameter is only NULL in the default build
+typedef void slot_update_t;
+
+#endif
+
+// data passed to update_slots_callback()
+typedef struct {
+ slot_update_t *queued_updates;
+ pytype_slotdef **defs;
+} update_callback_data_t;
+
static void object_dealloc(PyObject *);
static PyObject *object_new(PyTypeObject *, PyObject *, PyObject *);
static int object_init(PyObject *, PyObject *, PyObject *);
-static int update_slot(PyTypeObject *, PyObject *);
+static int update_slot(PyTypeObject *, PyObject *, slot_update_t *update);
static void fixup_slot_dispatchers(PyTypeObject *);
static int type_new_set_names(PyTypeObject *);
static int type_new_init_subclass(PyTypeObject *, PyObject *);
+static bool has_slotdef(PyObject *);
/*
* Helpers for __dict__ descriptor. We don't want to expose the dicts
@@ -3649,10 +3948,39 @@ subtype_dict(PyObject *obj, void *context)
return PyObject_GenericGetDict(obj, context);
}
+int
+_PyObject_SetDict(PyObject *obj, PyObject *value)
+{
+ if (value != NULL && !PyDict_Check(value)) {
+ PyErr_Format(PyExc_TypeError,
+ "__dict__ must be set to a dictionary, "
+ "not a '%.200s'", Py_TYPE(value)->tp_name);
+ return -1;
+ }
+ if (Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT) {
+ return _PyObject_SetManagedDict(obj, value);
+ }
+ PyObject **dictptr = _PyObject_ComputedDictPointer(obj);
+ if (dictptr == NULL) {
+ PyErr_SetString(PyExc_AttributeError,
+ "This object has no __dict__");
+ return -1;
+ }
+ Py_BEGIN_CRITICAL_SECTION(obj);
+ PyObject *olddict = *dictptr;
+ FT_ATOMIC_STORE_PTR_RELEASE(*dictptr, Py_NewRef(value));
+#ifdef Py_GIL_DISABLED
+ _PyObject_XDecRefDelayed(olddict);
+#else
+ Py_XDECREF(olddict);
+#endif
+ Py_END_CRITICAL_SECTION();
+ return 0;
+}
+
static int
subtype_setdict(PyObject *obj, PyObject *value, void *context)
{
- PyObject **dictptr;
PyTypeObject *base;
base = get_builtin_base_with_dict(Py_TYPE(obj));
@@ -3670,28 +3998,7 @@ subtype_setdict(PyObject *obj, PyObject *value, void *context)
}
return func(descr, obj, value);
}
- /* Almost like PyObject_GenericSetDict, but allow __dict__ to be deleted. */
- if (value != NULL && !PyDict_Check(value)) {
- PyErr_Format(PyExc_TypeError,
- "__dict__ must be set to a dictionary, "
- "not a '%.200s'", Py_TYPE(value)->tp_name);
- return -1;
- }
-
- if (Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT) {
- return _PyObject_SetManagedDict(obj, value);
- }
- else {
- dictptr = _PyObject_ComputedDictPointer(obj);
- if (dictptr == NULL) {
- PyErr_SetString(PyExc_AttributeError,
- "This object has no __dict__");
- return -1;
- }
- Py_CLEAR(*dictptr);
- *dictptr = Py_XNewRef(value);
- }
- return 0;
+ return _PyObject_SetDict(obj, value);
}
static PyObject *
@@ -3785,7 +4092,7 @@ type_init(PyObject *cls, PyObject *args, PyObject *kwds)
unsigned long
PyType_GetFlags(PyTypeObject *type)
{
- return FT_ATOMIC_LOAD_ULONG_RELAXED(type->tp_flags);
+ return type->tp_flags;
}
@@ -4563,6 +4870,10 @@ type_new_impl(type_new_ctx *ctx)
}
assert(_PyType_CheckConsistency(type));
+#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4
+ // After this point, other threads can potentially use this type.
+ ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG;
+#endif
return (PyObject *)type;
@@ -4625,7 +4936,7 @@ type_new_get_bases(type_new_ctx *ctx, PyObject **type)
}
/* Calculate best base, and check that all bases are type objects */
- PyTypeObject *base = best_base(ctx->bases);
+ PyTypeObject *base = find_best_base(ctx->bases);
if (base == NULL) {
return -1;
}
@@ -5040,12 +5351,12 @@ PyType_FromMetaclass(
}
/* Calculate best base, and check that all bases are type objects */
- PyTypeObject *base = best_base(bases); // borrowed ref
+ PyTypeObject *base = find_best_base(bases); // borrowed ref
if (base == NULL) {
goto finally;
}
- // best_base should check Py_TPFLAGS_BASETYPE & raise a proper exception,
- // here we just check its work
+ // find_best_base() should check Py_TPFLAGS_BASETYPE & raise a proper
+ // exception, here we just check its work
assert(_PyType_HasFeature(base, Py_TPFLAGS_BASETYPE));
/* Calculate sizes */
@@ -5276,6 +5587,10 @@ PyType_FromMetaclass(
}
assert(_PyType_CheckConsistency(type));
+#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4
+ // After this point, other threads can potentially use this type.
+ ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG;
+#endif
finally:
if (PyErr_Occurred()) {
@@ -5571,8 +5886,6 @@ PyObject_GetItemData(PyObject *obj)
static PyObject *
find_name_in_mro(PyTypeObject *type, PyObject *name, int *error)
{
- ASSERT_TYPE_LOCK_HELD();
-
Py_hash_t hash = _PyObject_HashFast(name);
if (hash == -1) {
*error = -1;
@@ -5881,9 +6194,13 @@ _PyType_CacheGetItemForSpecialization(PyHeapTypeObject *ht, PyObject *descriptor
void
_PyType_SetFlags(PyTypeObject *self, unsigned long mask, unsigned long flags)
{
- BEGIN_TYPE_LOCK();
- type_set_flags_with_mask(self, mask, flags);
- END_TYPE_LOCK();
+ unsigned long new_flags = (self->tp_flags & ~mask) | flags;
+ if (new_flags != self->tp_flags) {
+ types_stop_world();
+ // can't use new_flags here since they could be out-of-date
+ self->tp_flags = (self->tp_flags & ~mask) | flags;
+ types_start_world();
+ }
}
int
@@ -5930,9 +6247,9 @@ set_flags_recursive(PyTypeObject *self, unsigned long mask, unsigned long flags)
void
_PyType_SetFlagsRecursive(PyTypeObject *self, unsigned long mask, unsigned long flags)
{
- BEGIN_TYPE_LOCK();
+ types_stop_world();
set_flags_recursive(self, mask, flags);
- END_TYPE_LOCK();
+ types_start_world();
}
/* This is similar to PyObject_GenericGetAttr(),
@@ -6046,6 +6363,8 @@ _Py_type_getattro(PyObject *tp, PyObject *name)
return _Py_type_getattro_impl(type, name, NULL);
}
+// Called by type_setattro(). Updates both the type dict and
+// the type versions.
static int
type_update_dict(PyTypeObject *type, PyDictObject *dict, PyObject *name,
PyObject *value, PyObject **old_value)
@@ -6075,10 +6394,30 @@ type_update_dict(PyTypeObject *type, PyDictObject *dict, PyObject *name,
return -1;
}
- if (is_dunder_name(name)) {
- return update_slot(type, name);
- }
+ return 0;
+}
+static int
+update_slot_after_setattr(PyTypeObject *type, PyObject *name)
+{
+#ifdef Py_GIL_DISABLED
+ // stack allocate one chunk since that's all we need
+ assert(SLOT_UPDATE_CHUNK_SIZE >= MAX_EQUIV);
+ slot_update_chunk_t chunk = {0};
+ slot_update_t queued_updates = {&chunk};
+
+ if (update_slot(type, name, &queued_updates) < 0) {
+ return -1;
+ }
+ if (queued_updates.head->n > 0) {
+ apply_type_slot_updates(&queued_updates);
+ ASSERT_TYPE_LOCK_HELD();
+ // should never allocate another chunk
+ assert(chunk.prev == NULL);
+ }
+#else
+ update_slot(type, name, NULL);
+#endif
return 0;
}
@@ -6136,7 +6475,9 @@ type_setattro(PyObject *self, PyObject *name, PyObject *value)
PyObject *dict = type->tp_dict;
if (dict == NULL) {
- // We don't just do PyType_Ready because we could already be readying
+ // This is an unlikely case. PyType_Ready has not yet been done and
+ // we need to initialize tp_dict. We don't just do PyType_Ready
+ // because we could already be readying.
BEGIN_TYPE_LOCK();
dict = type->tp_dict;
if (dict == NULL) {
@@ -6152,6 +6493,12 @@ type_setattro(PyObject *self, PyObject *name, PyObject *value)
BEGIN_TYPE_DICT_LOCK(dict);
res = type_update_dict(type, (PyDictObject *)dict, name, value, &old_value);
assert(_PyType_CheckConsistency(type));
+ if (res == 0) {
+ if (is_dunder_name(name) && has_slotdef(name)) {
+ // The name corresponds to a type slot.
+ res = update_slot_after_setattr(type, name);
+ }
+ }
END_TYPE_DICT_LOCK();
done:
@@ -7081,15 +7428,10 @@ object_set_class(PyObject *self, PyObject *value, void *closure)
return -1;
}
-#ifdef Py_GIL_DISABLED
- PyInterpreterState *interp = _PyInterpreterState_GET();
- _PyEval_StopTheWorld(interp);
-#endif
+ types_stop_world();
PyTypeObject *oldto = Py_TYPE(self);
int res = object_set_class_world_stopped(self, newto);
-#ifdef Py_GIL_DISABLED
- _PyEval_StartTheWorld(interp);
-#endif
+ types_start_world();
if (res == 0) {
if (oldto->tp_flags & Py_TPFLAGS_HEAPTYPE) {
Py_DECREF(oldto);
@@ -8497,7 +8839,7 @@ type_ready_mro(PyTypeObject *type, int initial)
}
/* Calculate method resolution order */
- if (mro_internal_unlocked(type, initial, NULL) < 0) {
+ if (mro_internal(type, initial, NULL) < 0) {
return -1;
}
PyObject *mro = lookup_tp_mro(type);
@@ -11020,12 +11362,21 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
{
/* XXX Maybe this could be optimized more -- but is it worth it? */
+#ifdef Py_GIL_DISABLED
+ pytype_slotdef *ptrs[MAX_EQUIV];
+ pytype_slotdef **pp = ptrs;
+ /* Collect all slotdefs that match name into ptrs. */
+ for (pytype_slotdef *p = slotdefs; p->name_strobj; p++) {
+ if (p->name_strobj == name)
+ *pp++ = p;
+ }
+ *pp = NULL;
+#else
/* pname and ptrs act as a little cache */
PyInterpreterState *interp = _PyInterpreterState_GET();
#define pname _Py_INTERP_CACHED_OBJECT(interp, type_slots_pname)
#define ptrs _Py_INTERP_CACHED_OBJECT(interp, type_slots_ptrs)
pytype_slotdef *p, **pp;
- void **res, **ptr;
if (pname != name) {
/* Collect all slotdefs that match name into ptrs. */
@@ -11037,10 +11388,12 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
}
*pp = NULL;
}
+#endif
/* Look in all slots of the type matching the name. If exactly one of these
has a filled-in slot, return a pointer to that slot.
Otherwise, return NULL. */
+ void **res, **ptr;
res = NULL;
for (pp = ptrs; *pp; pp++) {
ptr = slotptr(type, (*pp)->offset);
@@ -11050,11 +11403,25 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
return NULL;
res = ptr;
}
- return res;
+#ifndef Py_GIL_DISABLED
#undef pname
#undef ptrs
+#endif
+ return res;
}
+// Return true if "name" corresponds to at least one slot definition. This is
+// a more accurate but more expensive test compared to is_dunder_name().
+static bool
+has_slotdef(PyObject *name)
+{
+ for (pytype_slotdef *p = slotdefs; p->name_strobj; p++) {
+ if (p->name_strobj == name) {
+ return true;
+ }
+ }
+ return false;
+}
/* Common code for update_slots_callback() and fixup_slot_dispatchers().
*
@@ -11107,13 +11474,22 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
* There are some further special cases for specific slots, like supporting
* __hash__ = None for tp_hash and special code for tp_new.
*
- * When done, return a pointer to the next slotdef with a different offset,
- * because that's convenient for fixup_slot_dispatchers(). This function never
- * sets an exception: if an internal error happens (unlikely), it's ignored. */
-static pytype_slotdef *
-update_one_slot(PyTypeObject *type, pytype_slotdef *p)
+ * When done, next_p is set to the next slotdef with a different offset,
+ * because that's convenient for fixup_slot_dispatchers().
+ *
+ * If the queued_updates pointer is provided, the actual updates to the slot
+ * pointers are queued, rather than being immediately performed. That argument
+ * is only used for the free-threaded build since those updates need to be
+ * done while the world is stopped.
+ *
+ * This function will only return an error if the queued_updates argument is
+ * provided and allocating memory for the queue fails. Other exceptions that
+ * occur internally are ignored, such as when looking up descriptors. */
+static int
+update_one_slot(PyTypeObject *type, pytype_slotdef *p, pytype_slotdef **next_p,
+ slot_update_t *queued_updates)
{
- ASSERT_TYPE_LOCK_HELD();
+ ASSERT_NEW_TYPE_OR_LOCKED(type);
PyObject *descr;
PyWrapperDescrObject *d;
@@ -11136,7 +11512,10 @@ update_one_slot(PyTypeObject *type, pytype_slotdef *p)
do {
++p;
} while (p->offset == offset);
- return p;
+ if (next_p != NULL) {
+ *next_p = p;
+ }
+ return 0;
}
/* We may end up clearing live exceptions below, so make sure it's ours. */
assert(!PyErr_Occurred());
@@ -11219,16 +11598,41 @@ update_one_slot(PyTypeObject *type, pytype_slotdef *p)
}
if (p->function == slot_tp_call) {
/* A generic __call__ is incompatible with vectorcall */
- type_clear_flags(type, Py_TPFLAGS_HAVE_VECTORCALL);
+ if (queued_updates == NULL) {
+ type_clear_flags(type, Py_TPFLAGS_HAVE_VECTORCALL);
+ }
}
}
Py_DECREF(descr);
} while ((++p)->offset == offset);
- if (specific && !use_generic)
- *ptr = specific;
- else
- *ptr = generic;
- return p;
+
+ void *slot_value;
+ if (specific && !use_generic) {
+ slot_value = specific;
+ } else {
+ slot_value = generic;
+ }
+
+#ifdef Py_GIL_DISABLED
+ if (queued_updates != NULL) {
+ // queue the update to perform later, while world is stopped
+ if (queue_slot_update(queued_updates, type, ptr, slot_value) < 0) {
+ return -1;
+ }
+ } else {
+ // do the update to the type structure now
+ *ptr = slot_value;
+ }
+#else
+ // always do the update immediately
+ assert(queued_updates == NULL);
+ *ptr = slot_value;
+#endif
+
+ if (next_p != NULL) {
+ *next_p = p;
+ }
+ return 0;
}
/* In the type, update the slots whose slotdefs are gathered in the pp array.
@@ -11236,18 +11640,21 @@ update_one_slot(PyTypeObject *type, pytype_slotdef *p)
static int
update_slots_callback(PyTypeObject *type, void *data)
{
- ASSERT_TYPE_LOCK_HELD();
+ ASSERT_NEW_TYPE_OR_LOCKED(type);
- pytype_slotdef **pp = (pytype_slotdef **)data;
+ update_callback_data_t *update_data = (update_callback_data_t *)data;
+ pytype_slotdef **pp = update_data->defs;
for (; *pp; pp++) {
- update_one_slot(type, *pp);
+ if (update_one_slot(type, *pp, NULL, update_data->queued_updates) < 0) {
+ return -1;
+ }
}
return 0;
}
/* Update the slots after assignment to a class (type) attribute. */
static int
-update_slot(PyTypeObject *type, PyObject *name)
+update_slot(PyTypeObject *type, PyObject *name, slot_update_t *queued_updates)
{
pytype_slotdef *ptrs[MAX_EQUIV];
pytype_slotdef *p;
@@ -11278,8 +11685,12 @@ update_slot(PyTypeObject *type, PyObject *name)
}
if (ptrs[0] == NULL)
return 0; /* Not an attribute that affects any slots */
+
+ update_callback_data_t callback_data;
+ callback_data.defs = ptrs;
+ callback_data.queued_updates = queued_updates;
return update_subclasses(type, name,
- update_slots_callback, (void *)ptrs);
+ update_slots_callback, (void *)&callback_data);
}
/* Store the proper functions in the slot dispatches at class (type)
@@ -11288,35 +11699,56 @@ update_slot(PyTypeObject *type, PyObject *name)
static void
fixup_slot_dispatchers(PyTypeObject *type)
{
- // This lock isn't strictly necessary because the type has not been
- // exposed to anyone else yet, but update_ont_slot calls find_name_in_mro
- // where we'd like to assert that the type is locked.
- BEGIN_TYPE_LOCK();
-
assert(!PyErr_Occurred());
for (pytype_slotdef *p = slotdefs; p->name; ) {
- p = update_one_slot(type, p);
+ update_one_slot(type, p, &p, NULL);
}
-
- END_TYPE_LOCK();
}
-static void
+#ifdef Py_GIL_DISABLED
+
+// Called when __bases__ is re-assigned.
+static int
update_all_slots(PyTypeObject* type)
{
- pytype_slotdef *p;
-
- ASSERT_TYPE_LOCK_HELD();
+ // Note that update_slot() can fail due to out-of-memory when allocating
+ // the queue chunks to hold the updates. That's unlikely since the number
+ // of updates is normally small but we handle that case. update_slot()
+ // can fail internally for other reasons (a lookup fails) but those
+ // errors are suppressed.
+ slot_update_t queued_updates = {0};
+ for (pytype_slotdef *p = slotdefs; p->name; p++) {
+ if (update_slot(type, p->name_strobj, &queued_updates) < 0) {
+ if (queued_updates.head) {
+ slot_update_free_chunks(&queued_updates);
+ }
+ return -1;
+ }
+ }
+ if (queued_updates.head != NULL) {
+ apply_type_slot_updates(&queued_updates);
+ ASSERT_TYPE_LOCK_HELD();
+ slot_update_free_chunks(&queued_updates);
+ }
+ return 0;
+}
- /* Clear the VALID_VERSION flag of 'type' and all its subclasses. */
- type_modified_unlocked(type);
+#else
+// Called when __bases__ is re-assigned.
+static int
+update_all_slots(PyTypeObject* type)
+{
+ pytype_slotdef *p;
for (p = slotdefs; p->name; p++) {
- /* update_slot returns int but can't actually fail */
- update_slot(type, p->name_strobj);
+ /* update_slot returns int but can't actually fail in this case */
+ update_slot(type, p->name_strobj, NULL);
}
+ return 0;
}
+#endif
+
PyObject *
_PyType_GetSlotWrapperNames(void)
@@ -11586,7 +12018,10 @@ PyType_Freeze(PyTypeObject *type)
}
BEGIN_TYPE_LOCK();
+ types_stop_world();
type_add_flags(type, Py_TPFLAGS_IMMUTABLETYPE);
+ types_start_world();
+ ASSERT_TYPE_LOCK_HELD();
type_modified_unlocked(type);
END_TYPE_LOCK();
diff --git a/Objects/typevarobject.c b/Objects/typevarobject.c
index 6c199a52aa0..cead6e69af5 100644
--- a/Objects/typevarobject.c
+++ b/Objects/typevarobject.c
@@ -192,7 +192,7 @@ constevaluator_call(PyObject *self, PyObject *args, PyObject *kwargs)
for (Py_ssize_t i = 0; i < PyTuple_GET_SIZE(value); i++) {
PyObject *item = PyTuple_GET_ITEM(value, i);
if (i > 0) {
- if (PyUnicodeWriter_WriteUTF8(writer, ", ", 2) < 0) {
+ if (PyUnicodeWriter_WriteASCII(writer, ", ", 2) < 0) {
PyUnicodeWriter_Discard(writer);
return NULL;
}
@@ -273,7 +273,7 @@ _Py_typing_type_repr(PyUnicodeWriter *writer, PyObject *p)
}
if (p == (PyObject *)&_PyNone_Type) {
- return PyUnicodeWriter_WriteUTF8(writer, "None", 4);
+ return PyUnicodeWriter_WriteASCII(writer, "None", 4);
}
if ((rc = PyObject_HasAttrWithError(p, &_Py_ID(__origin__))) > 0 &&
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index f3f0c9646a6..5c2308a0121 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -167,11 +167,7 @@ static inline void PyUnicode_SET_UTF8_LENGTH(PyObject *op, Py_ssize_t length)
#define _PyUnicode_HASH(op) \
(_PyASCIIObject_CAST(op)->hash)
-static inline Py_hash_t PyUnicode_HASH(PyObject *op)
-{
- assert(_PyUnicode_CHECK(op));
- return FT_ATOMIC_LOAD_SSIZE_RELAXED(_PyASCIIObject_CAST(op)->hash);
-}
+#define PyUnicode_HASH PyUnstable_Unicode_GET_CACHED_HASH
static inline void PyUnicode_SET_HASH(PyObject *op, Py_hash_t hash)
{
@@ -6596,13 +6592,15 @@ _PyUnicode_GetNameCAPI(void)
/* --- Unicode Escape Codec ----------------------------------------------- */
PyObject *
-_PyUnicode_DecodeUnicodeEscapeInternal(const char *s,
+_PyUnicode_DecodeUnicodeEscapeInternal2(const char *s,
Py_ssize_t size,
const char *errors,
Py_ssize_t *consumed,
- const char **first_invalid_escape)
+ int *first_invalid_escape_char,
+ const char **first_invalid_escape_ptr)
{
const char *starts = s;
+ const char *initial_starts = starts;
_PyUnicodeWriter writer;
const char *end;
PyObject *errorHandler = NULL;
@@ -6610,7 +6608,8 @@ _PyUnicode_DecodeUnicodeEscapeInternal(const char *s,
_PyUnicode_Name_CAPI *ucnhash_capi;
// so we can remember if we've seen an invalid escape char or not
- *first_invalid_escape = NULL;
+ *first_invalid_escape_char = -1;
+ *first_invalid_escape_ptr = NULL;
if (size == 0) {
if (consumed) {
@@ -6698,9 +6697,12 @@ _PyUnicode_DecodeUnicodeEscapeInternal(const char *s,
}
}
if (ch > 0377) {
- if (*first_invalid_escape == NULL) {
- *first_invalid_escape = s-3; /* Back up 3 chars, since we've
- already incremented s. */
+ if (*first_invalid_escape_char == -1) {
+ *first_invalid_escape_char = ch;
+ if (starts == initial_starts) {
+ /* Back up 3 chars, since we've already incremented s. */
+ *first_invalid_escape_ptr = s - 3;
+ }
}
}
WRITE_CHAR(ch);
@@ -6795,9 +6797,12 @@ _PyUnicode_DecodeUnicodeEscapeInternal(const char *s,
goto error;
default:
- if (*first_invalid_escape == NULL) {
- *first_invalid_escape = s-1; /* Back up one char, since we've
- already incremented s. */
+ if (*first_invalid_escape_char == -1) {
+ *first_invalid_escape_char = c;
+ if (starts == initial_starts) {
+ /* Back up one char, since we've already incremented s. */
+ *first_invalid_escape_ptr = s - 1;
+ }
}
WRITE_ASCII_CHAR('\\');
WRITE_CHAR(c);
@@ -6842,19 +6847,20 @@ _PyUnicode_DecodeUnicodeEscapeStateful(const char *s,
const char *errors,
Py_ssize_t *consumed)
{
- const char *first_invalid_escape;
- PyObject *result = _PyUnicode_DecodeUnicodeEscapeInternal(s, size, errors,
+ int first_invalid_escape_char;
+ const char *first_invalid_escape_ptr;
+ PyObject *result = _PyUnicode_DecodeUnicodeEscapeInternal2(s, size, errors,
consumed,
- &first_invalid_escape);
+ &first_invalid_escape_char,
+ &first_invalid_escape_ptr);
if (result == NULL)
return NULL;
- if (first_invalid_escape != NULL) {
- unsigned char c = *first_invalid_escape;
- if ('4' <= c && c <= '7') {
+ if (first_invalid_escape_char != -1) {
+ if (first_invalid_escape_char > 0xff) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
- "\"\\%.3s\" is an invalid octal escape sequence. "
+ "\"\\%o\" is an invalid octal escape sequence. "
"Such sequences will not work in the future. ",
- first_invalid_escape) < 0)
+ first_invalid_escape_char) < 0)
{
Py_DECREF(result);
return NULL;
@@ -6864,7 +6870,7 @@ _PyUnicode_DecodeUnicodeEscapeStateful(const char *s,
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"\"\\%c\" is an invalid escape sequence. "
"Such sequences will not work in the future. ",
- c) < 0)
+ first_invalid_escape_char) < 0)
{
Py_DECREF(result);
return NULL;
@@ -13919,7 +13925,12 @@ _PyUnicodeWriter_WriteStr(_PyUnicodeWriter *writer, PyObject *str)
int
PyUnicodeWriter_WriteStr(PyUnicodeWriter *writer, PyObject *obj)
{
- if (Py_TYPE(obj) == &PyLong_Type) {
+ PyTypeObject *type = Py_TYPE(obj);
+ if (type == &PyUnicode_Type) {
+ return _PyUnicodeWriter_WriteStr((_PyUnicodeWriter*)writer, obj);
+ }
+
+ if (type == &PyLong_Type) {
return _PyLong_FormatWriter((_PyUnicodeWriter*)writer, obj, 10, 0);
}
@@ -14068,6 +14079,20 @@ _PyUnicodeWriter_WriteASCIIString(_PyUnicodeWriter *writer,
return 0;
}
+
+int
+PyUnicodeWriter_WriteASCII(PyUnicodeWriter *writer,
+ const char *str,
+ Py_ssize_t size)
+{
+ assert(writer != NULL);
+ _Py_AssertHoldsTstate();
+
+ _PyUnicodeWriter *priv_writer = (_PyUnicodeWriter*)writer;
+ return _PyUnicodeWriter_WriteASCIIString(priv_writer, str, size);
+}
+
+
int
PyUnicodeWriter_WriteUTF8(PyUnicodeWriter *writer,
const char *str,
diff --git a/Objects/unionobject.c b/Objects/unionobject.c
index 66435924b6c..00ca5b9bf80 100644
--- a/Objects/unionobject.c
+++ b/Objects/unionobject.c
@@ -290,7 +290,7 @@ union_repr(PyObject *self)
}
for (Py_ssize_t i = 0; i < len; i++) {
- if (i > 0 && PyUnicodeWriter_WriteUTF8(writer, " | ", 3) < 0) {
+ if (i > 0 && PyUnicodeWriter_WriteASCII(writer, " | ", 3) < 0) {
goto error;
}
PyObject *p = PyTuple_GET_ITEM(alias->args, i);
@@ -300,12 +300,12 @@ union_repr(PyObject *self)
}
#if 0
- PyUnicodeWriter_WriteUTF8(writer, "|args=", 6);
+ PyUnicodeWriter_WriteASCII(writer, "|args=", 6);
PyUnicodeWriter_WriteRepr(writer, alias->args);
- PyUnicodeWriter_WriteUTF8(writer, "|h=", 3);
+ PyUnicodeWriter_WriteASCII(writer, "|h=", 3);
PyUnicodeWriter_WriteRepr(writer, alias->hashable_args);
if (alias->unhashable_args) {
- PyUnicodeWriter_WriteUTF8(writer, "|u=", 3);
+ PyUnicodeWriter_WriteASCII(writer, "|u=", 3);
PyUnicodeWriter_WriteRepr(writer, alias->unhashable_args);
}
#endif