Diffstat (limited to 'Modules')
 Modules/_hashopenssl.c          | 318
 Modules/_heapqmodule.c          |  54
 Modules/blake2module.c          | 725
 Modules/clinic/_heapqmodule.c.h |  23
 Modules/hmacmodule.c            |   4
 Modules/md5module.c             |   2
 Modules/socketmodule.c          |  24
7 files changed, 633 insertions, 517 deletions
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c index 50cf3c57491..ce9603d5db8 100644 --- a/Modules/_hashopenssl.c +++ b/Modules/_hashopenssl.c @@ -311,8 +311,9 @@ class _hashlib.HMAC "HMACobject *" "((_hashlibstate *)PyModule_GetState(module)) /* Set an exception of given type using the given OpenSSL error code. */ static void -set_ssl_exception_from_errcode(PyObject *exc, unsigned long errcode) +set_ssl_exception_from_errcode(PyObject *exc_type, unsigned long errcode) { + assert(exc_type != NULL); assert(errcode != 0); /* ERR_ERROR_STRING(3) ensures that the messages below are ASCII */ @@ -321,13 +322,29 @@ set_ssl_exception_from_errcode(PyObject *exc, unsigned long errcode) const char *reason = ERR_reason_error_string(errcode); if (lib && func) { - PyErr_Format(exc, "[%s: %s] %s", lib, func, reason); + PyErr_Format(exc_type, "[%s: %s] %s", lib, func, reason); } else if (lib) { - PyErr_Format(exc, "[%s] %s", lib, reason); + PyErr_Format(exc_type, "[%s] %s", lib, reason); } else { - PyErr_SetString(exc, reason); + PyErr_SetString(exc_type, reason); + } +} + +/* + * Get an appropriate exception type for the given OpenSSL error code. + * + * The exception type depends on the error code reason. + */ +static PyObject * +get_smart_ssl_exception_type(unsigned long errcode, PyObject *default_exc_type) +{ + switch (ERR_GET_REASON(errcode)) { + case ERR_R_MALLOC_FAILURE: + return PyExc_MemoryError; + default: + return default_exc_type; } } @@ -335,36 +352,83 @@ set_ssl_exception_from_errcode(PyObject *exc, unsigned long errcode) * Set an exception of given type. * * By default, the exception's message is constructed by using the last SSL - * error that occurred. If no error occurred, the 'fallback_format' is used - * to create a C-style formatted fallback message. + * error that occurred. If no error occurred, the 'fallback_message' is used + * to create an exception message. */ static void -raise_ssl_error(PyObject *exc, const char *fallback_format, ...) +raise_ssl_error(PyObject *exc_type, const char *fallback_message) +{ + assert(fallback_message != NULL); + unsigned long errcode = ERR_peek_last_error(); + if (errcode) { + ERR_clear_error(); + set_ssl_exception_from_errcode(exc_type, errcode); + } + else { + PyErr_SetString(exc_type, fallback_message); + } +} + +/* Same as raise_ssl_error() but with a C-style formatted message. */ +static void +raise_ssl_error_f(PyObject *exc_type, const char *fallback_format, ...) { assert(fallback_format != NULL); unsigned long errcode = ERR_peek_last_error(); if (errcode) { ERR_clear_error(); - set_ssl_exception_from_errcode(exc, errcode); + set_ssl_exception_from_errcode(exc_type, errcode); } else { va_list vargs; va_start(vargs, fallback_format); - PyErr_FormatV(exc, fallback_format, vargs); + PyErr_FormatV(exc_type, fallback_format, vargs); + va_end(vargs); + } +} + +/* Same as raise_ssl_error_f() with smart exception types. */ +static void +raise_smart_ssl_error_f(PyObject *exc_type, const char *fallback_format, ...) +{ + unsigned long errcode = ERR_peek_last_error(); + if (errcode) { + ERR_clear_error(); + exc_type = get_smart_ssl_exception_type(errcode, exc_type); + set_ssl_exception_from_errcode(exc_type, errcode); + } + else { + va_list vargs; + va_start(vargs, fallback_format); + PyErr_FormatV(exc_type, fallback_format, vargs); va_end(vargs); } } /* - * Set an exception with a generic default message after an error occurred. 
- * - * It can also be used without previous calls to SSL built-in functions, - * in which case a generic error message is provided. + * Raise a ValueError with a default message after an error occurred. + * It can also be used without previous calls to SSL built-in functions. */ static inline void -notify_ssl_error_occurred(void) +notify_ssl_error_occurred(const char *message) +{ + raise_ssl_error(PyExc_ValueError, message); +} + +/* Same as notify_ssl_error_occurred() for failed OpenSSL functions. */ +static inline void +notify_ssl_error_occurred_in(const char *funcname) { - raise_ssl_error(PyExc_ValueError, "no reason supplied"); + raise_ssl_error_f(PyExc_ValueError, + "error in OpenSSL function %s()", funcname); +} + +/* Same as notify_ssl_error_occurred_in() with smart exception types. */ +static inline void +notify_smart_ssl_error_occurred_in(const char *funcname) +{ + raise_smart_ssl_error_f(PyExc_ValueError, + "error in OpenSSL function %s()", funcname); } /* LCOV_EXCL_STOP */ @@ -408,16 +472,19 @@ get_asn1_utf8name_by_nid(int nid) // In OpenSSL 3.0 and later, OBJ_nid*() are thread-safe and may raise. assert(ERR_peek_last_error() != 0); if (ERR_GET_REASON(ERR_peek_last_error()) != OBJ_R_UNKNOWN_NID) { - notify_ssl_error_occurred(); - return NULL; + goto error; } // fallback to short name and unconditionally propagate errors name = OBJ_nid2sn(nid); if (name == NULL) { - raise_ssl_error(PyExc_ValueError, "cannot resolve NID %d", nid); + goto error; } } return name; + +error: + raise_ssl_error_f(PyExc_ValueError, "cannot resolve NID %d", nid); + return NULL; } /* @@ -449,8 +516,7 @@ static PY_EVP_MD * get_openssl_evp_md_by_utf8name(PyObject *module, const char *name, Py_hash_type py_ht) { - PY_EVP_MD *digest = NULL; - PY_EVP_MD *other_digest = NULL; + PY_EVP_MD *digest = NULL, *other_digest = NULL; _hashlibstate *state = get_hashlib_state(module); py_hashentry_t *entry = (py_hashentry_t *)_Py_hashtable_get( state->hashtable, (const void*)name @@ -484,15 +550,16 @@ get_openssl_evp_md_by_utf8name(PyObject *module, const char *name, #endif } break; + default: + goto invalid_hash_type; } // if another thread same thing at same time make sure we got same ptr assert(other_digest == NULL || other_digest == digest); - if (digest != NULL) { - if (other_digest == NULL) { - PY_EVP_MD_up_ref(digest); - } + if (digest != NULL && other_digest == NULL) { + PY_EVP_MD_up_ref(digest); } - } else { + } + else { // Fall back for looking up an unindexed OpenSSL specific name. switch (py_ht) { case Py_ht_evp: @@ -503,14 +570,21 @@ get_openssl_evp_md_by_utf8name(PyObject *module, const char *name, case Py_ht_evp_nosecurity: digest = PY_EVP_MD_fetch(name, "-fips"); break; + default: + goto invalid_hash_type; } } if (digest == NULL) { - raise_ssl_error(state->unsupported_digestmod_error, - "unsupported hash type %s", name); + raise_ssl_error_f(state->unsupported_digestmod_error, + "unsupported digest name: %s", name); return NULL; } return digest; + +invalid_hash_type: + assert(digest == NULL); + PyErr_Format(PyExc_SystemError, "unsupported hash type %d", py_ht); + return NULL; } /* @@ -556,6 +630,22 @@ get_openssl_evp_md(PyObject *module, PyObject *digestmod, Py_hash_type py_ht) return get_openssl_evp_md_by_utf8name(module, name, py_ht); } +// --- OpenSSL HASH wrappers -------------------------------------------------- + +/* Thin wrapper around EVP_MD_CTX_new() which sets an exception on failure. 
*/ +static EVP_MD_CTX * +py_wrapper_EVP_MD_CTX_new(void) +{ + EVP_MD_CTX *ctx = EVP_MD_CTX_new(); + if (ctx == NULL) { + PyErr_NoMemory(); + return NULL; + } + return ctx; +} + +// --- HASH interface --------------------------------------------------------- + static HASHobject * new_hash_object(PyTypeObject *type) { @@ -565,10 +655,9 @@ new_hash_object(PyTypeObject *type) } HASHLIB_INIT_MUTEX(retval); - retval->ctx = EVP_MD_CTX_new(); + retval->ctx = py_wrapper_EVP_MD_CTX_new(); if (retval->ctx == NULL) { Py_DECREF(retval); - PyErr_NoMemory(); return NULL; } @@ -586,7 +675,7 @@ _hashlib_HASH_hash(HASHobject *self, const void *vp, Py_ssize_t len) else process = Py_SAFE_DOWNCAST(len, Py_ssize_t, unsigned int); if (!EVP_DigestUpdate(self->ctx, (const void*)cp, process)) { - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestUpdate)); return -1; } len -= process; @@ -614,7 +703,11 @@ _hashlib_HASH_copy_locked(HASHobject *self, EVP_MD_CTX *new_ctx_p) ENTER_HASHLIB(self); result = EVP_MD_CTX_copy(new_ctx_p, self->ctx); LEAVE_HASHLIB(self); - return result; + if (result == 0) { + notify_smart_ssl_error_occurred_in(Py_STRINGIFY(EVP_MD_CTX_copy)); + return -1; + } + return 0; } /* External methods for a hash object */ @@ -634,14 +727,36 @@ _hashlib_HASH_copy_impl(HASHobject *self) if ((newobj = new_hash_object(Py_TYPE(self))) == NULL) return NULL; - if (!_hashlib_HASH_copy_locked(self, newobj->ctx)) { + if (_hashlib_HASH_copy_locked(self, newobj->ctx) < 0) { Py_DECREF(newobj); - notify_ssl_error_occurred(); return NULL; } return (PyObject *)newobj; } +static Py_ssize_t +_hashlib_HASH_digest_compute(HASHobject *self, unsigned char *digest) +{ + EVP_MD_CTX *ctx = py_wrapper_EVP_MD_CTX_new(); + if (ctx == NULL) { + return -1; + } + if (_hashlib_HASH_copy_locked(self, ctx) < 0) { + goto error; + } + Py_ssize_t digest_size = EVP_MD_CTX_size(ctx); + if (!EVP_DigestFinal(ctx, digest, NULL)) { + notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestFinal)); + goto error; + } + EVP_MD_CTX_free(ctx); + return digest_size; + +error: + EVP_MD_CTX_free(ctx); + return -1; +} + /*[clinic input] _hashlib.HASH.digest @@ -653,32 +768,8 @@ _hashlib_HASH_digest_impl(HASHobject *self) /*[clinic end generated code: output=3fc6f9671d712850 input=d8d528d6e50af0de]*/ { unsigned char digest[EVP_MAX_MD_SIZE]; - EVP_MD_CTX *temp_ctx; - PyObject *retval; - unsigned int digest_size; - - temp_ctx = EVP_MD_CTX_new(); - if (temp_ctx == NULL) { - PyErr_NoMemory(); - return NULL; - } - - if (!_hashlib_HASH_copy_locked(self, temp_ctx)) { - goto error; - } - digest_size = EVP_MD_CTX_size(temp_ctx); - if (!EVP_DigestFinal(temp_ctx, digest, NULL)) { - goto error; - } - - retval = PyBytes_FromStringAndSize((const char *)digest, digest_size); - EVP_MD_CTX_free(temp_ctx); - return retval; - -error: - EVP_MD_CTX_free(temp_ctx); - notify_ssl_error_occurred(); - return NULL; + Py_ssize_t n = _hashlib_HASH_digest_compute(self, digest); + return n < 0 ? 
NULL : PyBytes_FromStringAndSize((const char *)digest, n); } /*[clinic input] @@ -692,32 +783,8 @@ _hashlib_HASH_hexdigest_impl(HASHobject *self) /*[clinic end generated code: output=1b8e60d9711e7f4d input=ae7553f78f8372d8]*/ { unsigned char digest[EVP_MAX_MD_SIZE]; - EVP_MD_CTX *temp_ctx; - unsigned int digest_size; - - temp_ctx = EVP_MD_CTX_new(); - if (temp_ctx == NULL) { - PyErr_NoMemory(); - return NULL; - } - - /* Get the raw (binary) digest value */ - if (!_hashlib_HASH_copy_locked(self, temp_ctx)) { - goto error; - } - digest_size = EVP_MD_CTX_size(temp_ctx); - if (!EVP_DigestFinal(temp_ctx, digest, NULL)) { - goto error; - } - - EVP_MD_CTX_free(temp_ctx); - - return _Py_strhex((const char *)digest, (Py_ssize_t)digest_size); - -error: - EVP_MD_CTX_free(temp_ctx); - notify_ssl_error_occurred(); - return NULL; + Py_ssize_t n = _hashlib_HASH_digest_compute(self, digest); + return n < 0 ? NULL : _Py_strhex((const char *)digest, n); } /*[clinic input] @@ -788,7 +855,7 @@ _hashlib_HASH_get_name(PyObject *op, void *Py_UNUSED(closure)) HASHobject *self = HASHobject_CAST(op); const EVP_MD *md = EVP_MD_CTX_md(self->ctx); if (md == NULL) { - notify_ssl_error_occurred(); + notify_ssl_error_occurred("missing EVP_MD for HASH context"); return NULL; } const char *name = get_hashlib_utf8name_by_evp_md(md); @@ -877,20 +944,20 @@ _hashlib_HASHXOF_digest_impl(HASHobject *self, Py_ssize_t length) return NULL; } - temp_ctx = EVP_MD_CTX_new(); + temp_ctx = py_wrapper_EVP_MD_CTX_new(); if (temp_ctx == NULL) { Py_DECREF(retval); - PyErr_NoMemory(); return NULL; } - if (!_hashlib_HASH_copy_locked(self, temp_ctx)) { + if (_hashlib_HASH_copy_locked(self, temp_ctx) < 0) { goto error; } if (!EVP_DigestFinalXOF(temp_ctx, (unsigned char*)PyBytes_AS_STRING(retval), length)) { + notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestFinalXOF)); goto error; } @@ -900,7 +967,6 @@ _hashlib_HASHXOF_digest_impl(HASHobject *self, Py_ssize_t length) error: Py_DECREF(retval); EVP_MD_CTX_free(temp_ctx); - notify_ssl_error_occurred(); return NULL; } @@ -926,18 +992,18 @@ _hashlib_HASHXOF_hexdigest_impl(HASHobject *self, Py_ssize_t length) return NULL; } - temp_ctx = EVP_MD_CTX_new(); + temp_ctx = py_wrapper_EVP_MD_CTX_new(); if (temp_ctx == NULL) { PyMem_Free(digest); - PyErr_NoMemory(); return NULL; } /* Get the raw (binary) digest value */ - if (!_hashlib_HASH_copy_locked(self, temp_ctx)) { + if (_hashlib_HASH_copy_locked(self, temp_ctx) < 0) { goto error; } if (!EVP_DigestFinalXOF(temp_ctx, digest, length)) { + notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestFinalXOF)); goto error; } @@ -950,7 +1016,6 @@ _hashlib_HASHXOF_hexdigest_impl(HASHobject *self, Py_ssize_t length) error: PyMem_Free(digest); EVP_MD_CTX_free(temp_ctx); - notify_ssl_error_occurred(); return NULL; } @@ -1054,7 +1119,7 @@ _hashlib_HASH(PyObject *module, const char *digestname, PyObject *data_obj, int result = EVP_DigestInit_ex(self->ctx, digest, NULL); if (!result) { - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_DigestInit_ex)); Py_CLEAR(self); goto exit; } @@ -1463,7 +1528,7 @@ pbkdf2_hmac_impl(PyObject *module, const char *hash_name, if (!retval) { Py_CLEAR(key_obj); - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(PKCS5_PBKDF2_HMAC)); goto end; } @@ -1539,8 +1604,8 @@ _hashlib_scrypt_impl(PyObject *module, Py_buffer *password, Py_buffer *salt, /* let OpenSSL validate the rest */ retval = EVP_PBE_scrypt(NULL, 0, NULL, 0, n, r, p, maxmem, NULL, 0); if (!retval) { - 
raise_ssl_error(PyExc_ValueError, - "Invalid parameter combination for n, r, p, maxmem."); + notify_ssl_error_occurred( + "Invalid parameter combination for n, r, p, maxmem."); return NULL; } @@ -1561,7 +1626,7 @@ _hashlib_scrypt_impl(PyObject *module, Py_buffer *password, Py_buffer *salt, if (!retval) { Py_CLEAR(key_obj); - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(EVP_PBE_scrypt)); return NULL; } return key_obj; @@ -1618,7 +1683,7 @@ _hashlib_hmac_singleshot_impl(PyObject *module, Py_buffer *key, PY_EVP_MD_free(evp); if (result == NULL) { - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(HMAC)); return NULL; } return PyBytes_FromStringAndSize((const char*)md, md_len); @@ -1627,6 +1692,18 @@ _hashlib_hmac_singleshot_impl(PyObject *module, Py_buffer *key, /* OpenSSL-based HMAC implementation */ +/* Thin wrapper around HMAC_CTX_new() which sets an exception on failure. */ +static HMAC_CTX * +py_openssl_wrapper_HMAC_CTX_new(void) +{ + HMAC_CTX *ctx = HMAC_CTX_new(); + if (ctx == NULL) { + PyErr_NoMemory(); + return NULL; + } + return ctx; +} + static int _hmac_update(HMACobject*, PyObject*); static const EVP_MD * @@ -1634,7 +1711,7 @@ _hashlib_hmac_get_md(HMACobject *self) { const EVP_MD *md = HMAC_CTX_get_md(self->ctx); if (md == NULL) { - raise_ssl_error(PyExc_ValueError, "missing EVP_MD for HMAC context"); + notify_ssl_error_occurred("missing EVP_MD for HMAC context"); } return md; } @@ -1676,17 +1753,16 @@ _hashlib_hmac_new_impl(PyObject *module, Py_buffer *key, PyObject *msg_obj, return NULL; } - ctx = HMAC_CTX_new(); + ctx = py_openssl_wrapper_HMAC_CTX_new(); if (ctx == NULL) { PY_EVP_MD_free(digest); - PyErr_NoMemory(); goto error; } r = HMAC_Init_ex(ctx, key->buf, (int)key->len, digest, NULL /* impl */); PY_EVP_MD_free(digest); if (r == 0) { - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(HMAC_Init_ex)); goto error; } @@ -1721,7 +1797,11 @@ locked_HMAC_CTX_copy(HMAC_CTX *new_ctx_p, HMACobject *self) ENTER_HASHLIB(self); result = HMAC_CTX_copy(new_ctx_p, self->ctx); LEAVE_HASHLIB(self); - return result; + if (result == 0) { + notify_smart_ssl_error_occurred_in(Py_STRINGIFY(HMAC_CTX_copy)); + return -1; + } + return 0; } /* returning 0 means that an error occurred and an exception is set */ @@ -1735,7 +1815,7 @@ _hashlib_hmac_digest_size(HMACobject *self) unsigned int digest_size = EVP_MD_size(md); assert(digest_size <= EVP_MAX_MD_SIZE); if (digest_size == 0) { - raise_ssl_error(PyExc_ValueError, "invalid digest size"); + notify_ssl_error_occurred("invalid digest size"); } return digest_size; } @@ -1768,7 +1848,7 @@ _hmac_update(HMACobject *self, PyObject *obj) PyBuffer_Release(&view); if (r == 0) { - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(HMAC_Update)); return 0; } return 1; @@ -1786,13 +1866,12 @@ _hashlib_HMAC_copy_impl(HMACobject *self) { HMACobject *retval; - HMAC_CTX *ctx = HMAC_CTX_new(); + HMAC_CTX *ctx = py_openssl_wrapper_HMAC_CTX_new(); if (ctx == NULL) { - return PyErr_NoMemory(); + return NULL; } - if (!locked_HMAC_CTX_copy(ctx, self)) { + if (locked_HMAC_CTX_copy(ctx, self) < 0) { HMAC_CTX_free(ctx); - notify_ssl_error_occurred(); return NULL; } @@ -1854,20 +1933,18 @@ _hashlib_HMAC_update_impl(HMACobject *self, PyObject *msg) static int _hmac_digest(HMACobject *self, unsigned char *buf, unsigned int len) { - HMAC_CTX *temp_ctx = HMAC_CTX_new(); + HMAC_CTX *temp_ctx = py_openssl_wrapper_HMAC_CTX_new(); if (temp_ctx == NULL) { - (void)PyErr_NoMemory(); 
return 0; } - if (!locked_HMAC_CTX_copy(temp_ctx, self)) { + if (locked_HMAC_CTX_copy(temp_ctx, self) < 0) { HMAC_CTX_free(temp_ctx); - notify_ssl_error_occurred(); return 0; } int r = HMAC_Final(temp_ctx, buf, &len); HMAC_CTX_free(temp_ctx); if (r == 0) { - notify_ssl_error_occurred(); + notify_ssl_error_occurred_in(Py_STRINGIFY(HMAC_Final)); return 0; } return 1; @@ -2088,16 +2165,13 @@ _hashlib_get_fips_mode_impl(PyObject *module) #else ERR_clear_error(); int result = FIPS_mode(); - if (result == 0) { + if (result == 0 && ERR_peek_last_error()) { // "If the library was built without support of the FIPS Object Module, // then the function will return 0 with an error code of // CRYPTO_R_FIPS_MODE_NOT_SUPPORTED (0x0f06d065)." // But 0 is also a valid result value. - unsigned long errcode = ERR_peek_last_error(); - if (errcode) { - notify_ssl_error_occurred(); - return -1; - } + notify_ssl_error_occurred_in(Py_STRINGIFY(FIPS_mode)); + return -1; } return result; #endif diff --git a/Modules/_heapqmodule.c b/Modules/_heapqmodule.c index 095866eec7d..7784cdcd9ff 100644 --- a/Modules/_heapqmodule.c +++ b/Modules/_heapqmodule.c @@ -11,7 +11,7 @@ annotated by François Pinard, and converted to C by Raymond Hettinger. #endif #include "Python.h" -#include "pycore_list.h" // _PyList_ITEMS() +#include "pycore_list.h" // _PyList_ITEMS(), _PyList_AppendTakeRef() #include "clinic/_heapqmodule.c.h" @@ -117,6 +117,7 @@ siftup(PyListObject *heap, Py_ssize_t pos) } /*[clinic input] +@critical_section heap _heapq.heappush heap: object(subclass_of='&PyList_Type') @@ -128,13 +129,22 @@ Push item onto heap, maintaining the heap invariant. static PyObject * _heapq_heappush_impl(PyObject *module, PyObject *heap, PyObject *item) -/*[clinic end generated code: output=912c094f47663935 input=7c69611f3698aceb]*/ +/*[clinic end generated code: output=912c094f47663935 input=f7a4f03ef8d52e67]*/ { - if (PyList_Append(heap, item)) + if (item == NULL) { + PyErr_BadInternalCall(); return NULL; + } + + // In a free-threaded build, the heap is locked at this point. + // Therefore, calling _PyList_AppendTakeRef() is safe and no overhead. + if (_PyList_AppendTakeRef((PyListObject *)heap, Py_NewRef(item))) { + return NULL; + } - if (siftdown((PyListObject *)heap, 0, PyList_GET_SIZE(heap)-1)) + if (siftdown((PyListObject *)heap, 0, PyList_GET_SIZE(heap)-1)) { return NULL; + } Py_RETURN_NONE; } @@ -171,6 +181,7 @@ heappop_internal(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t)) } /*[clinic input] +@critical_section heap _heapq.heappop heap: object(subclass_of='&PyList_Type') @@ -181,7 +192,7 @@ Pop the smallest item off the heap, maintaining the heap invariant. 
static PyObject * _heapq_heappop_impl(PyObject *module, PyObject *heap) -/*[clinic end generated code: output=96dfe82d37d9af76 input=91487987a583c856]*/ +/*[clinic end generated code: output=96dfe82d37d9af76 input=ed396461b153dd51]*/ { return heappop_internal(heap, siftup); } @@ -207,6 +218,7 @@ heapreplace_internal(PyObject *heap, PyObject *item, int siftup_func(PyListObjec /*[clinic input] +@critical_section heap _heapq.heapreplace heap: object(subclass_of='&PyList_Type') @@ -226,12 +238,13 @@ this routine unless written as part of a conditional replacement: static PyObject * _heapq_heapreplace_impl(PyObject *module, PyObject *heap, PyObject *item) -/*[clinic end generated code: output=82ea55be8fbe24b4 input=719202ac02ba10c8]*/ +/*[clinic end generated code: output=82ea55be8fbe24b4 input=9be1678b817ef1a9]*/ { return heapreplace_internal(heap, item, siftup); } /*[clinic input] +@critical_section heap _heapq.heappushpop heap: object(subclass_of='&PyList_Type') @@ -246,7 +259,7 @@ a separate call to heappop(). static PyObject * _heapq_heappushpop_impl(PyObject *module, PyObject *heap, PyObject *item) -/*[clinic end generated code: output=67231dc98ed5774f input=5dc701f1eb4a4aa7]*/ +/*[clinic end generated code: output=67231dc98ed5774f input=db05c81b1dd92c44]*/ { PyObject *returnitem; int cmp; @@ -371,6 +384,7 @@ heapify_internal(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t)) } /*[clinic input] +@critical_section heap _heapq.heapify heap: object(subclass_of='&PyList_Type') @@ -381,7 +395,7 @@ Transform list into a heap, in-place, in O(len(heap)) time. static PyObject * _heapq_heapify_impl(PyObject *module, PyObject *heap) -/*[clinic end generated code: output=e63a636fcf83d6d0 input=53bb7a2166febb73]*/ +/*[clinic end generated code: output=e63a636fcf83d6d0 input=aaaaa028b9b6af08]*/ { return heapify_internal(heap, siftup); } @@ -481,6 +495,7 @@ siftup_max(PyListObject *heap, Py_ssize_t pos) } /*[clinic input] +@critical_section heap _heapq.heappush_max heap: object(subclass_of='&PyList_Type') @@ -492,9 +507,16 @@ Push item onto max heap, maintaining the heap invariant. static PyObject * _heapq_heappush_max_impl(PyObject *module, PyObject *heap, PyObject *item) -/*[clinic end generated code: output=c869d5f9deb08277 input=4743d7db137b6e2b]*/ +/*[clinic end generated code: output=c869d5f9deb08277 input=c437e3d1ff8dcb70]*/ { - if (PyList_Append(heap, item)) { + if (item == NULL) { + PyErr_BadInternalCall(); + return NULL; + } + + // In a free-threaded build, the heap is locked at this point. + // Therefore, calling _PyList_AppendTakeRef() is safe and no overhead. + if (_PyList_AppendTakeRef((PyListObject *)heap, Py_NewRef(item))) { return NULL; } @@ -506,6 +528,7 @@ _heapq_heappush_max_impl(PyObject *module, PyObject *heap, PyObject *item) } /*[clinic input] +@critical_section heap _heapq.heappop_max heap: object(subclass_of='&PyList_Type') @@ -516,12 +539,13 @@ Maxheap variant of heappop. static PyObject * _heapq_heappop_max_impl(PyObject *module, PyObject *heap) -/*[clinic end generated code: output=2f051195ab404b77 input=e62b14016a5a26de]*/ +/*[clinic end generated code: output=2f051195ab404b77 input=5d70c997798aec64]*/ { return heappop_internal(heap, siftup_max); } /*[clinic input] +@critical_section heap _heapq.heapreplace_max heap: object(subclass_of='&PyList_Type') @@ -533,12 +557,13 @@ Maxheap variant of heapreplace. 
static PyObject * _heapq_heapreplace_max_impl(PyObject *module, PyObject *heap, PyObject *item) -/*[clinic end generated code: output=8770778b5a9cbe9b input=21a3d28d757c881c]*/ +/*[clinic end generated code: output=8770778b5a9cbe9b input=fe70175356e4a649]*/ { return heapreplace_internal(heap, item, siftup_max); } /*[clinic input] +@critical_section heap _heapq.heapify_max heap: object(subclass_of='&PyList_Type') @@ -549,12 +574,13 @@ Maxheap variant of heapify. static PyObject * _heapq_heapify_max_impl(PyObject *module, PyObject *heap) -/*[clinic end generated code: output=8401af3856529807 input=edda4255728c431e]*/ +/*[clinic end generated code: output=8401af3856529807 input=4eee63231e7d1573]*/ { return heapify_internal(heap, siftup_max); } /*[clinic input] +@critical_section heap _heapq.heappushpop_max heap: object(subclass_of='&PyList_Type') @@ -569,7 +595,7 @@ a separate call to heappop_max(). static PyObject * _heapq_heappushpop_max_impl(PyObject *module, PyObject *heap, PyObject *item) -/*[clinic end generated code: output=ff0019f0941aca0d input=525a843013cbd6c0]*/ +/*[clinic end generated code: output=ff0019f0941aca0d input=24d0defa6fd6df4a]*/ { PyObject *returnitem; int cmp; diff --git a/Modules/blake2module.c b/Modules/blake2module.c index 07aa89f573f..2ce8c0cd3d7 100644 --- a/Modules/blake2module.c +++ b/Modules/blake2module.c @@ -13,7 +13,6 @@ # define Py_BUILD_CORE_MODULE 1 #endif -#include "pyconfig.h" #include "Python.h" #include "hashlib.h" #include "pycore_strhex.h" // _Py_strhex() @@ -51,96 +50,19 @@ # undef HACL_CAN_COMPILE_SIMD256 #endif -// ECX -#define ECX_SSE3 (1 << 0) -#define ECX_SSSE3 (1 << 9) -#define ECX_SSE4_1 (1 << 19) -#define ECX_SSE4_2 (1 << 20) -#define ECX_AVX (1 << 28) - -// EBX -#define EBX_AVX2 (1 << 5) - -// EDX -#define EDX_SSE (1 << 25) -#define EDX_SSE2 (1 << 26) -#define EDX_CMOV (1 << 15) - -// zero-initialized by default -typedef struct { - bool sse, sse2, sse3, sse41, sse42, cmov, avx, avx2; - bool done; -} cpu_flags; - -void detect_cpu_features(cpu_flags *flags) { - if (!flags->done) { - int eax1 = 0, ebx1 = 0, ecx1 = 0, edx1 = 0; - int eax7 = 0, ebx7 = 0, ecx7 = 0, edx7 = 0; -#if defined(__x86_64__) && defined(__GNUC__) - __cpuid_count(1, 0, eax1, ebx1, ecx1, edx1); - __cpuid_count(7, 0, eax7, ebx7, ecx7, edx7); -#elif defined(_M_X64) - int info1[4] = { 0 }; - int info7[4] = { 0 }; - __cpuidex(info1, 1, 0); - __cpuidex(info7, 7, 0); - eax1 = info1[0]; - ebx1 = info1[1]; - ecx1 = info1[2]; - edx1 = info1[3]; - eax7 = info7[0]; - ebx7 = info7[1]; - ecx7 = info7[2]; - edx7 = info7[3]; -#endif - (void) eax1; (void) ebx1; (void) ecx1; (void) edx1; - (void) eax7; (void) ebx7; (void) ecx7; (void) edx7; - - - flags->avx = (ecx1 & ECX_AVX) != 0; - - flags->avx2 = (ebx7 & EBX_AVX2) != 0; - - flags->sse = (edx1 & EDX_SSE) != 0; - flags->sse2 = (edx1 & EDX_SSE2) != 0; - flags->cmov = (edx1 & EDX_CMOV) != 0; - - flags->sse3 = (ecx1 & ECX_SSE3) != 0; - /* ssse3 = (ecx1 & ECX_SSSE3) != 0; */ - flags->sse41 = (ecx1 & ECX_SSE4_1) != 0; - flags->sse42 = (ecx1 & ECX_SSE4_2) != 0; - - flags->done = true; - } -} - -#ifdef HACL_CAN_COMPILE_SIMD128 -static inline bool has_simd128(cpu_flags *flags) { - // For now this is Intel-only, could conceivably be #ifdef'd to something - // else. 
- return flags->sse && flags->sse2 && flags->sse3 && flags->sse41 && flags->sse42 && flags->cmov; -} -#endif - -#ifdef HACL_CAN_COMPILE_SIMD256 -static inline bool has_simd256(cpu_flags *flags) { - return flags->avx && flags->avx2; -} -#endif - // Small mismatch between the variable names Python defines as part of configure // at the ones HACL* expects to be set in order to enable those headers. #define HACL_CAN_COMPILE_VEC128 HACL_CAN_COMPILE_SIMD128 #define HACL_CAN_COMPILE_VEC256 HACL_CAN_COMPILE_SIMD256 -#include "_hacl/Hacl_Hash_Blake2b.h" #include "_hacl/Hacl_Hash_Blake2s.h" -#if HACL_CAN_COMPILE_SIMD256 -#include "_hacl/Hacl_Hash_Blake2b_Simd256.h" -#endif +#include "_hacl/Hacl_Hash_Blake2b.h" #if HACL_CAN_COMPILE_SIMD128 #include "_hacl/Hacl_Hash_Blake2s_Simd128.h" #endif +#if HACL_CAN_COMPILE_SIMD256 +#include "_hacl/Hacl_Hash_Blake2b_Simd256.h" +#endif // MODULE TYPE SLOTS @@ -148,16 +70,16 @@ static PyType_Spec blake2b_type_spec; static PyType_Spec blake2s_type_spec; PyDoc_STRVAR(blake2mod__doc__, -"_blake2b provides BLAKE2b for hashlib\n" -); + "_blake2 provides BLAKE2b and BLAKE2s for hashlib\n"); typedef struct { - PyTypeObject* blake2b_type; - PyTypeObject* blake2s_type; - cpu_flags flags; + PyTypeObject *blake2b_type; + PyTypeObject *blake2s_type; + bool can_run_simd128; + bool can_run_simd256; } Blake2State; -static inline Blake2State* +static inline Blake2State * blake2_get_state(PyObject *module) { void *state = _PyModule_GetState(module); @@ -166,7 +88,7 @@ blake2_get_state(PyObject *module) } #if defined(HACL_CAN_COMPILE_SIMD128) || defined(HACL_CAN_COMPILE_SIMD256) -static inline Blake2State* +static inline Blake2State * blake2_get_state_from_type(PyTypeObject *module) { void *state = _PyType_GetModuleState(module); @@ -203,31 +125,107 @@ _blake2_free(void *module) (void)_blake2_clear((PyObject *)module); } -#define ADD_INT(d, name, value) do { \ - PyObject *x = PyLong_FromLong(value); \ - if (!x) \ - return -1; \ - if (PyDict_SetItemString(d, name, x) < 0) { \ - Py_DECREF(x); \ - return -1; \ - } \ - Py_DECREF(x); \ -} while(0) - -#define ADD_INT_CONST(NAME, VALUE) do { \ - if (PyModule_AddIntConstant(m, NAME, VALUE) < 0) { \ - return -1; \ - } \ -} while (0) +static void +blake2module_init_cpu_features(Blake2State *state) +{ + /* This must be kept in sync with hmacmodule_init_cpu_features() + * in hmacmodule.c */ + int eax1 = 0, ebx1 = 0, ecx1 = 0, edx1 = 0; + int eax7 = 0, ebx7 = 0, ecx7 = 0, edx7 = 0; +#if defined(__x86_64__) && defined(__GNUC__) + __cpuid_count(1, 0, eax1, ebx1, ecx1, edx1); + __cpuid_count(7, 0, eax7, ebx7, ecx7, edx7); +#elif defined(_M_X64) + int info1[4] = {0}; + __cpuidex(info1, 1, 0); + eax1 = info1[0], ebx1 = info1[1], ecx1 = info1[2], edx1 = info1[3]; + + int info7[4] = {0}; + __cpuidex(info7, 7, 0); + eax7 = info7[0], ebx7 = info7[1], ecx7 = info7[2], edx7 = info7[3]; +#endif + // fmt: off + (void)eax1; (void)ebx1; (void)ecx1; (void)edx1; + (void)eax7; (void)ebx7; (void)ecx7; (void)edx7; + // fmt: on + +#define EBX_AVX2 (1 << 5) +#define ECX_SSE3 (1 << 0) +#define ECX_SSSE3 (1 << 9) +#define ECX_SSE4_1 (1 << 19) +#define ECX_SSE4_2 (1 << 20) +#define ECX_AVX (1 << 28) +#define EDX_SSE (1 << 25) +#define EDX_SSE2 (1 << 26) +#define EDX_CMOV (1 << 15) + + bool avx = (ecx1 & ECX_AVX) != 0; + bool avx2 = (ebx7 & EBX_AVX2) != 0; + + bool sse = (edx1 & EDX_SSE) != 0; + bool sse2 = (edx1 & EDX_SSE2) != 0; + bool cmov = (edx1 & EDX_CMOV) != 0; + + bool sse3 = (ecx1 & ECX_SSE3) != 0; + bool sse41 = (ecx1 & ECX_SSE4_1) != 0; + bool sse42 = (ecx1 & 
ECX_SSE4_2) != 0; + +#undef EDX_CMOV +#undef EDX_SSE2 +#undef EDX_SSE +#undef ECX_AVX +#undef ECX_SSE4_2 +#undef ECX_SSE4_1 +#undef ECX_SSSE3 +#undef ECX_SSE3 +#undef EBX_AVX2 + +#if HACL_CAN_COMPILE_SIMD128 + // TODO(picnixz): use py_cpuid_features (gh-125022) to improve detection + state->can_run_simd128 = sse && sse2 && sse3 && sse41 && sse42 && cmov; +#else + // fmt: off + (void)sse; (void)sse2; (void)sse3; (void)sse41; (void)sse42; (void)cmov; + // fmt: on + state->can_run_simd128 = false; +#endif + +#if HACL_CAN_COMPILE_SIMD256 + // TODO(picnixz): use py_cpuid_features (gh-125022) to improve detection + state->can_run_simd256 = state->can_run_simd128 && avx && avx2; +#else + // fmt: off + (void)avx; (void)avx2; + // fmt: on + state->can_run_simd256 = false; +#endif +} static int blake2_exec(PyObject *m) { - Blake2State* st = blake2_get_state(m); - - // This is called at module initialization-time, and so appears to be as - // good a place as any to probe the CPU flags. - detect_cpu_features(&st->flags); + Blake2State *st = blake2_get_state(m); + blake2module_init_cpu_features(st); + +#define ADD_INT(DICT, NAME, VALUE) \ + do { \ + PyObject *x = PyLong_FromLong(VALUE); \ + if (x == NULL) { \ + return -1; \ + } \ + int rc = PyDict_SetItemString(DICT, NAME, x); \ + Py_DECREF(x); \ + if (rc < 0) { \ + return -1; \ + } \ + } while(0) + +#define ADD_INT_CONST(NAME, VALUE) \ + do { \ + if (PyModule_AddIntConstant(m, NAME, VALUE) < 0) { \ + return -1; \ + } \ + } while (0) ADD_INT_CONST("_GIL_MINSIZE", HASHLIB_GIL_MINSIZE); @@ -237,7 +235,6 @@ blake2_exec(PyObject *m) if (st->blake2b_type == NULL) { return -1; } - /* BLAKE2b */ if (PyModule_AddType(m, st->blake2b_type) < 0) { return -1; } @@ -257,9 +254,9 @@ blake2_exec(PyObject *m) st->blake2s_type = (PyTypeObject *)PyType_FromModuleAndSpec( m, &blake2s_type_spec, NULL); - if (NULL == st->blake2s_type) + if (st->blake2s_type == NULL) { return -1; - + } if (PyModule_AddType(m, st->blake2s_type) < 0) { return -1; } @@ -275,12 +272,11 @@ blake2_exec(PyObject *m) ADD_INT_CONST("BLAKE2S_MAX_KEY_SIZE", HACL_HASH_BLAKE2S_KEY_BYTES); ADD_INT_CONST("BLAKE2S_MAX_DIGEST_SIZE", HACL_HASH_BLAKE2S_OUT_BYTES); +#undef ADD_INT_CONST +#undef ADD_INT return 0; } -#undef ADD_INT -#undef ADD_INT_CONST - static PyModuleDef_Slot _blake2_slots[] = { {Py_mod_exec, blake2_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, @@ -320,35 +316,39 @@ PyInit__blake2(void) // set. typedef enum { Blake2s, Blake2b, Blake2s_128, Blake2b_256 } blake2_impl; -static inline bool is_blake2b(blake2_impl impl) { - return impl == Blake2b || impl == Blake2b_256; +static inline bool +is_blake2b(blake2_impl impl) +{ + return impl == Blake2b || impl == Blake2b_256; } -static inline bool is_blake2s(blake2_impl impl) { - return !is_blake2b(impl); +static inline bool +is_blake2s(blake2_impl impl) +{ + return impl == Blake2s || impl == Blake2s_128; } -static inline blake2_impl type_to_impl(PyTypeObject *type) { +static inline blake2_impl +type_to_impl(PyTypeObject *type) +{ #if defined(HACL_CAN_COMPILE_SIMD128) || defined(HACL_CAN_COMPILE_SIMD256) - Blake2State* st = blake2_get_state_from_type(type); + Blake2State *st = blake2_get_state_from_type(type); #endif if (!strcmp(type->tp_name, blake2b_type_spec.name)) { -#ifdef HACL_CAN_COMPILE_SIMD256 - if (has_simd256(&st->flags)) - return Blake2b_256; - else -#endif +#if HACL_CAN_COMPILE_SIMD256 + return st->can_run_simd256 ? 
Blake2b_256 : Blake2b; +#else return Blake2b; - } else if (!strcmp(type->tp_name, blake2s_type_spec.name)) { -#ifdef HACL_CAN_COMPILE_SIMD128 - if (has_simd128(&st->flags)) - return Blake2s_128; - else #endif + } + else if (!strcmp(type->tp_name, blake2s_type_spec.name)) { +#if HACL_CAN_COMPILE_SIMD128 + return st->can_run_simd128 ? Blake2s_128 : Blake2s; +#else return Blake2s; - } else { - Py_UNREACHABLE(); +#endif } + Py_UNREACHABLE(); } typedef struct { @@ -356,10 +356,10 @@ typedef struct { union { Hacl_Hash_Blake2s_state_t *blake2s_state; Hacl_Hash_Blake2b_state_t *blake2b_state; -#ifdef HACL_CAN_COMPILE_SIMD128 +#if HACL_CAN_COMPILE_SIMD128 Hacl_Hash_Blake2s_Simd128_state_t *blake2s_128_state; #endif -#ifdef HACL_CAN_COMPILE_SIMD256 +#if HACL_CAN_COMPILE_SIMD256 Hacl_Hash_Blake2b_Simd256_state_t *blake2b_256_state; #endif }; @@ -425,39 +425,124 @@ static void update(Blake2Object *self, uint8_t *buf, Py_ssize_t len) { switch (self->impl) { - // These need to be ifdef'd out otherwise it's an unresolved symbol at - // link-time. -#ifdef HACL_CAN_COMPILE_SIMD256 + // blake2b_256_state and blake2s_128_state must be if'd since + // otherwise this results in an unresolved symbol at link-time. +#if HACL_CAN_COMPILE_SIMD256 case Blake2b_256: - HACL_UPDATE(Hacl_Hash_Blake2b_Simd256_update,self->blake2b_256_state, buf, len); + HACL_UPDATE(Hacl_Hash_Blake2b_Simd256_update, + self->blake2b_256_state, buf, len); return; #endif -#ifdef HACL_CAN_COMPILE_SIMD128 +#if HACL_CAN_COMPILE_SIMD128 case Blake2s_128: - HACL_UPDATE(Hacl_Hash_Blake2s_Simd128_update,self->blake2s_128_state, buf, len); + HACL_UPDATE(Hacl_Hash_Blake2s_Simd128_update, + self->blake2s_128_state, buf, len); return; #endif case Blake2b: - HACL_UPDATE(Hacl_Hash_Blake2b_update,self->blake2b_state, buf, len); + HACL_UPDATE(Hacl_Hash_Blake2b_update, + self->blake2b_state, buf, len); return; case Blake2s: - HACL_UPDATE(Hacl_Hash_Blake2s_update,self->blake2s_state, buf, len); + HACL_UPDATE(Hacl_Hash_Blake2s_update, + self->blake2s_state, buf, len); return; default: Py_UNREACHABLE(); } } -static PyObject * -py_blake2b_or_s_new(PyTypeObject *type, PyObject *data, int digest_size, - Py_buffer *key, Py_buffer *salt, Py_buffer *person, - int fanout, int depth, unsigned long leaf_size, - unsigned long long node_offset, int node_depth, - int inner_size, int last_node, int usedforsecurity) +#define BLAKE2_IMPLNAME(SELF) \ + (is_blake2b((SELF)->impl) ? "blake2b" : "blake2s") +#define GET_BLAKE2_CONST(SELF, NAME) \ + (is_blake2b((SELF)->impl) \ + ? HACL_HASH_BLAKE2B_ ## NAME \ + : HACL_HASH_BLAKE2S_ ## NAME) + +#define MAX_OUT_BYTES(SELF) GET_BLAKE2_CONST(SELF, OUT_BYTES) +#define MAX_SALT_LENGTH(SELF) GET_BLAKE2_CONST(SELF, SALT_BYTES) +#define MAX_KEY_BYTES(SELF) GET_BLAKE2_CONST(SELF, KEY_BYTES) +#define MAX_PERSONAL_BYTES(SELF) GET_BLAKE2_CONST(SELF, PERSONAL_BYTES) +static int +py_blake2_validate_params(Blake2Object *self, + int digest_size, + Py_buffer *key, Py_buffer *salt, Py_buffer *person, + int fanout, int depth, unsigned long leaf_size, + unsigned long long node_offset, int node_depth, + int inner_size) +{ + /* Validate digest size. 
*/ + if (digest_size <= 0 || (unsigned int)digest_size > MAX_OUT_BYTES(self)) { + PyErr_Format( + PyExc_ValueError, + "digest_size for %s must be between 1 and %d bytes, got %d", + BLAKE2_IMPLNAME(self), MAX_OUT_BYTES(self), digest_size + ); + goto error; + } + +#define CHECK_LENGTH(NAME, VALUE, MAX) \ + do { \ + if ((size_t)(VALUE) > (size_t)(MAX)) { \ + PyErr_Format(PyExc_ValueError, \ + "maximum %s length is %zu bytes, got %zd", \ + (NAME), (size_t)(MAX), (Py_ssize_t)(VALUE)); \ + goto error; \ + } \ + } while (0) + /* Validate key parameter. */ + if (key->obj && key->len) { + CHECK_LENGTH("key", key->len, MAX_KEY_BYTES(self)); + } + /* Validate salt parameter. */ + if (salt->obj && salt->len) { + CHECK_LENGTH("salt", salt->len, MAX_SALT_LENGTH(self)); + } + /* Validate personalization parameter. */ + if (person->obj && person->len) { + CHECK_LENGTH("person", person->len, MAX_PERSONAL_BYTES(self)); + } +#undef CHECK_LENGTH +#define CHECK_TREE(NAME, VALUE, MIN, MAX) \ + do { \ + if ((VALUE) < (MIN) || (size_t)(VALUE) > (size_t)(MAX)) { \ + PyErr_Format(PyExc_ValueError, \ + "'%s' must be between %zu and %zu", \ + (NAME), (size_t)(MIN), (size_t)(MAX)); \ + goto error; \ + } \ + } while (0) + /* Validate tree parameters. */ + CHECK_TREE("fanout", fanout, 0, 255); + CHECK_TREE("depth", depth, 1, 255); + CHECK_TREE("node_depth", node_depth, 0, 255); + CHECK_TREE("inner_size", inner_size, 0, MAX_OUT_BYTES(self)); +#undef CHECK_TREE + if (leaf_size > 0xFFFFFFFFU) { + /* maximum: 2**32 - 1 */ + PyErr_SetString(PyExc_OverflowError, "'leaf_size' is too large"); + goto error; + } + if (is_blake2s(self->impl) && node_offset > 0xFFFFFFFFFFFFULL) { + /* maximum: 2**48 - 1 */ + PyErr_SetString(PyExc_OverflowError, "'node_offset' is too large"); + goto error; + } + return 0; +error: + return -1; +} + + +static PyObject * +py_blake2_new(PyTypeObject *type, PyObject *data, int digest_size, + Py_buffer *key, Py_buffer *salt, Py_buffer *person, + int fanout, int depth, unsigned long leaf_size, + unsigned long long node_offset, int node_depth, + int inner_size, int last_node, int usedforsecurity) { Blake2Object *self = NULL; - Py_buffer buf; self = new_Blake2Object(type); if (self == NULL) { @@ -487,96 +572,31 @@ py_blake2b_or_s_new(PyTypeObject *type, PyObject *data, int digest_size, default: Py_UNREACHABLE(); } - // Using Blake2b because we statically know that these are greater than the - // Blake2s sizes -- this avoids a VLA. - uint8_t salt_[HACL_HASH_BLAKE2B_SALT_BYTES] = { 0 }; - uint8_t personal_[HACL_HASH_BLAKE2B_PERSONAL_BYTES] = { 0 }; - /* Validate digest size. */ - if (digest_size <= 0 || - (unsigned) digest_size > (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_OUT_BYTES : HACL_HASH_BLAKE2S_OUT_BYTES)) + // Unlike the state types, the parameters share a single (client-friendly) + // structure. + if (py_blake2_validate_params(self, + digest_size, + key, salt, person, + fanout, depth, leaf_size, + node_offset, node_depth, inner_size) < 0) { - PyErr_Format(PyExc_ValueError, - "digest_size for %s must be between 1 and %d bytes, here it is %d", - is_blake2b(self->impl) ? "Blake2b" : "Blake2s", - is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_OUT_BYTES : HACL_HASH_BLAKE2S_OUT_BYTES, - digest_size); - goto error; - } - - /* Validate salt parameter. */ - if ((salt->obj != NULL) && salt->len) { - if ((size_t)salt->len > (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_SALT_BYTES : HACL_HASH_BLAKE2S_SALT_BYTES)) { - PyErr_Format(PyExc_ValueError, - "maximum salt length is %d bytes", - (is_blake2b(self->impl) ? 
HACL_HASH_BLAKE2B_SALT_BYTES : HACL_HASH_BLAKE2S_SALT_BYTES)); - goto error; - } - memcpy(salt_, salt->buf, salt->len); - } - - /* Validate personalization parameter. */ - if ((person->obj != NULL) && person->len) { - if ((size_t)person->len > (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_PERSONAL_BYTES : HACL_HASH_BLAKE2S_PERSONAL_BYTES)) { - PyErr_Format(PyExc_ValueError, - "maximum person length is %d bytes", - (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_PERSONAL_BYTES : HACL_HASH_BLAKE2S_PERSONAL_BYTES)); - goto error; - } - memcpy(personal_, person->buf, person->len); - } - - /* Validate tree parameters. */ - if (fanout < 0 || fanout > 255) { - PyErr_SetString(PyExc_ValueError, - "fanout must be between 0 and 255"); - goto error; - } - - if (depth <= 0 || depth > 255) { - PyErr_SetString(PyExc_ValueError, - "depth must be between 1 and 255"); - goto error; - } - - if (leaf_size > 0xFFFFFFFFU) { - PyErr_SetString(PyExc_OverflowError, "leaf_size is too large"); - goto error; - } - - if (is_blake2s(self->impl) && node_offset > 0xFFFFFFFFFFFFULL) { - /* maximum 2**48 - 1 */ - PyErr_SetString(PyExc_OverflowError, "node_offset is too large"); - goto error; - } - - if (node_depth < 0 || node_depth > 255) { - PyErr_SetString(PyExc_ValueError, - "node_depth must be between 0 and 255"); goto error; } - if (inner_size < 0 || - (unsigned) inner_size > (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_OUT_BYTES : HACL_HASH_BLAKE2S_OUT_BYTES)) { - PyErr_Format(PyExc_ValueError, - "inner_size must be between 0 and is %d", - (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_OUT_BYTES : HACL_HASH_BLAKE2S_OUT_BYTES)); - goto error; + // Using Blake2b because we statically know that these are greater than the + // Blake2s sizes -- this avoids a VLA. + uint8_t salt_buffer[HACL_HASH_BLAKE2B_SALT_BYTES] = {0}; + uint8_t personal_buffer[HACL_HASH_BLAKE2B_PERSONAL_BYTES] = {0}; + if (salt->obj != NULL) { + assert(salt->buf != NULL); + memcpy(salt_buffer, salt->buf, salt->len); } - - /* Set key length. */ - if ((key->obj != NULL) && key->len) { - if ((size_t)key->len > (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_KEY_BYTES : HACL_HASH_BLAKE2S_KEY_BYTES)) { - PyErr_Format(PyExc_ValueError, - "maximum key length is %d bytes", - (is_blake2b(self->impl) ? HACL_HASH_BLAKE2B_KEY_BYTES : HACL_HASH_BLAKE2S_KEY_BYTES)); - goto error; - } + if (person->obj != NULL) { + assert(person->buf != NULL); + memcpy(personal_buffer, person->buf, person->len); } - // Unlike the state types, the parameters share a single (client-friendly) - // structure. 
- Hacl_Hash_Blake2b_blake2_params params = { .digest_length = digest_size, .key_length = (uint8_t)key->len, @@ -586,55 +606,46 @@ py_blake2b_or_s_new(PyTypeObject *type, PyObject *data, int digest_size, .node_offset = node_offset, .node_depth = node_depth, .inner_length = inner_size, - .salt = salt_, - .personal = personal_ + .salt = salt_buffer, + .personal = personal_buffer }; +#define BLAKE2_MALLOC(TYPE, STATE) \ + do { \ + STATE = Hacl_Hash_ ## TYPE ## _malloc_with_params_and_key( \ + ¶ms, last_node, key->buf); \ + if (STATE == NULL) { \ + (void)PyErr_NoMemory(); \ + goto error; \ + } \ + } while (0) + switch (self->impl) { #if HACL_CAN_COMPILE_SIMD256 - case Blake2b_256: { - self->blake2b_256_state = Hacl_Hash_Blake2b_Simd256_malloc_with_params_and_key(¶ms, last_node, key->buf); - if (self->blake2b_256_state == NULL) { - (void)PyErr_NoMemory(); - goto error; - } + case Blake2b_256: + BLAKE2_MALLOC(Blake2b_Simd256, self->blake2b_256_state); break; - } #endif #if HACL_CAN_COMPILE_SIMD128 - case Blake2s_128: { - self->blake2s_128_state = Hacl_Hash_Blake2s_Simd128_malloc_with_params_and_key(¶ms, last_node, key->buf); - if (self->blake2s_128_state == NULL) { - (void)PyErr_NoMemory(); - goto error; - } + case Blake2s_128: + BLAKE2_MALLOC(Blake2s_Simd128, self->blake2s_128_state); break; - } #endif - case Blake2b: { - self->blake2b_state = Hacl_Hash_Blake2b_malloc_with_params_and_key(¶ms, last_node, key->buf); - if (self->blake2b_state == NULL) { - (void)PyErr_NoMemory(); - goto error; - } + case Blake2b: + BLAKE2_MALLOC(Blake2b, self->blake2b_state); break; - } - case Blake2s: { - self->blake2s_state = Hacl_Hash_Blake2s_malloc_with_params_and_key(¶ms, last_node, key->buf); - if (self->blake2s_state == NULL) { - (void)PyErr_NoMemory(); - goto error; - } + case Blake2s: + BLAKE2_MALLOC(Blake2s, self->blake2s_state); break; - } default: Py_UNREACHABLE(); } +#undef BLAKE2_MALLOC /* Process initial data if any. 
*/ if (data != NULL) { + Py_buffer buf; GET_BUFFER_VIEW_OR_ERROR(data, &buf, goto error); - if (buf.len >= HASHLIB_GIL_MINSIZE) { Py_BEGIN_ALLOW_THREADS update(self, buf.buf, buf.len); @@ -687,7 +698,9 @@ py_blake2b_new_impl(PyTypeObject *type, PyObject *data_obj, int digest_size, if (_Py_hashlib_data_argument(&data, data_obj, string) < 0) { return NULL; } - return py_blake2b_or_s_new(type, data, digest_size, key, salt, person, fanout, depth, leaf_size, node_offset, node_depth, inner_size, last_node, usedforsecurity); + return py_blake2_new(type, data, digest_size, key, salt, person, + fanout, depth, leaf_size, node_offset, node_depth, + inner_size, last_node, usedforsecurity); } /*[clinic input] @@ -725,49 +738,44 @@ py_blake2s_new_impl(PyTypeObject *type, PyObject *data_obj, int digest_size, if (_Py_hashlib_data_argument(&data, data_obj, string) < 0) { return NULL; } - return py_blake2b_or_s_new(type, data, digest_size, key, salt, person, fanout, depth, leaf_size, node_offset, node_depth, inner_size, last_node, usedforsecurity); + return py_blake2_new(type, data, digest_size, key, salt, person, + fanout, depth, leaf_size, node_offset, node_depth, + inner_size, last_node, usedforsecurity); } static int blake2_blake2b_copy_locked(Blake2Object *self, Blake2Object *cpy) { assert(cpy != NULL); +#define BLAKE2_COPY(TYPE, STATE_ATTR) \ + do { \ + cpy->STATE_ATTR = Hacl_Hash_ ## TYPE ## _copy(self->STATE_ATTR); \ + if (cpy->STATE_ATTR == NULL) { \ + goto error; \ + } \ + } while (0) + switch (self->impl) { #if HACL_CAN_COMPILE_SIMD256 - case Blake2b_256: { - cpy->blake2b_256_state = Hacl_Hash_Blake2b_Simd256_copy(self->blake2b_256_state); - if (cpy->blake2b_256_state == NULL) { - goto error; - } + case Blake2b_256: + BLAKE2_COPY(Blake2b_Simd256, blake2b_256_state); break; - } #endif #if HACL_CAN_COMPILE_SIMD128 - case Blake2s_128: { - cpy->blake2s_128_state = Hacl_Hash_Blake2s_Simd128_copy(self->blake2s_128_state); - if (cpy->blake2s_128_state == NULL) { - goto error; - } + case Blake2s_128: + BLAKE2_COPY(Blake2s_Simd128, blake2s_128_state); break; - } #endif - case Blake2b: { - cpy->blake2b_state = Hacl_Hash_Blake2b_copy(self->blake2b_state); - if (cpy->blake2b_state == NULL) { - goto error; - } + case Blake2b: + BLAKE2_COPY(Blake2b, blake2b_state); break; - } - case Blake2s: { - cpy->blake2s_state = Hacl_Hash_Blake2s_copy(self->blake2s_state); - if (cpy->blake2s_state == NULL) { - goto error; - } + case Blake2s: + BLAKE2_COPY(Blake2s, blake2s_state); break; - } default: Py_UNREACHABLE(); } +#undef BLAKE2_COPY cpy->impl = self->impl; return 0; @@ -829,7 +837,8 @@ _blake2_blake2b_update_impl(Blake2Object *self, PyObject *data) update(self, buf.buf, buf.len); PyMutex_Unlock(&self->mutex); Py_END_ALLOW_THREADS - } else { + } + else { update(self, buf.buf, buf.len); } @@ -838,40 +847,42 @@ _blake2_blake2b_update_impl(Blake2Object *self, PyObject *data) Py_RETURN_NONE; } -/*[clinic input] -_blake2.blake2b.digest - -Return the digest value as a bytes object. 
-[clinic start generated code]*/ - -static PyObject * -_blake2_blake2b_digest_impl(Blake2Object *self) -/*[clinic end generated code: output=31ab8ad477f4a2f7 input=7d21659e9c5fff02]*/ +static uint8_t +blake2_blake2b_compute_digest(Blake2Object *self, uint8_t *digest) { - uint8_t digest[HACL_HASH_BLAKE2B_OUT_BYTES]; - - ENTER_HASHLIB(self); - uint8_t digest_length = 0; switch (self->impl) { #if HACL_CAN_COMPILE_SIMD256 case Blake2b_256: - digest_length = Hacl_Hash_Blake2b_Simd256_digest(self->blake2b_256_state, digest); - break; + return Hacl_Hash_Blake2b_Simd256_digest( + self->blake2b_256_state, digest); #endif #if HACL_CAN_COMPILE_SIMD128 case Blake2s_128: - digest_length = Hacl_Hash_Blake2s_Simd128_digest(self->blake2s_128_state, digest); - break; + return Hacl_Hash_Blake2s_Simd128_digest( + self->blake2s_128_state, digest); #endif case Blake2b: - digest_length = Hacl_Hash_Blake2b_digest(self->blake2b_state, digest); - break; + return Hacl_Hash_Blake2b_digest(self->blake2b_state, digest); case Blake2s: - digest_length = Hacl_Hash_Blake2s_digest(self->blake2s_state, digest); - break; + return Hacl_Hash_Blake2s_digest(self->blake2s_state, digest); default: Py_UNREACHABLE(); } +} + +/*[clinic input] +_blake2.blake2b.digest + +Return the digest value as a bytes object. +[clinic start generated code]*/ + +static PyObject * +_blake2_blake2b_digest_impl(Blake2Object *self) +/*[clinic end generated code: output=31ab8ad477f4a2f7 input=7d21659e9c5fff02]*/ +{ + uint8_t digest_length = 0, digest[HACL_HASH_BLAKE2B_OUT_BYTES]; + ENTER_HASHLIB(self); + digest_length = blake2_blake2b_compute_digest(self, digest); LEAVE_HASHLIB(self); return PyBytes_FromStringAndSize((const char *)digest, digest_length); } @@ -886,30 +897,9 @@ static PyObject * _blake2_blake2b_hexdigest_impl(Blake2Object *self) /*[clinic end generated code: output=5ef54b138db6610a input=76930f6946351f56]*/ { - uint8_t digest[HACL_HASH_BLAKE2B_OUT_BYTES]; - + uint8_t digest_length = 0, digest[HACL_HASH_BLAKE2B_OUT_BYTES]; ENTER_HASHLIB(self); - uint8_t digest_length = 0; - switch (self->impl) { -#if HACL_CAN_COMPILE_SIMD256 - case Blake2b_256: - digest_length = Hacl_Hash_Blake2b_Simd256_digest(self->blake2b_256_state, digest); - break; -#endif -#if HACL_CAN_COMPILE_SIMD128 - case Blake2s_128: - digest_length = Hacl_Hash_Blake2s_Simd128_digest(self->blake2s_128_state, digest); - break; -#endif - case Blake2b: - digest_length = Hacl_Hash_Blake2b_digest(self->blake2b_state, digest); - break; - case Blake2s: - digest_length = Hacl_Hash_Blake2s_digest(self->blake2s_state, digest); - break; - default: - Py_UNREACHABLE(); - } + digest_length = blake2_blake2b_compute_digest(self, digest); LEAVE_HASHLIB(self); return _Py_strhex((const char *)digest, digest_length); } @@ -928,43 +918,49 @@ static PyObject * py_blake2b_get_name(PyObject *op, void *Py_UNUSED(closure)) { Blake2Object *self = _Blake2Object_CAST(op); - return PyUnicode_FromString(is_blake2b(self->impl) ? "blake2b" : "blake2s"); + return PyUnicode_FromString(BLAKE2_IMPLNAME(self)); } - static PyObject * py_blake2b_get_block_size(PyObject *op, void *Py_UNUSED(closure)) { Blake2Object *self = _Blake2Object_CAST(op); - return PyLong_FromLong(is_blake2b(self->impl) ? 
HACL_HASH_BLAKE2B_BLOCK_BYTES : HACL_HASH_BLAKE2S_BLOCK_BYTES); + return PyLong_FromLong(GET_BLAKE2_CONST(self, BLOCK_BYTES)); } - -static PyObject * -py_blake2b_get_digest_size(PyObject *op, void *Py_UNUSED(closure)) +static Hacl_Hash_Blake2b_index +hacl_get_blake2_info(Blake2Object *self) { - Blake2Object *self = _Blake2Object_CAST(op); switch (self->impl) { #if HACL_CAN_COMPILE_SIMD256 case Blake2b_256: - return PyLong_FromLong(Hacl_Hash_Blake2b_Simd256_info(self->blake2b_256_state).digest_length); + return Hacl_Hash_Blake2b_Simd256_info(self->blake2b_256_state); #endif #if HACL_CAN_COMPILE_SIMD128 case Blake2s_128: - return PyLong_FromLong(Hacl_Hash_Blake2s_Simd128_info(self->blake2s_128_state).digest_length); + return Hacl_Hash_Blake2s_Simd128_info(self->blake2s_128_state); #endif case Blake2b: - return PyLong_FromLong(Hacl_Hash_Blake2b_info(self->blake2b_state).digest_length); + return Hacl_Hash_Blake2b_info(self->blake2b_state); case Blake2s: - return PyLong_FromLong(Hacl_Hash_Blake2s_info(self->blake2s_state).digest_length); + return Hacl_Hash_Blake2s_info(self->blake2s_state); default: Py_UNREACHABLE(); } } +static PyObject * +py_blake2b_get_digest_size(PyObject *op, void *Py_UNUSED(closure)) +{ + Blake2Object *self = _Blake2Object_CAST(op); + Hacl_Hash_Blake2b_index info = hacl_get_blake2_info(self); + return PyLong_FromLong(info.digest_length); +} + + static PyGetSetDef py_blake2b_getsetters[] = { {"name", py_blake2b_get_name, NULL, NULL, NULL}, {"block_size", py_blake2b_get_block_size, NULL, NULL, NULL}, @@ -981,38 +977,35 @@ py_blake2_clear(PyObject *op) // initializes the HACL* internal state to NULL before allocating // it. If an error occurs in the constructor, we should only free // states that were allocated (i.e. that are not NULL). 
+#define BLAKE2_FREE(TYPE, STATE) \ + do { \ + if (STATE != NULL) { \ + Hacl_Hash_ ## TYPE ## _free(STATE); \ + STATE = NULL; \ + } \ + } while (0) + switch (self->impl) { #if HACL_CAN_COMPILE_SIMD256 case Blake2b_256: - if (self->blake2b_256_state != NULL) { - Hacl_Hash_Blake2b_Simd256_free(self->blake2b_256_state); - self->blake2b_256_state = NULL; - } + BLAKE2_FREE(Blake2b_Simd256, self->blake2b_256_state); break; #endif #if HACL_CAN_COMPILE_SIMD128 case Blake2s_128: - if (self->blake2s_128_state != NULL) { - Hacl_Hash_Blake2s_Simd128_free(self->blake2s_128_state); - self->blake2s_128_state = NULL; - } + BLAKE2_FREE(Blake2s_Simd128, self->blake2s_128_state); break; #endif case Blake2b: - if (self->blake2b_state != NULL) { - Hacl_Hash_Blake2b_free(self->blake2b_state); - self->blake2b_state = NULL; - } + BLAKE2_FREE(Blake2b, self->blake2b_state); break; case Blake2s: - if (self->blake2s_state != NULL) { - Hacl_Hash_Blake2s_free(self->blake2s_state); - self->blake2s_state = NULL; - } + BLAKE2_FREE(Blake2s, self->blake2s_state); break; default: Py_UNREACHABLE(); } +#undef BLAKE2_FREE return 0; } @@ -1041,7 +1034,7 @@ static PyType_Slot blake2b_type_slots[] = { {Py_tp_methods, py_blake2b_methods}, {Py_tp_getset, py_blake2b_getsetters}, {Py_tp_new, py_blake2b_new}, - {0,0} + {0, 0} }; static PyType_Slot blake2s_type_slots[] = { @@ -1054,12 +1047,12 @@ static PyType_Slot blake2s_type_slots[] = { // only the constructor differs, so that it can receive a clinic-generated // default digest length suitable for blake2s {Py_tp_new, py_blake2s_new}, - {0,0} + {0, 0} }; static PyType_Spec blake2b_type_spec = { .name = "_blake2.blake2b", - .basicsize = sizeof(Blake2Object), + .basicsize = sizeof(Blake2Object), .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HEAPTYPE, .slots = blake2b_type_slots @@ -1067,7 +1060,7 @@ static PyType_Spec blake2b_type_spec = { static PyType_Spec blake2s_type_spec = { .name = "_blake2.blake2s", - .basicsize = sizeof(Blake2Object), + .basicsize = sizeof(Blake2Object), .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HEAPTYPE, .slots = blake2s_type_slots diff --git a/Modules/clinic/_heapqmodule.c.h b/Modules/clinic/_heapqmodule.c.h index 81d10862726..b43155b6c24 100644 --- a/Modules/clinic/_heapqmodule.c.h +++ b/Modules/clinic/_heapqmodule.c.h @@ -2,6 +2,7 @@ preserve [clinic start generated code]*/ +#include "pycore_critical_section.h"// Py_BEGIN_CRITICAL_SECTION() #include "pycore_modsupport.h" // _PyArg_CheckPositional() PyDoc_STRVAR(_heapq_heappush__doc__, @@ -32,7 +33,9 @@ _heapq_heappush(PyObject *module, PyObject *const *args, Py_ssize_t nargs) } heap = args[0]; item = args[1]; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heappush_impl(module, heap, item); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -61,7 +64,9 @@ _heapq_heappop(PyObject *module, PyObject *arg) goto exit; } heap = arg; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heappop_impl(module, heap); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -103,7 +108,9 @@ _heapq_heapreplace(PyObject *module, PyObject *const *args, Py_ssize_t nargs) } heap = args[0]; item = args[1]; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heapreplace_impl(module, heap, item); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -140,7 +147,9 @@ _heapq_heappushpop(PyObject *module, PyObject *const *args, Py_ssize_t nargs) } heap = args[0]; item = args[1]; + 
Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heappushpop_impl(module, heap, item); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -169,7 +178,9 @@ _heapq_heapify(PyObject *module, PyObject *arg) goto exit; } heap = arg; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heapify_impl(module, heap); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -203,7 +214,9 @@ _heapq_heappush_max(PyObject *module, PyObject *const *args, Py_ssize_t nargs) } heap = args[0]; item = args[1]; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heappush_max_impl(module, heap, item); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -232,7 +245,9 @@ _heapq_heappop_max(PyObject *module, PyObject *arg) goto exit; } heap = arg; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heappop_max_impl(module, heap); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -266,7 +281,9 @@ _heapq_heapreplace_max(PyObject *module, PyObject *const *args, Py_ssize_t nargs } heap = args[0]; item = args[1]; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heapreplace_max_impl(module, heap, item); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -295,7 +312,9 @@ _heapq_heapify_max(PyObject *module, PyObject *arg) goto exit; } heap = arg; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heapify_max_impl(module, heap); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -332,9 +351,11 @@ _heapq_heappushpop_max(PyObject *module, PyObject *const *args, Py_ssize_t nargs } heap = args[0]; item = args[1]; + Py_BEGIN_CRITICAL_SECTION(heap); return_value = _heapq_heappushpop_max_impl(module, heap, item); + Py_END_CRITICAL_SECTION(); exit: return return_value; } -/*[clinic end generated code: output=f55d8595ce150c76 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=e83d50002c29a96d input=a9049054013a1b77]*/ diff --git a/Modules/hmacmodule.c b/Modules/hmacmodule.c index c7b49d4dee3..b404d5732ec 100644 --- a/Modules/hmacmodule.c +++ b/Modules/hmacmodule.c @@ -1715,11 +1715,11 @@ hmacmodule_init_cpu_features(hmacmodule_state *state) __cpuid_count(1, 0, eax1, ebx1, ecx1, edx1); __cpuid_count(7, 0, eax7, ebx7, ecx7, edx7); #elif defined(_M_X64) - int info1[4] = { 0 }; + int info1[4] = {0}; __cpuidex(info1, 1, 0); eax1 = info1[0], ebx1 = info1[1], ecx1 = info1[2], edx1 = info1[3]; - int info7[4] = { 0 }; + int info7[4] = {0}; __cpuidex(info7, 7, 0); eax7 = info7[0], ebx7 = info7[1], ecx7 = info7[2], edx7 = info7[3]; #endif diff --git a/Modules/md5module.c b/Modules/md5module.c index 9b5ea2d6e02..08dbcd2cbce 100644 --- a/Modules/md5module.c +++ b/Modules/md5module.c @@ -120,7 +120,7 @@ MD5Type_copy_impl(MD5object *self, PyTypeObject *cls) newobj->hash_state = Hacl_Hash_MD5_copy(self->hash_state); LEAVE_HASHLIB(self); if (newobj->hash_state == NULL) { - Py_DECREF(self); + Py_DECREF(newobj); return PyErr_NoMemory(); } return (PyObject *)newobj; diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c index 92c9aa8b510..85c72779bac 100644 --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -716,12 +716,6 @@ select_error(void) # define SOCK_INPROGRESS_ERR EINPROGRESS #endif -#ifdef _MSC_VER -# define SUPPRESS_DEPRECATED_CALL __pragma(warning(suppress: 4996)) -#else -# define SUPPRESS_DEPRECATED_CALL -#endif - /* Convenience function to raise an error according to errno and return a NULL pointer from a function. 
*/ @@ -3366,7 +3360,7 @@ sock_setsockopt(PyObject *self, PyObject *args) &level, &optname, &flag)) { #ifdef MS_WINDOWS if (optname == SIO_TCP_SET_ACK_FREQUENCY) { - int dummy; + DWORD dummy; res = WSAIoctl(get_sock_fd(s), SIO_TCP_SET_ACK_FREQUENCY, &flag, sizeof(flag), NULL, 0, &dummy, NULL, NULL); if (res >= 0) { @@ -6195,8 +6189,10 @@ socket_gethostbyname_ex(PyObject *self, PyObject *args) #ifdef USE_GETHOSTBYNAME_LOCK PyThread_acquire_lock(netdb_lock, 1); #endif - SUPPRESS_DEPRECATED_CALL + _Py_COMP_DIAG_PUSH + _Py_COMP_DIAG_IGNORE_DEPR_DECLS h = gethostbyname(name); + _Py_COMP_DIAG_POP #endif /* HAVE_GETHOSTBYNAME_R */ Py_END_ALLOW_THREADS /* Some C libraries would require addr.__ss_family instead of @@ -6300,8 +6296,10 @@ socket_gethostbyaddr(PyObject *self, PyObject *args) #ifdef USE_GETHOSTBYNAME_LOCK PyThread_acquire_lock(netdb_lock, 1); #endif - SUPPRESS_DEPRECATED_CALL + _Py_COMP_DIAG_PUSH + _Py_COMP_DIAG_IGNORE_DEPR_DECLS h = gethostbyaddr(ap, al, af); + _Py_COMP_DIAG_POP #endif /* HAVE_GETHOSTBYNAME_R */ Py_END_ALLOW_THREADS ret = gethost_common(state, h, SAS2SA(&addr), sizeof(addr), af); @@ -6718,8 +6716,10 @@ _socket_inet_aton_impl(PyObject *module, const char *ip_addr) packed_addr = INADDR_BROADCAST; } else { - SUPPRESS_DEPRECATED_CALL + _Py_COMP_DIAG_PUSH + _Py_COMP_DIAG_IGNORE_DEPR_DECLS packed_addr = inet_addr(ip_addr); + _Py_COMP_DIAG_POP if (packed_addr == INADDR_NONE) { /* invalid address */ PyErr_SetString(PyExc_OSError, @@ -6762,8 +6762,10 @@ _socket_inet_ntoa_impl(PyObject *module, Py_buffer *packed_ip) memcpy(&packed_addr, packed_ip->buf, packed_ip->len); PyBuffer_Release(packed_ip); - SUPPRESS_DEPRECATED_CALL + _Py_COMP_DIAG_PUSH + _Py_COMP_DIAG_IGNORE_DEPR_DECLS return PyUnicode_FromString(inet_ntoa(packed_addr)); + _Py_COMP_DIAG_POP } #endif // HAVE_INET_NTOA |
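
For reference, the _hashopenssl.c hunks above converge on one error-reporting pattern: query OpenSSL's error queue, promote ERR_R_MALLOC_FAILURE to MemoryError (the "smart" exception type), and otherwise fall back to a message naming the OpenSSL function that failed. The standalone C sketch below is not part of this commit; report_openssl_failure() and digest_update() are illustrative names that mirror the shape of raise_ssl_error_f() and notify_ssl_error_occurred_in() against the real OpenSSL error-queue API.

#include <Python.h>
#include <openssl/evp.h>
#include <openssl/err.h>

/* Illustrative sketch only -- mirrors the helpers introduced above,
 * not the actual CPython code. */
static void
report_openssl_failure(PyObject *exc_type, const char *funcname)
{
    unsigned long errcode = ERR_peek_last_error();
    if (errcode != 0) {
        ERR_clear_error();
        /* "Smart" exception type: out-of-memory reasons become MemoryError. */
        if (ERR_GET_REASON(errcode) == ERR_R_MALLOC_FAILURE) {
            exc_type = PyExc_MemoryError;
        }
        const char *lib = ERR_lib_error_string(errcode);
        const char *reason = ERR_reason_error_string(errcode);
        if (lib != NULL) {
            PyErr_Format(exc_type, "[%s] %s", lib, reason);
        }
        else {
            PyErr_SetString(exc_type, reason);
        }
    }
    else {
        /* No queued error: fall back to naming the failed OpenSSL function. */
        PyErr_Format(exc_type, "error in OpenSSL function %s()", funcname);
    }
}

/* Call-site usage, as in _hashlib_HASH_hash(): the generic notification is
 * replaced by one that names the OpenSSL function that returned failure. */
static int
digest_update(EVP_MD_CTX *ctx, const void *buf, size_t len)
{
    if (!EVP_DigestUpdate(ctx, buf, len)) {
        report_openssl_failure(PyExc_ValueError, Py_STRINGIFY(EVP_DigestUpdate));
        return -1;
    }
    return 0;
}

Centralizing the ERR_peek_last_error()/ERR_clear_error() handling keeps each call site to a one-line notification while still producing a function-specific fallback message when OpenSSL has queued no error.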