Diffstat (limited to 'Objects/unicodeobject.c')
 Objects/unicodeobject.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index 5545eae7950..aa933773233 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -7186,6 +7186,12 @@ PyUnicode_AsASCIIString(PyObject *unicode)
#define NEED_RETRY
#endif
+/* INT_MAX is the theoretical largest chunk (or INT_MAX / 2 when
+   transcoding from UTF-16), but INT_MAX / 4 performs better in
+   both cases and avoids partial characters overrunning the
+   length limit in MultiByteToWideChar on Windows */
+#define DECODING_CHUNK_SIZE (INT_MAX/4)
+
#ifndef WC_ERR_INVALID_CHARS
# define WC_ERR_INVALID_CHARS 0x0080
#endif
@@ -7422,8 +7428,8 @@ decode_code_page_stateful(int code_page,
     do
     {
 #ifdef NEED_RETRY
-        if (size > INT_MAX) {
-            chunk_size = INT_MAX;
+        if (size > DECODING_CHUNK_SIZE) {
+            chunk_size = DECODING_CHUNK_SIZE;
             final = 0;
             done = 0;
         }
@@ -7827,10 +7833,8 @@ encode_code_page(int code_page,
     do
     {
 #ifdef NEED_RETRY
-        /* UTF-16 encoding may double the size, so use only INT_MAX/2
-           chunks. */
-        if (len > INT_MAX/2) {
-            chunk_len = INT_MAX/2;
+        if (len > DECODING_CHUNK_SIZE) {
+            chunk_len = DECODING_CHUNK_SIZE;
             done = 0;
         }
         else
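
For illustration only, a minimal standalone sketch of the chunk-splitting pattern both loops above share (not CPython's actual code; the made-up input length and the printf stand in for the real MultiByteToWideChar/WideCharToMultiByte calls): the input is walked in DECODING_CHUNK_SIZE pieces and only the last piece is marked final, so the stateful decode path can hold back trailing bytes of a partial character. The encode loop uses the same cap but has no final flag.

    /* Sketch of the chunking loop tuned by this patch; assumptions noted above. */
    #include <limits.h>
    #include <stdio.h>

    #define DECODING_CHUNK_SIZE (INT_MAX/4)

    int main(void)
    {
        /* Pretend input length; the real code uses the byte/char count
           passed to decode_code_page_stateful()/encode_code_page(). */
        long long size = 2LL * DECODING_CHUNK_SIZE + 1000;

        while (size > 0) {
            int chunk_size, final;
            if (size > DECODING_CHUNK_SIZE) {
                chunk_size = DECODING_CHUNK_SIZE;  /* cap well below INT_MAX */
                final = 0;                         /* more input follows */
            }
            else {
                chunk_size = (int)size;
                final = 1;                         /* last chunk: flush state */
            }
            /* The real loops convert this chunk here. */
            printf("process %d bytes, final=%d\n", chunk_size, final);
            size -= chunk_size;
        }
        return 0;
    }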