author    Guido van Rossum <guido@python.org>  2024-04-04 08:03:27 -0700
committer GitHub <noreply@github.com>  2024-04-04 15:03:27 +0000
commit    060a96f1a9a901b01ed304aa82b886d248ca1cb6 (patch)
tree      cb3e95ecac1f90440b7d3752c4aad015ea734bf0 /Python/ceval_macros.h
parent    63bbe77d9bb2be4db83ed09b96dd22f2a44ef55b (diff)
gh-116968: Reimplement Tier 2 counters (#117144)
Introduce a unified 16-bit backoff counter type (``_Py_BackoffCounter``), shared between the Tier 1 adaptive specializer and the Tier 2 optimizer. The API used for adaptive specialization counters is changed but the behavior is (supposed to be) identical.

The behavior of the Tier 2 counters is changed:

- There are no longer dynamic thresholds (we never varied these).
- All counters now use the same exponential backoff.
- The counter for ``JUMP_BACKWARD`` starts counting down from 16.
- The ``temperature`` in side exits starts counting down from 64.
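To make the scheme concrete, here is a minimal, self-contained C sketch of a 16-bit countdown counter with exponential backoff. The demo_* names, the field split, and the exponent cap are illustrative assumptions, not the real layout; the actual type and its helpers (forge_backoff_counter, advance_backoff_counter, pause_backoff_counter, backoff_counter_triggers) live in Include/internal/pycore_backoff.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: a countdown value plus a backoff exponent.
 * The real _Py_BackoffCounter packs both into one 16-bit cache entry. */
typedef struct {
    uint16_t value;    /* ticks left until the counter triggers */
    uint16_t backoff;  /* next wait will be (1 << backoff) - 1 ticks */
} demo_backoff_counter;

/* The counter "triggers" (e.g. requests specialization) at zero. */
static int
demo_triggers(demo_backoff_counter c)
{
    return c.value == 0;
}

/* Count down by one tick; called each time the instruction executes. */
static demo_backoff_counter
demo_advance(demo_backoff_counter c)
{
    assert(c.value > 0);
    c.value--;
    return c;
}

/* After a trigger, wait exponentially longer before the next attempt. */
static demo_backoff_counter
demo_restart(demo_backoff_counter c)
{
    if (c.backoff < 12) {        /* cap so the wait fits in 16 bits */
        c.backoff++;
    }
    c.value = (uint16_t)((1 << c.backoff) - 1);
    return c;
}

int
main(void)
{
    /* JUMP_BACKWARD-style start: count down from 16, then back off. */
    demo_backoff_counter c = {16, 4};
    for (int tick = 0; tick < 200; tick++) {
        if (demo_triggers(c)) {
            c = demo_restart(c);
            printf("tick %3d: triggered, next wait = %u ticks\n",
                   tick, (unsigned)c.value);
        }
        else {
            c = demo_advance(c);
        }
    }
    return 0;
}

Starting from {16, 4} mirrors the ``JUMP_BACKWARD`` behavior described above: the first trigger comes after 16 ticks, and each restart roughly doubles the wait (16, 31, 63, 127, ...).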
Diffstat (limited to 'Python/ceval_macros.h')
-rw-r--r--  Python/ceval_macros.h  31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/Python/ceval_macros.h b/Python/ceval_macros.h
index 1194c11f8ba..224cd1da7d4 100644
--- a/Python/ceval_macros.h
+++ b/Python/ceval_macros.h
@@ -262,7 +262,7 @@ GETITEM(PyObject *v, Py_ssize_t i) {
STAT_INC(opcode, miss); \
STAT_INC((INSTNAME), miss); \
/* The counter is always the first cache entry: */ \
- if (ADAPTIVE_COUNTER_IS_ZERO(next_instr->cache)) { \
+ if (ADAPTIVE_COUNTER_TRIGGERS(next_instr->cache)) { \
STAT_INC((INSTNAME), deopt); \
} \
} while (0)
@@ -290,29 +290,28 @@ GETITEM(PyObject *v, Py_ssize_t i) {
dtrace_function_entry(frame); \
}
-#define ADAPTIVE_COUNTER_IS_ZERO(COUNTER) \
- (((COUNTER) >> ADAPTIVE_BACKOFF_BITS) == 0)
-
-#define ADAPTIVE_COUNTER_IS_MAX(COUNTER) \
- (((COUNTER) >> ADAPTIVE_BACKOFF_BITS) == ((1 << MAX_BACKOFF_VALUE) - 1))
+/* This takes a uint16_t instead of a _Py_BackoffCounter,
+ * because it is used directly on the cache entry in generated code,
+ * which is always an integral type. */
+#define ADAPTIVE_COUNTER_TRIGGERS(COUNTER) \
+ backoff_counter_triggers(forge_backoff_counter((COUNTER)))
#ifdef Py_GIL_DISABLED
-#define DECREMENT_ADAPTIVE_COUNTER(COUNTER) \
- do { \
- /* gh-115999 tracks progress on addressing this. */ \
+#define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \
+ do { \
+ /* gh-115999 tracks progress on addressing this. */ \
static_assert(0, "The specializing interpreter is not yet thread-safe"); \
} while (0);
#else
-#define DECREMENT_ADAPTIVE_COUNTER(COUNTER) \
- do { \
- assert(!ADAPTIVE_COUNTER_IS_ZERO((COUNTER))); \
- (COUNTER) -= (1 << ADAPTIVE_BACKOFF_BITS); \
+#define ADVANCE_ADAPTIVE_COUNTER(COUNTER) \
+ do { \
+ (COUNTER) = advance_backoff_counter((COUNTER)); \
} while (0);
#endif
-#define INCREMENT_ADAPTIVE_COUNTER(COUNTER) \
- do { \
- (COUNTER) += (1 << ADAPTIVE_BACKOFF_BITS); \
+#define PAUSE_ADAPTIVE_COUNTER(COUNTER) \
+ do { \
+ (COUNTER) = pause_backoff_counter((COUNTER)); \
} while (0);
#define UNBOUNDLOCAL_ERROR_MSG \
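The comment in the hunk above notes that ADAPTIVE_COUNTER_TRIGGERS takes a raw uint16_t because generated code reads the counter straight out of an integral cache entry. Below is a self-contained sketch of that shape, using simplified stand-ins (sketch_forge, sketch_triggers) for the real forge_backoff_counter/backoff_counter_triggers helpers; the 12-bit value/4-bit backoff split is an assumption about the layout.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for _Py_BackoffCounter: a struct view over the cache bits. */
typedef struct {
    uint16_t value_and_backoff;
} sketch_counter;

/* Stand-in for forge_backoff_counter(): reinterpret raw cache bits. */
static sketch_counter
sketch_forge(uint16_t bits)
{
    sketch_counter c = {bits};
    return c;
}

/* Stand-in for backoff_counter_triggers(): assume the countdown lives
 * in the high 12 bits and the counter triggers once it reaches zero. */
static int
sketch_triggers(sketch_counter c)
{
    return (c.value_and_backoff >> 4) == 0;
}

/* Mirror of the macro shape: integral argument in, struct view inside. */
#define SKETCH_COUNTER_TRIGGERS(COUNTER) \
    sketch_triggers(sketch_forge((COUNTER)))

int
main(void)
{
    uint16_t cache_entry = 3 << 4;   /* countdown of 3, backoff bits 0 */
    while (!SKETCH_COUNTER_TRIGGERS(cache_entry)) {
        cache_entry -= 1 << 4;       /* simplified advance: decrement value */
        puts("counting down");
    }
    puts("triggered");
    return 0;
}

Keeping the macro's argument integral means the generated interpreter can pass the cache word directly; the struct view exists only inside the inline helpers.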