path: root/Python/specialize.c
author     Irit Katriel <1055913+iritkatriel@users.noreply.github.com>    2021-07-15 13:13:12 +0100
committer  GitHub <noreply@github.com>    2021-07-15 13:13:12 +0100
commit     641345d636320a6fca04a5271fa4c4c5ba3e5437 (patch)
tree       94bc416ec61aa551bf4691fa70b3ad92373783ed /Python/specialize.c
parent     a0551059ba6a83d32a36fb3b87911c77f26f5b9f (diff)
download   cpython-git-641345d636320a6fca04a5271fa4c4c5ba3e5437.tar.gz
bpo-26280: Port BINARY_SUBSCR to PEP 659 adaptive interpreter (GH-27043)
Diffstat (limited to 'Python/specialize.c')
-rw-r--r--    Python/specialize.c    48
1 file changed, 47 insertions(+), 1 deletion(-)
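
This commit wires BINARY_SUBSCR into the PEP 659 quickening machinery: optimize() now rewrites it to BINARY_SUBSCR_ADAPTIVE, and because the family requests no cache entries (cache_requirements[BINARY_SUBSCR] is 0), the warm-up/backoff counter is carried in the instruction's oparg itself (see the "oparg is the adaptive cache counter" comment below). On a failed specialization attempt, the oparg is reset to ADAPTIVE_CACHE_BACKOFF so retries are deferred. The following is a minimal, self-contained model of that counter-in-oparg scheme; the types, names, and constant below are invented for illustration and are NOT CPython code — the real entry point is _Py_Specialize_BinarySubscr() in the diff.

    #include <stdio.h>

    enum { GENERIC, ADAPTIVE, LIST_INT };   /* stand-ins for the opcode family */
    #define BACKOFF 64                      /* stand-in for ADAPTIVE_CACHE_BACKOFF */

    typedef struct { int opcode; int oparg; } instr_t;

    /* Try to specialize based on the operand types seen at run time. */
    static int specialize(instr_t *instr, int container_is_list, int sub_is_int) {
        if (container_is_list && sub_is_int) {
            instr->opcode = LIST_INT;       /* success: rewrite to the fast form */
            return 0;
        }
        instr->oparg = BACKOFF;             /* failure: back off before retrying */
        return -1;
    }

    static void run_subscr(instr_t *instr, int container_is_list, int sub_is_int) {
        switch (instr->opcode) {
        case ADAPTIVE:
            if (instr->oparg == 0) {
                specialize(instr, container_is_list, sub_is_int);
            }
            else {
                instr->oparg--;             /* still warming up or backing off */
            }
            /* fall through: take the generic path this time */
        case GENERIC:
            puts("generic subscript");
            break;
        case LIST_INT:
            puts("fast list[int] subscript");
            break;
        }
    }

    int main(void) {
        instr_t instr = { ADAPTIVE, 0 };    /* counter starts at 0, as in optimize() */
        for (int i = 0; i < 3; i++) {
            run_subscr(&instr, /*container_is_list=*/1, /*sub_is_int=*/1);
        }
        return 0;
    }

Run against a list-like container with integer subscripts, the model specializes on the first execution and takes the fast form thereafter; with other operand types it would back off and keep using the generic path, mirroring the fail branch of _Py_Specialize_BinarySubscr below.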
diff --git a/Python/specialize.c b/Python/specialize.c
index 3277c6bc9e..5ebe596418 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -78,6 +78,7 @@ _Py_PrintSpecializationStats(void)
printf("Specialization stats:\n");
print_stats(&_specialization_stats[LOAD_ATTR], "load_attr");
print_stats(&_specialization_stats[LOAD_GLOBAL], "load_global");
+ print_stats(&_specialization_stats[BINARY_SUBSCR], "binary_subscr");
}
#if SPECIALIZATION_STATS_DETAILED
@@ -162,12 +163,14 @@ get_cache_count(SpecializedCacheOrInstruction *quickened) {
static uint8_t adaptive_opcodes[256] = {
[LOAD_ATTR] = LOAD_ATTR_ADAPTIVE,
[LOAD_GLOBAL] = LOAD_GLOBAL_ADAPTIVE,
+ [BINARY_SUBSCR] = BINARY_SUBSCR_ADAPTIVE,
};
/* The number of cache entries required for a "family" of instructions. */
static uint8_t cache_requirements[256] = {
[LOAD_ATTR] = 2, /* _PyAdaptiveEntry and _PyLoadAttrCache */
[LOAD_GLOBAL] = 2, /* _PyAdaptiveEntry and _PyLoadGlobalCache */
+ [BINARY_SUBSCR] = 0,
};
/* Return the oparg for the cache_offset and instruction index.
@@ -251,7 +254,6 @@ optimize(SpecializedCacheOrInstruction *quickened, int len)
previous_opcode = opcode;
continue;
}
- instructions[i] = _Py_MAKECODEUNIT(adaptive_opcode, new_oparg);
previous_opcode = adaptive_opcode;
int entries_needed = cache_requirements[opcode];
if (entries_needed) {
@@ -261,7 +263,11 @@ optimize(SpecializedCacheOrInstruction *quickened, int len)
_GetSpecializedCacheEntry(instructions, cache0_offset);
cache->adaptive.original_oparg = oparg;
cache->adaptive.counter = 0;
+ } else {
+ // oparg is the adaptive cache counter
+ new_oparg = 0;
}
+ instructions[i] = _Py_MAKECODEUNIT(adaptive_opcode, new_oparg);
}
else {
/* Super instructions don't use the cache,
@@ -637,3 +643,43 @@ success:
cache0->counter = saturating_start();
return 0;
}
+
+int
+_Py_Specialize_BinarySubscr(
+ PyObject *container, PyObject *sub, _Py_CODEUNIT *instr)
+{
+ PyTypeObject *container_type = Py_TYPE(container);
+ if (container_type == &PyList_Type) {
+ if (PyLong_CheckExact(sub)) {
+ *instr = _Py_MAKECODEUNIT(BINARY_SUBSCR_LIST_INT, saturating_start());
+ goto success;
+ } else {
+ SPECIALIZATION_FAIL(BINARY_SUBSCR, Py_TYPE(container), sub, "list; non-integer subscr");
+ }
+ }
+ if (container_type == &PyTuple_Type) {
+ if (PyLong_CheckExact(sub)) {
+ *instr = _Py_MAKECODEUNIT(BINARY_SUBSCR_TUPLE_INT, saturating_start());
+ goto success;
+ } else {
+ SPECIALIZATION_FAIL(BINARY_SUBSCR, Py_TYPE(container), sub, "tuple; non-integer subscr");
+ }
+ }
+ if (container_type == &PyDict_Type) {
+ *instr = _Py_MAKECODEUNIT(BINARY_SUBSCR_DICT, saturating_start());
+ goto success;
+ }
+
+ SPECIALIZATION_FAIL(BINARY_SUBSCR, Py_TYPE(container), sub, "not list|tuple|dict");
+ goto fail;
+fail:
+ STAT_INC(BINARY_SUBSCR, specialization_failure);
+ assert(!PyErr_Occurred());
+ *instr = _Py_MAKECODEUNIT(_Py_OPCODE(*instr), ADAPTIVE_CACHE_BACKOFF);
+ return 0;
+success:
+ STAT_INC(BINARY_SUBSCR, specialization_success);
+ assert(!PyErr_Occurred());
+ return 0;
+}
+
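
_Py_Specialize_BinarySubscr only picks a specialized form for the three container types it recognizes (list or tuple with an exact-int subscript, or dict); everything else records a SPECIALIZATION_FAIL and backs off. The matching fast paths (BINARY_SUBSCR_LIST_INT and friends) live in ceval.c and are not part of this file's diff, but they presumably follow the usual guard-then-deoptimize shape: re-check the exact types and bounds, and fall back to the generic BINARY_SUBSCR when a guard fails. The sketch below is an illustrative, self-contained model of that shape only; the fake_list type and function names are invented for the example and do not correspond to CPython APIs.

    #include <stdio.h>
    #include <stddef.h>

    typedef struct { long *items; size_t size; } fake_list;

    /* Fast path: valid only while the guards hold; otherwise report "deopt"
     * so the caller can fall back to the generic subscript code. */
    static int subscr_list_int(const fake_list *list, long index, long *out) {
        if (index < 0 || (size_t)index >= list->size) {
            return -1;                      /* guard failed: deoptimize */
        }
        *out = list->items[index];          /* straight indexed load */
        return 0;
    }

    /* Generic fallback, standing in for the unspecialized BINARY_SUBSCR. */
    static long subscr_generic(const fake_list *list, long index) {
        if (index < 0) {
            index += (long)list->size;      /* generic path handles negatives */
        }
        return list->items[index];
    }

    int main(void) {
        long data[] = { 10, 20, 30 };
        fake_list list = { data, 3 };
        long value;

        if (subscr_list_int(&list, 1, &value) == 0) {
            printf("fast path: %ld\n", value);              /* guards held */
        }
        if (subscr_list_int(&list, -1, &value) != 0) {
            printf("deopt: %ld\n", subscr_generic(&list, -1));  /* fell back */
        }
        return 0;
    }

The point of the split is that the specialized handler can skip the general protocol dispatch entirely, at the price of deoptimizing whenever the cheap guards no longer hold.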