author     Alessandro Gatti <a.gatti@frob.it>        2025-01-24 09:22:36 +0100
committer  Damien George <damien@micropython.org>    2025-01-26 23:42:36 +1100
commit     55ca3fd67512555707304c6b68b836eb89f09d1c (patch)
tree       6d61b6863b1d2c654b083d312c75a7365e06a708 /py
parent     40585eaa8f1b603f0094b73764e8ce5623812ecf (diff)
download   micropython-55ca3fd67512555707304c6b68b836eb89f09d1c.tar.gz
           micropython-55ca3fd67512555707304c6b68b836eb89f09d1c.zip
py/emitnative: Optimise Viper immediate offset load/stores on Xtensa.
This commit introduces the ability to emit optimised code paths on Xtensa for load/store operations indexed via an immediate offset. If the immediate offset of a load/store operation is within the range that allows it to be embedded directly into the relevant opcode, that opcode is emitted instead of the generic code sequence.

Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
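As a rough illustration (not part of the commit), the shape of the two code paths for a byte load at a constant index looks like the sketch below. It reuses the helpers visible in the diff (asm_xtensa_op_l8ui, need_reg_single, ASM_MOV_REG_IMM) together with the generic ASM_ADD_REG_REG/ASM_LOAD8_REG_REG macros that emitnative.c uses for its fallback path; the exact fallback sequence in the file may differ slightly.

    // Sketch only: single-opcode fast path vs. generic indexed byte load.
    if (index_value > 0 && index_value < 256) {
        // Xtensa's L8UI embeds an unsigned 8-bit byte offset in the opcode,
        // so offsets 1..255 need just one instruction.
        asm_xtensa_op_l8ui(emit->as, REG_RET, reg_base, index_value);
    } else {
        // Generic fallback: materialise the index in a scratch register,
        // add it to the base pointer, then load through the combined address.
        need_reg_single(emit, reg_index, 0);
        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
        ASM_ADD_REG_REG(emit->as, reg_index, reg_base);
        ASM_LOAD8_REG_REG(emit->as, REG_RET, reg_index);
    }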
Diffstat (limited to 'py')
-rw-r--r--  py/emitnative.c  28
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/py/emitnative.c b/py/emitnative.c
index f846e8bb4e..82ee729d3d 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -1550,6 +1550,11 @@ static void emit_native_load_subscr(emit_t *emit) {
asm_rv32_opcode_lbu(emit->as, REG_RET, reg_base, index_value);
break;
}
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value > 0 && index_value < 256) {
+ asm_xtensa_op_l8ui(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
#endif
need_reg_single(emit, reg_index, 0);
ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
@@ -1573,6 +1578,11 @@ static void emit_native_load_subscr(emit_t *emit) {
asm_rv32_opcode_lhu(emit->as, REG_RET, reg_base, index_value << 1);
break;
}
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value > 0 && index_value < 256) {
+ asm_xtensa_op_l16ui(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
#endif
need_reg_single(emit, reg_index, 0);
ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);
@@ -1596,6 +1606,10 @@ static void emit_native_load_subscr(emit_t *emit) {
asm_rv32_opcode_lw(emit->as, REG_RET, reg_base, index_value << 2);
break;
}
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value > 0 && index_value < 256) {
+ asm_xtensa_l32i_optimised(emit->as, REG_RET, reg_base, index_value);
+ }
#endif
need_reg_single(emit, reg_index, 0);
ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2);
@@ -1812,6 +1826,11 @@ static void emit_native_store_subscr(emit_t *emit) {
asm_rv32_opcode_sb(emit->as, reg_value, reg_base, index_value);
break;
}
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value > 0 && index_value < 256) {
+ asm_xtensa_op_s8i(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
#endif
ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
#if N_ARM
@@ -1838,6 +1857,11 @@ static void emit_native_store_subscr(emit_t *emit) {
asm_rv32_opcode_sh(emit->as, reg_value, reg_base, index_value << 1);
break;
}
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value > 0 && index_value < 256) {
+ asm_xtensa_op_s16i(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
#endif
ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
@@ -1860,6 +1884,10 @@ static void emit_native_store_subscr(emit_t *emit) {
asm_rv32_opcode_sw(emit->as, reg_value, reg_base, index_value << 2);
break;
}
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value > 0 && index_value < 256) {
+ asm_xtensa_s32i_optimised(emit->as, reg_value, reg_base, index_value);
+ }
#elif N_ARM
ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
asm_arm_str_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);