summaryrefslogtreecommitdiffstatshomepage
path: root/py
diff options
context:
space:
mode:
authorAlessandro Gatti <a.gatti@frob.it>2025-04-19 22:36:13 +0200
committerAlessandro Gatti <a.gatti@frob.it>2025-05-21 02:01:22 +0200
commit6b2792a097a841bf1c0a27e4fcffcaacc4968285 (patch)
tree3a2a406c151a0fd29512a52c7224e5ab9a47992f /py
parent2260fe0828a87b511ad69c0314ecdcb36b66a2cf (diff)
downloadmicropython-6b2792a097a841bf1c0a27e4fcffcaacc4968285.tar.gz
micropython-6b2792a097a841bf1c0a27e4fcffcaacc4968285.zip
py/asmthumb: Generate proper sequences for large register offsets.
This commit lets the Thumb native emitter generate a proper opcode sequence when calculating an indexed register offset for load/store operations, with said offset being both greater than 65535 and not representable as a shifted 8-bit bitmask. The original code assumed the scaled index would always fit in 16 bits and silently discarded the upper bits of the offset. Now an optimised constant-loading sequence is emitted instead, and the final offset is also stored in the correct register in all cases. Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
Diffstat (limited to 'py')
-rw-r--r--py/asmthumb.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/py/asmthumb.c b/py/asmthumb.c
index 420815e802..06021f2bc9 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -450,12 +450,12 @@ static void asm_thumb_add_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint re
asm_thumb_lsl_rlo_rlo_i5(as, reg_dest, reg_dest, offset_shift);
asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base);
} else if (reg_dest != reg_base) {
- asm_thumb_mov_rlo_i16(as, reg_dest, offset << offset_shift);
- asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_dest);
+ asm_thumb_mov_reg_i32_optimised(as, reg_dest, offset << offset_shift);
+ asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base);
} else {
uint reg_other = reg_dest ^ 7;
asm_thumb_op16(as, OP_PUSH_RLIST((1 << reg_other)));
- asm_thumb_mov_rlo_i16(as, reg_other, offset << offset_shift);
+ asm_thumb_mov_reg_i32_optimised(as, reg_other, offset << offset_shift);
asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_other);
asm_thumb_op16(as, OP_POP_RLIST((1 << reg_other)));
}