| author | Alessandro Gatti <a.gatti@frob.it> | 2025-05-22 14:37:21 +0200 |
|---|---|---|
| committer | Damien George <damien@micropython.org> | 2025-06-10 11:29:02 +1000 |
| commit | bbab2e98f5d82b3111544d26e600af592977c0c3 (patch) | |
| tree | 6c6e11dbcea2907e4d0da99dc2a8e81c7d410082 /py | |
| parent | 901c96dc554100513523cc9136161126e8cda436 (diff) | |
| download | micropython-bbab2e98f5d82b3111544d26e600af592977c0c3.tar.gz, micropython-bbab2e98f5d82b3111544d26e600af592977c0c3.zip | |
py/asmarm: Extend int-indexed 32-bit load/store offset ranges.
This commit extends the offset range handled by the int-indexed 32-bit
load/store opcode generators, making them emit correct code sequences for
offsets that do not fit in the 12-bit immediate field of the LDR/STR opcodes.
This is necessary because those generators are also used by the Viper
emitter, where it is much more likely to encounter offsets that cannot be
embedded directly in the LDR/STR opcodes.
Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
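For context on when the new fallback path is taken, here is a minimal caller-level sketch (not part of the patch; the register constants follow py/asmarm.h, and the offset value is chosen purely for illustration):

```c
#include "py/asmarm.h"

// Hypothetical illustration only. A word offset of 1024 corresponds to a
// byte offset of 4 * 1024 = 0x1000, the first value that no longer fits in
// the 12-bit immediate field of LDR/STR. With this commit the generator is
// expected to emit
//     mov r8, #0x1000
//     ldr r0, [r1, r8]
// instead of an out-of-range single-instruction encoding.
static void example_large_offset_load(asm_arm_t *as) {
    asm_arm_ldr_reg_reg_offset(as, ASM_ARM_REG_R0, ASM_ARM_REG_R1, 4 * 1024);
}
```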
Diffstat (limited to 'py')
| -rw-r--r-- | py/asmarm.c | 26 |
| -rw-r--r-- | py/asmarm.h | 12 |
| -rw-r--r-- | py/emitnative.c | 5 |
3 files changed, 26 insertions, 17 deletions
diff --git a/py/asmarm.c b/py/asmarm.c
index d304567882..5322321553 100644
--- a/py/asmarm.c
+++ b/py/asmarm.c
@@ -333,9 +333,16 @@ void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
     emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
 }
 
-void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
-    // ldr rd, [rn, #off]
-    emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
+void asm_arm_ldr_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
+    if (byte_offset < 0x1000) {
+        // ldr rd, [rn, #off]
+        emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
+    } else {
+        // mov r8, #off
+        // ldr rd, [rn, r8]
+        asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, byte_offset);
+        emit_al(as, 0x7900000 | (rn << 16) | (rd << 12) | ASM_ARM_REG_R8);
+    }
 }
 
 void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
@@ -376,9 +383,16 @@ void asm_arm_ldr_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
     emit_al(as, 0x7900100 | (rm << 16) | (rd << 12) | rn);
 }
 
-void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
-    // str rd, [rm, #off]
-    emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
+void asm_arm_str_reg_reg_offset(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
+    if (byte_offset < 0x1000) {
+        // str rd, [rm, #off]
+        emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
+    } else {
+        // mov r8, #off
+        // str rd, [rm, r8]
+        asm_arm_mov_reg_i32_optimised(as, ASM_ARM_REG_R8, byte_offset);
+        emit_al(as, 0x7800000 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8);
+    }
 }
 
 void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
diff --git a/py/asmarm.h b/py/asmarm.h
index 2c3af41312..07ed425c98 100644
--- a/py/asmarm.h
+++ b/py/asmarm.h
@@ -109,11 +109,11 @@ void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs);
 void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);
 
 // memory
-void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset);
+void asm_arm_ldr_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset);
 void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn);
 void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset);
 void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn);
-void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset);
+void asm_arm_str_reg_reg_offset(asm_arm_t *as, uint rd, uint rm, uint byte_offset);
 void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm);
 void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm);
 
@@ -212,14 +212,14 @@ void asm_arm_bx_reg(asm_arm_t *as, uint reg_src);
 #define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base))
 #define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base))
 #define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_arm_ldrh_reg_reg_offset((as), (reg_dest), (reg_base), 2 * (uint16_offset))
-#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
-#define ASM_LOAD32_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg_offset((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD32_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg_offset((as), (reg_dest), (reg_base), 4 * (word_offset))
 
 #define ASM_STORE_REG_REG_OFFSET(as, reg_value, reg_base, word_offset) ASM_STORE32_REG_REG_OFFSET((as), (reg_value), (reg_base), (word_offset))
 #define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base))
 #define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base))
-#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
-#define ASM_STORE32_REG_REG_OFFSET(as, reg_value, reg_base, word_offset) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 4 * (word_offset))
+#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg_offset((as), (reg_value), (reg_base), 0)
+#define ASM_STORE32_REG_REG_OFFSET(as, reg_value, reg_base, word_offset) asm_arm_str_reg_reg_offset((as), (reg_value), (reg_base), 4 * (word_offset))
 
 #define ASM_LOAD8_REG_REG_REG(as, reg_dest, reg_base, reg_index) asm_arm_ldrb_reg_reg_reg((as), (reg_dest), (reg_base), (reg_index))
 #define ASM_LOAD16_REG_REG_REG(as, reg_dest, reg_base, reg_index) asm_arm_ldrh_reg_reg_reg((as), (reg_dest), (reg_base), (reg_index))
diff --git a/py/emitnative.c b/py/emitnative.c
index 8ac9810c8f..7662de69e2 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -1845,11 +1845,6 @@ static void emit_native_store_subscr(emit_t *emit) {
                     #else
                     if (index_value != 0) {
                         // index is a non-zero immediate
-                        #if N_ARM
-                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
-                        asm_arm_str_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
-                        break;
-                        #endif
                         ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2);
                         ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
                         reg_base = reg_index;
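With the store generator extended in the same way, the ARM-specific workaround removed from emitnative.c above is no longer needed: the renamed generator itself copes with offsets beyond the 12-bit range. A small usage sketch (register choices and offsets are assumed, for illustration only):

```c
#include "py/asmarm.h"

// Sketch only: the comments describe the instruction sequences the generator
// is expected to emit after this commit; the values are illustrative.
static void example_store_offsets(asm_arm_t *as) {
    // Fits in the 12-bit immediate field -> single instruction:
    //     str r0, [r4, #8]
    asm_arm_str_reg_reg_offset(as, ASM_ARM_REG_R0, ASM_ARM_REG_R4, 8);

    // Does not fit (>= 0x1000) -> scratch-register fallback:
    //     mov r8, #0x2000
    //     str r0, [r4, r8]
    asm_arm_str_reg_reg_offset(as, ASM_ARM_REG_R0, ASM_ARM_REG_R4, 0x2000);
}
```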