Diffstat (limited to 'py')
-rw-r--r--  py/argcheck.c         |   4
-rw-r--r--  py/asmarm.c           |  16
-rw-r--r--  py/asmarm.h           |  15
-rw-r--r--  py/asmrv32.h          |  26
-rw-r--r--  py/asmthumb.c         |   6
-rw-r--r--  py/asmthumb.h         |  69
-rw-r--r--  py/asmx64.h           |   2
-rw-r--r--  py/asmx86.h           |   2
-rw-r--r--  py/asmxtensa.c        |  67
-rw-r--r--  py/asmxtensa.h        |  28
-rw-r--r--  py/bc.c               |   2
-rw-r--r--  py/dynruntime.h       |   4
-rw-r--r--  py/dynruntime.mk      |   3
-rw-r--r--  py/emitinlinextensa.c | 211
-rw-r--r--  py/emitnative.c       | 264
-rw-r--r--  py/emitndebug.c       |   4
-rw-r--r--  py/misc.h             |   2
-rw-r--r--  py/modmath.c          |   2
-rw-r--r--  py/mpconfig.h         |  13
-rw-r--r--  py/mpprint.c          |  11
-rw-r--r--  py/mpprint.h          |  14
-rw-r--r--  py/mpz.c              |   4
-rw-r--r--  py/nativeglue.h       |   2
-rw-r--r--  py/nlr.c              |   2
-rw-r--r--  py/nlr.h              |   6
-rw-r--r--  py/nlraarch64.c       |   2
-rw-r--r--  py/nlrmips.c          |   2
-rw-r--r--  py/nlrpowerpc.c       |   4
-rw-r--r--  py/nlrrv32.c          |   2
-rw-r--r--  py/nlrrv64.c          |   2
-rw-r--r--  py/nlrthumb.c         |   2
-rw-r--r--  py/nlrx64.c           |   2
-rw-r--r--  py/nlrx86.c           |   2
-rw-r--r--  py/nlrxtensa.c        |   2
-rw-r--r--  py/objint.c           |   5
-rw-r--r--  py/objstr.c           |  13
-rw-r--r--  py/parsenum.c         |   2
-rw-r--r--  py/persistentcode.c   |  14
-rw-r--r--  py/runtime.c          |  34
-rw-r--r--  py/runtime.h          |  36
-rw-r--r--  py/scheduler.c        |  26
41 files changed, 649 insertions, 280 deletions
diff --git a/py/argcheck.c b/py/argcheck.c
index 35b116ec0d..298c19bcfd 100644
--- a/py/argcheck.c
+++ b/py/argcheck.c
@@ -137,12 +137,12 @@ void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args,
mp_arg_parse_all(n_pos, args, &kw_args, n_allowed, allowed, out_vals);
}
-NORETURN void mp_arg_error_terse_mismatch(void) {
+MP_NORETURN void mp_arg_error_terse_mismatch(void) {
mp_raise_TypeError(MP_ERROR_TEXT("argument num/types mismatch"));
}
#if MICROPY_CPYTHON_COMPAT
-NORETURN void mp_arg_error_unimpl_kw(void) {
+MP_NORETURN void mp_arg_error_unimpl_kw(void) {
mp_raise_NotImplementedError(MP_ERROR_TEXT("keyword argument(s) not implemented - use normal args instead"));
}
#endif
diff --git a/py/asmarm.c b/py/asmarm.c
index 6fa751b32e..d304567882 100644
--- a/py/asmarm.c
+++ b/py/asmarm.c
@@ -343,6 +343,12 @@ void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
}
+void asm_arm_ldrh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+ // ldrh doesn't support scaled register index
+ emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
+ emit_al(as, 0x19000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // ldrh rd, [rm, r8]
+}
+
void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
if (byte_offset < 0x100) {
// ldrh rd, [rn, #off]
@@ -360,6 +366,16 @@ void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
}
+void asm_arm_ldrb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+ // ldrb rd, [rm, rn]
+ emit_al(as, 0x7d00000 | (rm << 16) | (rd << 12) | rn);
+}
+
+void asm_arm_ldr_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
+ // ldr rd, [rm, rn, lsl #2]
+ emit_al(as, 0x7900100 | (rm << 16) | (rd << 12) | rn);
+}
+
void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
// str rd, [rm, #off]
emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
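
A standalone sanity check of the new scaled-register load (a sketch, not part of the patch; the helper name is made up): rebuilding the word that asm_arm_ldr_reg_reg_reg() emits, assuming emit_al() ORs in the always (AL, 0xE) condition, reproduces the canonical A32 encoding of ldr with a register offset shifted left by two.

#include <assert.h>
#include <stdint.h>

/* Sketch: rebuild the word emitted by asm_arm_ldr_reg_reg_reg() above,
   assuming emit_al() ORs in the AL condition (0xE << 28). */
static uint32_t arm_ldr_scaled(uint32_t rd, uint32_t rm, uint32_t rn) {
    return 0xE0000000u | 0x7900100u | (rm << 16) | (rd << 12) | rn;
}

int main(void) {
    assert(arm_ldr_scaled(0, 1, 2) == 0xE7910102u); // ldr r0, [r1, r2, lsl #2]
    return 0;
}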
diff --git a/py/asmarm.h b/py/asmarm.h
index 4a4253aef6..42fa1ea212 100644
--- a/py/asmarm.h
+++ b/py/asmarm.h
@@ -116,6 +116,12 @@ void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn);
void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset);
void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm);
void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm);
+
+// load from array
+void asm_arm_ldr_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+void asm_arm_ldrh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+void asm_arm_ldrb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+
// store to array
void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
@@ -202,19 +208,24 @@ void asm_arm_bx_reg(asm_arm_t *as, uint reg_src);
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_arm_mul_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
-#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base))
#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base))
#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_arm_ldrh_reg_reg_offset((as), (reg_dest), (reg_base), 2 * (uint16_offset))
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
-#define ASM_STORE_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_str_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
#define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base))
#define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base))
#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
+#define ASM_LOAD8_REG_REG_REG(as, reg_dest, reg_base, reg_index) asm_arm_ldrb_reg_reg_reg((as), (reg_dest), (reg_base), (reg_index))
+#define ASM_LOAD16_REG_REG_REG(as, reg_dest, reg_base, reg_index) asm_arm_ldrh_reg_reg_reg((as), (reg_dest), (reg_base), (reg_index))
+#define ASM_LOAD32_REG_REG_REG(as, reg_dest, reg_base, reg_index) asm_arm_ldr_reg_reg_reg((as), (reg_dest), (reg_base), (reg_index))
+#define ASM_STORE8_REG_REG_REG(as, reg_val, reg_base, reg_index) asm_arm_strb_reg_reg_reg((as), (reg_val), (reg_base), (reg_index))
+#define ASM_STORE16_REG_REG_REG(as, reg_val, reg_base, reg_index) asm_arm_strh_reg_reg_reg((as), (reg_val), (reg_base), (reg_index))
+#define ASM_STORE32_REG_REG_REG(as, reg_val, reg_base, reg_index) asm_arm_str_reg_reg_reg((as), (reg_val), (reg_base), (reg_index))
+
#endif // GENERIC_ASM_API
#endif // MICROPY_INCLUDED_PY_ASMARM_H
diff --git a/py/asmrv32.h b/py/asmrv32.h
index b09f48eb12..6453b0a3d4 100644
--- a/py/asmrv32.h
+++ b/py/asmrv32.h
@@ -737,7 +737,6 @@ void asm_rv32_emit_store_reg_reg_offset(asm_rv32_t *state, mp_uint_t source, mp_
#define ASM_LOAD32_REG_REG(state, rd, rs) ASM_LOAD_REG_REG_OFFSET(state, rd, rs, 0)
#define ASM_LOAD8_REG_REG(state, rd, rs) asm_rv32_opcode_lbu(state, rd, rs, 0)
#define ASM_LOAD_REG_REG_OFFSET(state, rd, rs, offset) asm_rv32_emit_load_reg_reg_offset(state, rd, rs, offset)
-#define ASM_LOAD_REG_REG(state, rd, rs) ASM_LOAD32_REG_REG(state, rd, rs)
#define ASM_LSL_REG_REG(state, rd, rs) asm_rv32_opcode_sll(state, rd, rd, rs)
#define ASM_LSR_REG_REG(state, rd, rs) asm_rv32_opcode_srl(state, rd, rd, rs)
#define ASM_MOV_LOCAL_REG(state, local, rs) asm_rv32_emit_mov_local_reg(state, local, rs)
@@ -754,10 +753,33 @@ void asm_rv32_emit_store_reg_reg_offset(asm_rv32_t *state, mp_uint_t source, mp_
#define ASM_STORE32_REG_REG(state, rs1, rs2) ASM_STORE_REG_REG_OFFSET(state, rs1, rs2, 0)
#define ASM_STORE8_REG_REG(state, rs1, rs2) asm_rv32_opcode_sb(state, rs1, rs2, 0)
#define ASM_STORE_REG_REG_OFFSET(state, rd, rs, offset) asm_rv32_emit_store_reg_reg_offset(state, rd, rs, offset)
-#define ASM_STORE_REG_REG(state, rs1, rs2) ASM_STORE32_REG_REG(state, rs1, rs2)
#define ASM_SUB_REG_REG(state, rd, rs) asm_rv32_opcode_sub(state, rd, rd, rs)
#define ASM_XOR_REG_REG(state, rd, rs) asm_rv32_emit_optimised_xor(state, rd, rs)
#define ASM_CLR_REG(state, rd)
+#define ASM_LOAD16_REG_REG_REG(state, rd, rs1, rs2) \
+ do { \
+ asm_rv32_opcode_slli(state, rs2, rs2, 1); \
+ asm_rv32_opcode_cadd(state, rs1, rs2); \
+ asm_rv32_opcode_lhu(state, rd, rs1, 0); \
+ } while (0)
+#define ASM_LOAD32_REG_REG_REG(state, rd, rs1, rs2) \
+ do { \
+ asm_rv32_opcode_slli(state, rs2, rs2, 2); \
+ asm_rv32_opcode_cadd(state, rs1, rs2); \
+ asm_rv32_opcode_lw(state, rd, rs1, 0); \
+ } while (0)
+#define ASM_STORE16_REG_REG_REG(state, rd, rs1, rs2) \
+ do { \
+ asm_rv32_opcode_slli(state, rs2, rs2, 1); \
+ asm_rv32_opcode_cadd(state, rs1, rs2); \
+ asm_rv32_opcode_sh(state, rd, rs1, 0); \
+ } while (0)
+#define ASM_STORE32_REG_REG_REG(state, rd, rs1, rs2) \
+ do { \
+ asm_rv32_opcode_slli(state, rs2, rs2, 2); \
+ asm_rv32_opcode_cadd(state, rs1, rs2); \
+ asm_rv32_opcode_sw(state, rd, rs1, 0); \
+ } while (0)
#endif
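
Note that these RV32 sequences shift the index in place and fold it into the base with a compressed add, so both rs1 (base) and rs2 (index) are clobbered. A sketch of the register-level effect for the 32-bit load (plain C, helper name made up):

#include <assert.h>
#include <stdint.h>

/* Sketch of what ASM_LOAD32_REG_REG_REG(state, rd, rs1, rs2) does to the
   registers: slli rs2, rs2, 2; c.add rs1, rs2; lw rd, 0(rs1). */
static uint32_t rv32_load32_effect(const uint32_t *base, uint32_t index) {
    index <<= 2;                                         // slli rs2, rs2, 2
    const uint8_t *addr = (const uint8_t *)base + index; // c.add rs1, rs2
    return *(const uint32_t *)addr;                      // lw rd, 0(rs1)
}

int main(void) {
    uint32_t a[4] = {10, 20, 30, 40};
    assert(rv32_load32_effect(a, 3) == 40);
    return 0;
}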
diff --git a/py/asmthumb.c b/py/asmthumb.c
index 420815e802..06021f2bc9 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -450,12 +450,12 @@ static void asm_thumb_add_reg_reg_offset(asm_thumb_t *as, uint reg_dest, uint re
asm_thumb_lsl_rlo_rlo_i5(as, reg_dest, reg_dest, offset_shift);
asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base);
} else if (reg_dest != reg_base) {
- asm_thumb_mov_rlo_i16(as, reg_dest, offset << offset_shift);
- asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_dest);
+ asm_thumb_mov_reg_i32_optimised(as, reg_dest, offset << offset_shift);
+ asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base);
} else {
uint reg_other = reg_dest ^ 7;
asm_thumb_op16(as, OP_PUSH_RLIST((1 << reg_other)));
- asm_thumb_mov_rlo_i16(as, reg_other, offset << offset_shift);
+ asm_thumb_mov_reg_i32_optimised(as, reg_other, offset << offset_shift);
asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_other);
asm_thumb_op16(as, OP_POP_RLIST((1 << reg_other)));
}
diff --git a/py/asmthumb.h b/py/asmthumb.h
index a9e68d7adb..cc4213503b 100644
--- a/py/asmthumb.h
+++ b/py/asmthumb.h
@@ -251,6 +251,50 @@ static inline void asm_thumb_bx_reg(asm_thumb_t *as, uint r_src) {
asm_thumb_format_5(as, ASM_THUMB_FORMAT_5_BX, 0, r_src);
}
+// FORMAT 7: load/store with register offset
+// FORMAT 8: load/store sign-extended byte/halfword
+
+#define ASM_THUMB_FORMAT_7_LDR (0x5800)
+#define ASM_THUMB_FORMAT_7_STR (0x5000)
+#define ASM_THUMB_FORMAT_7_WORD_TRANSFER (0x0000)
+#define ASM_THUMB_FORMAT_7_BYTE_TRANSFER (0x0400)
+#define ASM_THUMB_FORMAT_8_LDRH (0x5A00)
+#define ASM_THUMB_FORMAT_8_STRH (0x5200)
+
+#define ASM_THUMB_FORMAT_7_8_ENCODE(op, rlo_dest, rlo_base, rlo_index) \
+ ((op) | ((rlo_index) << 6) | ((rlo_base) << 3) | ((rlo_dest)))
+
+static inline void asm_thumb_format_7_8(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_base, uint rlo_index) {
+ assert(rlo_dest < ASM_THUMB_REG_R8);
+ assert(rlo_base < ASM_THUMB_REG_R8);
+ assert(rlo_index < ASM_THUMB_REG_R8);
+ asm_thumb_op16(as, ASM_THUMB_FORMAT_7_8_ENCODE(op, rlo_dest, rlo_base, rlo_index));
+}
+
+static inline void asm_thumb_ldrb_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint rlo_index) {
+ asm_thumb_format_7_8(as, ASM_THUMB_FORMAT_7_LDR | ASM_THUMB_FORMAT_7_BYTE_TRANSFER, rlo_dest, rlo_base, rlo_index);
+}
+
+static inline void asm_thumb_ldrh_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint rlo_index) {
+ asm_thumb_format_7_8(as, ASM_THUMB_FORMAT_8_LDRH, rlo_dest, rlo_base, rlo_index);
+}
+
+static inline void asm_thumb_ldr_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint rlo_index) {
+ asm_thumb_format_7_8(as, ASM_THUMB_FORMAT_7_LDR | ASM_THUMB_FORMAT_7_WORD_TRANSFER, rlo_dest, rlo_base, rlo_index);
+}
+
+static inline void asm_thumb_strb_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint rlo_index) {
+ asm_thumb_format_7_8(as, ASM_THUMB_FORMAT_7_STR | ASM_THUMB_FORMAT_7_BYTE_TRANSFER, rlo_src, rlo_base, rlo_index);
+}
+
+static inline void asm_thumb_strh_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint rlo_index) {
+ asm_thumb_format_7_8(as, ASM_THUMB_FORMAT_8_STRH, rlo_dest, rlo_base, rlo_index);
+}
+
+static inline void asm_thumb_str_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint rlo_index) {
+ asm_thumb_format_7_8(as, ASM_THUMB_FORMAT_7_STR | ASM_THUMB_FORMAT_7_WORD_TRANSFER, rlo_src, rlo_base, rlo_index);
+}
+
// FORMAT 9: load/store with immediate offset
// For word transfers the offset must be aligned, and >>2
@@ -418,19 +462,40 @@ void asm_thumb_b_rel12(asm_thumb_t *as, int rel);
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src))
-#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_reg_reg_i12_optimised((as), (reg_dest), (reg_base), (word_offset))
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_thumb_ldrh_reg_reg_i12_optimised((as), (reg_dest), (reg_base), (uint16_offset))
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
-#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), (word_offset))
#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_thumb_strb_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_thumb_strh_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_LOAD8_REG_REG_REG(as, reg_dest, reg_base, reg_index) asm_thumb_ldrb_rlo_rlo_rlo((as), (reg_dest), (reg_base), (reg_index))
+#define ASM_LOAD16_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
+ do { \
+ asm_thumb_lsl_rlo_rlo_i5((as), (reg_index), (reg_index), 1); \
+ asm_thumb_ldrh_rlo_rlo_rlo((as), (reg_dest), (reg_base), (reg_index)); \
+ } while (0)
+#define ASM_LOAD32_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
+ do { \
+ asm_thumb_lsl_rlo_rlo_i5((as), (reg_index), (reg_index), 2); \
+ asm_thumb_ldr_rlo_rlo_rlo((as), (reg_dest), (reg_base), (reg_index)); \
+ } while (0)
+#define ASM_STORE8_REG_REG_REG(as, reg_val, reg_base, reg_index) asm_thumb_strb_rlo_rlo_rlo((as), (reg_val), (reg_base), (reg_index))
+#define ASM_STORE16_REG_REG_REG(as, reg_val, reg_base, reg_index) \
+ do { \
+ asm_thumb_lsl_rlo_rlo_i5((as), (reg_index), (reg_index), 1); \
+ asm_thumb_strh_rlo_rlo_rlo((as), (reg_val), (reg_base), (reg_index)); \
+ } while (0)
+#define ASM_STORE32_REG_REG_REG(as, reg_val, reg_base, reg_index) \
+ do { \
+ asm_thumb_lsl_rlo_rlo_i5((as), (reg_index), (reg_index), 2); \
+ asm_thumb_str_rlo_rlo_rlo((as), (reg_val), (reg_base), (reg_index)); \
+ } while (0)
+
#endif // GENERIC_ASM_API
#endif // MICROPY_INCLUDED_PY_ASMTHUMB_H
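
Thumb-1 register-offset transfers carry no shift, so the 16- and 32-bit macros above first shift reg_index left in place (clobbering it) before the indexed access. As a standalone sketch (helper name made up), the format 7 encoder can be checked against a known opcode:

#include <assert.h>
#include <stdint.h>

/* Sketch: the 16-bit word built by ASM_THUMB_FORMAT_7_8_ENCODE() above for
   a word load, "ldr rlo_dest, [rlo_base, rlo_index]". */
static uint16_t thumb_ldr_rrr(unsigned dest, unsigned base, unsigned index) {
    return (uint16_t)(0x5800 | (index << 6) | (base << 3) | dest);
}

int main(void) {
    assert(thumb_ldr_rrr(0, 1, 2) == 0x5888); // ldr r0, [r1, r2]
    return 0;
}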
diff --git a/py/asmx64.h b/py/asmx64.h
index c63e31797e..30c6efd6d0 100644
--- a/py/asmx64.h
+++ b/py/asmx64.h
@@ -205,14 +205,12 @@ void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r32);
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src))
-#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest))
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 2 * (uint16_offset), (reg_dest))
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem32_to_r64zx((as), (reg_base), 0, (reg_dest))
-#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0)
#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 8 * (word_offset))
#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
diff --git a/py/asmx86.h b/py/asmx86.h
index 027d44151e..af73c163b4 100644
--- a/py/asmx86.h
+++ b/py/asmx86.h
@@ -200,14 +200,12 @@ void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r
#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src))
-#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest))
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest))
#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 2 * (uint16_offset), (reg_dest))
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
-#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset))
#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
diff --git a/py/asmxtensa.c b/py/asmxtensa.c
index 0fbe351dcf..ab2e7aeaeb 100644
--- a/py/asmxtensa.c
+++ b/py/asmxtensa.c
@@ -37,6 +37,10 @@
#define WORD_SIZE (4)
#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
#define SIGNED_FIT12(x) ((((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800))
+#define SIGNED_FIT18(x) ((((x) & 0xfffe0000) == 0) || (((x) & 0xfffe0000) == 0xfffe0000))
+
+#define ET_OUT_OF_RANGE MP_ERROR_TEXT("ERROR: xtensa %q out of range")
+#define ET_NOT_ALIGNED MP_ERROR_TEXT("ERROR: %q %q not word-aligned")
void asm_xtensa_end_pass(asm_xtensa_t *as) {
as->num_const = as->cur_const;
@@ -47,9 +51,9 @@ void asm_xtensa_end_pass(asm_xtensa_t *as) {
if (as->base.pass == MP_ASM_PASS_EMIT) {
uint8_t *d = as->base.code_base;
printf("XTENSA ASM:");
- for (int i = 0; i < ((as->base.code_size + 15) & ~15); ++i) {
+ for (size_t i = 0; i < ((as->base.code_size + 15) & ~15); ++i) {
if (i % 16 == 0) {
- printf("\n%08x:", (uint32_t)&d[i]);
+ printf("\n%p:", &d[i]);
}
if (i % 2 == 0) {
printf(" ");
@@ -62,10 +66,12 @@ void asm_xtensa_end_pass(asm_xtensa_t *as) {
}
void asm_xtensa_entry(asm_xtensa_t *as, int num_locals) {
- // jump over the constants
- asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
- mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
- as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
+ if (as->num_const > 0) {
+ // jump over the constants
+ asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
+ mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
+ as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
+ }
// adjust the stack-pointer to store a0, a12, a13, a14, a15 and locals, 16-byte aligned
as->stack_adjust = (((ASM_XTENSA_NUM_REGS_SAVED + num_locals) * WORD_SIZE) + 15) & ~15;
@@ -150,7 +156,7 @@ void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label
uint32_t dest = get_label_dest(as, label);
int32_t rel = dest - as->base.code_offset - 4;
if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT12(rel)) {
- printf("ERROR: xtensa bccz out of range\n");
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_OUT_OF_RANGE, MP_QSTR_bccz);
}
asm_xtensa_op_bccz(as, cond, reg, rel);
}
@@ -159,7 +165,7 @@ void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint r
uint32_t dest = get_label_dest(as, label);
int32_t rel = dest - as->base.code_offset - 4;
if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT8(rel)) {
- printf("ERROR: xtensa bcc out of range\n");
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_OUT_OF_RANGE, MP_QSTR_bcc);
}
asm_xtensa_op_bcc(as, cond, reg1, reg2, rel);
}
@@ -179,6 +185,8 @@ size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32) {
// store the constant in the table
if (as->const_table != NULL) {
as->const_table[as->cur_const] = i32;
+ } else {
+ assert((as->base.pass != MP_ASM_PASS_EMIT) && "Constants table was not built.");
}
++as->cur_const;
return loc;
@@ -264,4 +272,47 @@ void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx) {
asm_xtensa_op_callx8(as, ASM_XTENSA_REG_A8);
}
+void asm_xtensa_bit_branch(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t bit, mp_uint_t label, mp_uint_t condition) {
+ uint32_t dest = get_label_dest(as, label);
+ int32_t rel = dest - as->base.code_offset - 4;
+ if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT8(rel)) {
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_OUT_OF_RANGE, MP_QSTR_bit_branch);
+ }
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(7, condition | ((bit >> 4) & 0x01), reg, bit & 0x0F, rel & 0xFF));
+}
+
+void asm_xtensa_call0(asm_xtensa_t *as, mp_uint_t label) {
+ uint32_t dest = get_label_dest(as, label);
+ int32_t rel = dest - as->base.code_offset - 3;
+ if (as->base.pass == MP_ASM_PASS_EMIT) {
+ if ((dest & 0x03) != 0) {
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_NOT_ALIGNED, MP_QSTR_call0, MP_QSTR_target);
+ }
+ if ((rel & 0x03) != 0) {
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_NOT_ALIGNED, MP_QSTR_call0, MP_QSTR_location);
+ }
+ if (!SIGNED_FIT18(rel)) {
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_OUT_OF_RANGE, MP_QSTR_call0);
+ }
+ }
+ asm_xtensa_op_call0(as, rel);
+}
+
+void asm_xtensa_l32r(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t label) {
+ uint32_t dest = get_label_dest(as, label);
+ int32_t rel = dest - as->base.code_offset;
+ if (as->base.pass == MP_ASM_PASS_EMIT) {
+ if ((dest & 0x03) != 0) {
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_NOT_ALIGNED, MP_QSTR_l32r, MP_QSTR_target);
+ }
+ if ((rel & 0x03) != 0) {
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_NOT_ALIGNED, MP_QSTR_l32r, MP_QSTR_location);
+ }
+ if (!SIGNED_FIT18(rel) || (rel >= 0)) {
+ mp_raise_msg_varg(&mp_type_RuntimeError, ET_OUT_OF_RANGE, MP_QSTR_l32r);
+ }
+ }
+ asm_xtensa_op_l32r(as, reg, as->base.code_offset, dest);
+}
+
#endif // MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA || MICROPY_EMIT_XTENSAWIN
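
SIGNED_FIT18 gates the reach of call0 and l32r, both of which take an 18-bit signed, word-aligned offset (l32r additionally requires a negative one). A standalone sketch of the predicate's boundaries:

#include <assert.h>

/* Sketch of the 18-bit signed-fit predicate defined above. */
#define SIGNED_FIT18(x) ((((x) & 0xfffe0000) == 0) || (((x) & 0xfffe0000) == 0xfffe0000))

int main(void) {
    assert(SIGNED_FIT18(131071));   //  2^17 - 1, largest positive value
    assert(SIGNED_FIT18(-131072));  // -2^17, most negative value
    assert(!SIGNED_FIT18(131072));  //  2^17, one past the range
    return 0;
}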
diff --git a/py/asmxtensa.h b/py/asmxtensa.h
index d2f37bf828..d0fd2dcc18 100644
--- a/py/asmxtensa.h
+++ b/py/asmxtensa.h
@@ -64,9 +64,11 @@
#define ASM_XTENSA_REG_A14 (14)
#define ASM_XTENSA_REG_A15 (15)
-// for bccz
+// for bccz and bcci
#define ASM_XTENSA_CCZ_EQ (0)
#define ASM_XTENSA_CCZ_NE (1)
+#define ASM_XTENSA_CCZ_LT (2)
+#define ASM_XTENSA_CCZ_GE (3)
// for bcc and setcc
#define ASM_XTENSA_CC_NONE (0)
@@ -295,6 +297,10 @@ void asm_xtensa_l32i_optimised(asm_xtensa_t *as, uint reg_dest, uint reg_base, u
void asm_xtensa_s32i_optimised(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset);
void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx);
void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
+void asm_xtensa_bit_branch(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t bit, mp_uint_t label, mp_uint_t condition);
+void asm_xtensa_immediate_branch(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t immediate, mp_uint_t label, mp_uint_t cond);
+void asm_xtensa_call0(asm_xtensa_t *as, mp_uint_t label);
+void asm_xtensa_l32r(asm_xtensa_t *as, mp_uint_t reg, mp_uint_t label);
// Holds a pointer to mp_fun_table
#define ASM_XTENSA_REG_FUN_TABLE ASM_XTENSA_REG_A15
@@ -411,12 +417,32 @@ void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l8ui((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0)
#define ASM_LOAD16_REG_REG_OFFSET(as, reg_dest, reg_base, uint16_offset) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), (uint16_offset))
+#define ASM_LOAD16_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
+ do { \
+ asm_xtensa_op_addx2((as), (reg_base), (reg_index), (reg_base)); \
+ asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0); \
+ } while (0)
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD32_REG_REG_REG(as, reg_dest, reg_base, reg_index) \
+ do { \
+ asm_xtensa_op_addx4((as), (reg_base), (reg_index), (reg_base)); \
+ asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0); \
+ } while (0)
#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_s32i_optimised((as), (reg_dest), (reg_base), (word_offset))
#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s8i((as), (reg_src), (reg_base), 0)
#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s16i((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG_REG(as, reg_val, reg_base, reg_index) \
+ do { \
+ asm_xtensa_op_addx2((as), (reg_base), (reg_index), (reg_base)); \
+ asm_xtensa_op_s16i((as), (reg_val), (reg_base), 0); \
+ } while (0)
#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s32i_n((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG_REG(as, reg_val, reg_base, reg_index) \
+ do { \
+ asm_xtensa_op_addx4((as), (reg_base), (reg_index), (reg_base)); \
+ asm_xtensa_op_s32i_n((as), (reg_val), (reg_base), 0); \
+ } while (0)
#endif // GENERIC_ASM_API
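
As on RV32, these Xtensa macros clobber reg_base: addx2/addx4 compute base plus the index shifted left by 1 or 2 in a single instruction and write the result back into the base register. A sketch of the 16-bit load's effect in plain C (function name made up):

#include <assert.h>
#include <stdint.h>

/* Sketch of ASM_LOAD16_REG_REG_REG(as, dst, base, idx): addx2 writes
   base + (idx << 1) back into the base register, then l16ui loads at
   offset 0 -- so the base register is clobbered. */
static uint16_t xtensa_load16_effect(const uint16_t *base, uint32_t idx) {
    base = (const uint16_t *)((uintptr_t)base + (idx << 1)); // addx2 base, idx, base
    return base[0];                                          // l16ui dst, base, 0
}

int main(void) {
    uint16_t a[3] = {11, 22, 33};
    assert(xtensa_load16_effect(a, 2) == 33);
    return 0;
}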
diff --git a/py/bc.c b/py/bc.c
index 899dbd6a07..cea31c93bd 100644
--- a/py/bc.c
+++ b/py/bc.c
@@ -88,7 +88,7 @@ const byte *mp_decode_uint_skip(const byte *ptr) {
return ptr;
}
-static NORETURN void fun_pos_args_mismatch(mp_obj_fun_bc_t *f, size_t expected, size_t given) {
+static MP_NORETURN void fun_pos_args_mismatch(mp_obj_fun_bc_t *f, size_t expected, size_t given) {
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
// generic message, used also for other argument issues
(void)f;
diff --git a/py/dynruntime.h b/py/dynruntime.h
index c93111bbd4..0e438da4b7 100644
--- a/py/dynruntime.h
+++ b/py/dynruntime.h
@@ -70,7 +70,7 @@
#define m_realloc(ptr, new_num_bytes) (m_realloc_dyn((ptr), (new_num_bytes)))
#define m_realloc_maybe(ptr, new_num_bytes, allow_move) (m_realloc_maybe_dyn((ptr), (new_num_bytes), (allow_move)))
-static NORETURN inline void m_malloc_fail_dyn(size_t num_bytes) {
+static MP_NORETURN inline void m_malloc_fail_dyn(size_t num_bytes) {
mp_fun_table.raise_msg(
mp_fun_table.load_global(MP_QSTR_MemoryError),
"memory allocation failed");
@@ -295,7 +295,7 @@ static inline mp_obj_t mp_obj_new_exception_arg1_dyn(const mp_obj_type_t *exc_ty
return mp_call_function_n_kw(MP_OBJ_FROM_PTR(exc_type), 1, 0, &args[0]);
}
-static NORETURN inline void mp_raise_dyn(mp_obj_t o) {
+static MP_NORETURN inline void mp_raise_dyn(mp_obj_t o) {
mp_fun_table.raise(o);
for (;;) {
}
diff --git a/py/dynruntime.mk b/py/dynruntime.mk
index 1ef521bd9a..84c78d6225 100644
--- a/py/dynruntime.mk
+++ b/py/dynruntime.mk
@@ -172,6 +172,9 @@ endif
endif
MPY_LD_FLAGS += $(addprefix -l, $(LIBGCC_PATH) $(LIBM_PATH))
endif
+ifneq ($(MPY_EXTERN_SYM_FILE),)
+MPY_LD_FLAGS += --externs "$(realpath $(MPY_EXTERN_SYM_FILE))"
+endif
CFLAGS += $(CFLAGS_EXTRA)
diff --git a/py/emitinlinextensa.c b/py/emitinlinextensa.c
index fed259cfc6..d0eb3d566f 100644
--- a/py/emitinlinextensa.c
+++ b/py/emitinlinextensa.c
@@ -173,7 +173,7 @@ static int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_
#define RRI8_B (2)
typedef struct _opcode_table_3arg_t {
- uint16_t name; // actually a qstr, which should fit in 16 bits
+ qstr_short_t name;
uint8_t type;
uint8_t a0 : 4;
uint8_t a1 : 4;
@@ -187,6 +187,13 @@ static const opcode_table_3arg_t opcode_table_3arg[] = {
{MP_QSTR_add, RRR, 0, 8},
{MP_QSTR_sub, RRR, 0, 12},
{MP_QSTR_mull, RRR, 2, 8},
+ {MP_QSTR_addx2, RRR, 0, 9},
+ {MP_QSTR_addx4, RRR, 0, 10},
+ {MP_QSTR_addx8, RRR, 0, 11},
+ {MP_QSTR_subx2, RRR, 0, 13},
+ {MP_QSTR_subx4, RRR, 0, 14},
+ {MP_QSTR_subx8, RRR, 0, 15},
+ {MP_QSTR_src, RRR, 1, 8},
// load/store/addi opcodes: reg, reg, imm
// upper nibble of type encodes the range of the immediate arg
@@ -208,21 +215,62 @@ static const opcode_table_3arg_t opcode_table_3arg[] = {
{MP_QSTR_bge, RRI8_B, ASM_XTENSA_CC_GE, 0},
{MP_QSTR_bgeu, RRI8_B, ASM_XTENSA_CC_GEU, 0},
{MP_QSTR_blt, RRI8_B, ASM_XTENSA_CC_LT, 0},
+ {MP_QSTR_bltu, RRI8_B, ASM_XTENSA_CC_LTU, 0},
{MP_QSTR_bnall, RRI8_B, ASM_XTENSA_CC_NALL, 0},
{MP_QSTR_bne, RRI8_B, ASM_XTENSA_CC_NE, 0},
{MP_QSTR_bnone, RRI8_B, ASM_XTENSA_CC_NONE, 0},
};
+// The index of the first four qstrs matches the CCZ condition value to be
+// embedded into the opcode.
+static const qstr_short_t BCCZ_OPCODES[] = {
+ MP_QSTR_beqz, MP_QSTR_bnez, MP_QSTR_bltz, MP_QSTR_bgez,
+ MP_QSTR_beqz_n, MP_QSTR_bnez_n
+};
+
+#if MICROPY_EMIT_INLINE_XTENSA_UNCOMMON_OPCODES
+typedef struct _single_opcode_t {
+ qstr_short_t name;
+ uint16_t value;
+} single_opcode_t;
+
+static const single_opcode_t NOARGS_OPCODES[] = {
+ {MP_QSTR_dsync, 0x2030},
+ {MP_QSTR_esync, 0x2020},
+ {MP_QSTR_extw, 0x20D0},
+ {MP_QSTR_ill, 0x0000},
+ {MP_QSTR_isync, 0x2000},
+ {MP_QSTR_memw, 0x20C0},
+ {MP_QSTR_rsync, 0x2010},
+};
+#endif
+
static void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
size_t op_len;
const char *op_str = (const char *)qstr_data(op, &op_len);
if (n_args == 0) {
- if (op == MP_QSTR_ret_n) {
+ if (op == MP_QSTR_ret_n || op == MP_QSTR_ret) {
asm_xtensa_op_ret_n(&emit->as);
- } else {
- goto unknown_op;
+ return;
+ } else if (op == MP_QSTR_nop) {
+ asm_xtensa_op24(&emit->as, 0x20F0);
+ return;
+ } else if (op == MP_QSTR_nop_n) {
+ asm_xtensa_op16(&emit->as, 0xF03D);
+ return;
}
+ #if MICROPY_EMIT_INLINE_XTENSA_UNCOMMON_OPCODES
+ for (size_t index = 0; index < MP_ARRAY_SIZE(NOARGS_OPCODES); index++) {
+ const single_opcode_t *opcode = &NOARGS_OPCODES[index];
+ if (op == opcode->name) {
+ asm_xtensa_op24(&emit->as, opcode->value);
+ return;
+ }
+ }
+ #endif
+
+ goto unknown_op;
} else if (n_args == 1) {
if (op == MP_QSTR_callx0) {
@@ -234,19 +282,45 @@ static void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_
} else if (op == MP_QSTR_jx) {
uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
asm_xtensa_op_jx(&emit->as, r0);
+ } else if (op == MP_QSTR_ssl) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ asm_xtensa_op_ssl(&emit->as, r0);
+ } else if (op == MP_QSTR_ssr) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ asm_xtensa_op_ssr(&emit->as, r0);
+ } else if (op == MP_QSTR_ssai) {
+ mp_uint_t sa = get_arg_i(emit, op_str, pn_args[0], 0, 31);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 4, sa & 0x0F, (sa >> 4) & 0x01));
+ } else if (op == MP_QSTR_ssa8b) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 3, r0, 0));
+ } else if (op == MP_QSTR_ssa8l) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 2, r0, 0));
+ } else if (op == MP_QSTR_call0) {
+ mp_uint_t label = get_arg_label(emit, op_str, pn_args[0]);
+ asm_xtensa_call0(&emit->as, label);
+ #if MICROPY_EMIT_INLINE_XTENSA_UNCOMMON_OPCODES
+ } else if (op == MP_QSTR_fsync) {
+ mp_uint_t imm3 = get_arg_i(emit, op_str, pn_args[0], 0, 7);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 0, 2, 8 | imm3, 0));
+ } else if (op == MP_QSTR_ill_n) {
+ asm_xtensa_op16(&emit->as, 0xF06D);
+ #endif
} else {
goto unknown_op;
}
} else if (n_args == 2) {
uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
- if (op == MP_QSTR_beqz) {
- int label = get_arg_label(emit, op_str, pn_args[1]);
- asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_EQ, r0, label);
- } else if (op == MP_QSTR_bnez) {
- int label = get_arg_label(emit, op_str, pn_args[1]);
- asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_NE, r0, label);
- } else if (op == MP_QSTR_mov || op == MP_QSTR_mov_n) {
+ for (size_t index = 0; index < MP_ARRAY_SIZE(BCCZ_OPCODES); index++) {
+ if (op == BCCZ_OPCODES[index]) {
+ mp_uint_t label = get_arg_label(emit, op_str, pn_args[1]);
+ asm_xtensa_bccz_reg_label(&emit->as, index & 0x03, r0, label);
+ return;
+ }
+ }
+ if (op == MP_QSTR_mov || op == MP_QSTR_mov_n) {
// we emit mov.n for both "mov" and "mov_n" opcodes
uint r1 = get_arg_reg(emit, op_str, pn_args[1]);
asm_xtensa_op_mov_n(&emit->as, r0, r1);
@@ -254,7 +328,53 @@ static void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_
// for convenience we emit l32r if the integer doesn't fit in movi
uint32_t imm = get_arg_i(emit, op_str, pn_args[1], 0, 0);
asm_xtensa_mov_reg_i32(&emit->as, r0, imm);
- } else {
+ } else if (op == MP_QSTR_abs_) {
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 6, r0, 1, r1));
+ } else if (op == MP_QSTR_neg) {
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 6, r0, 0, r1));
+ } else if (op == MP_QSTR_sll) {
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 1, 10, r0, r1, 0));
+ } else if (op == MP_QSTR_sra) {
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 1, 11, r0, 0, r1));
+ } else if (op == MP_QSTR_srl) {
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 1, 9, r0, 0, r1));
+ } else if (op == MP_QSTR_nsa) {
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 14, r1, r0));
+ } else if (op == MP_QSTR_nsau) {
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 15, r1, r0));
+ } else if (op == MP_QSTR_l32r) {
+ mp_uint_t label = get_arg_label(emit, op_str, pn_args[1]);
+ asm_xtensa_l32r(&emit->as, r0, label);
+ } else if (op == MP_QSTR_movi_n) {
+ mp_int_t imm = get_arg_i(emit, op_str, pn_args[1], -32, 95);
+ asm_xtensa_op_movi_n(&emit->as, r0, imm);
+ } else
+ #if MICROPY_EMIT_INLINE_XTENSA_UNCOMMON_OPCODES
+ if (op == MP_QSTR_rsr) {
+ mp_uint_t sr = get_arg_i(emit, op_str, pn_args[1], 0, 255);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RSR(0, 3, 0, sr, r0));
+ } else if (op == MP_QSTR_rur) {
+ mp_uint_t imm8 = get_arg_i(emit, op_str, pn_args[1], 0, 255);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 3, 14, r0, (imm8 >> 4) & 0x0F, imm8 & 0x0F));
+ } else if (op == MP_QSTR_wsr) {
+ mp_uint_t sr = get_arg_i(emit, op_str, pn_args[1], 0, 255);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RSR(0, 3, 1, sr, r0));
+ } else if (op == MP_QSTR_wur) {
+ mp_uint_t sr = get_arg_i(emit, op_str, pn_args[1], 0, 255);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RSR(0, 3, 15, sr, r0));
+ } else if (op == MP_QSTR_xsr) {
+ mp_uint_t sr = get_arg_i(emit, op_str, pn_args[1], 0, 255);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RSR(0, 1, 6, sr, r0));
+ } else
+ #endif
+ {
goto unknown_op;
}
@@ -288,7 +408,72 @@ static void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_
return;
}
}
- goto unknown_op;
+
+ if (op == MP_QSTR_add_n) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_uint_t r2 = get_arg_reg(emit, op_str, pn_args[2]);
+ asm_xtensa_op16(&emit->as, ASM_XTENSA_ENCODE_RRRN(10, r0, r1, r2));
+ } else if (op == MP_QSTR_addi_n) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_int_t imm4 = get_arg_i(emit, op_str, pn_args[2], -1, 15);
+ asm_xtensa_op16(&emit->as, ASM_XTENSA_ENCODE_RRRN(11, r0, r1, (imm4 != 0 ? imm4 : -1)));
+ } else if (op == MP_QSTR_addmi) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_int_t imm8 = get_arg_i(emit, op_str, pn_args[2], -128 * 256, 127 * 256);
+ if ((imm8 & 0xFF) != 0) {
+ emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("%d is not a multiple of %d"), imm8, 256));
+ } else {
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRI8(2, 13, r1, r0, imm8 >> 8));
+ }
+ } else if (op == MP_QSTR_bbci) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t bit = get_arg_i(emit, op_str, pn_args[1], 0, 31);
+ mp_int_t label = get_arg_label(emit, op_str, pn_args[2]);
+ asm_xtensa_bit_branch(&emit->as, r0, bit, label, 6);
+ } else if (op == MP_QSTR_bbsi) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t bit = get_arg_i(emit, op_str, pn_args[1], 0, 31);
+ mp_uint_t label = get_arg_label(emit, op_str, pn_args[2]);
+ asm_xtensa_bit_branch(&emit->as, r0, bit, label, 14);
+ } else if (op == MP_QSTR_slli) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_uint_t bits = 32 - get_arg_i(emit, op_str, pn_args[2], 1, 31);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 1, 0 | ((bits >> 4) & 0x01), r0, r1, bits & 0x0F));
+ } else if (op == MP_QSTR_srai) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_uint_t bits = get_arg_i(emit, op_str, pn_args[2], 0, 31);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 1, 2 | ((bits >> 4) & 0x01), r0, bits & 0x0F, r1));
+ } else if (op == MP_QSTR_srli) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_uint_t bits = get_arg_i(emit, op_str, pn_args[2], 0, 15);
+ asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, 1, 4, r0, bits, r1));
+ } else if (op == MP_QSTR_l32i_n) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_uint_t imm = get_arg_i(emit, op_str, pn_args[2], 0, 60);
+ if ((imm & 0x03) != 0) {
+ emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("%d is not a multiple of %d"), imm, 4));
+ } else {
+ asm_xtensa_op_l32i_n(&emit->as, r0, r1, imm >> 2);
+ }
+ } else if (op == MP_QSTR_s32i_n) {
+ mp_uint_t r0 = get_arg_reg(emit, op_str, pn_args[0]);
+ mp_uint_t r1 = get_arg_reg(emit, op_str, pn_args[1]);
+ mp_uint_t imm = get_arg_i(emit, op_str, pn_args[2], 0, 60);
+ if ((imm & 0x03) != 0) {
+ emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("%d is not a multiple of %d"), imm, 4));
+ } else {
+ asm_xtensa_op_s32i_n(&emit->as, r0, r1, imm >> 2);
+ }
+ } else {
+ goto unknown_op;
+ }
} else {
goto unknown_op;
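
The BCCZ_OPCODES table relies on its layout: the index of each of the first four names equals the CCZ condition value, and masking with 0x03 folds the narrow variants (indices 4 and 5) back onto EQ and NE. A standalone sketch of that mapping:

#include <assert.h>

/* Sketch of the index-to-condition mapping used with BCCZ_OPCODES above. */
int main(void) {
    // Indices 0..3 are the CCZ condition values themselves (EQ, NE, LT, GE).
    for (int index = 0; index < 4; ++index) {
        assert((index & 0x03) == index);
    }
    assert((4 & 0x03) == 0); // beqz_n -> ASM_XTENSA_CCZ_EQ
    assert((5 & 0x03) == 1); // bnez_n -> ASM_XTENSA_CCZ_NE
    return 0;
}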
diff --git a/py/emitnative.c b/py/emitnative.c
index 1aab0a9eb7..4b470f3c93 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -1537,25 +1537,24 @@ static void emit_native_load_subscr(emit_t *emit) {
switch (vtype_base) {
case VTYPE_PTR8: {
// pointer to 8-bit memory
- // TODO optimise to use thumb ldrb r1, [r2, r3]
+ #if N_THUMB
+ if (index_value >= 0 && index_value < 32) {
+ asm_thumb_ldrb_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
+ #elif N_RV32
+ if (FIT_SIGNED(index_value, 12)) {
+ asm_rv32_opcode_lbu(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value >= 0 && index_value < 256) {
+ asm_xtensa_op_l8ui(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
+ #endif
if (index_value != 0) {
// index is non-zero
- #if N_THUMB
- if (index_value > 0 && index_value < 32) {
- asm_thumb_ldrb_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #elif N_RV32
- if (FIT_SIGNED(index_value, 12)) {
- asm_rv32_opcode_lbu(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #elif N_XTENSA || N_XTENSAWIN
- if (index_value > 0 && index_value < 256) {
- asm_xtensa_op_l8ui(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #endif
need_reg_single(emit, reg_index, 0);
ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
@@ -1566,24 +1565,24 @@ static void emit_native_load_subscr(emit_t *emit) {
}
case VTYPE_PTR16: {
// pointer to 16-bit memory
+ #if N_THUMB
+ if (index_value >= 0 && index_value < 32) {
+ asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
+ #elif N_RV32
+ if (FIT_SIGNED(index_value, 11)) {
+ asm_rv32_opcode_lhu(emit->as, REG_RET, reg_base, index_value << 1);
+ break;
+ }
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value >= 0 && index_value < 256) {
+ asm_xtensa_op_l16ui(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
+ #endif
if (index_value != 0) {
// index is a non-zero immediate
- #if N_THUMB
- if (index_value > 0 && index_value < 32) {
- asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #elif N_RV32
- if (FIT_SIGNED(index_value, 11)) {
- asm_rv32_opcode_lhu(emit->as, REG_RET, reg_base, index_value << 1);
- break;
- }
- #elif N_XTENSA || N_XTENSAWIN
- if (index_value > 0 && index_value < 256) {
- asm_xtensa_op_l16ui(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #endif
need_reg_single(emit, reg_index, 0);
ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
@@ -1594,24 +1593,24 @@ static void emit_native_load_subscr(emit_t *emit) {
}
case VTYPE_PTR32: {
// pointer to 32-bit memory
+ #if N_THUMB
+ if (index_value >= 0 && index_value < 32) {
+ asm_thumb_ldr_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
+ #elif N_RV32
+ if (FIT_SIGNED(index_value, 10)) {
+ asm_rv32_opcode_lw(emit->as, REG_RET, reg_base, index_value << 2);
+ break;
+ }
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value >= 0 && index_value < 256) {
+ asm_xtensa_l32i_optimised(emit->as, REG_RET, reg_base, index_value);
+ break;
+ }
+ #endif
if (index_value != 0) {
// index is a non-zero immediate
- #if N_THUMB
- if (index_value > 0 && index_value < 32) {
- asm_thumb_ldr_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #elif N_RV32
- if (FIT_SIGNED(index_value, 10)) {
- asm_rv32_opcode_lw(emit->as, REG_RET, reg_base, index_value << 2);
- break;
- }
- #elif N_XTENSA || N_XTENSAWIN
- if (index_value > 0 && index_value < 256) {
- asm_xtensa_l32i_optimised(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #endif
need_reg_single(emit, reg_index, 0);
ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2);
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
@@ -1638,40 +1637,36 @@ static void emit_native_load_subscr(emit_t *emit) {
switch (vtype_base) {
case VTYPE_PTR8: {
// pointer to 8-bit memory
- // TODO optimise to use thumb ldrb r1, [r2, r3]
+ #ifdef ASM_LOAD8_REG_REG_REG
+ ASM_LOAD8_REG_REG_REG(emit->as, REG_RET, REG_ARG_1, reg_index);
+ #else
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_LOAD8_REG_REG(emit->as, REG_RET, REG_ARG_1); // store value to (base+index)
+ #endif
break;
}
case VTYPE_PTR16: {
// pointer to 16-bit memory
- #if N_XTENSA || N_XTENSAWIN
- asm_xtensa_op_addx2(emit->as, REG_ARG_1, reg_index, REG_ARG_1);
- asm_xtensa_op_l16ui(emit->as, REG_RET, REG_ARG_1, 0);
- break;
- #endif
+ #ifdef ASM_LOAD16_REG_REG_REG
+ ASM_LOAD16_REG_REG_REG(emit->as, REG_RET, REG_ARG_1, reg_index);
+ #else
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_LOAD16_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+2*index)
+ #endif
break;
}
case VTYPE_PTR32: {
// pointer to word-size memory
- #if N_RV32
- asm_rv32_opcode_slli(emit->as, REG_TEMP2, reg_index, 2);
- asm_rv32_opcode_cadd(emit->as, REG_ARG_1, REG_TEMP2);
- asm_rv32_opcode_lw(emit->as, REG_RET, REG_ARG_1, 0);
- break;
- #elif N_XTENSA || N_XTENSAWIN
- asm_xtensa_op_addx4(emit->as, REG_ARG_1, reg_index, REG_ARG_1);
- asm_xtensa_op_l32i_n(emit->as, REG_RET, REG_ARG_1, 0);
- break;
- #endif
+ #ifdef ASM_LOAD32_REG_REG_REG
+ ASM_LOAD32_REG_REG_REG(emit->as, REG_RET, REG_ARG_1, reg_index);
+ #else
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_LOAD32_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+4*index)
+ #endif
break;
}
default:
@@ -1815,28 +1810,28 @@ static void emit_native_store_subscr(emit_t *emit) {
case VTYPE_PTR8: {
// pointer to 8-bit memory
// TODO optimise to use thumb strb r1, [r2, r3]
+ #if N_THUMB
+ if (index_value >= 0 && index_value < 32) {
+ asm_thumb_strb_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #elif N_RV32
+ if (FIT_SIGNED(index_value, 12)) {
+ asm_rv32_opcode_sb(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value >= 0 && index_value < 256) {
+ asm_xtensa_op_s8i(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #endif
if (index_value != 0) {
// index is non-zero
- #if N_THUMB
- if (index_value > 0 && index_value < 32) {
- asm_thumb_strb_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
- break;
- }
- #elif N_RV32
- if (FIT_SIGNED(index_value, 12)) {
- asm_rv32_opcode_sb(emit->as, reg_value, reg_base, index_value);
- break;
- }
- #elif N_XTENSA || N_XTENSAWIN
- if (index_value > 0 && index_value < 256) {
- asm_xtensa_op_s8i(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #endif
ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
#if N_ARM
asm_arm_strb_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
- return;
+ break;
#endif
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
reg_base = reg_index;
@@ -1846,24 +1841,24 @@ static void emit_native_store_subscr(emit_t *emit) {
}
case VTYPE_PTR16: {
// pointer to 16-bit memory
+ #if N_THUMB
+ if (index_value >= 0 && index_value < 32) {
+ asm_thumb_strh_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #elif N_RV32
+ if (FIT_SIGNED(index_value, 11)) {
+ asm_rv32_opcode_sh(emit->as, reg_value, reg_base, index_value << 1);
+ break;
+ }
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value >= 0 && index_value < 256) {
+ asm_xtensa_op_s16i(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #endif
if (index_value != 0) {
// index is a non-zero immediate
- #if N_THUMB
- if (index_value > 0 && index_value < 32) {
- asm_thumb_strh_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
- break;
- }
- #elif N_RV32
- if (FIT_SIGNED(index_value, 11)) {
- asm_rv32_opcode_sh(emit->as, reg_value, reg_base, index_value << 1);
- break;
- }
- #elif N_XTENSA || N_XTENSAWIN
- if (index_value > 0 && index_value < 256) {
- asm_xtensa_op_s16i(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #endif
ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
reg_base = reg_index;
@@ -1873,27 +1868,28 @@ static void emit_native_store_subscr(emit_t *emit) {
}
case VTYPE_PTR32: {
// pointer to 32-bit memory
+ #if N_THUMB
+ if (index_value >= 0 && index_value < 32) {
+ asm_thumb_str_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #elif N_RV32
+ if (FIT_SIGNED(index_value, 10)) {
+ asm_rv32_opcode_sw(emit->as, reg_value, reg_base, index_value << 2);
+ break;
+ }
+ #elif N_XTENSA || N_XTENSAWIN
+ if (index_value >= 0 && index_value < 256) {
+ asm_xtensa_s32i_optimised(emit->as, reg_value, reg_base, index_value);
+ break;
+ }
+ #endif
if (index_value != 0) {
// index is a non-zero immediate
- #if N_THUMB
- if (index_value > 0 && index_value < 32) {
- asm_thumb_str_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
- break;
- }
- #elif N_RV32
- if (FIT_SIGNED(index_value, 10)) {
- asm_rv32_opcode_sw(emit->as, reg_value, reg_base, index_value << 2);
- break;
- }
- #elif N_XTENSA || N_XTENSAWIN
- if (index_value > 0 && index_value < 256) {
- asm_xtensa_s32i_optimised(emit->as, REG_RET, reg_base, index_value);
- break;
- }
- #elif N_ARM
+ #if N_ARM
ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
asm_arm_str_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
- return;
+ break;
#endif
ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2);
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
@@ -1930,50 +1926,36 @@ static void emit_native_store_subscr(emit_t *emit) {
switch (vtype_base) {
case VTYPE_PTR8: {
// pointer to 8-bit memory
- // TODO optimise to use thumb strb r1, [r2, r3]
- #if N_ARM
- asm_arm_strb_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
- break;
- #endif
+ #ifdef ASM_STORE8_REG_REG_REG
+ ASM_STORE8_REG_REG_REG(emit->as, reg_value, REG_ARG_1, reg_index);
+ #else
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_STORE8_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+index)
+ #endif
break;
}
case VTYPE_PTR16: {
// pointer to 16-bit memory
- #if N_ARM
- asm_arm_strh_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
- break;
- #elif N_XTENSA || N_XTENSAWIN
- asm_xtensa_op_addx2(emit->as, REG_ARG_1, reg_index, REG_ARG_1);
- asm_xtensa_op_s16i(emit->as, reg_value, REG_ARG_1, 0);
- break;
- #endif
+ #ifdef ASM_STORE16_REG_REG_REG
+ ASM_STORE16_REG_REG_REG(emit->as, reg_value, REG_ARG_1, reg_index);
+ #else
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_STORE16_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+2*index)
+ #endif
break;
}
case VTYPE_PTR32: {
// pointer to 32-bit memory
- #if N_ARM
- asm_arm_str_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
- break;
- #elif N_RV32
- asm_rv32_opcode_slli(emit->as, REG_TEMP2, reg_index, 2);
- asm_rv32_opcode_cadd(emit->as, REG_ARG_1, REG_TEMP2);
- asm_rv32_opcode_sw(emit->as, reg_value, REG_ARG_1, 0);
- break;
- #elif N_XTENSA || N_XTENSAWIN
- asm_xtensa_op_addx4(emit->as, REG_ARG_1, reg_index, REG_ARG_1);
- asm_xtensa_op_s32i_n(emit->as, reg_value, REG_ARG_1, 0);
- break;
- #endif
+ #ifdef ASM_STORE32_REG_REG_REG
+ ASM_STORE32_REG_REG_REG(emit->as, reg_value, REG_ARG_1, reg_index);
+ #else
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
ASM_STORE32_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+4*index)
+ #endif
break;
}
default:
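
The per-port special cases are now behind optional ASM_{LOAD,STORE}{8,16,32}_REG_REG_REG macros probed with #ifdef; ports without them keep the portable fallback, which scales the index by adding it repeatedly rather than shifting. A sketch of that fallback arithmetic:

#include <assert.h>
#include <stdint.h>

/* Sketch of the portable fallback: repeated adds scale the index by the
   element size without needing a shift in the generic ASM API. */
int main(void) {
    uintptr_t base = 0x1000, index = 5;
    uintptr_t addr16 = base + index + index;                 // two adds: base + 2*index
    uintptr_t addr32 = base + index + index + index + index; // four adds: base + 4*index
    assert(addr16 == base + 2 * 5 && addr32 == base + 4 * 5);
    return 0;
}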
diff --git a/py/emitndebug.c b/py/emitndebug.c
index bd896a75c8..c068a9a9a1 100644
--- a/py/emitndebug.c
+++ b/py/emitndebug.c
@@ -251,8 +251,6 @@ static void asm_debug_setcc_reg_reg_reg(asm_debug_t *as, int op, int reg1, int r
#define ASM_MUL_REG_REG(as, reg_dest, reg_src) \
asm_debug_reg_reg(as, "mul", reg_dest, reg_src)
-#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) \
- asm_debug_reg_reg(as, "load", reg_dest, reg_base)
#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) \
asm_debug_reg_reg_offset(as, "load", reg_dest, reg_base, word_offset)
#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) \
@@ -264,8 +262,6 @@ static void asm_debug_setcc_reg_reg_reg(asm_debug_t *as, int op, int reg1, int r
#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) \
asm_debug_reg_reg(as, "load32", reg_dest, reg_base)
-#define ASM_STORE_REG_REG(as, reg_src, reg_base) \
- asm_debug_reg_reg(as, "store", reg_src, reg_base)
#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) \
asm_debug_reg_reg_offset(as, "store", reg_src, reg_base, word_offset)
#define ASM_STORE8_REG_REG(as, reg_src, reg_base) \
diff --git a/py/misc.h b/py/misc.h
index e05fbe61a9..49f2f87111 100644
--- a/py/misc.h
+++ b/py/misc.h
@@ -105,7 +105,7 @@ void *m_realloc(void *ptr, size_t new_num_bytes);
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move);
void m_free(void *ptr);
#endif
-NORETURN void m_malloc_fail(size_t num_bytes);
+MP_NORETURN void m_malloc_fail(size_t num_bytes);
#if MICROPY_TRACKED_ALLOC
// These alloc/free functions track the pointers in a linked list so the GC does not reclaim
diff --git a/py/modmath.c b/py/modmath.c
index 4d51a28d0b..b792d8581d 100644
--- a/py/modmath.c
+++ b/py/modmath.c
@@ -37,7 +37,7 @@
#define MP_PI_4 MICROPY_FLOAT_CONST(0.78539816339744830962)
#define MP_3_PI_4 MICROPY_FLOAT_CONST(2.35619449019234492885)
-static NORETURN void math_error(void) {
+static MP_NORETURN void math_error(void) {
mp_raise_ValueError(MP_ERROR_TEXT("math domain error"));
}
diff --git a/py/mpconfig.h b/py/mpconfig.h
index 9001b8983b..01712bd5b4 100644
--- a/py/mpconfig.h
+++ b/py/mpconfig.h
@@ -406,6 +406,11 @@
#define MICROPY_EMIT_INLINE_XTENSA (0)
#endif
+// Whether to support uncommon Xtensa inline assembler opcodes
+#ifndef MICROPY_EMIT_INLINE_XTENSA_UNCOMMON_OPCODES
+#define MICROPY_EMIT_INLINE_XTENSA_UNCOMMON_OPCODES (0)
+#endif
+
// Whether to emit Xtensa-Windowed native code
#ifndef MICROPY_EMIT_XTENSAWIN
#define MICROPY_EMIT_XTENSAWIN (0)
@@ -2115,8 +2120,12 @@ typedef double mp_float_t;
#endif // INT_FMT
// Modifier for function which doesn't return
-#ifndef NORETURN
-#define NORETURN __attribute__((noreturn))
+#ifndef MP_NORETURN
+#define MP_NORETURN __attribute__((noreturn))
+#endif
+
+#if !MICROPY_PREVIEW_VERSION_2
+#define NORETURN MP_NORETURN
#endif
// Modifier for weak functions
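
A standalone sketch of the rename and its compatibility alias (macro values copied from the hunk above; builds with GCC or Clang): MP_NORETURN is the new spelling, and the bare NORETURN alias stays available until the 2.0 preview.

/* Standalone sketch; macro values copied from the mpconfig.h hunk above. */
#define MP_NORETURN __attribute__((noreturn))
#define MICROPY_PREVIEW_VERSION_2 (0)
#if !MICROPY_PREVIEW_VERSION_2
#define NORETURN MP_NORETURN // legacy spelling stays available pre-2.0
#endif

MP_NORETURN void fatal_new(void) { for (;;) {} } // new spelling
NORETURN void fatal_old(void) { for (;;) {} }    // old spelling via alias

int main(void) { return 0; }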
diff --git a/py/mpprint.c b/py/mpprint.c
index 291e4145ff..00a5f944c6 100644
--- a/py/mpprint.c
+++ b/py/mpprint.c
@@ -58,7 +58,7 @@ int mp_print_str(const mp_print_t *print, const char *str) {
return len;
}
-int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width) {
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, unsigned int flags, char fill, int width) {
int left_pad = 0;
int right_pad = 0;
int pad = width - len;
@@ -201,7 +201,7 @@ static int mp_print_int(const mp_print_t *print, mp_uint_t x, int sgn, int base,
return len;
}
-int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec) {
+int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, unsigned int base, int base_char, int flags, char fill, int width, int prec) {
// These are the only values for "base" that are required to be supported by this
// function, since Python only allows the user to format integers in these bases.
// If needed this function could be generalised to handle other values.
@@ -248,10 +248,7 @@ int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char
int prefix_len = prefix - prefix_buf;
prefix = prefix_buf;
- char comma = '\0';
- if (flags & PF_FLAG_SHOW_COMMA) {
- comma = ',';
- }
+ char comma = flags >> PF_FLAG_SEP_POS;
// The size of this buffer is rather arbitrary. If it's not large
// enough, a dynamic one will be allocated.
@@ -340,7 +337,7 @@ int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char
}
#if MICROPY_PY_BUILTINS_FLOAT
-int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec) {
+int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, unsigned int flags, char fill, int width, int prec) {
char buf[32];
char sign = '\0';
int chrs = 0;
diff --git a/py/mpprint.h b/py/mpprint.h
index 8383ea8579..511af329ba 100644
--- a/py/mpprint.h
+++ b/py/mpprint.h
@@ -33,11 +33,11 @@
#define PF_FLAG_SPACE_SIGN (0x004)
#define PF_FLAG_NO_TRAILZ (0x008)
#define PF_FLAG_SHOW_PREFIX (0x010)
-#define PF_FLAG_SHOW_COMMA (0x020)
-#define PF_FLAG_PAD_AFTER_SIGN (0x040)
-#define PF_FLAG_CENTER_ADJUST (0x080)
-#define PF_FLAG_ADD_PERCENT (0x100)
-#define PF_FLAG_SHOW_OCTAL_LETTER (0x200)
+#define PF_FLAG_PAD_AFTER_SIGN (0x020)
+#define PF_FLAG_CENTER_ADJUST (0x040)
+#define PF_FLAG_ADD_PERCENT (0x080)
+#define PF_FLAG_SHOW_OCTAL_LETTER (0x100)
+#define PF_FLAG_SEP_POS (9) // bit position of the separator char; must be above all PF_FLAG bits
#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
#define MP_PYTHON_PRINTER &mp_sys_stdout_print
@@ -69,9 +69,9 @@ extern const mp_print_t mp_sys_stdout_print;
#endif
int mp_print_str(const mp_print_t *print, const char *str);
-int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width);
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, unsigned int flags, char fill, int width);
#if MICROPY_PY_BUILTINS_FLOAT
-int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec);
+int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, unsigned int flags, char fill, int width, int prec);
#endif
int mp_printf(const mp_print_t *print, const char *fmt, ...);
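Editor's note: after the renumbering the boolean flags occupy bits 0-8 and the separator field starts at bit 9, so the two encodings can never overlap. A compile-time check of that invariant, under the definitions above:

    _Static_assert(PF_FLAG_SHOW_OCTAL_LETTER < (1u << PF_FLAG_SEP_POS),
        "all boolean PF_FLAGs must sit below the separator field");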
diff --git a/py/mpz.c b/py/mpz.c
index 084aebda9e..471bd15981 100644
--- a/py/mpz.c
+++ b/py/mpz.c
@@ -1672,6 +1672,8 @@ size_t mpz_as_str_inpl(const mpz_t *i, unsigned int base, const char *prefix, ch
size_t ilen = i->len;
+ int n_comma = (base == 10) ? 3 : 4;
+
char *s = str;
if (ilen == 0) {
if (prefix) {
@@ -1717,7 +1719,7 @@ size_t mpz_as_str_inpl(const mpz_t *i, unsigned int base, const char *prefix, ch
break;
}
}
- if (!done && comma && (s - last_comma) == 3) {
+ if (!done && comma && (s - last_comma) == n_comma) {
*s++ = comma;
last_comma = s;
}
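Editor's note: the new n_comma implements CPython-style digit grouping, with separators every 3 digits in base 10 and every 4 digits in other bases. A hypothetical standalone helper (group_digits is illustrative, not part of this diff) showing the same rule:

    #include <stdio.h>
    #include <string.h>

    // Insert `sep` every 3 digits for base 10, every 4 otherwise,
    // counting groups from the right-hand (least significant) end.
    static void group_digits(const char *digits, int base, char sep, char *out) {
        size_t len = strlen(digits);
        size_t group = (base == 10) ? 3 : 4;
        size_t o = 0;
        for (size_t i = 0; i < len; ++i) {
            if (i != 0 && (len - i) % group == 0) {
                out[o++] = sep;
            }
            out[o++] = digits[i];
        }
        out[o] = '\0';
    }

    int main(void) {
        char buf[32];
        group_digits("1234567", 10, '_', buf);  // "1_234_567"
        puts(buf);
        group_digits("89ABCDEF", 16, '_', buf); // "89AB_CDEF"
        puts(buf);
        return 0;
    }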
diff --git a/py/nativeglue.h b/py/nativeglue.h
index e96fd7b66a..2c7923c56d 100644
--- a/py/nativeglue.h
+++ b/py/nativeglue.h
@@ -143,7 +143,7 @@ typedef struct _mp_fun_table_t {
int (*printf_)(const mp_print_t *print, const char *fmt, ...);
int (*vprintf_)(const mp_print_t *print, const char *fmt, va_list args);
#if defined(__GNUC__)
- NORETURN // Only certain compilers support no-return attributes in function pointer declarations
+ MP_NORETURN // Only certain compilers support no-return attributes in function pointer declarations
#endif
void (*raise_msg)(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg);
const mp_obj_type_t *(*obj_get_type)(mp_const_obj_t o_in);
diff --git a/py/nlr.c b/py/nlr.c
index 7ab0c0955a..de2a38ceff 100644
--- a/py/nlr.c
+++ b/py/nlr.c
@@ -81,7 +81,7 @@ void nlr_call_jump_callbacks(nlr_buf_t *nlr) {
}
#if MICROPY_ENABLE_VM_ABORT
-NORETURN void nlr_jump_abort(void) {
+MP_NORETURN void nlr_jump_abort(void) {
MP_STATE_THREAD(nlr_top) = MP_STATE_VM(nlr_abort);
nlr_jump(NULL);
}
diff --git a/py/nlr.h b/py/nlr.h
index ce30bc91d6..47447c5d17 100644
--- a/py/nlr.h
+++ b/py/nlr.h
@@ -177,18 +177,18 @@ unsigned int nlr_push(nlr_buf_t *);
unsigned int nlr_push_tail(nlr_buf_t *top);
void nlr_pop(void);
-NORETURN void nlr_jump(void *val);
+MP_NORETURN void nlr_jump(void *val);
#if MICROPY_ENABLE_VM_ABORT
#define nlr_set_abort(buf) MP_STATE_VM(nlr_abort) = buf
#define nlr_get_abort() MP_STATE_VM(nlr_abort)
-NORETURN void nlr_jump_abort(void);
+MP_NORETURN void nlr_jump_abort(void);
#endif
// This must be implemented by a port. It's called by nlr_jump
// if no nlr buf has been pushed. It must not return, but rather
// should bail out with a fatal error.
-NORETURN void nlr_jump_fail(void *val);
+MP_NORETURN void nlr_jump_fail(void *val);
// use nlr_raise instead of nlr_jump so that debugging is easier
#ifndef MICROPY_DEBUG_NLR
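Editor's note: for context, these declarations are the core of MicroPython's NLR (non-local return) machinery; the canonical protected-call pattern looks like this (some_callable is a placeholder):

    nlr_buf_t nlr;
    if (nlr_push(&nlr) == 0) {
        // protected region: code here may raise via nlr_raise()/nlr_jump()
        mp_call_function_0(some_callable);
        nlr_pop(); // must be popped on the success path
    } else {
        // an exception propagated here; nlr.ret_val holds the exception
        mp_obj_t exc = MP_OBJ_FROM_PTR(nlr.ret_val);
        (void)exc;
    }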
diff --git a/py/nlraarch64.c b/py/nlraarch64.c
index d6d87ebc50..3318004b5e 100644
--- a/py/nlraarch64.c
+++ b/py/nlraarch64.c
@@ -56,7 +56,7 @@ __asm(
#endif
);
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
MP_STATIC_ASSERT(offsetof(nlr_buf_t, regs) == 16); // asm assumes it
diff --git a/py/nlrmips.c b/py/nlrmips.c
index cba52b16a2..5c55db7e26 100644
--- a/py/nlrmips.c
+++ b/py/nlrmips.c
@@ -57,7 +57,7 @@ __asm(
".end nlr_push \n"
);
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm(
"move $4, %0 \n"
diff --git a/py/nlrpowerpc.c b/py/nlrpowerpc.c
index 8a69fe1eec..cf140400e6 100644
--- a/py/nlrpowerpc.c
+++ b/py/nlrpowerpc.c
@@ -78,7 +78,7 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
return 0;
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm__ volatile (
@@ -167,7 +167,7 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
return 0;
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm__ volatile (
diff --git a/py/nlrrv32.c b/py/nlrrv32.c
index 9a12ede400..565a8629db 100644
--- a/py/nlrrv32.c
+++ b/py/nlrrv32.c
@@ -50,7 +50,7 @@ __attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) {
);
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
"add x10, x0, %0 \n" // Load nlr_buf address.
diff --git a/py/nlrrv64.c b/py/nlrrv64.c
index e7ba79797b..b7d1467b8f 100644
--- a/py/nlrrv64.c
+++ b/py/nlrrv64.c
@@ -50,7 +50,7 @@ __attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) {
);
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
"add x10, x0, %0 \n" // Load nlr_buf address.
diff --git a/py/nlrthumb.c b/py/nlrthumb.c
index e7b24f242b..8546308a3d 100644
--- a/py/nlrthumb.c
+++ b/py/nlrthumb.c
@@ -100,7 +100,7 @@ __attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) {
#endif
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
diff --git a/py/nlrx64.c b/py/nlrx64.c
index d1ad91ff7d..51224729fc 100644
--- a/py/nlrx64.c
+++ b/py/nlrx64.c
@@ -100,7 +100,7 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
#endif
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
diff --git a/py/nlrx86.c b/py/nlrx86.c
index 085e30d203..26bf0dc6cc 100644
--- a/py/nlrx86.c
+++ b/py/nlrx86.c
@@ -78,7 +78,7 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
#endif
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
diff --git a/py/nlrxtensa.c b/py/nlrxtensa.c
index ff7af6edee..2d1bf35e38 100644
--- a/py/nlrxtensa.c
+++ b/py/nlrxtensa.c
@@ -55,7 +55,7 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
return 0; // needed to silence compiler warning
}
-NORETURN void nlr_jump(void *val) {
+MP_NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
diff --git a/py/objint.c b/py/objint.c
index 4be6009a44..87d8a27852 100644
--- a/py/objint.c
+++ b/py/objint.c
@@ -209,7 +209,7 @@ static const uint8_t log_base2_floor[] = {
size_t mp_int_format_size(size_t num_bits, int base, const char *prefix, char comma) {
assert(2 <= base && base <= 16);
size_t num_digits = num_bits / log_base2_floor[base - 1] + 1;
- size_t num_commas = comma ? num_digits / 3 : 0;
+ size_t num_commas = comma ? (base == 10 ? num_digits / 3 : num_digits / 4) : 0;
size_t prefix_len = prefix ? strlen(prefix) : 0;
return num_digits + num_commas + prefix_len + 2; // +1 for sign, +1 for null byte
}
@@ -251,6 +251,7 @@ char *mp_obj_int_formatted(char **buf, size_t *buf_size, size_t *fmt_size, mp_co
sign = '-';
}
+ int n_comma = (base == 10) ? 3 : 4;
size_t needed_size = mp_int_format_size(sizeof(fmt_int_t) * 8, base, prefix, comma);
if (needed_size > *buf_size) {
*buf = m_new(char, needed_size);
@@ -275,7 +276,7 @@ char *mp_obj_int_formatted(char **buf, size_t *buf_size, size_t *fmt_size, mp_co
c += '0';
}
*(--b) = c;
- if (comma && num != 0 && b > str && (last_comma - b) == 3) {
+ if (comma && num != 0 && b > str && (last_comma - b) == n_comma) {
*(--b) = comma;
last_comma = b;
}
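Editor's note: a worked sizing example for the updated bound, for a 64-bit value printed in base 16 with a separator: log_base2_floor[15] is 4, so the digit bound is 64/4 + 1 = 17 and the separator bound drops from 17/3 = 5 to 17/4 = 4, matching the wider 4-digit groups:

    enum {
        EX_NUM_BITS = 64,
        EX_DIGITS   = EX_NUM_BITS / 4 + 1,    // 17 hex digits, upper bound
        EX_SEPS     = EX_DIGITS / 4,          // 4 separators, groups of four
        EX_TOTAL    = EX_DIGITS + EX_SEPS + 2 // + sign + NUL = 23 (no prefix)
    };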
diff --git a/py/objstr.c b/py/objstr.c
index a160ab415c..c81fc682fd 100644
--- a/py/objstr.c
+++ b/py/objstr.c
@@ -40,7 +40,7 @@ static mp_obj_t str_modulo_format(mp_obj_t pattern, size_t n_args, const mp_obj_
#endif
static mp_obj_t mp_obj_new_bytes_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf);
-static NORETURN void bad_implicit_conversion(mp_obj_t self_in);
+static MP_NORETURN void bad_implicit_conversion(mp_obj_t self_in);
static mp_obj_t mp_obj_new_str_type_from_vstr(const mp_obj_type_t *type, vstr_t *vstr);
@@ -1001,7 +1001,7 @@ static mp_obj_t arg_as_int(mp_obj_t arg) {
#endif
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
-static NORETURN void terse_str_format_value_error(void) {
+static MP_NORETURN void terse_str_format_value_error(void) {
mp_raise_ValueError(MP_ERROR_TEXT("bad format string"));
}
#else
@@ -1184,7 +1184,7 @@ static vstr_t mp_obj_str_format_helper(const char *str, const char *top, int *ar
int width = -1;
int precision = -1;
char type = '\0';
- int flags = 0;
+ unsigned int flags = 0;
if (format_spec) {
// The format specifier (from http://docs.python.org/2/library/string.html#formatspec)
@@ -1229,8 +1229,9 @@ static vstr_t mp_obj_str_format_helper(const char *str, const char *top, int *ar
}
}
s = str_to_int(s, stop, &width);
- if (*s == ',') {
- flags |= PF_FLAG_SHOW_COMMA;
+ if (*s == ',' || *s == '_') {
+ MP_STATIC_ASSERT((unsigned)'_' << PF_FLAG_SEP_POS >> PF_FLAG_SEP_POS == '_');
+ flags |= (unsigned)*s << PF_FLAG_SEP_POS;
s++;
}
if (*s == '.') {
@@ -2357,7 +2358,7 @@ bool mp_obj_str_equal(mp_obj_t s1, mp_obj_t s2) {
}
}
-static NORETURN void bad_implicit_conversion(mp_obj_t self_in) {
+static MP_NORETURN void bad_implicit_conversion(mp_obj_t self_in) {
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("can't convert to str implicitly"));
#else
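Editor's note: at the Python level this accepts '_' as well as ',' in format specs; the expected results below assume MicroPython mirrors CPython's grouping rules:

    // '{:,}'.format(1234567)     -> '1,234,567'  (decimal, groups of 3)
    // '{:_}'.format(1234567)     -> '1_234_567'  (decimal, groups of 3)
    // '{:_x}'.format(0x89abcdef) -> '89ab_cdef'  (hex, groups of 4)

The MP_STATIC_ASSERT in the hunk above guards the encoding: shifting '_' up by PF_FLAG_SEP_POS and back down must be lossless in an unsigned int, otherwise the separator could not be recovered.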
diff --git a/py/parsenum.c b/py/parsenum.c
index 3281eb4b85..a38ce563f9 100644
--- a/py/parsenum.c
+++ b/py/parsenum.c
@@ -36,7 +36,7 @@
#include <math.h>
#endif
-static NORETURN void raise_exc(mp_obj_t exc, mp_lexer_t *lex) {
+static MP_NORETURN void raise_exc(mp_obj_t exc, mp_lexer_t *lex) {
// if lex!=NULL then the parser called us and we need to convert the
// exception's type from ValueError to SyntaxError and add traceback info
if (lex != NULL) {
diff --git a/py/persistentcode.c b/py/persistentcode.c
index 2a42b904bc..43207a0cc8 100644
--- a/py/persistentcode.c
+++ b/py/persistentcode.c
@@ -72,6 +72,8 @@ typedef struct _bytecode_prelude_t {
static int read_byte(mp_reader_t *reader);
static size_t read_uint(mp_reader_t *reader);
+#if MICROPY_EMIT_MACHINE_CODE
+
#if MICROPY_PERSISTENT_CODE_TRACK_FUN_DATA || MICROPY_PERSISTENT_CODE_TRACK_BSS_RODATA
// An mp_obj_list_t that tracks native text/BSS/rodata to prevent the GC from reclaiming them.
@@ -86,8 +88,6 @@ static void track_root_pointer(void *ptr) {
#endif
-#if MICROPY_EMIT_MACHINE_CODE
-
typedef struct _reloc_info_t {
mp_reader_t *reader;
mp_module_context_t *context;
@@ -415,15 +415,17 @@ static mp_raw_code_t *load_raw_code(mp_reader_t *reader, mp_module_context_t *co
// Relocate and commit code to executable address space
reloc_info_t ri = {reader, context, rodata, bss};
+ #if MICROPY_PERSISTENT_CODE_TRACK_FUN_DATA
+ if (native_scope_flags & MP_SCOPE_FLAG_VIPERRELOC) {
+ // Track the function data memory so it's not reclaimed by the GC.
+ track_root_pointer(fun_data);
+ }
+ #endif
#if defined(MP_PLAT_COMMIT_EXEC)
void *opt_ri = (native_scope_flags & MP_SCOPE_FLAG_VIPERRELOC) ? &ri : NULL;
fun_data = MP_PLAT_COMMIT_EXEC(fun_data, fun_data_len, opt_ri);
#else
if (native_scope_flags & MP_SCOPE_FLAG_VIPERRELOC) {
- #if MICROPY_PERSISTENT_CODE_TRACK_FUN_DATA
- // Track the function data memory so it's not reclaimed by the GC.
- track_root_pointer(fun_data);
- #endif
// Do the relocations.
mp_native_relocate(&ri, fun_data, (uintptr_t)fun_data);
}
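Editor's note: this hunk moves the GC-root tracking of fun_data ahead of the commit step, so the heap block is tracked even on ports that define MP_PLAT_COMMIT_EXEC. A hypothetical port-side sketch of that macro, shaped after the call site above (the my_port_* names are illustrative):

    void *my_port_commit_exec(void *fun_data, size_t len, void *reloc_info) {
        void *exec = my_port_alloc_exec(len); // hypothetical executable-RAM allocator
        memcpy(exec, fun_data, len);
        if (reloc_info != NULL) {
            mp_native_relocate(reloc_info, (uint8_t *)exec, (uintptr_t)exec);
        }
        return exec;
    }
    #define MP_PLAT_COMMIT_EXEC(buf, len, reloc) my_port_commit_exec(buf, len, reloc)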
diff --git a/py/runtime.c b/py/runtime.c
index 58819819ad..7979e520da 100644
--- a/py/runtime.c
+++ b/py/runtime.c
@@ -123,7 +123,7 @@ void mp_init(void) {
MP_STATE_VM(mp_module_builtins_override_dict) = NULL;
#endif
- #if MICROPY_PERSISTENT_CODE_TRACK_FUN_DATA || MICROPY_PERSISTENT_CODE_TRACK_BSS_RODATA
+ #if MICROPY_EMIT_MACHINE_CODE && (MICROPY_PERSISTENT_CODE_TRACK_FUN_DATA || MICROPY_PERSISTENT_CODE_TRACK_BSS_RODATA)
MP_STATE_VM(persistent_code_root_pointers) = MP_OBJ_NULL;
#endif
@@ -1649,7 +1649,7 @@ mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_i
#endif // MICROPY_ENABLE_COMPILER
-NORETURN void m_malloc_fail(size_t num_bytes) {
+MP_NORETURN void m_malloc_fail(size_t num_bytes) {
DEBUG_printf("memory allocation failed, allocating %u bytes\n", (uint)num_bytes);
#if MICROPY_ENABLE_GC
if (gc_is_locked()) {
@@ -1662,25 +1662,25 @@ NORETURN void m_malloc_fail(size_t num_bytes) {
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
-NORETURN void mp_raise_type(const mp_obj_type_t *exc_type) {
+MP_NORETURN void mp_raise_type(const mp_obj_type_t *exc_type) {
nlr_raise(mp_obj_new_exception(exc_type));
}
-NORETURN void mp_raise_ValueError_no_msg(void) {
+MP_NORETURN void mp_raise_ValueError_no_msg(void) {
mp_raise_type(&mp_type_ValueError);
}
-NORETURN void mp_raise_TypeError_no_msg(void) {
+MP_NORETURN void mp_raise_TypeError_no_msg(void) {
mp_raise_type(&mp_type_TypeError);
}
-NORETURN void mp_raise_NotImplementedError_no_msg(void) {
+MP_NORETURN void mp_raise_NotImplementedError_no_msg(void) {
mp_raise_type(&mp_type_NotImplementedError);
}
#else
-NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg) {
+MP_NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg) {
if (msg == NULL) {
nlr_raise(mp_obj_new_exception(exc_type));
} else {
@@ -1688,7 +1688,7 @@ NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t ms
}
}
-NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, ...) {
+MP_NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, ...) {
va_list args;
va_start(args, fmt);
mp_obj_t exc = mp_obj_new_exception_msg_vlist(exc_type, fmt, args);
@@ -1696,25 +1696,25 @@ NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text
nlr_raise(exc);
}
-NORETURN void mp_raise_ValueError(mp_rom_error_text_t msg) {
+MP_NORETURN void mp_raise_ValueError(mp_rom_error_text_t msg) {
mp_raise_msg(&mp_type_ValueError, msg);
}
-NORETURN void mp_raise_TypeError(mp_rom_error_text_t msg) {
+MP_NORETURN void mp_raise_TypeError(mp_rom_error_text_t msg) {
mp_raise_msg(&mp_type_TypeError, msg);
}
-NORETURN void mp_raise_NotImplementedError(mp_rom_error_text_t msg) {
+MP_NORETURN void mp_raise_NotImplementedError(mp_rom_error_text_t msg) {
mp_raise_msg(&mp_type_NotImplementedError, msg);
}
#endif
-NORETURN void mp_raise_type_arg(const mp_obj_type_t *exc_type, mp_obj_t arg) {
+MP_NORETURN void mp_raise_type_arg(const mp_obj_type_t *exc_type, mp_obj_t arg) {
nlr_raise(mp_obj_new_exception_arg1(exc_type, arg));
}
-NORETURN void mp_raise_StopIteration(mp_obj_t arg) {
+MP_NORETURN void mp_raise_StopIteration(mp_obj_t arg) {
if (arg == MP_OBJ_NULL) {
mp_raise_type(&mp_type_StopIteration);
} else {
@@ -1722,7 +1722,7 @@ NORETURN void mp_raise_StopIteration(mp_obj_t arg) {
}
}
-NORETURN void mp_raise_TypeError_int_conversion(mp_const_obj_t arg) {
+MP_NORETURN void mp_raise_TypeError_int_conversion(mp_const_obj_t arg) {
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
(void)arg;
mp_raise_TypeError(MP_ERROR_TEXT("can't convert to int"));
@@ -1732,11 +1732,11 @@ NORETURN void mp_raise_TypeError_int_conversion(mp_const_obj_t arg) {
#endif
}
-NORETURN void mp_raise_OSError(int errno_) {
+MP_NORETURN void mp_raise_OSError(int errno_) {
mp_raise_type_arg(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(errno_));
}
-NORETURN void mp_raise_OSError_with_filename(int errno_, const char *filename) {
+MP_NORETURN void mp_raise_OSError_with_filename(int errno_, const char *filename) {
vstr_t vstr;
vstr_init(&vstr, 32);
vstr_printf(&vstr, "can't open %s", filename);
@@ -1746,7 +1746,7 @@ NORETURN void mp_raise_OSError_with_filename(int errno_, const char *filename) {
}
#if MICROPY_STACK_CHECK || MICROPY_ENABLE_PYSTACK
-NORETURN void mp_raise_recursion_depth(void) {
+MP_NORETURN void mp_raise_recursion_depth(void) {
mp_raise_type_arg(&mp_type_RuntimeError, MP_OBJ_NEW_QSTR(MP_QSTR_maximum_space_recursion_space_depth_space_exceeded));
}
#endif
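Editor's note: all of these raise helpers keep their existing semantics; only the no-return attribute macro changes. For reference, a typical descriptive-error call site (the message and value are illustrative):

    mp_raise_msg_varg(&mp_type_ValueError,
        MP_ERROR_TEXT("unexpected value %d"), (int)value);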
diff --git a/py/runtime.h b/py/runtime.h
index ffbc3972a3..a93488e2cd 100644
--- a/py/runtime.h
+++ b/py/runtime.h
@@ -128,7 +128,7 @@ void mp_event_wait_indefinite(void);
void mp_event_wait_ms(mp_uint_t timeout_ms);
// extra printing method specifically for mp_obj_t's which are integral type
-int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec);
+int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, unsigned int base, int base_char, int flags, char fill, int width, int prec);
void mp_arg_check_num_sig(size_t n_args, size_t n_kw, uint32_t sig);
static inline void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw) {
@@ -136,8 +136,8 @@ static inline void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_mi
}
void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals);
void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals);
-NORETURN void mp_arg_error_terse_mismatch(void);
-NORETURN void mp_arg_error_unimpl_kw(void);
+MP_NORETURN void mp_arg_error_terse_mismatch(void);
+MP_NORETURN void mp_arg_error_unimpl_kw(void);
static inline mp_obj_dict_t *mp_locals_get(void) {
return MP_STATE_THREAD(dict_locals);
@@ -246,10 +246,10 @@ mp_obj_t mp_import_from(mp_obj_t module, qstr name);
void mp_import_all(mp_obj_t module);
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
-NORETURN void mp_raise_type(const mp_obj_type_t *exc_type);
-NORETURN void mp_raise_ValueError_no_msg(void);
-NORETURN void mp_raise_TypeError_no_msg(void);
-NORETURN void mp_raise_NotImplementedError_no_msg(void);
+MP_NORETURN void mp_raise_type(const mp_obj_type_t *exc_type);
+MP_NORETURN void mp_raise_ValueError_no_msg(void);
+MP_NORETURN void mp_raise_TypeError_no_msg(void);
+MP_NORETURN void mp_raise_NotImplementedError_no_msg(void);
#define mp_raise_msg(exc_type, msg) mp_raise_type(exc_type)
#define mp_raise_msg_varg(exc_type, ...) mp_raise_type(exc_type)
#define mp_raise_ValueError(msg) mp_raise_ValueError_no_msg()
@@ -257,19 +257,19 @@ NORETURN void mp_raise_NotImplementedError_no_msg(void);
#define mp_raise_NotImplementedError(msg) mp_raise_NotImplementedError_no_msg()
#else
#define mp_raise_type(exc_type) mp_raise_msg(exc_type, NULL)
-NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg);
-NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, ...);
-NORETURN void mp_raise_ValueError(mp_rom_error_text_t msg);
-NORETURN void mp_raise_TypeError(mp_rom_error_text_t msg);
-NORETURN void mp_raise_NotImplementedError(mp_rom_error_text_t msg);
+MP_NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg);
+MP_NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, ...);
+MP_NORETURN void mp_raise_ValueError(mp_rom_error_text_t msg);
+MP_NORETURN void mp_raise_TypeError(mp_rom_error_text_t msg);
+MP_NORETURN void mp_raise_NotImplementedError(mp_rom_error_text_t msg);
#endif
-NORETURN void mp_raise_type_arg(const mp_obj_type_t *exc_type, mp_obj_t arg);
-NORETURN void mp_raise_StopIteration(mp_obj_t arg);
-NORETURN void mp_raise_TypeError_int_conversion(mp_const_obj_t arg);
-NORETURN void mp_raise_OSError(int errno_);
-NORETURN void mp_raise_OSError_with_filename(int errno_, const char *filename);
-NORETURN void mp_raise_recursion_depth(void);
+MP_NORETURN void mp_raise_type_arg(const mp_obj_type_t *exc_type, mp_obj_t arg);
+MP_NORETURN void mp_raise_StopIteration(mp_obj_t arg);
+MP_NORETURN void mp_raise_TypeError_int_conversion(mp_const_obj_t arg);
+MP_NORETURN void mp_raise_OSError(int errno_);
+MP_NORETURN void mp_raise_OSError_with_filename(int errno_, const char *filename);
+MP_NORETURN void mp_raise_recursion_depth(void);
#if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
#undef mp_check_self
diff --git a/py/scheduler.c b/py/scheduler.c
index 2170b9577e..d4cdb59efb 100644
--- a/py/scheduler.c
+++ b/py/scheduler.c
@@ -88,17 +88,21 @@ static inline void mp_sched_run_pending(void) {
#if MICROPY_SCHEDULER_STATIC_NODES
// Run all pending C callbacks.
- while (MP_STATE_VM(sched_head) != NULL) {
- mp_sched_node_t *node = MP_STATE_VM(sched_head);
- MP_STATE_VM(sched_head) = node->next;
- if (MP_STATE_VM(sched_head) == NULL) {
- MP_STATE_VM(sched_tail) = NULL;
- }
- mp_sched_callback_t callback = node->callback;
- node->callback = NULL;
- MICROPY_END_ATOMIC_SECTION(atomic_state);
- callback(node);
- atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ mp_sched_node_t *original_tail = MP_STATE_VM(sched_tail);
+ if (original_tail != NULL) {
+ mp_sched_node_t *node;
+ do {
+ node = MP_STATE_VM(sched_head);
+ MP_STATE_VM(sched_head) = node->next;
+ if (MP_STATE_VM(sched_head) == NULL) {
+ MP_STATE_VM(sched_tail) = NULL;
+ }
+ mp_sched_callback_t callback = node->callback;
+ node->callback = NULL;
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ callback(node);
+ atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ } while (node != original_tail); // Don't execute any callbacks scheduled during this run
}
#endif
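Editor's note: why the tail is snapshotted before the loop: a C callback that immediately re-schedules its own node would otherwise be seen again within the same pass and the loop would never terminate. A sketch of such a self-re-arming node (my_node and my_callback are illustrative):

    static mp_sched_node_t my_node;

    static void my_callback(mp_sched_node_t *node) {
        // ... do the deferred work ...
        // re-arm: with the tail snapshot this runs on the next pass,
        // not again within the current one
        mp_sched_schedule_node(node, my_callback);
    }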