Diffstat (limited to 'py')
-rw-r--r-- | py/asmarm.c | 112
-rw-r--r-- | py/asmarm.h | 100
-rw-r--r-- | py/asmbase.c | 102
-rw-r--r-- | py/asmbase.h | 69
-rw-r--r-- | py/asmthumb.c | 156
-rw-r--r-- | py/asmthumb.h | 102
-rw-r--r-- | py/asmx64.c | 158
-rw-r--r-- | py/asmx64.h | 106
-rw-r--r-- | py/asmx86.c | 148
-rw-r--r-- | py/asmx86.h | 106
-rw-r--r-- | py/asmxtensa.c | 174
-rw-r--r-- | py/asmxtensa.h | 324
-rw-r--r-- | py/binary.c | 7
-rw-r--r-- | py/builtin.h | 2
-rw-r--r-- | py/builtinimport.c | 28
-rw-r--r-- | py/compile.c | 161
-rw-r--r-- | py/compile.h | 2
-rw-r--r-- | py/emit.h | 15
-rw-r--r-- | py/emitglue.c | 480
-rw-r--r-- | py/emitglue.h | 17
-rw-r--r-- | py/emitinlinethumb.c | 101
-rw-r--r-- | py/emitinlinextensa.c | 345
-rw-r--r-- | py/emitnative.c | 465
-rw-r--r-- | py/lexer.c | 73
-rw-r--r-- | py/lexer.h | 16
-rw-r--r-- | py/lexerstr.c | 65
-rw-r--r-- | py/lexerunix.c | 96
-rw-r--r-- | py/misc.h | 6
-rw-r--r-- | py/mkrules.mk | 6
-rw-r--r-- | py/modbuiltins.c | 3
-rw-r--r-- | py/mpconfig.h | 43
-rw-r--r-- | py/mpprint.c | 5
-rw-r--r-- | py/mpstate.h | 5
-rw-r--r-- | py/mpz.c | 17
-rw-r--r-- | py/nativeglue.c | 2
-rw-r--r-- | py/obj.c | 8
-rw-r--r-- | py/obj.h | 3
-rw-r--r-- | py/objboundmeth.c | 16
-rw-r--r-- | py/objexcept.c | 14
-rw-r--r-- | py/objfun.c | 4
-rw-r--r-- | py/objint.c | 32
-rw-r--r-- | py/objint.h | 1
-rw-r--r-- | py/objint_longlong.c | 11
-rw-r--r-- | py/objint_mpz.c | 11
-rw-r--r-- | py/objmodule.c | 6
-rw-r--r-- | py/objtype.c | 15
-rw-r--r-- | py/parse.c | 80
-rw-r--r-- | py/parse.h | 9
-rw-r--r-- | py/parsenum.c | 18
-rw-r--r-- | py/persistentcode.c | 400
-rw-r--r-- | py/persistentcode.h | 40
-rw-r--r-- | py/py.mk | 20
-rw-r--r-- | py/reader.c | 146
-rw-r--r-- | py/reader.h | 46
-rw-r--r-- | py/runtime.c | 32
-rw-r--r-- | py/runtime.h | 1
-rw-r--r-- | py/stream.c | 1
-rw-r--r-- | py/stream.h | 7
-rw-r--r-- | py/unicode.c | 2
59 files changed, 2676 insertions(+), 1864 deletions(-)
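
Note on the patch below: the assembler changes factor the per-architecture state (pass number, code offset/size/base, label offsets) into a shared mp_asm_base_t in the new py/asmbase.c and py/asmbase.h, and each target's asm_*_t now embeds that struct as its first member. The following is a minimal sketch of how an emitter drives the new base API across the two passes, using only functions that appear in this diff; emit_u8 and example_two_pass are hypothetical helpers for illustration, not part of the patch.

#include "py/misc.h"
#include "py/asmbase.h"

// Hypothetical byte emitter: during MP_ASM_PASS_COMPUTE,
// mp_asm_base_get_cur_to_write_bytes() only counts bytes and returns NULL,
// so callers must check before writing (as the ported asmarm/asmthumb/asmx86
// emitters now do).
static void emit_u8(mp_asm_base_t *as, uint8_t b) {
    uint8_t *c = mp_asm_base_get_cur_to_write_bytes(as, 1);
    if (c != NULL) {
        c[0] = b;
    }
}

void example_two_pass(void) {
    mp_asm_base_t as = {0};
    mp_asm_base_init(&as, 8); // room for up to 8 labels

    // Pass 1 computes sizes and label offsets; pass 2 allocates executable
    // memory (MP_PLAT_ALLOC_EXEC inside start_pass) and writes the bytes.
    for (int pass = MP_ASM_PASS_COMPUTE; pass <= MP_ASM_PASS_EMIT; ++pass) {
        mp_asm_base_start_pass(&as, pass);
        mp_asm_base_label_assign(&as, 0);   // label 0 at current offset
        emit_u8(&as, 0x90);                 // some instruction byte
        mp_asm_base_data(&as, 4, 0x12345678); // little-endian 32-bit literal
    }

    void *code = mp_asm_base_get_code(&as); // pointer to the emitted code
    (void)code;
    mp_asm_base_deinit(&as, false); // keep the executable buffer alive
}
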
diff --git a/py/asmarm.c b/py/asmarm.c index 2c389ac8ce..da07680e31 100644 --- a/py/asmarm.c +++ b/py/asmarm.c @@ -38,52 +38,8 @@ #define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000) -struct _asm_arm_t { - uint pass; - mp_uint_t code_offset; - mp_uint_t code_size; - byte *code_base; - byte dummy_data[4]; - - mp_uint_t max_num_labels; - mp_uint_t *label_offsets; - uint push_reglist; - uint stack_adjust; -}; - -asm_arm_t *asm_arm_new(uint max_num_labels) { - asm_arm_t *as; - - as = m_new0(asm_arm_t, 1); - as->max_num_labels = max_num_labels; - as->label_offsets = m_new(mp_uint_t, max_num_labels); - - return as; -} - -void asm_arm_free(asm_arm_t *as, bool free_code) { - if (free_code) { - MP_PLAT_FREE_EXEC(as->code_base, as->code_size); - } - m_del(mp_uint_t, as->label_offsets, as->max_num_labels); - m_del_obj(asm_arm_t, as); -} - -void asm_arm_start_pass(asm_arm_t *as, uint pass) { - if (pass == ASM_ARM_PASS_COMPUTE) { - memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t)); - } else if (pass == ASM_ARM_PASS_EMIT) { - MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size); - if (as->code_base == NULL) { - assert(0); - } - } - as->pass = pass; - as->code_offset = 0; -} - void asm_arm_end_pass(asm_arm_t *as) { - if (as->pass == ASM_ARM_PASS_EMIT) { + if (as->base.pass == MP_ASM_PASS_EMIT) { #ifdef __arm__ // flush I- and D-cache asm volatile( @@ -97,35 +53,12 @@ void asm_arm_end_pass(asm_arm_t *as) { } } -// all functions must go through this one to emit bytes -// if as->pass < ASM_ARM_PASS_EMIT, then this function only returns a buffer of 4 bytes length -STATIC byte *asm_arm_get_cur_to_write_bytes(asm_arm_t *as, int num_bytes_to_write) { - if (as->pass < ASM_ARM_PASS_EMIT) { - as->code_offset += num_bytes_to_write; - return as->dummy_data; - } else { - assert(as->code_offset + num_bytes_to_write <= as->code_size); - byte *c = as->code_base + as->code_offset; - as->code_offset += num_bytes_to_write; - return c; - } -} - -uint asm_arm_get_code_pos(asm_arm_t *as) { - return as->code_offset; -} - -uint asm_arm_get_code_size(asm_arm_t *as) { - return as->code_size; -} - -void *asm_arm_get_code(asm_arm_t *as) { - return as->code_base; -} - // Insert word into instruction flow STATIC void emit(asm_arm_t *as, uint op) { - *(uint*)asm_arm_get_cur_to_write_bytes(as, 4) = op; + uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4); + if (c != NULL) { + *(uint32_t*)c = op; + } } // Insert word into instruction flow, add "ALWAYS" condition code @@ -263,35 +196,6 @@ void asm_arm_pop(asm_arm_t *as, uint reglist) { emit_al(as, asm_arm_op_pop(reglist)); } -void asm_arm_label_assign(asm_arm_t *as, uint label) { - assert(label < as->max_num_labels); - if (as->pass < ASM_ARM_PASS_EMIT) { - // assign label offset - assert(as->label_offsets[label] == -1); - as->label_offsets[label] = as->code_offset; - } else { - // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT - assert(as->label_offsets[label] == as->code_offset); - } -} - -void asm_arm_align(asm_arm_t* as, uint align) { - // TODO fill unused data with NOPs? 
- as->code_offset = (as->code_offset + align - 1) & (~(align - 1)); -} - -void asm_arm_data(asm_arm_t* as, uint bytesize, uint val) { - byte *c = asm_arm_get_cur_to_write_bytes(as, bytesize); - // only write to the buffer in the emit pass (otherwise we overflow dummy_data) - if (as->pass == ASM_ARM_PASS_EMIT) { - // little endian - for (uint i = 0; i < bytesize; i++) { - *c++ = val; - val >>= 8; - } - } -} - void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) { emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src)); } @@ -429,9 +333,9 @@ void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) { } void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) { - assert(label < as->max_num_labels); - mp_uint_t dest = as->label_offsets[label]; - mp_int_t rel = dest - as->code_offset; + assert(label < as->base.max_num_labels); + mp_uint_t dest = as->base.label_offsets[label]; + mp_int_t rel = dest - as->base.code_offset; rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted diff --git a/py/asmarm.h b/py/asmarm.h index 15e7a047b5..e273b98d73 100644 --- a/py/asmarm.h +++ b/py/asmarm.h @@ -28,9 +28,7 @@ #define __MICROPY_INCLUDED_PY_ASMARM_H__ #include "py/misc.h" - -#define ASM_ARM_PASS_COMPUTE (1) -#define ASM_ARM_PASS_EMIT (2) +#include "py/asmbase.h" #define ASM_ARM_REG_R0 (0) #define ASM_ARM_REG_R1 (1) @@ -68,22 +66,16 @@ #define ASM_ARM_CC_LE (0xd << 28) #define ASM_ARM_CC_AL (0xe << 28) -typedef struct _asm_arm_t asm_arm_t; +typedef struct _asm_arm_t { + mp_asm_base_t base; + uint push_reglist; + uint stack_adjust; +} asm_arm_t; -asm_arm_t *asm_arm_new(uint max_num_labels); -void asm_arm_free(asm_arm_t *as, bool free_code); -void asm_arm_start_pass(asm_arm_t *as, uint pass); void asm_arm_end_pass(asm_arm_t *as); -uint asm_arm_get_code_pos(asm_arm_t *as); -uint asm_arm_get_code_size(asm_arm_t *as); -void *asm_arm_get_code(asm_arm_t *as); void asm_arm_entry(asm_arm_t *as, int num_locals); void asm_arm_exit(asm_arm_t *as); -void asm_arm_label_assign(asm_arm_t *as, uint label); - -void asm_arm_align(asm_arm_t* as, uint align); -void asm_arm_data(asm_arm_t* as, uint bytesize, uint val); void asm_arm_bkpt(asm_arm_t *as); @@ -130,4 +122,84 @@ void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label); void asm_arm_b_label(asm_arm_t *as, uint label); void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp); +#if GENERIC_ASM_API + +// The following macros provide a (mostly) arch-independent API to +// generate native code, and are used by the native emitter. 
+ +#define ASM_WORD_SIZE (4) + +#define REG_RET ASM_ARM_REG_R0 +#define REG_ARG_1 ASM_ARM_REG_R0 +#define REG_ARG_2 ASM_ARM_REG_R1 +#define REG_ARG_3 ASM_ARM_REG_R2 +#define REG_ARG_4 ASM_ARM_REG_R3 + +#define REG_TEMP0 ASM_ARM_REG_R0 +#define REG_TEMP1 ASM_ARM_REG_R1 +#define REG_TEMP2 ASM_ARM_REG_R2 + +#define REG_LOCAL_1 ASM_ARM_REG_R4 +#define REG_LOCAL_2 ASM_ARM_REG_R5 +#define REG_LOCAL_3 ASM_ARM_REG_R6 +#define REG_LOCAL_NUM (3) + +#define ASM_T asm_arm_t +#define ASM_END_PASS asm_arm_end_pass +#define ASM_ENTRY asm_arm_entry +#define ASM_EXIT asm_arm_exit + +#define ASM_JUMP asm_arm_b_label +#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ + do { \ + asm_arm_cmp_reg_i8(as, reg, 0); \ + asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \ + } while (0) +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ + do { \ + asm_arm_cmp_reg_i8(as, reg, 0); \ + asm_arm_bcc_label(as, ASM_ARM_CC_NE, label); \ + } while (0) +#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ + do { \ + asm_arm_cmp_reg_reg(as, reg1, reg2); \ + asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \ + } while (0) +#define ASM_CALL_IND(as, ptr, idx) asm_arm_bl_ind(as, ptr, idx, ASM_ARM_REG_R3) + +#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_arm_mov_local_reg(as, (local_num), (reg)) +#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm)) +#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm)) +#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ + do { \ + asm_arm_mov_reg_i32(as, (reg_temp), (imm)); \ + asm_arm_mov_local_reg(as, (local_num), (reg_temp)); \ + } while (false) +#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_arm_mov_reg_local(as, (reg), (local_num)) +#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_arm_mov_reg_reg((as), (reg_dest), (reg_src)) +#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_arm_mov_reg_local_addr(as, (reg), (local_num)) + +#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift)) +#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift)) +#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_arm_mul_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) + +#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0) +#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset)) +#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base)) +#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base)) +#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0) + +#define ASM_STORE_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0) +#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) 
asm_arm_str_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset)) +#define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base)) +#define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base)) +#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0) + +#endif // GENERIC_ASM_API + #endif // __MICROPY_INCLUDED_PY_ASMARM_H__ diff --git a/py/asmbase.c b/py/asmbase.c new file mode 100644 index 0000000000..c941e917b7 --- /dev/null +++ b/py/asmbase.c @@ -0,0 +1,102 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include <assert.h> +#include <string.h> + +#include "py/obj.h" +#include "py/misc.h" +#include "py/asmbase.h" + +#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM + +void mp_asm_base_init(mp_asm_base_t *as, size_t max_num_labels) { + as->max_num_labels = max_num_labels; + as->label_offsets = m_new(size_t, max_num_labels); +} + +void mp_asm_base_deinit(mp_asm_base_t *as, bool free_code) { + if (free_code) { + MP_PLAT_FREE_EXEC(as->code_base, as->code_size); + } + m_del(size_t, as->label_offsets, as->max_num_labels); +} + +void mp_asm_base_start_pass(mp_asm_base_t *as, int pass) { + if (pass == MP_ASM_PASS_COMPUTE) { + // reset all labels + memset(as->label_offsets, -1, as->max_num_labels * sizeof(size_t)); + } else if (pass == MP_ASM_PASS_EMIT) { + // allocating executable RAM is platform specific + MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size); + assert(as->code_base != NULL); + } + as->pass = pass; + as->code_offset = 0; +} + +// all functions must go through this one to emit bytes +// if as->pass < MP_ASM_PASS_EMIT, then this function just counts the number +// of bytes needed and returns NULL, and callers should not store any data +uint8_t *mp_asm_base_get_cur_to_write_bytes(mp_asm_base_t *as, size_t num_bytes_to_write) { + uint8_t *c = NULL; + if (as->pass == MP_ASM_PASS_EMIT) { + assert(as->code_offset + num_bytes_to_write <= as->code_size); + c = as->code_base + as->code_offset; + } + as->code_offset += num_bytes_to_write; + return c; +} + +void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label) { + assert(label < as->max_num_labels); + if (as->pass < MP_ASM_PASS_EMIT) { + // assign label offset + assert(as->label_offsets[label] == (size_t)-1); + as->label_offsets[label] = as->code_offset; + } else { + // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT + assert(as->label_offsets[label] == as->code_offset); + } +} + +// align must be a multiple of 2 +void mp_asm_base_align(mp_asm_base_t* as, unsigned int align) { + as->code_offset = (as->code_offset + align - 1) & (~(align - 1)); +} + +// this function assumes a little endian machine +void mp_asm_base_data(mp_asm_base_t* as, unsigned int bytesize, uintptr_t val) { + uint8_t *c = mp_asm_base_get_cur_to_write_bytes(as, bytesize); + if (c != NULL) { + for (unsigned int i = 0; i < bytesize; i++) { + *c++ = val; + val >>= 8; + } + } +} + +#endif // MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM diff --git a/py/asmbase.h b/py/asmbase.h new file mode 100644 index 0000000000..d2b4038931 --- /dev/null +++ b/py/asmbase.h @@ -0,0 +1,69 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef MICROPY_INCLUDED_PY_ASMBASE_H +#define MICROPY_INCLUDED_PY_ASMBASE_H + +#include <stdint.h> +#include <stdbool.h> + +#define MP_ASM_PASS_COMPUTE (1) +#define MP_ASM_PASS_EMIT (2) + +typedef struct _mp_asm_base_t { + int pass; + size_t code_offset; + size_t code_size; + uint8_t *code_base; + + size_t max_num_labels; + size_t *label_offsets; +} mp_asm_base_t; + +void mp_asm_base_init(mp_asm_base_t *as, size_t max_num_labels); +void mp_asm_base_deinit(mp_asm_base_t *as, bool free_code); +void mp_asm_base_start_pass(mp_asm_base_t *as, int pass); +uint8_t *mp_asm_base_get_cur_to_write_bytes(mp_asm_base_t *as, size_t num_bytes_to_write); +void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label); +void mp_asm_base_align(mp_asm_base_t* as, unsigned int align); +void mp_asm_base_data(mp_asm_base_t* as, unsigned int bytesize, uintptr_t val); + +static inline size_t mp_asm_base_get_code_pos(mp_asm_base_t *as) { + return as->code_offset; +} + +static inline size_t mp_asm_base_get_code_size(mp_asm_base_t *as) { + return as->code_size; +} + +static inline void *mp_asm_base_get_code(mp_asm_base_t *as) { + #if defined(MP_PLAT_COMMIT_EXEC) + return MP_PLAT_COMMIT_EXEC(as->code_base, as->code_size); + #else + return as->code_base; + #endif +} + +#endif // MICROPY_INCLUDED_PY_ASMBASE_H diff --git a/py/asmthumb.c b/py/asmthumb.c index 1aae3d38eb..749c1e405b 100644 --- a/py/asmthumb.c +++ b/py/asmthumb.c @@ -42,49 +42,8 @@ #define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800) #define SIGNED_FIT23(x) (((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000) -struct _asm_thumb_t { - mp_uint_t pass; - mp_uint_t code_offset; - mp_uint_t code_size; - byte *code_base; - byte dummy_data[4]; - - mp_uint_t max_num_labels; - mp_uint_t *label_offsets; - mp_uint_t push_reglist; - mp_uint_t stack_adjust; -}; - -asm_thumb_t *asm_thumb_new(uint max_num_labels) { - asm_thumb_t *as; - - as = m_new0(asm_thumb_t, 1); - as->max_num_labels = max_num_labels; - as->label_offsets = m_new(mp_uint_t, max_num_labels); - - return as; -} - -void asm_thumb_free(asm_thumb_t *as, bool free_code) { - if (free_code) { - MP_PLAT_FREE_EXEC(as->code_base, as->code_size); - } - m_del(mp_uint_t, as->label_offsets, as->max_num_labels); - m_del_obj(asm_thumb_t, as); -} - -void asm_thumb_start_pass(asm_thumb_t *as, uint pass) { - if (pass == ASM_THUMB_PASS_COMPUTE) { - memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t)); - } else if (pass == ASM_THUMB_PASS_EMIT) { - MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size); - if (as->code_base == NULL) { - assert(0); - } - //printf("code_size: %u\n", as->code_size); - } - as->pass = pass; - as->code_offset = 0; +static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int n) { + return mp_asm_base_get_cur_to_write_bytes(&as->base, n); } void asm_thumb_end_pass(asm_thumb_t *as) { @@ -92,42 +51,15 @@ void asm_thumb_end_pass(asm_thumb_t *as) { // could check labels are resolved... 
#if defined(MCU_SERIES_F7) - if (as->pass == ASM_THUMB_PASS_EMIT) { + if (as->base.pass == MP_ASM_PASS_EMIT) { // flush D-cache, so the code emited is stored in memory - SCB_CleanDCache_by_Addr((uint32_t*)as->code_base, as->code_size); + SCB_CleanDCache_by_Addr((uint32_t*)as->base.code_base, as->base.code_size); // invalidate I-cache SCB_InvalidateICache(); } #endif } -// all functions must go through this one to emit bytes -// if as->pass < ASM_THUMB_PASS_EMIT, then this function only returns a buffer of 4 bytes length -STATIC byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int num_bytes_to_write) { - //printf("emit %d\n", num_bytes_to_write); - if (as->pass < ASM_THUMB_PASS_EMIT) { - as->code_offset += num_bytes_to_write; - return as->dummy_data; - } else { - assert(as->code_offset + num_bytes_to_write <= as->code_size); - byte *c = as->code_base + as->code_offset; - as->code_offset += num_bytes_to_write; - return c; - } -} - -uint asm_thumb_get_code_pos(asm_thumb_t *as) { - return as->code_offset; -} - -uint asm_thumb_get_code_size(asm_thumb_t *as) { - return as->code_size; -} - -void *asm_thumb_get_code(asm_thumb_t *as) { - return as->code_base; -} - /* STATIC void asm_thumb_write_byte_1(asm_thumb_t *as, byte b1) { byte *c = asm_thumb_get_cur_to_write_bytes(as, 1); @@ -223,55 +155,29 @@ void asm_thumb_exit(asm_thumb_t *as) { asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist)); } -void asm_thumb_label_assign(asm_thumb_t *as, uint label) { - assert(label < as->max_num_labels); - if (as->pass < ASM_THUMB_PASS_EMIT) { - // assign label offset - assert(as->label_offsets[label] == -1); - as->label_offsets[label] = as->code_offset; - } else { - // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT - //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset); - assert(as->label_offsets[label] == as->code_offset); - } -} - -void asm_thumb_align(asm_thumb_t* as, uint align) { - // TODO fill unused data with NOPs? 
- as->code_offset = (as->code_offset + align - 1) & (~(align - 1)); -} - -void asm_thumb_data(asm_thumb_t* as, uint bytesize, uint val) { - byte *c = asm_thumb_get_cur_to_write_bytes(as, bytesize); - // only write to the buffer in the emit pass (otherwise we overflow dummy_data) - if (as->pass == ASM_THUMB_PASS_EMIT) { - // little endian - for (uint i = 0; i < bytesize; i++) { - *c++ = val; - val >>= 8; - } - } -} - STATIC mp_uint_t get_label_dest(asm_thumb_t *as, uint label) { - assert(label < as->max_num_labels); - return as->label_offsets[label]; + assert(label < as->base.max_num_labels); + return as->base.label_offsets[label]; } void asm_thumb_op16(asm_thumb_t *as, uint op) { byte *c = asm_thumb_get_cur_to_write_bytes(as, 2); - // little endian - c[0] = op; - c[1] = op >> 8; + if (c != NULL) { + // little endian + c[0] = op; + c[1] = op >> 8; + } } void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2) { byte *c = asm_thumb_get_cur_to_write_bytes(as, 4); - // little endian, op1 then op2 - c[0] = op1; - c[1] = op1 >> 8; - c[2] = op2; - c[3] = op2 >> 8; + if (c != NULL) { + // little endian, op1 then op2 + c[0] = op1; + c[1] = op1 >> 8; + c[2] = op2; + c[3] = op2 >> 8; + } } #define OP_FORMAT_4(op, rlo_dest, rlo_src) ((op) | ((rlo_src) << 3) | (rlo_dest)) @@ -309,10 +215,10 @@ void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_ bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction asm_thumb_op16(as, OP_B_N(rel)); - return as->pass != ASM_THUMB_PASS_EMIT || SIGNED_FIT12(rel); + return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT12(rel); } #define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff)) @@ -323,11 +229,11 @@ bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) { bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction if (!wide) { asm_thumb_op16(as, OP_BCC_N(cond, rel)); - return as->pass != ASM_THUMB_PASS_EMIT || SIGNED_FIT9(rel); + return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT9(rel); } else { asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel)); return true; @@ -339,10 +245,10 @@ bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) { bool asm_thumb_bl_label(asm_thumb_t *as, uint label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction asm_thumb_op32(as, OP_BL_HI(rel), OP_BL_LO(rel)); - return as->pass != ASM_THUMB_PASS_EMIT || SIGNED_FIT23(rel); + return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel); } void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) { @@ -367,13 +273,13 @@ void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) { // TODO this is very inefficient, improve it! 
void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) { // align on machine-word + 2 - if ((as->code_offset & 3) == 0) { + if ((as->base.code_offset & 3) == 0) { asm_thumb_op16(as, ASM_THUMB_OP_NOP); } // jump over the i32 value (instruction prefetch adds 2 to PC) asm_thumb_op16(as, OP_B_N(2)); // store i32 on machine-word aligned boundary - asm_thumb_data(as, 4, i32); + mp_asm_base_data(&as->base, 4, i32); // do the actual load of the i32 value asm_thumb_mov_reg_i32_optimised(as, reg_dest, i32); } @@ -384,14 +290,14 @@ void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) { void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) { assert(rlo_src < ASM_THUMB_REG_R8); int word_offset = local_num; - assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0); + assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0); asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset)); } void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) { assert(rlo_dest < ASM_THUMB_REG_R8); int word_offset = local_num; - assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0); + assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0); asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset)); } @@ -400,7 +306,7 @@ void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) { void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) { assert(rlo_dest < ASM_THUMB_REG_R8); int word_offset = local_num; - assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0); + assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0); asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset)); } @@ -410,7 +316,7 @@ void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) void asm_thumb_b_label(asm_thumb_t *as, uint label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction if (dest != (mp_uint_t)-1 && rel <= -4) { // is a backwards jump, so we know the size of the jump on the first pass @@ -429,7 +335,7 @@ void asm_thumb_b_label(asm_thumb_t *as, uint label) { void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction if (dest != (mp_uint_t)-1 && rel <= -4) { // is a backwards jump, so we know the size of the jump on the first pass diff --git a/py/asmthumb.h b/py/asmthumb.h index 43d6c4286f..52e663b3bd 100644 --- a/py/asmthumb.h +++ b/py/asmthumb.h @@ -27,9 +27,7 @@ #define __MICROPY_INCLUDED_PY_ASMTHUMB_H__ #include "py/misc.h" - -#define ASM_THUMB_PASS_COMPUTE (1) -#define ASM_THUMB_PASS_EMIT (2) +#include "py/asmbase.h" #define ASM_THUMB_REG_R0 (0) #define ASM_THUMB_REG_R1 (1) @@ -64,24 +62,17 @@ #define ASM_THUMB_CC_GT (0xc) #define ASM_THUMB_CC_LE (0xd) -typedef struct _asm_thumb_t asm_thumb_t; +typedef struct _asm_thumb_t { + mp_asm_base_t base; + uint32_t push_reglist; + uint32_t stack_adjust; +} asm_thumb_t; -asm_thumb_t *asm_thumb_new(uint max_num_labels); -void asm_thumb_free(asm_thumb_t *as, bool free_code); -void asm_thumb_start_pass(asm_thumb_t *as, uint pass); void asm_thumb_end_pass(asm_thumb_t *as); -uint asm_thumb_get_code_pos(asm_thumb_t *as); -uint 
asm_thumb_get_code_size(asm_thumb_t *as); -void *asm_thumb_get_code(asm_thumb_t *as); void asm_thumb_entry(asm_thumb_t *as, int num_locals); void asm_thumb_exit(asm_thumb_t *as); -void asm_thumb_label_assign(asm_thumb_t *as, uint label); - -void asm_thumb_align(asm_thumb_t* as, uint align); -void asm_thumb_data(asm_thumb_t* as, uint bytesize, uint val); - // argument order follows ARM, in general dest is first // note there is a difference between movw and mov.w, and many others! @@ -246,4 +237,85 @@ void asm_thumb_b_label(asm_thumb_t *as, uint label); // convenience: picks narro void asm_thumb_bcc_label(asm_thumb_t *as, int cc, uint label); // convenience: picks narrow or wide branch void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp); // convenience +#if GENERIC_ASM_API + +// The following macros provide a (mostly) arch-independent API to +// generate native code, and are used by the native emitter. + +#define ASM_WORD_SIZE (4) + +#define REG_RET ASM_THUMB_REG_R0 +#define REG_ARG_1 ASM_THUMB_REG_R0 +#define REG_ARG_2 ASM_THUMB_REG_R1 +#define REG_ARG_3 ASM_THUMB_REG_R2 +#define REG_ARG_4 ASM_THUMB_REG_R3 +// rest of args go on stack + +#define REG_TEMP0 ASM_THUMB_REG_R0 +#define REG_TEMP1 ASM_THUMB_REG_R1 +#define REG_TEMP2 ASM_THUMB_REG_R2 + +#define REG_LOCAL_1 ASM_THUMB_REG_R4 +#define REG_LOCAL_2 ASM_THUMB_REG_R5 +#define REG_LOCAL_3 ASM_THUMB_REG_R6 +#define REG_LOCAL_NUM (3) + +#define ASM_T asm_thumb_t +#define ASM_END_PASS asm_thumb_end_pass +#define ASM_ENTRY asm_thumb_entry +#define ASM_EXIT asm_thumb_exit + +#define ASM_JUMP asm_thumb_b_label +#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ + do { \ + asm_thumb_cmp_rlo_i8(as, reg, 0); \ + asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \ + } while (0) +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ + do { \ + asm_thumb_cmp_rlo_i8(as, reg, 0); \ + asm_thumb_bcc_label(as, ASM_THUMB_CC_NE, label); \ + } while (0) +#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ + do { \ + asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \ + asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \ + } while (0) +#define ASM_CALL_IND(as, ptr, idx) asm_thumb_bl_ind(as, ptr, idx, ASM_THUMB_REG_R3) + +#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_thumb_mov_local_reg(as, (local_num), (reg)) +#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_optimised(as, (reg), (imm)) +#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_aligned(as, (reg), (imm)) +#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ + do { \ + asm_thumb_mov_reg_i32_optimised(as, (reg_temp), (imm)); \ + asm_thumb_mov_local_reg(as, (local_num), (reg_temp)); \ + } while (false) +#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local(as, (reg), (local_num)) +#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src)) +#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local_addr(as, (reg), (local_num)) + +#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift)) +#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift)) +#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src)) +#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src)) +#define ASM_AND_REG_REG(as, reg_dest, reg_src) 
asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src)) +#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src)) + +#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) +#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), (word_offset)) +#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) +#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) +#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) + +#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0) +#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), (word_offset)) +#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_thumb_strb_rlo_rlo_i5((as), (reg_src), (reg_base), 0) +#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_thumb_strh_rlo_rlo_i5((as), (reg_src), (reg_base), 0) +#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0) + +#endif // GENERIC_ASM_API + #endif // __MICROPY_INCLUDED_PY_ASMTHUMB_H__ diff --git a/py/asmx64.c b/py/asmx64.c index 5e23a594ed..cf1a86b3f0 100644 --- a/py/asmx64.c +++ b/py/asmx64.c @@ -116,132 +116,55 @@ #define UNSIGNED_FIT32(x) (((x) & 0xffffffff00000000) == 0) #define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80) -struct _asm_x64_t { - uint pass; - mp_uint_t code_offset; - mp_uint_t code_size; - byte *code_base; - byte dummy_data[8]; - - mp_uint_t max_num_labels; - mp_uint_t *label_offsets; - int num_locals; -}; - -asm_x64_t *asm_x64_new(mp_uint_t max_num_labels) { - asm_x64_t *as; - - as = m_new0(asm_x64_t, 1); - as->max_num_labels = max_num_labels; - as->label_offsets = m_new(mp_uint_t, max_num_labels); - - return as; -} - -void asm_x64_free(asm_x64_t *as, bool free_code) { - if (free_code) { - MP_PLAT_FREE_EXEC(as->code_base, as->code_size); - } - m_del(mp_uint_t, as->label_offsets, as->max_num_labels); - m_del_obj(asm_x64_t, as); -} - -void asm_x64_start_pass(asm_x64_t *as, uint pass) { - if (pass == ASM_X64_PASS_COMPUTE) { - // reset all labels - memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t)); - } if (pass == ASM_X64_PASS_EMIT) { - MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size); - if (as->code_base == NULL) { - assert(0); - } - //printf("code_size: %u\n", as->code_size); - } - as->pass = pass; - as->code_offset = 0; -} - -void asm_x64_end_pass(asm_x64_t *as) { - // could check labels are resolved... 
- (void)as; -} - -// all functions must go through this one to emit bytes -STATIC byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int num_bytes_to_write) { - //printf("emit %d\n", num_bytes_to_write); - if (as->pass < ASM_X64_PASS_EMIT) { - as->code_offset += num_bytes_to_write; - return as->dummy_data; - } else { - assert(as->code_offset + num_bytes_to_write <= as->code_size); - byte *c = as->code_base + as->code_offset; - as->code_offset += num_bytes_to_write; - return c; - } -} - -mp_uint_t asm_x64_get_code_pos(asm_x64_t *as) { - return as->code_offset; -} - -mp_uint_t asm_x64_get_code_size(asm_x64_t *as) { - return as->code_size; -} - -void *asm_x64_get_code(asm_x64_t *as) { - return as->code_base; +static inline byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int n) { + return mp_asm_base_get_cur_to_write_bytes(&as->base, n); } STATIC void asm_x64_write_byte_1(asm_x64_t *as, byte b1) { byte* c = asm_x64_get_cur_to_write_bytes(as, 1); - c[0] = b1; + if (c != NULL) { + c[0] = b1; + } } STATIC void asm_x64_write_byte_2(asm_x64_t *as, byte b1, byte b2) { byte* c = asm_x64_get_cur_to_write_bytes(as, 2); - c[0] = b1; - c[1] = b2; + if (c != NULL) { + c[0] = b1; + c[1] = b2; + } } STATIC void asm_x64_write_byte_3(asm_x64_t *as, byte b1, byte b2, byte b3) { byte* c = asm_x64_get_cur_to_write_bytes(as, 3); - c[0] = b1; - c[1] = b2; - c[2] = b3; + if (c != NULL) { + c[0] = b1; + c[1] = b2; + c[2] = b3; + } } STATIC void asm_x64_write_word32(asm_x64_t *as, int w32) { byte* c = asm_x64_get_cur_to_write_bytes(as, 4); - c[0] = IMM32_L0(w32); - c[1] = IMM32_L1(w32); - c[2] = IMM32_L2(w32); - c[3] = IMM32_L3(w32); + if (c != NULL) { + c[0] = IMM32_L0(w32); + c[1] = IMM32_L1(w32); + c[2] = IMM32_L2(w32); + c[3] = IMM32_L3(w32); + } } STATIC void asm_x64_write_word64(asm_x64_t *as, int64_t w64) { byte* c = asm_x64_get_cur_to_write_bytes(as, 8); - c[0] = IMM32_L0(w64); - c[1] = IMM32_L1(w64); - c[2] = IMM32_L2(w64); - c[3] = IMM32_L3(w64); - c[4] = IMM64_L4(w64); - c[5] = IMM64_L5(w64); - c[6] = IMM64_L6(w64); - c[7] = IMM64_L7(w64); -} - -// align must be a multiple of 2 -void asm_x64_align(asm_x64_t* as, mp_uint_t align) { - // TODO fill unused data with NOPs? 
- as->code_offset = (as->code_offset + align - 1) & (~(align - 1)); -} - -void asm_x64_data(asm_x64_t* as, mp_uint_t bytesize, mp_uint_t val) { - byte *c = asm_x64_get_cur_to_write_bytes(as, bytesize); - // machine is little endian - for (uint i = 0; i < bytesize; i++) { - *c++ = val; - val >>= 8; + if (c != NULL) { + c[0] = IMM32_L0(w64); + c[1] = IMM32_L1(w64); + c[2] = IMM32_L2(w64); + c[3] = IMM32_L3(w64); + c[4] = IMM64_L4(w64); + c[5] = IMM64_L5(w64); + c[6] = IMM64_L6(w64); + c[7] = IMM64_L7(w64); } } @@ -440,7 +363,7 @@ void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r // src_i64 is stored as a full word in the code, and aligned to machine-word boundary void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64) { // mov instruction uses 2 bytes for the instruction, before the i64 - while (((as->code_offset + 2) & (WORD_SIZE - 1)) != 0) { + while (((as->base.code_offset + 2) & (WORD_SIZE - 1)) != 0) { asm_x64_nop(as); } asm_x64_mov_i64_to_r64(as, src_i64, dest_r64); @@ -552,27 +475,14 @@ void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8) { asm_x64_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r8)); } -void asm_x64_label_assign(asm_x64_t *as, mp_uint_t label) { - assert(label < as->max_num_labels); - if (as->pass < ASM_X64_PASS_EMIT) { - // assign label offset - assert(as->label_offsets[label] == (mp_uint_t)-1); - as->label_offsets[label] = as->code_offset; - } else { - // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT - //printf("l%d: (at %ld=%ld)\n", label, as->label_offsets[label], as->code_offset); - assert(as->label_offsets[label] == as->code_offset); - } -} - STATIC mp_uint_t get_label_dest(asm_x64_t *as, mp_uint_t label) { - assert(label < as->max_num_labels); - return as->label_offsets[label]; + assert(label < as->base.max_num_labels); + return as->base.label_offsets[label]; } void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; if (dest != (mp_uint_t)-1 && rel < 0) { // is a backwards jump, so we know the size of the jump on the first pass // calculate rel assuming 8 bit relative jump @@ -594,7 +504,7 @@ void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label) { void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; if (dest != (mp_uint_t)-1 && rel < 0) { // is a backwards jump, so we know the size of the jump on the first pass // calculate rel assuming 8 bit relative jump diff --git a/py/asmx64.h b/py/asmx64.h index 6fbc2c9069..4499c53c32 100644 --- a/py/asmx64.h +++ b/py/asmx64.h @@ -28,6 +28,7 @@ #include "py/mpconfig.h" #include "py/misc.h" +#include "py/asmbase.h" // AMD64 calling convention is: // - args pass in: RDI, RSI, RDX, RCX, R08, R09 @@ -41,9 +42,6 @@ // NOTE: this is a change from the old convention used in this file and // some functions still use the old (reverse) convention. 
-#define ASM_X64_PASS_COMPUTE (1) -#define ASM_X64_PASS_EMIT (2) - #define ASM_X64_REG_RAX (0) #define ASM_X64_REG_RCX (1) #define ASM_X64_REG_RDX (2) @@ -72,18 +70,14 @@ #define ASM_X64_CC_JLE (0xe) // less or equal, signed #define ASM_X64_CC_JG (0xf) // greater, signed -typedef struct _asm_x64_t asm_x64_t; - -asm_x64_t* asm_x64_new(mp_uint_t max_num_labels); -void asm_x64_free(asm_x64_t* as, bool free_code); -void asm_x64_start_pass(asm_x64_t *as, uint pass); -void asm_x64_end_pass(asm_x64_t *as); -mp_uint_t asm_x64_get_code_pos(asm_x64_t *as); -mp_uint_t asm_x64_get_code_size(asm_x64_t* as); -void* asm_x64_get_code(asm_x64_t* as); +typedef struct _asm_x64_t { + mp_asm_base_t base; + int num_locals; +} asm_x64_t; -void asm_x64_align(asm_x64_t *as, mp_uint_t align); -void asm_x64_data(asm_x64_t *as, mp_uint_t bytesize, mp_uint_t val); +static inline void asm_x64_end_pass(asm_x64_t *as) { + (void)as; +} void asm_x64_nop(asm_x64_t* as); void asm_x64_push_r64(asm_x64_t* as, int src_r64); @@ -111,7 +105,6 @@ void asm_x64_mul_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); void asm_x64_cmp_r64_with_r64(asm_x64_t* as, int src_r64_a, int src_r64_b); void asm_x64_test_r8_with_r8(asm_x64_t* as, int src_r64_a, int src_r64_b); void asm_x64_setcc_r8(asm_x64_t* as, int jcc_type, int dest_r8); -void asm_x64_label_assign(asm_x64_t* as, mp_uint_t label); void asm_x64_jmp_label(asm_x64_t* as, mp_uint_t label); void asm_x64_jcc_label(asm_x64_t* as, int jcc_type, mp_uint_t label); void asm_x64_entry(asm_x64_t* as, int num_locals); @@ -121,4 +114,87 @@ void asm_x64_mov_r64_to_local(asm_x64_t* as, int src_r64, int dest_local_num); void asm_x64_mov_local_addr_to_r64(asm_x64_t* as, int local_num, int dest_r64); void asm_x64_call_ind(asm_x64_t* as, void* ptr, int temp_r32); +#if GENERIC_ASM_API + +// The following macros provide a (mostly) arch-independent API to +// generate native code, and are used by the native emitter. 
+ +#define ASM_WORD_SIZE (8) + +#define REG_RET ASM_X64_REG_RAX +#define REG_ARG_1 ASM_X64_REG_RDI +#define REG_ARG_2 ASM_X64_REG_RSI +#define REG_ARG_3 ASM_X64_REG_RDX +#define REG_ARG_4 ASM_X64_REG_RCX +#define REG_ARG_5 ASM_X64_REG_R08 + +// caller-save +#define REG_TEMP0 ASM_X64_REG_RAX +#define REG_TEMP1 ASM_X64_REG_RDI +#define REG_TEMP2 ASM_X64_REG_RSI + +// callee-save +#define REG_LOCAL_1 ASM_X64_REG_RBX +#define REG_LOCAL_2 ASM_X64_REG_R12 +#define REG_LOCAL_3 ASM_X64_REG_R13 +#define REG_LOCAL_NUM (3) + +#define ASM_T asm_x64_t +#define ASM_END_PASS asm_x64_end_pass +#define ASM_ENTRY asm_x64_entry +#define ASM_EXIT asm_x64_exit + +#define ASM_JUMP asm_x64_jmp_label +#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ + do { \ + asm_x64_test_r8_with_r8(as, reg, reg); \ + asm_x64_jcc_label(as, ASM_X64_CC_JZ, label); \ + } while (0) +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ + do { \ + asm_x64_test_r8_with_r8(as, reg, reg); \ + asm_x64_jcc_label(as, ASM_X64_CC_JNZ, label); \ + } while (0) +#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ + do { \ + asm_x64_cmp_r64_with_r64(as, reg1, reg2); \ + asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \ + } while (0) +#define ASM_CALL_IND(as, ptr, idx) asm_x64_call_ind(as, ptr, ASM_X64_REG_RAX) + +#define ASM_MOV_REG_TO_LOCAL asm_x64_mov_r64_to_local +#define ASM_MOV_IMM_TO_REG asm_x64_mov_i64_to_r64_optimised +#define ASM_MOV_ALIGNED_IMM_TO_REG asm_x64_mov_i64_to_r64_aligned +#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ + do { \ + asm_x64_mov_i64_to_r64_optimised(as, (imm), (reg_temp)); \ + asm_x64_mov_r64_to_local(as, (reg_temp), (local_num)); \ + } while (false) +#define ASM_MOV_LOCAL_TO_REG asm_x64_mov_local_to_r64 +#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src)) +#define ASM_MOV_LOCAL_ADDR_TO_REG asm_x64_mov_local_addr_to_r64 + +#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg)) +#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg)) +#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src)) +#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src)) +#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src)) +#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src)) +#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src)) +#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src)) + +#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest)) +#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest)) +#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest)) +#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest)) +#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem32_to_r64zx((as), (reg_base), 0, (reg_dest)) + +#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0) +#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 8 * (word_offset)) +#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0) +#define 
ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0) +#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x64_mov_r32_to_mem32((as), (reg_src), (reg_base), 0) + +#endif // GENERIC_ASM_API + #endif // __MICROPY_INCLUDED_PY_ASMX64_H__ diff --git a/py/asmx86.c b/py/asmx86.c index 40958826fa..dd3ad02242 100644 --- a/py/asmx86.c +++ b/py/asmx86.c @@ -100,118 +100,37 @@ #define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80) -struct _asm_x86_t { - uint pass; - mp_uint_t code_offset; - mp_uint_t code_size; - byte *code_base; - byte dummy_data[8]; - - mp_uint_t max_num_labels; - mp_uint_t *label_offsets; - int num_locals; -}; - -asm_x86_t *asm_x86_new(mp_uint_t max_num_labels) { - asm_x86_t *as; - - as = m_new0(asm_x86_t, 1); - as->max_num_labels = max_num_labels; - as->label_offsets = m_new(mp_uint_t, max_num_labels); - - return as; -} - -void asm_x86_free(asm_x86_t *as, bool free_code) { - if (free_code) { - MP_PLAT_FREE_EXEC(as->code_base, as->code_size); - } - m_del(mp_uint_t, as->label_offsets, as->max_num_labels); - m_del_obj(asm_x86_t, as); -} - -void asm_x86_start_pass(asm_x86_t *as, mp_uint_t pass) { - if (pass == ASM_X86_PASS_COMPUTE) { - // reset all labels - memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t)); - } else if (pass == ASM_X86_PASS_EMIT) { - MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size); - if (as->code_base == NULL) { - assert(0); - } - } - as->pass = pass; - as->code_offset = 0; -} - -void asm_x86_end_pass(asm_x86_t *as) { - (void)as; -} - -// all functions must go through this one to emit bytes -STATIC byte *asm_x86_get_cur_to_write_bytes(asm_x86_t *as, int num_bytes_to_write) { - //printf("emit %d\n", num_bytes_to_write); - if (as->pass < ASM_X86_PASS_EMIT) { - as->code_offset += num_bytes_to_write; - return as->dummy_data; - } else { - assert(as->code_offset + num_bytes_to_write <= as->code_size); - byte *c = as->code_base + as->code_offset; - as->code_offset += num_bytes_to_write; - return c; - } -} - -mp_uint_t asm_x86_get_code_pos(asm_x86_t *as) { - return as->code_offset; -} - -mp_uint_t asm_x86_get_code_size(asm_x86_t *as) { - return as->code_size; -} - -void *asm_x86_get_code(asm_x86_t *as) { - return as->code_base; -} - STATIC void asm_x86_write_byte_1(asm_x86_t *as, byte b1) { - byte* c = asm_x86_get_cur_to_write_bytes(as, 1); - c[0] = b1; + byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 1); + if (c != NULL) { + c[0] = b1; + } } STATIC void asm_x86_write_byte_2(asm_x86_t *as, byte b1, byte b2) { - byte* c = asm_x86_get_cur_to_write_bytes(as, 2); - c[0] = b1; - c[1] = b2; + byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 2); + if (c != NULL) { + c[0] = b1; + c[1] = b2; + } } STATIC void asm_x86_write_byte_3(asm_x86_t *as, byte b1, byte b2, byte b3) { - byte* c = asm_x86_get_cur_to_write_bytes(as, 3); - c[0] = b1; - c[1] = b2; - c[2] = b3; + byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 3); + if (c != NULL) { + c[0] = b1; + c[1] = b2; + c[2] = b3; + } } STATIC void asm_x86_write_word32(asm_x86_t *as, int w32) { - byte* c = asm_x86_get_cur_to_write_bytes(as, 4); - c[0] = IMM32_L0(w32); - c[1] = IMM32_L1(w32); - c[2] = IMM32_L2(w32); - c[3] = IMM32_L3(w32); -} - -// align must be a multiple of 2 -void asm_x86_align(asm_x86_t* as, mp_uint_t align) { - // TODO fill unused data with NOPs? 
- as->code_offset = (as->code_offset + align - 1) & (~(align - 1)); -} - -void asm_x86_data(asm_x86_t* as, mp_uint_t bytesize, mp_uint_t val) { - byte *c = asm_x86_get_cur_to_write_bytes(as, bytesize); - // machine is little endian - for (uint i = 0; i < bytesize; i++) { - *c++ = val; - val >>= 8; + byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4); + if (c != NULL) { + c[0] = IMM32_L0(w32); + c[1] = IMM32_L1(w32); + c[2] = IMM32_L2(w32); + c[3] = IMM32_L3(w32); } } @@ -313,7 +232,7 @@ void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) { // src_i32 is stored as a full word in the code, and aligned to machine-word boundary void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32) { // mov instruction uses 1 byte for the instruction, before the i32 - while (((as->code_offset + 1) & (WORD_SIZE - 1)) != 0) { + while (((as->base.code_offset + 1) & (WORD_SIZE - 1)) != 0) { asm_x86_nop(as); } asm_x86_mov_i32_to_r32(as, src_i32, dest_r32); @@ -419,27 +338,14 @@ void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8) { asm_x86_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r8)); } -void asm_x86_label_assign(asm_x86_t *as, mp_uint_t label) { - assert(label < as->max_num_labels); - if (as->pass < ASM_X86_PASS_EMIT) { - // assign label offset - assert(as->label_offsets[label] == (mp_uint_t)-1); - as->label_offsets[label] = as->code_offset; - } else { - // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT - //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset); - assert(as->label_offsets[label] == as->code_offset); - } -} - STATIC mp_uint_t get_label_dest(asm_x86_t *as, mp_uint_t label) { - assert(label < as->max_num_labels); - return as->label_offsets[label]; + assert(label < as->base.max_num_labels); + return as->base.label_offsets[label]; } void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; if (dest != (mp_uint_t)-1 && rel < 0) { // is a backwards jump, so we know the size of the jump on the first pass // calculate rel assuming 8 bit relative jump @@ -461,7 +367,7 @@ void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) { void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) { mp_uint_t dest = get_label_dest(as, label); - mp_int_t rel = dest - as->code_offset; + mp_int_t rel = dest - as->base.code_offset; if (dest != (mp_uint_t)-1 && rel < 0) { // is a backwards jump, so we know the size of the jump on the first pass // calculate rel assuming 8 bit relative jump @@ -593,7 +499,7 @@ void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32) // this reduces code size by 2 bytes per call, but doesn't seem to speed it up at all /* asm_x86_write_byte_1(as, OPCODE_CALL_REL32); - asm_x86_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4)); + asm_x86_write_word32(as, ptr - (void*)(as->code_base + as->base.code_offset + 4)); */ // the caller must clean up the stack diff --git a/py/asmx86.h b/py/asmx86.h index e0c57722ad..0b44af6639 100644 --- a/py/asmx86.h +++ b/py/asmx86.h @@ -28,6 +28,7 @@ #include "py/mpconfig.h" #include "py/misc.h" +#include "py/asmbase.h" // x86 cdecl calling convention is: // - args passed on the stack in reverse order @@ -42,9 +43,6 @@ // NOTE: this is a change from the old convention used in this file and // 
some functions still use the old (reverse) convention. -#define ASM_X86_PASS_COMPUTE (1) -#define ASM_X86_PASS_EMIT (2) - #define ASM_X86_REG_EAX (0) #define ASM_X86_REG_ECX (1) #define ASM_X86_REG_EDX (2) @@ -75,18 +73,14 @@ #define ASM_X86_CC_JLE (0xe) // less or equal, signed #define ASM_X86_CC_JG (0xf) // greater, signed -typedef struct _asm_x86_t asm_x86_t; - -asm_x86_t* asm_x86_new(mp_uint_t max_num_labels); -void asm_x86_free(asm_x86_t* as, bool free_code); -void asm_x86_start_pass(asm_x86_t *as, mp_uint_t pass); -void asm_x86_end_pass(asm_x86_t *as); -mp_uint_t asm_x86_get_code_pos(asm_x86_t *as); -mp_uint_t asm_x86_get_code_size(asm_x86_t* as); -void* asm_x86_get_code(asm_x86_t* as); +typedef struct _asm_x86_t { + mp_asm_base_t base; + int num_locals; +} asm_x86_t; -void asm_x86_align(asm_x86_t *as, mp_uint_t align); -void asm_x86_data(asm_x86_t *as, mp_uint_t bytesize, mp_uint_t val); +static inline void asm_x86_end_pass(asm_x86_t *as) { + (void)as; +} void asm_x86_mov_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32); @@ -108,7 +102,6 @@ void asm_x86_mul_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); void asm_x86_cmp_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b); void asm_x86_test_r8_with_r8(asm_x86_t* as, int src_r32_a, int src_r32_b); void asm_x86_setcc_r8(asm_x86_t* as, mp_uint_t jcc_type, int dest_r8); -void asm_x86_label_assign(asm_x86_t* as, mp_uint_t label); void asm_x86_jmp_label(asm_x86_t* as, mp_uint_t label); void asm_x86_jcc_label(asm_x86_t* as, mp_uint_t jcc_type, mp_uint_t label); void asm_x86_entry(asm_x86_t* as, mp_uint_t num_locals); @@ -119,4 +112,87 @@ void asm_x86_mov_r32_to_local(asm_x86_t* as, int src_r32, int dest_local_num); void asm_x86_mov_local_addr_to_r32(asm_x86_t* as, int local_num, int dest_r32); void asm_x86_call_ind(asm_x86_t* as, void* ptr, mp_uint_t n_args, int temp_r32); +#if GENERIC_ASM_API + +// The following macros provide a (mostly) arch-independent API to +// generate native code, and are used by the native emitter. 
+ +#define ASM_WORD_SIZE (4) + +#define REG_RET ASM_X86_REG_EAX +#define REG_ARG_1 ASM_X86_REG_ARG_1 +#define REG_ARG_2 ASM_X86_REG_ARG_2 +#define REG_ARG_3 ASM_X86_REG_ARG_3 +#define REG_ARG_4 ASM_X86_REG_ARG_4 +#define REG_ARG_5 ASM_X86_REG_ARG_5 + +// caller-save, so can be used as temporaries +#define REG_TEMP0 ASM_X86_REG_EAX +#define REG_TEMP1 ASM_X86_REG_ECX +#define REG_TEMP2 ASM_X86_REG_EDX + +// callee-save, so can be used as locals +#define REG_LOCAL_1 ASM_X86_REG_EBX +#define REG_LOCAL_2 ASM_X86_REG_ESI +#define REG_LOCAL_3 ASM_X86_REG_EDI +#define REG_LOCAL_NUM (3) + +#define ASM_T asm_x86_t +#define ASM_END_PASS asm_x86_end_pass +#define ASM_ENTRY asm_x86_entry +#define ASM_EXIT asm_x86_exit + +#define ASM_JUMP asm_x86_jmp_label +#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ + do { \ + asm_x86_test_r8_with_r8(as, reg, reg); \ + asm_x86_jcc_label(as, ASM_X86_CC_JZ, label); \ + } while (0) +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ + do { \ + asm_x86_test_r8_with_r8(as, reg, reg); \ + asm_x86_jcc_label(as, ASM_X86_CC_JNZ, label); \ + } while (0) +#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ + do { \ + asm_x86_cmp_r32_with_r32(as, reg1, reg2); \ + asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \ + } while (0) +#define ASM_CALL_IND(as, ptr, idx) asm_x86_call_ind(as, ptr, mp_f_n_args[idx], ASM_X86_REG_EAX) + +#define ASM_MOV_REG_TO_LOCAL asm_x86_mov_r32_to_local +#define ASM_MOV_IMM_TO_REG asm_x86_mov_i32_to_r32 +#define ASM_MOV_ALIGNED_IMM_TO_REG asm_x86_mov_i32_to_r32_aligned +#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ + do { \ + asm_x86_mov_i32_to_r32(as, (imm), (reg_temp)); \ + asm_x86_mov_r32_to_local(as, (reg_temp), (local_num)); \ + } while (false) +#define ASM_MOV_LOCAL_TO_REG asm_x86_mov_local_to_r32 +#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x86_mov_r32_r32((as), (reg_dest), (reg_src)) +#define ASM_MOV_LOCAL_ADDR_TO_REG asm_x86_mov_local_addr_to_r32 + +#define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg)) +#define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg)) +#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src)) +#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src)) +#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src)) +#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src)) +#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src)) +#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src)) + +#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest)) +#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest)) +#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest)) +#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest)) +#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest)) + +#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0) +#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset)) +#define ASM_STORE8_REG_REG(as, reg_src, reg_base) 
asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0) +#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0) +#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0) + +#endif // GENERIC_ASM_API + #endif // __MICROPY_INCLUDED_PY_ASMX86_H__ diff --git a/py/asmxtensa.c b/py/asmxtensa.c new file mode 100644 index 0000000000..00448dfc59 --- /dev/null +++ b/py/asmxtensa.c @@ -0,0 +1,174 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include <stdio.h> +#include <assert.h> + +#include "py/mpconfig.h" + +// wrapper around everything in this file +#if MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA + +#include "py/asmxtensa.h" + +#define WORD_SIZE (4) +#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)) +#define SIGNED_FIT12(x) ((((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)) + +void asm_xtensa_end_pass(asm_xtensa_t *as) { + as->num_const = as->cur_const; + as->cur_const = 0; + + #if 0 + // make a hex dump of the machine code + if (as->base.pass == MP_ASM_PASS_EMIT) { + uint8_t *d = as->base.code_base; + printf("XTENSA ASM:"); + for (int i = 0; i < ((as->base.code_size + 15) & ~15); ++i) { + if (i % 16 == 0) { + printf("\n%08x:", (uint32_t)&d[i]); + } + if (i % 2 == 0) { + printf(" "); + } + printf("%02x", d[i]); + } + printf("\n"); + } + #endif +} + +void asm_xtensa_entry(asm_xtensa_t *as, int num_locals) { + // jump over the constants + asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4); + mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte + as->const_table = (uint32_t*)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4); + + // adjust the stack-pointer to store a0, a12, a13, a14 and locals, 16-byte aligned + as->stack_adjust = (((4 + num_locals) * WORD_SIZE) + 15) & ~15; + asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, -as->stack_adjust); + + // save return value (a0) and callee-save registers (a12, a13, a14) + asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0); + asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A12, ASM_XTENSA_REG_A1, 1); + asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A13, ASM_XTENSA_REG_A1, 2); + asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A14, ASM_XTENSA_REG_A1, 3); +} + +void 
asm_xtensa_exit(asm_xtensa_t *as) { + // restore registers + asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A14, ASM_XTENSA_REG_A1, 3); + asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A13, ASM_XTENSA_REG_A1, 2); + asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A12, ASM_XTENSA_REG_A1, 1); + asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0); + + // restore stack-pointer and return + asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, as->stack_adjust); + asm_xtensa_op_ret_n(as); +} + +STATIC uint32_t get_label_dest(asm_xtensa_t *as, uint label) { + assert(label < as->base.max_num_labels); + return as->base.label_offsets[label]; +} + +void asm_xtensa_op16(asm_xtensa_t *as, uint16_t op) { + uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 2); + if (c != NULL) { + c[0] = op; + c[1] = op >> 8; + } +} + +void asm_xtensa_op24(asm_xtensa_t *as, uint32_t op) { + uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 3); + if (c != NULL) { + c[0] = op; + c[1] = op >> 8; + c[2] = op >> 16; + } +} + +void asm_xtensa_j_label(asm_xtensa_t *as, uint label) { + uint32_t dest = get_label_dest(as, label); + int32_t rel = dest - as->base.code_offset - 4; + // we assume rel, as a signed int, fits in 18-bits + asm_xtensa_op_j(as, rel); +} + +void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label) { + uint32_t dest = get_label_dest(as, label); + int32_t rel = dest - as->base.code_offset - 4; + if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT12(rel)) { + printf("ERROR: xtensa bccz out of range\n"); + } + asm_xtensa_op_bccz(as, cond, reg, rel); +} + +void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint reg2, uint label) { + uint32_t dest = get_label_dest(as, label); + int32_t rel = dest - as->base.code_offset - 4; + if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT8(rel)) { + printf("ERROR: xtensa bcc out of range\n"); + } + asm_xtensa_op_bcc(as, cond, reg1, reg2, rel); +} + +// convenience function; reg_dest must be different from reg_src[12] +void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, uint reg_src1, uint reg_src2) { + asm_xtensa_op_movi_n(as, reg_dest, 1); + asm_xtensa_op_bcc(as, cond, reg_src1, reg_src2, 1); + asm_xtensa_op_movi_n(as, reg_dest, 0); +} + +void asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32) { + if (SIGNED_FIT12(i32)) { + asm_xtensa_op_movi(as, reg_dest, i32); + } else { + // load the constant + asm_xtensa_op_l32r(as, reg_dest, as->base.code_offset, 4 + as->cur_const * WORD_SIZE); + // store the constant in the table + if (as->const_table != NULL) { + as->const_table[as->cur_const] = i32; + } + ++as->cur_const; + } +} + +void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src) { + asm_xtensa_op_s32i(as, reg_src, ASM_XTENSA_REG_A1, 4 + local_num); +} + +void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num) { + asm_xtensa_op_l32i(as, reg_dest, ASM_XTENSA_REG_A1, 4 + local_num); +} + +void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num) { + asm_xtensa_op_mov_n(as, reg_dest, ASM_XTENSA_REG_A1); + asm_xtensa_op_addi(as, reg_dest, reg_dest, (4 + local_num) * WORD_SIZE); +} + +#endif // MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA diff --git a/py/asmxtensa.h b/py/asmxtensa.h new file mode 100644 index 0000000000..12083252eb --- /dev/null +++ b/py/asmxtensa.h @@ -0,0 +1,324 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * 
Copyright (c) 2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef MICROPY_INCLUDED_PY_ASMXTENSA_H +#define MICROPY_INCLUDED_PY_ASMXTENSA_H + +#include "py/asmbase.h" + +// calling conventions: +// up to 6 args in a2-a7 +// return value in a2 +// PC stored in a0 +// stack pointer is a1, stack full descending, is aligned to 16 bytes +// callee save: a1, a12, a13, a14, a15 +// caller save: a3 + +#define ASM_XTENSA_REG_A0 (0) +#define ASM_XTENSA_REG_A1 (1) +#define ASM_XTENSA_REG_A2 (2) +#define ASM_XTENSA_REG_A3 (3) +#define ASM_XTENSA_REG_A4 (4) +#define ASM_XTENSA_REG_A5 (5) +#define ASM_XTENSA_REG_A6 (6) +#define ASM_XTENSA_REG_A7 (7) +#define ASM_XTENSA_REG_A8 (8) +#define ASM_XTENSA_REG_A9 (9) +#define ASM_XTENSA_REG_A10 (10) +#define ASM_XTENSA_REG_A11 (11) +#define ASM_XTENSA_REG_A12 (12) +#define ASM_XTENSA_REG_A13 (13) +#define ASM_XTENSA_REG_A14 (14) +#define ASM_XTENSA_REG_A15 (15) + +// for bccz +#define ASM_XTENSA_CCZ_EQ (0) +#define ASM_XTENSA_CCZ_NE (1) + +// for bcc and setcc +#define ASM_XTENSA_CC_NONE (0) +#define ASM_XTENSA_CC_EQ (1) +#define ASM_XTENSA_CC_LT (2) +#define ASM_XTENSA_CC_LTU (3) +#define ASM_XTENSA_CC_ALL (4) +#define ASM_XTENSA_CC_BC (5) +#define ASM_XTENSA_CC_ANY (8) +#define ASM_XTENSA_CC_NE (9) +#define ASM_XTENSA_CC_GE (10) +#define ASM_XTENSA_CC_GEU (11) +#define ASM_XTENSA_CC_NALL (12) +#define ASM_XTENSA_CC_BS (13) + +// macros for encoding instructions (little endian versions) +#define ASM_XTENSA_ENCODE_RRR(op0, op1, op2, r, s, t) \ + (((op2) << 20) | ((op1) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_RRI4(op0, op1, r, s, t, imm4) \ + (((imm4) << 20) | ((op1) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_RRI8(op0, r, s, t, imm8) \ + (((imm8) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_RI16(op0, t, imm16) \ + (((imm16) << 8) | ((t) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_RSR(op0, op1, op2, rs, t) \ + (((op2) << 20) | ((op1) << 16) | ((rs) << 8) | ((t) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_CALL(op0, n, offset) \ + (((offset) << 6) | ((n) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_CALLX(op0, op1, op2, r, s, m, n) \ + (((op2) << 20) | ((op1) << 16) | ((r) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_BRI8(op0, r, s, m, n, imm8) \ + (((imm8) << 16) | ((r) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_BRI12(op0, 
s, m, n, imm12) \ + (((imm12) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_RRRN(op0, r, s, t) \ + (((r) << 12) | ((s) << 8) | ((t) << 4) | (op0)) +#define ASM_XTENSA_ENCODE_RI7(op0, s, imm7) \ + ((((imm7) & 0xf) << 12) | ((s) << 8) | ((imm7) & 0x70) | (op0)) + +typedef struct _asm_xtensa_t { + mp_asm_base_t base; + uint32_t cur_const; + uint32_t num_const; + uint32_t *const_table; + uint32_t stack_adjust; +} asm_xtensa_t; + +void asm_xtensa_end_pass(asm_xtensa_t *as); + +void asm_xtensa_entry(asm_xtensa_t *as, int num_locals); +void asm_xtensa_exit(asm_xtensa_t *as); + +void asm_xtensa_op16(asm_xtensa_t *as, uint16_t op); +void asm_xtensa_op24(asm_xtensa_t *as, uint32_t op); + +// raw instructions + +static inline void asm_xtensa_op_add(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 8, reg_dest, reg_src_a, reg_src_b)); +} + +static inline void asm_xtensa_op_addi(asm_xtensa_t *as, uint reg_dest, uint reg_src, int imm8) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 12, reg_dest, reg_src, imm8 & 0xff)); +} + +static inline void asm_xtensa_op_and(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 1, reg_dest, reg_src_a, reg_src_b)); +} + +static inline void asm_xtensa_op_bcc(asm_xtensa_t *as, uint cond, uint reg_src1, uint reg_src2, int32_t rel8) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(7, cond, reg_src1, reg_src2, rel8 & 0xff)); +} + +static inline void asm_xtensa_op_bccz(asm_xtensa_t *as, uint cond, uint reg_src, int32_t rel12) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_BRI12(6, reg_src, cond, 1, rel12 & 0xfff)); +} + +static inline void asm_xtensa_op_callx0(asm_xtensa_t *as, uint reg) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 3, 0)); +} + +static inline void asm_xtensa_op_j(asm_xtensa_t *as, int32_t rel18) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALL(6, 0, rel18 & 0x3ffff)); +} + +static inline void asm_xtensa_op_jx(asm_xtensa_t *as, uint reg) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 2, 2)); +} + +static inline void asm_xtensa_op_l8ui(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint byte_offset) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 0, reg_base, reg_dest, byte_offset & 0xff)); +} + +static inline void asm_xtensa_op_l16ui(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint half_word_offset) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 1, reg_base, reg_dest, half_word_offset & 0xff)); +} + +static inline void asm_xtensa_op_l32i(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 2, reg_base, reg_dest, word_offset & 0xff)); +} + +static inline void asm_xtensa_op_l32i_n(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) { + asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(8, word_offset & 0xf, reg_base, reg_dest)); +} + +static inline void asm_xtensa_op_l32r(asm_xtensa_t *as, uint reg_dest, uint32_t op_off, uint32_t dest_off) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RI16(1, reg_dest, ((dest_off - ((op_off + 3) & ~3)) >> 2) & 0xffff)); +} + +static inline void asm_xtensa_op_mov_n(asm_xtensa_t *as, uint reg_dest, uint reg_src) { + asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 0, reg_src, reg_dest)); +} + +static inline void asm_xtensa_op_movi(asm_xtensa_t *as, uint reg_dest, int32_t imm12) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 10, (imm12 >> 8) & 0xf, reg_dest, 
imm12 & 0xff)); +} + +static inline void asm_xtensa_op_movi_n(asm_xtensa_t *as, uint reg_dest, int imm4) { + asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RI7(12, reg_dest, imm4)); +} + +static inline void asm_xtensa_op_mull(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 2, 8, reg_dest, reg_src_a, reg_src_b)); +} + +static inline void asm_xtensa_op_or(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 2, reg_dest, reg_src_a, reg_src_b)); +} + +static inline void asm_xtensa_op_ret_n(asm_xtensa_t *as) { + asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 15, 0, 0)); +} + +static inline void asm_xtensa_op_s8i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint byte_offset) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 4, reg_base, reg_src, byte_offset & 0xff)); +} + +static inline void asm_xtensa_op_s16i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint half_word_offset) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 5, reg_base, reg_src, half_word_offset & 0xff)); +} + +static inline void asm_xtensa_op_s32i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 6, reg_base, reg_src, word_offset & 0xff)); +} + +static inline void asm_xtensa_op_s32i_n(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) { + asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(9, word_offset & 0xf, reg_base, reg_src)); +} + +static inline void asm_xtensa_op_sll(asm_xtensa_t *as, uint reg_dest, uint reg_src) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 10, reg_dest, reg_src, 0)); +} + +static inline void asm_xtensa_op_sra(asm_xtensa_t *as, uint reg_dest, uint reg_src) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 11, reg_dest, 0, reg_src)); +} + +static inline void asm_xtensa_op_ssl(asm_xtensa_t *as, uint reg_src) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 1, reg_src, 0)); +} + +static inline void asm_xtensa_op_ssr(asm_xtensa_t *as, uint reg_src) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 0, reg_src, 0)); +} + +static inline void asm_xtensa_op_sub(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 12, reg_dest, reg_src_a, reg_src_b)); +} + +static inline void asm_xtensa_op_xor(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 3, reg_dest, reg_src_a, reg_src_b)); +} + +// convenience functions +void asm_xtensa_j_label(asm_xtensa_t *as, uint label); +void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label); +void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint reg2, uint label); +void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, uint reg_src1, uint reg_src2); +void asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32); +void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src); +void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num); +void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num); + +#if GENERIC_ASM_API + +// The following macros provide a (mostly) arch-independent API to +// generate native code, and are used by the native emitter. 
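(A hypothetical sketch, not part of this patch: it shows how the Xtensa assembler object declared above could be driven through its two passes with the shared asmbase helpers. The wrapper functions, the label count of 4, and the assumed includes of <string.h> and "py/asmxtensa.h" are invented here for illustration.)

    // Sketch: assemble "return a2 + a3" in two passes (compute, then emit).
    static void emit_body(asm_xtensa_t *as) {
        asm_xtensa_entry(as, 0);   // prologue: save a0/a12-a14, adjust a1
        asm_xtensa_op_add(as, ASM_XTENSA_REG_A2, ASM_XTENSA_REG_A2, ASM_XTENSA_REG_A3);
        asm_xtensa_exit(as);       // epilogue: restore registers, ret.n
        asm_xtensa_end_pass(as);   // finalise this pass (records const count)
    }

    void example_assemble(void) {
        asm_xtensa_t as;
        memset(&as, 0, sizeof(as));
        mp_asm_base_init(&as.base, 4);                        // room for 4 labels
        mp_asm_base_start_pass(&as.base, MP_ASM_PASS_COMPUTE);
        emit_body(&as);                                       // pass 1: compute sizes/offsets
        mp_asm_base_start_pass(&as.base, MP_ASM_PASS_EMIT);
        emit_body(&as);                                       // pass 2: write the bytes
        void *code = mp_asm_base_get_code(&as.base);          // executable buffer
        (void)code;
    }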
+ +#define ASM_WORD_SIZE (4) + +#define REG_RET ASM_XTENSA_REG_A2 +#define REG_ARG_1 ASM_XTENSA_REG_A2 +#define REG_ARG_2 ASM_XTENSA_REG_A3 +#define REG_ARG_3 ASM_XTENSA_REG_A4 +#define REG_ARG_4 ASM_XTENSA_REG_A5 +#define REG_ARG_5 ASM_XTENSA_REG_A6 + +#define REG_TEMP0 ASM_XTENSA_REG_A2 +#define REG_TEMP1 ASM_XTENSA_REG_A3 +#define REG_TEMP2 ASM_XTENSA_REG_A4 + +#define REG_LOCAL_1 ASM_XTENSA_REG_A12 +#define REG_LOCAL_2 ASM_XTENSA_REG_A13 +#define REG_LOCAL_3 ASM_XTENSA_REG_A14 +#define REG_LOCAL_NUM (3) + +#define ASM_T asm_xtensa_t +#define ASM_END_PASS asm_xtensa_end_pass +#define ASM_ENTRY asm_xtensa_entry +#define ASM_EXIT asm_xtensa_exit + +#define ASM_JUMP asm_xtensa_j_label +#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ + asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_EQ, reg, label) +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ + asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_NE, reg, label) +#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ + asm_xtensa_bcc_reg_reg_label(as, ASM_XTENSA_CC_EQ, reg1, reg2, label) +#define ASM_CALL_IND(as, ptr, idx) \ + do { \ + asm_xtensa_mov_reg_i32(as, ASM_XTENSA_REG_A0, (uint32_t)ptr); \ + asm_xtensa_op_callx0(as, ASM_XTENSA_REG_A0); \ + } while (0) + +#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_xtensa_mov_local_reg(as, (local_num), (reg)) +#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_xtensa_mov_reg_i32(as, (reg), (imm)) +#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_xtensa_mov_reg_i32(as, (reg), (imm)) +#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ + do { \ + asm_xtensa_mov_reg_i32(as, (reg_temp), (imm)); \ + asm_xtensa_mov_local_reg(as, (local_num), (reg_temp)); \ + } while (0) +#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_xtensa_mov_reg_local(as, (reg), (local_num)) +#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mov_n((as), (reg_dest), (reg_src)) +#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_xtensa_mov_reg_local_addr(as, (reg), (local_num)) + +#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) \ + do { \ + asm_xtensa_op_ssl((as), (reg_shift)); \ + asm_xtensa_op_sll((as), (reg_dest), (reg_dest)); \ + } while (0) +#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) \ + do { \ + asm_xtensa_op_ssr((as), (reg_shift)); \ + asm_xtensa_op_sra((as), (reg_dest), (reg_dest)); \ + } while (0) +#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_or((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_xor((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_and((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_add((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_sub((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mull((as), (reg_dest), (reg_dest), (reg_src)) + +#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), (word_offset)) +#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l8ui((as), (reg_dest), (reg_base), 0) +#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0) +#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0) + +#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_op_s32i_n((as), (reg_dest), 
(reg_base), (word_offset)) +#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s8i((as), (reg_src), (reg_base), 0) +#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s16i((as), (reg_src), (reg_base), 0) +#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s32i_n((as), (reg_src), (reg_base), 0) + +#endif // GENERIC_ASM_API + +#endif // MICROPY_INCLUDED_PY_ASMXTENSA_H diff --git a/py/binary.c b/py/binary.c index 699324bc6e..d22e0f342d 100644 --- a/py/binary.c +++ b/py/binary.c @@ -294,9 +294,10 @@ void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte ** #endif { val = mp_obj_get_int(val_in); - // sign extend if needed - if (BYTES_PER_WORD < 8 && size > sizeof(val) && is_signed(val_type) && (mp_int_t)val < 0) { - memset(p + sizeof(val), 0xff, size - sizeof(val)); + // zero/sign extend if needed + if (BYTES_PER_WORD < 8 && size > sizeof(val)) { + int c = (is_signed(val_type) && (mp_int_t)val < 0) ? 0xff : 0x00; + memset(p + sizeof(val), c, size - sizeof(val)); } } } diff --git a/py/builtin.h b/py/builtin.h index 4477fd2462..282eb1cc93 100644 --- a/py/builtin.h +++ b/py/builtin.h @@ -108,7 +108,9 @@ extern const mp_obj_module_t mp_module_uheapq; extern const mp_obj_module_t mp_module_uhashlib; extern const mp_obj_module_t mp_module_ubinascii; extern const mp_obj_module_t mp_module_urandom; +extern const mp_obj_module_t mp_module_uselect; extern const mp_obj_module_t mp_module_ussl; +extern const mp_obj_module_t mp_module_utimeq; extern const mp_obj_module_t mp_module_machine; extern const mp_obj_module_t mp_module_lwip; extern const mp_obj_module_t mp_module_websocket; diff --git a/py/builtinimport.c b/py/builtinimport.c index e72eaf4724..4024c5d59a 100644 --- a/py/builtinimport.c +++ b/py/builtinimport.c @@ -32,6 +32,7 @@ #include "py/nlr.h" #include "py/compile.h" #include "py/objmodule.h" +#include "py/persistentcode.h" #include "py/runtime.h" #include "py/builtin.h" #include "py/frozenmod.h" @@ -72,15 +73,8 @@ STATIC mp_import_stat_t mp_import_stat_any(const char *path) { return mp_import_stat(path); } -STATIC mp_import_stat_t stat_dir_or_file(vstr_t *path) { +STATIC mp_import_stat_t stat_file_py_or_mpy(vstr_t *path) { mp_import_stat_t stat = mp_import_stat_any(vstr_null_terminated_str(path)); - DEBUG_printf("stat %s: %d\n", vstr_str(path), stat); - if (stat == MP_IMPORT_STAT_DIR) { - return stat; - } - - vstr_add_str(path, ".py"); - stat = mp_import_stat_any(vstr_null_terminated_str(path)); if (stat == MP_IMPORT_STAT_FILE) { return stat; } @@ -96,6 +90,18 @@ STATIC mp_import_stat_t stat_dir_or_file(vstr_t *path) { return MP_IMPORT_STAT_NO_EXIST; } +STATIC mp_import_stat_t stat_dir_or_file(vstr_t *path) { + mp_import_stat_t stat = mp_import_stat_any(vstr_null_terminated_str(path)); + DEBUG_printf("stat %s: %d\n", vstr_str(path), stat); + if (stat == MP_IMPORT_STAT_DIR) { + return stat; + } + + // not a directory, add .py and try as a file + vstr_add_str(path, ".py"); + return stat_file_py_or_mpy(path); +} + STATIC mp_import_stat_t find_file(const char *file_str, uint file_len, vstr_t *dest) { #if MICROPY_PY_SYS // extract the list of paths @@ -460,15 +466,15 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) { // https://docs.python.org/3/reference/import.html // "Specifically, any module that contains a __path__ attribute is considered a package." 
mp_store_attr(module_obj, MP_QSTR___path__, mp_obj_new_str(vstr_str(&path), vstr_len(&path), false)); + size_t orig_path_len = path.len; vstr_add_char(&path, PATH_SEP_CHAR); vstr_add_str(&path, "__init__.py"); - if (mp_import_stat_any(vstr_null_terminated_str(&path)) != MP_IMPORT_STAT_FILE) { - vstr_cut_tail_bytes(&path, sizeof("/__init__.py") - 1); // cut off /__init__.py + if (stat_file_py_or_mpy(&path) != MP_IMPORT_STAT_FILE) { //mp_warning("%s is imported as namespace package", vstr_str(&path)); } else { do_load(module_obj, &path); - vstr_cut_tail_bytes(&path, sizeof("/__init__.py") - 1); // cut off /__init__.py } + path.len = orig_path_len; } else { // MP_IMPORT_STAT_FILE do_load(module_obj, &path); // TODO: We cannot just break here, at the very least, we must execute diff --git a/py/compile.c b/py/compile.c index f84d5e2145..b84793d10a 100644 --- a/py/compile.c +++ b/py/compile.c @@ -34,6 +34,7 @@ #include "py/emit.h" #include "py/compile.h" #include "py/runtime.h" +#include "py/asmbase.h" #if MICROPY_ENABLE_COMPILER @@ -69,6 +70,36 @@ typedef enum { #endif +#if MICROPY_EMIT_NATIVE +// define a macro to access external native emitter +#if MICROPY_EMIT_X64 +#define NATIVE_EMITTER(f) emit_native_x64_##f +#elif MICROPY_EMIT_X86 +#define NATIVE_EMITTER(f) emit_native_x86_##f +#elif MICROPY_EMIT_THUMB +#define NATIVE_EMITTER(f) emit_native_thumb_##f +#elif MICROPY_EMIT_ARM +#define NATIVE_EMITTER(f) emit_native_arm_##f +#elif MICROPY_EMIT_XTENSA +#define NATIVE_EMITTER(f) emit_native_xtensa_##f +#else +#error "unknown native emitter" +#endif +#endif + +#if MICROPY_EMIT_INLINE_ASM +// define macros for inline assembler +#if MICROPY_EMIT_INLINE_THUMB +#define ASM_DECORATOR_QSTR MP_QSTR_asm_thumb +#define ASM_EMITTER(f) emit_inline_thumb_##f +#elif MICROPY_EMIT_INLINE_XTENSA +#define ASM_DECORATOR_QSTR MP_QSTR_asm_xtensa +#define ASM_EMITTER(f) emit_inline_xtensa_##f +#else +#error "unknown asm emitter" +#endif +#endif + #define EMIT_INLINE_ASM(fun) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm)) #define EMIT_INLINE_ASM_ARG(fun, ...) 
(comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm, __VA_ARGS__)) @@ -103,7 +134,7 @@ typedef struct _compiler_t { const emit_method_table_t *emit_method_table; // current emit method table #endif - #if MICROPY_EMIT_INLINE_THUMB + #if MICROPY_EMIT_INLINE_ASM emit_inline_asm_t *emit_inline_asm; // current emitter for inline asm const emit_inline_asm_method_table_t *emit_inline_asm_method_table; // current emit method table for inline asm #endif @@ -243,23 +274,13 @@ STATIC void compile_generic_tuple(compiler_t *comp, mp_parse_node_struct_t *pns) c_tuple(comp, MP_PARSE_NODE_NULL, pns); } -STATIC bool node_is_const_false(mp_parse_node_t pn) { - return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_FALSE) - || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) == 0); -} - -STATIC bool node_is_const_true(mp_parse_node_t pn) { - return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_TRUE) - || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) != 0); -} - STATIC void c_if_cond(compiler_t *comp, mp_parse_node_t pn, bool jump_if, int label) { - if (node_is_const_false(pn)) { + if (mp_parse_node_is_const_false(pn)) { if (jump_if == false) { EMIT_ARG(jump, label); } return; - } else if (node_is_const_true(pn)) { + } else if (mp_parse_node_is_const_true(pn)) { if (jump_if == true) { EMIT_ARG(jump, label); } @@ -760,10 +781,10 @@ STATIC bool compile_built_in_decorator(compiler_t *comp, int name_len, mp_parse_ } else if (attr == MP_QSTR_viper) { *emit_options = MP_EMIT_OPT_VIPER; #endif -#if MICROPY_EMIT_INLINE_THUMB - } else if (attr == MP_QSTR_asm_thumb) { - *emit_options = MP_EMIT_OPT_ASM_THUMB; -#endif + #if MICROPY_EMIT_INLINE_ASM + } else if (attr == ASM_DECORATOR_QSTR) { + *emit_options = MP_EMIT_OPT_ASM; + #endif } else { compile_syntax_error(comp, name_nodes[1], "invalid micropython decorator"); } @@ -1213,19 +1234,17 @@ STATIC void compile_assert_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { } STATIC void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { - // TODO proper and/or short circuiting - uint l_end = comp_next_label(comp); // optimisation: don't emit anything when "if False" - if (!node_is_const_false(pns->nodes[0])) { + if (!mp_parse_node_is_const_false(pns->nodes[0])) { uint l_fail = comp_next_label(comp); c_if_cond(comp, pns->nodes[0], false, l_fail); // if condition compile_node(comp, pns->nodes[1]); // if block // optimisation: skip everything else when "if True" - if (node_is_const_true(pns->nodes[0])) { + if (mp_parse_node_is_const_true(pns->nodes[0])) { goto done; } @@ -1250,14 +1269,14 @@ STATIC void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { mp_parse_node_struct_t *pns_elif = (mp_parse_node_struct_t*)pn_elif[i]; // optimisation: don't emit anything when "if False" - if (!node_is_const_false(pns_elif->nodes[0])) { + if (!mp_parse_node_is_const_false(pns_elif->nodes[0])) { uint l_fail = comp_next_label(comp); c_if_cond(comp, pns_elif->nodes[0], false, l_fail); // elif condition compile_node(comp, pns_elif->nodes[1]); // elif block // optimisation: skip everything else when "elif True" - if (node_is_const_true(pns_elif->nodes[0])) { + if (mp_parse_node_is_const_true(pns_elif->nodes[0])) { goto done; } @@ -1294,9 +1313,9 @@ done: STATIC void compile_while_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { START_BREAK_CONTINUE_BLOCK - if (!node_is_const_false(pns->nodes[0])) { // optimisation: don't emit anything for "while False" + if (!mp_parse_node_is_const_false(pns->nodes[0])) { // 
optimisation: don't emit anything for "while False" uint top_label = comp_next_label(comp); - if (!node_is_const_true(pns->nodes[0])) { // optimisation: don't jump to cond for "while True" + if (!mp_parse_node_is_const_true(pns->nodes[0])) { // optimisation: don't jump to cond for "while True" EMIT_ARG(jump, continue_label); } EMIT_ARG(label_assign, top_label); @@ -1413,13 +1432,13 @@ STATIC void compile_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { if (1 <= n_args && n_args <= 3) { optimize = true; if (n_args == 1) { - pn_range_start = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 0); + pn_range_start = mp_parse_node_new_small_int(0); pn_range_end = args[0]; - pn_range_step = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 1); + pn_range_step = mp_parse_node_new_small_int(1); } else if (n_args == 2) { pn_range_start = args[0]; pn_range_end = args[1]; - pn_range_step = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 1); + pn_range_step = mp_parse_node_new_small_int(1); } else { pn_range_start = args[0]; pn_range_end = args[1]; @@ -3095,7 +3114,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) { assert(comp->cur_except_level == 0); } -#if MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_INLINE_ASM // requires 3 passes: SCOPE, CODE_SIZE, EMIT STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind_t pass) { comp->pass = pass; @@ -3108,7 +3127,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind } if (comp->pass > MP_PASS_SCOPE) { - EMIT_INLINE_ASM_ARG(start_pass, comp->pass, comp->scope_cur, &comp->compile_error); + EMIT_INLINE_ASM_ARG(start_pass, comp->pass, &comp->compile_error); } // get the function definition parse node @@ -3206,7 +3225,8 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind return; } if (pass > MP_PASS_SCOPE) { - EMIT_INLINE_ASM_ARG(align, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0])); + mp_asm_base_align((mp_asm_base_t*)comp->emit_inline_asm, + MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0])); } } else if (op == MP_QSTR_data) { if (!(n_args >= 2 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) { @@ -3220,7 +3240,8 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind compile_syntax_error(comp, nodes[i], "'data' requires integer arguments"); return; } - EMIT_INLINE_ASM_ARG(data, bytesize, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[j])); + mp_asm_base_data((mp_asm_base_t*)comp->emit_inline_asm, + bytesize, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[j])); } } } else { @@ -3237,6 +3258,13 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind if (comp->pass > MP_PASS_SCOPE) { EMIT_INLINE_ASM_ARG(end_pass, type_sig); + + if (comp->pass == MP_PASS_EMIT) { + void *f = mp_asm_base_get_code((mp_asm_base_t*)comp->emit_inline_asm); + mp_emit_glue_assign_native(comp->scope_cur->raw_code, MP_CODE_NATIVE_ASM, + f, mp_asm_base_get_code_size((mp_asm_base_t*)comp->emit_inline_asm), + NULL, comp->scope_cur->num_pos_args, 0, type_sig); + } } if (comp->compile_error != MP_OBJ_NULL) { @@ -3352,10 +3380,10 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f uint max_num_labels = 0; for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) { if (false) { -#if MICROPY_EMIT_INLINE_THUMB - } else if (s->emit_options == MP_EMIT_OPT_ASM_THUMB) { + #if MICROPY_EMIT_INLINE_ASM + } else if (s->emit_options == MP_EMIT_OPT_ASM) { compile_scope_inline_asm(comp, s, 
MP_PASS_SCOPE); -#endif + #endif } else { compile_scope(comp, s, MP_PASS_SCOPE); } @@ -3378,27 +3406,29 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f #if MICROPY_EMIT_NATIVE emit_t *emit_native = NULL; #endif -#if MICROPY_EMIT_INLINE_THUMB - emit_inline_asm_t *emit_inline_thumb = NULL; -#endif for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) { if (false) { // dummy -#if MICROPY_EMIT_INLINE_THUMB - } else if (s->emit_options == MP_EMIT_OPT_ASM_THUMB) { - // inline assembly for thumb - if (emit_inline_thumb == NULL) { - emit_inline_thumb = emit_inline_thumb_new(max_num_labels); + #if MICROPY_EMIT_INLINE_ASM + } else if (s->emit_options == MP_EMIT_OPT_ASM) { + // inline assembly + if (comp->emit_inline_asm == NULL) { + comp->emit_inline_asm = ASM_EMITTER(new)(max_num_labels); } comp->emit = NULL; - comp->emit_inline_asm = emit_inline_thumb; - comp->emit_inline_asm_method_table = &emit_inline_thumb_method_table; + comp->emit_inline_asm_method_table = &ASM_EMITTER(method_table); compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); + #if MICROPY_EMIT_INLINE_XTENSA + // Xtensa requires an extra pass to compute size of l32r const table + // TODO this can be improved by calculating it during SCOPE pass + // but that requires some other structural changes to the asm emitters + compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); + #endif if (comp->compile_error == MP_OBJ_NULL) { compile_scope_inline_asm(comp, s, MP_PASS_EMIT); } -#endif + #endif } else { @@ -3409,27 +3439,10 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f #if MICROPY_EMIT_NATIVE case MP_EMIT_OPT_NATIVE_PYTHON: case MP_EMIT_OPT_VIPER: -#if MICROPY_EMIT_X64 - if (emit_native == NULL) { - emit_native = emit_native_x64_new(&comp->compile_error, max_num_labels); - } - comp->emit_method_table = &emit_native_x64_method_table; -#elif MICROPY_EMIT_X86 - if (emit_native == NULL) { - emit_native = emit_native_x86_new(&comp->compile_error, max_num_labels); - } - comp->emit_method_table = &emit_native_x86_method_table; -#elif MICROPY_EMIT_THUMB - if (emit_native == NULL) { - emit_native = emit_native_thumb_new(&comp->compile_error, max_num_labels); - } - comp->emit_method_table = &emit_native_thumb_method_table; -#elif MICROPY_EMIT_ARM if (emit_native == NULL) { - emit_native = emit_native_arm_new(&comp->compile_error, max_num_labels); + emit_native = NATIVE_EMITTER(new)(&comp->compile_error, max_num_labels); } - comp->emit_method_table = &emit_native_arm_method_table; -#endif + comp->emit_method_table = &NATIVE_EMITTER(method_table); comp->emit = emit_native; EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ENABLE, s->emit_options == MP_EMIT_OPT_VIPER, 0); break; @@ -3472,22 +3485,14 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f emit_bc_free(emit_bc); #if MICROPY_EMIT_NATIVE if (emit_native != NULL) { -#if MICROPY_EMIT_X64 - emit_native_x64_free(emit_native); -#elif MICROPY_EMIT_X86 - emit_native_x86_free(emit_native); -#elif MICROPY_EMIT_THUMB - emit_native_thumb_free(emit_native); -#elif MICROPY_EMIT_ARM - emit_native_arm_free(emit_native); -#endif + NATIVE_EMITTER(free)(emit_native); } #endif -#if MICROPY_EMIT_INLINE_THUMB - if (emit_inline_thumb != NULL) { - emit_inline_thumb_free(emit_inline_thumb); + #if MICROPY_EMIT_INLINE_ASM + if (comp->emit_inline_asm != NULL) { + ASM_EMITTER(free)(comp->emit_inline_asm); } -#endif + #endif // free the parse tree 
mp_parse_tree_clear(parse_tree); diff --git a/py/compile.h b/py/compile.h index 3cca4cb30b..45a98588d8 100644 --- a/py/compile.h +++ b/py/compile.h @@ -36,7 +36,7 @@ enum { MP_EMIT_OPT_BYTECODE, MP_EMIT_OPT_NATIVE_PYTHON, MP_EMIT_OPT_VIPER, - MP_EMIT_OPT_ASM_THUMB, + MP_EMIT_OPT_ASM, }; // the compiler will raise an exception if an error occurred @@ -149,33 +149,32 @@ void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst); void mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst); void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst); -extern const emit_method_table_t emit_cpython_method_table; extern const emit_method_table_t emit_bc_method_table; extern const emit_method_table_t emit_native_x64_method_table; extern const emit_method_table_t emit_native_x86_method_table; extern const emit_method_table_t emit_native_thumb_method_table; extern const emit_method_table_t emit_native_arm_method_table; +extern const emit_method_table_t emit_native_xtensa_method_table; extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops; extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops; extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops; -emit_t *emit_cpython_new(void); emit_t *emit_bc_new(void); emit_t *emit_native_x64_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); emit_t *emit_native_x86_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); emit_t *emit_native_thumb_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); emit_t *emit_native_arm_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); +emit_t *emit_native_xtensa_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); -void emit_cpython_set_max_num_labels(emit_t* emit, mp_uint_t max_num_labels); void emit_bc_set_max_num_labels(emit_t* emit, mp_uint_t max_num_labels); -void emit_cpython_free(emit_t *emit); void emit_bc_free(emit_t *emit); void emit_native_x64_free(emit_t *emit); void emit_native_x86_free(emit_t *emit); void emit_native_thumb_free(emit_t *emit); void emit_native_arm_free(emit_t *emit); +void emit_native_xtensa_free(emit_t *emit); void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope); void mp_emit_bc_end_pass(emit_t *emit); @@ -263,19 +262,21 @@ void mp_emit_bc_end_except_handler(emit_t *emit); typedef struct _emit_inline_asm_t emit_inline_asm_t; typedef struct _emit_inline_asm_method_table_t { - void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot); + void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot); void (*end_pass)(emit_inline_asm_t *emit, mp_uint_t type_sig); mp_uint_t (*count_params)(emit_inline_asm_t *emit, const byte *p, const byte *ptop); bool (*label)(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id); - void (*align)(emit_inline_asm_t *emit, mp_uint_t align); - void (*data)(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val); void (*op)(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, const byte **pn_args); } emit_inline_asm_method_table_t; extern const emit_inline_asm_method_table_t emit_inline_thumb_method_table; +extern const emit_inline_asm_method_table_t emit_inline_xtensa_method_table; emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels); +emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels); + void emit_inline_thumb_free(emit_inline_asm_t *emit); +void emit_inline_xtensa_free(emit_inline_asm_t 
*emit); #if MICROPY_WARNINGS void mp_emitter_warning(pass_kind_t pass, const char *msg); diff --git a/py/emitglue.c b/py/emitglue.c index e04eb32c91..ae63463828 100644 --- a/py/emitglue.c +++ b/py/emitglue.c @@ -83,7 +83,7 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, mp_uint_t #endif } -#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig) { assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM); rc->kind = kind; @@ -127,10 +127,6 @@ mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_ar // make the function, depending on the raw code kind mp_obj_t fun; switch (rc->kind) { - case MP_CODE_BYTECODE: - no_other_choice: - fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->data.u_byte.bytecode, rc->data.u_byte.const_table); - break; #if MICROPY_EMIT_NATIVE case MP_CODE_NATIVE_PY: fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->data.u_native.fun_data, rc->data.u_native.const_table); @@ -139,15 +135,16 @@ mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_ar fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig); break; #endif - #if MICROPY_EMIT_INLINE_THUMB + #if MICROPY_EMIT_INLINE_ASM case MP_CODE_NATIVE_ASM: fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig); break; #endif default: - // raw code was never set (this should not happen) - assert(0); - goto no_other_choice; // to help flow control analysis + // rc->kind should always be set and BYTECODE is the only remaining case + assert(rc->kind == MP_CODE_BYTECODE); + fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->data.u_byte.bytecode, rc->data.u_byte.const_table); + break; } // check for generator functions and if so wrap in generator object @@ -172,468 +169,3 @@ mp_obj_t mp_make_closure_from_raw_code(const mp_raw_code_t *rc, mp_uint_t n_clos // wrap function in closure object return mp_obj_new_closure(ffun, n_closed_over & 0xff, args + ((n_closed_over >> 7) & 2)); } - -#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE - -#include "py/smallint.h" - -// The feature flags byte encodes the compile-time config options that -// affect the generate bytecode. -#define MPY_FEATURE_FLAGS ( \ - ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) << 0) \ - | ((MICROPY_PY_BUILTINS_STR_UNICODE) << 1) \ - ) -// This is a version of the flags that can be configured at runtime. -#define MPY_FEATURE_FLAGS_DYNAMIC ( \ - ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) << 0) \ - | ((MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) << 1) \ - ) - -#if MICROPY_PERSISTENT_CODE_LOAD || (MICROPY_PERSISTENT_CODE_SAVE && !MICROPY_DYNAMIC_COMPILER) -// The bytecode will depend on the number of bits in a small-int, and -// this function computes that (could make it a fixed constant, but it -// would need to be defined in mpconfigport.h). 
-STATIC int mp_small_int_bits(void) { - mp_int_t i = MP_SMALL_INT_MAX; - int n = 1; - while (i != 0) { - i >>= 1; - ++n; - } - return n; -} -#endif - -typedef struct _bytecode_prelude_t { - uint n_state; - uint n_exc_stack; - uint scope_flags; - uint n_pos_args; - uint n_kwonly_args; - uint n_def_pos_args; - uint code_info_size; -} bytecode_prelude_t; - -// ip will point to start of opcodes -// ip2 will point to simple_name, source_file qstrs -STATIC void extract_prelude(const byte **ip, const byte **ip2, bytecode_prelude_t *prelude) { - prelude->n_state = mp_decode_uint(ip); - prelude->n_exc_stack = mp_decode_uint(ip); - prelude->scope_flags = *(*ip)++; - prelude->n_pos_args = *(*ip)++; - prelude->n_kwonly_args = *(*ip)++; - prelude->n_def_pos_args = *(*ip)++; - *ip2 = *ip; - prelude->code_info_size = mp_decode_uint(ip2); - *ip += prelude->code_info_size; - while (*(*ip)++ != 255) { - } -} - -#endif // MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE - -#if MICROPY_PERSISTENT_CODE_LOAD - -#include "py/parsenum.h" -#include "py/bc0.h" - -STATIC int read_byte(mp_reader_t *reader) { - return reader->read_byte(reader->data); -} - -STATIC void read_bytes(mp_reader_t *reader, byte *buf, size_t len) { - while (len-- > 0) { - *buf++ = reader->read_byte(reader->data); - } -} - -STATIC mp_uint_t read_uint(mp_reader_t *reader) { - mp_uint_t unum = 0; - for (;;) { - byte b = reader->read_byte(reader->data); - unum = (unum << 7) | (b & 0x7f); - if ((b & 0x80) == 0) { - break; - } - } - return unum; -} - -STATIC qstr load_qstr(mp_reader_t *reader) { - mp_uint_t len = read_uint(reader); - char *str = m_new(char, len); - read_bytes(reader, (byte*)str, len); - qstr qst = qstr_from_strn(str, len); - m_del(char, str, len); - return qst; -} - -STATIC mp_obj_t load_obj(mp_reader_t *reader) { - byte obj_type = read_byte(reader); - if (obj_type == 'e') { - return MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj); - } else { - size_t len = read_uint(reader); - vstr_t vstr; - vstr_init_len(&vstr, len); - read_bytes(reader, (byte*)vstr.buf, len); - if (obj_type == 's' || obj_type == 'b') { - return mp_obj_new_str_from_vstr(obj_type == 's' ? 
&mp_type_str : &mp_type_bytes, &vstr); - } else if (obj_type == 'i') { - return mp_parse_num_integer(vstr.buf, vstr.len, 10, NULL); - } else { - assert(obj_type == 'f' || obj_type == 'c'); - return mp_parse_num_decimal(vstr.buf, vstr.len, obj_type == 'c', false, NULL); - } - } -} - -STATIC void load_bytecode_qstrs(mp_reader_t *reader, byte *ip, byte *ip_top) { - while (ip < ip_top) { - size_t sz; - uint f = mp_opcode_format(ip, &sz); - if (f == MP_OPCODE_QSTR) { - qstr qst = load_qstr(reader); - ip[1] = qst; - ip[2] = qst >> 8; - } - ip += sz; - } -} - -STATIC mp_raw_code_t *load_raw_code(mp_reader_t *reader) { - // load bytecode - mp_uint_t bc_len = read_uint(reader); - byte *bytecode = m_new(byte, bc_len); - read_bytes(reader, bytecode, bc_len); - - // extract prelude - const byte *ip = bytecode; - const byte *ip2; - bytecode_prelude_t prelude; - extract_prelude(&ip, &ip2, &prelude); - - // load qstrs and link global qstr ids into bytecode - qstr simple_name = load_qstr(reader); - qstr source_file = load_qstr(reader); - ((byte*)ip2)[0] = simple_name; ((byte*)ip2)[1] = simple_name >> 8; - ((byte*)ip2)[2] = source_file; ((byte*)ip2)[3] = source_file >> 8; - load_bytecode_qstrs(reader, (byte*)ip, bytecode + bc_len); - - // load constant table - mp_uint_t n_obj = read_uint(reader); - mp_uint_t n_raw_code = read_uint(reader); - mp_uint_t *const_table = m_new(mp_uint_t, prelude.n_pos_args + prelude.n_kwonly_args + n_obj + n_raw_code); - mp_uint_t *ct = const_table; - for (mp_uint_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { - *ct++ = (mp_uint_t)MP_OBJ_NEW_QSTR(load_qstr(reader)); - } - for (mp_uint_t i = 0; i < n_obj; ++i) { - *ct++ = (mp_uint_t)load_obj(reader); - } - for (mp_uint_t i = 0; i < n_raw_code; ++i) { - *ct++ = (mp_uint_t)(uintptr_t)load_raw_code(reader); - } - - // create raw_code and return it - mp_raw_code_t *rc = mp_emit_glue_new_raw_code(); - mp_emit_glue_assign_bytecode(rc, bytecode, bc_len, const_table, - #if MICROPY_PERSISTENT_CODE_SAVE - n_obj, n_raw_code, - #endif - prelude.scope_flags); - return rc; -} - -mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader) { - byte header[4]; - read_bytes(reader, header, sizeof(header)); - if (strncmp((char*)header, "M\x00", 2) != 0) { - mp_raise_ValueError("invalid .mpy file"); - } - if (header[2] != MPY_FEATURE_FLAGS || header[3] > mp_small_int_bits()) { - mp_raise_ValueError("incompatible .mpy file"); - } - return load_raw_code(reader); -} - -typedef struct _mp_mem_reader_t { - const byte *cur; - const byte *end; -} mp_mem_reader_t; - -STATIC mp_uint_t mp_mem_reader_next_byte(void *br_in) { - mp_mem_reader_t *br = br_in; - if (br->cur < br->end) { - return *br->cur++; - } else { - return (mp_uint_t)-1; - } -} - -mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len) { - mp_mem_reader_t mr = {buf, buf + len}; - mp_reader_t reader = {&mr, mp_mem_reader_next_byte}; - return mp_raw_code_load(&reader); -} - -// here we define mp_raw_code_load_file depending on the port -// TODO abstract this away properly - -#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__unix__) -// unix file reader - -#include <sys/stat.h> -#include <fcntl.h> -#include <unistd.h> - -typedef struct _mp_lexer_file_buf_t { - int fd; - byte buf[20]; - mp_uint_t len; - mp_uint_t pos; -} mp_lexer_file_buf_t; - -STATIC mp_uint_t file_buf_next_byte(void *fb_in) { - mp_lexer_file_buf_t *fb = fb_in; - if (fb->pos >= fb->len) { - if (fb->len == 0) { - return (mp_uint_t)-1; - } else { - int n = read(fb->fd, fb->buf, 
sizeof(fb->buf)); - if (n <= 0) { - fb->len = 0; - return (mp_uint_t)-1; - } - fb->len = n; - fb->pos = 0; - } - } - return fb->buf[fb->pos++]; -} - -mp_raw_code_t *mp_raw_code_load_file(const char *filename) { - mp_lexer_file_buf_t fb; - fb.fd = open(filename, O_RDONLY, 0644); - int n = read(fb.fd, fb.buf, sizeof(fb.buf)); - fb.len = n; - fb.pos = 0; - mp_reader_t reader; - reader.data = &fb; - reader.read_byte = file_buf_next_byte; - mp_raw_code_t *rc = mp_raw_code_load(&reader); - close(fb.fd); - return rc; -} - -#elif defined(__thumb2__) || defined(__xtensa__) -// fatfs file reader (assume thumb2 arch uses fatfs...) - -#include "lib/fatfs/ff.h" - -typedef struct _mp_lexer_file_buf_t { - FIL fp; - byte buf[20]; - uint16_t len; - uint16_t pos; -} mp_lexer_file_buf_t; - -STATIC mp_uint_t file_buf_next_byte(void *fb_in) { - mp_lexer_file_buf_t *fb = fb_in; - if (fb->pos >= fb->len) { - if (fb->len < sizeof(fb->buf)) { - return (mp_uint_t)-1; - } else { - UINT n; - f_read(&fb->fp, fb->buf, sizeof(fb->buf), &n); - if (n == 0) { - return (mp_uint_t)-1; - } - fb->len = n; - fb->pos = 0; - } - } - return fb->buf[fb->pos++]; -} - -mp_raw_code_t *mp_raw_code_load_file(const char *filename) { - mp_lexer_file_buf_t fb; - /*FRESULT res =*/ f_open(&fb.fp, filename, FA_READ); - UINT n; - f_read(&fb.fp, fb.buf, sizeof(fb.buf), &n); - fb.len = n; - fb.pos = 0; - - mp_reader_t reader; - reader.data = &fb; - reader.read_byte = file_buf_next_byte; - mp_raw_code_t *rc = mp_raw_code_load(&reader); - - f_close(&fb.fp); - - return rc; -} - -#endif - -#endif // MICROPY_PERSISTENT_CODE_LOAD - -#if MICROPY_PERSISTENT_CODE_SAVE - -#include "py/objstr.h" - -STATIC void mp_print_bytes(mp_print_t *print, const byte *data, size_t len) { - print->print_strn(print->data, (const char*)data, len); -} - -#define BYTES_FOR_INT ((BYTES_PER_WORD * 8 + 6) / 7) -STATIC void mp_print_uint(mp_print_t *print, mp_uint_t n) { - byte buf[BYTES_FOR_INT]; - byte *p = buf + sizeof(buf); - *--p = n & 0x7f; - n >>= 7; - for (; n != 0; n >>= 7) { - *--p = 0x80 | (n & 0x7f); - } - print->print_strn(print->data, (char*)p, buf + sizeof(buf) - p); -} - -STATIC void save_qstr(mp_print_t *print, qstr qst) { - size_t len; - const byte *str = qstr_data(qst, &len); - mp_print_uint(print, len); - mp_print_bytes(print, str, len); -} - -STATIC void save_obj(mp_print_t *print, mp_obj_t o) { - if (MP_OBJ_IS_STR_OR_BYTES(o)) { - byte obj_type; - if (MP_OBJ_IS_STR(o)) { - obj_type = 's'; - } else { - obj_type = 'b'; - } - mp_uint_t len; - const char *str = mp_obj_str_get_data(o, &len); - mp_print_bytes(print, &obj_type, 1); - mp_print_uint(print, len); - mp_print_bytes(print, (const byte*)str, len); - } else if (MP_OBJ_TO_PTR(o) == &mp_const_ellipsis_obj) { - byte obj_type = 'e'; - mp_print_bytes(print, &obj_type, 1); - } else { - // we save numbers using a simplistic text representation - // TODO could be improved - byte obj_type; - if (MP_OBJ_IS_TYPE(o, &mp_type_int)) { - obj_type = 'i'; - } else if (mp_obj_is_float(o)) { - obj_type = 'f'; - } else { - assert(MP_OBJ_IS_TYPE(o, &mp_type_complex)); - obj_type = 'c'; - } - vstr_t vstr; - mp_print_t pr; - vstr_init_print(&vstr, 10, &pr); - mp_obj_print_helper(&pr, o, PRINT_REPR); - mp_print_bytes(print, &obj_type, 1); - mp_print_uint(print, vstr.len); - mp_print_bytes(print, (const byte*)vstr.buf, vstr.len); - vstr_clear(&vstr); - } -} - -STATIC void save_bytecode_qstrs(mp_print_t *print, const byte *ip, const byte *ip_top) { - while (ip < ip_top) { - size_t sz; - uint f = mp_opcode_format(ip, &sz); - if (f 
== MP_OPCODE_QSTR) { - qstr qst = ip[1] | (ip[2] << 8); - save_qstr(print, qst); - } - ip += sz; - } -} - -STATIC void save_raw_code(mp_print_t *print, mp_raw_code_t *rc) { - if (rc->kind != MP_CODE_BYTECODE) { - mp_raise_ValueError("can only save bytecode"); - } - - // save bytecode - mp_print_uint(print, rc->data.u_byte.bc_len); - mp_print_bytes(print, rc->data.u_byte.bytecode, rc->data.u_byte.bc_len); - - // extract prelude - const byte *ip = rc->data.u_byte.bytecode; - const byte *ip2; - bytecode_prelude_t prelude; - extract_prelude(&ip, &ip2, &prelude); - - // save qstrs - save_qstr(print, ip2[0] | (ip2[1] << 8)); // simple_name - save_qstr(print, ip2[2] | (ip2[3] << 8)); // source_file - save_bytecode_qstrs(print, ip, rc->data.u_byte.bytecode + rc->data.u_byte.bc_len); - - // save constant table - mp_print_uint(print, rc->data.u_byte.n_obj); - mp_print_uint(print, rc->data.u_byte.n_raw_code); - const mp_uint_t *const_table = rc->data.u_byte.const_table; - for (uint i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { - mp_obj_t o = (mp_obj_t)*const_table++; - save_qstr(print, MP_OBJ_QSTR_VALUE(o)); - } - for (uint i = 0; i < rc->data.u_byte.n_obj; ++i) { - save_obj(print, (mp_obj_t)*const_table++); - } - for (uint i = 0; i < rc->data.u_byte.n_raw_code; ++i) { - save_raw_code(print, (mp_raw_code_t*)(uintptr_t)*const_table++); - } -} - -void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print) { - // header contains: - // byte 'M' - // byte version - // byte feature flags - // byte number of bits in a small int - byte header[4] = {'M', 0, MPY_FEATURE_FLAGS_DYNAMIC, - #if MICROPY_DYNAMIC_COMPILER - mp_dynamic_compiler.small_int_bits, - #else - mp_small_int_bits(), - #endif - }; - mp_print_bytes(print, header, sizeof(header)); - - save_raw_code(print, rc); -} - -// here we define mp_raw_code_save_file depending on the port -// TODO abstract this away properly - -#if defined(__i386__) || defined(__x86_64__) || (defined(__arm__) && (defined(__unix__))) - -#include <unistd.h> -#include <sys/stat.h> -#include <fcntl.h> - -STATIC void fd_print_strn(void *env, const char *str, size_t len) { - int fd = (intptr_t)env; - ssize_t ret = write(fd, str, len); - (void)ret; -} - -void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename) { - int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); - mp_print_t fd_print = {(void*)(intptr_t)fd, fd_print_strn}; - mp_raw_code_save(rc, &fd_print); - close(fd); -} - -#else -#error mp_raw_code_save_file not implemented for this platform -#endif - -#endif // MICROPY_PERSISTENT_CODE_SAVE diff --git a/py/emitglue.h b/py/emitglue.h index f5618577d5..37c4f1b18f 100644 --- a/py/emitglue.h +++ b/py/emitglue.h @@ -74,21 +74,4 @@ void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args); mp_obj_t mp_make_closure_from_raw_code(const mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args); -#if MICROPY_PERSISTENT_CODE_LOAD -typedef struct _mp_reader_t { - void *data; - mp_uint_t (*read_byte)(void *data); - void (*close)(void *data); -} mp_reader_t; - -mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader); -mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len); -mp_raw_code_t *mp_raw_code_load_file(const char *filename); -#endif - -#if MICROPY_PERSISTENT_CODE_SAVE -void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print); -void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename); -#endif 
- #endif // __MICROPY_INCLUDED_PY_EMITGLUE_H__ diff --git a/py/emitinlinethumb.c b/py/emitinlinethumb.c index 9ee1ab170d..c733cf2c73 100644 --- a/py/emitinlinethumb.c +++ b/py/emitinlinethumb.c @@ -43,12 +43,11 @@ typedef enum { } pn_kind_t; struct _emit_inline_asm_t { + asm_thumb_t as; uint16_t pass; - scope_t *scope; mp_obj_t *error_slot; mp_uint_t max_num_labels; qstr *label_lookup; - asm_thumb_t *as; }; STATIC void emit_inline_thumb_error_msg(emit_inline_asm_t *emit, const char *msg) { @@ -61,38 +60,32 @@ STATIC void emit_inline_thumb_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) { emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels) { emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t); + memset(&emit->as, 0, sizeof(emit->as)); + mp_asm_base_init(&emit->as.base, max_num_labels); emit->max_num_labels = max_num_labels; emit->label_lookup = m_new(qstr, max_num_labels); - emit->as = asm_thumb_new(max_num_labels); return emit; } void emit_inline_thumb_free(emit_inline_asm_t *emit) { m_del(qstr, emit->label_lookup, emit->max_num_labels); - asm_thumb_free(emit->as, false); + mp_asm_base_deinit(&emit->as.base, false); m_del_obj(emit_inline_asm_t, emit); } -STATIC void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot) { +STATIC void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) { emit->pass = pass; - emit->scope = scope; emit->error_slot = error_slot; if (emit->pass == MP_PASS_CODE_SIZE) { memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr)); } - asm_thumb_start_pass(emit->as, pass == MP_PASS_EMIT ? ASM_THUMB_PASS_EMIT : ASM_THUMB_PASS_COMPUTE); - asm_thumb_entry(emit->as, 0); + mp_asm_base_start_pass(&emit->as.base, pass == MP_PASS_EMIT ? 
MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE); + asm_thumb_entry(&emit->as, 0); } STATIC void emit_inline_thumb_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) { - asm_thumb_exit(emit->as); - asm_thumb_end_pass(emit->as); - - if (emit->pass == MP_PASS_EMIT) { - void *f = asm_thumb_get_code(emit->as); - mp_emit_glue_assign_native(emit->scope->raw_code, MP_CODE_NATIVE_ASM, f, - asm_thumb_get_code_size(emit->as), NULL, emit->scope->num_pos_args, 0, type_sig); - } + asm_thumb_exit(&emit->as); + asm_thumb_end_pass(&emit->as); } STATIC mp_uint_t emit_inline_thumb_count_params(emit_inline_asm_t *emit, const byte *p, const byte *ptop) { @@ -128,18 +121,10 @@ STATIC bool emit_inline_thumb_label(emit_inline_asm_t *emit, mp_uint_t label_num } } emit->label_lookup[label_num] = label_id; - asm_thumb_label_assign(emit->as, label_num); + mp_asm_base_label_assign(&emit->as.base, label_num); return true; } -STATIC void emit_inline_thumb_align(emit_inline_asm_t *emit, mp_uint_t align) { - asm_thumb_align(emit->as, align); -} - -STATIC void emit_inline_thumb_data(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val) { - asm_thumb_data(emit->as, bytesize, val); -} - typedef struct _reg_name_t { byte reg; byte name[3]; } reg_name_t; STATIC const reg_name_t reg_name_table[] = { {0, "r0\0"}, @@ -451,7 +436,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a op_vfp_twoargs:; mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]); mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[1]); - asm_thumb_op32(emit->as, + asm_thumb_op32(&emit->as, op_code_hi | ((vd & 1) << 6), op_code | ((vd & 0x1e) << 11) | ((vm & 1) << 5) | (vm & 0x1e) >> 1); } else if (op == MP_QSTR_vsqrt) { @@ -478,7 +463,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a const char *reg_str1 = get_arg_str(pn_args[1]); if (strcmp(reg_str1, "FPSCR") == 0) { // FP status to ARM reg - asm_thumb_op32(emit->as, 0xeef1, 0x0a10 | (reg_dest << 12)); + asm_thumb_op32(&emit->as, 0xeef1, 0x0a10 | (reg_dest << 12)); } else { goto unknown_op; } @@ -494,7 +479,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a vm = get_arg_vfpreg(emit, op_str, pn_args[0]); r_arm = get_arg_reg(emit, op_str, pn_args[1], 15); } - asm_thumb_op32(emit->as, + asm_thumb_op32(&emit->as, op_code_hi | ((vm & 0x1e) >> 1), 0x0a10 | (r_arm << 12) | ((vm & 1) << 7)); } else if (op == MP_QSTR_vldr) { @@ -506,7 +491,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a mp_uint_t rlo_base = get_arg_reg(emit, op_str, pn_base, 7); mp_uint_t i8; i8 = get_arg_i(emit, op_str, pn_offset, 0x3fc) >> 2; - asm_thumb_op32(emit->as, + asm_thumb_op32(&emit->as, op_code_hi | rlo_base | ((vd & 1) << 6), 0x0a00 | ((vd & 0x1e) << 11) | i8); } @@ -525,7 +510,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]); mp_uint_t vn = get_arg_vfpreg(emit, op_str, pn_args[1]); mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[2]); - asm_thumb_op32(emit->as, + asm_thumb_op32(&emit->as, op_code_hi | ((vd & 1) << 6) | (vn >> 1), op_code | (vm >> 1) | ((vm & 1) << 5) | ((vd & 0x1e) << 11) | ((vn & 1) << 7)); return; @@ -539,9 +524,9 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a #endif if (n_args == 0) { if (op == MP_QSTR_nop) { - asm_thumb_op16(emit->as, ASM_THUMB_OP_NOP); + asm_thumb_op16(&emit->as, ASM_THUMB_OP_NOP); } else if (op == MP_QSTR_wfi) { - 
asm_thumb_op16(emit->as, ASM_THUMB_OP_WFI); + asm_thumb_op16(&emit->as, ASM_THUMB_OP_WFI); } else { goto unknown_op; } @@ -549,17 +534,17 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a } else if (n_args == 1) { if (op == MP_QSTR_b) { int label_num = get_arg_label(emit, op_str, pn_args[0]); - if (!asm_thumb_b_n_label(emit->as, label_num)) { + if (!asm_thumb_b_n_label(&emit->as, label_num)) { goto branch_not_in_range; } } else if (op == MP_QSTR_bl) { int label_num = get_arg_label(emit, op_str, pn_args[0]); - if (!asm_thumb_bl_label(emit->as, label_num)) { + if (!asm_thumb_bl_label(&emit->as, label_num)) { goto branch_not_in_range; } } else if (op == MP_QSTR_bx) { mp_uint_t r = get_arg_reg(emit, op_str, pn_args[0], 15); - asm_thumb_op16(emit->as, 0x4700 | (r << 3)); + asm_thumb_op16(&emit->as, 0x4700 | (r << 3)); } else if (op_str[0] == 'b' && (op_len == 3 || (op_len == 5 && op_str[3] == '_' && (op_str[4] == 'n' || (ARMV7M && op_str[4] == 'w'))))) { @@ -573,7 +558,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a goto unknown_op; } int label_num = get_arg_label(emit, op_str, pn_args[0]); - if (!asm_thumb_bcc_nw_label(emit->as, cc, label_num, op_len == 5 && op_str[4] == 'w')) { + if (!asm_thumb_bcc_nw_label(&emit->as, cc, label_num, op_len == 5 && op_str[4] == 'w')) { goto branch_not_in_range; } } else if (ARMV7M && op_str[0] == 'i' && op_str[1] == 't') { @@ -608,32 +593,32 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a goto unknown_op; } } - asm_thumb_it_cc(emit->as, cc, it_mask); + asm_thumb_it_cc(&emit->as, cc, it_mask); } else if (op == MP_QSTR_cpsid) { // TODO check pn_args[0] == i - asm_thumb_op16(emit->as, ASM_THUMB_OP_CPSID_I); + asm_thumb_op16(&emit->as, ASM_THUMB_OP_CPSID_I); } else if (op == MP_QSTR_cpsie) { // TODO check pn_args[0] == i - asm_thumb_op16(emit->as, ASM_THUMB_OP_CPSIE_I); + asm_thumb_op16(&emit->as, ASM_THUMB_OP_CPSIE_I); } else if (op == MP_QSTR_push) { mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]); if ((reglist & 0xff00) == 0) { - asm_thumb_op16(emit->as, 0xb400 | reglist); + asm_thumb_op16(&emit->as, 0xb400 | reglist); } else { if (!ARMV7M) { goto unknown_op; } - asm_thumb_op32(emit->as, 0xe92d, reglist); + asm_thumb_op32(&emit->as, 0xe92d, reglist); } } else if (op == MP_QSTR_pop) { mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]); if ((reglist & 0xff00) == 0) { - asm_thumb_op16(emit->as, 0xbc00 | reglist); + asm_thumb_op16(&emit->as, 0xbc00 | reglist); } else { if (!ARMV7M) { goto unknown_op; } - asm_thumb_op32(emit->as, 0xe8bd, reglist); + asm_thumb_op32(&emit->as, 0xe8bd, reglist); } } else { goto unknown_op; @@ -646,7 +631,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a if (op == MP_QSTR_mov) { mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15); mp_uint_t reg_src = get_arg_reg(emit, op_str, pn_args[1], 15); - asm_thumb_mov_reg_reg(emit->as, reg_dest, reg_src); + asm_thumb_mov_reg_reg(&emit->as, reg_dest, reg_src); } else if (ARMV7M && op == MP_QSTR_clz) { op_code_hi = 0xfab0; op_code = 0xf080; @@ -654,7 +639,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a op_clz_rbit: rd = get_arg_reg(emit, op_str, pn_args[0], 15); rm = get_arg_reg(emit, op_str, pn_args[1], 15); - asm_thumb_op32(emit->as, op_code_hi | rm, op_code | (rd << 8) | rm); + asm_thumb_op32(&emit->as, op_code_hi | rm, op_code | (rd << 8) | rm); } else if (ARMV7M && op == 
MP_QSTR_rbit) { op_code_hi = 0xfa90; op_code = 0xf0a0; @@ -662,7 +647,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a } else if (ARMV7M && op == MP_QSTR_mrs){ mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 12); mp_uint_t reg_src = get_arg_special_reg(emit, op_str, pn_args[1]); - asm_thumb_op32(emit->as, 0xf3ef, 0x8000 | (reg_dest << 8) | reg_src); + asm_thumb_op32(&emit->as, 0xf3ef, 0x8000 | (reg_dest << 8) | reg_src); } else { if (op == MP_QSTR_and_) { op_code = ASM_THUMB_FORMAT_4_AND; @@ -670,7 +655,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a op_format_4: reg_dest = get_arg_reg(emit, op_str, pn_args[0], 7); reg_src = get_arg_reg(emit, op_str, pn_args[1], 7); - asm_thumb_format_4(emit->as, op_code, reg_dest, reg_src); + asm_thumb_format_4(&emit->as, op_code, reg_dest, reg_src); return; } // search table for ALU ops @@ -691,7 +676,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a op_format_3: rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7); i8_src = get_arg_i(emit, op_str, pn_args[1], 0xff); - asm_thumb_format_3(emit->as, op_code, rlo_dest, i8_src); + asm_thumb_format_3(&emit->as, op_code, rlo_dest, i8_src); } else if (op == MP_QSTR_cmp) { op_code = ASM_THUMB_FORMAT_3_CMP; goto op_format_3; @@ -707,7 +692,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a op_movw_movt: reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15); int i_src = get_arg_i(emit, op_str, pn_args[1], 0xffff); - asm_thumb_mov_reg_i16(emit->as, op_code, reg_dest, i_src); + asm_thumb_mov_reg_i16(&emit->as, op_code, reg_dest, i_src); } else if (ARMV7M && op == MP_QSTR_movt) { op_code = ASM_THUMB_OP_MOVT; goto op_movw_movt; @@ -715,15 +700,15 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a // this is a convenience instruction mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15); uint32_t i_src = get_arg_i(emit, op_str, pn_args[1], 0xffffffff); - asm_thumb_mov_reg_i16(emit->as, ASM_THUMB_OP_MOVW, reg_dest, i_src & 0xffff); - asm_thumb_mov_reg_i16(emit->as, ASM_THUMB_OP_MOVT, reg_dest, (i_src >> 16) & 0xffff); + asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVW, reg_dest, i_src & 0xffff); + asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVT, reg_dest, (i_src >> 16) & 0xffff); } else if (ARMV7M && op == MP_QSTR_ldrex) { mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15); const byte *pn_base, *pn_offset; if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) { mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15); mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2; - asm_thumb_op32(emit->as, 0xe850 | r_base, 0x0f00 | (r_dest << 12) | i8); + asm_thumb_op32(&emit->as, 0xe850 | r_base, 0x0f00 | (r_dest << 12) | i8); } } else { // search table for ldr/str instructions @@ -742,7 +727,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a } else { i5 = get_arg_i(emit, op_str, pn_offset, 0x7c) >> 2; } - asm_thumb_format_9_10(emit->as, op_code, rlo_dest, rlo_base, i5); + asm_thumb_format_9_10(&emit->as, op_code, rlo_dest, rlo_base, i5); return; } break; @@ -761,7 +746,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7); rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7); i5 = get_arg_i(emit, op_str, pn_args[2], 0x1f); - asm_thumb_format_1(emit->as, op_code, rlo_dest, rlo_src, 
i5); + asm_thumb_format_1(&emit->as, op_code, rlo_dest, rlo_src, i5); } else if (op == MP_QSTR_lsr) { op_code = ASM_THUMB_FORMAT_1_LSR; goto op_format_1; @@ -782,7 +767,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a op_code |= ASM_THUMB_FORMAT_2_IMM_OPERAND; src_b = get_arg_i(emit, op_str, pn_args[2], 0x7); } - asm_thumb_format_2(emit->as, op_code, rlo_dest, rlo_src, src_b); + asm_thumb_format_2(&emit->as, op_code, rlo_dest, rlo_src, src_b); } else if (ARMV7M && op == MP_QSTR_sdiv) { op_code = 0xfb90; // sdiv high part mp_uint_t rd, rn, rm; @@ -790,7 +775,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a rd = get_arg_reg(emit, op_str, pn_args[0], 15); rn = get_arg_reg(emit, op_str, pn_args[1], 15); rm = get_arg_reg(emit, op_str, pn_args[2], 15); - asm_thumb_op32(emit->as, op_code | rn, 0xf0f0 | (rd << 8) | rm); + asm_thumb_op32(&emit->as, op_code | rn, 0xf0f0 | (rd << 8) | rm); } else if (ARMV7M && op == MP_QSTR_udiv) { op_code = 0xfbb0; // udiv high part goto op_sdiv_udiv; @@ -804,7 +789,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a if (get_arg_addr(emit, op_str, pn_args[2], &pn_base, &pn_offset)) { mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15); mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2; - asm_thumb_op32(emit->as, 0xe840 | r_base, (r_src << 12) | (r_dest << 8) | i8); + asm_thumb_op32(&emit->as, 0xe840 | r_base, (r_src << 12) | (r_dest << 8) | i8); } } else { goto unknown_op; @@ -830,8 +815,6 @@ const emit_inline_asm_method_table_t emit_inline_thumb_method_table = { emit_inline_thumb_end_pass, emit_inline_thumb_count_params, emit_inline_thumb_label, - emit_inline_thumb_align, - emit_inline_thumb_data, emit_inline_thumb_op, }; diff --git a/py/emitinlinextensa.c b/py/emitinlinextensa.c new file mode 100644 index 0000000000..3d3217f5bb --- /dev/null +++ b/py/emitinlinextensa.c @@ -0,0 +1,345 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <stdarg.h> +#include <assert.h> + +#include "py/emit.h" +#include "py/asmxtensa.h" + +#if MICROPY_EMIT_INLINE_XTENSA + +struct _emit_inline_asm_t { + asm_xtensa_t as; + uint16_t pass; + mp_obj_t *error_slot; + mp_uint_t max_num_labels; + qstr *label_lookup; +}; + +STATIC void emit_inline_xtensa_error_msg(emit_inline_asm_t *emit, const char *msg) { + *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg); +} + +STATIC void emit_inline_xtensa_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) { + *emit->error_slot = exc; +} + +emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels) { + emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t); + memset(&emit->as, 0, sizeof(emit->as)); + mp_asm_base_init(&emit->as.base, max_num_labels); + emit->max_num_labels = max_num_labels; + emit->label_lookup = m_new(qstr, max_num_labels); + return emit; +} + +void emit_inline_xtensa_free(emit_inline_asm_t *emit) { + m_del(qstr, emit->label_lookup, emit->max_num_labels); + mp_asm_base_deinit(&emit->as.base, false); + m_del_obj(emit_inline_asm_t, emit); +} + +STATIC void emit_inline_xtensa_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) { + emit->pass = pass; + emit->error_slot = error_slot; + if (emit->pass == MP_PASS_CODE_SIZE) { + memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr)); + } + mp_asm_base_start_pass(&emit->as.base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE); + asm_xtensa_entry(&emit->as, 0); +} + +STATIC void emit_inline_xtensa_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) { + asm_xtensa_exit(&emit->as); + asm_xtensa_end_pass(&emit->as); +} + +STATIC mp_uint_t emit_inline_xtensa_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) { + if (n_params > 4) { + emit_inline_xtensa_error_msg(emit, "can only have up to 4 parameters to Xtensa assembly"); + return 0; + } + for (mp_uint_t i = 0; i < n_params; i++) { + if (!MP_PARSE_NODE_IS_ID(pn_params[i])) { + emit_inline_xtensa_error_msg(emit, "parameters must be registers in sequence a2 to a5"); + return 0; + } + const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i])); + if (!(strlen(p) == 2 && p[0] == 'a' && p[1] == '2' + i)) { + emit_inline_xtensa_error_msg(emit, "parameters must be registers in sequence a2 to a5"); + return 0; + } + } + return n_params; +} + +STATIC bool emit_inline_xtensa_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) { + assert(label_num < emit->max_num_labels); + if (emit->pass == MP_PASS_CODE_SIZE) { + // check for duplicate label on first pass + for (uint i = 0; i < emit->max_num_labels; i++) { + if (emit->label_lookup[i] == label_id) { + return false; + } + } + } + emit->label_lookup[label_num] = label_id; + mp_asm_base_label_assign(&emit->as.base, label_num); + return true; +} + +typedef struct _reg_name_t { byte reg; byte name[3]; } reg_name_t; +STATIC const reg_name_t reg_name_table[] = { + {0, "a0\0"}, + {1, "a1\0"}, + {2, "a2\0"}, + {3, "a3\0"}, + {4, "a4\0"}, + {5, "a5\0"}, + {6, "a6\0"}, + {7, "a7\0"}, + {8, "a8\0"}, + {9, "a9\0"}, + {10, "a10"}, + {11, "a11"}, + {12, "a12"}, + {13, "a13"}, + {14, "a14"}, + {15, "a15"}, +}; + +// return empty string in case of error, so we can attempt to parse the string +// without a special check if it was in fact a string +STATIC const char *get_arg_str(mp_parse_node_t pn) { + if (MP_PARSE_NODE_IS_ID(pn)) { + qstr qst = MP_PARSE_NODE_LEAF_ARG(pn); + 
return qstr_str(qst); + } else { + return ""; + } +} + +STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) { + const char *reg_str = get_arg_str(pn); + for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(reg_name_table); i++) { + const reg_name_t *r = &reg_name_table[i]; + if (reg_str[0] == r->name[0] + && reg_str[1] == r->name[1] + && reg_str[2] == r->name[2] + && (reg_str[2] == '\0' || reg_str[3] == '\0')) { + return r->reg; + } + } + emit_inline_xtensa_error_exc(emit, + mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, + "'%s' expects a register", op)); + return 0; +} + +STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, int min, int max) { + mp_obj_t o; + if (!mp_parse_node_get_int_maybe(pn, &o)) { + emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects an integer", op)); + return 0; + } + uint32_t i = mp_obj_get_int_truncated(o); + if (min != max && ((int)i < min || (int)i > max)) { + emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer %d is not within range %d..%d", op, i, min, max)); + return 0; + } + return i; +} + +STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) { + if (!MP_PARSE_NODE_IS_ID(pn)) { + emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects a label", op)); + return 0; + } + qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn); + for (uint i = 0; i < emit->max_num_labels; i++) { + if (emit->label_lookup[i] == label_qstr) { + return i; + } + } + // only need to have the labels on the last pass + if (emit->pass == MP_PASS_EMIT) { + emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "label '%q' not defined", label_qstr)); + } + return 0; +} + +#define RRR (0) +#define RRI8 (1) +#define RRI8_B (2) + +typedef struct _opcode_table_3arg_t { + uint16_t name; // actually a qstr, which should fit in 16 bits + uint8_t type; + uint8_t a0 : 4; + uint8_t a1 : 4; +} opcode_table_3arg_t; + +STATIC const opcode_table_3arg_t opcode_table_3arg[] = { + // arithmetic opcodes: reg, reg, reg + {MP_QSTR_and_, RRR, 0, 1}, + {MP_QSTR_or_, RRR, 0, 2}, + {MP_QSTR_xor, RRR, 0, 3}, + {MP_QSTR_add, RRR, 0, 8}, + {MP_QSTR_sub, RRR, 0, 12}, + {MP_QSTR_mull, RRR, 2, 8}, + + // load/store/addi opcodes: reg, reg, imm + // upper nibble of type encodes the range of the immediate arg + {MP_QSTR_l8ui, RRI8 | 0x10, 2, 0}, + {MP_QSTR_l16ui, RRI8 | 0x30, 2, 1}, + {MP_QSTR_l32i, RRI8 | 0x50, 2, 2}, + {MP_QSTR_s8i, RRI8 | 0x10, 2, 4}, + {MP_QSTR_s16i, RRI8 | 0x30, 2, 5}, + {MP_QSTR_s32i, RRI8 | 0x50, 2, 6}, + {MP_QSTR_l16si, RRI8 | 0x30, 2, 9}, + {MP_QSTR_addi, RRI8 | 0x00, 2, 12}, + + // branch opcodes: reg, reg, label + {MP_QSTR_ball, RRI8_B, ASM_XTENSA_CC_ALL, 0}, + {MP_QSTR_bany, RRI8_B, ASM_XTENSA_CC_ANY, 0}, + {MP_QSTR_bbc, RRI8_B, ASM_XTENSA_CC_BC, 0}, + {MP_QSTR_bbs, RRI8_B, ASM_XTENSA_CC_BS, 0}, + {MP_QSTR_beq, RRI8_B, ASM_XTENSA_CC_EQ, 0}, + {MP_QSTR_bge, RRI8_B, ASM_XTENSA_CC_GE, 0}, + {MP_QSTR_bgeu, RRI8_B, ASM_XTENSA_CC_GEU, 0}, + {MP_QSTR_blt, RRI8_B, ASM_XTENSA_CC_LT, 0}, + {MP_QSTR_bnall, RRI8_B, ASM_XTENSA_CC_NALL, 0}, + {MP_QSTR_bne, RRI8_B, ASM_XTENSA_CC_NE, 0}, + {MP_QSTR_bnone, RRI8_B, ASM_XTENSA_CC_NONE, 0}, +}; + +STATIC void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) { + size_t op_len; + const char *op_str = (const char*)qstr_data(op, &op_len); + + if (n_args == 0) { + 
if (op == MP_QSTR_ret_n) { + asm_xtensa_op_ret_n(&emit->as); + } else { + goto unknown_op; + } + + } else if (n_args == 1) { + if (op == MP_QSTR_callx0) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + asm_xtensa_op_callx0(&emit->as, r0); + } else if (op == MP_QSTR_j) { + int label = get_arg_label(emit, op_str, pn_args[0]); + asm_xtensa_j_label(&emit->as, label); + } else if (op == MP_QSTR_jx) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + asm_xtensa_op_jx(&emit->as, r0); + } else { + goto unknown_op; + } + + } else if (n_args == 2) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + if (op == MP_QSTR_beqz) { + int label = get_arg_label(emit, op_str, pn_args[1]); + asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_EQ, r0, label); + } else if (op == MP_QSTR_bnez) { + int label = get_arg_label(emit, op_str, pn_args[1]); + asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_NE, r0, label); + } else if (op == MP_QSTR_mov || op == MP_QSTR_mov_n) { + // we emit mov.n for both "mov" and "mov_n" opcodes + uint r1 = get_arg_reg(emit, op_str, pn_args[1]); + asm_xtensa_op_mov_n(&emit->as, r0, r1); + } else if (op == MP_QSTR_movi) { + // for convenience we emit l32r if the integer doesn't fit in movi + uint32_t imm = get_arg_i(emit, op_str, pn_args[1], 0, 0); + asm_xtensa_mov_reg_i32(&emit->as, r0, imm); + } else { + goto unknown_op; + } + + } else if (n_args == 3) { + // search table for 3 arg instructions + for (uint i = 0; i < MP_ARRAY_SIZE(opcode_table_3arg); i++) { + const opcode_table_3arg_t *o = &opcode_table_3arg[i]; + if (op == o->name) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + uint r1 = get_arg_reg(emit, op_str, pn_args[1]); + if (o->type == RRR) { + uint r2 = get_arg_reg(emit, op_str, pn_args[2]); + asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, o->a0, o->a1, r0, r1, r2)); + } else if (o->type == RRI8_B) { + int label = get_arg_label(emit, op_str, pn_args[2]); + asm_xtensa_bcc_reg_reg_label(&emit->as, o->a0, r0, r1, label); + } else { + int shift, min, max; + if ((o->type & 0xf0) == 0) { + shift = 0; + min = -128; + max = 127; + } else { + shift = (o->type & 0xf0) >> 5; + min = 0; + max = 0xff << shift; + } + uint32_t imm = get_arg_i(emit, op_str, pn_args[2], min, max); + asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRI8(o->a0, o->a1, r1, r0, (imm >> shift) & 0xff)); + } + return; + } + } + goto unknown_op; + + } else { + goto unknown_op; + } + + return; + +unknown_op: + emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "unsupported Xtensa instruction '%s' with %d arguments", op_str, n_args)); + return; + + /* +branch_not_in_range: + emit_inline_xtensa_error_msg(emit, "branch not in range"); + return; + */ +} + +const emit_inline_asm_method_table_t emit_inline_xtensa_method_table = { + emit_inline_xtensa_start_pass, + emit_inline_xtensa_end_pass, + emit_inline_xtensa_count_params, + emit_inline_xtensa_label, + emit_inline_xtensa_op, +}; + +#endif // MICROPY_EMIT_INLINE_XTENSA diff --git a/py/emitnative.c b/py/emitnative.c index b54f263d60..2e18d26b4a 100644 --- a/py/emitnative.c +++ b/py/emitnative.c @@ -61,111 +61,22 @@ #if (MICROPY_EMIT_X64 && N_X64) \ || (MICROPY_EMIT_X86 && N_X86) \ || (MICROPY_EMIT_THUMB && N_THUMB) \ - || (MICROPY_EMIT_ARM && N_ARM) + || (MICROPY_EMIT_ARM && N_ARM) \ + || (MICROPY_EMIT_XTENSA && N_XTENSA) \ + +// this is defined so that the assembler exports generic assembler API macros +#define GENERIC_ASM_API (1) #if N_X64 // x64 specific stuff - #include "py/asmx64.h" - #define 
EXPORT_FUN(name) emit_native_x64_##name -#define ASM_WORD_SIZE (8) - -#define REG_RET ASM_X64_REG_RAX -#define REG_ARG_1 ASM_X64_REG_RDI -#define REG_ARG_2 ASM_X64_REG_RSI -#define REG_ARG_3 ASM_X64_REG_RDX -#define REG_ARG_4 ASM_X64_REG_RCX -#define REG_ARG_5 ASM_X64_REG_R08 - -// caller-save -#define REG_TEMP0 ASM_X64_REG_RAX -#define REG_TEMP1 ASM_X64_REG_RDI -#define REG_TEMP2 ASM_X64_REG_RSI - -// callee-save -#define REG_LOCAL_1 ASM_X64_REG_RBX -#define REG_LOCAL_2 ASM_X64_REG_R12 -#define REG_LOCAL_3 ASM_X64_REG_R13 -#define REG_LOCAL_NUM (3) - -#define ASM_PASS_COMPUTE ASM_X64_PASS_COMPUTE -#define ASM_PASS_EMIT ASM_X64_PASS_EMIT - -#define ASM_T asm_x64_t -#define ASM_NEW asm_x64_new -#define ASM_FREE asm_x64_free -#define ASM_GET_CODE asm_x64_get_code -#define ASM_GET_CODE_POS asm_x64_get_code_pos -#define ASM_GET_CODE_SIZE asm_x64_get_code_size -#define ASM_START_PASS asm_x64_start_pass -#define ASM_END_PASS asm_x64_end_pass -#define ASM_ENTRY asm_x64_entry -#define ASM_EXIT asm_x64_exit - -#define ASM_ALIGN asm_x64_align -#define ASM_DATA asm_x64_data - -#define ASM_LABEL_ASSIGN asm_x64_label_assign -#define ASM_JUMP asm_x64_jmp_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ - do { \ - asm_x64_test_r8_with_r8(as, reg, reg); \ - asm_x64_jcc_label(as, ASM_X64_CC_JZ, label); \ - } while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ - do { \ - asm_x64_test_r8_with_r8(as, reg, reg); \ - asm_x64_jcc_label(as, ASM_X64_CC_JNZ, label); \ - } while (0) -#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ - do { \ - asm_x64_cmp_r64_with_r64(as, reg1, reg2); \ - asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \ - } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_x64_call_ind(as, ptr, ASM_X64_REG_RAX) - -#define ASM_MOV_REG_TO_LOCAL asm_x64_mov_r64_to_local -#define ASM_MOV_IMM_TO_REG asm_x64_mov_i64_to_r64_optimised -#define ASM_MOV_ALIGNED_IMM_TO_REG asm_x64_mov_i64_to_r64_aligned -#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ - do { \ - asm_x64_mov_i64_to_r64_optimised(as, (imm), (reg_temp)); \ - asm_x64_mov_r64_to_local(as, (reg_temp), (local_num)); \ - } while (false) -#define ASM_MOV_LOCAL_TO_REG asm_x64_mov_local_to_r64 -#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src)) -#define ASM_MOV_LOCAL_ADDR_TO_REG asm_x64_mov_local_addr_to_r64 - -#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg)) -#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg)) -#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src)) -#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src)) -#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src)) -#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src)) -#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src)) -#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src)) - -#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest)) -#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest)) -#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest)) -#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest)) -#define 
ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem32_to_r64zx((as), (reg_base), 0, (reg_dest)) - -#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0) -#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 8 * (word_offset)) -#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0) -#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0) -#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x64_mov_r32_to_mem32((as), (reg_src), (reg_base), 0) - #elif N_X86 // x86 specific stuff -#include "py/asmx86.h" - STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = { [MP_F_CONVERT_OBJ_TO_NATIVE] = 2, [MP_F_CONVERT_NATIVE_TO_OBJ] = 2, @@ -214,285 +125,26 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = { [MP_F_SETUP_CODE_STATE] = 5, }; +#include "py/asmx86.h" #define EXPORT_FUN(name) emit_native_x86_##name -#define ASM_WORD_SIZE (4) - -#define REG_RET ASM_X86_REG_EAX -#define REG_ARG_1 ASM_X86_REG_ARG_1 -#define REG_ARG_2 ASM_X86_REG_ARG_2 -#define REG_ARG_3 ASM_X86_REG_ARG_3 -#define REG_ARG_4 ASM_X86_REG_ARG_4 -#define REG_ARG_5 ASM_X86_REG_ARG_5 - -// caller-save, so can be used as temporaries -#define REG_TEMP0 ASM_X86_REG_EAX -#define REG_TEMP1 ASM_X86_REG_ECX -#define REG_TEMP2 ASM_X86_REG_EDX - -// callee-save, so can be used as locals -#define REG_LOCAL_1 ASM_X86_REG_EBX -#define REG_LOCAL_2 ASM_X86_REG_ESI -#define REG_LOCAL_3 ASM_X86_REG_EDI -#define REG_LOCAL_NUM (3) - -#define ASM_PASS_COMPUTE ASM_X86_PASS_COMPUTE -#define ASM_PASS_EMIT ASM_X86_PASS_EMIT - -#define ASM_T asm_x86_t -#define ASM_NEW asm_x86_new -#define ASM_FREE asm_x86_free -#define ASM_GET_CODE asm_x86_get_code -#define ASM_GET_CODE_POS asm_x86_get_code_pos -#define ASM_GET_CODE_SIZE asm_x86_get_code_size -#define ASM_START_PASS asm_x86_start_pass -#define ASM_END_PASS asm_x86_end_pass -#define ASM_ENTRY asm_x86_entry -#define ASM_EXIT asm_x86_exit - -#define ASM_ALIGN asm_x86_align -#define ASM_DATA asm_x86_data - -#define ASM_LABEL_ASSIGN asm_x86_label_assign -#define ASM_JUMP asm_x86_jmp_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ - do { \ - asm_x86_test_r8_with_r8(as, reg, reg); \ - asm_x86_jcc_label(as, ASM_X86_CC_JZ, label); \ - } while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ - do { \ - asm_x86_test_r8_with_r8(as, reg, reg); \ - asm_x86_jcc_label(as, ASM_X86_CC_JNZ, label); \ - } while (0) -#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ - do { \ - asm_x86_cmp_r32_with_r32(as, reg1, reg2); \ - asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \ - } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_x86_call_ind(as, ptr, mp_f_n_args[idx], ASM_X86_REG_EAX) - -#define ASM_MOV_REG_TO_LOCAL asm_x86_mov_r32_to_local -#define ASM_MOV_IMM_TO_REG asm_x86_mov_i32_to_r32 -#define ASM_MOV_ALIGNED_IMM_TO_REG asm_x86_mov_i32_to_r32_aligned -#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ - do { \ - asm_x86_mov_i32_to_r32(as, (imm), (reg_temp)); \ - asm_x86_mov_r32_to_local(as, (reg_temp), (local_num)); \ - } while (false) -#define ASM_MOV_LOCAL_TO_REG asm_x86_mov_local_to_r32 -#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x86_mov_r32_r32((as), (reg_dest), (reg_src)) -#define ASM_MOV_LOCAL_ADDR_TO_REG asm_x86_mov_local_addr_to_r32 - -#define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg)) -#define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg)) -#define 
ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src)) -#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src)) -#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src)) -#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src)) -#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src)) -#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src)) - -#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest)) -#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest)) -#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest)) -#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest)) -#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest)) - -#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0) -#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset)) -#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0) -#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0) -#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0) - #elif N_THUMB // thumb specific stuff - #include "py/asmthumb.h" - #define EXPORT_FUN(name) emit_native_thumb_##name -#define ASM_WORD_SIZE (4) - -#define REG_RET ASM_THUMB_REG_R0 -#define REG_ARG_1 ASM_THUMB_REG_R0 -#define REG_ARG_2 ASM_THUMB_REG_R1 -#define REG_ARG_3 ASM_THUMB_REG_R2 -#define REG_ARG_4 ASM_THUMB_REG_R3 -// rest of args go on stack - -#define REG_TEMP0 ASM_THUMB_REG_R0 -#define REG_TEMP1 ASM_THUMB_REG_R1 -#define REG_TEMP2 ASM_THUMB_REG_R2 - -#define REG_LOCAL_1 ASM_THUMB_REG_R4 -#define REG_LOCAL_2 ASM_THUMB_REG_R5 -#define REG_LOCAL_3 ASM_THUMB_REG_R6 -#define REG_LOCAL_NUM (3) - -#define ASM_PASS_COMPUTE ASM_THUMB_PASS_COMPUTE -#define ASM_PASS_EMIT ASM_THUMB_PASS_EMIT - -#define ASM_T asm_thumb_t -#define ASM_NEW asm_thumb_new -#define ASM_FREE asm_thumb_free -#define ASM_GET_CODE asm_thumb_get_code -#define ASM_GET_CODE_POS asm_thumb_get_code_pos -#define ASM_GET_CODE_SIZE asm_thumb_get_code_size -#define ASM_START_PASS asm_thumb_start_pass -#define ASM_END_PASS asm_thumb_end_pass -#define ASM_ENTRY asm_thumb_entry -#define ASM_EXIT asm_thumb_exit - -#define ASM_ALIGN asm_thumb_align -#define ASM_DATA asm_thumb_data - -#define ASM_LABEL_ASSIGN asm_thumb_label_assign -#define ASM_JUMP asm_thumb_b_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ - do { \ - asm_thumb_cmp_rlo_i8(as, reg, 0); \ - asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \ - } while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ - do { \ - asm_thumb_cmp_rlo_i8(as, reg, 0); \ - asm_thumb_bcc_label(as, ASM_THUMB_CC_NE, label); \ - } while (0) -#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ - do { \ - asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \ - asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \ - } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_thumb_bl_ind(as, ptr, idx, 
ASM_THUMB_REG_R3) - -#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_thumb_mov_local_reg(as, (local_num), (reg)) -#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_optimised(as, (reg), (imm)) -#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_aligned(as, (reg), (imm)) -#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ - do { \ - asm_thumb_mov_reg_i32_optimised(as, (reg_temp), (imm)); \ - asm_thumb_mov_local_reg(as, (local_num), (reg_temp)); \ - } while (false) -#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local(as, (reg), (local_num)) -#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src)) -#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local_addr(as, (reg), (local_num)) - -#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift)) -#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift)) -#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src)) -#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src)) -#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src)) -#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src)) - -#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) -#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), (word_offset)) -#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) -#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) -#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) - -#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0) -#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), (word_offset)) -#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_thumb_strb_rlo_rlo_i5((as), (reg_src), (reg_base), 0) -#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_thumb_strh_rlo_rlo_i5((as), (reg_src), (reg_base), 0) -#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0) - #elif N_ARM // ARM specific stuff - #include "py/asmarm.h" - -#define ASM_WORD_SIZE (4) - #define EXPORT_FUN(name) emit_native_arm_##name -#define REG_RET ASM_ARM_REG_R0 -#define REG_ARG_1 ASM_ARM_REG_R0 -#define REG_ARG_2 ASM_ARM_REG_R1 -#define REG_ARG_3 ASM_ARM_REG_R2 -#define REG_ARG_4 ASM_ARM_REG_R3 - -#define REG_TEMP0 ASM_ARM_REG_R0 -#define REG_TEMP1 ASM_ARM_REG_R1 -#define REG_TEMP2 ASM_ARM_REG_R2 - -#define REG_LOCAL_1 ASM_ARM_REG_R4 -#define REG_LOCAL_2 ASM_ARM_REG_R5 -#define REG_LOCAL_3 ASM_ARM_REG_R6 -#define REG_LOCAL_NUM (3) - -#define ASM_PASS_COMPUTE ASM_ARM_PASS_COMPUTE -#define 
ASM_PASS_EMIT ASM_ARM_PASS_EMIT - -#define ASM_T asm_arm_t -#define ASM_NEW asm_arm_new -#define ASM_FREE asm_arm_free -#define ASM_GET_CODE asm_arm_get_code -#define ASM_GET_CODE_POS asm_arm_get_code_pos -#define ASM_GET_CODE_SIZE asm_arm_get_code_size -#define ASM_START_PASS asm_arm_start_pass -#define ASM_END_PASS asm_arm_end_pass -#define ASM_ENTRY asm_arm_entry -#define ASM_EXIT asm_arm_exit - -#define ASM_ALIGN asm_arm_align -#define ASM_DATA asm_arm_data - -#define ASM_LABEL_ASSIGN asm_arm_label_assign -#define ASM_JUMP asm_arm_b_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ - do { \ - asm_arm_cmp_reg_i8(as, reg, 0); \ - asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \ - } while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ - do { \ - asm_arm_cmp_reg_i8(as, reg, 0); \ - asm_arm_bcc_label(as, ASM_ARM_CC_NE, label); \ - } while (0) -#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ - do { \ - asm_arm_cmp_reg_reg(as, reg1, reg2); \ - asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \ - } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_arm_bl_ind(as, ptr, idx, ASM_ARM_REG_R3) - -#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_arm_mov_local_reg(as, (local_num), (reg)) -#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm)) -#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm)) -#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \ - do { \ - asm_arm_mov_reg_i32(as, (reg_temp), (imm)); \ - asm_arm_mov_local_reg(as, (local_num), (reg_temp)); \ - } while (false) -#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_arm_mov_reg_local(as, (reg), (local_num)) -#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_arm_mov_reg_reg((as), (reg_dest), (reg_src)) -#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_arm_mov_reg_local_addr(as, (reg), (local_num)) - -#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift)) -#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift)) -#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_arm_mul_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src)) - -#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0) -#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset)) -#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base)) -#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base)) -#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0) - -#define ASM_STORE_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0) -#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_str_reg_reg((as), (reg_dest), (reg_base), 4 * 
(word_offset)) -#define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base)) -#define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base)) -#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0) +#elif N_XTENSA + +// Xtensa specific stuff +#include "py/asmxtensa.h" +#define EXPORT_FUN(name) emit_native_xtensa_##name #else @@ -582,12 +234,14 @@ struct _emit_t { emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, mp_uint_t max_num_labels) { emit_t *emit = m_new0(emit_t, 1); emit->error_slot = error_slot; - emit->as = ASM_NEW(max_num_labels); + emit->as = m_new0(ASM_T, 1); + mp_asm_base_init(&emit->as->base, max_num_labels); return emit; } void EXPORT_FUN(free)(emit_t *emit) { - ASM_FREE(emit->as, false); + mp_asm_base_deinit(&emit->as->base, false); + m_del_obj(ASM_T, emit->as); m_del(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc); m_del(stack_info_t, emit->stack_info, emit->stack_info_alloc); m_del_obj(emit_t, emit); @@ -679,7 +333,7 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop emit->stack_info[i].vtype = VTYPE_UNBOUND; } - ASM_START_PASS(emit->as, pass == MP_PASS_EMIT ? ASM_PASS_EMIT : ASM_PASS_COMPUTE); + mp_asm_base_start_pass(&emit->as->base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE); // generate code for entry to function @@ -824,21 +478,21 @@ STATIC void emit_native_end_pass(emit_t *emit) { } if (!emit->do_viper_types) { - emit->prelude_offset = ASM_GET_CODE_POS(emit->as); - ASM_DATA(emit->as, 1, emit->scope->scope_flags); - ASM_DATA(emit->as, 1, emit->scope->num_pos_args); - ASM_DATA(emit->as, 1, emit->scope->num_kwonly_args); - ASM_DATA(emit->as, 1, emit->scope->num_def_pos_args); + emit->prelude_offset = mp_asm_base_get_code_pos(&emit->as->base); + mp_asm_base_data(&emit->as->base, 1, emit->scope->scope_flags); + mp_asm_base_data(&emit->as->base, 1, emit->scope->num_pos_args); + mp_asm_base_data(&emit->as->base, 1, emit->scope->num_kwonly_args); + mp_asm_base_data(&emit->as->base, 1, emit->scope->num_def_pos_args); // write code info #if MICROPY_PERSISTENT_CODE - ASM_DATA(emit->as, 1, 5); - ASM_DATA(emit->as, 1, emit->scope->simple_name); - ASM_DATA(emit->as, 1, emit->scope->simple_name >> 8); - ASM_DATA(emit->as, 1, emit->scope->source_file); - ASM_DATA(emit->as, 1, emit->scope->source_file >> 8); + mp_asm_base_data(&emit->as->base, 1, 5); + mp_asm_base_data(&emit->as->base, 1, emit->scope->simple_name); + mp_asm_base_data(&emit->as->base, 1, emit->scope->simple_name >> 8); + mp_asm_base_data(&emit->as->base, 1, emit->scope->source_file); + mp_asm_base_data(&emit->as->base, 1, emit->scope->source_file >> 8); #else - ASM_DATA(emit->as, 1, 1); + mp_asm_base_data(&emit->as->base, 1, 1); #endif // bytecode prelude: initialise closed over variables @@ -846,13 +500,13 @@ STATIC void emit_native_end_pass(emit_t *emit) { id_info_t *id = &emit->scope->id_info[i]; if (id->kind == ID_INFO_KIND_CELL) { assert(id->local_num < 255); - ASM_DATA(emit->as, 1, id->local_num); // write the local which should be converted to a cell + mp_asm_base_data(&emit->as->base, 1, id->local_num); // write the local which should be converted to a cell } } - ASM_DATA(emit->as, 1, 255); // end of list sentinel + mp_asm_base_data(&emit->as->base, 1, 255); // end of list sentinel - ASM_ALIGN(emit->as, ASM_WORD_SIZE); - emit->const_table_offset = ASM_GET_CODE_POS(emit->as); + mp_asm_base_align(&emit->as->base, 
ASM_WORD_SIZE); + emit->const_table_offset = mp_asm_base_get_code_pos(&emit->as->base); // write argument names as qstr objects // see comment in corresponding part of emitbc.c about the logic here @@ -865,7 +519,7 @@ STATIC void emit_native_end_pass(emit_t *emit) { break; } } - ASM_DATA(emit->as, ASM_WORD_SIZE, (mp_uint_t)MP_OBJ_NEW_QSTR(qst)); + mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (mp_uint_t)MP_OBJ_NEW_QSTR(qst)); } } @@ -878,8 +532,8 @@ STATIC void emit_native_end_pass(emit_t *emit) { } if (emit->pass == MP_PASS_EMIT) { - void *f = ASM_GET_CODE(emit->as); - mp_uint_t f_len = ASM_GET_CODE_SIZE(emit->as); + void *f = mp_asm_base_get_code(&emit->as->base); + mp_uint_t f_len = mp_asm_base_get_code_size(&emit->as->base); // compute type signature // note that the lower 4 bits of a vtype are tho correct MP_NATIVE_TYPE_xxx @@ -1255,17 +909,41 @@ STATIC void emit_native_label_assign(emit_t *emit, mp_uint_t l) { emit_native_pre(emit); // need to commit stack because we can jump here from elsewhere need_stack_settled(emit); - ASM_LABEL_ASSIGN(emit->as, l); + mp_asm_base_label_assign(&emit->as->base, l); emit_post(emit); } STATIC void emit_native_import_name(emit_t *emit, qstr qst) { DEBUG_printf("import_name %s\n", qstr_str(qst)); - vtype_kind_t vtype_fromlist; - vtype_kind_t vtype_level; - emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3); // arg2 = fromlist, arg3 = level - assert(vtype_fromlist == VTYPE_PYOBJ); - assert(vtype_level == VTYPE_PYOBJ); + + // get arguments from stack: arg2 = fromlist, arg3 = level + // if using viper types these arguments must be converted to proper objects + if (emit->do_viper_types) { + // fromlist should be None or a tuple + stack_info_t *top = peek_stack(emit, 0); + if (top->vtype == VTYPE_PTR_NONE) { + emit_pre_pop_discard(emit); + ASM_MOV_IMM_TO_REG(emit->as, (mp_uint_t)mp_const_none, REG_ARG_2); + } else { + vtype_kind_t vtype_fromlist; + emit_pre_pop_reg(emit, &vtype_fromlist, REG_ARG_2); + assert(vtype_fromlist == VTYPE_PYOBJ); + } + + // level argument should be an immediate integer + top = peek_stack(emit, 0); + assert(top->vtype == VTYPE_INT && top->kind == STACK_IMM); + ASM_MOV_IMM_TO_REG(emit->as, (mp_uint_t)MP_OBJ_NEW_SMALL_INT(top->data.u_imm), REG_ARG_3); + emit_pre_pop_discard(emit); + + } else { + vtype_kind_t vtype_fromlist; + vtype_kind_t vtype_level; + emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3); + assert(vtype_fromlist == VTYPE_PYOBJ); + assert(vtype_level == VTYPE_PYOBJ); + } + emit_call_with_imm_arg(emit, MP_F_IMPORT_NAME, qst, REG_ARG_1); // arg1 = import name emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -2294,6 +1972,21 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) { ASM_ARM_CC_NE, }; asm_arm_setcc_reg(emit->as, REG_RET, ccs[op - MP_BINARY_OP_LESS]); + #elif N_XTENSA + static uint8_t ccs[6] = { + ASM_XTENSA_CC_LT, + 0x80 | ASM_XTENSA_CC_LT, // for GT we'll swap args + ASM_XTENSA_CC_EQ, + 0x80 | ASM_XTENSA_CC_GE, // for LE we'll swap args + ASM_XTENSA_CC_GE, + ASM_XTENSA_CC_NE, + }; + uint8_t cc = ccs[op - MP_BINARY_OP_LESS]; + if ((cc & 0x80) == 0) { + asm_xtensa_setcc_reg_reg_reg(emit->as, cc, REG_RET, REG_ARG_2, reg_rhs); + } else { + asm_xtensa_setcc_reg_reg_reg(emit->as, cc & ~0x80, REG_RET, reg_rhs, REG_ARG_2); + } #else #error not implemented #endif diff --git a/py/lexer.c b/py/lexer.c index 4a7c8f580a..458fba0900 100644 --- a/py/lexer.c +++ b/py/lexer.c @@ -28,6 +28,7 @@ #include <assert.h> #include "py/mpstate.h" 
+#include "py/reader.h" #include "py/lexer.h" #include "py/runtime.h" @@ -51,6 +52,7 @@ STATIC bool str_strn_equal(const char *str, const char *strn, mp_uint_t len) { return i == len && *str == 0; } +#define MP_LEXER_EOF ((unichar)MP_READER_EOF) #define CUR_CHAR(lex) ((lex)->chr0) STATIC bool is_end(mp_lexer_t *lex) { @@ -126,10 +128,6 @@ STATIC bool is_tail_of_identifier(mp_lexer_t *lex) { } STATIC void next_char(mp_lexer_t *lex) { - if (lex->chr0 == MP_LEXER_EOF) { - return; - } - if (lex->chr0 == '\n') { // a new line ++lex->line; @@ -144,7 +142,7 @@ STATIC void next_char(mp_lexer_t *lex) { lex->chr0 = lex->chr1; lex->chr1 = lex->chr2; - lex->chr2 = lex->stream_next_byte(lex->stream_data); + lex->chr2 = lex->reader.readbyte(lex->reader.data); if (lex->chr0 == '\r') { // CR is a new line, converted to LF @@ -152,7 +150,7 @@ STATIC void next_char(mp_lexer_t *lex) { if (lex->chr1 == '\n') { // CR LF is a single new line lex->chr1 = lex->chr2; - lex->chr2 = lex->stream_next_byte(lex->stream_data); + lex->chr2 = lex->reader.readbyte(lex->reader.data); } } @@ -428,8 +426,9 @@ STATIC void mp_lexer_next_token_into(mp_lexer_t *lex, bool first_token) { vstr_add_char(&lex->vstr, '\\'); } else { switch (c) { - case MP_LEXER_EOF: break; // TODO a proper error message? - case '\n': c = MP_LEXER_EOF; break; // TODO check this works correctly (we are supposed to ignore it + // note: "c" can never be MP_LEXER_EOF because next_char + // always inserts a newline at the end of the input stream + case '\n': c = MP_LEXER_EOF; break; // backslash escape the newline, just ignore it case '\\': break; case '\'': break; case '"': break; @@ -688,21 +687,17 @@ STATIC void mp_lexer_next_token_into(mp_lexer_t *lex, bool first_token) { } } -mp_lexer_t *mp_lexer_new(qstr src_name, void *stream_data, mp_lexer_stream_next_byte_t stream_next_byte, mp_lexer_stream_close_t stream_close) { +mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader) { mp_lexer_t *lex = m_new_obj_maybe(mp_lexer_t); // check for memory allocation error if (lex == NULL) { - if (stream_close) { - stream_close(stream_data); - } + reader.close(reader.data); return NULL; } lex->source_name = src_name; - lex->stream_data = stream_data; - lex->stream_next_byte = stream_next_byte; - lex->stream_close = stream_close; + lex->reader = reader; lex->line = 1; lex->column = 1; lex->emit_dent = 0; @@ -723,9 +718,9 @@ mp_lexer_t *mp_lexer_new(qstr src_name, void *stream_data, mp_lexer_stream_next_ lex->indent_level[0] = 0; // preload characters - lex->chr0 = stream_next_byte(stream_data); - lex->chr1 = stream_next_byte(stream_data); - lex->chr2 = stream_next_byte(stream_data); + lex->chr0 = reader.readbyte(reader.data); + lex->chr1 = reader.readbyte(reader.data); + lex->chr2 = reader.readbyte(reader.data); // if input stream is 0, 1 or 2 characters long and doesn't end in a newline, then insert a newline at the end if (lex->chr0 == MP_LEXER_EOF) { @@ -750,11 +745,43 @@ mp_lexer_t *mp_lexer_new(qstr src_name, void *stream_data, mp_lexer_stream_next_ return lex; } +mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len) { + mp_reader_t reader; + if (!mp_reader_new_mem(&reader, (const byte*)str, len, free_len)) { + return NULL; + } + return mp_lexer_new(src_name, reader); +} + +#if MICROPY_READER_POSIX || MICROPY_READER_FATFS + +mp_lexer_t *mp_lexer_new_from_file(const char *filename) { + mp_reader_t reader; + int ret = mp_reader_new_file(&reader, filename); + if (ret != 0) { + return NULL; + } + return 
mp_lexer_new(qstr_from_str(filename), reader); +} + +#if MICROPY_HELPER_LEXER_UNIX + +mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd) { + mp_reader_t reader; + int ret = mp_reader_new_file_from_fd(&reader, fd, close_fd); + if (ret != 0) { + return NULL; + } + return mp_lexer_new(filename, reader); +} + +#endif + +#endif + void mp_lexer_free(mp_lexer_t *lex) { if (lex) { - if (lex->stream_close) { - lex->stream_close(lex->stream_data); - } + lex->reader.close(lex->reader.data); vstr_clear(&lex->vstr); m_del(uint16_t, lex->indent_level, lex->alloc_indent_level); m_del_obj(mp_lexer_t, lex); @@ -765,7 +792,9 @@ void mp_lexer_to_next(mp_lexer_t *lex) { mp_lexer_next_token_into(lex, false); } -#if MICROPY_DEBUG_PRINTERS +#if 0 +// This function is used to print the current token and should only be +// needed to debug the lexer, so it's not available via a config option. void mp_lexer_show_token(const mp_lexer_t *lex) { printf("(" UINT_FMT ":" UINT_FMT ") kind:%u str:%p len:%zu", lex->tok_line, lex->tok_column, lex->tok_kind, lex->vstr.buf, lex->vstr.len); if (lex->vstr.len > 0) { diff --git a/py/lexer.h b/py/lexer.h index 463be5fffc..32aef96266 100644 --- a/py/lexer.h +++ b/py/lexer.h @@ -30,6 +30,7 @@ #include "py/mpconfig.h" #include "py/qstr.h" +#include "py/reader.h" /* lexer.h -- simple tokeniser for Micro Python * @@ -142,21 +143,11 @@ typedef enum _mp_token_kind_t { MP_TOKEN_DEL_MINUS_MORE, } mp_token_kind_t; -// the next-byte function must return the next byte in the stream -// it must return MP_LEXER_EOF if end of stream -// it can be called again after returning MP_LEXER_EOF, and in that case must return MP_LEXER_EOF -#define MP_LEXER_EOF ((unichar)(-1)) - -typedef mp_uint_t (*mp_lexer_stream_next_byte_t)(void*); -typedef void (*mp_lexer_stream_close_t)(void*); - // this data structure is exposed for efficiency // public members are: source_name, tok_line, tok_column, tok_kind, vstr typedef struct _mp_lexer_t { qstr source_name; // name of source - void *stream_data; // data for stream - mp_lexer_stream_next_byte_t stream_next_byte; // stream callback to get next byte - mp_lexer_stream_close_t stream_close; // stream callback to free + mp_reader_t reader; // stream source unichar chr0, chr1, chr2; // current cached characters from source @@ -176,12 +167,11 @@ typedef struct _mp_lexer_t { vstr_t vstr; // token data } mp_lexer_t; -mp_lexer_t *mp_lexer_new(qstr src_name, void *stream_data, mp_lexer_stream_next_byte_t stream_next_byte, mp_lexer_stream_close_t stream_close); +mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader); mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len); void mp_lexer_free(mp_lexer_t *lex); void mp_lexer_to_next(mp_lexer_t *lex); -void mp_lexer_show_token(const mp_lexer_t *lex); /******************************************************************/ // platform specific import function; must be implemented for a specific port diff --git a/py/lexerstr.c b/py/lexerstr.c deleted file mode 100644 index 9fdf4c1eb5..0000000000 --- a/py/lexerstr.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * This file is part of the Micro Python project, http://micropython.org/ - * - * The MIT License (MIT) - * - * Copyright (c) 2013, 2014 Damien P. 
George - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "py/lexer.h" - -#if MICROPY_ENABLE_COMPILER - -typedef struct _mp_lexer_str_buf_t { - mp_uint_t free_len; // if > 0, src_beg will be freed when done by: m_free(src_beg, free_len) - const char *src_beg; // beginning of source - const char *src_cur; // current location in source - const char *src_end; // end (exclusive) of source -} mp_lexer_str_buf_t; - -STATIC mp_uint_t str_buf_next_byte(mp_lexer_str_buf_t *sb) { - if (sb->src_cur < sb->src_end) { - return *sb->src_cur++; - } else { - return MP_LEXER_EOF; - } -} - -STATIC void str_buf_free(mp_lexer_str_buf_t *sb) { - if (sb->free_len > 0) { - m_del(char, (char*)sb->src_beg, sb->free_len); - } - m_del_obj(mp_lexer_str_buf_t, sb); -} - -mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len) { - mp_lexer_str_buf_t *sb = m_new_obj_maybe(mp_lexer_str_buf_t); - if (sb == NULL) { - return NULL; - } - sb->free_len = free_len; - sb->src_beg = str; - sb->src_cur = str; - sb->src_end = str + len; - return mp_lexer_new(src_name, sb, (mp_lexer_stream_next_byte_t)str_buf_next_byte, (mp_lexer_stream_close_t)str_buf_free); -} - -#endif // MICROPY_ENABLE_COMPILER diff --git a/py/lexerunix.c b/py/lexerunix.c deleted file mode 100644 index e8f8994a6c..0000000000 --- a/py/lexerunix.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * This file is part of the Micro Python project, http://micropython.org/ - * - * The MIT License (MIT) - * - * Copyright (c) 2013, 2014 Damien P. George - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "py/mpconfig.h" - -#if MICROPY_HELPER_LEXER_UNIX - -#include <stdio.h> -#include <stdint.h> -#include <unistd.h> -#include <fcntl.h> -#include <sys/stat.h> -#include <sys/types.h> - -#include "py/lexer.h" - -typedef struct _mp_lexer_file_buf_t { - int fd; - bool close_fd; - byte buf[20]; - mp_uint_t len; - mp_uint_t pos; -} mp_lexer_file_buf_t; - -STATIC mp_uint_t file_buf_next_byte(mp_lexer_file_buf_t *fb) { - if (fb->pos >= fb->len) { - if (fb->len == 0) { - return MP_LEXER_EOF; - } else { - int n = read(fb->fd, fb->buf, sizeof(fb->buf)); - if (n <= 0) { - fb->len = 0; - return MP_LEXER_EOF; - } - fb->len = n; - fb->pos = 0; - } - } - return fb->buf[fb->pos++]; -} - -STATIC void file_buf_close(mp_lexer_file_buf_t *fb) { - if (fb->close_fd) { - close(fb->fd); - } - m_del_obj(mp_lexer_file_buf_t, fb); -} - -mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd) { - mp_lexer_file_buf_t *fb = m_new_obj_maybe(mp_lexer_file_buf_t); - if (fb == NULL) { - if (close_fd) { - close(fd); - } - return NULL; - } - fb->fd = fd; - fb->close_fd = close_fd; - int n = read(fb->fd, fb->buf, sizeof(fb->buf)); - fb->len = n; - fb->pos = 0; - return mp_lexer_new(filename, fb, (mp_lexer_stream_next_byte_t)file_buf_next_byte, (mp_lexer_stream_close_t)file_buf_close); -} - -mp_lexer_t *mp_lexer_new_from_file(const char *filename) { - int fd = open(filename, O_RDONLY); - if (fd < 0) { - return NULL; - } - return mp_lexer_new_from_fd(qstr_from_str(filename), fd, true); -} - -#endif // MICROPY_HELPER_LEXER_UNIX @@ -46,7 +46,11 @@ typedef unsigned int uint; #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif -/** memomry allocation ******************************************/ +// Classical double-indirection stringification of preprocessor macro's value +#define _MP_STRINGIFY(x) #x +#define MP_STRINGIFY(x) _MP_STRINGIFY(x) + +/** memory allocation ******************************************/ // TODO make a lazy m_renew that can increase by a smaller amount than requested (but by at least 1 more element) diff --git a/py/mkrules.mk b/py/mkrules.mk index ea647e86f8..b71450a21d 100644 --- a/py/mkrules.mk +++ b/py/mkrules.mk @@ -49,7 +49,7 @@ $(BUILD)/%.o: %.c # List all native flags since the current build system doesn't have # the micropython configuration available. However, these flags are # needed to extract all qstrings -QSTR_GEN_EXTRA_CFLAGS += -DNO_QSTR -DN_X64 -DN_X86 -DN_THUMB -DN_ARM +QSTR_GEN_EXTRA_CFLAGS += -DNO_QSTR -DN_X64 -DN_X86 -DN_THUMB -DN_ARM -DN_XTENSA QSTR_GEN_EXTRA_CFLAGS += -I$(BUILD)/tmp vpath %.c . 
$(TOP) @@ -108,14 +108,14 @@ endif ifneq ($(FROZEN_MPY_DIR),) # make a list of all the .py files that need compiling and freezing -FROZEN_MPY_PY_FILES := $(shell find -L $(FROZEN_MPY_DIR) -type f -name '*.py' -printf '%P\n') +FROZEN_MPY_PY_FILES := $(shell find -L $(FROZEN_MPY_DIR) -type f -name '*.py' | $(SED) -e 's=^$(FROZEN_MPY_DIR)/==') FROZEN_MPY_MPY_FILES := $(addprefix $(BUILD)/frozen_mpy/,$(FROZEN_MPY_PY_FILES:.py=.mpy)) # to build .mpy files from .py files $(BUILD)/frozen_mpy/%.mpy: $(FROZEN_MPY_DIR)/%.py @$(ECHO) "MPY $<" $(Q)$(MKDIR) -p $(dir $@) - $(Q)$(MPY_CROSS) -o $@ -s $(^:$(FROZEN_MPY_DIR)/%=%) $^ + $(Q)$(MPY_CROSS) -o $@ -s $(^:$(FROZEN_MPY_DIR)/%=%) $(MPY_CROSS_FLAGS) $^ # to build frozen_mpy.c from all .mpy files $(BUILD)/frozen_mpy.c: $(FROZEN_MPY_MPY_FILES) $(BUILD)/genhdr/qstrdefs.generated.h diff --git a/py/modbuiltins.c b/py/modbuiltins.c index cbdcc9aae0..b7c8ff2601 100644 --- a/py/modbuiltins.c +++ b/py/modbuiltins.c @@ -475,9 +475,6 @@ STATIC mp_obj_t mp_builtin_round(size_t n_args, const mp_obj_t *args) { } else if (val - rounded == -0.5) { r &= ~1; } - if (n_args > 1) { - return mp_obj_new_float(r); - } #else mp_int_t r = mp_obj_get_int(o_in); #endif diff --git a/py/mpconfig.h b/py/mpconfig.h index 3945a1a5ab..6d18937177 100644 --- a/py/mpconfig.h +++ b/py/mpconfig.h @@ -288,8 +288,21 @@ #define MICROPY_EMIT_ARM (0) #endif +// Whether to emit Xtensa native code +#ifndef MICROPY_EMIT_XTENSA +#define MICROPY_EMIT_XTENSA (0) +#endif + +// Whether to enable the Xtensa inline assembler +#ifndef MICROPY_EMIT_INLINE_XTENSA +#define MICROPY_EMIT_INLINE_XTENSA (0) +#endif + // Convenience definition for whether any native emitter is enabled -#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM) +#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA) + +// Convenience definition for whether any inline assembler emitter is enabled +#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB || MICROPY_EMIT_INLINE_XTENSA) /*****************************************************************************/ /* Compiler configuration */ @@ -349,7 +362,6 @@ #endif // Whether to build functions that print debugging info: -// mp_lexer_show_token // mp_bytecode_print // mp_parse_node_print #ifndef MICROPY_DEBUG_PRINTERS @@ -381,6 +393,16 @@ /*****************************************************************************/ /* Python internal features */ +// Whether to use the POSIX reader for importing files +#ifndef MICROPY_READER_POSIX +#define MICROPY_READER_POSIX (0) +#endif + +// Whether to use the FatFS reader for importing files +#ifndef MICROPY_READER_FATFS +#define MICROPY_READER_FATFS (0) +#endif + // Hook for the VM at the start of the opcode loop (can contain variable // definitions usable by the other hook functions) #ifndef MICROPY_VM_HOOK_INIT @@ -423,6 +445,11 @@ # endif #endif +// Whether to provide the mp_kbd_exception object +#ifndef MICROPY_KBD_EXCEPTION +#define MICROPY_KBD_EXCEPTION (0) +#endif + // Prefer to raise KeyboardInterrupt asynchronously (from signal or interrupt // handler) - if supported by a particular port. 
#ifndef MICROPY_ASYNC_KBD_INTR @@ -844,7 +871,7 @@ typedef double mp_float_t; // Whether to provide "sys.exit" function #ifndef MICROPY_PY_SYS_EXIT -#define MICROPY_PY_SYS_EXIT (0) +#define MICROPY_PY_SYS_EXIT (1) #endif // Whether to provide sys.{stdin,stdout,stderr} objects @@ -863,6 +890,11 @@ typedef double mp_float_t; #define MICROPY_PY_UERRNO (0) #endif +// Whether to provide "uselect" module (baremetal implementation) +#ifndef MICROPY_PY_USELECT +#define MICROPY_PY_USELECT (0) +#endif + // Whether to provide "utime" module functions implementation // in terms of mp_hal_* functions. #ifndef MICROPY_PY_UTIME_MP_HAL @@ -912,6 +944,11 @@ typedef double mp_float_t; #define MICROPY_PY_UHEAPQ (0) #endif +// Optimized heap queue for relative timestamps +#ifndef MICROPY_PY_UTIMEQ +#define MICROPY_PY_UTIMEQ (0) +#endif + #ifndef MICROPY_PY_UHASHLIB #define MICROPY_PY_UHASHLIB (0) #endif diff --git a/py/mpprint.c b/py/mpprint.c index 9ad0f3f9a0..72d1c55ca0 100644 --- a/py/mpprint.c +++ b/py/mpprint.c @@ -202,6 +202,11 @@ STATIC int mp_print_int(const mp_print_t *print, mp_uint_t x, int sgn, int base, } int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec) { + // These are the only values for "base" that are required to be supported by this + // function, since Python only allows the user to format integers in these bases. + // If needed this function could be generalised to handle other values. + assert(base == 2 || base == 8 || base == 10 || base == 16); + if (!MP_OBJ_IS_INT(x)) { // This will convert booleans to int, or raise an error for // non-integer types. diff --git a/py/mpstate.h b/py/mpstate.h index 439ed66066..91fb68b3ad 100644 --- a/py/mpstate.h +++ b/py/mpstate.h @@ -118,6 +118,11 @@ typedef struct _mp_state_vm_t { #endif #endif + #if MICROPY_KBD_EXCEPTION + // exception object of type KeyboardInterrupt + mp_obj_exception_t mp_kbd_exception; + #endif + // dictionary with loaded modules (may be exposed as sys.modules) mp_obj_dict_t mp_loaded_modules_dict; @@ -308,9 +308,13 @@ STATIC mp_uint_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jl carryi >>= DIG_SIZE; } - if (0 != carryi) { - *idig++ = carryi; - } + // At least one of j,k must be negative so the above for-loop runs at least + // once. For carryi to be non-zero here it must be equal to 1 at the end of + // each iteration of the loop. So the accumulation of carryi must overflow + // each time, ie carryi += 0xff..ff. So carryj|carryk must be 0 in the + // DIG_MASK bits on each iteration. But considering all cases of signs of + // j,k one sees that this is not possible. + assert(carryi == 0); return mpn_remove_trailing_zeros(oidig, idig); } @@ -334,9 +338,8 @@ STATIC mp_uint_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, mp_uint_t jl carryi >>= DIG_SIZE; } - if (0 != carryi) { - *idig++ = carryi; - } + // See comment in above mpn_or_neg for why carryi must be 0. 
+ assert(carryi == 0); return mpn_remove_trailing_zeros(oidig, idig); } @@ -871,7 +874,7 @@ typedef uint32_t mp_float_int_t; // returns number of bytes from str that were processed mp_uint_t mpz_set_from_str(mpz_t *z, const char *str, mp_uint_t len, bool neg, mp_uint_t base) { - assert(base < 36); + assert(base <= 36); const char *cur = str; const char *top = str + len; diff --git a/py/nativeglue.c b/py/nativeglue.c index bc2f4ff5e7..5f2164ee0d 100644 --- a/py/nativeglue.c +++ b/py/nativeglue.c @@ -64,7 +64,7 @@ mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type) { #endif -#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM // convert a native value to a Micro Python object based on type mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type) { @@ -271,8 +271,10 @@ mp_float_t mp_obj_get_float(mp_obj_t arg) { return 1; } else if (MP_OBJ_IS_SMALL_INT(arg)) { return MP_OBJ_SMALL_INT_VALUE(arg); + #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) { - return mp_obj_int_as_float(arg); + return mp_obj_int_as_float_impl(arg); + #endif } else if (mp_obj_is_float(arg)) { return mp_obj_float_get(arg); } else { @@ -296,9 +298,11 @@ void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) { } else if (MP_OBJ_IS_SMALL_INT(arg)) { *real = MP_OBJ_SMALL_INT_VALUE(arg); *imag = 0; + #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) { - *real = mp_obj_int_as_float(arg); + *real = mp_obj_int_as_float_impl(arg); *imag = 0; + #endif } else if (mp_obj_is_float(arg)) { *real = mp_obj_float_get(arg); *imag = 0; @@ -680,9 +680,6 @@ void mp_obj_cell_set(mp_obj_t self_in, mp_obj_t obj); mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in); // Will raise exception if value doesn't fit into mp_int_t mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in); -#if MICROPY_PY_BUILTINS_FLOAT -mp_float_t mp_obj_int_as_float(mp_obj_t self_in); -#endif // exception #define mp_obj_is_native_exception_instance(o) (mp_obj_get_type(o)->make_new == mp_obj_exception_make_new) diff --git a/py/objboundmeth.c b/py/objboundmeth.c index e32caba330..57be6a6cfd 100644 --- a/py/objboundmeth.c +++ b/py/objboundmeth.c @@ -47,11 +47,8 @@ STATIC void bound_meth_print(const mp_print_t *print, mp_obj_t o_in, mp_print_ki } #endif -STATIC mp_obj_t bound_meth_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - mp_obj_bound_meth_t *self = MP_OBJ_TO_PTR(self_in); - - // need to insert self->self before all other args and then call self->meth - +mp_obj_t mp_call_method_self_n_kw(mp_obj_t meth, mp_obj_t self, size_t n_args, size_t n_kw, const mp_obj_t *args) { + // need to insert self before all other args and then call meth size_t n_total = n_args + 2 * n_kw; mp_obj_t *args2 = NULL; mp_obj_t *free_args2 = NULL; @@ -64,15 +61,20 @@ STATIC mp_obj_t bound_meth_call(mp_obj_t self_in, size_t n_args, size_t n_kw, co // (fallback to) use stack to allocate temporary args array args2 = alloca(sizeof(mp_obj_t) * (1 + n_total)); } - args2[0] = self->self; + args2[0] = self; memcpy(args2 + 1, args, n_total * sizeof(mp_obj_t)); - mp_obj_t res = mp_call_function_n_kw(self->meth, n_args + 1, n_kw, &args2[0]); + mp_obj_t res = mp_call_function_n_kw(meth, n_args + 1, n_kw, args2); if (free_args2 != NULL) { m_del(mp_obj_t, free_args2, 1 + n_total); } return res; } +STATIC mp_obj_t bound_meth_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const 
mp_obj_t *args) { + mp_obj_bound_meth_t *self = MP_OBJ_TO_PTR(self_in); + return mp_call_method_self_n_kw(self->meth, self->self, n_args, n_kw, args); +} + #if MICROPY_PY_FUNCTION_ATTRS STATIC void bound_meth_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { if (dest[0] != MP_OBJ_NULL) { diff --git a/py/objexcept.c b/py/objexcept.c index 9ccc9288c9..c1b992d276 100644 --- a/py/objexcept.c +++ b/py/objexcept.c @@ -152,11 +152,21 @@ mp_obj_t mp_obj_exception_get_value(mp_obj_t self_in) { } STATIC void exception_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { + mp_obj_exception_t *self = MP_OBJ_TO_PTR(self_in); if (dest[0] != MP_OBJ_NULL) { - // not load attribute + // store/delete attribute + if (attr == MP_QSTR___traceback__ && dest[1] == mp_const_none) { + // We allow 'exc.__traceback__ = None' assignment as low-level + // optimization of pre-allocating exception instance and raising + // it repeatedly - this avoids memory allocation during raise. + // However, uPy will keep adding traceback entries to such + // exception instance, so before throwing it, traceback should + // be cleared like above. + self->traceback_len = 0; + dest[0] = MP_OBJ_NULL; // indicate success + } return; } - mp_obj_exception_t *self = MP_OBJ_TO_PTR(self_in); if (attr == MP_QSTR_args) { dest[0] = MP_OBJ_FROM_PTR(self->args); } else if (self->base.type == &mp_type_StopIteration && attr == MP_QSTR_value) { diff --git a/py/objfun.c b/py/objfun.c index 6b8fe6d382..207e68a771 100644 --- a/py/objfun.c +++ b/py/objfun.c @@ -471,7 +471,7 @@ mp_obj_t mp_obj_new_fun_viper(mp_uint_t n_args, void *fun_data, mp_uint_t type_s /******************************************************************************/ /* inline assembler functions */ -#if MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_INLINE_ASM typedef struct _mp_obj_fun_asm_t { mp_obj_base_t base; @@ -582,4 +582,4 @@ mp_obj_t mp_obj_new_fun_asm(mp_uint_t n_args, void *fun_data, mp_uint_t type_sig return o; } -#endif // MICROPY_EMIT_INLINE_THUMB +#endif // MICROPY_EMIT_INLINE_ASM diff --git a/py/objint.c b/py/objint.c index f8988d6c94..5842a00a4d 100644 --- a/py/objint.c +++ b/py/objint.c @@ -151,23 +151,21 @@ typedef mp_int_t fmt_int_t; #endif STATIC const uint8_t log_base2_floor[] = { - 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, + /* if needed, these are the values for higher bases 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5 + */ }; size_t mp_int_format_size(size_t num_bits, int base, const char *prefix, char comma) { - if (base < 2 || base > 32) { - return 0; - } - - size_t num_digits = num_bits / log_base2_floor[base] + 1; + assert(2 <= base && base <= 16); + size_t num_digits = num_bits / log_base2_floor[base - 1] + 1; size_t num_commas = comma ? num_digits / 3 : 0; size_t prefix_len = prefix ? 
strlen(prefix) : 0; return num_digits + num_commas + prefix_len + 2; // +1 for sign, +1 for null byte @@ -354,12 +352,6 @@ mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) { return MP_OBJ_SMALL_INT_VALUE(self_in); } -#if MICROPY_PY_BUILTINS_FLOAT -mp_float_t mp_obj_int_as_float(mp_obj_t self_in) { - return MP_OBJ_SMALL_INT_VALUE(self_in); -} -#endif - #endif // MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE // This dispatcher function is expected to be independent of the implementation of long int @@ -383,10 +375,14 @@ mp_obj_t mp_obj_int_binary_op_extra_cases(mp_uint_t op, mp_obj_t lhs_in, mp_obj_ // this is a classmethod STATIC mp_obj_t int_from_bytes(size_t n_args, const mp_obj_t *args) { // TODO: Support long ints - // TODO: Support byteorder param (assumes 'little' at the moment) + // TODO: Support byteorder param // TODO: Support signed param (assumes signed=False at the moment) (void)n_args; + if (args[2] != MP_OBJ_NEW_QSTR(MP_QSTR_little)) { + mp_not_implemented(""); + } + // get the buffer info mp_buffer_info_t bufinfo; mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_READ); @@ -400,14 +396,18 @@ STATIC mp_obj_t int_from_bytes(size_t n_args, const mp_obj_t *args) { return mp_obj_new_int_from_uint(value); } -STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(int_from_bytes_fun_obj, 2, 3, int_from_bytes); +STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(int_from_bytes_fun_obj, 3, 4, int_from_bytes); STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(int_from_bytes_obj, MP_ROM_PTR(&int_from_bytes_fun_obj)); STATIC mp_obj_t int_to_bytes(size_t n_args, const mp_obj_t *args) { - // TODO: Support byteorder param (assumes 'little') + // TODO: Support byteorder param // TODO: Support signed param (assumes signed=False) (void)n_args; + if (args[2] != MP_OBJ_NEW_QSTR(MP_QSTR_little)) { + mp_not_implemented(""); + } + mp_uint_t len = MP_OBJ_SMALL_INT_VALUE(args[1]); vstr_t vstr; @@ -427,7 +427,7 @@ STATIC mp_obj_t int_to_bytes(size_t n_args, const mp_obj_t *args) { return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr); } -STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(int_to_bytes_obj, 2, 4, int_to_bytes); +STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(int_to_bytes_obj, 3, 4, int_to_bytes); STATIC const mp_rom_map_elem_t int_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_from_bytes), MP_ROM_PTR(&int_from_bytes_obj) }, diff --git a/py/objint.h b/py/objint.h index 6e627f1bd7..f418c329e9 100644 --- a/py/objint.h +++ b/py/objint.h @@ -48,6 +48,7 @@ typedef enum { } mp_fp_as_int_class_t; mp_fp_as_int_class_t mp_classify_fp_as_int(mp_float_t val); +mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in); #endif // MICROPY_PY_BUILTINS_FLOAT size_t mp_int_format_size(size_t num_bits, int base, const char *prefix, char comma); diff --git a/py/objint_longlong.c b/py/objint_longlong.c index b051cfbe64..f5b5d9c939 100644 --- a/py/objint_longlong.c +++ b/py/objint_longlong.c @@ -295,13 +295,10 @@ mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) { } #if MICROPY_PY_BUILTINS_FLOAT -mp_float_t mp_obj_int_as_float(mp_obj_t self_in) { - if (MP_OBJ_IS_SMALL_INT(self_in)) { - return MP_OBJ_SMALL_INT_VALUE(self_in); - } else { - mp_obj_int_t *self = self_in; - return self->val; - } +mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in) { + assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + mp_obj_int_t *self = self_in; + return self->val; } #endif diff --git a/py/objint_mpz.c b/py/objint_mpz.c index 0a1d68598d..eadf64fce7 100644 --- a/py/objint_mpz.c +++ b/py/objint_mpz.c @@ -403,13 +403,10 @@ mp_int_t 
mp_obj_int_get_checked(mp_const_obj_t self_in) { } #if MICROPY_PY_BUILTINS_FLOAT -mp_float_t mp_obj_int_as_float(mp_obj_t self_in) { - if (MP_OBJ_IS_SMALL_INT(self_in)) { - return MP_OBJ_SMALL_INT_VALUE(self_in); - } else { - mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in); - return mpz_as_float(&self->mpz); - } +mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in) { + assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in); + return mpz_as_float(&self->mpz); } #endif diff --git a/py/objmodule.c b/py/objmodule.c index 9b06e3b7b5..1c79e1a18d 100644 --- a/py/objmodule.c +++ b/py/objmodule.c @@ -189,6 +189,9 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { #if MICROPY_PY_UHEAPQ { MP_ROM_QSTR(MP_QSTR_uheapq), MP_ROM_PTR(&mp_module_uheapq) }, #endif +#if MICROPY_PY_UTIMEQ + { MP_ROM_QSTR(MP_QSTR_utimeq), MP_ROM_PTR(&mp_module_utimeq) }, +#endif #if MICROPY_PY_UHASHLIB { MP_ROM_QSTR(MP_QSTR_uhashlib), MP_ROM_PTR(&mp_module_uhashlib) }, #endif @@ -198,6 +201,9 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { #if MICROPY_PY_URANDOM { MP_ROM_QSTR(MP_QSTR_urandom), MP_ROM_PTR(&mp_module_urandom) }, #endif +#if MICROPY_PY_USELECT + { MP_ROM_QSTR(MP_QSTR_uselect), MP_ROM_PTR(&mp_module_uselect) }, +#endif #if MICROPY_PY_USSL { MP_ROM_QSTR(MP_QSTR_ussl), MP_ROM_PTR(&mp_module_ussl) }, #endif diff --git a/py/objtype.c b/py/objtype.c index 8b46c54001..c20b0693e5 100644 --- a/py/objtype.c +++ b/py/objtype.c @@ -4,7 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. George - * Copyright (c) 2014 Paul Sokolovsky + * Copyright (c) 2014-2016 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -687,9 +687,8 @@ STATIC mp_obj_t instance_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value } } -STATIC mp_obj_t mp_obj_instance_get_call(mp_obj_t self_in) { +STATIC mp_obj_t mp_obj_instance_get_call(mp_obj_t self_in, mp_obj_t *member) { mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in); - mp_obj_t member[2] = {MP_OBJ_NULL, MP_OBJ_NULL}; struct class_lookup_data lookup = { .obj = self, .attr = MP_QSTR___call__, @@ -702,11 +701,13 @@ STATIC mp_obj_t mp_obj_instance_get_call(mp_obj_t self_in) { } bool mp_obj_instance_is_callable(mp_obj_t self_in) { - return mp_obj_instance_get_call(self_in) != MP_OBJ_NULL; + mp_obj_t member[2] = {MP_OBJ_NULL, MP_OBJ_NULL}; + return mp_obj_instance_get_call(self_in, member) != MP_OBJ_NULL; } mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - mp_obj_t call = mp_obj_instance_get_call(self_in); + mp_obj_t member[2] = {MP_OBJ_NULL, MP_OBJ_NULL}; + mp_obj_t call = mp_obj_instance_get_call(self_in, member); if (call == MP_OBJ_NULL) { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { mp_raise_msg(&mp_type_TypeError, "object not callable"); @@ -719,8 +720,8 @@ mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, cons if (call == MP_OBJ_SENTINEL) { return mp_call_function_n_kw(self->subobj[0], n_args, n_kw, args); } - mp_obj_t meth = mp_obj_new_bound_meth(call, self_in); - return mp_call_function_n_kw(meth, n_args, n_kw, args); + + return mp_call_method_self_n_kw(member[0], member[1], n_args, n_kw, args); } STATIC mp_obj_t instance_getiter(mp_obj_t self_in) { diff --git a/py/parse.c b/py/parse.c index 397d46d9f0..dc360e40ce 100644 --- a/py/parse.c +++ b/py/parse.c @@ -227,11 +227,14 @@ STATIC void 
pop_rule(parser_t *parser, const rule_t **rule, size_t *arg_i, size_ *src_line = parser->rule_stack[parser->rule_stack_top].src_line; } -mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg) { - if (kind == MP_PARSE_NODE_SMALL_INT) { - return (mp_parse_node_t)(kind | (arg << 1)); - } - return (mp_parse_node_t)(kind | (arg << 4)); +bool mp_parse_node_is_const_false(mp_parse_node_t pn) { + return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_FALSE) + || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) == 0); +} + +bool mp_parse_node_is_const_true(mp_parse_node_t pn) { + return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_TRUE) + || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) != 0); } bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o) { @@ -408,7 +411,7 @@ STATIC void push_result_token(parser_t *parser, const rule_t *rule) { mp_map_elem_t *elem; if (rule->rule_id == RULE_atom && (elem = mp_map_lookup(&parser->consts, MP_OBJ_NEW_QSTR(id), MP_MAP_LOOKUP)) != NULL) { - pn = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(elem->value)); + pn = mp_parse_node_new_small_int(MP_OBJ_SMALL_INT_VALUE(elem->value)); } else { pn = mp_parse_node_new_leaf(MP_PARSE_NODE_ID, id); } @@ -419,7 +422,7 @@ STATIC void push_result_token(parser_t *parser, const rule_t *rule) { } else if (lex->tok_kind == MP_TOKEN_INTEGER) { mp_obj_t o = mp_parse_num_integer(lex->vstr.buf, lex->vstr.len, 0, lex); if (MP_OBJ_IS_SMALL_INT(o)) { - pn = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(o)); + pn = mp_parse_node_new_small_int(MP_OBJ_SMALL_INT_VALUE(o)); } else { pn = make_node_const_object(parser, lex->tok_line, o); } @@ -467,6 +470,63 @@ STATIC MP_DEFINE_CONST_MAP(mp_constants_map, mp_constants_table); STATIC void push_result_rule(parser_t *parser, size_t src_line, const rule_t *rule, size_t num_args); #if MICROPY_COMP_CONST_FOLDING +STATIC bool fold_logical_constants(parser_t *parser, const rule_t *rule, size_t *num_args) { + if (rule->rule_id == RULE_or_test + || rule->rule_id == RULE_and_test) { + // folding for binary logical ops: or and + size_t copy_to = *num_args; + for (size_t i = copy_to; i > 0;) { + mp_parse_node_t pn = peek_result(parser, --i); + parser->result_stack[parser->result_stack_top - copy_to] = pn; + if (i == 0) { + // always need to keep the last value + break; + } + if (rule->rule_id == RULE_or_test) { + if (mp_parse_node_is_const_true(pn)) { + // + break; + } else if (!mp_parse_node_is_const_false(pn)) { + copy_to -= 1; + } + } else { + // RULE_and_test + if (mp_parse_node_is_const_false(pn)) { + break; + } else if (!mp_parse_node_is_const_true(pn)) { + copy_to -= 1; + } + } + } + copy_to -= 1; // copy_to now contains number of args to pop + + // pop and discard all the short-circuited expressions + for (size_t i = 0; i < copy_to; ++i) { + pop_result(parser); + } + *num_args -= copy_to; + + // we did a complete folding if there's only 1 arg left + return *num_args == 1; + + } else if (rule->rule_id == RULE_not_test_2) { + // folding for unary logical op: not + mp_parse_node_t pn = peek_result(parser, 0); + if (mp_parse_node_is_const_false(pn)) { + pn = mp_parse_node_new_leaf(MP_PARSE_NODE_TOKEN, MP_TOKEN_KW_TRUE); + } else if (mp_parse_node_is_const_true(pn)) { + pn = mp_parse_node_new_leaf(MP_PARSE_NODE_TOKEN, MP_TOKEN_KW_FALSE); + } else { + return false; + } + pop_result(parser); + push_result_node(parser, pn); + return true; + } + + return false; +} + STATIC bool 
fold_constants(parser_t *parser, const rule_t *rule, size_t num_args) { // this code does folding of arbitrary integer expressions, eg 1 + 2 * 3 + 4 // it does not do partial folding, eg 1 + 2 + x -> 3 + x @@ -648,7 +708,7 @@ STATIC bool fold_constants(parser_t *parser, const rule_t *rule, size_t num_args pop_result(parser); } if (MP_OBJ_IS_SMALL_INT(arg0)) { - push_result_node(parser, mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(arg0))); + push_result_node(parser, mp_parse_node_new_small_int(MP_OBJ_SMALL_INT_VALUE(arg0))); } else { // TODO reuse memory for parse node struct? push_result_node(parser, make_node_const_object(parser, 0, arg0)); @@ -674,6 +734,10 @@ STATIC void push_result_rule(parser_t *parser, size_t src_line, const rule_t *ru } #if MICROPY_COMP_CONST_FOLDING + if (fold_logical_constants(parser, rule, &num_args)) { + // we folded this rule so return straight away + return; + } if (fold_constants(parser, rule, num_args)) { // we folded this rule so return straight away return; diff --git a/py/parse.h b/py/parse.h index c4ac15ffcf..c56145b1b0 100644 --- a/py/parse.h +++ b/py/parse.h @@ -77,7 +77,14 @@ typedef struct _mp_parse_node_struct_t { #define MP_PARSE_NODE_STRUCT_KIND(pns) ((pns)->kind_num_nodes & 0xff) #define MP_PARSE_NODE_STRUCT_NUM_NODES(pns) ((pns)->kind_num_nodes >> 8) -mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg); +static inline mp_parse_node_t mp_parse_node_new_small_int(mp_int_t val) { + return (mp_parse_node_t)(MP_PARSE_NODE_SMALL_INT | ((mp_uint_t)val << 1)); +} +static inline mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg) { + return (mp_parse_node_t)(kind | ((mp_uint_t)arg << 4)); +} +bool mp_parse_node_is_const_false(mp_parse_node_t pn); +bool mp_parse_node_is_const_true(mp_parse_node_t pn); bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o); int mp_parse_node_extract_list(mp_parse_node_t *pn, size_t pn_kind, mp_parse_node_t **nodes); void mp_parse_node_print(mp_parse_node_t pn, size_t indent); diff --git a/py/parsenum.c b/py/parsenum.c index b1c449c9b8..2e41801ee9 100644 --- a/py/parsenum.c +++ b/py/parsenum.c @@ -81,20 +81,18 @@ mp_obj_t mp_parse_num_integer(const char *restrict str_, size_t len, int base, m for (; str < top; str++) { // get next digit as a value mp_uint_t dig = *str; - if (unichar_isdigit(dig) && (int)dig - '0' < base) { - // 0-9 digit - dig = dig - '0'; - } else if (base == 16) { - dig |= 0x20; - if ('a' <= dig && dig <= 'f') { - // a-f hex digit - dig = dig - 'a' + 10; + if ('0' <= dig && dig <= '9') { + dig -= '0'; + } else { + dig |= 0x20; // make digit lower-case + if ('a' <= dig && dig <= 'z') { + dig -= 'a' - 10; } else { // unknown character break; } - } else { - // unknown character + } + if (dig >= (mp_uint_t)base) { break; } diff --git a/py/persistentcode.c b/py/persistentcode.c new file mode 100644 index 0000000000..99b01f8e27 --- /dev/null +++ b/py/persistentcode.c @@ -0,0 +1,400 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2016 Damien P. 
George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <assert.h> + +#include "py/reader.h" +#include "py/emitglue.h" +#include "py/persistentcode.h" +#include "py/bc.h" + +#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE + +#include "py/smallint.h" + +// The feature flags byte encodes the compile-time config options that +// affect the generate bytecode. +#define MPY_FEATURE_FLAGS ( \ + ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) << 0) \ + | ((MICROPY_PY_BUILTINS_STR_UNICODE) << 1) \ + ) +// This is a version of the flags that can be configured at runtime. +#define MPY_FEATURE_FLAGS_DYNAMIC ( \ + ((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) << 0) \ + | ((MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) << 1) \ + ) + +#if MICROPY_PERSISTENT_CODE_LOAD || (MICROPY_PERSISTENT_CODE_SAVE && !MICROPY_DYNAMIC_COMPILER) +// The bytecode will depend on the number of bits in a small-int, and +// this function computes that (could make it a fixed constant, but it +// would need to be defined in mpconfigport.h). 
+STATIC int mp_small_int_bits(void) { + mp_int_t i = MP_SMALL_INT_MAX; + int n = 1; + while (i != 0) { + i >>= 1; + ++n; + } + return n; +} +#endif + +typedef struct _bytecode_prelude_t { + uint n_state; + uint n_exc_stack; + uint scope_flags; + uint n_pos_args; + uint n_kwonly_args; + uint n_def_pos_args; + uint code_info_size; +} bytecode_prelude_t; + +// ip will point to start of opcodes +// ip2 will point to simple_name, source_file qstrs +STATIC void extract_prelude(const byte **ip, const byte **ip2, bytecode_prelude_t *prelude) { + prelude->n_state = mp_decode_uint(ip); + prelude->n_exc_stack = mp_decode_uint(ip); + prelude->scope_flags = *(*ip)++; + prelude->n_pos_args = *(*ip)++; + prelude->n_kwonly_args = *(*ip)++; + prelude->n_def_pos_args = *(*ip)++; + *ip2 = *ip; + prelude->code_info_size = mp_decode_uint(ip2); + *ip += prelude->code_info_size; + while (*(*ip)++ != 255) { + } +} + +#endif // MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE + +#if MICROPY_PERSISTENT_CODE_LOAD + +#include "py/parsenum.h" +#include "py/bc0.h" + +STATIC int read_byte(mp_reader_t *reader) { + return reader->readbyte(reader->data); +} + +STATIC void read_bytes(mp_reader_t *reader, byte *buf, size_t len) { + while (len-- > 0) { + *buf++ = reader->readbyte(reader->data); + } +} + +STATIC mp_uint_t read_uint(mp_reader_t *reader) { + mp_uint_t unum = 0; + for (;;) { + byte b = reader->readbyte(reader->data); + unum = (unum << 7) | (b & 0x7f); + if ((b & 0x80) == 0) { + break; + } + } + return unum; +} + +STATIC qstr load_qstr(mp_reader_t *reader) { + mp_uint_t len = read_uint(reader); + char *str = m_new(char, len); + read_bytes(reader, (byte*)str, len); + qstr qst = qstr_from_strn(str, len); + m_del(char, str, len); + return qst; +} + +STATIC mp_obj_t load_obj(mp_reader_t *reader) { + byte obj_type = read_byte(reader); + if (obj_type == 'e') { + return MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj); + } else { + size_t len = read_uint(reader); + vstr_t vstr; + vstr_init_len(&vstr, len); + read_bytes(reader, (byte*)vstr.buf, len); + if (obj_type == 's' || obj_type == 'b') { + return mp_obj_new_str_from_vstr(obj_type == 's' ? 
&mp_type_str : &mp_type_bytes, &vstr); + } else if (obj_type == 'i') { + return mp_parse_num_integer(vstr.buf, vstr.len, 10, NULL); + } else { + assert(obj_type == 'f' || obj_type == 'c'); + return mp_parse_num_decimal(vstr.buf, vstr.len, obj_type == 'c', false, NULL); + } + } +} + +STATIC void load_bytecode_qstrs(mp_reader_t *reader, byte *ip, byte *ip_top) { + while (ip < ip_top) { + size_t sz; + uint f = mp_opcode_format(ip, &sz); + if (f == MP_OPCODE_QSTR) { + qstr qst = load_qstr(reader); + ip[1] = qst; + ip[2] = qst >> 8; + } + ip += sz; + } +} + +STATIC mp_raw_code_t *load_raw_code(mp_reader_t *reader) { + // load bytecode + mp_uint_t bc_len = read_uint(reader); + byte *bytecode = m_new(byte, bc_len); + read_bytes(reader, bytecode, bc_len); + + // extract prelude + const byte *ip = bytecode; + const byte *ip2; + bytecode_prelude_t prelude; + extract_prelude(&ip, &ip2, &prelude); + + // load qstrs and link global qstr ids into bytecode + qstr simple_name = load_qstr(reader); + qstr source_file = load_qstr(reader); + ((byte*)ip2)[0] = simple_name; ((byte*)ip2)[1] = simple_name >> 8; + ((byte*)ip2)[2] = source_file; ((byte*)ip2)[3] = source_file >> 8; + load_bytecode_qstrs(reader, (byte*)ip, bytecode + bc_len); + + // load constant table + mp_uint_t n_obj = read_uint(reader); + mp_uint_t n_raw_code = read_uint(reader); + mp_uint_t *const_table = m_new(mp_uint_t, prelude.n_pos_args + prelude.n_kwonly_args + n_obj + n_raw_code); + mp_uint_t *ct = const_table; + for (mp_uint_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { + *ct++ = (mp_uint_t)MP_OBJ_NEW_QSTR(load_qstr(reader)); + } + for (mp_uint_t i = 0; i < n_obj; ++i) { + *ct++ = (mp_uint_t)load_obj(reader); + } + for (mp_uint_t i = 0; i < n_raw_code; ++i) { + *ct++ = (mp_uint_t)(uintptr_t)load_raw_code(reader); + } + + // create raw_code and return it + mp_raw_code_t *rc = mp_emit_glue_new_raw_code(); + mp_emit_glue_assign_bytecode(rc, bytecode, bc_len, const_table, + #if MICROPY_PERSISTENT_CODE_SAVE + n_obj, n_raw_code, + #endif + prelude.scope_flags); + return rc; +} + +mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader) { + byte header[4]; + read_bytes(reader, header, sizeof(header)); + if (strncmp((char*)header, "M\x00", 2) != 0) { + mp_raise_ValueError("invalid .mpy file"); + } + if (header[2] != MPY_FEATURE_FLAGS || header[3] > mp_small_int_bits()) { + mp_raise_ValueError("incompatible .mpy file"); + } + mp_raw_code_t *rc = load_raw_code(reader); + reader->close(reader->data); + return rc; +} + +mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len) { + mp_reader_t reader; + if (!mp_reader_new_mem(&reader, buf, len, 0)) { + m_malloc_fail(BYTES_PER_WORD); // we need to raise a MemoryError + } + return mp_raw_code_load(&reader); +} + +mp_raw_code_t *mp_raw_code_load_file(const char *filename) { + mp_reader_t reader; + int ret = mp_reader_new_file(&reader, filename); + if (ret != 0) { + mp_raise_OSError(ret); + } + return mp_raw_code_load(&reader); +} + +#endif // MICROPY_PERSISTENT_CODE_LOAD + +#if MICROPY_PERSISTENT_CODE_SAVE + +#include "py/objstr.h" + +STATIC void mp_print_bytes(mp_print_t *print, const byte *data, size_t len) { + print->print_strn(print->data, (const char*)data, len); +} + +#define BYTES_FOR_INT ((BYTES_PER_WORD * 8 + 6) / 7) +STATIC void mp_print_uint(mp_print_t *print, mp_uint_t n) { + byte buf[BYTES_FOR_INT]; + byte *p = buf + sizeof(buf); + *--p = n & 0x7f; + n >>= 7; + for (; n != 0; n >>= 7) { + *--p = 0x80 | (n & 0x7f); + } + print->print_strn(print->data, (char*)p, buf + 
sizeof(buf) - p); +} + +STATIC void save_qstr(mp_print_t *print, qstr qst) { + size_t len; + const byte *str = qstr_data(qst, &len); + mp_print_uint(print, len); + mp_print_bytes(print, str, len); +} + +STATIC void save_obj(mp_print_t *print, mp_obj_t o) { + if (MP_OBJ_IS_STR_OR_BYTES(o)) { + byte obj_type; + if (MP_OBJ_IS_STR(o)) { + obj_type = 's'; + } else { + obj_type = 'b'; + } + mp_uint_t len; + const char *str = mp_obj_str_get_data(o, &len); + mp_print_bytes(print, &obj_type, 1); + mp_print_uint(print, len); + mp_print_bytes(print, (const byte*)str, len); + } else if (MP_OBJ_TO_PTR(o) == &mp_const_ellipsis_obj) { + byte obj_type = 'e'; + mp_print_bytes(print, &obj_type, 1); + } else { + // we save numbers using a simplistic text representation + // TODO could be improved + byte obj_type; + if (MP_OBJ_IS_TYPE(o, &mp_type_int)) { + obj_type = 'i'; + } else if (mp_obj_is_float(o)) { + obj_type = 'f'; + } else { + assert(MP_OBJ_IS_TYPE(o, &mp_type_complex)); + obj_type = 'c'; + } + vstr_t vstr; + mp_print_t pr; + vstr_init_print(&vstr, 10, &pr); + mp_obj_print_helper(&pr, o, PRINT_REPR); + mp_print_bytes(print, &obj_type, 1); + mp_print_uint(print, vstr.len); + mp_print_bytes(print, (const byte*)vstr.buf, vstr.len); + vstr_clear(&vstr); + } +} + +STATIC void save_bytecode_qstrs(mp_print_t *print, const byte *ip, const byte *ip_top) { + while (ip < ip_top) { + size_t sz; + uint f = mp_opcode_format(ip, &sz); + if (f == MP_OPCODE_QSTR) { + qstr qst = ip[1] | (ip[2] << 8); + save_qstr(print, qst); + } + ip += sz; + } +} + +STATIC void save_raw_code(mp_print_t *print, mp_raw_code_t *rc) { + if (rc->kind != MP_CODE_BYTECODE) { + mp_raise_ValueError("can only save bytecode"); + } + + // save bytecode + mp_print_uint(print, rc->data.u_byte.bc_len); + mp_print_bytes(print, rc->data.u_byte.bytecode, rc->data.u_byte.bc_len); + + // extract prelude + const byte *ip = rc->data.u_byte.bytecode; + const byte *ip2; + bytecode_prelude_t prelude; + extract_prelude(&ip, &ip2, &prelude); + + // save qstrs + save_qstr(print, ip2[0] | (ip2[1] << 8)); // simple_name + save_qstr(print, ip2[2] | (ip2[3] << 8)); // source_file + save_bytecode_qstrs(print, ip, rc->data.u_byte.bytecode + rc->data.u_byte.bc_len); + + // save constant table + mp_print_uint(print, rc->data.u_byte.n_obj); + mp_print_uint(print, rc->data.u_byte.n_raw_code); + const mp_uint_t *const_table = rc->data.u_byte.const_table; + for (uint i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { + mp_obj_t o = (mp_obj_t)*const_table++; + save_qstr(print, MP_OBJ_QSTR_VALUE(o)); + } + for (uint i = 0; i < rc->data.u_byte.n_obj; ++i) { + save_obj(print, (mp_obj_t)*const_table++); + } + for (uint i = 0; i < rc->data.u_byte.n_raw_code; ++i) { + save_raw_code(print, (mp_raw_code_t*)(uintptr_t)*const_table++); + } +} + +void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print) { + // header contains: + // byte 'M' + // byte version + // byte feature flags + // byte number of bits in a small int + byte header[4] = {'M', 0, MPY_FEATURE_FLAGS_DYNAMIC, + #if MICROPY_DYNAMIC_COMPILER + mp_dynamic_compiler.small_int_bits, + #else + mp_small_int_bits(), + #endif + }; + mp_print_bytes(print, header, sizeof(header)); + + save_raw_code(print, rc); +} + +// here we define mp_raw_code_save_file depending on the port +// TODO abstract this away properly + +#if defined(__i386__) || defined(__x86_64__) || (defined(__arm__) && (defined(__unix__))) + +#include <unistd.h> +#include <sys/stat.h> +#include <fcntl.h> + +STATIC void fd_print_strn(void *env, const 
char *str, size_t len) { + int fd = (intptr_t)env; + ssize_t ret = write(fd, str, len); + (void)ret; +} + +void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename) { + int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); + mp_print_t fd_print = {(void*)(intptr_t)fd, fd_print_strn}; + mp_raw_code_save(rc, &fd_print); + close(fd); +} + +#else +#error mp_raw_code_save_file not implemented for this platform +#endif + +#endif // MICROPY_PERSISTENT_CODE_SAVE diff --git a/py/persistentcode.h b/py/persistentcode.h new file mode 100644 index 0000000000..d04e0b6330 --- /dev/null +++ b/py/persistentcode.h @@ -0,0 +1,40 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef MICROPY_INCLUDED_PY_PERSISTENTCODE_H +#define MICROPY_INCLUDED_PY_PERSISTENTCODE_H + +#include "py/mpprint.h" +#include "py/reader.h" +#include "py/emitglue.h" + +mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader); +mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len); +mp_raw_code_t *mp_raw_code_load_file(const char *filename); + +void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print); +void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename); + +#endif // MICROPY_INCLUDED_PY_PERSISTENTCODE_H @@ -113,14 +113,14 @@ PY_O_BASENAME = \ mpprint.o \ unicode.o \ mpz.o \ + reader.o \ lexer.o \ - lexerstr.o \ - lexerunix.o \ parse2.o \ scope.o \ compile2.o \ emitcommon.o \ emitbc.o \ + asmbase.o \ asmx64.o \ emitnx64.o \ asmx86.o \ @@ -130,10 +130,14 @@ PY_O_BASENAME = \ emitinlinethumb.o \ asmarm.o \ emitnarm.o \ + asmxtensa.o \ + emitnxtensa.o \ + emitinlinextensa.o \ formatfloat.o \ parsenumbase.o \ parsenum.o \ emitglue.o \ + persistentcode.o \ runtime.o \ runtime_utils.o \ nativeglue.o \ @@ -208,6 +212,7 @@ PY_O_BASENAME = \ ../extmod/modure.o \ ../extmod/moduzlib.o \ ../extmod/moduheapq.o \ + ../extmod/modutimeq.o \ ../extmod/moduhashlib.o \ ../extmod/modubinascii.o \ ../extmod/virtpin.o \ @@ -219,6 +224,7 @@ PY_O_BASENAME = \ ../extmod/modussl_axtls.o \ ../extmod/modussl_mbedtls.o \ ../extmod/modurandom.o \ + ../extmod/moduselect.o \ ../extmod/modwebsocket.o \ ../extmod/modwebrepl.o \ ../extmod/modframebuf.o \ @@ -227,7 +233,7 @@ PY_O_BASENAME = \ ../extmod/vfs_fat_ffconf.o \ ../extmod/vfs_fat_diskio.o \ ../extmod/vfs_fat_file.o \ - ../extmod/vfs_fat_lexer.o \ + ../extmod/vfs_fat_reader.o \ ../extmod/vfs_fat_misc.o \ ../extmod/utime_mphal.o \ ../extmod/uos_dupterm.o \ @@ -248,7 +254,7 @@ PY_O += $(BUILD)/$(BUILD)/frozen_mpy.o endif # Sources that may contain qstrings -SRC_QSTR_IGNORE = nlr% emitnx% emitnthumb% emitnarm% +SRC_QSTR_IGNORE = nlr% emitnx86% emitnx64% emitnthumb% emitnarm% emitnxtensa% SRC_QSTR = $(SRC_MOD) $(addprefix py/,$(filter-out $(SRC_QSTR_IGNORE),$(PY_O_BASENAME:.o=.c)) emitnative.c) # Anything that depends on FORCE will be considered out-of-date @@ -269,7 +275,7 @@ MPCONFIGPORT_MK = $(wildcard mpconfigport.mk) # the lines in "" and then unwrap after the preprocessor is finished. 
$(HEADER_BUILD)/qstrdefs.generated.h: $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) $(PY_SRC)/makeqstrdata.py mpconfigport.h $(MPCONFIGPORT_MK) $(PY_SRC)/mpconfig.h | $(HEADER_BUILD) $(ECHO) "GEN $@" - $(Q)cat $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) | $(SED) 's/^Q(.*)/"&"/' | $(CPP) $(CFLAGS) - | sed 's/^"\(Q(.*)\)"/\1/' > $(HEADER_BUILD)/qstrdefs.preprocessed.h + $(Q)cat $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) | $(SED) 's/^Q(.*)/"&"/' | $(CPP) $(CFLAGS) - | $(SED) 's/^"\(Q(.*)\)"/\1/' > $(HEADER_BUILD)/qstrdefs.preprocessed.h $(Q)$(PYTHON) $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/qstrdefs.preprocessed.h > $@ # emitters @@ -290,6 +296,10 @@ $(PY_BUILD)/emitnarm.o: CFLAGS += -DN_ARM $(PY_BUILD)/emitnarm.o: py/emitnative.c $(call compile_c) +$(PY_BUILD)/emitnxtensa.o: CFLAGS += -DN_XTENSA +$(PY_BUILD)/emitnxtensa.o: py/emitnative.c + $(call compile_c) + # optimising gc for speed; 5ms down to 4ms on pybv2 $(PY_BUILD)/gc.o: CFLAGS += $(CSUPEROPT) diff --git a/py/reader.c b/py/reader.c new file mode 100644 index 0000000000..d7de7aa6c4 --- /dev/null +++ b/py/reader.c @@ -0,0 +1,146 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include <stdio.h> +#include <assert.h> + +#include "py/mperrno.h" +#include "py/reader.h" + +typedef struct _mp_reader_mem_t { + size_t free_len; // if >0 mem is freed on close by: m_free(beg, free_len) + const byte *beg; + const byte *cur; + const byte *end; +} mp_reader_mem_t; + +STATIC mp_uint_t mp_reader_mem_readbyte(void *data) { + mp_reader_mem_t *reader = (mp_reader_mem_t*)data; + if (reader->cur < reader->end) { + return *reader->cur++; + } else { + return MP_READER_EOF; + } +} + +STATIC void mp_reader_mem_close(void *data) { + mp_reader_mem_t *reader = (mp_reader_mem_t*)data; + if (reader->free_len > 0) { + m_del(char, (char*)reader->beg, reader->free_len); + } + m_del_obj(mp_reader_mem_t, reader); +} + +bool mp_reader_new_mem(mp_reader_t *reader, const byte *buf, size_t len, size_t free_len) { + mp_reader_mem_t *rm = m_new_obj_maybe(mp_reader_mem_t); + if (rm == NULL) { + return false; + } + rm->free_len = free_len; + rm->beg = buf; + rm->cur = buf; + rm->end = buf + len; + reader->data = rm; + reader->readbyte = mp_reader_mem_readbyte; + reader->close = mp_reader_mem_close; + return true; +} + +#if MICROPY_READER_POSIX + +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> +#include <errno.h> + +typedef struct _mp_reader_posix_t { + bool close_fd; + int fd; + size_t len; + size_t pos; + byte buf[20]; +} mp_reader_posix_t; + +STATIC mp_uint_t mp_reader_posix_readbyte(void *data) { + mp_reader_posix_t *reader = (mp_reader_posix_t*)data; + if (reader->pos >= reader->len) { + if (reader->len == 0) { + return MP_READER_EOF; + } else { + int n = read(reader->fd, reader->buf, sizeof(reader->buf)); + if (n <= 0) { + reader->len = 0; + return MP_READER_EOF; + } + reader->len = n; + reader->pos = 0; + } + } + return reader->buf[reader->pos++]; +} + +STATIC void mp_reader_posix_close(void *data) { + mp_reader_posix_t *reader = (mp_reader_posix_t*)data; + if (reader->close_fd) { + close(reader->fd); + } + m_del_obj(mp_reader_posix_t, reader); +} + +int mp_reader_new_file_from_fd(mp_reader_t *reader, int fd, bool close_fd) { + mp_reader_posix_t *rp = m_new_obj_maybe(mp_reader_posix_t); + if (rp == NULL) { + if (close_fd) { + close(fd); + } + return MP_ENOMEM; + } + rp->close_fd = close_fd; + rp->fd = fd; + int n = read(rp->fd, rp->buf, sizeof(rp->buf)); + if (n == -1) { + if (close_fd) { + close(fd); + } + return errno; + } + rp->len = n; + rp->pos = 0; + reader->data = rp; + reader->readbyte = mp_reader_posix_readbyte; + reader->close = mp_reader_posix_close; + return 0; // success +} + +int mp_reader_new_file(mp_reader_t *reader, const char *filename) { + int fd = open(filename, O_RDONLY, 0644); + if (fd < 0) { + return errno; + } + return mp_reader_new_file_from_fd(reader, fd, true); +} + +#endif diff --git a/py/reader.h b/py/reader.h new file mode 100644 index 0000000000..b02d96149b --- /dev/null +++ b/py/reader.h @@ -0,0 +1,46 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2016 Damien P. 
George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef MICROPY_INCLUDED_PY_READER_H +#define MICROPY_INCLUDED_PY_READER_H + +#include "py/obj.h" + +// the readbyte function must return the next byte in the input stream +// it must return MP_READER_EOF if end of stream +// it can be called again after returning MP_READER_EOF, and in that case must return MP_READER_EOF +#define MP_READER_EOF ((mp_uint_t)(-1)) + +typedef struct _mp_reader_t { + void *data; + mp_uint_t (*readbyte)(void *data); + void (*close)(void *data); +} mp_reader_t; + +bool mp_reader_new_mem(mp_reader_t *reader, const byte *buf, size_t len, size_t free_len); +int mp_reader_new_file(mp_reader_t *reader, const char *filename); +int mp_reader_new_file_from_fd(mp_reader_t *reader, int fd, bool close_fd); + +#endif // MICROPY_INCLUDED_PY_READER_H diff --git a/py/runtime.c b/py/runtime.c index c255574643..8b4420926c 100644 --- a/py/runtime.c +++ b/py/runtime.c @@ -68,6 +68,15 @@ void mp_init(void) { mp_init_emergency_exception_buf(); #endif + #if MICROPY_KBD_EXCEPTION + // initialise the exception object for raising KeyboardInterrupt + MP_STATE_VM(mp_kbd_exception).base.type = &mp_type_KeyboardInterrupt; + MP_STATE_VM(mp_kbd_exception).traceback_alloc = 0; + MP_STATE_VM(mp_kbd_exception).traceback_len = 0; + MP_STATE_VM(mp_kbd_exception).traceback_data = NULL; + MP_STATE_VM(mp_kbd_exception).args = mp_const_empty_tuple; + #endif + // call port specific initialization if any #ifdef MICROPY_PORT_INIT_FUNC MICROPY_PORT_INIT_FUNC; @@ -91,6 +100,11 @@ void mp_init(void) { MP_STATE_VM(mp_module_builtins_override_dict) = NULL; #endif + #if MICROPY_FSUSERMOUNT + // zero out the pointers to the user-mounted devices + memset(MP_STATE_VM(fs_user_mount), 0, sizeof(MP_STATE_VM(fs_user_mount))); + #endif + #if MICROPY_PY_THREAD_GIL mp_thread_mutex_init(&MP_STATE_VM(gil_mutex)); #endif @@ -1191,17 +1205,31 @@ mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t th mp_obj_t dest[3]; // Reserve slot for send() arg + // Python instance iterator protocol if (send_value == mp_const_none) { mp_load_method_maybe(self_in, MP_QSTR___next__, dest); if (dest[0] != MP_OBJ_NULL) { - *ret_val = mp_call_method_n_kw(0, 0, dest); - return MP_VM_RETURN_YIELD; + nlr_buf_t nlr; + if (nlr_push(&nlr) == 0) { + *ret_val = mp_call_method_n_kw(0, 0, dest); + nlr_pop(); + return MP_VM_RETURN_YIELD; + } else { + *ret_val = MP_OBJ_FROM_PTR(nlr.ret_val); + return MP_VM_RETURN_EXCEPTION; + } } } + // Either 
python instance generator protocol, or native object + // generator protocol. if (send_value != MP_OBJ_NULL) { mp_load_method(self_in, MP_QSTR_send, dest); dest[2] = send_value; + // TODO: This should have exception wrapping like __next__ case + // above. Not done right away to think how to optimize native + // generators better, see: + // https://github.com/micropython/micropython/issues/2628 *ret_val = mp_call_method_n_kw(1, 0, dest); return MP_VM_RETURN_YIELD; } diff --git a/py/runtime.h b/py/runtime.h index 80488098ae..3532b838de 100644 --- a/py/runtime.h +++ b/py/runtime.h @@ -95,6 +95,7 @@ mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2); mp_obj_t mp_call_function_n_kw(mp_obj_t fun, mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args); mp_obj_t mp_call_method_n_kw(mp_uint_t n_args, mp_uint_t n_kw, const mp_obj_t *args); mp_obj_t mp_call_method_n_kw_var(bool have_self, mp_uint_t n_args_n_kw, const mp_obj_t *args); +mp_obj_t mp_call_method_self_n_kw(mp_obj_t meth, mp_obj_t self, size_t n_args, size_t n_kw, const mp_obj_t *args); // Call function and catch/dump exception - for Python callbacks from C code void mp_call_function_1_protected(mp_obj_t fun, mp_obj_t arg); void mp_call_function_2_protected(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2); diff --git a/py/stream.c b/py/stream.c index dadfcf5d62..c915110e0b 100644 --- a/py/stream.c +++ b/py/stream.c @@ -361,7 +361,6 @@ STATIC mp_obj_t stream_readall(mp_obj_t self_in) { vstr.len = total_size; return mp_obj_new_str_from_vstr(STREAM_CONTENT_TYPE(stream_p), &vstr); } -MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_readall_obj, stream_readall); // Unbuffered, inefficient implementation of readline() for raw I/O files. STATIC mp_obj_t stream_unbuffered_readline(size_t n_args, const mp_obj_t *args) { diff --git a/py/stream.h b/py/stream.h index 4cdea11eba..01199ab601 100644 --- a/py/stream.h +++ b/py/stream.h @@ -42,6 +42,12 @@ #define MP_STREAM_GET_DATA_OPTS (8) // Get data/message options #define MP_STREAM_SET_DATA_OPTS (9) // Set data/message options +// These poll ioctl values are compatible with Linux +#define MP_STREAM_POLL_RD (0x0001) +#define MP_STREAM_POLL_WR (0x0004) +#define MP_STREAM_POLL_ERR (0x0008) +#define MP_STREAM_POLL_HUP (0x0010) + // Argument structure for MP_STREAM_SEEK struct mp_stream_seek_t { mp_off_t offset; @@ -51,7 +57,6 @@ struct mp_stream_seek_t { MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read_obj); MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read1_obj); MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_readinto_obj); -MP_DECLARE_CONST_FUN_OBJ_1(mp_stream_readall_obj); MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_unbuffered_readline_obj); MP_DECLARE_CONST_FUN_OBJ_1(mp_stream_unbuffered_readlines_obj); MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_write_obj); diff --git a/py/unicode.c b/py/unicode.c index 8be63f217a..c6f872038d 100644 --- a/py/unicode.c +++ b/py/unicode.c @@ -133,9 +133,11 @@ bool unichar_isalpha(unichar c) { return c < 128 && (attr[c] & FL_ALPHA) != 0; } +/* unused bool unichar_isprint(unichar c) { return c < 128 && (attr[c] & FL_PRINT) != 0; } +*/ bool unichar_isdigit(unichar c) { return c < 128 && (attr[c] & FL_DIGIT) != 0; |
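Editor's note: the patch above replaces the old lexer stream callbacks with the generic mp_reader_t interface (py/reader.h, py/reader.c). As a minimal, hedged sketch of how a consumer might drain such a reader, the fragment below uses only names defined in this diff (mp_reader_t, mp_reader_new_mem, readbyte, close, MP_READER_EOF); the helper function count_bytes is hypothetical and not part of the patch.

    #include "py/reader.h"

    // Count the bytes available from an in-memory reader (illustrative only).
    static size_t count_bytes(const byte *buf, size_t len) {
        mp_reader_t reader;
        // free_len == 0: buf is not freed when the reader is closed
        if (!mp_reader_new_mem(&reader, buf, len, 0)) {
            return 0; // allocation failed
        }
        size_t n = 0;
        // Per the contract in reader.h, readbyte returns MP_READER_EOF once the
        // stream is exhausted and may safely be called again after that.
        while (reader.readbyte(reader.data) != MP_READER_EOF) {
            ++n;
        }
        reader.close(reader.data); // releases the reader state
        return n;
    }

The same data/readbyte/close triple is what mp_lexer_new and mp_raw_code_load consume in this patch, which is why the POSIX and FatFS file readers can be selected independently via MICROPY_READER_POSIX / MICROPY_READER_FATFS.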