From b0cbfb0492028192a28f0514fba71ec954330108 Mon Sep 17 00:00:00 2001 From: Damien George Date: Sun, 13 Nov 2016 15:32:05 +1100 Subject: py/parse: Move function to check for const parse node to parse.[ch]. --- py/compile.c | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index f84d5e2145..cd38b07f33 100644 --- a/py/compile.c +++ b/py/compile.c @@ -243,23 +243,13 @@ STATIC void compile_generic_tuple(compiler_t *comp, mp_parse_node_struct_t *pns) c_tuple(comp, MP_PARSE_NODE_NULL, pns); } -STATIC bool node_is_const_false(mp_parse_node_t pn) { - return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_FALSE) - || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) == 0); -} - -STATIC bool node_is_const_true(mp_parse_node_t pn) { - return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_TRUE) - || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) != 0); -} - STATIC void c_if_cond(compiler_t *comp, mp_parse_node_t pn, bool jump_if, int label) { - if (node_is_const_false(pn)) { + if (mp_parse_node_is_const_false(pn)) { if (jump_if == false) { EMIT_ARG(jump, label); } return; - } else if (node_is_const_true(pn)) { + } else if (mp_parse_node_is_const_true(pn)) { if (jump_if == true) { EMIT_ARG(jump, label); } @@ -1218,14 +1208,14 @@ STATIC void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { uint l_end = comp_next_label(comp); // optimisation: don't emit anything when "if False" - if (!node_is_const_false(pns->nodes[0])) { + if (!mp_parse_node_is_const_false(pns->nodes[0])) { uint l_fail = comp_next_label(comp); c_if_cond(comp, pns->nodes[0], false, l_fail); // if condition compile_node(comp, pns->nodes[1]); // if block // optimisation: skip everything else when "if True" - if (node_is_const_true(pns->nodes[0])) { + if (mp_parse_node_is_const_true(pns->nodes[0])) { goto done; } @@ -1250,14 +1240,14 @@ STATIC void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { mp_parse_node_struct_t *pns_elif = (mp_parse_node_struct_t*)pn_elif[i]; // optimisation: don't emit anything when "if False" - if (!node_is_const_false(pns_elif->nodes[0])) { + if (!mp_parse_node_is_const_false(pns_elif->nodes[0])) { uint l_fail = comp_next_label(comp); c_if_cond(comp, pns_elif->nodes[0], false, l_fail); // elif condition compile_node(comp, pns_elif->nodes[1]); // elif block // optimisation: skip everything else when "elif True" - if (node_is_const_true(pns_elif->nodes[0])) { + if (mp_parse_node_is_const_true(pns_elif->nodes[0])) { goto done; } @@ -1294,9 +1284,9 @@ done: STATIC void compile_while_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { START_BREAK_CONTINUE_BLOCK - if (!node_is_const_false(pns->nodes[0])) { // optimisation: don't emit anything for "while False" + if (!mp_parse_node_is_const_false(pns->nodes[0])) { // optimisation: don't emit anything for "while False" uint top_label = comp_next_label(comp); - if (!node_is_const_true(pns->nodes[0])) { // optimisation: don't jump to cond for "while True" + if (!mp_parse_node_is_const_true(pns->nodes[0])) { // optimisation: don't jump to cond for "while True" EMIT_ARG(jump, continue_label); } EMIT_ARG(label_assign, top_label); -- cgit v1.2.3 From ed9c93f0f13fe2b6b4b0fbc1155b1e38ee1be242 Mon Sep 17 00:00:00 2001 From: Damien George Date: Sun, 13 Nov 2016 15:35:11 +1100 Subject: py/parse: Make mp_parse_node_new_leaf an inline function. 
It is split into 2 functions, one to make small ints and the other to make a non-small-int leaf node. This reduces code size by 32 bytes on bare-arm, 64 bytes on unix (x64-64) and 144 bytes on stmhal. --- py/compile.c | 6 +++--- py/parse.c | 13 +++---------- py/parse.h | 7 ++++++- 3 files changed, 12 insertions(+), 14 deletions(-) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index cd38b07f33..6efd15effb 100644 --- a/py/compile.c +++ b/py/compile.c @@ -1403,13 +1403,13 @@ STATIC void compile_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { if (1 <= n_args && n_args <= 3) { optimize = true; if (n_args == 1) { - pn_range_start = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 0); + pn_range_start = mp_parse_node_new_small_int(0); pn_range_end = args[0]; - pn_range_step = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 1); + pn_range_step = mp_parse_node_new_small_int(1); } else if (n_args == 2) { pn_range_start = args[0]; pn_range_end = args[1]; - pn_range_step = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, 1); + pn_range_step = mp_parse_node_new_small_int(1); } else { pn_range_start = args[0]; pn_range_end = args[1]; diff --git a/py/parse.c b/py/parse.c index aa6034a6a0..698f796176 100644 --- a/py/parse.c +++ b/py/parse.c @@ -227,13 +227,6 @@ STATIC void pop_rule(parser_t *parser, const rule_t **rule, size_t *arg_i, size_ *src_line = parser->rule_stack[parser->rule_stack_top].src_line; } -mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg) { - if (kind == MP_PARSE_NODE_SMALL_INT) { - return (mp_parse_node_t)(kind | (arg << 1)); - } - return (mp_parse_node_t)(kind | (arg << 4)); -} - bool mp_parse_node_is_const_false(mp_parse_node_t pn) { return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_FALSE) || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) == 0); @@ -418,7 +411,7 @@ STATIC void push_result_token(parser_t *parser, const rule_t *rule) { mp_map_elem_t *elem; if (rule->rule_id == RULE_atom && (elem = mp_map_lookup(&parser->consts, MP_OBJ_NEW_QSTR(id), MP_MAP_LOOKUP)) != NULL) { - pn = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(elem->value)); + pn = mp_parse_node_new_small_int(MP_OBJ_SMALL_INT_VALUE(elem->value)); } else { pn = mp_parse_node_new_leaf(MP_PARSE_NODE_ID, id); } @@ -429,7 +422,7 @@ STATIC void push_result_token(parser_t *parser, const rule_t *rule) { } else if (lex->tok_kind == MP_TOKEN_INTEGER) { mp_obj_t o = mp_parse_num_integer(lex->vstr.buf, lex->vstr.len, 0, lex); if (MP_OBJ_IS_SMALL_INT(o)) { - pn = mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(o)); + pn = mp_parse_node_new_small_int(MP_OBJ_SMALL_INT_VALUE(o)); } else { pn = make_node_const_object(parser, lex->tok_line, o); } @@ -658,7 +651,7 @@ STATIC bool fold_constants(parser_t *parser, const rule_t *rule, size_t num_args pop_result(parser); } if (MP_OBJ_IS_SMALL_INT(arg0)) { - push_result_node(parser, mp_parse_node_new_leaf(MP_PARSE_NODE_SMALL_INT, MP_OBJ_SMALL_INT_VALUE(arg0))); + push_result_node(parser, mp_parse_node_new_small_int(MP_OBJ_SMALL_INT_VALUE(arg0))); } else { // TODO reuse memory for parse node struct? 
push_result_node(parser, make_node_const_object(parser, 0, arg0)); diff --git a/py/parse.h b/py/parse.h index 54bbd26459..769f5a8757 100644 --- a/py/parse.h +++ b/py/parse.h @@ -76,7 +76,12 @@ typedef struct _mp_parse_node_struct_t { #define MP_PARSE_NODE_STRUCT_KIND(pns) ((pns)->kind_num_nodes & 0xff) #define MP_PARSE_NODE_STRUCT_NUM_NODES(pns) ((pns)->kind_num_nodes >> 8) -mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg); +static inline mp_parse_node_t mp_parse_node_new_small_int(mp_int_t val) { + return (mp_parse_node_t)(MP_PARSE_NODE_SMALL_INT | ((mp_uint_t)val << 1)); +} +static inline mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg) { + return (mp_parse_node_t)(kind | ((mp_uint_t)arg << 4)); +} bool mp_parse_node_is_const_false(mp_parse_node_t pn); bool mp_parse_node_is_const_true(mp_parse_node_t pn); bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o); -- cgit v1.2.3 From e6cf5fb2cc43a94210203f4d45d38f29f33a82a8 Mon Sep 17 00:00:00 2001 From: Damien George Date: Sat, 26 Nov 2016 16:15:55 +1100 Subject: py/compile: Remove comment about TODO for short circuiting for if-stmt. Short circuiting is handled correctly by c_if_cond, and constants within short-circuit expressions are optimised by the parser. --- py/compile.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index 6efd15effb..bc8d8d16d7 100644 --- a/py/compile.c +++ b/py/compile.c @@ -1203,8 +1203,6 @@ STATIC void compile_assert_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { } STATIC void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { - // TODO proper and/or short circuiting - uint l_end = comp_next_label(comp); // optimisation: don't emit anything when "if False" -- cgit v1.2.3 From 080a78b15edc465010864002d1c83a5730a85dd0 Mon Sep 17 00:00:00 2001 From: Damien George Date: Wed, 7 Dec 2016 11:17:17 +1100 Subject: py/compile: Simplify configuration of native emitter. --- py/compile.c | 46 ++++++++++++++++++---------------------------- 1 file changed, 18 insertions(+), 28 deletions(-) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index bc8d8d16d7..418c1100bf 100644 --- a/py/compile.c +++ b/py/compile.c @@ -69,6 +69,21 @@ typedef enum { #endif +#if MICROPY_EMIT_NATIVE +// define a macro to access external native emitter +#if MICROPY_EMIT_X64 +#define NATIVE_EMITTER(f) emit_native_x64_##f +#elif MICROPY_EMIT_X86 +#define NATIVE_EMITTER(f) emit_native_x86_##f +#elif MICROPY_EMIT_THUMB +#define NATIVE_EMITTER(f) emit_native_thumb_##f +#elif MICROPY_EMIT_ARM +#define NATIVE_EMITTER(f) emit_native_arm_##f +#else +#error "unknown native emitter" +#endif +#endif + #define EMIT_INLINE_ASM(fun) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm)) #define EMIT_INLINE_ASM_ARG(fun, ...) 
(comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm, __VA_ARGS__)) @@ -3397,27 +3412,10 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f #if MICROPY_EMIT_NATIVE case MP_EMIT_OPT_NATIVE_PYTHON: case MP_EMIT_OPT_VIPER: -#if MICROPY_EMIT_X64 - if (emit_native == NULL) { - emit_native = emit_native_x64_new(&comp->compile_error, max_num_labels); - } - comp->emit_method_table = &emit_native_x64_method_table; -#elif MICROPY_EMIT_X86 if (emit_native == NULL) { - emit_native = emit_native_x86_new(&comp->compile_error, max_num_labels); + emit_native = NATIVE_EMITTER(new)(&comp->compile_error, max_num_labels); } - comp->emit_method_table = &emit_native_x86_method_table; -#elif MICROPY_EMIT_THUMB - if (emit_native == NULL) { - emit_native = emit_native_thumb_new(&comp->compile_error, max_num_labels); - } - comp->emit_method_table = &emit_native_thumb_method_table; -#elif MICROPY_EMIT_ARM - if (emit_native == NULL) { - emit_native = emit_native_arm_new(&comp->compile_error, max_num_labels); - } - comp->emit_method_table = &emit_native_arm_method_table; -#endif + comp->emit_method_table = &NATIVE_EMITTER(method_table); comp->emit = emit_native; EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ENABLE, s->emit_options == MP_EMIT_OPT_VIPER, 0); break; @@ -3460,15 +3458,7 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f emit_bc_free(emit_bc); #if MICROPY_EMIT_NATIVE if (emit_native != NULL) { -#if MICROPY_EMIT_X64 - emit_native_x64_free(emit_native); -#elif MICROPY_EMIT_X86 - emit_native_x86_free(emit_native); -#elif MICROPY_EMIT_THUMB - emit_native_thumb_free(emit_native); -#elif MICROPY_EMIT_ARM - emit_native_arm_free(emit_native); -#endif + NATIVE_EMITTER(free)(emit_native); } #endif #if MICROPY_EMIT_INLINE_THUMB -- cgit v1.2.3 From 8e5aced1fd4845b23b203d8d15f9b34b6b022ceb Mon Sep 17 00:00:00 2001 From: Damien George Date: Fri, 9 Dec 2016 16:39:39 +1100 Subject: py: Integrate Xtensa assembler into native emitter. The config option MICROPY_EMIT_XTENSA can now be enabled to target the Xtensa architecture with @micropython.native and @micropython.viper decorators. 
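As a usage sketch only (the count_bits function below is a made-up example, not something from this commit), a port built with MICROPY_EMIT_XTENSA enabled can apply the standard decorators to ordinary Python code and have it compiled to Xtensa machine code instead of bytecode:

    import micropython

    @micropython.native
    def count_bits(x):
        # compiled by the native emitter rather than the bytecode emitter
        n = 0
        while x:
            x &= x - 1
            n += 1
        return n

@micropython.viper works the same way but applies the stricter viper typing rules for unboxed integers and pointers.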
--- py/compile.c | 2 ++ py/emit.h | 3 +++ py/emitnative.c | 24 +++++++++++++++++++++++- py/mkrules.mk | 2 +- py/mpconfig.h | 7 ++++++- py/py.mk | 8 +++++++- 6 files changed, 42 insertions(+), 4 deletions(-) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index 418c1100bf..7c2c89fb52 100644 --- a/py/compile.c +++ b/py/compile.c @@ -79,6 +79,8 @@ typedef enum { #define NATIVE_EMITTER(f) emit_native_thumb_##f #elif MICROPY_EMIT_ARM #define NATIVE_EMITTER(f) emit_native_arm_##f +#elif MICROPY_EMIT_XTENSA +#define NATIVE_EMITTER(f) emit_native_xtensa_##f #else #error "unknown native emitter" #endif diff --git a/py/emit.h b/py/emit.h index 885033e3d0..122df8e7d2 100644 --- a/py/emit.h +++ b/py/emit.h @@ -154,6 +154,7 @@ extern const emit_method_table_t emit_native_x64_method_table; extern const emit_method_table_t emit_native_x86_method_table; extern const emit_method_table_t emit_native_thumb_method_table; extern const emit_method_table_t emit_native_arm_method_table; +extern const emit_method_table_t emit_native_xtensa_method_table; extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops; extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops; @@ -164,6 +165,7 @@ emit_t *emit_native_x64_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); emit_t *emit_native_x86_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); emit_t *emit_native_thumb_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); emit_t *emit_native_arm_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); +emit_t *emit_native_xtensa_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); void emit_bc_set_max_num_labels(emit_t* emit, mp_uint_t max_num_labels); @@ -172,6 +174,7 @@ void emit_native_x64_free(emit_t *emit); void emit_native_x86_free(emit_t *emit); void emit_native_thumb_free(emit_t *emit); void emit_native_arm_free(emit_t *emit); +void emit_native_xtensa_free(emit_t *emit); void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope); void mp_emit_bc_end_pass(emit_t *emit); diff --git a/py/emitnative.c b/py/emitnative.c index 5a7e0ea341..2e18d26b4a 100644 --- a/py/emitnative.c +++ b/py/emitnative.c @@ -61,7 +61,8 @@ #if (MICROPY_EMIT_X64 && N_X64) \ || (MICROPY_EMIT_X86 && N_X86) \ || (MICROPY_EMIT_THUMB && N_THUMB) \ - || (MICROPY_EMIT_ARM && N_ARM) + || (MICROPY_EMIT_ARM && N_ARM) \ + || (MICROPY_EMIT_XTENSA && N_XTENSA) \ // this is defined so that the assembler exports generic assembler API macros #define GENERIC_ASM_API (1) @@ -139,6 +140,12 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = { #include "py/asmarm.h" #define EXPORT_FUN(name) emit_native_arm_##name +#elif N_XTENSA + +// Xtensa specific stuff +#include "py/asmxtensa.h" +#define EXPORT_FUN(name) emit_native_xtensa_##name + #else #error unknown native emitter @@ -1965,6 +1972,21 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) { ASM_ARM_CC_NE, }; asm_arm_setcc_reg(emit->as, REG_RET, ccs[op - MP_BINARY_OP_LESS]); + #elif N_XTENSA + static uint8_t ccs[6] = { + ASM_XTENSA_CC_LT, + 0x80 | ASM_XTENSA_CC_LT, // for GT we'll swap args + ASM_XTENSA_CC_EQ, + 0x80 | ASM_XTENSA_CC_GE, // for LE we'll swap args + ASM_XTENSA_CC_GE, + ASM_XTENSA_CC_NE, + }; + uint8_t cc = ccs[op - MP_BINARY_OP_LESS]; + if ((cc & 0x80) == 0) { + asm_xtensa_setcc_reg_reg_reg(emit->as, cc, REG_RET, REG_ARG_2, reg_rhs); + } else { + asm_xtensa_setcc_reg_reg_reg(emit->as, cc & ~0x80, REG_RET, reg_rhs, REG_ARG_2); + } #else #error not implemented #endif diff --git a/py/mkrules.mk b/py/mkrules.mk 
index d509f7b481..f4a2363a7a 100644 --- a/py/mkrules.mk +++ b/py/mkrules.mk @@ -49,7 +49,7 @@ $(BUILD)/%.o: %.c # List all native flags since the current build system doesn't have # the micropython configuration available. However, these flags are # needed to extract all qstrings -QSTR_GEN_EXTRA_CFLAGS += -DNO_QSTR -DN_X64 -DN_X86 -DN_THUMB -DN_ARM +QSTR_GEN_EXTRA_CFLAGS += -DNO_QSTR -DN_X64 -DN_X86 -DN_THUMB -DN_ARM -DN_XTENSA QSTR_GEN_EXTRA_CFLAGS += -I$(BUILD)/tmp vpath %.c . $(TOP) diff --git a/py/mpconfig.h b/py/mpconfig.h index 4572fc4cc9..15f770573b 100644 --- a/py/mpconfig.h +++ b/py/mpconfig.h @@ -288,8 +288,13 @@ #define MICROPY_EMIT_ARM (0) #endif +// Whether to emit Xtensa native code +#ifndef MICROPY_EMIT_XTENSA +#define MICROPY_EMIT_XTENSA (0) +#endif + // Convenience definition for whether any native emitter is enabled -#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM) +#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA) /*****************************************************************************/ /* Compiler configuration */ diff --git a/py/py.mk b/py/py.mk index 87a860f755..9d2a188f1a 100644 --- a/py/py.mk +++ b/py/py.mk @@ -130,6 +130,8 @@ PY_O_BASENAME = \ emitinlinethumb.o \ asmarm.o \ emitnarm.o \ + asmxtensa.o \ + emitnxtensa.o \ formatfloat.o \ parsenumbase.o \ parsenum.o \ @@ -250,7 +252,7 @@ PY_O += $(BUILD)/$(BUILD)/frozen_mpy.o endif # Sources that may contain qstrings -SRC_QSTR_IGNORE = nlr% emitnx% emitnthumb% emitnarm% +SRC_QSTR_IGNORE = nlr% emitnx86% emitnx64% emitnthumb% emitnarm% emitnxtensa% SRC_QSTR = $(SRC_MOD) $(addprefix py/,$(filter-out $(SRC_QSTR_IGNORE),$(PY_O_BASENAME:.o=.c)) emitnative.c) # Anything that depends on FORCE will be considered out-of-date @@ -292,6 +294,10 @@ $(PY_BUILD)/emitnarm.o: CFLAGS += -DN_ARM $(PY_BUILD)/emitnarm.o: py/emitnative.c $(call compile_c) +$(PY_BUILD)/emitnxtensa.o: CFLAGS += -DN_XTENSA +$(PY_BUILD)/emitnxtensa.o: py/emitnative.c + $(call compile_c) + # optimising gc for speed; 5ms down to 4ms on pybv2 $(PY_BUILD)/gc.o: CFLAGS += $(CSUPEROPT) -- cgit v1.2.3 From ad297a1950c74c35c90dd655ae9a69d33ed28dc0 Mon Sep 17 00:00:00 2001 From: Damien George Date: Fri, 9 Dec 2016 13:17:49 +1100 Subject: py: Allow inline-assembler emitter to be generic. This patch refactors some code so that it is easier to integrate new inline assemblers for different architectures other than ARM Thumb. 
--- py/asmbase.c | 4 ++-- py/compile.c | 54 ++++++++++++++++++++++++++++++------------------------ py/compile.h | 2 +- py/emitglue.c | 4 ++-- py/mpconfig.h | 3 +++ py/nativeglue.c | 2 +- py/objfun.c | 4 ++-- 7 files changed, 41 insertions(+), 32 deletions(-) (limited to 'py/compile.c') diff --git a/py/asmbase.c b/py/asmbase.c index 916f7c5ec8..848730593e 100644 --- a/py/asmbase.c +++ b/py/asmbase.c @@ -31,7 +31,7 @@ #include "py/misc.h" #include "py/asmbase.h" -#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM void mp_asm_base_init(mp_asm_base_t *as, size_t max_num_labels) { as->max_num_labels = max_num_labels; @@ -101,4 +101,4 @@ void mp_asm_base_data(mp_asm_base_t* as, unsigned int bytesize, uintptr_t val) { } } -#endif // MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB +#endif // MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM diff --git a/py/compile.c b/py/compile.c index 7c2c89fb52..b85e659e7a 100644 --- a/py/compile.c +++ b/py/compile.c @@ -86,6 +86,16 @@ typedef enum { #endif #endif +#if MICROPY_EMIT_INLINE_ASM +// define macros for inline assembler +#if MICROPY_EMIT_INLINE_THUMB +#define ASM_DECORATOR_QSTR MP_QSTR_asm_thumb +#define ASM_EMITTER(f) emit_inline_thumb_##f +#else +#error "unknown asm emitter" +#endif +#endif + #define EMIT_INLINE_ASM(fun) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm)) #define EMIT_INLINE_ASM_ARG(fun, ...) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm, __VA_ARGS__)) @@ -120,7 +130,7 @@ typedef struct _compiler_t { const emit_method_table_t *emit_method_table; // current emit method table #endif - #if MICROPY_EMIT_INLINE_THUMB + #if MICROPY_EMIT_INLINE_ASM emit_inline_asm_t *emit_inline_asm; // current emitter for inline asm const emit_inline_asm_method_table_t *emit_inline_asm_method_table; // current emit method table for inline asm #endif @@ -767,10 +777,10 @@ STATIC bool compile_built_in_decorator(compiler_t *comp, int name_len, mp_parse_ } else if (attr == MP_QSTR_viper) { *emit_options = MP_EMIT_OPT_VIPER; #endif -#if MICROPY_EMIT_INLINE_THUMB - } else if (attr == MP_QSTR_asm_thumb) { - *emit_options = MP_EMIT_OPT_ASM_THUMB; -#endif + #if MICROPY_EMIT_INLINE_ASM + } else if (attr == ASM_DECORATOR_QSTR) { + *emit_options = MP_EMIT_OPT_ASM; + #endif } else { compile_syntax_error(comp, name_nodes[1], "invalid micropython decorator"); } @@ -3100,7 +3110,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) { assert(comp->cur_except_level == 0); } -#if MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_INLINE_ASM // requires 3 passes: SCOPE, CODE_SIZE, EMIT STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind_t pass) { comp->pass = pass; @@ -3357,10 +3367,10 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f uint max_num_labels = 0; for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) { if (false) { -#if MICROPY_EMIT_INLINE_THUMB - } else if (s->emit_options == MP_EMIT_OPT_ASM_THUMB) { + #if MICROPY_EMIT_INLINE_ASM + } else if (s->emit_options == MP_EMIT_OPT_ASM) { compile_scope_inline_asm(comp, s, MP_PASS_SCOPE); -#endif + #endif } else { compile_scope(comp, s, MP_PASS_SCOPE); } @@ -3382,28 +3392,24 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f // compile pass 2 and 3 #if MICROPY_EMIT_NATIVE emit_t *emit_native = NULL; -#endif -#if MICROPY_EMIT_INLINE_THUMB - emit_inline_asm_t *emit_inline_thumb = 
NULL; #endif for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) { if (false) { // dummy -#if MICROPY_EMIT_INLINE_THUMB - } else if (s->emit_options == MP_EMIT_OPT_ASM_THUMB) { - // inline assembly for thumb - if (emit_inline_thumb == NULL) { - emit_inline_thumb = emit_inline_thumb_new(max_num_labels); + #if MICROPY_EMIT_INLINE_ASM + } else if (s->emit_options == MP_EMIT_OPT_ASM) { + // inline assembly + if (comp->emit_inline_asm == NULL) { + comp->emit_inline_asm = ASM_EMITTER(new)(max_num_labels); } comp->emit = NULL; - comp->emit_inline_asm = emit_inline_thumb; - comp->emit_inline_asm_method_table = &emit_inline_thumb_method_table; + comp->emit_inline_asm_method_table = &ASM_EMITTER(method_table); compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); if (comp->compile_error == MP_OBJ_NULL) { compile_scope_inline_asm(comp, s, MP_PASS_EMIT); } -#endif + #endif } else { @@ -3463,11 +3469,11 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f NATIVE_EMITTER(free)(emit_native); } #endif -#if MICROPY_EMIT_INLINE_THUMB - if (emit_inline_thumb != NULL) { - emit_inline_thumb_free(emit_inline_thumb); + #if MICROPY_EMIT_INLINE_ASM + if (comp->emit_inline_asm != NULL) { + ASM_EMITTER(free)(comp->emit_inline_asm); } -#endif + #endif // free the parse tree mp_parse_tree_clear(parse_tree); diff --git a/py/compile.h b/py/compile.h index 3cca4cb30b..45a98588d8 100644 --- a/py/compile.h +++ b/py/compile.h @@ -36,7 +36,7 @@ enum { MP_EMIT_OPT_BYTECODE, MP_EMIT_OPT_NATIVE_PYTHON, MP_EMIT_OPT_VIPER, - MP_EMIT_OPT_ASM_THUMB, + MP_EMIT_OPT_ASM, }; // the compiler will raise an exception if an error occurred diff --git a/py/emitglue.c b/py/emitglue.c index abcf50cdd4..795c738d3f 100644 --- a/py/emitglue.c +++ b/py/emitglue.c @@ -82,7 +82,7 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, mp_uint_t #endif } -#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig) { assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM); rc->kind = kind; @@ -138,7 +138,7 @@ mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_ar fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig); break; #endif - #if MICROPY_EMIT_INLINE_THUMB + #if MICROPY_EMIT_INLINE_ASM case MP_CODE_NATIVE_ASM: fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig); break; diff --git a/py/mpconfig.h b/py/mpconfig.h index 15f770573b..5234cc7f9c 100644 --- a/py/mpconfig.h +++ b/py/mpconfig.h @@ -296,6 +296,9 @@ // Convenience definition for whether any native emitter is enabled #define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA) +// Convenience definition for whether any inline assembler emitter is enabled +#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB) + /*****************************************************************************/ /* Compiler configuration */ diff --git a/py/nativeglue.c b/py/nativeglue.c index bc2f4ff5e7..5f2164ee0d 100644 --- a/py/nativeglue.c +++ b/py/nativeglue.c @@ -64,7 +64,7 @@ mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type) 
{ #endif -#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM // convert a native value to a Micro Python object based on type mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type) { diff --git a/py/objfun.c b/py/objfun.c index 6b8fe6d382..207e68a771 100644 --- a/py/objfun.c +++ b/py/objfun.c @@ -471,7 +471,7 @@ mp_obj_t mp_obj_new_fun_viper(mp_uint_t n_args, void *fun_data, mp_uint_t type_s /******************************************************************************/ /* inline assembler functions */ -#if MICROPY_EMIT_INLINE_THUMB +#if MICROPY_EMIT_INLINE_ASM typedef struct _mp_obj_fun_asm_t { mp_obj_base_t base; @@ -582,4 +582,4 @@ mp_obj_t mp_obj_new_fun_asm(mp_uint_t n_args, void *fun_data, mp_uint_t type_sig return o; } -#endif // MICROPY_EMIT_INLINE_THUMB +#endif // MICROPY_EMIT_INLINE_ASM -- cgit v1.2.3 From f76b1bfa9f59fcfa03837c6934ad51d2db3ff4a3 Mon Sep 17 00:00:00 2001 From: Damien George Date: Fri, 9 Dec 2016 17:03:33 +1100 Subject: py: Add inline Xtensa assembler. This patch adds the MICROPY_EMIT_INLINE_XTENSA option, which, when enabled, allows the @micropython.asm_xtensa decorator to be used. The following opcodes are currently supported (ax is a register, a0-a15): ret_n() callx0(ax) j(label) jx(ax) beqz(ax, label) bnez(ax, label) mov(ax, ay) movi(ax, imm) # imm can be full 32-bit, uses l32r if needed and_(ax, ay, az) or_(ax, ay, az) xor(ax, ay, az) add(ax, ay, az) sub(ax, ay, az) mull(ax, ay, az) l8ui(ax, ay, imm) l16ui(ax, ay, imm) l32i(ax, ay, imm) s8i(ax, ay, imm) s16i(ax, ay, imm) s32i(ax, ay, imm) l16si(ax, ay, imm) addi(ax, ay, imm) ball(ax, ay, label) bany(ax, ay, label) bbc(ax, ay, label) bbs(ax, ay, label) beq(ax, ay, label) bge(ax, ay, label) bgeu(ax, ay, label) blt(ax, ay, label) bnall(ax, ay, label) bne(ax, ay, label) bnone(ax, ay, label) Upon entry to the assembly function the registers a0, a12, a13, a14 are pushed to the stack and the stack pointer (a1) decreased by 16. Upon exit, these registers and the stack pointer are restored, and ret.n is executed to return to the caller (caller address is in a0). Note that the ABI for the Xtensa emitters is non-windowing. 
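As a usage sketch (not part of this commit, and assuming the usual inline-assembler convention that the integer result is taken from register a2 on return), a function using only opcodes from the list above looks like this:

    @micropython.asm_xtensa
    def asm_add(a2, a3):
        # add the two incoming arguments; the result is left in a2
        add(a2, a2, a3)

    print(asm_add(40, 2))  # expected to print 42

As with @micropython.asm_thumb, up to 4 parameters are supported and they must be named after the registers they arrive in, here a2 to a5.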
--- py/asmxtensa.c | 4 +- py/compile.c | 3 + py/emit.h | 4 + py/emitinlinextensa.c | 364 ++++++++++++++++++++++++++++++++++++++++++++++++++ py/mpconfig.h | 7 +- py/py.mk | 1 + 6 files changed, 380 insertions(+), 3 deletions(-) create mode 100644 py/emitinlinextensa.c (limited to 'py/compile.c') diff --git a/py/asmxtensa.c b/py/asmxtensa.c index 63aad02c06..00df432ce7 100644 --- a/py/asmxtensa.c +++ b/py/asmxtensa.c @@ -30,7 +30,7 @@ #include "py/mpconfig.h" // wrapper around everything in this file -#if MICROPY_EMIT_XTENSA +#if MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA #include "py/asmxtensa.h" @@ -167,4 +167,4 @@ void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_nu asm_xtensa_op_addi(as, reg_dest, reg_dest, (4 + local_num) * WORD_SIZE); } -#endif // MICROPY_EMIT_XTENSA +#endif // MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA diff --git a/py/compile.c b/py/compile.c index b85e659e7a..6c9b6abfe6 100644 --- a/py/compile.c +++ b/py/compile.c @@ -91,6 +91,9 @@ typedef enum { #if MICROPY_EMIT_INLINE_THUMB #define ASM_DECORATOR_QSTR MP_QSTR_asm_thumb #define ASM_EMITTER(f) emit_inline_thumb_##f +#elif MICROPY_EMIT_INLINE_XTENSA +#define ASM_DECORATOR_QSTR MP_QSTR_asm_xtensa +#define ASM_EMITTER(f) emit_inline_xtensa_##f #else #error "unknown asm emitter" #endif diff --git a/py/emit.h b/py/emit.h index 122df8e7d2..6080b83c40 100644 --- a/py/emit.h +++ b/py/emit.h @@ -272,9 +272,13 @@ typedef struct _emit_inline_asm_method_table_t { } emit_inline_asm_method_table_t; extern const emit_inline_asm_method_table_t emit_inline_thumb_method_table; +extern const emit_inline_asm_method_table_t emit_inline_xtensa_method_table; emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels); +emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels); + void emit_inline_thumb_free(emit_inline_asm_t *emit); +void emit_inline_xtensa_free(emit_inline_asm_t *emit); #if MICROPY_WARNINGS void mp_emitter_warning(pass_kind_t pass, const char *msg); diff --git a/py/emitinlinextensa.c b/py/emitinlinextensa.c new file mode 100644 index 0000000000..bd1a7a3ec8 --- /dev/null +++ b/py/emitinlinextensa.c @@ -0,0 +1,364 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +#include "py/emit.h" +#include "py/asmxtensa.h" + +#if MICROPY_EMIT_INLINE_XTENSA + +struct _emit_inline_asm_t { + uint16_t pass; + scope_t *scope; + mp_obj_t *error_slot; + mp_uint_t max_num_labels; + qstr *label_lookup; + asm_xtensa_t *as; +}; + +STATIC void emit_inline_xtensa_error_msg(emit_inline_asm_t *emit, const char *msg) { + *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg); +} + +STATIC void emit_inline_xtensa_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) { + *emit->error_slot = exc; +} + +emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels) { + emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t); + emit->max_num_labels = max_num_labels; + emit->label_lookup = m_new(qstr, max_num_labels); + emit->as = m_new0(asm_xtensa_t, 1); + mp_asm_base_init(&emit->as->base, max_num_labels); + return emit; +} + +void emit_inline_xtensa_free(emit_inline_asm_t *emit) { + m_del(qstr, emit->label_lookup, emit->max_num_labels); + mp_asm_base_deinit(&emit->as->base, false); + m_del_obj(asm_xtensa_t, emit->as); + m_del_obj(emit_inline_asm_t, emit); +} + +STATIC void emit_inline_xtensa_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot) { + emit->pass = pass; + emit->scope = scope; + emit->error_slot = error_slot; + if (emit->pass == MP_PASS_CODE_SIZE) { + memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr)); + } + mp_asm_base_start_pass(&emit->as->base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE); + asm_xtensa_entry(emit->as, 0); +} + +STATIC void emit_inline_xtensa_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) { + asm_xtensa_exit(emit->as); + asm_xtensa_end_pass(emit->as); + + if (emit->pass == MP_PASS_EMIT) { + void *f = mp_asm_base_get_code(&emit->as->base); + mp_emit_glue_assign_native(emit->scope->raw_code, MP_CODE_NATIVE_ASM, f, + mp_asm_base_get_code_size(&emit->as->base), NULL, emit->scope->num_pos_args, 0, type_sig); + } +} + +STATIC mp_uint_t emit_inline_xtensa_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) { + if (n_params > 4) { + emit_inline_xtensa_error_msg(emit, "can only have up to 4 parameters to Xtensa assembly"); + return 0; + } + for (mp_uint_t i = 0; i < n_params; i++) { + if (!MP_PARSE_NODE_IS_ID(pn_params[i])) { + emit_inline_xtensa_error_msg(emit, "parameters must be registers in sequence a2 to a5"); + return 0; + } + const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i])); + if (!(strlen(p) == 2 && p[0] == 'a' && p[1] == '2' + i)) { + emit_inline_xtensa_error_msg(emit, "parameters must be registers in sequence a2 to a5"); + return 0; + } + } + return n_params; +} + +STATIC bool emit_inline_xtensa_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) { + assert(label_num < emit->max_num_labels); + if (emit->pass == MP_PASS_CODE_SIZE) { + // check for duplicate label on first pass + for (uint i = 0; i < emit->max_num_labels; i++) { + if (emit->label_lookup[i] == label_id) { + return false; + } + } + } + emit->label_lookup[label_num] = label_id; + mp_asm_base_label_assign(&emit->as->base, label_num); + return true; +} + +STATIC void emit_inline_xtensa_align(emit_inline_asm_t *emit, mp_uint_t align) { + mp_asm_base_align(&emit->as->base, align); +} + +STATIC void emit_inline_xtensa_data(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val) { + mp_asm_base_data(&emit->as->base, bytesize, val); +} + +typedef struct _reg_name_t { byte 
reg; byte name[3]; } reg_name_t;
+STATIC const reg_name_t reg_name_table[] = {
+    {0, "a0\0"},
+    {1, "a1\0"},
+    {2, "a2\0"},
+    {3, "a3\0"},
+    {4, "a4\0"},
+    {5, "a5\0"},
+    {6, "a6\0"},
+    {7, "a7\0"},
+    {8, "a8\0"},
+    {9, "a9\0"},
+    {10, "a10"},
+    {11, "a11"},
+    {12, "a12"},
+    {13, "a13"},
+    {14, "a14"},
+    {15, "a15"},
+};
+
+// return empty string in case of error, so we can attempt to parse the string
+// without a special check if it was in fact a string
+STATIC const char *get_arg_str(mp_parse_node_t pn) {
+    if (MP_PARSE_NODE_IS_ID(pn)) {
+        qstr qst = MP_PARSE_NODE_LEAF_ARG(pn);
+        return qstr_str(qst);
+    } else {
+        return "";
+    }
+}
+
+STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    const char *reg_str = get_arg_str(pn);
+    for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(reg_name_table); i++) {
+        const reg_name_t *r = &reg_name_table[i];
+        if (reg_str[0] == r->name[0]
+            && reg_str[1] == r->name[1]
+            && reg_str[2] == r->name[2]
+            && (reg_str[2] == '\0' || reg_str[3] == '\0')) {
+            return r->reg;
+        }
+    }
+    emit_inline_xtensa_error_exc(emit,
+        mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+            "'%s' expects a register", op));
+    return 0;
+}
+
+STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, int min, int max) {
+    mp_obj_t o;
+    if (!mp_parse_node_get_int_maybe(pn, &o)) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects an integer", op));
+        return 0;
+    }
+    uint32_t i = mp_obj_get_int_truncated(o);
+    if (min != max && ((int)i < min || (int)i > max)) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer %d is not within range %d..%d", op, i, min, max));
+        return 0;
+    }
+    return i;
+}
+
+STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+    if (!MP_PARSE_NODE_IS_ID(pn)) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects a label", op));
+        return 0;
+    }
+    qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
+    for (uint i = 0; i < emit->max_num_labels; i++) {
+        if (emit->label_lookup[i] == label_qstr) {
+            return i;
+        }
+    }
+    // only need to have the labels on the last pass
+    if (emit->pass == MP_PASS_EMIT) {
+        emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "label '%q' not defined", label_qstr));
+    }
+    return 0;
+}
+
+#define RRR (0)
+#define RRI8 (1)
+#define RRI8_B (2)
+
+typedef struct _opcode_table_3arg_t {
+    uint16_t name; // actually a qstr, which should fit in 16 bits
+    uint8_t type;
+    uint8_t a0 : 4;
+    uint8_t a1 : 4;
+} opcode_table_3arg_t;
+
+STATIC const opcode_table_3arg_t opcode_table_3arg[] = {
+    // arithmetic opcodes: reg, reg, reg
+    {MP_QSTR_and_, RRR, 0, 1},
+    {MP_QSTR_or_, RRR, 0, 2},
+    {MP_QSTR_xor, RRR, 0, 3},
+    {MP_QSTR_add, RRR, 0, 8},
+    {MP_QSTR_sub, RRR, 0, 12},
+    {MP_QSTR_mull, RRR, 2, 8},
+
+    // load/store/addi opcodes: reg, reg, imm
+    // upper nibble of type encodes the range of the immediate arg
+    {MP_QSTR_l8ui, RRI8 | 0x10, 2, 0},
+    {MP_QSTR_l16ui, RRI8 | 0x30, 2, 1},
+    {MP_QSTR_l32i, RRI8 | 0x50, 2, 2},
+    {MP_QSTR_s8i, RRI8 | 0x10, 2, 4},
+    {MP_QSTR_s16i, RRI8 | 0x30, 2, 5},
+    {MP_QSTR_s32i, RRI8 | 0x50, 2, 6},
+    {MP_QSTR_l16si, RRI8 | 0x30, 2, 9},
+    {MP_QSTR_addi, RRI8 | 0x00, 2, 12},
+
+    // branch opcodes: reg, reg, label
+    {MP_QSTR_ball, RRI8_B, ASM_XTENSA_CC_ALL, 0},
+    {MP_QSTR_bany, RRI8_B, ASM_XTENSA_CC_ANY, 0},
+    {MP_QSTR_bbc, RRI8_B, ASM_XTENSA_CC_BC, 0},
+
{MP_QSTR_bbs, RRI8_B, ASM_XTENSA_CC_BS, 0}, + {MP_QSTR_beq, RRI8_B, ASM_XTENSA_CC_EQ, 0}, + {MP_QSTR_bge, RRI8_B, ASM_XTENSA_CC_GE, 0}, + {MP_QSTR_bgeu, RRI8_B, ASM_XTENSA_CC_GEU, 0}, + {MP_QSTR_blt, RRI8_B, ASM_XTENSA_CC_LT, 0}, + {MP_QSTR_bnall, RRI8_B, ASM_XTENSA_CC_NALL, 0}, + {MP_QSTR_bne, RRI8_B, ASM_XTENSA_CC_NE, 0}, + {MP_QSTR_bnone, RRI8_B, ASM_XTENSA_CC_NONE, 0}, +}; + +STATIC void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) { + size_t op_len; + const char *op_str = (const char*)qstr_data(op, &op_len); + + if (n_args == 0) { + if (op == MP_QSTR_ret_n) { + asm_xtensa_op_ret_n(emit->as); + } else { + goto unknown_op; + } + + } else if (n_args == 1) { + if (op == MP_QSTR_callx0) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + asm_xtensa_op_callx0(emit->as, r0); + } else if (op == MP_QSTR_j) { + int label = get_arg_label(emit, op_str, pn_args[0]); + asm_xtensa_j_label(emit->as, label); + } else if (op == MP_QSTR_jx) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + asm_xtensa_op_jx(emit->as, r0); + } else { + goto unknown_op; + } + + } else if (n_args == 2) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + if (op == MP_QSTR_beqz) { + int label = get_arg_label(emit, op_str, pn_args[1]); + asm_xtensa_bccz_reg_label(emit->as, ASM_XTENSA_CCZ_EQ, r0, label); + } else if (op == MP_QSTR_bnez) { + int label = get_arg_label(emit, op_str, pn_args[1]); + asm_xtensa_bccz_reg_label(emit->as, ASM_XTENSA_CCZ_NE, r0, label); + } else if (op == MP_QSTR_mov || op == MP_QSTR_mov_n) { + // we emit mov.n for both "mov" and "mov_n" opcodes + uint r1 = get_arg_reg(emit, op_str, pn_args[1]); + asm_xtensa_op_mov_n(emit->as, r0, r1); + } else if (op == MP_QSTR_movi) { + // for convenience we emit l32r if the integer doesn't fit in movi + uint32_t imm = get_arg_i(emit, op_str, pn_args[1], 0, 0); + asm_xtensa_mov_reg_i32(emit->as, r0, imm); + } else { + goto unknown_op; + } + + } else if (n_args == 3) { + // search table for 3 arg instructions + for (uint i = 0; i < MP_ARRAY_SIZE(opcode_table_3arg); i++) { + const opcode_table_3arg_t *o = &opcode_table_3arg[i]; + if (op == o->name) { + uint r0 = get_arg_reg(emit, op_str, pn_args[0]); + uint r1 = get_arg_reg(emit, op_str, pn_args[1]); + if (o->type == RRR) { + uint r2 = get_arg_reg(emit, op_str, pn_args[2]); + asm_xtensa_op24(emit->as, ASM_XTENSA_ENCODE_RRR(0, o->a0, o->a1, r0, r1, r2)); + } else if (o->type == RRI8_B) { + int label = get_arg_label(emit, op_str, pn_args[2]); + asm_xtensa_bcc_reg_reg_label(emit->as, o->a0, r0, r1, label); + } else { + int shift, min, max; + if ((o->type & 0xf0) == 0) { + shift = 0; + min = -128; + max = 127; + } else { + shift = (o->type & 0xf0) >> 5; + min = 0; + max = 0xff << shift; + } + uint32_t imm = get_arg_i(emit, op_str, pn_args[2], min, max); + asm_xtensa_op24(emit->as, ASM_XTENSA_ENCODE_RRI8(o->a0, o->a1, r1, r0, (imm >> shift) & 0xff)); + } + return; + } + } + goto unknown_op; + + } else { + goto unknown_op; + } + + return; + +unknown_op: + emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "unsupported Xtensa instruction '%s' with %d arguments", op_str, n_args)); + return; + + /* +branch_not_in_range: + emit_inline_xtensa_error_msg(emit, "branch not in range"); + return; + */ +} + +const emit_inline_asm_method_table_t emit_inline_xtensa_method_table = { + emit_inline_xtensa_start_pass, + emit_inline_xtensa_end_pass, + emit_inline_xtensa_count_params, + emit_inline_xtensa_label, + emit_inline_xtensa_align, + 
emit_inline_xtensa_data, + emit_inline_xtensa_op, +}; + +#endif // MICROPY_EMIT_INLINE_XTENSA diff --git a/py/mpconfig.h b/py/mpconfig.h index 5234cc7f9c..3612e6bcb3 100644 --- a/py/mpconfig.h +++ b/py/mpconfig.h @@ -293,11 +293,16 @@ #define MICROPY_EMIT_XTENSA (0) #endif +// Whether to enable the Xtensa inline assembler +#ifndef MICROPY_EMIT_INLINE_XTENSA +#define MICROPY_EMIT_INLINE_XTENSA (0) +#endif + // Convenience definition for whether any native emitter is enabled #define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA) // Convenience definition for whether any inline assembler emitter is enabled -#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB) +#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB || MICROPY_EMIT_INLINE_XTENSA) /*****************************************************************************/ /* Compiler configuration */ diff --git a/py/py.mk b/py/py.mk index 9d2a188f1a..79436aa9e0 100644 --- a/py/py.mk +++ b/py/py.mk @@ -132,6 +132,7 @@ PY_O_BASENAME = \ emitnarm.o \ asmxtensa.o \ emitnxtensa.o \ + emitinlinextensa.o \ formatfloat.o \ parsenumbase.o \ parsenum.o \ -- cgit v1.2.3 From dd53b12193dca4800ab207170fcc883142dd0f22 Mon Sep 17 00:00:00 2001 From: Damien George Date: Fri, 9 Dec 2016 20:54:54 +1100 Subject: py/emitinline: Move inline-asm align and data methods to compiler. These are generic methods that don't depend on the architecture and so can be handled directly by the compiler. --- py/compile.c | 7 +++++-- py/emit.h | 2 -- py/emitinlinethumb.c | 10 ---------- py/emitinlinextensa.c | 10 ---------- 4 files changed, 5 insertions(+), 24 deletions(-) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index 6c9b6abfe6..f98b783b58 100644 --- a/py/compile.c +++ b/py/compile.c @@ -34,6 +34,7 @@ #include "py/emit.h" #include "py/compile.h" #include "py/runtime.h" +#include "py/asmbase.h" #if MICROPY_ENABLE_COMPILER @@ -3224,7 +3225,8 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind return; } if (pass > MP_PASS_SCOPE) { - EMIT_INLINE_ASM_ARG(align, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0])); + mp_asm_base_align((mp_asm_base_t*)comp->emit_inline_asm, + MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0])); } } else if (op == MP_QSTR_data) { if (!(n_args >= 2 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) { @@ -3238,7 +3240,8 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind compile_syntax_error(comp, nodes[i], "'data' requires integer arguments"); return; } - EMIT_INLINE_ASM_ARG(data, bytesize, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[j])); + mp_asm_base_data((mp_asm_base_t*)comp->emit_inline_asm, + bytesize, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[j])); } } } else { diff --git a/py/emit.h b/py/emit.h index 6080b83c40..41cb2162d7 100644 --- a/py/emit.h +++ b/py/emit.h @@ -266,8 +266,6 @@ typedef struct _emit_inline_asm_method_table_t { void (*end_pass)(emit_inline_asm_t *emit, mp_uint_t type_sig); mp_uint_t (*count_params)(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params); bool (*label)(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id); - void (*align)(emit_inline_asm_t *emit, mp_uint_t align); - void (*data)(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val); void (*op)(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args); } emit_inline_asm_method_table_t; diff --git a/py/emitinlinethumb.c b/py/emitinlinethumb.c index 1373e173dc..24ba3fa343 100644 
--- a/py/emitinlinethumb.c +++ b/py/emitinlinethumb.c @@ -130,14 +130,6 @@ STATIC bool emit_inline_thumb_label(emit_inline_asm_t *emit, mp_uint_t label_num return true; } -STATIC void emit_inline_thumb_align(emit_inline_asm_t *emit, mp_uint_t align) { - mp_asm_base_align(&emit->as.base, align); -} - -STATIC void emit_inline_thumb_data(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val) { - mp_asm_base_data(&emit->as.base, bytesize, val); -} - typedef struct _reg_name_t { byte reg; byte name[3]; } reg_name_t; STATIC const reg_name_t reg_name_table[] = { {0, "r0\0"}, @@ -823,8 +815,6 @@ const emit_inline_asm_method_table_t emit_inline_thumb_method_table = { emit_inline_thumb_end_pass, emit_inline_thumb_count_params, emit_inline_thumb_label, - emit_inline_thumb_align, - emit_inline_thumb_data, emit_inline_thumb_op, }; diff --git a/py/emitinlinextensa.c b/py/emitinlinextensa.c index 284624e45b..38a8629e1f 100644 --- a/py/emitinlinextensa.c +++ b/py/emitinlinextensa.c @@ -123,14 +123,6 @@ STATIC bool emit_inline_xtensa_label(emit_inline_asm_t *emit, mp_uint_t label_nu return true; } -STATIC void emit_inline_xtensa_align(emit_inline_asm_t *emit, mp_uint_t align) { - mp_asm_base_align(&emit->as.base, align); -} - -STATIC void emit_inline_xtensa_data(emit_inline_asm_t *emit, mp_uint_t bytesize, mp_uint_t val) { - mp_asm_base_data(&emit->as.base, bytesize, val); -} - typedef struct _reg_name_t { byte reg; byte name[3]; } reg_name_t; STATIC const reg_name_t reg_name_table[] = { {0, "a0\0"}, @@ -355,8 +347,6 @@ const emit_inline_asm_method_table_t emit_inline_xtensa_method_table = { emit_inline_xtensa_end_pass, emit_inline_xtensa_count_params, emit_inline_xtensa_label, - emit_inline_xtensa_align, - emit_inline_xtensa_data, emit_inline_xtensa_op, }; -- cgit v1.2.3 From e920bab9768f71c7e22fcfc5af3e1c40f2db8eeb Mon Sep 17 00:00:00 2001 From: Damien George Date: Fri, 9 Dec 2016 21:23:17 +1100 Subject: py/emitinline: Move common code for end of final pass to compiler. This patch moves some common code from the individual inline assemblers to the compiler, the code that calls the emit-glue to assign the machine code to the functions scope. 
--- py/compile.c | 9 ++++++++- py/emit.h | 2 +- py/emitinlinethumb.c | 10 +--------- py/emitinlinextensa.c | 10 +--------- 4 files changed, 11 insertions(+), 20 deletions(-) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index f98b783b58..43a0bf454a 100644 --- a/py/compile.c +++ b/py/compile.c @@ -3127,7 +3127,7 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind } if (comp->pass > MP_PASS_SCOPE) { - EMIT_INLINE_ASM_ARG(start_pass, comp->pass, comp->scope_cur, &comp->compile_error); + EMIT_INLINE_ASM_ARG(start_pass, comp->pass, &comp->compile_error); } // get the function definition parse node @@ -3258,6 +3258,13 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind if (comp->pass > MP_PASS_SCOPE) { EMIT_INLINE_ASM_ARG(end_pass, type_sig); + + if (comp->pass == MP_PASS_EMIT) { + void *f = mp_asm_base_get_code((mp_asm_base_t*)comp->emit_inline_asm); + mp_emit_glue_assign_native(comp->scope_cur->raw_code, MP_CODE_NATIVE_ASM, + f, mp_asm_base_get_code_size((mp_asm_base_t*)comp->emit_inline_asm), + NULL, comp->scope_cur->num_pos_args, 0, type_sig); + } } if (comp->compile_error != MP_OBJ_NULL) { diff --git a/py/emit.h b/py/emit.h index 41cb2162d7..7d00f11f95 100644 --- a/py/emit.h +++ b/py/emit.h @@ -262,7 +262,7 @@ void mp_emit_bc_end_except_handler(emit_t *emit); typedef struct _emit_inline_asm_t emit_inline_asm_t; typedef struct _emit_inline_asm_method_table_t { - void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot); + void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot); void (*end_pass)(emit_inline_asm_t *emit, mp_uint_t type_sig); mp_uint_t (*count_params)(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params); bool (*label)(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id); diff --git a/py/emitinlinethumb.c b/py/emitinlinethumb.c index 24ba3fa343..4062728136 100644 --- a/py/emitinlinethumb.c +++ b/py/emitinlinethumb.c @@ -45,7 +45,6 @@ typedef enum { struct _emit_inline_asm_t { asm_thumb_t as; uint16_t pass; - scope_t *scope; mp_obj_t *error_slot; mp_uint_t max_num_labels; qstr *label_lookup; @@ -74,9 +73,8 @@ void emit_inline_thumb_free(emit_inline_asm_t *emit) { m_del_obj(emit_inline_asm_t, emit); } -STATIC void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot) { +STATIC void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) { emit->pass = pass; - emit->scope = scope; emit->error_slot = error_slot; if (emit->pass == MP_PASS_CODE_SIZE) { memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr)); @@ -88,12 +86,6 @@ STATIC void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pa STATIC void emit_inline_thumb_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) { asm_thumb_exit(&emit->as); asm_thumb_end_pass(&emit->as); - - if (emit->pass == MP_PASS_EMIT) { - void *f = mp_asm_base_get_code(&emit->as.base); - mp_emit_glue_assign_native(emit->scope->raw_code, MP_CODE_NATIVE_ASM, f, - mp_asm_base_get_code_size(&emit->as.base), NULL, emit->scope->num_pos_args, 0, type_sig); - } } STATIC mp_uint_t emit_inline_thumb_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) { diff --git a/py/emitinlinextensa.c b/py/emitinlinextensa.c index 38a8629e1f..3d3217f5bb 100644 --- a/py/emitinlinextensa.c +++ b/py/emitinlinextensa.c @@ -38,7 +38,6 @@ struct 
_emit_inline_asm_t { asm_xtensa_t as; uint16_t pass; - scope_t *scope; mp_obj_t *error_slot; mp_uint_t max_num_labels; qstr *label_lookup; @@ -67,9 +66,8 @@ void emit_inline_xtensa_free(emit_inline_asm_t *emit) { m_del_obj(emit_inline_asm_t, emit); } -STATIC void emit_inline_xtensa_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, scope_t *scope, mp_obj_t *error_slot) { +STATIC void emit_inline_xtensa_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) { emit->pass = pass; - emit->scope = scope; emit->error_slot = error_slot; if (emit->pass == MP_PASS_CODE_SIZE) { memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr)); @@ -81,12 +79,6 @@ STATIC void emit_inline_xtensa_start_pass(emit_inline_asm_t *emit, pass_kind_t p STATIC void emit_inline_xtensa_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) { asm_xtensa_exit(&emit->as); asm_xtensa_end_pass(&emit->as); - - if (emit->pass == MP_PASS_EMIT) { - void *f = mp_asm_base_get_code(&emit->as.base); - mp_emit_glue_assign_native(emit->scope->raw_code, MP_CODE_NATIVE_ASM, f, - mp_asm_base_get_code_size(&emit->as.base), NULL, emit->scope->num_pos_args, 0, type_sig); - } } STATIC mp_uint_t emit_inline_xtensa_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) { -- cgit v1.2.3 From de9cd00b39fbd66279dda69bc642ac2f3c459fa1 Mon Sep 17 00:00:00 2001 From: Damien George Date: Mon, 19 Dec 2016 17:42:25 +1100 Subject: py/compile: Add an extra pass for Xtensa inline assembler. It needs an extra pass to compute the size of the constant table for the l32r instructions. --- py/compile.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'py/compile.c') diff --git a/py/compile.c b/py/compile.c index 43a0bf454a..b84793d10a 100644 --- a/py/compile.c +++ b/py/compile.c @@ -3419,6 +3419,12 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f comp->emit = NULL; comp->emit_inline_asm_method_table = &ASM_EMITTER(method_table); compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); + #if MICROPY_EMIT_INLINE_XTENSA + // Xtensa requires an extra pass to compute size of l32r const table + // TODO this can be improved by calculating it during SCOPE pass + // but that requires some other structural changes to the asm emitters + compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); + #endif if (comp->compile_error == MP_OBJ_NULL) { compile_scope_inline_asm(comp, s, MP_PASS_EMIT); } -- cgit v1.2.3
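The l32r constant table mentioned in this last commit is easy to trigger from Python code, because movi() accepts a full 32-bit immediate and falls back to l32r plus a constant-table entry when the value does not fit in the instruction's immediate field. A hypothetical example (not part of any commit above, and again assuming the result is returned in a2):

    @micropython.asm_xtensa
    def big_const():
        # 0x12345678 does not fit in a movi immediate, so the emitter
        # loads it with l32r from the constant table instead
        movi(a2, 0x12345678)

    print(hex(big_const()))  # expected to print 0x12345678

The size of that constant table is only known after the instructions have been laid out once, which is why the compiler now runs the Xtensa inline assembler through an extra MP_PASS_CODE_SIZE pass before the final MP_PASS_EMIT pass.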