author    Damien <damien.p.george@gmail.com>  2013-11-17 13:16:36 +0000
committer Damien <damien.p.george@gmail.com>  2013-11-17 13:16:36 +0000
commit    0446a0d76d6949423a77286522c116d35a7c5f31 (patch)
tree      ea6e5ab6983974b64d7eaad9853642449dbd5a57
parent    6d4f3462c4c3bb8a3d5b60482766b9c06891dc1a (diff)
download  micropython-0446a0d76d6949423a77286522c116d35a7c5f31.tar.gz
          micropython-0446a0d76d6949423a77286522c116d35a7c5f31.zip
Change some debugging/output messages for native code generation.
-rw-r--r--  py/asmthumb.c   2
-rw-r--r--  py/compile.c    2
-rw-r--r--  py/runtime.c   51
3 files changed, 38 insertions(+), 17 deletions(-)
diff --git a/py/asmthumb.c b/py/asmthumb.c
index 7cfb479b56..602caeb2d6 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -74,7 +74,7 @@ void asm_thumb_end_pass(asm_thumb_t *as) {
// calculate size of code in bytes
as->code_size = as->code_offset;
as->code_base = m_new(byte, as->code_size);
- printf("code_size: %u\n", as->code_size);
+ //printf("code_size: %u\n", as->code_size);
}
/*
diff --git a/py/compile.c b/py/compile.c
index a01f5f4c58..f6bbe1ab30 100644
--- a/py/compile.c
+++ b/py/compile.c
@@ -1409,7 +1409,7 @@ void compile_for_stmt(compiler_t *comp, py_parse_node_struct_t *pns) {
// this bit optimises: for <x> in range(...), turning it into an explicitly incremented variable
// this is actually slower, but uses no heap memory
// for viper it will be much, much faster
- if (PY_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_power)) {
+ if (/*comp->scope_cur->emit_options == EMIT_OPT_VIPER &&*/ PY_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_power)) {
py_parse_node_struct_t *pns_it = (py_parse_node_struct_t*)pns->nodes[1];
if (PY_PARSE_NODE_IS_ID(pns_it->nodes[0]) && PY_PARSE_NODE_IS_STRUCT_KIND(pns_it->nodes[1], PN_trailer_paren) && PY_PARSE_NODE_IS_NULL(pns_it->nodes[2])) {
py_parse_node_t pn_range_args = ((py_parse_node_struct_t*)pns_it->nodes[1])->nodes[0];
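The comment in this hunk describes lowering "for <x> in range(...)" into an explicitly incremented counter so that no heap range object is allocated. As a rough conceptual sketch only (not the compiler's actual emitted code; the function name and the assumption of a positive step are illustrative), the optimised loop behaves like:

#include <stdio.h>

// Conceptual sketch: what "for x in range(start, stop, step)" reduces to
// when the optimisation fires. Assumes a positive step; the real emitter
// also has to cope with negative steps and the 1/2/3-argument forms of range().
static void range_loop_sketch(int start, int stop, int step) {
    for (int x = start; x < stop; x += step) {
        printf("%d\n", x);  // stands in for the loop body
    }
}

int main(void) {
    range_loop_sketch(0, 10, 2);  // prints 0, 2, 4, 6, 8
    return 0;
}

As the comment notes, on the bytecode VM this is actually slower than iterating a range object, but it uses no heap memory and is where the viper emitter gains the most.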
diff --git a/py/runtime.c b/py/runtime.c
index 5ca8f99f30..81881c4038 100644
--- a/py/runtime.c
+++ b/py/runtime.c
@@ -17,7 +17,7 @@
#if 0 // print debugging info
#define DEBUG_PRINT (1)
-#define WRITE_NATIVE (1)
+#define WRITE_CODE (1)
#define DEBUG_printf(args...) printf(args)
#define DEBUG_OP_printf(args...) printf(args)
#else // don't print debugging info
@@ -932,8 +932,8 @@ py_obj_t py_builtin_range(py_obj_t o_arg) {
return py_obj_new_range(0, py_obj_get_int(o_arg), 1);
}
-#ifdef WRITE_NATIVE
-FILE *fp_native = NULL;
+#ifdef WRITE_CODE
+FILE *fp_write_code = NULL;
#endif
void rt_init(void) {
@@ -974,15 +974,15 @@ void rt_init(void) {
fun_list_append = rt_make_function_2(rt_list_append);
fun_gen_instance_next = rt_make_function_1(rt_gen_instance_next);
-#ifdef WRITE_NATIVE
- fp_native = fopen("out-native", "wb");
+#ifdef WRITE_CODE
+ fp_write_code = fopen("out-code", "wb");
#endif
}
void rt_deinit(void) {
-#ifdef WRITE_NATIVE
- if (fp_native != NULL) {
- fclose(fp_native);
+#ifdef WRITE_CODE
+ if (fp_write_code != NULL) {
+ fclose(fp_write_code);
}
#endif
}
@@ -1016,7 +1016,26 @@ void rt_assign_byte_code(int unique_code_id, byte *code, uint len, int n_args, i
unique_codes[unique_code_id].u_byte.code = code;
unique_codes[unique_code_id].u_byte.len = len;
+ printf("byte code: %d bytes\n", len);
+
+#ifdef DEBUG_PRINT
DEBUG_printf("assign byte code: id=%d code=%p len=%u n_args=%d\n", unique_code_id, code, len, n_args);
+ for (int i = 0; i < 128 && i < len; i++) {
+ if (i > 0 && i % 16 == 0) {
+ DEBUG_printf("\n");
+ }
+ DEBUG_printf(" %02x", code[i]);
+ }
+ DEBUG_printf("\n");
+ py_un_byte_code(code, len);
+
+#ifdef WRITE_CODE
+ if (fp_write_code != NULL) {
+ fwrite(code, len, 1, fp_write_code);
+ fflush(fp_write_code);
+ }
+#endif
+#endif
}
void rt_assign_native_code(int unique_code_id, py_fun_t fun, uint len, int n_args) {
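The block added to rt_assign_byte_code above prints the byte code size, hex-dumps at most the first 128 bytes (16 per line) when DEBUG_PRINT is on, and appends the raw bytes to fp_write_code when WRITE_CODE is also defined. A minimal standalone sketch of the same dump pattern (the helper name and the capture-file handling here are illustrative, not part of the runtime):

#include <stdio.h>

// Sketch of the debug-dump pattern used above: print a bounded hex dump,
// then append the raw bytes to a capture file for offline inspection.
static void dump_code_sketch(const unsigned char *code, unsigned int len, FILE *fp) {
    for (unsigned int i = 0; i < 128 && i < len; i++) {
        if (i > 0 && i % 16 == 0) {
            printf("\n");               // 16 bytes per line, as in the diff
        }
        printf(" %02x", code[i]);
    }
    printf("\n");
    if (fp != NULL) {
        fwrite(code, len, 1, fp);       // raw dump of the whole buffer
        fflush(fp);                     // flush so the file survives a crash
    }
}

Writing the raw buffer rather than the formatted hex keeps the capture file usable with external disassemblers and diff tools.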
@@ -1030,6 +1049,8 @@ void rt_assign_native_code(int unique_code_id, py_fun_t fun, uint len, int n_arg
unique_codes[unique_code_id].is_generator = false;
unique_codes[unique_code_id].u_native.fun = fun;
+ printf("native code: %d bytes\n", len);
+
#ifdef DEBUG_PRINT
DEBUG_printf("assign native code: id=%d fun=%p len=%u n_args=%d\n", unique_code_id, fun, len, n_args);
byte *fun_data = (byte*)(((machine_uint_t)fun) & (~1)); // need to clear lower bit in case it's thumb code
@@ -1041,10 +1062,10 @@ void rt_assign_native_code(int unique_code_id, py_fun_t fun, uint len, int n_arg
}
DEBUG_printf("\n");
-#ifdef WRITE_NATIVE
- if (fp_native != NULL) {
- fwrite(fun_data, len, 1, fp_native);
- fflush(fp_native);
+#ifdef WRITE_CODE
+ if (fp_write_code != NULL) {
+ fwrite(fun_data, len, 1, fp_write_code);
+ fflush(fp_write_code);
}
#endif
#endif
@@ -1072,9 +1093,9 @@ void rt_assign_inline_asm_code(int unique_code_id, py_fun_t fun, uint len, int n
}
DEBUG_printf("\n");
-#ifdef WRITE_NATIVE
- if (fp_native != NULL) {
- fwrite(fun_data, len, 1, fp_native);
+#ifdef WRITE_CODE
+ if (fp_write_code != NULL) {
+ fwrite(fun_data, len, 1, fp_write_code);
}
#endif
#endif
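Taken together, the runtime.c changes rename the WRITE_NATIVE switch to WRITE_CODE and route byte code, native code and inline assembler output through one capture file. A condensed sketch of that lifecycle, lifted out of the surrounding runtime (the *_sketch names are illustrative; the "out-code" file name and the #ifdef structure follow the diff, and WRITE_CODE is only defined when the "#if 0 // print debugging info" block is switched on):

#include <stdio.h>

#define WRITE_CODE (1)                  // normally enabled via the debug #if block

#ifdef WRITE_CODE
static FILE *fp_write_code = NULL;      // one capture file for all generated code
#endif

static void init_sketch(void) {
#ifdef WRITE_CODE
    fp_write_code = fopen("out-code", "wb");
#endif
}

static void capture_sketch(const unsigned char *code, unsigned int len) {
#ifdef WRITE_CODE
    if (fp_write_code != NULL) {
        fwrite(code, len, 1, fp_write_code);
        fflush(fp_write_code);          // keep partial output even on a crash
    }
#endif
}

static void deinit_sketch(void) {
#ifdef WRITE_CODE
    if (fp_write_code != NULL) {
        fclose(fp_write_code);
        fp_write_code = NULL;
    }
#endif
}

int main(void) {
    const unsigned char demo[4] = {0x01, 0x02, 0x03, 0x04};  // stand-in for emitted code
    init_sketch();
    capture_sketch(demo, sizeof(demo));
    deinit_sketch();
    return 0;
}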