Diffstat (limited to 'py')
-rw-r--r--   py/builtin.h          |   2
-rw-r--r--   py/builtinimport.c    |   2
-rw-r--r--   py/gc.c               | 139
-rw-r--r--   py/gc.h               |   1
-rw-r--r--   py/makeqstrdefs.py    |   2
-rw-r--r--   py/mkrules.mk         |   8
-rw-r--r--   py/modio.c            |   2
-rw-r--r--   py/modmicropython.c   |   2
-rw-r--r--   py/modthread.c        | 301
-rw-r--r--   py/mpconfig.h         |  20
-rw-r--r--   py/mphal.h            |   1
-rw-r--r--   py/mpstate.h          |  51
-rw-r--r--   py/mpthread.h         |  61
-rw-r--r--   py/nlr.h              |  10
-rw-r--r--   py/nlrsetjmp.c        |   4
-rw-r--r--   py/nlrthumb.S         | 165
-rw-r--r--   py/nlrthumb.c         | 134
-rw-r--r--   py/nlrx64.S           |  38
-rw-r--r--   py/nlrx86.S           |  40
-rw-r--r--   py/obj.h              |   3
-rw-r--r--   py/objarray.c         |  11
-rw-r--r--   py/objarray.h         |  43
-rw-r--r--   py/objdict.c          |  12
-rw-r--r--   py/objmodule.c        |   6
-rw-r--r--   py/objstringio.c      |   4
-rw-r--r--   py/objtype.c          |   8
-rw-r--r--   py/parse.c            |  11
-rw-r--r--   py/py.mk              |  27
-rw-r--r--   py/qstr.c             |  35
-rw-r--r--   py/qstr.h             |   2
-rw-r--r--   py/runtime.c          |   6
-rw-r--r--   py/stackctrl.c        |  10
-rw-r--r--   py/stream.c           |   7
-rw-r--r--   py/vm.c               |   4
34 files changed, 921 insertions, 251 deletions
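The bulk of this changeset adds threading support to the core: a new _thread module (py/modthread.c), a per-thread state structure mp_state_thread_t (py/mpstate.h), a port-provided mutex API (py/mpthread.h), and mutexes guarding the GC heap and the qstr pool. As a rough sketch of the locking convention the hunks below apply (not part of the commit itself), here is how the mp_thread_mutex_* API declared in the new py/mpthread.h can protect a shared structure; the counter and its functions are hypothetical:

    #include "py/mpthread.h"

    #if MICROPY_PY_THREAD
    // Hypothetical shared resource, guarded the same way the diff guards the
    // GC heap (gc_mutex) and the qstr pool (qstr_mutex).
    STATIC mp_thread_mutex_t counter_mutex;
    STATIC size_t shared_counter;

    void counter_init(void) {
        mp_thread_mutex_init(&counter_mutex);
    }

    size_t counter_increment(void) {
        mp_thread_mutex_lock(&counter_mutex, 1); // second argument 1 = block until acquired
        size_t n = ++shared_counter;
        mp_thread_mutex_unlock(&counter_mutex);
        return n;
    }
    #endif

This is the same pattern wrapped by the GC_ENTER()/GC_EXIT() and QSTR_ENTER()/QSTR_EXIT() macro pairs introduced in py/gc.c and py/qstr.c below.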
diff --git a/py/builtin.h b/py/builtin.h index 5d79d2835e..cd1be3ab88 100644 --- a/py/builtin.h +++ b/py/builtin.h @@ -94,6 +94,7 @@ extern const mp_obj_module_t mp_module_micropython; extern const mp_obj_module_t mp_module_ustruct; extern const mp_obj_module_t mp_module_sys; extern const mp_obj_module_t mp_module_gc; +extern const mp_obj_module_t mp_module_thread; extern const mp_obj_dict_t mp_module_builtins_globals; @@ -113,6 +114,7 @@ extern const mp_obj_module_t mp_module_lwip; extern const mp_obj_module_t mp_module_websocket; extern const mp_obj_module_t mp_module_webrepl; extern const mp_obj_module_t mp_module_framebuf; +extern const mp_obj_module_t mp_module_btree; // extmod functions MP_DECLARE_CONST_FUN_OBJ(pyb_mount_obj); diff --git a/py/builtinimport.c b/py/builtinimport.c index d3670858e5..ef3545d653 100644 --- a/py/builtinimport.c +++ b/py/builtinimport.c @@ -462,7 +462,7 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) { vstr_add_str(&path, "__init__.py"); if (mp_import_stat_any(vstr_null_terminated_str(&path)) != MP_IMPORT_STAT_FILE) { vstr_cut_tail_bytes(&path, sizeof("/__init__.py") - 1); // cut off /__init__.py - mp_warning("%s is imported as namespace package", vstr_str(&path)); + //mp_warning("%s is imported as namespace package", vstr_str(&path)); } else { do_load(module_obj, &path); vstr_cut_tail_bytes(&path, sizeof("/__init__.py") - 1); // cut off /__init__.py @@ -94,6 +94,14 @@ #define FTB_CLEAR(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0) #endif +#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL +#define GC_ENTER() mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1) +#define GC_EXIT() mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex)) +#else +#define GC_ENTER() +#define GC_EXIT() +#endif + // TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool void gc_init(void *start, void *end) { // align end pointer on block boundary @@ -144,6 +152,10 @@ void gc_init(void *start, void *end) { // allow auto collection MP_STATE_MEM(gc_auto_collect_enabled) = 1; + #if MICROPY_PY_THREAD + mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex)); + #endif + DEBUG_printf("GC layout:\n"); DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB); #if MICROPY_ENABLE_FINALISER @@ -153,11 +165,15 @@ void gc_init(void *start, void *end) { } void gc_lock(void) { + GC_ENTER(); MP_STATE_MEM(gc_lock_depth)++; + GC_EXIT(); } void gc_unlock(void) { + GC_ENTER(); MP_STATE_MEM(gc_lock_depth)--; + GC_EXIT(); } bool gc_is_locked(void) { @@ -236,6 +252,10 @@ STATIC void gc_sweep(void) { case AT_HEAD: #if MICROPY_ENABLE_FINALISER if (FTB_GET(block)) { + #if MICROPY_PY_THREAD + // TODO need to think about reentrancy with finaliser code + assert(!"finaliser with threading not implemented"); + #endif mp_obj_base_t *obj = (mp_obj_base_t*)PTR_FROM_BLOCK(block); if (obj->type != NULL) { // if the object has a type then see if it has a __del__ method @@ -272,14 +292,15 @@ STATIC void gc_sweep(void) { } void gc_collect_start(void) { - gc_lock(); + GC_ENTER(); + MP_STATE_MEM(gc_lock_depth)++; MP_STATE_MEM(gc_stack_overflow) = 0; MP_STATE_MEM(gc_sp) = MP_STATE_MEM(gc_stack); // Trace root pointers. This relies on the root pointers being organised // correctly in the mp_state_ctx structure. 
We scan nlr_top, dict_locals, // dict_globals, then the root pointer section of mp_state_vm. void **ptrs = (void**)(void*)&mp_state_ctx; - gc_collect_root(ptrs, offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(void*)); + gc_collect_root(ptrs, offsetof(mp_state_ctx_t, vm.qstr_last_chunk) / sizeof(void*)); } void gc_collect_root(void **ptrs, size_t len) { @@ -294,31 +315,26 @@ void gc_collect_end(void) { gc_deal_with_stack_overflow(); gc_sweep(); MP_STATE_MEM(gc_last_free_atb_index) = 0; - gc_unlock(); + MP_STATE_MEM(gc_lock_depth)--; + GC_EXIT(); } void gc_info(gc_info_t *info) { + GC_ENTER(); info->total = MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start); info->used = 0; info->free = 0; + info->max_free = 0; info->num_1block = 0; info->num_2block = 0; info->max_block = 0; - for (size_t block = 0, len = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) { + bool finish = false; + for (size_t block = 0, len = 0, len_free = 0; !finish;) { size_t kind = ATB_GET_KIND(block); - if (kind == AT_FREE || kind == AT_HEAD) { - if (len == 1) { - info->num_1block += 1; - } else if (len == 2) { - info->num_2block += 1; - } - if (len > info->max_block) { - info->max_block = len; - } - } switch (kind) { case AT_FREE: info->free += 1; + len_free += 1; len = 0; break; @@ -336,23 +352,51 @@ void gc_info(gc_info_t *info) { // shouldn't happen break; } + + block++; + finish = (block == MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB); + // Get next block type if possible + if (!finish) { + kind = ATB_GET_KIND(block); + } + + if (finish || kind == AT_FREE || kind == AT_HEAD) { + if (len == 1) { + info->num_1block += 1; + } else if (len == 2) { + info->num_2block += 1; + } + if (len > info->max_block) { + info->max_block = len; + } + if (finish || kind == AT_HEAD) { + if (len_free > info->max_free) { + info->max_free = len_free; + } + len_free = 0; + } + } } info->used *= BYTES_PER_BLOCK; info->free *= BYTES_PER_BLOCK; + GC_EXIT(); } void *gc_alloc(size_t n_bytes, bool has_finaliser) { size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK; DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks); - // check if GC is locked - if (MP_STATE_MEM(gc_lock_depth) > 0) { + // check for 0 allocation + if (n_blocks == 0) { return NULL; } - // check for 0 allocation - if (n_blocks == 0) { + GC_ENTER(); + + // check if GC is locked + if (MP_STATE_MEM(gc_lock_depth) > 0) { + GC_EXIT(); return NULL; } @@ -372,6 +416,7 @@ void *gc_alloc(size_t n_bytes, bool has_finaliser) { if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; } } + GC_EXIT(); // nothing found! 
if (collected) { return NULL; @@ -379,6 +424,7 @@ void *gc_alloc(size_t n_bytes, bool has_finaliser) { DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes); gc_collect(); collected = 1; + GC_ENTER(); } // found, ending at block i inclusive @@ -406,9 +452,12 @@ found: } // get pointer to first block + // we must create this pointer before unlocking the GC so a collection can find it void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK); DEBUG_printf("gc_alloc(%p)\n", ret_ptr); + GC_EXIT(); + // zero out the additional bytes of the newly allocated blocks // This is needed because the blocks may have previously held pointers // to the heap and will not be set to something else if the caller @@ -421,7 +470,9 @@ found: // clear type pointer in case it is never set ((mp_obj_base_t*)ret_ptr)->type = NULL; // set mp_obj flag only if it has a finaliser + GC_ENTER(); FTB_SET(start_block); + GC_EXIT(); } #else (void)has_finaliser; @@ -447,8 +498,10 @@ void *gc_alloc_with_finaliser(mp_uint_t n_bytes) { // force the freeing of a piece of memory // TODO: freeing here does not call finaliser void gc_free(void *ptr) { + GC_ENTER(); if (MP_STATE_MEM(gc_lock_depth) > 0) { // TODO how to deal with this error? + GC_EXIT(); return; } @@ -471,18 +524,25 @@ void gc_free(void *ptr) { block += 1; } while (ATB_GET_KIND(block) == AT_TAIL); + GC_EXIT(); + #if EXTENSIVE_HEAP_PROFILING gc_dump_alloc_table(); #endif } else { + GC_EXIT(); assert(!"bad free"); } } else if (ptr != NULL) { + GC_EXIT(); assert(!"bad free"); + } else { + GC_EXIT(); } } size_t gc_nbytes(const void *ptr) { + GC_ENTER(); if (VERIFY_PTR(ptr)) { size_t block = BLOCK_FROM_PTR(ptr); if (ATB_GET_KIND(block) == AT_HEAD) { @@ -491,11 +551,13 @@ size_t gc_nbytes(const void *ptr) { do { n_blocks += 1; } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL); + GC_EXIT(); return n_blocks * BYTES_PER_BLOCK; } } // invalid pointer + GC_EXIT(); return 0; } @@ -529,10 +591,6 @@ void *gc_realloc(void *ptr, mp_uint_t n_bytes) { #else // Alternative gc_realloc impl void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) { - if (MP_STATE_MEM(gc_lock_depth) > 0) { - return NULL; - } - // check for pure allocation if (ptr_in == NULL) { return gc_alloc(n_bytes, false); @@ -554,8 +612,16 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) { // get first block size_t block = BLOCK_FROM_PTR(ptr); + GC_ENTER(); + // sanity check the ptr is pointing to the head of a block if (ATB_GET_KIND(block) != AT_HEAD) { + GC_EXIT(); + return NULL; + } + + if (MP_STATE_MEM(gc_lock_depth) > 0) { + GC_EXIT(); return NULL; } @@ -590,6 +656,7 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) { // return original ptr if it already has the requested number of blocks if (new_blocks == n_blocks) { + GC_EXIT(); return ptr_in; } @@ -605,6 +672,8 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) { MP_STATE_MEM(gc_last_free_atb_index) = (block + new_blocks) / BLOCKS_PER_ATB; } + GC_EXIT(); + #if EXTENSIVE_HEAP_PROFILING gc_dump_alloc_table(); #endif @@ -620,6 +689,8 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) { ATB_FREE_TO_TAIL(bl); } + GC_EXIT(); + // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc) memset((byte*)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes); @@ -630,19 +701,21 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) { return ptr_in; } + #if MICROPY_ENABLE_FINALISER + bool 
ftb_state = FTB_GET(block); + #else + bool ftb_state = false; + #endif + + GC_EXIT(); + if (!allow_move) { // not allowed to move memory block so return failure return NULL; } // can't resize inplace; try to find a new contiguous chain - void *ptr_out = gc_alloc(n_bytes, -#if MICROPY_ENABLE_FINALISER - FTB_GET(block) -#else - false -#endif - ); + void *ptr_out = gc_alloc(n_bytes, ftb_state); // check that the alloc succeeded if (ptr_out == NULL) { @@ -661,11 +734,12 @@ void gc_dump_info(void) { gc_info(&info); mp_printf(&mp_plat_print, "GC: total: %u, used: %u, free: %u\n", (uint)info.total, (uint)info.used, (uint)info.free); - mp_printf(&mp_plat_print, " No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u\n", - (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block); + mp_printf(&mp_plat_print, " No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u, max free sz: %u\n", + (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free); } void gc_dump_alloc_table(void) { + GC_ENTER(); static const size_t DUMP_BYTES_PER_LINE = 64; #if !EXTENSIVE_HEAP_PROFILING // When comparing heap output we don't want to print the starting @@ -713,7 +787,7 @@ void gc_dump_alloc_table(void) { } if (c == 'h') { ptrs = (void**)&c; - len = ((mp_uint_t)MP_STATE_VM(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t); + len = ((mp_uint_t)MP_STATE_THREAD(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t); for (mp_uint_t i = 0; i < len; i++) { mp_uint_t ptr = (mp_uint_t)ptrs[i]; if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) { @@ -771,6 +845,7 @@ void gc_dump_alloc_table(void) { mp_printf(&mp_plat_print, "%c", c); } mp_print_str(&mp_plat_print, "\n"); + GC_EXIT(); } #if DEBUG_PRINT @@ -54,6 +54,7 @@ typedef struct _gc_info_t { size_t total; size_t used; size_t free; + size_t max_free; size_t num_1block; size_t num_2block; size_t max_block; diff --git a/py/makeqstrdefs.py b/py/makeqstrdefs.py index 194d901d26..69aaefb3e6 100644 --- a/py/makeqstrdefs.py +++ b/py/makeqstrdefs.py @@ -30,7 +30,7 @@ def process_file(f): m = re.match(r"#[line]*\s\d+\s\"([^\"]+)\"", line) assert m is not None fname = m.group(1) - if fname[0] == "/" or not fname.endswith(".c"): + if not fname.endswith(".c"): continue if fname != last_fname: write_out(last_fname, output) diff --git a/py/mkrules.mk b/py/mkrules.mk index 3ed4afec19..b77f8d600f 100644 --- a/py/mkrules.mk +++ b/py/mkrules.mk @@ -49,7 +49,7 @@ $(BUILD)/%.o: %.c # List all native flags since the current build system doesn't have # the micropython configuration available. However, these flags are # needed to extract all qstrings -QSTR_GEN_EXTRA_CFLAGS += -D__QSTR_EXTRACT -DN_X64 -DN_X86 -DN_THUMB -DN_ARM +QSTR_GEN_EXTRA_CFLAGS += -DNO_QSTR -DN_X64 -DN_X86 -DN_THUMB -DN_ARM QSTR_GEN_EXTRA_CFLAGS += -I$(BUILD)/tmp vpath %.c . 
$(TOP) @@ -115,9 +115,6 @@ ifndef DEBUG endif $(Q)$(SIZE) $(PROG) -lib: $(OBJ) - $(AR) rcs libmicropython.a $^ - clean: clean-prog clean-prog: $(RM) -f $(PROG) @@ -126,6 +123,9 @@ clean-prog: .PHONY: clean-prog endif +lib: $(OBJ) + $(AR) rcs libmicropython.a $^ + clean: $(RM) -rf $(BUILD) .PHONY: clean diff --git a/py/modio.c b/py/modio.c index 2fbe6bc1e1..f8826c71a7 100644 --- a/py/modio.c +++ b/py/modio.c @@ -124,7 +124,7 @@ STATIC const mp_obj_type_t bufwriter_type = { { &mp_type_type }, .name = MP_QSTR_BufferedWriter, .make_new = bufwriter_make_new, - .stream_p = &bufwriter_stream_p, + .protocol = &bufwriter_stream_p, .locals_dict = (mp_obj_t)&bufwriter_locals_dict, }; #endif // MICROPY_PY_IO_BUFFEREDWRITER diff --git a/py/modmicropython.c b/py/modmicropython.c index 1ff5e25bd0..805bda51d2 100644 --- a/py/modmicropython.c +++ b/py/modmicropython.c @@ -60,7 +60,7 @@ mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args) { (mp_uint_t)m_get_total_bytes_allocated(), (mp_uint_t)m_get_current_bytes_allocated(), (mp_uint_t)m_get_peak_bytes_allocated()); #endif #if MICROPY_STACK_CHECK - mp_printf(&mp_plat_print, "stack: " UINT_FMT " out of " INT_FMT "\n", mp_stack_usage(), MP_STATE_VM(stack_limit)); + mp_printf(&mp_plat_print, "stack: " UINT_FMT " out of " INT_FMT "\n", mp_stack_usage(), MP_STATE_THREAD(stack_limit)); #else mp_printf(&mp_plat_print, "stack: " UINT_FMT "\n", mp_stack_usage()); #endif diff --git a/py/modthread.c b/py/modthread.c new file mode 100644 index 0000000000..6c8340c928 --- /dev/null +++ b/py/modthread.c @@ -0,0 +1,301 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include <stdio.h> +#include <string.h> + +#include "py/runtime.h" +#include "py/stackctrl.h" + +#if MICROPY_PY_THREAD + +#include "py/mpthread.h" + +#if 0 // print debugging info +#define DEBUG_PRINT (1) +#define DEBUG_printf DEBUG_printf +#else // don't print debugging info +#define DEBUG_PRINT (0) +#define DEBUG_printf(...) 
(void)0 +#endif + +/****************************************************************/ +// Lock object +// Note: with the GIL enabled we can easily synthesise a lock object + +STATIC const mp_obj_type_t mp_type_thread_lock; + +typedef struct _mp_obj_thread_lock_t { + mp_obj_base_t base; + #if !MICROPY_PY_THREAD_GIL + mp_thread_mutex_t mutex; + #endif + volatile bool locked; +} mp_obj_thread_lock_t; + +STATIC mp_obj_thread_lock_t *mp_obj_new_thread_lock(void) { + mp_obj_thread_lock_t *self = m_new_obj(mp_obj_thread_lock_t); + self->base.type = &mp_type_thread_lock; + #if !MICROPY_PY_THREAD_GIL + mp_thread_mutex_init(&self->mutex); + #endif + self->locked = false; + return self; +} + +STATIC mp_obj_t thread_lock_acquire(size_t n_args, const mp_obj_t *args) { + mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(args[0]); + bool wait = true; + if (n_args > 1) { + wait = mp_obj_get_int(args[1]); + // TODO support timeout arg + } + #if MICROPY_PY_THREAD_GIL + if (self->locked) { + if (!wait) { + return mp_const_false; + } + do { + MP_THREAD_GIL_EXIT(); + MP_THREAD_GIL_ENTER(); + } while (self->locked); + } + self->locked = true; + return mp_const_true; + #else + int ret = mp_thread_mutex_lock(&self->mutex, wait); + if (ret == 0) { + return mp_const_false; + } else if (ret == 1) { + self->locked = true; + return mp_const_true; + } else { + nlr_raise(mp_obj_new_exception_arg1(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(-ret))); + } + #endif +} +STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock_acquire_obj, 1, 3, thread_lock_acquire); + +STATIC mp_obj_t thread_lock_release(mp_obj_t self_in) { + mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in); + // TODO check if already unlocked + self->locked = false; + #if !MICROPY_PY_THREAD_GIL + mp_thread_mutex_unlock(&self->mutex); + #endif + return mp_const_none; +} +STATIC MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_release_obj, thread_lock_release); + +STATIC mp_obj_t thread_lock_locked(mp_obj_t self_in) { + mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in); + return mp_obj_new_bool(self->locked); +} +STATIC MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_locked_obj, thread_lock_locked); + +STATIC mp_obj_t thread_lock___exit__(size_t n_args, const mp_obj_t *args) { + (void)n_args; // unused + return thread_lock_release(args[0]); +} +STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock___exit___obj, 4, 4, thread_lock___exit__); + +STATIC const mp_rom_map_elem_t thread_lock_locals_dict_table[] = { + { MP_ROM_QSTR(MP_QSTR_acquire), MP_ROM_PTR(&thread_lock_acquire_obj) }, + { MP_ROM_QSTR(MP_QSTR_release), MP_ROM_PTR(&thread_lock_release_obj) }, + { MP_ROM_QSTR(MP_QSTR_locked), MP_ROM_PTR(&thread_lock_locked_obj) }, + { MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&thread_lock_acquire_obj) }, + { MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&thread_lock___exit___obj) }, +}; + +STATIC MP_DEFINE_CONST_DICT(thread_lock_locals_dict, thread_lock_locals_dict_table); + +STATIC const mp_obj_type_t mp_type_thread_lock = { + { &mp_type_type }, + .name = MP_QSTR_lock, + .locals_dict = (mp_obj_dict_t*)&thread_lock_locals_dict, +}; + +/****************************************************************/ +// _thread module + +STATIC size_t thread_stack_size = 0; + +STATIC mp_obj_t mod_thread_get_ident(void) { + return mp_obj_new_int_from_uint((uintptr_t)mp_thread_get_state()); +} +STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_get_ident_obj, mod_thread_get_ident); + +STATIC mp_obj_t mod_thread_stack_size(size_t n_args, const mp_obj_t *args) { + mp_obj_t ret = mp_obj_new_int_from_uint(thread_stack_size); 
+ if (n_args == 0) { + thread_stack_size = 0; + } else { + thread_stack_size = mp_obj_get_int(args[0]); + } + return ret; +} +STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_stack_size_obj, 0, 1, mod_thread_stack_size); + +typedef struct _thread_entry_args_t { + size_t stack_size; + mp_obj_t fun; + size_t n_args; + size_t n_kw; + mp_obj_t args[]; +} thread_entry_args_t; + +STATIC void *thread_entry(void *args_in) { + // Execution begins here for a new thread. We do not have the GIL. + + thread_entry_args_t *args = (thread_entry_args_t*)args_in; + + mp_state_thread_t ts; + mp_thread_set_state(&ts); + + mp_stack_set_top(&ts + 1); // need to include ts in root-pointer scan + mp_stack_set_limit(args->stack_size); + + MP_THREAD_GIL_ENTER(); + + // signal that we are set up and running + mp_thread_start(); + + // TODO set more thread-specific state here: + // mp_pending_exception? (root pointer) + // cur_exception (root pointer) + // dict_locals? (root pointer) uPy doesn't make a new locals dict for functions, just for classes, so it's different to CPy + + DEBUG_printf("[thread] start ts=%p args=%p stack=%p\n", &ts, &args, MP_STATE_THREAD(stack_top)); + + nlr_buf_t nlr; + if (nlr_push(&nlr) == 0) { + mp_call_function_n_kw(args->fun, args->n_args, args->n_kw, args->args); + nlr_pop(); + } else { + // uncaught exception + // check for SystemExit + mp_obj_base_t *exc = (mp_obj_base_t*)nlr.ret_val; + if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(exc->type), MP_OBJ_FROM_PTR(&mp_type_SystemExit))) { + // swallow exception silently + } else { + // print exception out + mp_printf(&mp_plat_print, "Unhandled exception in thread started by "); + mp_obj_print_helper(&mp_plat_print, args->fun, PRINT_REPR); + mp_printf(&mp_plat_print, "\n"); + mp_obj_print_exception(&mp_plat_print, MP_OBJ_FROM_PTR(exc)); + } + } + + DEBUG_printf("[thread] finish ts=%p\n", &ts); + + // signal that we are finished + mp_thread_finish(); + + MP_THREAD_GIL_EXIT(); + + return NULL; +} + +STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args) { + // This structure holds the Python function and arguments for thread entry. + // We copy all arguments into this structure to keep ownership of them. + // We must be very careful about root pointers because this pointer may + // disappear from our address space before the thread is created. 
+ thread_entry_args_t *th_args; + + // get positional arguments + mp_uint_t pos_args_len; + mp_obj_t *pos_args_items; + mp_obj_get_array(args[1], &pos_args_len, &pos_args_items); + + // check for keyword arguments + if (n_args == 2) { + // just position arguments + th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len); + th_args->n_kw = 0; + } else { + // positional and keyword arguments + if (mp_obj_get_type(args[2]) != &mp_type_dict) { + nlr_raise(mp_obj_new_exception_msg(&mp_type_TypeError, "expecting a dict for keyword args")); + } + mp_map_t *map = &((mp_obj_dict_t*)MP_OBJ_TO_PTR(args[2]))->map; + th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len + 2 * map->used); + th_args->n_kw = map->used; + // copy across the keyword arguments + for (size_t i = 0, n = pos_args_len; i < map->alloc; ++i) { + if (MP_MAP_SLOT_IS_FILLED(map, i)) { + th_args->args[n++] = map->table[i].key; + th_args->args[n++] = map->table[i].value; + } + } + } + + // copy agross the positional arguments + th_args->n_args = pos_args_len; + memcpy(th_args->args, pos_args_items, pos_args_len * sizeof(mp_obj_t)); + + // set the stack size to use + th_args->stack_size = thread_stack_size; + + // set the function for thread entry + th_args->fun = args[0]; + + // spawn the thread! + mp_thread_create(thread_entry, th_args, &th_args->stack_size); + + return mp_const_none; +} +STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_start_new_thread_obj, 2, 3, mod_thread_start_new_thread); + +STATIC mp_obj_t mod_thread_exit(void) { + nlr_raise(mp_obj_new_exception(&mp_type_SystemExit)); +} +STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_exit_obj, mod_thread_exit); + +STATIC mp_obj_t mod_thread_allocate_lock(void) { + return MP_OBJ_FROM_PTR(mp_obj_new_thread_lock()); +} +STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_allocate_lock_obj, mod_thread_allocate_lock); + +STATIC const mp_rom_map_elem_t mp_module_thread_globals_table[] = { + { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR__thread) }, + { MP_ROM_QSTR(MP_QSTR_LockType), MP_ROM_PTR(&mp_type_thread_lock) }, + { MP_ROM_QSTR(MP_QSTR_get_ident), MP_ROM_PTR(&mod_thread_get_ident_obj) }, + { MP_ROM_QSTR(MP_QSTR_stack_size), MP_ROM_PTR(&mod_thread_stack_size_obj) }, + { MP_ROM_QSTR(MP_QSTR_start_new_thread), MP_ROM_PTR(&mod_thread_start_new_thread_obj) }, + { MP_ROM_QSTR(MP_QSTR_exit), MP_ROM_PTR(&mod_thread_exit_obj) }, + { MP_ROM_QSTR(MP_QSTR_allocate_lock), MP_ROM_PTR(&mod_thread_allocate_lock_obj) }, +}; + +STATIC MP_DEFINE_CONST_DICT(mp_module_thread_globals, mp_module_thread_globals_table); + +const mp_obj_module_t mp_module_thread = { + .base = { &mp_type_module }, + .name = MP_QSTR__thread, + .globals = (mp_obj_dict_t*)&mp_module_thread_globals, +}; + +#endif // MICROPY_PY_THREAD diff --git a/py/mpconfig.h b/py/mpconfig.h index 084fc246f5..aec5d40826 100644 --- a/py/mpconfig.h +++ b/py/mpconfig.h @@ -824,6 +824,17 @@ typedef double mp_float_t; #define MICROPY_PY_UERRNO (0) #endif +// Whether to provide "_thread" module +#ifndef MICROPY_PY_THREAD +#define MICROPY_PY_THREAD (0) +#endif + +// Whether to make the VM/runtime thread-safe using a global lock +// If not enabled then thread safety must be provided at the Python level +#ifndef MICROPY_PY_THREAD_GIL +#define MICROPY_PY_THREAD_GIL (MICROPY_PY_THREAD) +#endif + // Extended modules #ifndef MICROPY_PY_UCTYPES @@ -888,6 +899,10 @@ typedef double mp_float_t; #define MICROPY_PY_FRAMEBUF (0) #endif +#ifndef MICROPY_PY_BTREE +#define MICROPY_PY_BTREE (0) +#endif + 
/*****************************************************************************/ /* Hooks for a port to add builtins */ @@ -1031,6 +1046,11 @@ typedef double mp_float_t; #define MP_WEAK __attribute__((weak)) #endif +// Modifier for functions which should be never inlined +#ifndef MP_NOINLINE +#define MP_NOINLINE __attribute__((noinline)) +#endif + // Condition is likely to be true, to help branch prediction #ifndef MP_LIKELY #define MP_LIKELY(x) __builtin_expect((x), 1) diff --git a/py/mphal.h b/py/mphal.h index aacd02ebd8..54a45b0240 100644 --- a/py/mphal.h +++ b/py/mphal.h @@ -73,6 +73,7 @@ mp_uint_t mp_hal_ticks_us(void); #define mp_hal_get_pin_obj(pin) (pin) #define mp_hal_pin_read(pin) mp_virtual_pin_read(pin) #define mp_hal_pin_write(pin, v) mp_virtual_pin_write(pin, v) +#include "extmod/virtpin.h" #endif #endif // __MICROPY_INCLUDED_PY_MPHAL_H__ diff --git a/py/mpstate.h b/py/mpstate.h index 0e77e65833..281795773f 100644 --- a/py/mpstate.h +++ b/py/mpstate.h @@ -29,6 +29,7 @@ #include <stdint.h> #include "py/mpconfig.h" +#include "py/mpthread.h" #include "py/misc.h" #include "py/nlr.h" #include "py/obj.h" @@ -80,6 +81,11 @@ typedef struct _mp_state_mem_t { #if MICROPY_PY_GC_COLLECT_RETVAL size_t gc_collected; #endif + + #if MICROPY_PY_THREAD + // This is a global mutex used to make the GC thread-safe. + mp_thread_mutex_t gc_mutex; + #endif } mp_state_mem_t; // This structure hold runtime and VM information. It includes a section @@ -91,9 +97,6 @@ typedef struct _mp_state_vm_t { // this must start at the start of this structure // - // Note: nlr asm code has the offset of this hard-coded - nlr_buf_t *nlr_top; - qstr_pool_t *last_pool; // non-heap memory for creating an exception if we can't allocate RAM @@ -140,6 +143,7 @@ typedef struct _mp_state_vm_t { #if MICROPY_PY_OS_DUPTERM mp_obj_t term_obj; + mp_obj_t dupterm_arr_obj; #endif #if MICROPY_PY_LWIP_SLIP @@ -161,12 +165,9 @@ typedef struct _mp_state_vm_t { size_t qstr_last_alloc; size_t qstr_last_used; - // Stack top at the start of program - // Note: this entry is used to locate the end of the root pointer section. - char *stack_top; - - #if MICROPY_STACK_CHECK - mp_uint_t stack_limit; + #if MICROPY_PY_THREAD + // This is a global mutex used to make qstr interning thread-safe. + mp_thread_mutex_t qstr_mutex; #endif mp_uint_t mp_optimise_value; @@ -175,9 +176,29 @@ typedef struct _mp_state_vm_t { #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0 mp_int_t mp_emergency_exception_buf_size; #endif + + #if MICROPY_PY_THREAD_GIL + // This is a global mutex used to make the VM/runtime thread-safe. + mp_thread_mutex_t gil_mutex; + #endif } mp_state_vm_t; -// This structure combines the above 2 structures, and adds the local +// This structure holds state that is specific to a given thread. +// Everything in this structure is scanned for root pointers. +typedef struct _mp_state_thread_t { + // Note: nlr asm code has the offset of this hard-coded + nlr_buf_t *nlr_top; // ROOT POINTER + + // Stack top at the start of program + // Note: this entry is used to locate the end of the root pointer section. + char *stack_top; + + #if MICROPY_STACK_CHECK + size_t stack_limit; + #endif +} mp_state_thread_t; + +// This structure combines the above 3 structures, and adds the local // and global dicts. // Note: if this structure changes then revisit all nlr asm code since they // have the offset of nlr_top hard-coded. 
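The py/mpstate.h hunks above move nlr_top, stack_top and stack_limit out of mp_state_vm_t into the new per-thread mp_state_thread_t, so each thread carries its own NLR chain and stack bounds. A minimal sketch, assuming a port-created native thread, of how such a thread binds its own instance of this structure, following the pattern of thread_entry() in py/modthread.c earlier in this diff (the function name and the 8192-byte stack limit are illustrative):

    #include "py/mpstate.h"
    #include "py/mpthread.h"
    #include "py/stackctrl.h"

    // Illustrative entry point run on a freshly created native thread.
    void *example_thread_entry(void *arg) {
        (void)arg;
        mp_state_thread_t ts;       // this thread's nlr_top/stack_top/stack_limit
        mp_thread_set_state(&ts);   // MP_STATE_THREAD(x) now resolves via this struct
        mp_stack_set_top(&ts + 1);  // include ts in the root-pointer scan
        mp_stack_set_limit(8192);   // assumed stack-check limit, in bytes
        MP_THREAD_GIL_ENTER();
        // ... run Python code, e.g. via mp_call_function_n_kw() ...
        MP_THREAD_GIL_EXIT();
        return NULL;
    }

With MICROPY_PY_THREAD disabled, MP_STATE_THREAD() falls back to the single mp_state_ctx.thread instance (see the macro definitions in the next hunk), so non-threaded builds keep the previous behaviour.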
@@ -185,7 +206,8 @@ typedef struct _mp_state_ctx_t { // these must come first for root pointer scanning in GC to work mp_obj_dict_t *dict_locals; mp_obj_dict_t *dict_globals; - // this must come next for root pointer scanning in GC to work + // these must come next in this order for root pointer scanning in GC to work + mp_state_thread_t thread; mp_state_vm_t vm; mp_state_mem_t mem; } mp_state_ctx_t; @@ -196,4 +218,11 @@ extern mp_state_ctx_t mp_state_ctx; #define MP_STATE_VM(x) (mp_state_ctx.vm.x) #define MP_STATE_MEM(x) (mp_state_ctx.mem.x) +#if MICROPY_PY_THREAD +extern mp_state_thread_t *mp_thread_get_state(void); +#define MP_STATE_THREAD(x) (mp_thread_get_state()->x) +#else +#define MP_STATE_THREAD(x) (mp_state_ctx.thread.x) +#endif + #endif // __MICROPY_INCLUDED_PY_MPSTATE_H__ diff --git a/py/mpthread.h b/py/mpthread.h new file mode 100644 index 0000000000..04d4f19684 --- /dev/null +++ b/py/mpthread.h @@ -0,0 +1,61 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __MICROPY_INCLUDED_PY_MPTHREAD_H__ +#define __MICROPY_INCLUDED_PY_MPTHREAD_H__ + +#include "py/mpconfig.h" + +#if MICROPY_PY_THREAD + +#ifdef MICROPY_MPTHREADPORT_H +#include MICROPY_MPTHREADPORT_H +#else +#include <mpthreadport.h> +#endif + +struct _mp_state_thread_t; + +struct _mp_state_thread_t *mp_thread_get_state(void); +void mp_thread_set_state(void *state); +void mp_thread_create(void *(*entry)(void*), void *arg, size_t *stack_size); +void mp_thread_start(void); +void mp_thread_finish(void); +void mp_thread_mutex_init(mp_thread_mutex_t *mutex); +int mp_thread_mutex_lock(mp_thread_mutex_t *mutex, int wait); +void mp_thread_mutex_unlock(mp_thread_mutex_t *mutex); + +#endif // MICROPY_PY_THREAD + +#if MICROPY_PY_THREAD && MICROPY_PY_THREAD_GIL +#include "py/mpstate.h" +#define MP_THREAD_GIL_ENTER() mp_thread_mutex_lock(&MP_STATE_VM(gil_mutex), 1) +#define MP_THREAD_GIL_EXIT() mp_thread_mutex_unlock(&MP_STATE_VM(gil_mutex)) +#else +#define MP_THREAD_GIL_ENTER() +#define MP_THREAD_GIL_EXIT() +#endif + +#endif // __MICROPY_INCLUDED_PY_MPTHREAD_H__ @@ -70,8 +70,8 @@ struct _nlr_buf_t { NORETURN void nlr_setjmp_jump(void *val); // nlr_push() must be defined as a macro, because "The stack context will be // invalidated if the function which called setjmp() returns." 
-#define nlr_push(buf) ((buf)->prev = MP_STATE_VM(nlr_top), MP_STATE_VM(nlr_top) = (buf), setjmp((buf)->jmpbuf)) -#define nlr_pop() { MP_STATE_VM(nlr_top) = MP_STATE_VM(nlr_top)->prev; } +#define nlr_push(buf) ((buf)->prev = MP_STATE_THREAD(nlr_top), MP_STATE_THREAD(nlr_top) = (buf), setjmp((buf)->jmpbuf)) +#define nlr_pop() { MP_STATE_THREAD(nlr_top) = MP_STATE_THREAD(nlr_top)->prev; } #define nlr_jump(val) nlr_setjmp_jump(val) #else unsigned int nlr_push(nlr_buf_t *); @@ -91,7 +91,7 @@ void nlr_jump_fail(void *val); #include "mpstate.h" #define nlr_raise(val) \ do { \ - /*printf("nlr_raise: nlr_top=%p\n", MP_STATE_VM(nlr_top)); \ + /*printf("nlr_raise: nlr_top=%p\n", MP_STATE_THREAD(nlr_top)); \ fflush(stdout);*/ \ void *_val = MP_OBJ_TO_PTR(val); \ assert(_val != NULL); \ @@ -101,11 +101,11 @@ void nlr_jump_fail(void *val); #if !MICROPY_NLR_SETJMP #define nlr_push(val) \ - assert(MP_STATE_VM(nlr_top) != val),nlr_push(val) + assert(MP_STATE_THREAD(nlr_top) != val),nlr_push(val) /* #define nlr_push(val) \ - printf("nlr_push: before: nlr_top=%p, val=%p\n", MP_STATE_VM(nlr_top), val),assert(MP_STATE_VM(nlr_top) != val),nlr_push(val) + printf("nlr_push: before: nlr_top=%p, val=%p\n", MP_STATE_THREAD(nlr_top), val),assert(MP_STATE_THREAD(nlr_top) != val),nlr_push(val) #endif */ #endif diff --git a/py/nlrsetjmp.c b/py/nlrsetjmp.c index 661b650c5c..43a13156f2 100644 --- a/py/nlrsetjmp.c +++ b/py/nlrsetjmp.c @@ -29,8 +29,8 @@ #if MICROPY_NLR_SETJMP void nlr_setjmp_jump(void *val) { - nlr_buf_t *buf = MP_STATE_VM(nlr_top); - MP_STATE_VM(nlr_top) = buf->prev; + nlr_buf_t *buf = MP_STATE_THREAD(nlr_top); + MP_STATE_THREAD(nlr_top) = buf->prev; buf->ret_val = val; longjmp(buf->jmpbuf, 1); } diff --git a/py/nlrthumb.S b/py/nlrthumb.S deleted file mode 100644 index 624275e8ed..0000000000 --- a/py/nlrthumb.S +++ /dev/null @@ -1,165 +0,0 @@ -/* - * This file is part of the MicroPython project, http://micropython.org/ - * - * The MIT License (MIT) - * - * Copyright (c) 2013-2015 Damien P. George - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#if (!defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP) && (defined(__thumb2__) || defined(__thumb__) || defined(__arm__)) - -// We only need the functions here if we are on arm/thumb, and we are not -// using setjmp/longjmp. 
-// -// For reference, arm/thumb callee save regs are: -// r4-r11, r13=sp - -// the offset of nlr_top within mp_state_ctx_t -#define NLR_TOP_OFFSET (2 * 4) - - .syntax unified - /*.cpu cortex-m4*/ - /*.thumb*/ - .text - .align 2 - -/**************************************/ -// mp_uint_t nlr_push(r0=nlr_buf_t *nlr) - - .global nlr_push -#if defined(__thumb2__) - .thumb - .thumb_func -#endif - .type nlr_push, %function -nlr_push: - str r4, [r0, #12] @ store r4 into nlr_buf - str r5, [r0, #16] @ store r5 into nlr_buf - str r6, [r0, #20] @ store r6 into nlr_buf - str r7, [r0, #24] @ store r7 into nlr_buf - -#if defined(__ARM_ARCH_6M__) - mov r1, r8 - str r1, [r0, #28] @ store r8 into nlr_buf - mov r1, r9 - str r1, [r0, #32] @ store r9 into nlr_buf - mov r1, r10 - str r1, [r0, #36] @ store r10 into nlr_buf - mov r1, r11 - str r1, [r0, #40] @ store r11 into nlr_buf - mov r1, r13 - str r1, [r0, #44] @ store r13=sp into nlr_buf - mov r1, lr - str r1, [r0, #8] @ store lr into nlr_buf -#else - str r8, [r0, #28] @ store r8 into nlr_buf - str r9, [r0, #32] @ store r9 into nlr_buf - str r10, [r0, #36] @ store r10 into nlr_buf - str r11, [r0, #40] @ store r11 into nlr_buf - str r13, [r0, #44] @ store r13=sp into nlr_buf - str lr, [r0, #8] @ store lr into nlr_buf -#endif - - ldr r3, nlr_top_addr @ load addr of nlr_top - ldr r2, [r3] @ load nlr_top - str r2, [r0] @ store nlr_top into nlr_buf - str r0, [r3] @ store nlr_buf into nlr_top (to link list) - - movs r0, #0 @ return 0, normal return - bx lr @ return - .size nlr_push, .-nlr_push - -/**************************************/ -// void nlr_pop() - - .global nlr_pop -#if defined(__thumb2__) - .thumb - .thumb_func -#endif - .type nlr_pop, %function -nlr_pop: - ldr r3, nlr_top_addr @ load addr of nlr_top - ldr r2, [r3] @ load nlr_top - ldr r2, [r2] @ load prev nlr_buf - str r2, [r3] @ store prev nlr_buf to nlr_top (to unlink list) - bx lr @ return - .size nlr_pop, .-nlr_pop - -/**************************************/ -// void nlr_jump(r0=mp_uint_t val) - - .global nlr_jump -#if defined(__thumb2__) - .thumb - .thumb_func -#endif - .type nlr_jump, %function -nlr_jump: - ldr r3, nlr_top_addr @ load addr of nlr_top - ldr r2, [r3] @ load nlr_top - cmp r2, #0 @ test if nlr_top is NULL -#if defined(__ARM_ARCH_6M__) - bne nlr_jump_non_null @ if nlr_top is NULL, transfer control to nlr_jump_fail - bl nlr_jump_fail -nlr_jump_non_null: -#else - beq nlr_jump_fail @ if nlr_top is NULL, transfer control to nlr_jump_fail -#endif - str r0, [r2, #4] @ store return value - ldr r0, [r2] @ load prev nlr_buf - str r0, [r3] @ store prev nol_buf into nlr_top (to unlink list) - - ldr r4, [r2, #12] @ load r4 from nlr_buf - ldr r5, [r2, #16] @ load r5 from nlr_buf - ldr r6, [r2, #20] @ load r6 from nlr_buf - ldr r7, [r2, #24] @ load r7 from nlr_buf -#if defined(__ARM_ARCH_6M__) - ldr r1, [r2, #28] @ load r8 from nlr_buf - mov r8, r1 - ldr r1, [r2, #32] @ load r9 from nlr_buf - mov r9, r1 - ldr r1, [r2, #36] @ load r10 from nlr_buf - mov r10, r1 - ldr r1, [r2, #40] @ load r11 from nlr_buf - mov r11, r1 - ldr r1, [r2, #44] @ load r13=sp from nlr_buf - mov r13, r1 - ldr r1, [r2, #8] @ load lr from nlr_buf - mov lr, r1 -#else - ldr r8, [r2, #28] @ load r8 from nlr_buf - ldr r9, [r2, #32] @ load r9 from nlr_buf - ldr r10, [r2, #36] @ load r10 from nlr_buf - ldr r11, [r2, #40] @ load r11 from nlr_buf - ldr r13, [r2, #44] @ load r13=sp from nlr_buf - ldr lr, [r2, #8] @ load lr from nlr_buf -#endif - - movs r0, #1 @ return 1, non-local return - bx lr @ return - .size nlr_jump, .-nlr_jump - 
- .align 2 -nlr_top_addr: - .word mp_state_ctx + NLR_TOP_OFFSET - -#endif // (!defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP) && (defined(__thumb2__) || defined(__thumb__) || defined(__arm__)) diff --git a/py/nlrthumb.c b/py/nlrthumb.c new file mode 100644 index 0000000000..a61c73c036 --- /dev/null +++ b/py/nlrthumb.c @@ -0,0 +1,134 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013-2016 Damien P. George + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "py/mpstate.h" +#include "py/nlr.h" + +#if (!defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP) && (defined(__thumb2__) || defined(__thumb__) || defined(__arm__)) + +// We only need the functions here if we are on arm/thumb, and we are not +// using setjmp/longjmp. 
+// +// For reference, arm/thumb callee save regs are: +// r4-r11, r13=sp + +__attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) { + + __asm volatile ( + "str r4, [r0, #12] \n" // store r4 into nlr_buf + "str r5, [r0, #16] \n" // store r5 into nlr_buf + "str r6, [r0, #20] \n" // store r6 into nlr_buf + "str r7, [r0, #24] \n" // store r7 into nlr_buf + +#if defined(__ARM_ARCH_6M__) + "mov r1, r8 \n" + "str r1, [r0, #28] \n" // store r8 into nlr_buf + "mov r1, r9 \n" + "str r1, [r0, #32] \n" // store r9 into nlr_buf + "mov r1, r10 \n" + "str r1, [r0, #36] \n" // store r10 into nlr_buf + "mov r1, r11 \n" + "str r1, [r0, #40] \n" // store r11 into nlr_buf + "mov r1, r13 \n" + "str r1, [r0, #44] \n" // store r13=sp into nlr_buf + "mov r1, lr \n" + "str r1, [r0, #8] \n" // store lr into nlr_buf +#else + "str r8, [r0, #28] \n" // store r8 into nlr_buf + "str r9, [r0, #32] \n" // store r9 into nlr_buf + "str r10, [r0, #36] \n" // store r10 into nlr_buf + "str r11, [r0, #40] \n" // store r11 into nlr_buf + "str r13, [r0, #44] \n" // store r13=sp into nlr_buf + "str lr, [r0, #8] \n" // store lr into nlr_buf +#endif + + "b nlr_push_tail \n" // do the rest in C + ); + + return 0; // needed to silence compiler warning +} + +unsigned int nlr_push_tail(nlr_buf_t *nlr) { + nlr_buf_t **top = &MP_STATE_THREAD(nlr_top); + nlr->prev = *top; + *top = nlr; + return 0; // normal return +} + +void nlr_pop(void) { + nlr_buf_t **top = &MP_STATE_THREAD(nlr_top); + *top = (*top)->prev; +} + +NORETURN __attribute__((naked)) void nlr_jump(void *val) { + nlr_buf_t **top_ptr = &MP_STATE_THREAD(nlr_top); + nlr_buf_t *top = *top_ptr; + if (top == NULL) { + nlr_jump_fail(val); + } + + top->ret_val = val; + *top_ptr = top->prev; + + __asm volatile ( + "mov r0, %0 \n" // r0 points to nlr_buf + "ldr r4, [r0, #12] \n" // load r4 from nlr_buf + "ldr r5, [r0, #16] \n" // load r5 from nlr_buf + "ldr r6, [r0, #20] \n" // load r6 from nlr_buf + "ldr r7, [r0, #24] \n" // load r7 from nlr_buf + +#if defined(__ARM_ARCH_6M__) + "ldr r1, [r0, #28] \n" // load r8 from nlr_buf + "mov r8, r1 \n" + "ldr r1, [r0, #32] \n" // load r9 from nlr_buf + "mov r9, r1 \n" + "ldr r1, [r0, #36] \n" // load r10 from nlr_buf + "mov r10, r1 \n" + "ldr r1, [r0, #40] \n" // load r11 from nlr_buf + "mov r11, r1 \n" + "ldr r1, [r0, #44] \n" // load r13=sp from nlr_buf + "mov r13, r1 \n" + "ldr r1, [r0, #8] \n" // load lr from nlr_buf + "mov lr, r1 \n" +#else + "ldr r8, [r0, #28] \n" // load r8 from nlr_buf + "ldr r9, [r0, #32] \n" // load r9 from nlr_buf + "ldr r10, [r0, #36] \n" // load r10 from nlr_buf + "ldr r11, [r0, #40] \n" // load r11 from nlr_buf + "ldr r13, [r0, #44] \n" // load r13=sp from nlr_buf + "ldr lr, [r0, #8] \n" // load lr from nlr_buf +#endif + "movs r0, #1 \n" // return 1, non-local return + "bx lr \n" // return + : // output operands + : "r"(top) // input operands + : // clobbered registers + ); + + for (;;); // needed to silence compiler warning +} + +#endif // (!defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP) && (defined(__thumb2__) || defined(__thumb__) || defined(__arm__)) diff --git a/py/nlrx64.S b/py/nlrx64.S index 8dda025cad..ad2b66fdb2 100644 --- a/py/nlrx64.S +++ b/py/nlrx64.S @@ -41,6 +41,9 @@ #define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET) #endif +// offset of nlr_top within mp_state_thread_t structure +#define NLR_TOP_TH_OFF (0) + #if defined(_WIN32) || defined(__CYGWIN__) #define NLR_OS_WINDOWS #endif @@ -77,9 +80,20 @@ _nlr_push: movq %r13, 56(%rdi) # store %r13 into nlr_buf movq %r14, 64(%rdi) # store %r14 into 
nlr_buf movq %r15, 72(%rdi) # store %r15 into nlr_buf + +#if !MICROPY_PY_THREAD movq NLR_TOP(%rip), %rax # get last nlr_buf movq %rax, (%rdi) # store it movq %rdi, NLR_TOP(%rip) # stor new nlr_buf (to make linked list) +#else + movq %rdi, %rbp # since we make a call, must save rdi in rbp + callq mp_thread_get_state # get mp_state_thread ptr into rax + movq NLR_TOP_TH_OFF(%rax), %rsi # get thread.nlr_top (last nlr_buf) + movq %rsi, (%rbp) # store it + movq %rbp, NLR_TOP_TH_OFF(%rax) # store new nlr_buf (to make linked list) + movq 24(%rbp), %rbp # restore rbp +#endif + xorq %rax, %rax # return 0, normal return ret # return #if !(defined(__APPLE__) && defined(__MACH__)) @@ -97,9 +111,18 @@ nlr_pop: .globl _nlr_pop _nlr_pop: #endif + +#if !MICROPY_PY_THREAD movq NLR_TOP(%rip), %rax # get nlr_top into %rax movq (%rax), %rax # load prev nlr_buf movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list) +#else + callq mp_thread_get_state # get mp_state_thread ptr into rax + movq NLR_TOP_TH_OFF(%rax), %rdi # get thread.nlr_top (last nlr_buf) + movq (%rdi), %rdi # load prev nlr_buf + movq %rdi, NLR_TOP_TH_OFF(%rax) # store prev nlr_buf (to unlink list) +#endif + ret # return #if !(defined(__APPLE__) && defined(__MACH__)) .size nlr_pop, .-nlr_pop @@ -116,6 +139,8 @@ nlr_jump: .globl _nlr_jump _nlr_jump: #endif + +#if !MICROPY_PY_THREAD movq %rdi, %rax # put return value in %rax movq NLR_TOP(%rip), %rdi # get nlr_top into %rdi test %rdi, %rdi # check for nlr_top being NULL @@ -123,6 +148,19 @@ nlr_jump: movq %rax, 8(%rdi) # store return value movq (%rdi), %rax # load prev nlr_buf movq %rax, NLR_TOP(%rip) # store prev nlr_buf (to unlink list) +#else + movq %rdi, %rbp # put return value in rbp + callq mp_thread_get_state # get thread ptr in rax + movq %rax, %rsi # put thread ptr in rsi + movq %rbp, %rax # put return value to rax (for je .fail) + movq NLR_TOP_TH_OFF(%rsi), %rdi # get thread.nlr_top in rdi + test %rdi, %rdi # check for nlr_top being NULL + je .fail # fail if nlr_top is NULL + movq %rax, 8(%rdi) # store return value + movq (%rdi), %rax # load prev nlr_buf + movq %rax, NLR_TOP_TH_OFF(%rsi) # store prev nlr_buf (to unlink list) +#endif + movq 72(%rdi), %r15 # load saved %r15 movq 64(%rdi), %r14 # load saved %r14 movq 56(%rdi), %r13 # load saved %r13 diff --git a/py/nlrx86.S b/py/nlrx86.S index a6ec4b73c7..8a96af81ce 100644 --- a/py/nlrx86.S +++ b/py/nlrx86.S @@ -42,6 +42,9 @@ #define NLR_TOP (mp_state_ctx + NLR_TOP_OFFSET) #endif +// offset of nlr_top within mp_state_thread_t structure +#define NLR_TOP_TH_OFF (0) + .file "nlr.s" .text @@ -65,9 +68,20 @@ nlr_push: mov %ebx, 20(%edx) # store %bx into nlr_buf+20 mov %edi, 24(%edx) # store %di into nlr_buf mov %esi, 28(%edx) # store %si into nlr_buf + +#if !MICROPY_PY_THREAD mov NLR_TOP, %eax # load nlr_top mov %eax, (%edx) # store it mov %edx, NLR_TOP # stor new nlr_buf (to make linked list) +#else + // to check: stack is aligned to 16-byte boundary before this call + call mp_thread_get_state # get mp_state_thread ptr into eax + mov 4(%esp), %edx # load nlr_buf argument into edx (edx clobbered by call) + mov NLR_TOP_TH_OFF(%eax), %ecx # get thread.nlr_top (last nlr_buf) + mov %ecx, (%edx) # store it + mov %edx, NLR_TOP_TH_OFF(%eax) # store new nlr_buf (to make linked list) +#endif + xor %eax, %eax # return 0, normal return ret # return #if !defined(NLR_OS_WINDOWS) @@ -86,9 +100,18 @@ _nlr_pop: .type nlr_pop, @function nlr_pop: #endif + +#if !MICROPY_PY_THREAD mov NLR_TOP, %eax # load nlr_top mov (%eax), %eax # load prev nlr_buf mov %eax, 
NLR_TOP # store nlr_top (to unlink list) +#else + call mp_thread_get_state # get mp_state_thread ptr into eax + mov NLR_TOP_TH_OFF(%eax), %ecx # get thread.nlr_top (last nlr_buf) + mov (%ecx), %ecx # load prev nlr_buf + mov %ecx, NLR_TOP_TH_OFF(%eax) # store prev nlr_buf (to unlink list) +#endif + ret # return #if !defined(NLR_OS_WINDOWS) .size nlr_pop, .-nlr_pop @@ -106,6 +129,8 @@ _nlr_jump: .type nlr_jump, @function nlr_jump: #endif + +#if !MICROPY_PY_THREAD mov NLR_TOP, %edx # load nlr_top test %edx, %edx # check for nlr_top being NULL #if defined(NLR_OS_WINDOWS) @@ -117,6 +142,21 @@ nlr_jump: mov %eax, 4(%edx) # store return value mov (%edx), %eax # load prev nlr_top mov %eax, NLR_TOP # store nlr_top (to unlink list) +#else + call mp_thread_get_state # get mp_state_thread ptr into eax + mov NLR_TOP_TH_OFF(%eax), %edx # get thread.nlr_top (last nlr_buf) + test %edx, %edx # check for nlr_top being NULL +#if defined(NLR_OS_WINDOWS) + je _nlr_jump_fail # fail if nlr_top is NULL +#else + je nlr_jump_fail # fail if nlr_top is NULL +#endif + mov 4(%esp), %ecx # load return value + mov %ecx, 4(%edx) # store return value + mov (%edx), %ecx # load prev nlr_top + mov %ecx, NLR_TOP_TH_OFF(%eax) # store nlr_top (to unlink list) +#endif + mov 28(%edx), %esi # load saved %si mov 24(%edx), %edi # load saved %di mov 20(%edx), %ebx # load saved %bx @@ -484,7 +484,8 @@ struct _mp_obj_type_t { mp_fun_1_t iternext; // may return MP_OBJ_STOP_ITERATION as an optimisation instead of raising StopIteration() (with no args) mp_buffer_p_t buffer_p; - const mp_stream_p_t *stream_p; + // One of disjoint protocols (interfaces), like mp_stream_p_t, etc. + const void *protocol; // these are for dynamically created types (classes) struct _mp_obj_tuple_t *bases_tuple; diff --git a/py/objarray.c b/py/objarray.c index bafba7623f..2cd0fef6b6 100644 --- a/py/objarray.c +++ b/py/objarray.c @@ -34,6 +34,7 @@ #include "py/runtime.h" #include "py/binary.h" #include "py/objstr.h" +#include "py/objarray.h" #if MICROPY_PY_ARRAY || MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_BUILTINS_MEMORYVIEW @@ -58,16 +59,6 @@ #define TYPECODE_MASK (~(mp_uint_t)0) #endif -typedef struct _mp_obj_array_t { - mp_obj_base_t base; - mp_uint_t typecode : 8; - // free is number of unused elements after len used elements - // alloc size = len + free - mp_uint_t free : (8 * sizeof(mp_uint_t) - 8); - mp_uint_t len; // in elements - void *items; -} mp_obj_array_t; - STATIC mp_obj_t array_iterator_new(mp_obj_t array_in); STATIC mp_obj_t array_append(mp_obj_t self_in, mp_obj_t arg); STATIC mp_obj_t array_extend(mp_obj_t self_in, mp_obj_t arg_in); diff --git a/py/objarray.h b/py/objarray.h new file mode 100644 index 0000000000..013ac5be9b --- /dev/null +++ b/py/objarray.h @@ -0,0 +1,43 @@ +/* + * This file is part of the MicroPython project, http://micropython.org/ + * + * The MIT License (MIT) + * + * Copyright (c) 2013, 2014 Damien P. 
George + * Copyright (c) 2014 Paul Sokolovsky + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef __MICROPY_INCLUDED_PY_OBJARRAY_H__ +#define __MICROPY_INCLUDED_PY_OBJARRAY_H__ + +#include "py/obj.h" + +typedef struct _mp_obj_array_t { + mp_obj_base_t base; + mp_uint_t typecode : 8; + // free is number of unused elements after len used elements + // alloc size = len + free + mp_uint_t free : (8 * sizeof(mp_uint_t) - 8); + mp_uint_t len; // in elements + void *items; +} mp_obj_array_t; + +#endif // __MICROPY_INCLUDED_PY_OBJARRAY_H__ diff --git a/py/objdict.c b/py/objdict.c index 04da2bf624..91d5b75e2b 100644 --- a/py/objdict.c +++ b/py/objdict.c @@ -119,8 +119,16 @@ STATIC mp_obj_t dict_binary_op(mp_uint_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) { case MP_BINARY_OP_EQUAL: { #if MICROPY_PY_COLLECTIONS_ORDEREDDICT if (MP_UNLIKELY(MP_OBJ_IS_TYPE(lhs_in, &mp_type_ordereddict) && MP_OBJ_IS_TYPE(rhs_in, &mp_type_ordereddict))) { - //TODO: implement - return MP_OBJ_NULL; + // Iterate through both dictionaries simultaneously and compare keys and values. + mp_obj_dict_t *rhs = MP_OBJ_TO_PTR(rhs_in); + mp_uint_t c1 = 0, c2 = 0; + mp_map_elem_t *e1 = dict_iter_next(o, &c1), *e2 = dict_iter_next(rhs, &c2); + for (; e1 != NULL && e2 != NULL; e1 = dict_iter_next(o, &c1), e2 = dict_iter_next(rhs, &c2)) { + if (!mp_obj_equal(e1->key, e2->key) || !mp_obj_equal(e1->value, e2->value)) { + return mp_const_false; + } + } + return e1 == NULL && e2 == NULL ? 
mp_const_true : mp_const_false; } else #endif if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_dict)) { diff --git a/py/objmodule.c b/py/objmodule.c index c7cb644488..dc2ce787b4 100644 --- a/py/objmodule.c +++ b/py/objmodule.c @@ -160,6 +160,9 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { #if MICROPY_PY_GC && MICROPY_ENABLE_GC { MP_ROM_QSTR(MP_QSTR_gc), MP_ROM_PTR(&mp_module_gc) }, #endif +#if MICROPY_PY_THREAD + { MP_ROM_QSTR(MP_QSTR__thread), MP_ROM_PTR(&mp_module_thread) }, +#endif // extmod modules @@ -205,6 +208,9 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { #if MICROPY_PY_FRAMEBUF { MP_ROM_QSTR(MP_QSTR_framebuf), MP_ROM_PTR(&mp_module_framebuf) }, #endif +#if MICROPY_PY_BTREE + { MP_ROM_QSTR(MP_QSTR_btree), MP_ROM_PTR(&mp_module_btree) }, +#endif // extra builtin modules as defined by a port MICROPY_PORT_BUILTIN_MODULES diff --git a/py/objstringio.c b/py/objstringio.c index 5fd2ca9d3b..abd4e835e8 100644 --- a/py/objstringio.c +++ b/py/objstringio.c @@ -174,7 +174,7 @@ const mp_obj_type_t mp_type_stringio = { .make_new = stringio_make_new, .getiter = mp_identity, .iternext = mp_stream_unbuffered_iter, - .stream_p = &stringio_stream_p, + .protocol = &stringio_stream_p, .locals_dict = (mp_obj_dict_t*)&stringio_locals_dict, }; @@ -186,7 +186,7 @@ const mp_obj_type_t mp_type_bytesio = { .make_new = stringio_make_new, .getiter = mp_identity, .iternext = mp_stream_unbuffered_iter, - .stream_p = &bytesio_stream_p, + .protocol = &bytesio_stream_p, .locals_dict = (mp_obj_dict_t*)&stringio_locals_dict, }; #endif diff --git a/py/objtype.c b/py/objtype.c index 2f14c387db..907308a757 100644 --- a/py/objtype.c +++ b/py/objtype.c @@ -914,7 +914,13 @@ mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict) o->getiter = instance_getiter; //o->iternext = ; not implemented o->buffer_p.get_buffer = instance_get_buffer; - //o->stream_p = ; not implemented + // Inherit protocol from a base class. This allows to define an + // abstract base class which would translate C-level protocol to + // Python method calls, and any subclass inheriting from it will + // support this feature. + if (len > 0) { + o->protocol = ((mp_obj_type_t*)MP_OBJ_TO_PTR(items[0]))->protocol; + } o->bases_tuple = MP_OBJ_TO_PTR(bases_tuple); o->locals_dict = MP_OBJ_TO_PTR(locals_dict); diff --git a/py/parse.c b/py/parse.c index 7da484c497..1ec995cd8f 100644 --- a/py/parse.c +++ b/py/parse.c @@ -461,6 +461,8 @@ STATIC const mp_rom_map_elem_t mp_constants_table[] = { STATIC MP_DEFINE_CONST_MAP(mp_constants_map, mp_constants_table); #endif +STATIC void push_result_rule(parser_t *parser, size_t src_line, const rule_t *rule, size_t num_args); + #if MICROPY_COMP_CONST_FOLDING STATIC bool fold_constants(parser_t *parser, const rule_t *rule, size_t num_args) { // this code does folding of arbitrary integer expressions, eg 1 + 2 * 3 + 4 @@ -587,6 +589,15 @@ STATIC bool fold_constants(parser_t *parser, const rule_t *rule, size_t num_args assert(elem->value == MP_OBJ_NULL); elem->value = MP_OBJ_NEW_SMALL_INT(value); + // If the constant starts with an underscore then treat it as a private + // variable and don't emit any code to store the value to the id. 
diff --git a/py/parse.c b/py/parse.c
index 7da484c497..1ec995cd8f 100644
--- a/py/parse.c
+++ b/py/parse.c
@@ -461,6 +461,8 @@ STATIC const mp_rom_map_elem_t mp_constants_table[] = {
 STATIC MP_DEFINE_CONST_MAP(mp_constants_map, mp_constants_table);
 #endif
 
+STATIC void push_result_rule(parser_t *parser, size_t src_line, const rule_t *rule, size_t num_args);
+
 #if MICROPY_COMP_CONST_FOLDING
 STATIC bool fold_constants(parser_t *parser, const rule_t *rule, size_t num_args) {
     // this code does folding of arbitrary integer expressions, eg 1 + 2 * 3 + 4
@@ -587,6 +589,15 @@ STATIC bool fold_constants(parser_t *parser, const rule_t *rule, size_t num_args
         assert(elem->value == MP_OBJ_NULL);
         elem->value = MP_OBJ_NEW_SMALL_INT(value);
 
+        // If the constant starts with an underscore then treat it as a private
+        // variable and don't emit any code to store the value to the id.
+        if (qstr_str(id)[0] == '_') {
+            pop_result(parser); // pop const(value)
+            pop_result(parser); // pop id
+            push_result_rule(parser, 0, rules[RULE_pass_stmt], 0); // replace with "pass"
+            return true;
+        }
+
         // replace const(value) with value
         pop_result(parser);
         push_result_node(parser, pn_value);
@@ -16,6 +16,7 @@ endif
 # some code is performance bottleneck and compiled with other optimization options
 CSUPEROPT = -O3
 
+INC += -I../lib
 INC += -I../lib/netutils
 
 ifeq ($(MICROPY_PY_USSL),1)
@@ -63,6 +64,30 @@ SRC_MOD += $(LWIP_DIR)/netif/slipif.c
 endif
 endif
 
+ifeq ($(MICROPY_PY_BTREE),1)
+BTREE_DIR = lib/berkeley-db-1.xx
+CFLAGS_MOD += -D__DBINTERFACE_PRIVATE=1
+INC += -I../$(BTREE_DIR)/PORT/include
+SRC_MOD += extmod/modbtree.c
+SRC_MOD += $(addprefix $(BTREE_DIR)/,\
+btree/bt_close.c \
+btree/bt_conv.c \
+btree/bt_debug.c \
+btree/bt_delete.c \
+btree/bt_get.c \
+btree/bt_open.c \
+btree/bt_overflow.c \
+btree/bt_page.c \
+btree/bt_put.c \
+btree/bt_search.c \
+btree/bt_seq.c \
+btree/bt_split.c \
+btree/bt_utils.c \
+mpool/mpool.c \
+ )
+CFLAGS_MOD += -DMICROPY_PY_BTREE=1
+endif
+
 # py object files
 PY_O_BASENAME = \
  mpstate.o \
@@ -161,6 +186,7 @@ PY_O_BASENAME = \
  modstruct.o \
  modsys.o \
  moduerrno.o \
+ modthread.o \
  vm.o \
  bc.o \
  showbc.o \
@@ -176,6 +202,7 @@ PY_O_BASENAME = \
  ../extmod/modubinascii.o \
  ../extmod/virtpin.o \
  ../extmod/machine_mem.o \
+ ../extmod/machine_pinbase.o \
  ../extmod/machine_pulse.o \
  ../extmod/machine_i2c.o \
  ../extmod/modussl.o \
@@ -72,6 +72,14 @@
 #error unimplemented qstr length decoding
 #endif
 
+#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+#define QSTR_ENTER() mp_thread_mutex_lock(&MP_STATE_VM(qstr_mutex), 1)
+#define QSTR_EXIT() mp_thread_mutex_unlock(&MP_STATE_VM(qstr_mutex))
+#else
+#define QSTR_ENTER()
+#define QSTR_EXIT()
+#endif
+
 // this must match the equivalent function in makeqstrdata.py
 mp_uint_t qstr_compute_hash(const byte *data, size_t len) {
     // djb2 algorithm; see http://www.cse.yorku.ca/~oz/hash.html
@@ -93,7 +101,7 @@ const qstr_pool_t mp_qstr_const_pool = {
     10, // set so that the first dynamically allocated pool is twice this size; must be <= the len (just below)
     MP_QSTRnumber_of, // corresponds to number of strings in array just below
     {
-#ifndef __QSTR_EXTRACT
+#ifndef NO_QSTR
 #define QDEF(id, str) str,
 #include "genhdr/qstrdefs.generated.h"
 #undef QDEF
@@ -111,6 +119,10 @@ extern const qstr_pool_t MICROPY_QSTR_EXTRA_POOL;
 void qstr_init(void) {
     MP_STATE_VM(last_pool) = (qstr_pool_t*)&CONST_POOL; // we won't modify the const_pool since it has no allocated room left
     MP_STATE_VM(qstr_last_chunk) = NULL;
+
+    #if MICROPY_PY_THREAD
+    mp_thread_mutex_init(&MP_STATE_VM(qstr_mutex));
+    #endif
 }
 
 STATIC const byte *find_qstr(qstr q) {
@@ -125,12 +137,17 @@ STATIC const byte *find_qstr(qstr q) {
     return 0;
 }
 
+// qstr_mutex must be taken while in this function
 STATIC qstr qstr_add(const byte *q_ptr) {
     DEBUG_printf("QSTR: add hash=%d len=%d data=%.*s\n", Q_GET_HASH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_LENGTH(q_ptr), Q_GET_DATA(q_ptr));
 
     // make sure we have room in the pool for a new qstr
     if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) {
-        qstr_pool_t *pool = m_new_obj_var(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2);
+        qstr_pool_t *pool = m_new_obj_var_maybe(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2);
+        if (pool == NULL) {
+            QSTR_EXIT();
+            m_malloc_fail(MP_STATE_VM(last_pool)->alloc * 2);
+        }
         pool->prev = MP_STATE_VM(last_pool);
         pool->total_prev_len = MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len;
         pool->alloc = MP_STATE_VM(last_pool)->alloc * 2;
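QSTR_ENTER/QSTR_EXIT above are the usual conditional-locking macros: when threading is enabled (and there is no GIL) they wrap the interning pool in a mutex, otherwise they compile to nothing; note how every early-exit path, such as the allocation-failure branch, must release the lock first. A small illustration of the pattern using POSIX threads (an assumption for this sketch; real ports go through their own mp_thread_mutex_* layer):

/* Conditional mutex-guard macros around a shared interning table (illustrative). */
#include <stdio.h>

#define WITH_THREADS 1

#if WITH_THREADS
#include <pthread.h>
static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
#define TABLE_ENTER() pthread_mutex_lock(&table_mutex)
#define TABLE_EXIT()  pthread_mutex_unlock(&table_mutex)
#else
#define TABLE_ENTER() // compiles to nothing in single-threaded builds
#define TABLE_EXIT()
#endif

static int interned_count; // stands in for the shared pool state

// All readers/writers of the shared state bracket their work with ENTER/EXIT;
// any early return (e.g. on allocation failure) must EXIT before leaving.
static int intern_one(void) {
    TABLE_ENTER();
    int id = interned_count++;
    TABLE_EXIT();
    return id;
}

int main(void) {
    int a = intern_one();
    int b = intern_one();
    printf("%d %d\n", a, b); // prints: 0 1
    return 0;
}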
@@ -169,6 +186,7 @@ qstr qstr_from_str(const char *str) {
 
 qstr qstr_from_strn(const char *str, size_t len) {
     assert(len < (1 << (8 * MICROPY_QSTR_BYTES_IN_LEN)));
+    QSTR_ENTER();
     qstr q = qstr_find_strn(str, len);
     if (q == 0) {
         // qstr does not exist in interned pool so need to add it
@@ -198,7 +216,11 @@ qstr qstr_from_strn(const char *str, size_t len) {
             MP_STATE_VM(qstr_last_chunk) = m_new_maybe(byte, al);
             if (MP_STATE_VM(qstr_last_chunk) == NULL) {
                 // failed to allocate a large chunk so try with exact size
-                MP_STATE_VM(qstr_last_chunk) = m_new(byte, n_bytes);
+                MP_STATE_VM(qstr_last_chunk) = m_new_maybe(byte, n_bytes);
+                if (MP_STATE_VM(qstr_last_chunk) == NULL) {
+                    QSTR_EXIT();
+                    m_malloc_fail(n_bytes);
+                }
                 al = n_bytes;
             }
             MP_STATE_VM(qstr_last_alloc) = al;
@@ -217,6 +239,7 @@ qstr qstr_from_strn(const char *str, size_t len) {
         q_ptr[MICROPY_QSTR_BYTES_IN_HASH + MICROPY_QSTR_BYTES_IN_LEN + len] = '\0';
         q = qstr_add(q_ptr);
     }
+    QSTR_EXIT();
     return q;
 }
 
@@ -228,6 +251,7 @@ byte *qstr_build_start(size_t len, byte **q_ptr) {
 }
 
 qstr qstr_build_end(byte *q_ptr) {
+    QSTR_ENTER();
     qstr q = qstr_find_strn((const char*)Q_GET_DATA(q_ptr), Q_GET_LENGTH(q_ptr));
     if (q == 0) {
         size_t len = Q_GET_LENGTH(q_ptr);
@@ -238,6 +262,7 @@ qstr qstr_build_end(byte *q_ptr) {
     } else {
         m_del(byte, q_ptr, Q_GET_ALLOC(q_ptr));
     }
+    QSTR_EXIT();
     return q;
 }
 
@@ -263,6 +288,7 @@ const byte *qstr_data(qstr q, size_t *len) {
 }
 
 void qstr_pool_info(size_t *n_pool, size_t *n_qstr, size_t *n_str_data_bytes, size_t *n_total_bytes) {
+    QSTR_ENTER();
     *n_pool = 0;
     *n_qstr = 0;
     *n_str_data_bytes = 0;
@@ -280,14 +306,17 @@ void qstr_pool_info(size_t *n_pool, size_t *n_qstr, size_t *n_str_data_bytes, si
 #endif
     }
     *n_total_bytes += *n_str_data_bytes;
+    QSTR_EXIT();
 }
 
 #if MICROPY_PY_MICROPYTHON_MEM_INFO
 void qstr_dump_data(void) {
+    QSTR_ENTER();
     for (qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &CONST_POOL; pool = pool->prev) {
         for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
             mp_printf(&mp_plat_print, "Q(%s)\n", Q_GET_DATA(*q));
         }
     }
+    QSTR_EXIT();
 }
 #endif
@@ -37,7 +37,7 @@
 // first entry in enum will be MP_QSTR_NULL=0, which indicates invalid/no qstr
 enum {
-#ifndef __QSTR_EXTRACT
+#ifndef NO_QSTR
 #define QDEF(id, str) id,
 #include "genhdr/qstrdefs.generated.h"
 #undef QDEF
diff --git a/py/runtime.c b/py/runtime.c
index 7f28abbf4f..f88c92be63 100644
--- a/py/runtime.c
+++ b/py/runtime.c
@@ -91,6 +91,12 @@ void mp_init(void) {
     // start with no extensions to builtins
     MP_STATE_VM(mp_module_builtins_override_dict) = NULL;
     #endif
+
+    #if MICROPY_PY_THREAD_GIL
+    mp_thread_mutex_init(&MP_STATE_VM(gil_mutex));
+    #endif
+
+    MP_THREAD_GIL_ENTER();
 }
 
 void mp_deinit(void) {
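In runtime.c above, mp_init creates the GIL mutex and the initialising thread immediately takes it, so interpretation always happens with the lock held. A compact sketch of that startup sequence, again using pthreads and invented names rather than the MicroPython HAL:

/* Sketch: create one global lock during interpreter init and have the main
 * thread acquire it right away. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gil;

#define GIL_ENTER() pthread_mutex_lock(&gil)
#define GIL_EXIT()  pthread_mutex_unlock(&gil)

static void interp_init(void) {
    pthread_mutex_init(&gil, NULL); // analogous to initialising gil_mutex above
    GIL_ENTER();                    // main thread starts out holding the lock
}

static void interp_deinit(void) {
    GIL_EXIT();
    pthread_mutex_destroy(&gil);
}

int main(void) {
    interp_init();
    printf("running with the lock held\n");
    interp_deinit();
    return 0;
}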
diff --git a/py/stackctrl.c b/py/stackctrl.c
index 14d1fd0429..1843e7339e 100644
--- a/py/stackctrl.c
+++ b/py/stackctrl.c
@@ -32,23 +32,23 @@
 void mp_stack_ctrl_init(void) {
     volatile int stack_dummy;
-    MP_STATE_VM(stack_top) = (char*)&stack_dummy;
+    MP_STATE_THREAD(stack_top) = (char*)&stack_dummy;
 }
 
 void mp_stack_set_top(void *top) {
-    MP_STATE_VM(stack_top) = top;
+    MP_STATE_THREAD(stack_top) = top;
 }
 
 mp_uint_t mp_stack_usage(void) {
     // Assumes descending stack
     volatile int stack_dummy;
-    return MP_STATE_VM(stack_top) - (char*)&stack_dummy;
+    return MP_STATE_THREAD(stack_top) - (char*)&stack_dummy;
 }
 
 #if MICROPY_STACK_CHECK
 
 void mp_stack_set_limit(mp_uint_t limit) {
-    MP_STATE_VM(stack_limit) = limit;
+    MP_STATE_THREAD(stack_limit) = limit;
 }
 
 void mp_exc_recursion_depth(void) {
@@ -57,7 +57,7 @@ void mp_exc_recursion_depth(void) {
 }
 
 void mp_stack_check(void) {
-    if (mp_stack_usage() >= MP_STATE_VM(stack_limit)) {
+    if (mp_stack_usage() >= MP_STATE_THREAD(stack_limit)) {
         mp_exc_recursion_depth();
     }
 }
diff --git a/py/stream.c b/py/stream.c
index ebdbe26b45..4fcc151dca 100644
--- a/py/stream.c
+++ b/py/stream.c
@@ -58,10 +58,11 @@ mp_uint_t mp_stream_rw(mp_obj_t stream, void *buf_, mp_uint_t size, int *errcode
     mp_obj_base_t* s = (mp_obj_base_t*)MP_OBJ_TO_PTR(stream);
     typedef mp_uint_t (*io_func_t)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode);
     io_func_t io_func;
+    const mp_stream_p_t *stream_p = s->type->protocol;
     if (flags & MP_STREAM_RW_WRITE) {
-        io_func = (io_func_t)s->type->stream_p->write;
+        io_func = (io_func_t)stream_p->write;
     } else {
-        io_func = s->type->stream_p->read;
+        io_func = stream_p->read;
     }
 
     *errcode = 0;
@@ -94,7 +95,7 @@ mp_uint_t mp_stream_rw(mp_obj_t stream, void *buf_, mp_uint_t size, int *errcode
 
 const mp_stream_p_t *mp_get_stream_raise(mp_obj_t self_in, int flags) {
     mp_obj_base_t *o = (mp_obj_base_t*)MP_OBJ_TO_PTR(self_in);
-    const mp_stream_p_t *stream_p = o->type->stream_p;
+    const mp_stream_p_t *stream_p = o->type->protocol;
     if (stream_p == NULL
         || ((flags & MP_STREAM_OP_READ) && stream_p->read == NULL)
         || ((flags & MP_STREAM_OP_WRITE) && stream_p->write == NULL)
@@ -1263,6 +1263,10 @@ pending_exception_check:
                     RAISE(obj);
                 }
 
+                // TODO make GIL release more efficient
+                MP_THREAD_GIL_EXIT();
+                MP_THREAD_GIL_ENTER();
+
             } // for loop
         } else {
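The vm.c hunk above adds a yield point: on each pass through the pending-exception check the thread releases and immediately re-acquires the GIL, giving any waiting thread a chance to run (the TODO notes this could be done less often). A toy dispatch loop showing the same unlock/relock yield, with pthreads and a dummy opcode loop standing in for the real VM:

/* Sketch: periodic lock release inside an interpreter dispatch loop. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gil = PTHREAD_MUTEX_INITIALIZER;

#define GIL_ENTER() pthread_mutex_lock(&gil)
#define GIL_EXIT()  pthread_mutex_unlock(&gil)

static void execute(int n_opcodes) {
    GIL_ENTER();
    for (int pc = 0; pc < n_opcodes; pc++) {
        // ... decode and execute one opcode here ...

        // Yield point: drop and immediately re-take the lock so another
        // thread waiting on it can be scheduled; a more efficient version
        // would only do this every N opcodes.
        GIL_EXIT();
        GIL_ENTER();
    }
    GIL_EXIT();
}

int main(void) {
    execute(10);
    printf("done\n");
    return 0;
}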