Diffstat (limited to 'py')
 -rw-r--r--  py/gc.c              | 45
 -rw-r--r--  py/modmicropython.c  |  4
 -rw-r--r--  py/modthread.c       |  3
 -rw-r--r--  py/mpstate.h         |  4
 4 files changed, 30 insertions, 26 deletions
diff --git a/py/gc.c b/py/gc.c
index 53a0d9da4a..88adf2045e 100644
--- a/py/gc.c
+++ b/py/gc.c
@@ -150,7 +150,7 @@ void gc_init(void *start, void *end) {
MP_STATE_MEM(gc_last_free_atb_index) = 0;
// unlock the GC
- MP_STATE_MEM(gc_lock_depth) = 0;
+ MP_STATE_THREAD(gc_lock_depth) = 0;
// allow auto collection
MP_STATE_MEM(gc_auto_collect_enabled) = 1;
@@ -174,19 +174,20 @@ void gc_init(void *start, void *end) {
}
void gc_lock(void) {
- GC_ENTER();
- MP_STATE_MEM(gc_lock_depth)++;
- GC_EXIT();
+ // This does not need to be atomic or have the GC mutex because:
+ // - each thread has its own gc_lock_depth so there are no races between threads;
+ // - a hard interrupt will only change gc_lock_depth during its execution, and
+ // upon return will restore the value of gc_lock_depth.
+ MP_STATE_THREAD(gc_lock_depth)++;
}
void gc_unlock(void) {
- GC_ENTER();
- MP_STATE_MEM(gc_lock_depth)--;
- GC_EXIT();
+ // This does not need to be atomic; see the comment above in gc_lock.
+ MP_STATE_THREAD(gc_lock_depth)--;
}
bool gc_is_locked(void) {
- return MP_STATE_MEM(gc_lock_depth) != 0;
+ return MP_STATE_THREAD(gc_lock_depth) != 0;
}
// ptr should be of type void*
@@ -320,7 +321,7 @@ STATIC void gc_sweep(void) {
void gc_collect_start(void) {
GC_ENTER();
- MP_STATE_MEM(gc_lock_depth)++;
+ MP_STATE_THREAD(gc_lock_depth)++;
#if MICROPY_GC_ALLOC_THRESHOLD
MP_STATE_MEM(gc_alloc_amount) = 0;
#endif
@@ -360,13 +361,13 @@ void gc_collect_end(void) {
gc_deal_with_stack_overflow();
gc_sweep();
MP_STATE_MEM(gc_last_free_atb_index) = 0;
- MP_STATE_MEM(gc_lock_depth)--;
+ MP_STATE_THREAD(gc_lock_depth)--;
GC_EXIT();
}
void gc_sweep_all(void) {
GC_ENTER();
- MP_STATE_MEM(gc_lock_depth)++;
+ MP_STATE_THREAD(gc_lock_depth)++;
MP_STATE_MEM(gc_stack_overflow) = 0;
gc_collect_end();
}
@@ -445,14 +446,13 @@ void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) {
return NULL;
}
- GC_ENTER();
-
// check if GC is locked
- if (MP_STATE_MEM(gc_lock_depth) > 0) {
- GC_EXIT();
+ if (MP_STATE_THREAD(gc_lock_depth) > 0) {
return NULL;
}
+ GC_ENTER();
+
size_t i;
size_t end_block;
size_t start_block;
@@ -573,13 +573,13 @@ void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
// force the freeing of a piece of memory
// TODO: freeing here does not call finaliser
void gc_free(void *ptr) {
- GC_ENTER();
- if (MP_STATE_MEM(gc_lock_depth) > 0) {
+ if (MP_STATE_THREAD(gc_lock_depth) > 0) {
// TODO how to deal with this error?
- GC_EXIT();
return;
}
+ GC_ENTER();
+
DEBUG_printf("gc_free(%p)\n", ptr);
if (ptr == NULL) {
@@ -674,15 +674,14 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
return NULL;
}
+ if (MP_STATE_THREAD(gc_lock_depth) > 0) {
+ return NULL;
+ }
+
void *ptr = ptr_in;
GC_ENTER();
- if (MP_STATE_MEM(gc_lock_depth) > 0) {
- GC_EXIT();
- return NULL;
- }
-
// get the GC block number corresponding to this pointer
assert(VERIFY_PTR(ptr));
size_t block = BLOCK_FROM_PTR(ptr);
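Taken together, the gc.c changes make a locked heap a per-thread condition that is checked before the GC mutex is ever taken. A minimal usage sketch of the resulting behaviour, assuming the declarations and includes from py/gc.c (the function name below is hypothetical, not part of the patch):

    // Sketch only: with the change above, gc_lock()/gc_unlock() touch only the
    // calling thread's MP_STATE_THREAD(gc_lock_depth), and gc_alloc() returns
    // NULL before taking the GC mutex, so none of these calls contend on it.
    void example_no_alloc_region(void) {
        gc_lock();                     // per-thread increment, no GC_ENTER()/GC_EXIT()
        void *p = gc_alloc(64, 0);     // this thread's heap is locked -> returns NULL
        assert(p == NULL);
        gc_unlock();                   // per-thread decrement
    }

Other threads keep their own gc_lock_depth of zero, so they can continue to allocate while this thread holds its heap lock.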
diff --git a/py/modmicropython.c b/py/modmicropython.c
index f7eadf79bd..180f7f186c 100644
--- a/py/modmicropython.c
+++ b/py/modmicropython.c
@@ -130,13 +130,13 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_lock_obj, mp_micropython_he
STATIC mp_obj_t mp_micropython_heap_unlock(void) {
gc_unlock();
- return MP_OBJ_NEW_SMALL_INT(MP_STATE_MEM(gc_lock_depth));
+ return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_unlock_obj, mp_micropython_heap_unlock);
#if MICROPY_PY_MICROPYTHON_HEAP_LOCKED
STATIC mp_obj_t mp_micropython_heap_locked(void) {
- return MP_OBJ_NEW_SMALL_INT(MP_STATE_MEM(gc_lock_depth));
+ return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_locked_obj, mp_micropython_heap_locked);
#endif
diff --git a/py/modthread.c b/py/modthread.c
index 1306dc642b..64fbb3f198 100644
--- a/py/modthread.c
+++ b/py/modthread.c
@@ -171,6 +171,9 @@ STATIC void *thread_entry(void *args_in) {
mp_pystack_init(mini_pystack, &mini_pystack[128]);
#endif
+ // The GC starts off unlocked on this thread.
+ ts.gc_lock_depth = 0;
+
// set locals and globals from the calling context
mp_locals_set(args->dict_locals);
mp_globals_set(args->dict_globals);
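The added ts.gc_lock_depth = 0 is needed because the per-thread state in thread_entry() is a stack-allocated local, so the new field would otherwise start with indeterminate contents. A heavily abbreviated sketch of that setup pattern (not the literal source):

    STATIC void *thread_entry(void *args_in) {
        // The thread state lives on the new thread's C stack and is not
        // zero-initialised, so every field used later must be set explicitly.
        mp_state_thread_t ts;
        mp_thread_set_state(&ts);      // register ts as the current thread's state
        ts.gc_lock_depth = 0;          // the GC starts off unlocked on this thread
        // ... stack-limit, pystack, locals/globals setup, then run the callable ...
        return NULL;
    }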
diff --git a/py/mpstate.h b/py/mpstate.h
index 2519c77e2d..a0e3d4f143 100644
--- a/py/mpstate.h
+++ b/py/mpstate.h
@@ -80,7 +80,6 @@ typedef struct _mp_state_mem_t {
int gc_stack_overflow;
MICROPY_GC_STACK_ENTRY_TYPE gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
- uint16_t gc_lock_depth;
// This variable controls auto garbage collection. If set to 0 then the
// GC won't automatically run when gc_alloc can't find enough blocks. But
@@ -253,6 +252,9 @@ typedef struct _mp_state_thread_t {
uint8_t *pystack_cur;
#endif
+ // Locking of the GC is done per thread.
+ uint16_t gc_lock_depth;
+
////////////////////////////////////////////////////////////
// START ROOT POINTER SECTION
// Everything that needs GC scanning must start here, and
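For reference, moving the field from mp_state_mem_t to mp_state_thread_t changes which accessor reaches it. A rough sketch of how the two macros resolve, paraphrased from py/mpstate.h rather than quoted from this diff (the exact definitions may differ in detail):

    // MP_STATE_MEM() always refers to the single shared heap/GC state.
    #define MP_STATE_MEM(x) (mp_state_ctx.mem.x)

    #if MICROPY_PY_THREAD
    // With threading enabled, each thread resolves to its own mp_state_thread_t,
    // which is what makes gc_lock_depth a per-thread property after this patch.
    #define MP_STATE_THREAD(x) (mp_thread_get_state()->x)
    #else
    // Single-threaded builds have only the main thread's state.
    #define MP_STATE_THREAD(x) (mp_state_ctx.thread.x)
    #endif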