summaryrefslogtreecommitdiffstatshomepage
path: root/py/scheduler.c
diff options
context:
space:
mode:
authorDamien George <damien@micropython.org>2022-03-23 17:13:03 +1100
committerDamien George <damien@micropython.org>2022-04-14 12:31:53 +1000
commit75506e496f7545c43c4d76bac131c4e01c258fa5 (patch)
treecbd1c429156a780a559ece6f98e260f52a877f0b /py/scheduler.c
parentd242a9b7f7b6d4e744889d5af6c17dfb54aed8c3 (diff)
downloadmicropython-75506e496f7545c43c4d76bac131c4e01c258fa5.tar.gz
micropython-75506e496f7545c43c4d76bac131c4e01c258fa5.zip
py/scheduler: Add support for scheduling static C-based callbacks.
If MICROPY_SCHEDULER_STATIC_NODES is enabled then C code can declare a static mp_sched_node_t and schedule a callback using mp_sched_schedule_node(). In contrast to using mp_sched_schedule(), the node version will have at most one pending callback outstanding, and will always be able to schedule if there is nothing already scheduled on this node. This guarantees that the callback will be called exactly once after it is scheduled. Signed-off-by: Damien George <damien@micropython.org>
Diffstat (limited to 'py/scheduler.c')
-rw-r--r--py/scheduler.c52
1 files changed, 51 insertions, 1 deletions
diff --git a/py/scheduler.c b/py/scheduler.c
index bd0bbf2078..3966da2975 100644
--- a/py/scheduler.c
+++ b/py/scheduler.c
@@ -90,6 +90,24 @@ void mp_handle_pending(bool raise_exc) {
// or by the VM's inlined version of that function.
void mp_handle_pending_tail(mp_uint_t atomic_state) {
MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
+
+ #if MICROPY_SCHEDULER_STATIC_NODES
+ // Run all pending C callbacks.
+ while (MP_STATE_VM(sched_head) != NULL) {
+ mp_sched_node_t *node = MP_STATE_VM(sched_head);
+ MP_STATE_VM(sched_head) = node->next;
+ if (MP_STATE_VM(sched_head) == NULL) {
+ MP_STATE_VM(sched_tail) = NULL;
+ }
+ mp_sched_callback_t callback = node->callback;
+ node->callback = NULL;
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ callback(node);
+ atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ }
+ #endif
+
+ // Run at most one pending Python callback.
if (!mp_sched_empty()) {
mp_sched_item_t item = MP_STATE_VM(sched_queue)[MP_STATE_VM(sched_idx)];
MP_STATE_VM(sched_idx) = IDX_MASK(MP_STATE_VM(sched_idx) + 1);
@@ -99,6 +117,7 @@ void mp_handle_pending_tail(mp_uint_t atomic_state) {
} else {
MICROPY_END_ATOMIC_SECTION(atomic_state);
}
+
mp_sched_unlock();
}
@@ -117,7 +136,11 @@ void mp_sched_unlock(void) {
assert(MP_STATE_VM(sched_state) < 0);
if (++MP_STATE_VM(sched_state) == 0) {
// vm became unlocked
- if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL || mp_sched_num_pending()) {
+ if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
+ #if MICROPY_SCHEDULER_STATIC_NODES
+ || MP_STATE_VM(sched_head) != NULL
+ #endif
+ || mp_sched_num_pending()) {
MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
} else {
MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
@@ -146,6 +169,33 @@ bool MICROPY_WRAP_MP_SCHED_SCHEDULE(mp_sched_schedule)(mp_obj_t function, mp_obj
return ret;
}
+#if MICROPY_SCHEDULER_STATIC_NODES
+// Schedule a C callback on a statically-allocated node.  Each node can have
+// at most one callback outstanding: if node->callback is already set, the
+// call is a no-op and returns false; otherwise the node is appended to the
+// sched_head/sched_tail singly-linked list and true is returned.  All queue
+// manipulation happens inside an atomic section, so the head/tail state is
+// protected against concurrent schedulers (NOTE(review): presumably this
+// makes it callable from IRQ context — confirm against port documentation).
+bool mp_sched_schedule_node(mp_sched_node_t *node, mp_sched_callback_t callback) {
+ mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ bool ret;
+ if (node->callback == NULL) {
+ // Node is idle: mark the scheduler as having pending work (only if it
+ // was idle; a locked scheduler state must not be overwritten).
+ if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
+ MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
+ }
+ node->callback = callback;
+ node->next = NULL;
+ // Append the node at the tail of the list (empty list if tail is NULL).
+ if (MP_STATE_VM(sched_tail) == NULL) {
+ MP_STATE_VM(sched_head) = node;
+ } else {
+ MP_STATE_VM(sched_tail)->next = node;
+ }
+ MP_STATE_VM(sched_tail) = node;
+ // Port hook to signal that something was scheduled (may wake the VM).
+ MICROPY_SCHED_HOOK_SCHEDULED;
+ ret = true;
+ } else {
+ // already scheduled
+ ret = false;
+ }
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ return ret;
+}
+#endif
+
#else // MICROPY_ENABLE_SCHEDULER
// A variant of this is inlined in the VM at the pending exception check