summaryrefslogtreecommitdiffstatshomepage
path: root/py
diff options
context:
space:
mode:
authorAngus Gratton <angus@redyak.com.au>2025-05-23 14:39:37 +1000
committerDamien George <damien@micropython.org>2025-06-04 11:31:12 +1000
commit7f274c7550654160f73b0de8fa73338dc58109a6 (patch)
tree219a9755e531528cd7c1978cb0e97ad76d013d0e /py
parentb15348415e9d5ad2a978ca38a8da356faee88e91 (diff)
downloadmicropython-7f274c7550654160f73b0de8fa73338dc58109a6.tar.gz
micropython-7f274c7550654160f73b0de8fa73338dc58109a6.zip
py/scheduler: Only run scheduler callbacks queued before run started.
Without this change, a scheduler callback which itself queues a new callback will have that callback executed as part of the same scheduler run. Where a callback may re-queue itself, this can lead to an infinite loop. With this change, each call to mp_handle_pending() will only service the callbacks which were queued when the scheduler pass started - any callbacks added during the run are serviced on the next mp_handle_pending(). This does mean some interrupts may have higher latency (as the callback is deferred until the next scheduler run), but the worst-case latency should stay very similar. This work was funded through GitHub Sponsors. Signed-off-by: Angus Gratton <angus@redyak.com.au>
Diffstat (limited to 'py')
-rw-r--r--py/scheduler.c26
1 file changed, 15 insertions, 11 deletions
diff --git a/py/scheduler.c b/py/scheduler.c
index 2170b9577e..d4cdb59efb 100644
--- a/py/scheduler.c
+++ b/py/scheduler.c
@@ -88,17 +88,21 @@ static inline void mp_sched_run_pending(void) {
#if MICROPY_SCHEDULER_STATIC_NODES
// Run all pending C callbacks.
- while (MP_STATE_VM(sched_head) != NULL) {
- mp_sched_node_t *node = MP_STATE_VM(sched_head);
- MP_STATE_VM(sched_head) = node->next;
- if (MP_STATE_VM(sched_head) == NULL) {
- MP_STATE_VM(sched_tail) = NULL;
- }
- mp_sched_callback_t callback = node->callback;
- node->callback = NULL;
- MICROPY_END_ATOMIC_SECTION(atomic_state);
- callback(node);
- atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ mp_sched_node_t *original_tail = MP_STATE_VM(sched_tail);
+ if (original_tail != NULL) {
+ mp_sched_node_t *node;
+ do {
+ node = MP_STATE_VM(sched_head);
+ MP_STATE_VM(sched_head) = node->next;
+ if (MP_STATE_VM(sched_head) == NULL) {
+ MP_STATE_VM(sched_tail) = NULL;
+ }
+ mp_sched_callback_t callback = node->callback;
+ node->callback = NULL;
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ callback(node);
+ atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ } while (node != original_tail); // Don't execute any callbacks scheduled during this run
}
#endif