py/scheduler: Fix race in checking scheduler pending state.

Because the atomic section starts after checking whether the scheduler
state is pending, it's possible for the state to change to something other
than pending by the time the atomic section actually begins.

This is especially likely on ports where MICROPY_BEGIN_ATOMIC_SECTION is
implemented with a mutex (i.e. it might block), but the race exists
regardless, i.e. if a context switch occurs between those two lines.
pull/5890/head
Jim Mussared 2020-04-03 14:15:18 +11:00 zatwierdzone przez Damien George
rodzic c2cfbcc8d4
commit 243805d776
2 zmienionych plików z 29 dodań i 19 usunięć

Wyświetl plik

@@ -60,22 +60,27 @@ static inline bool mp_sched_empty(void) {
 void mp_handle_pending(bool raise_exc) {
     if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
         mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
-        mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
-        if (obj != MP_OBJ_NULL) {
-            MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
-            if (!mp_sched_num_pending()) {
-                MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
-            }
-            if (raise_exc) {
-                MICROPY_END_ATOMIC_SECTION(atomic_state);
-                nlr_raise(obj);
+        // Re-check state is still pending now that we're in the atomic section.
+        if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+            mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
+            if (obj != MP_OBJ_NULL) {
+                MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
+                if (!mp_sched_num_pending()) {
+                    MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+                }
+                if (raise_exc) {
+                    MICROPY_END_ATOMIC_SECTION(atomic_state);
+                    nlr_raise(obj);
+                }
             }
+            mp_handle_pending_tail(atomic_state);
+        } else {
+            MICROPY_END_ATOMIC_SECTION(atomic_state);
         }
-        mp_handle_pending_tail(atomic_state);
     }
 }
 
-// This function should only be called be mp_sched_handle_pending,
+// This function should only be called by mp_handle_pending,
 // or by the VM's inlined version of that function.
 void mp_handle_pending_tail(mp_uint_t atomic_state) {
     MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;

21
py/vm.c
Wyświetl plik

@@ -1366,18 +1366,23 @@ pending_exception_check:
         #if MICROPY_ENABLE_SCHEDULER
                 // This is an inlined variant of mp_handle_pending
                 if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
-                    MARK_EXC_IP_SELECTIVE();
                     mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
-                    mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
-                    if (obj != MP_OBJ_NULL) {
-                        MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
-                        if (!mp_sched_num_pending()) {
-                            MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+                    // Re-check state is still pending now that we're in the atomic section.
+                    if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+                        MARK_EXC_IP_SELECTIVE();
+                        mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
+                        if (obj != MP_OBJ_NULL) {
+                            MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
+                            if (!mp_sched_num_pending()) {
+                                MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+                            }
+                            MICROPY_END_ATOMIC_SECTION(atomic_state);
+                            RAISE(obj);
                         }
+                        mp_handle_pending_tail(atomic_state);
+                    } else {
                         MICROPY_END_ATOMIC_SECTION(atomic_state);
-                        RAISE(obj);
                     }
-                    mp_handle_pending_tail(atomic_state);
                 }
         #else
                 // This is an inlined variant of mp_handle_pending