@@ -414,6 +414,8 @@ void sched_wakeup_task(tcb_t *task)
414414 */
415415uint16_t sched_select_next_task (void )
416416{
417+ uint32_t flag = 0 ;
418+
417419 if (unlikely (!get_task_current () || !get_task_current ()-> data ))
418420 panic (ERR_NO_TASKS );
419421
@@ -482,6 +484,8 @@ void dispatcher(void)
482484/* Top-level context-switch for preemptive scheduling. */
483485void dispatch (void )
484486{
487+ uint32_t flag = 0 ;
488+
485489 if (unlikely (!kcb || !get_task_current () || !get_task_current ()-> data ))
486490 panic (ERR_NO_TASKS );
487491
@@ -515,6 +519,8 @@ void dispatch(void)
515519/* Cooperative context switch */
516520void yield (void )
517521{
522+ uint32_t flag = 0 ;
523+
518524 if (unlikely (!kcb || !get_task_current () || !get_task_current ()-> data ))
519525 return ;
520526
@@ -530,6 +536,8 @@ void yield(void)
530536#endif
531537
532538 /* In cooperative mode, delays are only processed on an explicit yield. */
539+ spin_lock_irqsave (& kcb -> kcb_lock , & flag );
540+
533541 if (!kcb -> preemptive )
534542 list_foreach (kcb -> tasks , delay_update , NULL );
535543
@@ -824,10 +832,13 @@ uint16_t mo_task_id(void)
824832
825833int32_t mo_task_idref (void * task_entry )
826834{
827- if (!task_entry || !kcb -> tasks )
835+ spin_lock_irqsave (& kcb -> kcb_lock , & task_flags );
836+
837+ if (!task_entry || !kcb -> tasks ) {
838+ spin_unlock_irqrestore (& kcb -> kcb_lock , task_flags );
828839 return ERR_TASK_NOT_FOUND ;
840+ }
829841
830- spin_lock_irqsave (& kcb -> kcb_lock , & task_flags );
831842 list_node_t * node = list_foreach (kcb -> tasks , refcmp , task_entry );
832843 spin_unlock_irqrestore (& kcb -> kcb_lock , task_flags );
833844
@@ -838,23 +849,46 @@ void mo_task_wfi(void)
838849{
839850 /* Process deferred timer work before waiting */
840851 process_deferred_timer_work ();
852+ uint32_t flag = 0 ;
841853
842854 if (!kcb -> preemptive )
843855 return ;
844856
857+ spin_lock_irqsave (& kcb -> kcb_lock , & flag );
845858 volatile uint32_t current_ticks = kcb -> ticks ;
846- while (current_ticks == kcb -> ticks )
859+ spin_unlock_irqrestore (& kcb -> kcb_lock , flag );
860+
861+ while (1 ) {
862+ spin_lock_irqsave (& kcb -> kcb_lock , & flag );
863+ if (current_ticks != kcb -> ticks ) {
864+ spin_unlock_irqrestore (& kcb -> kcb_lock , flag );
865+ break ;
866+ }
867+ spin_unlock_irqrestore (& kcb -> kcb_lock , flag );
847868 hal_cpu_idle ();
869+ }
848870}
849871
850872uint16_t mo_task_count (void )
851873{
852- return kcb -> task_count ;
874+ uint32_t task_count ;
875+ uint32_t flag ;
876+
877+ spin_lock_irqsave (& kcb -> kcb_lock , & flag );
878+ task_count = kcb -> task_count ;
879+ spin_unlock_irqrestore (& kcb -> kcb_lock , flag );
880+ return task_count ;
853881}
854882
855883uint32_t mo_ticks (void )
856884{
857- return kcb -> ticks ;
885+ uint32_t ticks ;
886+ uint32_t flag ;
887+
888+ spin_lock_irqsave (& kcb -> kcb_lock , & flag );
889+ ticks = kcb -> ticks ;
890+ spin_unlock_irqrestore (& kcb -> kcb_lock , flag );
891+ return ticks ;
858892}
859893
860894uint64_t mo_uptime (void )