diff --git a/include/nuttx/init.h b/include/nuttx/init.h
index 1db060b53b372..63e5b266b41a1 100644
--- a/include/nuttx/init.h
+++ b/include/nuttx/init.h
@@ -89,7 +89,7 @@ extern "C"
  * hardware resources may not yet be available to the OS-internal logic.
  */
 
-EXTERN uint8_t g_nx_initstate; /* See enum nx_initstate_e */
+EXTERN volatile uint8_t g_nx_initstate; /* See enum nx_initstate_e */
 
 /****************************************************************************
  * Public Function Prototypes
diff --git a/sched/init/nx_smpstart.c b/sched/init/nx_smpstart.c
index 0deaf4e830a3e..5a38cb46c385b 100644
--- a/sched/init/nx_smpstart.c
+++ b/sched/init/nx_smpstart.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include <nuttx/init.h>
 
 #include "group/group.h"
 #include "sched/sched.h"
@@ -74,6 +75,12 @@ void nx_idle_trampoline(void)
   sched_note_start(tcb);
 #endif
 
+  /* Wait until CPU0 has entered the IDLE loop */
+
+  while (!OSINIT_IDLELOOP());
+
+  sched_unlock();
+
   /* Enter the IDLE loop */
 
   sinfo("CPU%d: Beginning Idle Loop\n", this_cpu());
diff --git a/sched/init/nx_start.c b/sched/init/nx_start.c
index 027b805e2c31d..10c68fd5e290f 100644
--- a/sched/init/nx_start.c
+++ b/sched/init/nx_start.c
@@ -195,7 +195,7 @@ struct tasklist_s g_tasklisttable[NUM_TASK_STATES];
  * hardware resources may not yet be available to the kernel logic.
  */
 
-uint8_t g_nx_initstate; /* See enum nx_initstate_e */
+volatile uint8_t g_nx_initstate; /* See enum nx_initstate_e */
 
 /****************************************************************************
  * Private Data
@@ -361,6 +361,7 @@ static void idle_task_initialize(void)
 
       tcb->pid = i;
       tcb->task_state = TSTATE_TASK_RUNNING;
+      tcb->lockcount = 1;
 
       /* Set the entry point. This is only for debug purposes. NOTE: that
        * the start_t entry point is not saved. That is acceptable, however,
@@ -628,13 +629,6 @@ void nx_start(void)
 
   task_initialize();
 
-  /* Disables context switching because we need take the memory manager
-   * semaphore on this CPU so that it will not be available on the other
-   * CPUs until we have finished initialization.
-   */
-
-  sched_lock();
-
   /* Initialize the instrument function */
 
   instrument_initialize();
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index 26600cae45f3a..47ea3b448bd55 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -297,9 +297,6 @@ extern volatile clock_t g_cpuload_total;
  */
 
 #ifdef CONFIG_SMP
-/* Used to keep track of which CPU(s) hold the IRQ lock. */
-
-extern volatile cpu_set_t g_cpu_lockset;
 
 /* This is the spinlock that enforces critical sections when interrupts are
  * disabled.
@@ -406,16 +403,13 @@ static inline_function FAR struct tcb_s *this_task(void)
 int  nxsched_select_cpu(cpu_set_t affinity);
 int  nxsched_pause_cpu(FAR struct tcb_s *tcb);
 void nxsched_process_delivered(int cpu);
-
-#  define nxsched_islocked_global() (g_cpu_lockset != 0)
-#  define nxsched_islocked_tcb(tcb) nxsched_islocked_global()
-
 #else
 #  define nxsched_select_cpu(a)    (0)
 #  define nxsched_pause_cpu(t)     (-38) /* -ENOSYS */
-#  define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
 #endif
 
+#define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
+
 /* CPU load measurement support */
 
 #if defined(CONFIG_SCHED_CPULOAD_SYSCLK) || \
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index ff481ee669ab5..79c66a991299a 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -194,7 +194,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
   * situation.
   */
 
-  if (nxsched_islocked_global())
+  if (nxsched_islocked_tcb(this_task()))
     {
       /* Add the new ready-to-run task to the g_pendingtasks task list for
        * now.
@@ -275,14 +275,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
       btcb->task_state = TSTATE_TASK_RUNNING;
 
       doswitch = true;
-
-      /* Resume scheduling lock */
-
-      DEBUGASSERT(g_cpu_lockset == 0);
-      if (btcb->lockcount > 0)
-        {
-          g_cpu_lockset |= (1 << cpu);
-        }
     }
 
   return doswitch;
diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index 0833b5746bad4..36b9ab7c87498 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -42,30 +42,6 @@
  * Public Data
  ****************************************************************************/
 
-/* Pre-emption is disabled via the interface sched_lock(). sched_lock()
- * works by preventing context switches from the currently executing tasks.
- * This prevents other tasks from running (without disabling interrupts) and
- * gives the currently executing task exclusive access to the (single) CPU
- * resources. Thus, sched_lock() and its companion, sched_unlock(), are
- * used to implement some critical sections.
- *
- * In the single CPU case, pre-emption is disabled using a simple lockcount
- * in the TCB. When the scheduling is locked, the lockcount is incremented;
- * when the scheduler is unlocked, the lockcount is decremented. If the
- * lockcount for the task at the head of the g_readytorun list has a
- * lockcount > 0, then pre-emption is disabled.
- *
- * No special protection is required since only the executing task can
- * modify its lockcount.
- */
-
-#ifdef CONFIG_SMP
-/* Used to keep track of which CPU(s) hold the IRQ lock. */
-
-volatile cpu_set_t g_cpu_lockset;
-
-#endif /* CONFIG_SMP */
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -93,7 +69,6 @@ volatile cpu_set_t g_cpu_lockset;
 int sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
-  int cpu;
 
   /* If the CPU supports suppression of interprocessor interrupts, then
    * simple disabling interrupts will provide sufficient protection for
@@ -118,36 +93,9 @@ int sched_lock(void)
       DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
 
       flags = enter_critical_section();
-      cpu = this_cpu();
-
-      /* We must hold the lock on this CPU before we increment the lockcount
-       * for the first time. Holding the lock is sufficient to lockout
-       * context switching.
-       */
-
-      if (rtcb->lockcount == 0)
-        {
-          /* We don't have the scheduler locked. But logic running on a
-           * different CPU may have the scheduler locked. It is not
-           * possible for some other task on this CPU to have the scheduler
-           * locked (or we would not be executing!).
-           */
-
-          DEBUGASSERT((g_cpu_lockset & (1 << cpu)) == 0);
-          g_cpu_lockset |= (1 << cpu);
-        }
-      else
-        {
-          /* If this thread already has the scheduler locked, then
-           * g_cpu_lockset should indicate that the scheduler is locked
-           * and g_cpu_lockset should include the bit setting for this CPU.
-           */
-
-          DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
-        }
 
       /* A counter is used to support locking. This allows nested lock
-       * operations on this thread (on any CPU)
+       * operations on this thread
        */
 
       rtcb->lockcount++;
diff --git a/sched/sched/sched_mergepending.c b/sched/sched/sched_mergepending.c
index c529bb3063799..2aa515458c979 100644
--- a/sched/sched/sched_mergepending.c
+++ b/sched/sched/sched_mergepending.c
@@ -199,7 +199,7 @@ bool nxsched_merge_pending(void)
    * some CPU other than this one is in a critical section.
    */
 
-  if (!nxsched_islocked_global())
+  if (!nxsched_islocked_tcb(this_task()))
     {
       /* Find the CPU that is executing the lowest priority task */
 
@@ -237,7 +237,7 @@ bool nxsched_merge_pending(void)
        * Check if that happened.
        */
 
-      if (nxsched_islocked_global())
+      if (nxsched_islocked_tcb(this_task()))
         {
           /* Yes.. then we may have incorrectly placed some TCBs in the
            * g_readytorun list (unlikely, but possible). We will have to
diff --git a/sched/sched/sched_process_delivered.c b/sched/sched/sched_process_delivered.c
index 574e89e0ed776..fed04e51eadbd 100644
--- a/sched/sched/sched_process_delivered.c
+++ b/sched/sched/sched_process_delivered.c
@@ -84,9 +84,10 @@ void nxsched_process_delivered(int cpu)
       g_cpu_irqset |= (1 << cpu);
     }
 
+  tcb = current_task(cpu);
+
   if (g_delivertasks[cpu] == NULL)
     {
-      tcb = current_task(cpu);
       if (tcb->irqcount <= 0)
         {
           cpu_irqlock_clear();
@@ -95,13 +96,12 @@ void nxsched_process_delivered(int cpu)
       return;
     }
 
-  if (nxsched_islocked_global())
+  if (nxsched_islocked_tcb(tcb))
     {
       btcb = g_delivertasks[cpu];
       g_delivertasks[cpu] = NULL;
       nxsched_add_prioritized(btcb, &g_pendingtasks);
       btcb->task_state = TSTATE_TASK_PENDING;
-      tcb = current_task(cpu);
       if (tcb->irqcount <= 0)
         {
           cpu_irqlock_clear();
@@ -111,9 +111,8 @@ void nxsched_process_delivered(int cpu)
     }
 
   btcb = g_delivertasks[cpu];
-  tasklist = &g_assignedtasks[cpu];
 
-  for (next = (FAR struct tcb_s *)tasklist->head;
+  for (next = tcb;
        (next && btcb->sched_priority <= next->sched_priority);
        next = next->flink);
 
@@ -122,6 +121,7 @@ void nxsched_process_delivered(int cpu)
     {
       /* Special case: Insert at the head of the list */
 
+      tasklist = &g_assignedtasks[cpu];
       dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist);
       btcb->cpu = cpu;
       btcb->task_state = TSTATE_TASK_RUNNING;
@@ -129,11 +129,6 @@ void nxsched_process_delivered(int cpu)
       DEBUGASSERT(btcb->flink != NULL);
       DEBUGASSERT(next == btcb->flink);
       next->task_state = TSTATE_TASK_ASSIGNED;
-
-      if (btcb->lockcount > 0)
-        {
-          g_cpu_lockset |= (1 << cpu);
-        }
     }
   else
     {
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index e9b26a68d897f..5ff83b31cee26 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -262,23 +262,6 @@ void nxsched_remove_running(FAR struct tcb_s *tcb)
       nxttcb = rtrtcb;
     }
 
-  /* Will pre-emption be disabled after the switch? If the lockcount is
-   * greater than zero, then this task/this CPU holds the scheduler lock.
-   */
-
-  if (nxttcb->lockcount > 0)
-    {
-      /* Yes... make sure that scheduling logic knows about this */
-
-      g_cpu_lockset |= (1 << cpu);
-    }
-  else
-    {
-      /* No.. we may need to perform release our hold on the lock. */
-
-      g_cpu_lockset &= ~(1 << cpu);
-    }
-
   /* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ
    * controls will be done in the pause handler on the new CPU(cpu).
   * If the task is scheduled on this CPU(me), do nothing because
diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c
index 372ce47dc132f..7a3d5bfd33349 100644
--- a/sched/sched/sched_setpriority.c
+++ b/sched/sched/sched_setpriority.c
@@ -70,7 +70,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
    * then use the 'nxttcb' which will probably be the IDLE thread.
    */
 
-  if (!nxsched_islocked_global())
+  if (!nxsched_islocked_tcb(this_task()))
     {
       /* Search for the highest priority task that can run on tcb->cpu. */
 
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index 0c42dd6be318a..5d35eb858782c 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -77,12 +77,11 @@ int sched_unlock(void)
       irqstate_t flags = enter_critical_section();
       int cpu = this_cpu();
 
+      DEBUGASSERT(rtcb->lockcount > 0);
+
       /* Decrement the preemption lock counter */
 
-      if (rtcb->lockcount > 0)
-        {
-          rtcb->lockcount--;
-        }
+      rtcb->lockcount--;
 
       /* Check if the lock counter has decremented to zero. If so,
        * then pre-emption has been re-enabled.
@@ -103,14 +102,6 @@ int sched_unlock(void)
 
           rtcb->lockcount = 0;
 
-          /* The lockcount has decremented to zero and we need to perform
-           * release our hold on the lock.
-           */
-
-          DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
-
-          g_cpu_lockset &= ~(1 << cpu);
-
           /* Release any ready-to-run tasks that have collected in
            * g_pendingtasks.
            *
@@ -137,7 +128,7 @@ int sched_unlock(void)
            * BEFORE it clears IRQ lock.
            */
 
-          if (!nxsched_islocked_global() &&
+          if (!nxsched_islocked_tcb(rtcb) &&
               list_pendingtasks()->head != NULL)
             {
               if (nxsched_merge_pending())
@@ -211,6 +202,7 @@ int sched_unlock(void)
 #endif
         }
 
+      UNUSED(cpu);
       leave_critical_section(flags);
     }
 
@@ -234,12 +226,11 @@ int sched_unlock(void)
 
       irqstate_t flags = enter_critical_section();
 
+      DEBUGASSERT(rtcb->lockcount > 0);
+
       /* Decrement the preemption lock counter */
 
-      if (rtcb->lockcount > 0)
-        {
-          rtcb->lockcount--;
-        }
+      rtcb->lockcount--;
 
       /* Check if the lock counter has decremented to zero. If so,
        * then pre-emption has been re-enabled.
diff --git a/sched/task/task_exit.c b/sched/task/task_exit.c
index 3dc03fa41e1d8..a89fb821cd3ef 100644
--- a/sched/task/task_exit.c
+++ b/sched/task/task_exit.c
@@ -137,12 +137,6 @@ int nxtask_exit(void)
 
   rtcb->lockcount++;
 
-#ifdef CONFIG_SMP
-  /* Make sure that the system knows about the locked state */
-
-  g_cpu_lockset |= (1 << cpu);
-#endif
-
   rtcb->task_state = TSTATE_TASK_READYTORUN;
 
   /* Move the TCB to the specified blocked task list and delete it. Calling
@@ -177,14 +171,5 @@ int nxtask_exit(void)
 
   rtcb->lockcount--;
 
-#ifdef CONFIG_SMP
-  if (rtcb->lockcount == 0)
-    {
-      /* Make sure that the system knows about the unlocked state */
-
-      g_cpu_lockset &= ~(1 << cpu);
-    }
-#endif
-
   return ret;
 }
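
Reviewer note: the sketch below is a minimal, self-contained model of the locking scheme this patch moves to. It is not NuttX code, and the tcb_model_* names are invented for illustration only. It just shows the core idea: whether pre-emption is locked is derived from the running task's lockcount alone (the new nxsched_islocked_tcb() check), so no global g_cpu_lockset bitmask is needed, and a secondary CPU whose idle TCB boots with lockcount = 1 re-enables pre-emption with a single sched_unlock() once CPU0 has reached the idle loop.

/* Illustrative model only: hypothetical names, not the NuttX implementation */

#include <stdbool.h>
#include <stdio.h>

struct tcb_model_s
{
  int lockcount;                /* Nested sched_lock() depth of this task */
};

/* Pre-emption is locked iff the running task's lockcount is non-zero,
 * mirroring the new nxsched_islocked_tcb(tcb) check.
 */

static bool model_islocked(const struct tcb_model_s *rtcb)
{
  return rtcb->lockcount > 0;
}

static void model_lock(struct tcb_model_s *rtcb)
{
  rtcb->lockcount++;            /* Nested calls just increment the count */
}

static void model_unlock(struct tcb_model_s *rtcb)
{
  if (rtcb->lockcount > 0 && --rtcb->lockcount == 0)
    {
      /* The real sched_unlock() would merge g_pendingtasks and possibly
       * switch contexts here; the model only reports the transition.
       */

      printf("pre-emption re-enabled\n");
    }
}

int main(void)
{
  /* A secondary CPU's idle task boots with lockcount = 1, like the
   * tcb->lockcount = 1 assignment in idle_task_initialize() above.
   */

  struct tcb_model_s idle = { .lockcount = 1 };

  printf("locked at boot: %d\n", model_islocked(&idle));      /* prints 1 */

  model_lock(&idle);            /* nesting still works while booting */
  model_unlock(&idle);

  model_unlock(&idle);          /* nx_idle_trampoline()'s sched_unlock() */
  printf("locked after unlock: %d\n", model_islocked(&idle)); /* prints 0 */

  return 0;
}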