
sched: remove g_cpu_schedlock g_cpu_irqsetlock g_cpu_locksetlock
We can use g_cpu_lockset to determine whether we currently hold the
scheduler lock. All accesses to and modifications of g_cpu_lockset,
g_cpu_irqlock, and g_cpu_irqset already take place inside the critical
section, so we can operate on them directly and drop the auxiliary
spinlocks.
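
For illustration, the change boils down to the following pattern (a
minimal sketch in C; the identifiers are taken from the NuttX sources in
the diff below, but the snippet itself is not part of the commit):

    /* Before: every update of the bitset went through an auxiliary
     * spinlock pair (here g_cpu_irqsetlock protecting g_cpu_irqset).
     */

    spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);

    /* After: the caller is already inside the critical section, so the
     * bitset can be updated directly and the auxiliary lock is removed.
     */

    g_cpu_irqset |= (1 << cpu);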

test:
We can use qemu for testing.

compiling:
make distclean -j20; ./tools/configure.sh -l qemu-armv8a:nsh_smp; make -j20
running:
qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic -machine virt,virtualization=on,gic-version=3 -net none -chardev stdio,id=con,mux=on -serial chardev:con -mon chardev=con,mode=readline -kernel ./nuttx

Signed-off-by: hujun5 <hujun5@xiaomi.com>
hujun260 committed May 28, 2024
1 parent 7044e38 commit 8b2ad8d
Showing 11 changed files with 54 additions and 316 deletions.
11 changes: 0 additions & 11 deletions arch/arm/src/armv8-m/arm_schedulesigaction.c
@@ -372,17 +372,6 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
#endif
}

/* In an SMP configuration, the interrupt disable logic also
* involves spinlocks that are configured per the TCB irqcount
* field. This is logically equivalent to
* enter_critical_section(). The matching call to
* leave_critical_section() will be performed in
* arm_sigdeliver().
*/

spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);

/* RESUME the other CPU if it was PAUSED */

if (cpu != me)
12 changes: 12 additions & 0 deletions include/nuttx/irq.h
@@ -70,6 +70,18 @@

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_SMP
# define cpu_irqlock_clear() \
do \
{ \
g_cpu_irqset = 0; \
SP_DMB(); \
g_cpu_irqlock = SP_UNLOCKED; \
SP_DSB(); \
} \
while (0)
#endif
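
For clarity, an annotated reading of cpu_irqlock_clear() above (assuming
SP_DMB() and SP_DSB() are the NuttX data-memory/data-synchronization
barrier wrappers):

    g_cpu_irqset = 0;            /* 1. No CPU is marked as holding the IRQ lock */
    SP_DMB();                    /* 2. Publish the cleared bitset first...      */
    g_cpu_irqlock = SP_UNLOCKED; /* 3. ...then release the lock itself          */
    SP_DSB();                    /* 4. Complete both stores before proceeding   */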

/****************************************************************************
* Public Types
****************************************************************************/
46 changes: 0 additions & 46 deletions include/nuttx/spinlock.h
@@ -344,52 +344,6 @@ void spin_unlock_wo_note(FAR volatile spinlock_t *lock);
# define spin_is_locked(l) (*(l) == SP_LOCKED)
#endif

/****************************************************************************
* Name: spin_setbit
*
* Description:
* Makes setting a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to set the CPU bit in
* cpu - The bit number to be set
* setlock - A reference to the lock protecting the set
* orlock - Will be set to SP_LOCKED while holding setlock
*
* Returned Value:
* None
*
****************************************************************************/

#ifdef CONFIG_SPINLOCK
void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock);
#endif

/****************************************************************************
* Name: spin_clrbit
*
* Description:
* Makes clearing a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to clear the CPU bit in
* cpu - The bit number to be cleared
* setlock - A reference to the lock protecting the set
* orlock - Will be set to SP_UNLOCKED if all bits become cleared in set
*
* Returned Value:
* None
*
****************************************************************************/

#ifdef CONFIG_SPINLOCK
void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock);
#endif

/****************************************************************************
* Name: spin_initialize
*
52 changes: 25 additions & 27 deletions sched/irq/irq_csection.c
@@ -36,6 +36,18 @@
#include "irq/irq.h"

#ifdef CONFIG_IRQCOUNT
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/

#ifdef CONFIG_SMP
# define cpu_irqlock_set(cpu) \
do \
{ \
g_cpu_irqset |= (1 << cpu); \
} \
while (0)
#endif

/****************************************************************************
* Public Data
@@ -50,7 +62,6 @@ volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;

/* Used to keep track of which CPU(s) hold the IRQ lock. */

volatile spinlock_t g_cpu_irqsetlock;
volatile cpu_set_t g_cpu_irqset;

/* Handles nested calls to enter_critical section from interrupt handlers */
@@ -277,40 +288,29 @@ irqstate_t enter_critical_section(void)
DEBUGVERIFY(up_cpu_paused(cpu));
paused = true;

/* NOTE: As a result of up_cpu_paused(cpu), this CPU
* might set g_cpu_irqset in nxsched_resume_scheduler().
* However, another CPU might hold g_cpu_irqlock.
* To avoid this situation, release g_cpu_irqlock first.
*/

if ((g_cpu_irqset & (1 << cpu)) != 0)
{
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);

/* NOTE: Here, this CPU does not hold g_cpu_irqlock,
* so call irq_waitlock(cpu) to acquire g_cpu_irqlock.
*/

goto try_again_in_irq;
}

cpu_irqlock_set(cpu);
}

/* In any event, the nesting count is now one */

g_cpu_nestcount[cpu] = 1;

/* Also set the CPU bit so that other CPUs will be aware that
* this CPU holds the critical section.
*/

spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
if (paused)
{
up_cpu_paused_restore();
}

DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
(g_cpu_irqset & (1 << cpu)) != 0);
}
}
else
@@ -375,8 +375,7 @@ irqstate_t enter_critical_section(void)
* like lockcount: Both will disable pre-emption.
*/

spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_set(cpu);
rtcb->irqcount = 1;

/* Note that we have entered the critical section */
@@ -482,11 +481,11 @@ void leave_critical_section(irqstate_t flags)

FAR struct tcb_s *rtcb = current_task(cpu);
DEBUGASSERT(rtcb != NULL);
DEBUGASSERT((g_cpu_irqset & (1 << cpu)) != 0);

if (rtcb->irqcount <= 0)
{
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_clear();
}

g_cpu_nestcount[cpu] = 0;
@@ -541,8 +540,7 @@ void leave_critical_section(irqstate_t flags)
*/

rtcb->irqcount = 0;
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_clear();

/* Have all CPUs released the lock? */
}
@@ -615,13 +613,14 @@ void restore_critical_section(void)
* followed by context switching.
*/

FAR struct tcb_s *tcb = this_task();
FAR struct tcb_s *tcb;
int me = this_cpu();

/* Adjust global IRQ controls. If irqcount is greater than zero,
* then this task/this CPU holds the IRQ lock
*/

tcb = current_task(me);
if (tcb->irqcount > 0)
{
/* Do nothing here
@@ -643,8 +642,7 @@

if ((g_cpu_irqset & (1 << me)) != 0)
{
spin_clrbit(&g_cpu_irqset, me, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_clear();
}
}
}
44 changes: 1 addition & 43 deletions sched/sched/sched.h
@@ -281,50 +281,8 @@ extern volatile clock_t g_cpuload_total;
*/

#ifdef CONFIG_SMP
/* In the multiple CPU, SMP case, disabling context switches will not give a
* task exclusive access to the (multiple) CPU resources (at least without
* stopping the other CPUs): Even though pre-emption is disabled, other
* threads will still be executing on the other CPUS.
*
* There are additional rules for this multi-CPU case:
*
* 1. There is a global lock count 'g_cpu_lockset' that includes a bit for
* each CPU: If the bit is '1', then the corresponding CPU has the
* scheduler locked; if '0', then the CPU does not have the scheduler
* locked.
* 2. Scheduling logic would set the bit associated with the cpu in
* 'g_cpu_lockset' when the TCB at the head of the g_assignedtasks[cpu]
list has 'lockcount' > 0. This might happen when
* sched_lock() is called, or after a context switch that changes the
* TCB at the head of the g_assignedtasks[cpu] list.
* 3. Similarly, the cpu bit in the global 'g_cpu_lockset' would be cleared
* when the TCB at the head of the g_assignedtasks[cpu] list has
* 'lockcount' == 0. This might happen when sched_unlock() is called, or
* after a context switch that changes the TCB at the head of the
* g_assignedtasks[cpu] list.
* 4. Modification of the global 'g_cpu_lockset' must be protected by a
* spinlock, 'g_cpu_schedlock'. That spinlock would be taken when
* sched_lock() is called, and released when sched_unlock() is called.
* This assures that the scheduler does enforce the critical section.
* NOTE: Because of this spinlock, there should never be more than one
* bit set in 'g_cpu_lockset'; attempts to set additional bits should
* cause the CPU to block on the spinlock. However, additional bits
* could get set in 'g_cpu_lockset' due to the context switches on the
* various CPUs.
* 5. Each time the head of a g_assignedtasks[] list changes and the
* scheduler modifies 'g_cpu_lockset', it must also set 'g_cpu_schedlock'
* depending on the new state of 'g_cpu_lockset'.
* 6. Logic that currently uses the currently running task's lockcount
* instead uses the global 'g_cpu_schedlock'. A value of SP_UNLOCKED
* means that no CPU has pre-emption disabled; SP_LOCKED means that at
* least one CPU has pre-emption disabled.
*/

extern volatile spinlock_t g_cpu_schedlock;

/* Used to keep track of which CPU(s) hold the scheduler lock. */

extern volatile spinlock_t g_cpu_locksetlock;
extern volatile cpu_set_t g_cpu_lockset;

/* Used to lock tasklist to prevent from concurrent access */
@@ -404,7 +362,7 @@ FAR struct tcb_s *this_task(void) noinstrument_function;
int nxsched_select_cpu(cpu_set_t affinity);
int nxsched_pause_cpu(FAR struct tcb_s *tcb);

# define nxsched_islocked_global() spin_is_locked(&g_cpu_schedlock)
# define nxsched_islocked_global() (g_cpu_lockset != 0)
# define nxsched_islocked_tcb(tcb) nxsched_islocked_global()

#else
8 changes: 3 additions & 5 deletions sched/sched/sched_addreadytorun.c
@@ -206,7 +206,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* situation.
*/

me = this_cpu();
if ((nxsched_islocked_global()) &&
task_state != TSTATE_TASK_ASSIGNED)
{
@@ -239,6 +238,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* will need to stop that CPU.
*/

me = this_cpu();
if (cpu != me)
{
DEBUGVERIFY(up_cpu_pause(cpu));
@@ -278,13 +278,11 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)

if (btcb->lockcount > 0)
{
spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset |= (1 << cpu);
}
else
{
spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset &= ~(1 << cpu);
}

/* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ
