sched: adjust the scheduling strategy
1 Only the idle task can have the TCB_FLAG_CPU_LOCKED flag set.
  According to the code logic, btcb cannot be an idle task, so this check
  can be removed.
2 Optimize the preemption check and remove the call to
  nxsched_add_prioritized() on the preemption path (see the sketch below).
3 Speed up scheduling while avoiding tasks being moved multiple times
  between g_assignedtasks and g_readytorun.
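
For review, a simplified sketch of the resulting flow in nxsched_add_readytorun()
(illustration only, not the committed code; details such as pausing the target
CPU and updating g_cpu_lockset are omitted, and the helpers used are the ones
that appear in the diff below):

cpu  = nxsched_select_cpu(btcb->affinity);              /* Pick the target CPU */
rtcb = (FAR struct tcb_s *)g_assignedtasks[cpu].head;   /* Task running there  */

if (nxsched_islocked_global())
  {
    /* Pre-emption is locked: park btcb on g_pendingtasks for now */

    nxsched_add_prioritized(btcb, list_pendingtasks());
    btcb->task_state = TSTATE_TASK_PENDING;
    doswitch = false;
  }
else if (rtcb->sched_priority >= btcb->sched_priority)
  {
    /* No pre-emption: queue btcb by priority on g_readytorun */

    nxsched_add_prioritized(btcb, list_readytorun());
    btcb->task_state = TSTATE_TASK_READYTORUN;
    doswitch = false;
  }
else
  {
    /* btcb pre-empts the running task: one O(1) insertion at the head of
     * g_assignedtasks[cpu] replaces the former call to nxsched_add_prioritized()
     */

    dq_addfirst_nonempty((FAR dq_entry_t *)btcb, &g_assignedtasks[cpu]);
    btcb->cpu        = cpu;
    btcb->task_state = TSTATE_TASK_RUNNING;
    doswitch = true;
  }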

Configure NuttX and compile:
$ ./tools/configure.sh -l qemu-armv8a:nsh_smp
$ make
Run with qemu:
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
   -machine virt,virtualization=on,gic-version=3 \
   -net none -chardev stdio,id=con,mux=on -serial chardev:con \
   -mon chardev=con,mode=readline -kernel ./nuttx

Signed-off-by: hujun5 <hujun5@xiaomi.com>
hujun260 committed Sep 11, 2024
1 parent 43d0d95 commit 2bf8742
Showing 3 changed files with 140 additions and 124 deletions.
66 changes: 66 additions & 0 deletions sched/sched/queue.h
@@ -0,0 +1,66 @@
/****************************************************************************
* sched/sched/queue.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/

#ifndef __INCLUDE_SCHED_SCHED_NUTTX_QUEUE_H
#define __INCLUDE_SCHED_SCHED_NUTTX_QUEUE_H

/****************************************************************************
* Included Files
****************************************************************************/

#include <nuttx/queue.h>

/****************************************************************************
* Pre-processor Definitions
****************************************************************************/

#define dq_addfirst_nonempty(p, q) \
do \
{ \
FAR dq_entry_t *tmp_node = (p); \
tmp_node->blink = NULL; \
tmp_node->flink = (q)->head; \
(q)->head->blink = tmp_node; \
(q)->head = tmp_node; \
} \
while (0)

#define dq_rem_head(p, q) \
do \
{ \
FAR dq_entry_t *tmp_node = (p); \
FAR dq_entry_t *tmp_next = tmp_node->flink; \
(q)->head = tmp_next; \
tmp_next->blink = NULL; \
tmp_node->flink = NULL; \
} \
while (0)

#define dq_rem_mid(p) \
do \
{ \
FAR dq_entry_t *tmp_prev = (FAR dq_entry_t *)(p)->blink; \
FAR dq_entry_t *tmp_next = (FAR dq_entry_t *)(p)->flink; \
tmp_prev->flink = tmp_next; \
tmp_next->blink = tmp_prev; \
} \
while (0)

#endif /* __INCLUDE_SCHED_SCHED_NUTTX_QUEUE_H */
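
To make the preconditions of these macros explicit (dq_addfirst_nonempty()
requires a non-empty queue, dq_rem_head() requires the head to have a
successor, and dq_rem_mid() requires an entry with both a predecessor and a
successor), here is a minimal, stand-alone host-side sketch; it is not part of
the commit, it mirrors the dq_entry_s/dq_queue_s layout of nuttx/queue.h, and
it repeats the three macros so it can be compiled on its own:

/* Host-side sketch (not part of the commit): exercises the three new macros
 * under the preconditions they assume.  FAR is a no-op on the host.
 */

#include <assert.h>
#include <stddef.h>

#define FAR

typedef struct dq_entry_s dq_entry_t;
struct dq_entry_s
{
  FAR dq_entry_t *flink;      /* Forward link  */
  FAR dq_entry_t *blink;      /* Backward link */
};

typedef struct dq_queue_s
{
  FAR dq_entry_t *head;
  FAR dq_entry_t *tail;
} dq_queue_t;

#define dq_addfirst_nonempty(p, q) \
  do \
    { \
      FAR dq_entry_t *tmp_node = (p); \
      tmp_node->blink = NULL; \
      tmp_node->flink = (q)->head; \
      (q)->head->blink = tmp_node; \
      (q)->head = tmp_node; \
    } \
  while (0)

#define dq_rem_head(p, q) \
  do \
    { \
      FAR dq_entry_t *tmp_node = (p); \
      FAR dq_entry_t *tmp_next = tmp_node->flink; \
      (q)->head = tmp_next; \
      tmp_next->blink = NULL; \
      tmp_node->flink = NULL; \
    } \
  while (0)

#define dq_rem_mid(p) \
  do \
    { \
      FAR dq_entry_t *tmp_prev = (FAR dq_entry_t *)(p)->blink; \
      FAR dq_entry_t *tmp_next = (FAR dq_entry_t *)(p)->flink; \
      tmp_prev->flink = tmp_next; \
      tmp_next->blink = tmp_prev; \
    } \
  while (0)

int main(void)
{
  dq_entry_t a, b, c;
  dq_queue_t q;

  /* Seed a non-empty queue by hand: q = [b, c].  The tail is never touched
   * by these macros, mirroring the idle task that always sits at the tail
   * of g_assignedtasks[].
   */

  b.blink = NULL; b.flink = &c;
  c.blink = &b;   c.flink = NULL;
  q.head  = &b;   q.tail  = &c;

  dq_addfirst_nonempty(&a, &q);   /* O(1) push at the head: q = [a, b, c] */
  assert(q.head == &a && b.blink == &a);

  dq_rem_mid(&b);                 /* Unlink an interior entry: q = [a, c] */
  assert(a.flink == &c && c.blink == &a);

  dq_rem_head(&a, &q);            /* Pop the head (needs a successor): q = [c] */
  assert(q.head == &c && c.blink == NULL);

  return 0;
}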
151 changes: 30 additions & 121 deletions sched/sched/sched_addreadytorun.c
@@ -27,9 +27,8 @@
#include <stdbool.h>
#include <assert.h>

#include <nuttx/queue.h>

#include "irq/irq.h"
#include "sched/queue.h"
#include "sched/sched.h"

/****************************************************************************
@@ -152,30 +151,14 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
{
FAR struct tcb_s *rtcb;
FAR struct tcb_s *headtcb;
FAR dq_queue_t *tasklist;
bool doswitch;
int task_state;
int cpu;
int me;

/* Check if the blocked TCB is locked to this CPU */

if ((btcb->flags & TCB_FLAG_CPU_LOCKED) != 0)
{
/* Yes.. that is the CPU we must use */

task_state = TSTATE_TASK_ASSIGNED;
cpu = btcb->cpu;
}
else
{
/* Otherwise, find the CPU that is executing the lowest priority task
* (possibly its IDLE task).
*/

task_state = TSTATE_TASK_READYTORUN;
cpu = nxsched_select_cpu(btcb->affinity);
}
cpu = nxsched_select_cpu(btcb->affinity);

/* Get the task currently running on the CPU (may be the IDLE task) */

@@ -191,6 +174,10 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
{
task_state = TSTATE_TASK_RUNNING;
}
else
{
task_state = TSTATE_TASK_READYTORUN;
}

/* If the selected state is TSTATE_TASK_RUNNING, then we would like to
* start running the task. But we cannot do that if pre-emption is
@@ -205,8 +192,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* situation.
*/

if ((nxsched_islocked_global()) &&
task_state != TSTATE_TASK_ASSIGNED)
if (nxsched_islocked_global())
{
/* Add the new ready-to-run task to the g_pendingtasks task list for
* now.
@@ -231,7 +217,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
btcb->task_state = TSTATE_TASK_READYTORUN;
doswitch = false;
}
else /* (task_state == TSTATE_TASK_ASSIGNED || task_state == TSTATE_TASK_RUNNING) */
else /* (task_state == TSTATE_TASK_RUNNING) */
{
/* If we are modifying some assigned task list other than our own, we
* will need to stop that CPU.
@@ -243,109 +229,32 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
DEBUGVERIFY(up_cpu_pause(cpu));
}

/* Add the task to the list corresponding to the selected state
* and check if a context switch will occur
*/
tasklist = &g_assignedtasks[cpu];

/* Change "head" from TSTATE_TASK_RUNNING to TSTATE_TASK_ASSIGNED */

tasklist = list_assignedtasks(cpu);
doswitch = nxsched_add_prioritized(btcb, tasklist);
headtcb = (FAR struct tcb_s *)tasklist->head;
DEBUGASSERT(headtcb->task_state == TSTATE_TASK_RUNNING);
headtcb->task_state = TSTATE_TASK_ASSIGNED;

/* If the selected task list was the g_assignedtasks[] list and if the
* new tasks is the highest priority (RUNNING) task, then a context
* switch will occur.
/* Add btcb to the head of the g_assignedtasks
* task list and mark it as running
*/

if (doswitch)
{
FAR struct tcb_s *next;

/* The new btcb was added at the head of the ready-to-run list. It
* is now the new active task!
*/

/* Assign the CPU and set the running state */

DEBUGASSERT(task_state == TSTATE_TASK_RUNNING);

btcb->cpu = cpu;
btcb->task_state = TSTATE_TASK_RUNNING;

/* Adjust global pre-emption controls. If the lockcount is
* greater than zero, then this task/this CPU holds the scheduler
* lock.
*/

if (btcb->lockcount > 0)
{
g_cpu_lockset |= (1 << cpu);
}
else
{
g_cpu_lockset &= ~(1 << cpu);
}

/* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ
* controls will be done in the pause handler on the new CPU(cpu).
* If the task is scheduled on this CPU(me), do nothing because
* this CPU already has a critical section
*/

/* If the following task is not locked to this CPU, then it must
* be moved to the g_readytorun list. Since it cannot be at the
* head of the list, we can do this without invoking any heavy
* lifting machinery.
*/

DEBUGASSERT(btcb->flink != NULL);
next = btcb->flink;

if ((next->flags & TCB_FLAG_CPU_LOCKED) != 0)
{
DEBUGASSERT(next->cpu == cpu);
next->task_state = TSTATE_TASK_ASSIGNED;
}
else
{
/* Remove the task from the assigned task list */

dq_rem((FAR dq_entry_t *)next, tasklist);

/* Add the task to the g_readytorun or to the g_pendingtasks
* list. NOTE: That the above operations may cause the
* scheduler to become locked. It may be assigned to a
* different CPU the next time that it runs.
*/

if (nxsched_islocked_global())
{
next->task_state = TSTATE_TASK_PENDING;
tasklist = list_pendingtasks();
}
else
{
next->task_state = TSTATE_TASK_READYTORUN;
tasklist = list_readytorun();
}

nxsched_add_prioritized(next, tasklist);
}
}
else
dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist);

DEBUGASSERT(task_state == TSTATE_TASK_RUNNING);
btcb->cpu = cpu;
btcb->task_state = TSTATE_TASK_RUNNING;

doswitch = true;

/* Resume scheduling lock */

DEBUGASSERT(g_cpu_lockset == 0);
if (btcb->lockcount > 0)
{
/* No context switch. Assign the CPU and set the assigned state.
*
* REVISIT: I have seen this assertion fire. Apparently another
* CPU may add another, higher priority task to the same
* g_assignedtasks[] list sometime after nxsched_select_cpu() was
* called above, leaving this TCB in the wrong task list if
* task_state is TSTATE_TASK_ASSIGNED).
*/

DEBUGASSERT(task_state == TSTATE_TASK_ASSIGNED);

btcb->cpu = cpu;
btcb->task_state = TSTATE_TASK_ASSIGNED;
doswitch = false;
g_cpu_lockset |= (1 << cpu);
}

/* All done, restart the other CPU (if it was paused). */
47 changes: 44 additions & 3 deletions sched/sched/sched_removereadytorun.c
@@ -27,10 +27,10 @@
#include <stdbool.h>
#include <assert.h>

#include <nuttx/queue.h>
#include <nuttx/sched_note.h>

#include "irq/irq.h"
#include "sched/queue.h"
#include "sched/sched.h"

/****************************************************************************
@@ -188,7 +188,48 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
* or the g_assignedtasks[cpu] list.
*/

dq_rem((FAR dq_entry_t *)rtcb, tasklist);
dq_rem_head((FAR dq_entry_t *)rtcb, tasklist);

/* Find the highest priority non-running, non-idle task on the
* g_assignedtasks list of every other CPU and move it to the
* g_readytorun list, so that the task with the highest priority
* can be found globally.
*/

for (int i = 0; i < CONFIG_SMP_NCPUS; i++)
{
if (i == cpu)
{
/* The highest priority task of the current
* CPU has been found, which is nxttcb.
*/

continue;
}

for (rtrtcb = (FAR struct tcb_s *)g_assignedtasks[i].head;
!is_idle_task(rtrtcb); rtrtcb = rtrtcb->flink)
{
if (rtrtcb->task_state != TSTATE_TASK_RUNNING &&
CPU_ISSET(cpu, &rtrtcb->affinity))
{
/* We have found the highest priority non-running task on CPU i.
* It can be neither the head (the running task) nor the tail
* (the idle task) of the list, so it has both a predecessor and
* a successor and the dq_rem_mid macro can be used to delete it.
*/

dq_rem_mid(rtrtcb);
rtrtcb->task_state = TSTATE_TASK_READYTORUN;

/* Add rtrtcb to g_readytorun so that the task with the
* highest global priority can be found
*/

nxsched_add_prioritized(rtrtcb, &g_readytorun);
break;
}
}
}

/* Which task will go at the head of the list? It will be either the
* next tcb in the assigned task list (nxttcb) or a TCB in the
Expand Down Expand Up @@ -219,7 +260,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
*/

dq_rem((FAR dq_entry_t *)rtrtcb, list_readytorun());
dq_addfirst((FAR dq_entry_t *)rtrtcb, tasklist);
dq_addfirst_nonempty((FAR dq_entry_t *)rtrtcb, tasklist);

rtrtcb->cpu = cpu;
nxttcb = rtrtcb;
