From 520882a2461e96728ae5bdb95966b126ad100044 Mon Sep 17 00:00:00 2001 From: richidubey Date: Sat, 25 Jul 2020 22:43:05 +0530 Subject: [PATCH 01/29] Added files for version 1.1 --- .../include/rtems/score/schedulerstrongapa.h | 307 +++--- cpukit/score/src/schedulerstrongapa.c | 966 ++++++++++++++---- 2 files changed, 962 insertions(+), 311 deletions(-) diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 0ac28cb4393..4ddae3235a4 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -6,25 +6,24 @@ * @brief Strong APA Scheduler API */ -/* - * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. +/* + * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. */ #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #include -#include #include #ifdef __cplusplus @@ -38,25 +37,32 @@ extern "C" { * * @brief Strong APA Scheduler * - * This is an implementation of the global fixed priority scheduler (G-FP). It - * uses one ready chain per priority to ensure constant time insert operations. - * The scheduled chain uses linear insert operations and has at most processor - * count entries. Since the processor and priority count are constants all - * scheduler operations complete in a bounded execution time. - * - * The the_thread preempt mode will be ignored. 
+ * This is an implementation of the Strong APA scheduler defined by + * Bradenbug et. al in Linux's Processor Affinity API, Refined: Shifting Real-Time Tasks Towards Higher Schedulability. * * @{ */ -/** - * @brief Scheduler context specialization for Strong APA - * schedulers. + /** + * @brief Scheduler context for Strong APA + * scheduler. + * + * Has the structure for scheduler context + * and Node defintion for Strong APA scheduler */ typedef struct { - Scheduler_SMP_Context Base; - Priority_bit_map_Control Bit_map; - Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ]; + /** + * @brief SMP Context to refer to SMP implementation + * code. + */ + Scheduler_SMP_Context Base; + + /** + * @brief Chain of all the nodes present in + * the system. Accounts for ready and scheduled nodes. + */ + Chain_Control allNodes; + } Scheduler_strong_APA_Context; /** @@ -64,17 +70,51 @@ typedef struct { * schedulers. */ typedef struct { + /** + * @brief Chain node for + * Scheduler_strong_APA_Context::allNodes + * + */ + Chain_Node Node; + /** * @brief SMP scheduler node. */ Scheduler_SMP_Node Base; /** - * @brief The associated ready queue of this node. + * @brief The associated affinity set of this node. + */ + Processor_mask affinity; + + /** + * @brief The associated affinity set of this node + * to be used while unpinning the node. */ - Scheduler_priority_Ready_queue Ready_queue; + Processor_mask unpin_affinity; + } Scheduler_strong_APA_Node; +/** + * @brief CPU structure to be used + * while traversing in the FIFO Queue + */ +typedef struct CPU +{ + /** + * @brief Chain node for + * _Scheduler_strong_APA_Get_highest_ready::Queue + * and _Scheduler_strong_APA_Get_lowest_scheduled::Queue + */ + Chain_Node node; + + /** + * @brief cpu associated with the node + */ + Per_CPU_Control cpu; + +}CPU; + /** * @brief Entry points for the Strong APA Scheduler. 
*/ @@ -91,8 +131,8 @@ typedef struct { _Scheduler_strong_APA_Ask_for_help, \ _Scheduler_strong_APA_Reconsider_help_request, \ _Scheduler_strong_APA_Withdraw_node, \ - _Scheduler_default_Pin_or_unpin, \ - _Scheduler_default_Pin_or_unpin, \ + _Scheduler_strong_APA_Pin, \ + _Scheduler_strong_APA_Unpin, \ _Scheduler_strong_APA_Add_processor, \ _Scheduler_strong_APA_Remove_processor, \ _Scheduler_strong_APA_Node_initialize, \ @@ -100,25 +140,15 @@ typedef struct { _Scheduler_default_Release_job, \ _Scheduler_default_Cancel_job, \ _Scheduler_default_Tick, \ - _Scheduler_SMP_Start_idle \ - SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \ + _Scheduler_strong_APA_Start_idle \ + _Scheduler_strong_APA_Set_affinity \ } -/** - * @brief Initializes the scheduler. - * - * @param scheduler The scheduler to initialize. - */ -void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ); +void _Scheduler_strong_APA_Initialize( + const Scheduler_Control *scheduler + ); + -/** - * @brief Initializes the node with the given priority. - * - * @param scheduler The scheduler control instance. - * @param[out] node The node to initialize. - * @param the_thread The thread of the node to initialize. - * @param priority The priority for @a node. - */ void _Scheduler_strong_APA_Node_initialize( const Scheduler_Control *scheduler, Scheduler_Node *node, @@ -126,82 +156,101 @@ void _Scheduler_strong_APA_Node_initialize( Priority_Control priority ); -/** - * @brief Blocks the thread. - * - * @param scheduler The scheduler control instance. - * @param[in, out] the_thread The thread to block. - * @param[in, out] node The node of the thread to block. 
- */ +void _Scheduler_strong_APA_Do_update( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control new_priority +); + +bool _Scheduler_strong_APA_Has_ready( + Scheduler_Context *context + ); + +Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( + Scheduler_Context *context, + Scheduler_Node *filter +); + +Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( + Scheduler_Context *context, + Scheduler_Node *filter_base +); + +void _Scheduler_strong_APA_Extract_from_scheduled( + Scheduler_Context *context, + Scheduler_Node *node_to_extract +); + +void _Scheduler_strong_APA_Extract_from_ready( + Scheduler_Context *context, + Scheduler_Node *node_to_extract +); + +void _Scheduler_strong_APA_Move_from_scheduled_to_ready( + Scheduler_Context *context, + Scheduler_Node *scheduled_to_ready +); + +void _Scheduler_strong_APA_Move_from_ready_to_scheduled( + Scheduler_Context *context, + Scheduler_Node *ready_to_scheduled +); + +void _Scheduler_strong_APA_Insert_ready( + Scheduler_Context *context, + Scheduler_Node *node_base, + Priority_Control insert_priority +); + +void _Scheduler_strong_APA_Allocate_processor( + Scheduler_Context *context, + Scheduler_Node *scheduled_base, + Scheduler_Node *victim_base, + Per_CPU_Control *victim_cpu +); + void _Scheduler_strong_APA_Block( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ); -/** - * @brief Unblocks the thread. - * - * @param scheduler The scheduler control instance. - * @param[in, out] the_thread The thread to unblock. - * @param[in, out] node The node of the thread to unblock. 
- */ +bool _Scheduler_strong_APA_Enqueue( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control insert_priority +); + +bool _Scheduler_strong_APA_Enqueue_scheduled( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control insert_priority +); + void _Scheduler_strong_APA_Unblock( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ); -/** - * @brief Updates the priority of the node. - * - * @param scheduler The scheduler control instance. - * @param the_thread The thread for the operation. - * @param[in, out] node The node to update the priority of. - */ -void _Scheduler_strong_APA_Update_priority( +bool _Scheduler_strong_APA_Ask_for_help( const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node ); -/** - * @brief Asks for help. - * - * @param scheduler The scheduler control instance. - * @param the_thread The thread that asks for help. - * @param node The node of @a the_thread. - * - * @retval true The request for help was successful. - * @retval false The request for help was not successful. - */ -bool _Scheduler_strong_APA_Ask_for_help( +void _Scheduler_strong_APA_Update_priority( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ); -/** - * @brief Reconsiders help request. - * - * @param scheduler The scheduler control instance. - * @param the_thread The thread to reconsider the help request of. - * @param[in, out] node The node of @a the_thread - */ void _Scheduler_strong_APA_Reconsider_help_request( const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node ); -/** - * @brief Withdraws the node. - * - * @param scheduler The scheduler control instance. - * @param[in, out] the_thread The thread to change the state to @a next_state. - * @param[in, out] node The node to withdraw. - * @param next_state The next state for @a the_thread. 
- */ void _Scheduler_strong_APA_Withdraw_node( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -209,43 +258,61 @@ void _Scheduler_strong_APA_Withdraw_node( Thread_Scheduler_state next_state ); -/** - * @brief Adds the idle thread to a processor. - * - * @param scheduler The scheduler control instance. - * @param[in, out] The idle thread to add to the processor. - */ +void _Scheduler_strong_APA_Register_idle( + Scheduler_Context *context, + Scheduler_Node *idle_base, + Per_CPU_Control *cpu +); + void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle ); -/** - * @brief Removes an idle thread from the given cpu. - * - * @param scheduler The scheduler instance. - * @param cpu The cpu control to remove from @a scheduler. - * - * @return The idle thread of the processor. - */ Thread_Control *_Scheduler_strong_APA_Remove_processor( const Scheduler_Control *scheduler, - struct Per_CPU_Control *cpu + Per_CPU_Control *cpu ); -/** - * @brief Performs a yield operation. - * - * @param scheduler The scheduler control instance. - * @param the_thread The thread to yield. - * @param[in, out] node The node of @a the_thread. 
- */ void _Scheduler_strong_APA_Yield( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ); +void _Scheduler_strong_APA_Do_set_affinity( + Scheduler_Context *context, + Scheduler_Node *node_base, + void *arg +); + +void _Scheduler_strong_APA_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *idle, + Per_CPU_Control *cpu +); + +void _Scheduler_strong_APA_Pin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +); + +void _Scheduler_strong_APA_Unpin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +); + +bool _Scheduler_strong_APA_Set_affinity( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + const Processor_mask *affinity +); + /** @} */ #ifdef __cplusplus diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 924cd86412c..c26c2488566 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -5,249 +5,579 @@ * * @brief Strong APA Scheduler Implementation */ - -/* - * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ + +/* + * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
+ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include -#include #include -static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self( - Scheduler_Context *context -) +static inline Scheduler_strong_APA_Context * +_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler ) +{ + return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler ); +} + +static inline Scheduler_strong_APA_Context * +_Scheduler_strong_APA_Get_self( Scheduler_Context *context ) { return (Scheduler_strong_APA_Context *) context; } -static Scheduler_strong_APA_Node * +static inline Scheduler_strong_APA_Node * _Scheduler_strong_APA_Node_downcast( Scheduler_Node *node ) { return (Scheduler_strong_APA_Node *) node; } - -static void _Scheduler_strong_APA_Move_from_scheduled_to_ready( - Scheduler_Context *context, - Scheduler_Node *scheduled_to_ready -) +/** + * @brief Initializes the Strong_APA scheduler. + * + * Sets the chain containing all the nodes to empty + * and initializes the SMP scheduler. + * + * @param scheduler used to get + * reference to Strong APA scheduler context + * @retval void + * @see _Scheduler_strong_APA_Node_initialize() + * + */ +void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) { Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); - Scheduler_strong_APA_Node *node = - _Scheduler_strong_APA_Node_downcast( scheduled_to_ready ); - - _Chain_Extract_unprotected( &node->Base.Base.Node.Chain ); - _Scheduler_priority_Ready_queue_enqueue_first( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); + _Scheduler_strong_APA_Get_context( scheduler ); + + _Scheduler_SMP_Initialize( &self->Base ); + _Chain_Initialize_empty( &self->allNodes ); } -static void _Scheduler_strong_APA_Move_from_ready_to_scheduled( - Scheduler_Context *context, - Scheduler_Node *ready_to_scheduled +/** + * @brief Initializes the node with the given priority. 
+ * + * @param scheduler The scheduler control instance. + * @param[out] node The node to initialize. + * @param the_thread The thread of the node to initialize. + * @param priority The priority for @a node. + */ +void _Scheduler_strong_APA_Node_initialize( + const Scheduler_Control *scheduler, + Scheduler_Node *node, + Thread_Control *the_thread, + Priority_Control priority ) { - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Node *node; - Priority_Control insert_priority; - - self = _Scheduler_strong_APA_Get_self( context ); - node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled ); - - _Scheduler_priority_Ready_queue_extract( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); - insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base ); - insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority ); - _Chain_Insert_ordered_unprotected( - &self->Base.Scheduled, - &node->Base.Base.Node.Chain, - &insert_priority, - _Scheduler_SMP_Priority_less_equal - ); + Scheduler_SMP_Node *smp_node; + + smp_node = _Scheduler_SMP_Node_downcast( node ); + _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority ); } -static void _Scheduler_strong_APA_Insert_ready( +/** + * @brief Helper function for /a update entry function + * + * Calls _Scheduler_SMP_Node_update_priority() + * + * @param scheduler The scheduler context. + * @param[out] node The node to update the priority of. + * @param new_priority Node's new priority. 
+ * @see _Scheduler_strong_APA_Do_update() + */ +void _Scheduler_strong_APA_Do_update( Scheduler_Context *context, - Scheduler_Node *node_base, - Priority_Control insert_priority + Scheduler_Node *node, + Priority_Control new_priority ) { - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Node *node; + Scheduler_SMP_Node *smp_node; - self = _Scheduler_strong_APA_Get_self( context ); - node = _Scheduler_strong_APA_Node_downcast( node_base ); + (void) context; + + smp_node = _Scheduler_SMP_Node_downcast( node ); + _Scheduler_SMP_Node_update_priority( smp_node, new_priority ); +} + +/** + * @brief Checks if scheduler has a ready node + * + * Iterates through all the nodes in /a allNodes to + * look for a ready node + * + * @param scheduler The scheduler context. + * @retval true if scheduler has a ready node available + * @retval false if scheduler has no ready nodes available + */ - if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) { - _Scheduler_priority_Ready_queue_enqueue( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); - } else { - _Scheduler_priority_Ready_queue_enqueue_first( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); +bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) +{ + Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self( context ); + + bool ret; + const Chain_Node *tail; + Chain_Node *next; + + tail = _Chain_Immutable_tail( &self->allNodes ); + next = _Chain_First( &self->allNodes ); + + ret=false; + + while ( next != tail ) { + Scheduler_strong_APA_Node *node; + + node = (Scheduler_strong_APA_Node *) next; + + if( _Scheduler_SMP_Node_state( &node->Base.Base ) + == SCHEDULER_SMP_NODE_READY ) { + ret=true; + break; + } } + + return ret; } -static void _Scheduler_strong_APA_Extract_from_ready( +/** + * @brief Checks the next highest node ready on run + * on the CPU on which @filter node was running on + * + */ +Scheduler_Node 
*_Scheduler_strong_APA_Get_highest_ready( Scheduler_Context *context, - Scheduler_Node *the_thread -) + Scheduler_Node *filter +) //TODO { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); - Scheduler_strong_APA_Node *node = - _Scheduler_strong_APA_Node_downcast( the_thread ); - - _Scheduler_priority_Ready_queue_extract( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); + //Plan for this function: (Pseudo Code): + Scheduler_strong_APA_Context *self=_Scheduler_strong_APA_Get_self( context ); + + CPU *Qcpu; + Thread_Control *thread; + Per_CPU_Control *thread_cpu; + Per_CPU_Control *curr_CPU; + Per_CPU_Control *assigned_cpu; + Scheduler_Node *ret; + Priority_Control max_priority; + Priority_Control curr_priority; + Chain_Control Queue; + bool visited[10]; //Temporary Compilation Fix + + + + thread = filter->user; + thread_cpu = _Thread_Get_CPU( thread ); + + //Implement the BFS Algorithm for task departure + //to get the highest ready task for a particular CPU + + + max_priority = _Scheduler_Node_get_priority( filter ); + max_priority = SCHEDULER_PRIORITY_PURIFY( max_priority ); + + ret=filter; + + const Chain_Node *tail; + Chain_Node *next; + + _Chain_Initialize_empty(&Queue); + + Qcpu = malloc( sizeof(CPU) ); + assert (Qcpu != NULL); //Should it be NULL? 
+ Qcpu->cpu=thread_cpu; + + _Chain_Initialize_node( &Qcpu->node ); + _Chain_Append_unprotected( &Queue, &Qcpu->node ); //Insert thread_CPU in the Queue + visited[ _Per_CPU_Get_index( thread_cpu ) ]=true; + + + while( !_Chain_Is_empty( &Queue) ) { + Qcpu = (CPU*) _Chain_Get_first_unprotected( &Queue ); + curr_CPU = Qcpu->cpu; + tail = _Chain_Immutable_tail( &self->allNodes ); + next = _Chain_First( &self->allNodes ); + + while ( next != tail ) { + Scheduler_strong_APA_Node *node; + node = (Scheduler_strong_APA_Node *) next; + + if( node->affinity & (1 << _Per_CPU_Get_index( curr_CPU ) ) ) { + //Checks if the thread_CPU is in the affinity set of the node + + if(_Scheduler_SMP_Node_state( &node->Base.Base ) + == SCHEDULER_SMP_NODE_SCHEDULED) { + + assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); + + if(visited[ _Per_CPU_Get_index( assigned_cpu ) ] == false) { + Qcpu = malloc( sizeof(CPU) ); + assert (Qcpu != NULL); //Should it be NULL? + Qcpu->cpu=*assigned_cpu; + + _Chain_Initialize_node( &Qcpu->node ); + _Chain_Append_unprotected( &Queue, &Qcpu->node ); + //Insert thread_CPU in the Queue + visited[ _Per_CPU_Get_index (assigned_cpu) ]=true; + } + } + else if(_Scheduler_SMP_Node_state( &node->Base.Base ) + == SCHEDULER_SMP_NODE_READY) { + curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if(curr_priorityBase.Base; + } + } + } + next = _Chain_Next( next ); + } + } + + if( ret != filter) + { + //Backtrack on the path from + //thread_cpu to ret, shifting along every task. + + //After this, thread_cpu receives the ret task + // So the ready task ret gets scheduled as well. + } + + return ret; +} +/** + * @brief Checks the lowest scheduled node + * running on a processor on which the + * @filter_base node could be running on + * + * @param context The scheduler context instance. + * @param filter_base The node which wants to get scheduled. 
+ * + * @retval node The lowest scheduled node that can be + * replaced by @filter_base + */ +Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( + Scheduler_Context *context, + Scheduler_Node *filter_base +) +{ + //Idea: BFS Algorithm for task arrival + + uint32_t cpu_max; + uint32_t cpu_index; + CPU *Qcpu; + + Per_CPU_Control *curr_CPU; + Thread_Control *curr_thread; + Scheduler_Node *curr_node; + Scheduler_Node *ret; + Chain_Control Queue; + Priority_Control max_priority; + Priority_Control curr_priority; + bool visited[10]; //Temporary Compilation Fix + + Scheduler_strong_APA_Node *Scurr_node; //Current Strong_APA_Node + Scheduler_strong_APA_Node *filter_node; + + filter_node = _Scheduler_strong_APA_Node_downcast( filter_base ); + + max_priority = 300;//Max (Lowest) priority encountered so far. + + cpu_max = _SMP_Get_processor_maximum(); + _Chain_Initialize_empty(&Queue); + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + if( ( filter_node->affinity & (1<cpu=*cpu; + + _Chain_Initialize_node( &Qcpu->node ); + _Chain_Append_unprotected( &Queue, &Qcpu->node ); + //Insert cpu in the Queue + visited[ cpu_index ]=true; + } + } + } + + while( !_Chain_Is_empty( &Queue) ) { + Qcpu = (CPU*) _Chain_Get_first_unprotected( &Queue ); + curr_CPU = &Qcpu->cpu; + curr_thread = curr_CPU->executing; + + curr_node = (Scheduler_Node *) _Chain_First( &curr_thread->Scheduler.Scheduler_nodes ); + + //How to check if the thread is not participating + //in helping on this processor? 
+ + curr_priority = _Scheduler_Node_get_priority( curr_node ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if(curr_priority < max_priority) { + ret = curr_node; + max_priority = curr_priority; + } + + Scurr_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + if( !curr_thread->is_idle ) { + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + if( ( Scurr_node->affinity & (1<cpu=*cpu; + + _Chain_Initialize_node( &Qcpu->node ); + _Chain_Append_unprotected( &Queue, &Qcpu->node ); + //Insert the cpu in the affinty set of curr_thread in the Queue + visited[ cpu_index ]=true; + } + } + } + } + } + + Priority_Control filter_priority; + filter_priority = _Scheduler_Node_get_priority( filter_base ); + filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); + + if( ret->Priority.value < filter_priority ) { + //Lowest priority task found has higher priority + // than filter_base. + //So, filter_base remains unassigned + //No task shifting. + } + + else { + //Backtrack on the path from + //_Thread_Get_CPU(ret->user) to ret, shifting along every task + } + + return ret; } -static void _Scheduler_strong_APA_Do_update( +/** + * @brief Extracts a node from the scheduled node's list + * + * Calls _Scheduler_SMP_Extract_from_scheduled() + * + * @param scheduler The scheduler context + * @param[in] node_to_extract The node to extract. 
+ */ +void _Scheduler_strong_APA_Extract_from_scheduled( Scheduler_Context *context, - Scheduler_Node *node_to_update, - Priority_Control new_priority + Scheduler_Node *node_to_extract ) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); - Scheduler_strong_APA_Node *node = - _Scheduler_strong_APA_Node_downcast( node_to_update ); - - _Scheduler_SMP_Node_update_priority( &node->Base, new_priority ); - _Scheduler_priority_Ready_queue_update( - &node->Ready_queue, - SCHEDULER_PRIORITY_UNMAP( new_priority ), - &self->Bit_map, - &self->Ready[ 0 ] - ); + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; + + self = _Scheduler_strong_APA_Get_self( context ); + node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); + + _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base ); + //Not removing it from allNodes since the node could go in the ready state. } -static Scheduler_strong_APA_Context * -_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler ) +/** + * @brief Extracts a node from the ready queue + * + * Removes the node from /a allNodes chain. + * + * @param scheduler The scheduler context + * @param[in] node_to_extract The node to extract. 
+ */ +void _Scheduler_strong_APA_Extract_from_ready( + Scheduler_Context *context, + Scheduler_Node *node_to_extract +) { - return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler ); + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; + + self = _Scheduler_strong_APA_Get_self( context ); + node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); + + _Assert( _Chain_Is_empty(self->allNodes) == false ); + _Assert( _Chain_Is_node_off_chain( &node->Node ) == false ); + + _Chain_Extract_unprotected( &node->Node ); //Removed from allNodes + _Chain_Set_off_chain( &node->Node ); } -void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) +/** + * @brief Moves a node from scheduled to ready state + * + * Calls _Scheduler_SMP_Extract_from_scheduled() to remove it from + * scheduled nodes list + * + * @param scheduler The scheduler context + * @param[in] scheduled_to_ready The node to move. + * @see _Scheduler_strong_APA_Insert_ready() + * + */ +void _Scheduler_strong_APA_Move_from_scheduled_to_ready( + Scheduler_Context *context, + Scheduler_Node *scheduled_to_ready +) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_context( scheduler ); + Priority_Control insert_priority; - _Scheduler_SMP_Initialize( &self->Base ); - _Priority_bit_map_Initialize( &self->Bit_map ); - _Scheduler_priority_Ready_queue_initialize( - &self->Ready[ 0 ], - scheduler->maximum_priority + _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready ); + insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready ); + + _Scheduler_strong_APA_Insert_ready( + context, + scheduled_to_ready, + insert_priority ); } -void _Scheduler_strong_APA_Node_initialize( - const Scheduler_Control *scheduler, - Scheduler_Node *node, - Thread_Control *the_thread, - Priority_Control priority +/** + * @brief Moves a node from ready to scheduled state. 
+ * + * Calls the corresponding SMP function \a _Scheduler_SMP_Insert_scheduled() + * with append insert_priority + * + * @param context The scheduler context. + * @param ready_to_scheduled Node which moves from ready state. + */ +void _Scheduler_strong_APA_Move_from_ready_to_scheduled( + Scheduler_Context *context, + Scheduler_Node *ready_to_scheduled ) { - Scheduler_Context *context; - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Node *the_node; - - the_node = _Scheduler_strong_APA_Node_downcast( node ); - _Scheduler_SMP_Node_initialize( - scheduler, - &the_node->Base, - the_thread, - priority - ); + Priority_Control insert_priority; - context = _Scheduler_Get_context( scheduler ); - self = _Scheduler_strong_APA_Get_self( context ); - _Scheduler_priority_Ready_queue_update( - &the_node->Ready_queue, - SCHEDULER_PRIORITY_UNMAP( priority ), - &self->Bit_map, - &self->Ready[ 0 ] + _Scheduler_strong_APA_Extract_from_ready( context, ready_to_scheduled ); + insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled ); + insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority ); + _Scheduler_SMP_Insert_scheduled( + context, + ready_to_scheduled, + insert_priority ); + //Note: The node still stays in the allNodes chain } -static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) +/** + * @brief Inserts a node in the ready queue. + * + * Adds it into the /a allNodes chain. + * + * @param context The scheduler context. + * @param node_base Node which is inserted into the queue. 
+ * @param insert_priority priority at which the node is inserted + */ + +void _Scheduler_strong_APA_Insert_ready( + Scheduler_Context *context, + Scheduler_Node *node_base, + Priority_Control insert_priority +) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; - return !_Priority_bit_map_Is_empty( &self->Bit_map ); + self = _Scheduler_strong_APA_Get_self( context ); + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + _Assert( _Chain_Is_node_off_chain( &node->Node ) == true ); + + _Chain_Append_unprotected( &self->allNodes, &node->Node ); } -static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( +/** + * @brief Allocates a processor for the node. + * + * Calls _Scheduler_SMP_Allocate_processor_exact() + * + * @param context The scheduler context. + * @param scheduled_base Node which is to be allocated the @victim_cpu. + * @param victim_base Node which was executing earlier on @victim_cpu + * @victim_cpu CPU on which the @scheduled_base would be allocated on + */ +void _Scheduler_strong_APA_Allocate_processor( Scheduler_Context *context, - Scheduler_Node *node + Scheduler_Node *scheduled_base, + Scheduler_Node *victim_base, + Per_CPU_Control *victim_cpu ) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); - - (void) node; + Scheduler_strong_APA_Node *scheduled; + + (void) victim_base; + scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base ); - return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first( - &self->Bit_map, - &self->Ready[ 0 ] + _Scheduler_SMP_Allocate_processor_exact( + context, + &(scheduled->Base.Base), + NULL, + victim_cpu ); } +/** + * @brief Blocks a node + * + * Changes the state of the node and extracts it from the queue + * calls _Scheduler_SMP_Block(). + * + * @param context The scheduler control instance. + * @param thread Thread correspoding to the @node. 
+ * @param node node which is to be blocked + */ void _Scheduler_strong_APA_Block( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ) { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - +//The extract from ready automatically removes the node from allNodes chain. _Scheduler_SMP_Block( context, - the_thread, + thread, node, - _Scheduler_SMP_Extract_from_scheduled, + _Scheduler_strong_APA_Extract_from_scheduled, _Scheduler_strong_APA_Extract_from_ready, _Scheduler_strong_APA_Get_highest_ready, _Scheduler_strong_APA_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_strong_APA_Allocate_processor ); } -static bool _Scheduler_strong_APA_Enqueue( +/** + * @brief Enqueues a node + * + * + * @param context The scheduler context. + * @param node node which is to be enqueued + * @param insert_priority priority at which the node should be enqueued. + */ +bool _Scheduler_strong_APA_Enqueue( Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority ) -{ +{//I'm hoping all this works on its own. return _Scheduler_SMP_Enqueue( context, node, @@ -256,17 +586,25 @@ static bool _Scheduler_strong_APA_Enqueue( _Scheduler_strong_APA_Insert_ready, _Scheduler_SMP_Insert_scheduled, _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_SMP_Get_lowest_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_strong_APA_Get_lowest_scheduled, + _Scheduler_strong_APA_Allocate_processor ); } -static bool _Scheduler_strong_APA_Enqueue_scheduled( +/** + * @brief Enqueues a node in the scheduled queue + * + * + * @param context The scheduler context. + * @param node node which is to be enqueued + * @param insert_priority priority at which the node should be enqueued. 
+ */ +bool _Scheduler_strong_APA_Enqueue_scheduled( Scheduler_Context *context, Scheduler_Node *node, - Priority_Control insert_priority + Priority_Control insert_priority ) -{ +{ return _Scheduler_SMP_Enqueue_scheduled( context, node, @@ -277,13 +615,24 @@ static bool _Scheduler_strong_APA_Enqueue_scheduled( _Scheduler_strong_APA_Insert_ready, _Scheduler_SMP_Insert_scheduled, _Scheduler_strong_APA_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_strong_APA_Allocate_processor ); } +/** + * @brief Unblocks a node + * + * Changes the state of the node and calls _Scheduler_SMP_Unblock(). + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. + * @param node node which is to be unblocked + * @see _Scheduler_strong_APA_Enqueue() + * @see _Scheduler_strong_APA_Do_update() + */ void _Scheduler_strong_APA_Unblock( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ) { @@ -291,19 +640,29 @@ void _Scheduler_strong_APA_Unblock( _Scheduler_SMP_Unblock( context, - the_thread, + thread, node, _Scheduler_strong_APA_Do_update, _Scheduler_strong_APA_Enqueue ); } -static bool _Scheduler_strong_APA_Do_ask_for_help( - Scheduler_Context *context, - Thread_Control *the_thread, - Scheduler_Node *node +/** + * @brief Calls the smp Ask_for_help + * + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node that asks for help. 
+ * @param node node associated with @thread + */ +bool _Scheduler_strong_APA_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node ) { + Scheduler_Context *context = _Scheduler_Get_context( scheduler ); + return _Scheduler_SMP_Ask_for_help( context, the_thread, @@ -312,14 +671,21 @@ static bool _Scheduler_strong_APA_Do_ask_for_help( _Scheduler_strong_APA_Insert_ready, _Scheduler_SMP_Insert_scheduled, _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_SMP_Get_lowest_scheduled, - _Scheduler_SMP_Allocate_processor_lazy + _Scheduler_strong_APA_Get_lowest_scheduled, + _Scheduler_strong_APA_Allocate_processor ); } +/** + * @brief Updates the priority of the node + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. + * @param node Node whose priority has to be updated + */ void _Scheduler_strong_APA_Update_priority( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ) { @@ -327,27 +693,23 @@ void _Scheduler_strong_APA_Update_priority( _Scheduler_SMP_Update_priority( context, - the_thread, + thread, node, _Scheduler_strong_APA_Extract_from_ready, _Scheduler_strong_APA_Do_update, _Scheduler_strong_APA_Enqueue, _Scheduler_strong_APA_Enqueue_scheduled, - _Scheduler_strong_APA_Do_ask_for_help + _Scheduler_strong_APA_Ask_for_help ); } - -bool _Scheduler_strong_APA_Ask_for_help( - const Scheduler_Control *scheduler, - Thread_Control *the_thread, - Scheduler_Node *node -) -{ - Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - - return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node ); -} - +/** + * @brief To Reconsider the help request + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. 
+ * @param node Node corresponding to @thread which asks for + * reconsideration + */ void _Scheduler_strong_APA_Reconsider_help_request( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -364,6 +726,14 @@ void _Scheduler_strong_APA_Reconsider_help_request( ); } +/** + * @brief Withdraws a node + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. + * @param node Node that has to be withdrawn + * @param next_state the state that the node goes to + */ void _Scheduler_strong_APA_Withdraw_node( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -381,10 +751,38 @@ void _Scheduler_strong_APA_Withdraw_node( _Scheduler_strong_APA_Extract_from_ready, _Scheduler_strong_APA_Get_highest_ready, _Scheduler_strong_APA_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_lazy + _Scheduler_strong_APA_Allocate_processor ); } +/** + * @brief To register an idle thread on a cpu + * + * For compatibility with SMP functions, + * does nothing in our implementation + */ +void _Scheduler_strong_APA_Register_idle( + Scheduler_Context *context, + Scheduler_Node *idle_base, + Per_CPU_Control *cpu +) +{ + (void) context; + (void) idle_base; + (void) cpu; + //We do not maintain a variable to access the scheduled + //node for a CPU. So this function does nothing. +} + +/** + * @brief Adds a processor to the scheduler instance + * + * and allocates an idle thread to the processor. + * + * @param scheduler The scheduler control instance. 
+ * @param idle Idle thread to be allocated to the processor + * + */ void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle @@ -397,10 +795,18 @@ void _Scheduler_strong_APA_Add_processor( idle, _Scheduler_strong_APA_Has_ready, _Scheduler_strong_APA_Enqueue_scheduled, - _Scheduler_SMP_Do_nothing_register_idle + _Scheduler_strong_APA_Register_idle ); } + +/** + * @brief Removes a processor from the scheduler instance + * + * @param scheduler The scheduler control instance. + * @param cpu processor that is removed + * + */ Thread_Control *_Scheduler_strong_APA_Remove_processor( const Scheduler_Control *scheduler, Per_CPU_Control *cpu @@ -416,9 +822,18 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor( ); } + +/** + * @brief Called when a node yields the processor + * + * @param scheduler The scheduler control instance. + * @param thread Thread corresponding to @node + * @param node Node that yield the processor + * + */ void _Scheduler_strong_APA_Yield( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ) { @@ -426,10 +841,179 @@ void _Scheduler_strong_APA_Yield( _Scheduler_SMP_Yield( context, - the_thread, + thread, node, _Scheduler_strong_APA_Extract_from_ready, _Scheduler_strong_APA_Enqueue, _Scheduler_strong_APA_Enqueue_scheduled ); } + +/** + * @brief Called by _Scheduler_strong_APA_Set_affinity() + * + */ +void _Scheduler_strong_APA_Do_set_affinity( + Scheduler_Context *context, + Scheduler_Node *node_base, + void *arg +) +{ + Scheduler_strong_APA_Node *node; + const Processor_mask *affinity; + + node = _Scheduler_strong_APA_Node_downcast( node_base ); + affinity = arg; + node->affinity = *affinity; +} + +/** + * @brief Starts an idle thread on a CPU + * + * @param scheduler The scheduler control instance. 
+ * @param idle Idle Thread + * @param cpu processor that gets the idle thread + * + */ +void _Scheduler_strong_APA_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *idle, + Per_CPU_Control *cpu +) +{ + Scheduler_Context *context; + + context = _Scheduler_Get_context( scheduler ); + + _Scheduler_SMP_Do_start_idle( + context, + idle, + cpu, + _Scheduler_strong_APA_Register_idle + ); +} + +/** + * @brief Pins a node to a cpu + * + * @param scheduler The scheduler control instance. + * @param thread Thread corresponding to @node + * @param node_base node which gets pinned + * @param cpu processor that the node gets pinned to + * + */ +void _Scheduler_strong_APA_Pin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +) +{ + Scheduler_strong_APA_Node *node; + uint32_t pin_cpu; + + (void) scheduler; + node = _Scheduler_strong_APA_Node_downcast( node_base ); + pin_cpu = (uint32_t) _Per_CPU_Get_index( cpu ); + + _Assert( + _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED + ); + + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + _Processor_mask_Zero( &node->affinity ); + _Processor_mask_Set( &node->affinity, pin_cpu ); +} + +/** + * @brief Unpins a node + * + * and sets it affinity back to normal. + * + * @param scheduler The scheduler control instance. 
+ * @param thread Thread corresponding to @node + * @param node_base node which gets unpinned + * @param cpu processor that the node gets unpinned from : UNUSED + * + */ +void _Scheduler_strong_APA_Unpin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +) +{ + Scheduler_strong_APA_Node *node; + + (void) scheduler; + (void) cpu; + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + _Assert( + _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED + ); + + _Processor_mask_Zero( &node->affinity ); + _Processor_mask_Assign( &node->affinity, &node->unpin_affinity ); +} + +/** + * @brief Checks if the processor set of the scheduler is the subset of the affinity set. + * + * Default implementation of the set affinity scheduler operation. + * + * @param scheduler This parameter is unused. + * @param thread This parameter is unused. + * @param node This parameter is unused. + * @param affinity The new processor affinity set for the thread. + * + * @see _Scheduler_strong_APA_Do_set_affinity() + * + * @retval true The processor set of the scheduler is a subset of the affinity set. + * @retval false The processor set of the scheduler is not a subset of the affinity set. + */ +bool _Scheduler_strong_APA_Set_affinity( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + const Processor_mask *affinity +) +{ + Scheduler_Context *context; + Scheduler_strong_APA_Node *node; + Processor_mask local_affinity; + + context = _Scheduler_Get_context( scheduler ); + _Processor_mask_And( &local_affinity, &context->Processors, affinity ); + + if ( _Processor_mask_Is_zero( &local_affinity ) ) { + return false; + } + + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + if ( _Processor_mask_Is_equal( &node->affinity, affinity ) ) + return true; //Nothing to do. Return true. 
+ + _Processor_mask_Zero( &node->affinity ); + _Processor_mask_Zero( &node->unpin_affinity ); + + _Processor_mask_Assign( &node->affinity, &local_affinity ); + _Processor_mask_Assign( &node->unpin_affinity, &local_affinity ); + + _Scheduler_SMP_Set_affinity( + context, + thread, + node_base, + &local_affinity, + _Scheduler_strong_APA_Do_set_affinity, + _Scheduler_strong_APA_Extract_from_ready, + _Scheduler_strong_APA_Get_highest_ready, + _Scheduler_strong_APA_Move_from_ready_to_scheduled, + _Scheduler_strong_APA_Enqueue, + _Scheduler_strong_APA_Allocate_processor + ); + + return true; +} From 8731311060a877a860fe6a64820fbb354f20dfc0 Mon Sep 17 00:00:00 2001 From: richidubey Date: Mon, 27 Jul 2020 14:00:23 +0530 Subject: [PATCH 02/29] Removed all compilation errors --- .../include/rtems/score/schedulerstrongapa.h | 14 +- cpukit/score/src/schedulerstrongapa.h | 310 ++++++++++++++++++ 2 files changed, 311 insertions(+), 13 deletions(-) create mode 100644 cpukit/score/src/schedulerstrongapa.h diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 4ddae3235a4..eaef8873df8 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -6,25 +6,13 @@ * @brief Strong APA Scheduler API */ -/* - * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. 
- */ #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #include #include +#include #ifdef __cplusplus extern "C" { diff --git a/cpukit/score/src/schedulerstrongapa.h b/cpukit/score/src/schedulerstrongapa.h new file mode 100644 index 00000000000..eaef8873df8 --- /dev/null +++ b/cpukit/score/src/schedulerstrongapa.h @@ -0,0 +1,310 @@ +/** + * @file + * + * @ingroup RTEMSScoreSchedulerStrongAPA + * + * @brief Strong APA Scheduler API + */ + + +#ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H +#define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler + * + * @ingroup RTEMSScoreSchedulerSMP + * + * @brief Strong APA Scheduler + * + * This is an implementation of the Strong APA scheduler defined by + * Bradenbug et. al in Linux's Processor Affinity API, Refined: Shifting Real-Time Tasks Towards Higher Schedulability. + * + * @{ + */ + + /** + * @brief Scheduler context for Strong APA + * scheduler. + * + * Has the structure for scheduler context + * and Node defintion for Strong APA scheduler + */ +typedef struct { + /** + * @brief SMP Context to refer to SMP implementation + * code. + */ + Scheduler_SMP_Context Base; + + /** + * @brief Chain of all the nodes present in + * the system. Accounts for ready and scheduled nodes. + */ + Chain_Control allNodes; + +} Scheduler_strong_APA_Context; + +/** + * @brief Scheduler node specialization for Strong APA + * schedulers. + */ +typedef struct { + /** + * @brief Chain node for + * Scheduler_strong_APA_Context::allNodes + * + */ + Chain_Node Node; + + /** + * @brief SMP scheduler node. + */ + Scheduler_SMP_Node Base; + + /** + * @brief The associated affinity set of this node. + */ + Processor_mask affinity; + + /** + * @brief The associated affinity set of this node + * to be used while unpinning the node. 
+ */ + Processor_mask unpin_affinity; + +} Scheduler_strong_APA_Node; + +/** + * @brief CPU structure to be used + * while traversing in the FIFO Queue + */ +typedef struct CPU +{ + /** + * @brief Chain node for + * _Scheduler_strong_APA_Get_highest_ready::Queue + * and _Scheduler_strong_APA_Get_lowest_scheduled::Queue + */ + Chain_Node node; + + /** + * @brief cpu associated with the node + */ + Per_CPU_Control cpu; + +}CPU; + +/** + * @brief Entry points for the Strong APA Scheduler. + */ +#define SCHEDULER_STRONG_APA_ENTRY_POINTS \ + { \ + _Scheduler_strong_APA_Initialize, \ + _Scheduler_default_Schedule, \ + _Scheduler_strong_APA_Yield, \ + _Scheduler_strong_APA_Block, \ + _Scheduler_strong_APA_Unblock, \ + _Scheduler_strong_APA_Update_priority, \ + _Scheduler_default_Map_priority, \ + _Scheduler_default_Unmap_priority, \ + _Scheduler_strong_APA_Ask_for_help, \ + _Scheduler_strong_APA_Reconsider_help_request, \ + _Scheduler_strong_APA_Withdraw_node, \ + _Scheduler_strong_APA_Pin, \ + _Scheduler_strong_APA_Unpin, \ + _Scheduler_strong_APA_Add_processor, \ + _Scheduler_strong_APA_Remove_processor, \ + _Scheduler_strong_APA_Node_initialize, \ + _Scheduler_default_Node_destroy, \ + _Scheduler_default_Release_job, \ + _Scheduler_default_Cancel_job, \ + _Scheduler_default_Tick, \ + _Scheduler_strong_APA_Start_idle \ + _Scheduler_strong_APA_Set_affinity \ + } + +void _Scheduler_strong_APA_Initialize( + const Scheduler_Control *scheduler + ); + + +void _Scheduler_strong_APA_Node_initialize( + const Scheduler_Control *scheduler, + Scheduler_Node *node, + Thread_Control *the_thread, + Priority_Control priority +); + +void _Scheduler_strong_APA_Do_update( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control new_priority +); + +bool _Scheduler_strong_APA_Has_ready( + Scheduler_Context *context + ); + +Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( + Scheduler_Context *context, + Scheduler_Node *filter +); + +Scheduler_Node 
*_Scheduler_strong_APA_Get_lowest_scheduled( + Scheduler_Context *context, + Scheduler_Node *filter_base +); + +void _Scheduler_strong_APA_Extract_from_scheduled( + Scheduler_Context *context, + Scheduler_Node *node_to_extract +); + +void _Scheduler_strong_APA_Extract_from_ready( + Scheduler_Context *context, + Scheduler_Node *node_to_extract +); + +void _Scheduler_strong_APA_Move_from_scheduled_to_ready( + Scheduler_Context *context, + Scheduler_Node *scheduled_to_ready +); + +void _Scheduler_strong_APA_Move_from_ready_to_scheduled( + Scheduler_Context *context, + Scheduler_Node *ready_to_scheduled +); + +void _Scheduler_strong_APA_Insert_ready( + Scheduler_Context *context, + Scheduler_Node *node_base, + Priority_Control insert_priority +); + +void _Scheduler_strong_APA_Allocate_processor( + Scheduler_Context *context, + Scheduler_Node *scheduled_base, + Scheduler_Node *victim_base, + Per_CPU_Control *victim_cpu +); + +void _Scheduler_strong_APA_Block( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node +); + +bool _Scheduler_strong_APA_Enqueue( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control insert_priority +); + +bool _Scheduler_strong_APA_Enqueue_scheduled( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control insert_priority +); + +void _Scheduler_strong_APA_Unblock( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node +); + +bool _Scheduler_strong_APA_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_strong_APA_Update_priority( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node +); + +void _Scheduler_strong_APA_Reconsider_help_request( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +); + +void _Scheduler_strong_APA_Withdraw_node( + const Scheduler_Control *scheduler, + Thread_Control 
*the_thread, + Scheduler_Node *node, + Thread_Scheduler_state next_state +); + +void _Scheduler_strong_APA_Register_idle( + Scheduler_Context *context, + Scheduler_Node *idle_base, + Per_CPU_Control *cpu +); + +void _Scheduler_strong_APA_Add_processor( + const Scheduler_Control *scheduler, + Thread_Control *idle +); + +Thread_Control *_Scheduler_strong_APA_Remove_processor( + const Scheduler_Control *scheduler, + Per_CPU_Control *cpu +); + +void _Scheduler_strong_APA_Yield( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node +); + +void _Scheduler_strong_APA_Do_set_affinity( + Scheduler_Context *context, + Scheduler_Node *node_base, + void *arg +); + +void _Scheduler_strong_APA_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *idle, + Per_CPU_Control *cpu +); + +void _Scheduler_strong_APA_Pin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +); + +void _Scheduler_strong_APA_Unpin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +); + +bool _Scheduler_strong_APA_Set_affinity( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + const Processor_mask *affinity +); + +/** @} */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _RTEMS_SCORE_SCHEDULERSTRONGAPA_H */ From 3a9843f61bbd547d150d2ee2d791668cfb5aa282 Mon Sep 17 00:00:00 2001 From: richidubey Date: Mon, 27 Jul 2020 14:01:46 +0530 Subject: [PATCH 03/29] Removed file from wrong place --- cpukit/score/src/schedulerstrongapa.h | 310 -------------------------- 1 file changed, 310 deletions(-) delete mode 100644 cpukit/score/src/schedulerstrongapa.h diff --git a/cpukit/score/src/schedulerstrongapa.h b/cpukit/score/src/schedulerstrongapa.h deleted file mode 100644 index eaef8873df8..00000000000 --- a/cpukit/score/src/schedulerstrongapa.h +++ /dev/null @@ -1,310 
+0,0 @@ -/** - * @file - * - * @ingroup RTEMSScoreSchedulerStrongAPA - * - * @brief Strong APA Scheduler API - */ - - -#ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H -#define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler - * - * @ingroup RTEMSScoreSchedulerSMP - * - * @brief Strong APA Scheduler - * - * This is an implementation of the Strong APA scheduler defined by - * Bradenbug et. al in Linux's Processor Affinity API, Refined: Shifting Real-Time Tasks Towards Higher Schedulability. - * - * @{ - */ - - /** - * @brief Scheduler context for Strong APA - * scheduler. - * - * Has the structure for scheduler context - * and Node defintion for Strong APA scheduler - */ -typedef struct { - /** - * @brief SMP Context to refer to SMP implementation - * code. - */ - Scheduler_SMP_Context Base; - - /** - * @brief Chain of all the nodes present in - * the system. Accounts for ready and scheduled nodes. - */ - Chain_Control allNodes; - -} Scheduler_strong_APA_Context; - -/** - * @brief Scheduler node specialization for Strong APA - * schedulers. - */ -typedef struct { - /** - * @brief Chain node for - * Scheduler_strong_APA_Context::allNodes - * - */ - Chain_Node Node; - - /** - * @brief SMP scheduler node. - */ - Scheduler_SMP_Node Base; - - /** - * @brief The associated affinity set of this node. - */ - Processor_mask affinity; - - /** - * @brief The associated affinity set of this node - * to be used while unpinning the node. 
- */ - Processor_mask unpin_affinity; - -} Scheduler_strong_APA_Node; - -/** - * @brief CPU structure to be used - * while traversing in the FIFO Queue - */ -typedef struct CPU -{ - /** - * @brief Chain node for - * _Scheduler_strong_APA_Get_highest_ready::Queue - * and _Scheduler_strong_APA_Get_lowest_scheduled::Queue - */ - Chain_Node node; - - /** - * @brief cpu associated with the node - */ - Per_CPU_Control cpu; - -}CPU; - -/** - * @brief Entry points for the Strong APA Scheduler. - */ -#define SCHEDULER_STRONG_APA_ENTRY_POINTS \ - { \ - _Scheduler_strong_APA_Initialize, \ - _Scheduler_default_Schedule, \ - _Scheduler_strong_APA_Yield, \ - _Scheduler_strong_APA_Block, \ - _Scheduler_strong_APA_Unblock, \ - _Scheduler_strong_APA_Update_priority, \ - _Scheduler_default_Map_priority, \ - _Scheduler_default_Unmap_priority, \ - _Scheduler_strong_APA_Ask_for_help, \ - _Scheduler_strong_APA_Reconsider_help_request, \ - _Scheduler_strong_APA_Withdraw_node, \ - _Scheduler_strong_APA_Pin, \ - _Scheduler_strong_APA_Unpin, \ - _Scheduler_strong_APA_Add_processor, \ - _Scheduler_strong_APA_Remove_processor, \ - _Scheduler_strong_APA_Node_initialize, \ - _Scheduler_default_Node_destroy, \ - _Scheduler_default_Release_job, \ - _Scheduler_default_Cancel_job, \ - _Scheduler_default_Tick, \ - _Scheduler_strong_APA_Start_idle \ - _Scheduler_strong_APA_Set_affinity \ - } - -void _Scheduler_strong_APA_Initialize( - const Scheduler_Control *scheduler - ); - - -void _Scheduler_strong_APA_Node_initialize( - const Scheduler_Control *scheduler, - Scheduler_Node *node, - Thread_Control *the_thread, - Priority_Control priority -); - -void _Scheduler_strong_APA_Do_update( - Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control new_priority -); - -bool _Scheduler_strong_APA_Has_ready( - Scheduler_Context *context - ); - -Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( - Scheduler_Context *context, - Scheduler_Node *filter -); - -Scheduler_Node 
*_Scheduler_strong_APA_Get_lowest_scheduled( - Scheduler_Context *context, - Scheduler_Node *filter_base -); - -void _Scheduler_strong_APA_Extract_from_scheduled( - Scheduler_Context *context, - Scheduler_Node *node_to_extract -); - -void _Scheduler_strong_APA_Extract_from_ready( - Scheduler_Context *context, - Scheduler_Node *node_to_extract -); - -void _Scheduler_strong_APA_Move_from_scheduled_to_ready( - Scheduler_Context *context, - Scheduler_Node *scheduled_to_ready -); - -void _Scheduler_strong_APA_Move_from_ready_to_scheduled( - Scheduler_Context *context, - Scheduler_Node *ready_to_scheduled -); - -void _Scheduler_strong_APA_Insert_ready( - Scheduler_Context *context, - Scheduler_Node *node_base, - Priority_Control insert_priority -); - -void _Scheduler_strong_APA_Allocate_processor( - Scheduler_Context *context, - Scheduler_Node *scheduled_base, - Scheduler_Node *victim_base, - Per_CPU_Control *victim_cpu -); - -void _Scheduler_strong_APA_Block( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node -); - -bool _Scheduler_strong_APA_Enqueue( - Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control insert_priority -); - -bool _Scheduler_strong_APA_Enqueue_scheduled( - Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control insert_priority -); - -void _Scheduler_strong_APA_Unblock( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node -); - -bool _Scheduler_strong_APA_Ask_for_help( - const Scheduler_Control *scheduler, - Thread_Control *the_thread, - Scheduler_Node *node -); - -void _Scheduler_strong_APA_Update_priority( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node -); - -void _Scheduler_strong_APA_Reconsider_help_request( - const Scheduler_Control *scheduler, - Thread_Control *the_thread, - Scheduler_Node *node -); - -void _Scheduler_strong_APA_Withdraw_node( - const Scheduler_Control *scheduler, - Thread_Control 
*the_thread, - Scheduler_Node *node, - Thread_Scheduler_state next_state -); - -void _Scheduler_strong_APA_Register_idle( - Scheduler_Context *context, - Scheduler_Node *idle_base, - Per_CPU_Control *cpu -); - -void _Scheduler_strong_APA_Add_processor( - const Scheduler_Control *scheduler, - Thread_Control *idle -); - -Thread_Control *_Scheduler_strong_APA_Remove_processor( - const Scheduler_Control *scheduler, - Per_CPU_Control *cpu -); - -void _Scheduler_strong_APA_Yield( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node -); - -void _Scheduler_strong_APA_Do_set_affinity( - Scheduler_Context *context, - Scheduler_Node *node_base, - void *arg -); - -void _Scheduler_strong_APA_Start_idle( - const Scheduler_Control *scheduler, - Thread_Control *idle, - Per_CPU_Control *cpu -); - -void _Scheduler_strong_APA_Pin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -); - -void _Scheduler_strong_APA_Unpin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -); - -bool _Scheduler_strong_APA_Set_affinity( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - const Processor_mask *affinity -); - -/** @} */ - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* _RTEMS_SCORE_SCHEDULERSTRONGAPA_H */ From af96cca61c755e0470497e2401a7cd0a1a9b1821 Mon Sep 17 00:00:00 2001 From: richidubey Date: Mon, 27 Jul 2020 17:52:21 +0530 Subject: [PATCH 04/29] Corrected Assert Statements --- cpukit/score/src/schedulerstrongapa.c | 84 ++++++++++++++------------- 1 file changed, 43 insertions(+), 41 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index c26c2488566..7c5077e9729 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -5,20 +5,6 @@ * * @brief Strong APA 
Scheduler Implementation */ - -/* - * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved. - * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. - */ #ifdef HAVE_CONFIG_H #include "config.h" @@ -26,6 +12,8 @@ #include #include +#include +#include static inline Scheduler_strong_APA_Context * _Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler ) @@ -103,7 +91,6 @@ void _Scheduler_strong_APA_Do_update( ) { Scheduler_SMP_Node *smp_node; - (void) context; smp_node = _Scheduler_SMP_Node_downcast( node ); @@ -192,9 +179,8 @@ Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( _Chain_Initialize_empty(&Queue); - Qcpu = malloc( sizeof(CPU) ); - assert (Qcpu != NULL); //Should it be NULL? - Qcpu->cpu=thread_cpu; + Qcpu = rtems_malloc( sizeof(CPU) ); //Does not return any errornum on failure + Qcpu->cpu=*thread_cpu; _Chain_Initialize_node( &Qcpu->node ); _Chain_Append_unprotected( &Queue, &Qcpu->node ); //Insert thread_CPU in the Queue @@ -203,7 +189,7 @@ Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( while( !_Chain_Is_empty( &Queue) ) { Qcpu = (CPU*) _Chain_Get_first_unprotected( &Queue ); - curr_CPU = Qcpu->cpu; + curr_CPU = &Qcpu->cpu; tail = _Chain_Immutable_tail( &self->allNodes ); next = _Chain_First( &self->allNodes ); @@ -211,7 +197,7 @@ Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_strong_APA_Node *node; node = (Scheduler_strong_APA_Node *) next; - if( node->affinity & (1 << _Per_CPU_Get_index( curr_CPU ) ) ) { + if( _Processor_mask_Is_set( &node->affinity, _Per_CPU_Get_index( curr_CPU ) ) ) { //Checks if the thread_CPU is in the affinity set of the node if(_Scheduler_SMP_Node_state( &node->Base.Base ) @@ -220,8 +206,8 @@ Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( assigned_cpu = _Thread_Get_CPU( 
node->Base.Base.user ); if(visited[ _Per_CPU_Get_index( assigned_cpu ) ] == false) { - Qcpu = malloc( sizeof(CPU) ); - assert (Qcpu != NULL); //Should it be NULL? + Qcpu = rtems_malloc( sizeof(CPU) ); + //rtems_malloc does not return a errnum in case of failure Qcpu->cpu=*assigned_cpu; _Chain_Initialize_node( &Qcpu->node ); @@ -289,6 +275,10 @@ Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_strong_APA_Node *Scurr_node; //Current Strong_APA_Node Scheduler_strong_APA_Node *filter_node; + + ret=NULL; //To remove compiler warning. + //ret would always point to the node with the lowest priority + //node unless the affinity of filter_base is NULL. filter_node = _Scheduler_strong_APA_Node_downcast( filter_base ); @@ -298,12 +288,12 @@ Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( _Chain_Initialize_empty(&Queue); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - if( ( filter_node->affinity & (1<affinity , cpu_index) + && visited[ cpu_index ] == false ) ) { //Checks if the thread_CPU is in the affinity set of the node Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if( _Per_CPU_Is_processor_online( cpu ) ) { - Qcpu = malloc( sizeof(CPU) ); - assert (Qcpu != NULL); //Should it be NULL? + Qcpu = rtems_malloc( sizeof(CPU) ); //No errornum returned in case of failure Qcpu->cpu=*cpu; _Chain_Initialize_node( &Qcpu->node ); @@ -335,13 +325,12 @@ Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scurr_node = _Scheduler_strong_APA_Node_downcast( curr_node ); if( !curr_thread->is_idle ) { for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - if( ( Scurr_node->affinity & (1<affinity , cpu_index ) ) { //Checks if the thread_CPU is in the affinity set of the node Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if( _Per_CPU_Is_processor_online( cpu ) && visited[ cpu_index ] == false ) { - Qcpu = malloc( sizeof(CPU) ); - assert (Qcpu != NULL); //Should it be NULL? 
+ Qcpu = rtems_malloc( sizeof(CPU) ); Qcpu->cpu=*cpu; _Chain_Initialize_node( &Qcpu->node ); @@ -369,7 +358,6 @@ Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( //Backtrack on the path from //_Thread_Get_CPU(ret->user) to ret, shifting along every task } - return ret; } @@ -415,8 +403,8 @@ void _Scheduler_strong_APA_Extract_from_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); - _Assert( _Chain_Is_empty(self->allNodes) == false ); - _Assert( _Chain_Is_node_off_chain( &node->Node ) == false ); + _Assert( !_Chain_Is_empty(self->allNodes) ); + _Assert( !_Chain_Is_node_off_chain( &node->Node ) ); _Chain_Extract_unprotected( &node->Node ); //Removed from allNodes _Chain_Set_off_chain( &node->Node ); @@ -499,7 +487,7 @@ void _Scheduler_strong_APA_Insert_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_base ); - _Assert( _Chain_Is_node_off_chain( &node->Node ) == true ); + _Assert( !_Chain_Is_node_off_chain( &node->Node ) ); _Chain_Append_unprotected( &self->allNodes, &node->Node ); } @@ -647,6 +635,25 @@ void _Scheduler_strong_APA_Unblock( ); } +static inline bool _Scheduler_strong_APA_Do_ask_for_help( + Scheduler_Context *context, + Thread_Control *the_thread, + Scheduler_Node *node +) +{ + return _Scheduler_SMP_Ask_for_help( + context, + the_thread, + node, + _Scheduler_SMP_Priority_less_equal, + _Scheduler_strong_APA_Insert_ready, + _Scheduler_SMP_Insert_scheduled, + _Scheduler_strong_APA_Move_from_scheduled_to_ready, + _Scheduler_strong_APA_Get_lowest_scheduled, + _Scheduler_strong_APA_Allocate_processor + ); +} + /** * @brief Calls the smp Ask_for_help * @@ -663,19 +670,14 @@ bool _Scheduler_strong_APA_Ask_for_help( { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - return _Scheduler_SMP_Ask_for_help( + return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, - node, - 
_Scheduler_SMP_Priority_less_equal, - _Scheduler_strong_APA_Insert_ready, - _Scheduler_SMP_Insert_scheduled, - _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_strong_APA_Get_lowest_scheduled, - _Scheduler_strong_APA_Allocate_processor + node ); } + /** * @brief Updates the priority of the node * @@ -699,7 +701,7 @@ void _Scheduler_strong_APA_Update_priority( _Scheduler_strong_APA_Do_update, _Scheduler_strong_APA_Enqueue, _Scheduler_strong_APA_Enqueue_scheduled, - _Scheduler_strong_APA_Ask_for_help + _Scheduler_strong_APA_Do_ask_for_help ); } /** From dfda77a4828b1e00d41ea6b7af53ddbbcffde346 Mon Sep 17 00:00:00 2001 From: richidubey Date: Tue, 28 Jul 2020 19:09:49 +0530 Subject: [PATCH 05/29] Removed all compiler errors --- cpukit/include/rtems/score/schedulerstrongapa.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index eaef8873df8..ef3677ba893 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -128,7 +128,7 @@ typedef struct CPU _Scheduler_default_Release_job, \ _Scheduler_default_Cancel_job, \ _Scheduler_default_Tick, \ - _Scheduler_strong_APA_Start_idle \ + _Scheduler_strong_APA_Start_idle, \ _Scheduler_strong_APA_Set_affinity \ } From 66455b82afe405dbd5cc0ee3453eb72958f75dac Mon Sep 17 00:00:00 2001 From: richidubey Date: Thu, 30 Jul 2020 17:23:20 +0530 Subject: [PATCH 06/29] Resolving reviews on pull request --- .../include/rtems/score/schedulerstrongapa.h | 297 +++++---- cpukit/score/src/schedulerstrongapa.c | 594 ++++++------------ 2 files changed, 348 insertions(+), 543 deletions(-) diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index ef3677ba893..cae588a1570 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -6,7 +6,23 @@ * 
@brief Strong APA Scheduler API */ - +/* + * Copyright (c) 2013, 2018 embedded brains GmbH, 2020 Richi Dubey. + * All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * + * + * Richi Dubey: richidubey@gmail.com + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H @@ -26,14 +42,14 @@ extern "C" { * @brief Strong APA Scheduler * * This is an implementation of the Strong APA scheduler defined by - * Bradenbug et. al in Linux's Processor Affinity API, Refined: Shifting Real-Time Tasks Towards Higher Schedulability. + * Cerqueira et al. in Linux's Processor Affinity API, Refined: + * Shifting Real-Time Tasks Towards Higher Schedulability. * * @{ */ /** - * @brief Scheduler context for Strong APA - * scheduler. + * @brief Scheduler context for Strong APA scheduler. * * Has the structure for scheduler context * and Node defintion for Strong APA scheduler @@ -49,19 +65,15 @@ typedef struct { * @brief Chain of all the nodes present in * the system. Accounts for ready and scheduled nodes. */ - Chain_Control allNodes; - + Chain_Control all_nodes; } Scheduler_strong_APA_Context; /** - * @brief Scheduler node specialization for Strong APA - * schedulers. + * @brief Scheduler node specialization for Strong APA schedulers. */ typedef struct { /** - * @brief Chain node for - * Scheduler_strong_APA_Context::allNodes - * + * @brief Chain node for Scheduler_strong_APA_Context::allNodes */ Chain_Node Node; @@ -73,21 +85,18 @@ typedef struct { /** * @brief The associated affinity set of this node. */ - Processor_mask affinity; + Processor_mask Affinity; /** - * @brief The associated affinity set of this node - * to be used while unpinning the node. + * @brief The associated affinity set of this node to be used while unpinning the node. 
*/ - Processor_mask unpin_affinity; - + Processor_mask Unpin_affinity; } Scheduler_strong_APA_Node; /** - * @brief CPU structure to be used - * while traversing in the FIFO Queue + * @brief CPU structure to be used while traversing in the FIFO Queue */ -typedef struct CPU +typedef struct Scheduler_strong_APA_CPU { /** * @brief Chain node for @@ -100,8 +109,7 @@ typedef struct CPU * @brief cpu associated with the node */ Per_CPU_Control cpu; - -}CPU; +}Scheduler_strong_APA_CPU; /** * @brief Entry points for the Strong APA Scheduler. @@ -132,113 +140,114 @@ typedef struct CPU _Scheduler_strong_APA_Set_affinity \ } +/** + * @brief Initializes the Strong_APA scheduler. + * + * Sets the chain containing all the nodes to empty + * and initializes the SMP scheduler. + * + * @param scheduler used to get reference to Strong APA scheduler context + * @retval void + * @see _Scheduler_strong_APA_Node_initialize() + */ void _Scheduler_strong_APA_Initialize( - const Scheduler_Control *scheduler - ); + const Scheduler_Control *scheduler +); - -void _Scheduler_strong_APA_Node_initialize( +/** + * @brief Called when a node yields the processor + * + * @param scheduler The scheduler control instance. 
+ * @param thread Thread corresponding to @node + * @param node Node that yield the processor + */ +void _Scheduler_strong_APA_Yield( const Scheduler_Control *scheduler, - Scheduler_Node *node, - Thread_Control *the_thread, - Priority_Control priority -); - -void _Scheduler_strong_APA_Do_update( - Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control new_priority -); - -bool _Scheduler_strong_APA_Has_ready( - Scheduler_Context *context - ); - -Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( - Scheduler_Context *context, - Scheduler_Node *filter -); - -Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( - Scheduler_Context *context, - Scheduler_Node *filter_base -); - -void _Scheduler_strong_APA_Extract_from_scheduled( - Scheduler_Context *context, - Scheduler_Node *node_to_extract -); - -void _Scheduler_strong_APA_Extract_from_ready( - Scheduler_Context *context, - Scheduler_Node *node_to_extract -); - -void _Scheduler_strong_APA_Move_from_scheduled_to_ready( - Scheduler_Context *context, - Scheduler_Node *scheduled_to_ready -); - -void _Scheduler_strong_APA_Move_from_ready_to_scheduled( - Scheduler_Context *context, - Scheduler_Node *ready_to_scheduled -); - -void _Scheduler_strong_APA_Insert_ready( - Scheduler_Context *context, - Scheduler_Node *node_base, - Priority_Control insert_priority -); - -void _Scheduler_strong_APA_Allocate_processor( - Scheduler_Context *context, - Scheduler_Node *scheduled_base, - Scheduler_Node *victim_base, - Per_CPU_Control *victim_cpu + Thread_Control *thread, + Scheduler_Node *node ); +/** + * @brief Blocks a node + * + * Changes the state of the node and extracts it from the queue + * calls _Scheduler_SMP_Block(). + * + * @param context The scheduler control instance. + * @param thread Thread correspoding to the @node. 
+ * @param node node which is to be blocked + */ void _Scheduler_strong_APA_Block( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node ); -bool _Scheduler_strong_APA_Enqueue( - Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control insert_priority -); - -bool _Scheduler_strong_APA_Enqueue_scheduled( - Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control insert_priority -); - +/** + * @brief Unblocks a node + * + * Changes the state of the node and calls _Scheduler_SMP_Unblock(). + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. + * @param node node which is to be unblocked + * @see _Scheduler_strong_APA_Enqueue() + * @see _Scheduler_strong_APA_Do_update() + */ void _Scheduler_strong_APA_Unblock( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node ); -bool _Scheduler_strong_APA_Ask_for_help( +/** + * @brief Updates the priority of the node + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. + * @param node Node whose priority has to be updated + */ +void _Scheduler_strong_APA_Update_priority( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ); -void _Scheduler_strong_APA_Update_priority( +/** + * @brief Calls the SMP Ask_for_help + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node that asks for help. + * @param node node associated with @thread + */ +bool _Scheduler_strong_APA_Ask_for_help( const Scheduler_Control *scheduler, - Thread_Control *thread, + Thread_Control *the_thread, Scheduler_Node *node ); +/** + * @brief To Reconsider the help request + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. 
+ * @param node Node corresponding to @thread which asks for + * reconsideration + */ void _Scheduler_strong_APA_Reconsider_help_request( const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node ); +/** + * @brief Withdraws a node + * + * @param scheduler The scheduler control instance. + * @param thread Thread correspoding to the @node. + * @param node Node that has to be withdrawn + * @param next_state the state that the node goes to + */ void _Scheduler_strong_APA_Withdraw_node( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -246,61 +255,99 @@ void _Scheduler_strong_APA_Withdraw_node( Thread_Scheduler_state next_state ); -void _Scheduler_strong_APA_Register_idle( - Scheduler_Context *context, - Scheduler_Node *idle_base, - Per_CPU_Control *cpu +/** + * @brief Pins a node to a cpu + * + * @param scheduler The scheduler control instance. + * @param thread Thread corresponding to @node + * @param node_base node which gets pinned + * @param cpu processor that the node gets pinned to + */ +void _Scheduler_strong_APA_Pin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +); + +/** + * @brief Unpins a node + * + * and sets it affinity back to normal. + * + * @param scheduler The scheduler control instance. + * @param thread Thread corresponding to @node + * @param node_base node which gets unpinned + * @param cpu processor that the node gets unpinned from : UNUSED + */ +void _Scheduler_strong_APA_Unpin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu ); +/** + * @brief Adds a processor to the scheduler instance + * + * and allocates an idle thread to the processor. + * + * @param scheduler The scheduler control instance. 
+ * @param idle Idle thread to be allocated to the processor + */ void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle ); +/** + * @brief Removes a processor from the scheduler instance + * + * @param scheduler The scheduler control instance. + * @param cpu processor that is removed + */ Thread_Control *_Scheduler_strong_APA_Remove_processor( const Scheduler_Control *scheduler, Per_CPU_Control *cpu ); -void _Scheduler_strong_APA_Yield( +/** + * @brief Initializes the node with the given priority. + * + * @param scheduler The scheduler control instance. + * @param[out] node The node to initialize. + * @param the_thread The thread of the node to initialize. + * @param priority The priority for @a node. + */ +void _Scheduler_strong_APA_Node_initialize( const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node -); - -void _Scheduler_strong_APA_Do_set_affinity( - Scheduler_Context *context, - Scheduler_Node *node_base, - void *arg + Scheduler_Node *node, + Thread_Control *the_thread, + Priority_Control priority ); +/** + * @brief Starts an idle thread on a CPU + * + * @param scheduler The scheduler control instance. 
+ * @param idle Idle Thread + * @param cpu processor that gets the idle thread + */ void _Scheduler_strong_APA_Start_idle( const Scheduler_Control *scheduler, Thread_Control *idle, Per_CPU_Control *cpu ); -void _Scheduler_strong_APA_Pin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -); - -void _Scheduler_strong_APA_Unpin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -); - +/** + * @brief Sets the affinity of the @node_base to @affinity + */ bool _Scheduler_strong_APA_Set_affinity( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node_base, const Processor_mask *affinity ); - /** @} */ #ifdef __cplusplus diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 7c5077e9729..d6c1ab03fea 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -6,6 +6,24 @@ * @brief Strong APA Scheduler Implementation */ +/* + * Copyright (c) 2013, 2018 embedded brains GmbH, 2020 Richi Dubey. + * All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * + * + * Richi Dubey + * + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. + */ + #ifdef HAVE_CONFIG_H #include "config.h" #endif @@ -32,59 +50,8 @@ _Scheduler_strong_APA_Node_downcast( Scheduler_Node *node ) { return (Scheduler_strong_APA_Node *) node; } -/** - * @brief Initializes the Strong_APA scheduler. - * - * Sets the chain containing all the nodes to empty - * and initializes the SMP scheduler. 
- * - * @param scheduler used to get - * reference to Strong APA scheduler context - * @retval void - * @see _Scheduler_strong_APA_Node_initialize() - * - */ -void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) -{ - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_context( scheduler ); - - _Scheduler_SMP_Initialize( &self->Base ); - _Chain_Initialize_empty( &self->allNodes ); -} - -/** - * @brief Initializes the node with the given priority. - * - * @param scheduler The scheduler control instance. - * @param[out] node The node to initialize. - * @param the_thread The thread of the node to initialize. - * @param priority The priority for @a node. - */ -void _Scheduler_strong_APA_Node_initialize( - const Scheduler_Control *scheduler, - Scheduler_Node *node, - Thread_Control *the_thread, - Priority_Control priority -) -{ - Scheduler_SMP_Node *smp_node; - - smp_node = _Scheduler_SMP_Node_downcast( node ); - _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority ); -} -/** - * @brief Helper function for /a update entry function - * - * Calls _Scheduler_SMP_Node_update_priority() - * - * @param scheduler The scheduler context. - * @param[out] node The node to update the priority of. - * @param new_priority Node's new priority. - * @see _Scheduler_strong_APA_Do_update() - */ -void _Scheduler_strong_APA_Do_update( +static inline void _Scheduler_strong_APA_Do_update( Scheduler_Context *context, Scheduler_Node *node, Priority_Control new_priority @@ -97,18 +64,7 @@ void _Scheduler_strong_APA_Do_update( _Scheduler_SMP_Node_update_priority( smp_node, new_priority ); } -/** - * @brief Checks if scheduler has a ready node - * - * Iterates through all the nodes in /a allNodes to - * look for a ready node - * - * @param scheduler The scheduler context. 
- * @retval true if scheduler has a ready node available - * @retval false if scheduler has no ready nodes available - */ - -bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) +static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) { Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self( context ); @@ -136,12 +92,7 @@ bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) return ret; } -/** - * @brief Checks the next highest node ready on run - * on the CPU on which @filter node was running on - * - */ -Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( +static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Context *context, Scheduler_Node *filter ) //TODO @@ -242,18 +193,8 @@ Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( return ret; } -/** - * @brief Checks the lowest scheduled node - * running on a processor on which the - * @filter_base node could be running on - * - * @param context The scheduler context instance. - * @param filter_base The node which wants to get scheduled. - * - * @retval node The lowest scheduled node that can be - * replaced by @filter_base - */ -Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( + +static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_Context *context, Scheduler_Node *filter_base ) @@ -361,15 +302,7 @@ Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( return ret; } -/** - * @brief Extracts a node from the scheduled node's list - * - * Calls _Scheduler_SMP_Extract_from_scheduled() - * - * @param scheduler The scheduler context - * @param[in] node_to_extract The node to extract. 
- */ -void _Scheduler_strong_APA_Extract_from_scheduled( +static inline void _Scheduler_strong_APA_Extract_from_scheduled( Scheduler_Context *context, Scheduler_Node *node_to_extract ) @@ -384,15 +317,7 @@ void _Scheduler_strong_APA_Extract_from_scheduled( //Not removing it from allNodes since the node could go in the ready state. } -/** - * @brief Extracts a node from the ready queue - * - * Removes the node from /a allNodes chain. - * - * @param scheduler The scheduler context - * @param[in] node_to_extract The node to extract. - */ -void _Scheduler_strong_APA_Extract_from_ready( +static inline void _Scheduler_strong_APA_Extract_from_ready( Scheduler_Context *context, Scheduler_Node *node_to_extract ) @@ -410,18 +335,7 @@ void _Scheduler_strong_APA_Extract_from_ready( _Chain_Set_off_chain( &node->Node ); } -/** - * @brief Moves a node from scheduled to ready state - * - * Calls _Scheduler_SMP_Extract_from_scheduled() to remove it from - * scheduled nodes list - * - * @param scheduler The scheduler context - * @param[in] scheduled_to_ready The node to move. - * @see _Scheduler_strong_APA_Insert_ready() - * - */ -void _Scheduler_strong_APA_Move_from_scheduled_to_ready( +static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( Scheduler_Context *context, Scheduler_Node *scheduled_to_ready ) @@ -438,16 +352,7 @@ void _Scheduler_strong_APA_Move_from_scheduled_to_ready( ); } -/** - * @brief Moves a node from ready to scheduled state. - * - * Calls the corresponding SMP function \a _Scheduler_SMP_Insert_scheduled() - * with append insert_priority - * - * @param context The scheduler context. - * @param ready_to_scheduled Node which moves from ready state. 
- */ -void _Scheduler_strong_APA_Move_from_ready_to_scheduled( +static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( Scheduler_Context *context, Scheduler_Node *ready_to_scheduled ) @@ -465,17 +370,7 @@ void _Scheduler_strong_APA_Move_from_ready_to_scheduled( //Note: The node still stays in the allNodes chain } -/** - * @brief Inserts a node in the ready queue. - * - * Adds it into the /a allNodes chain. - * - * @param context The scheduler context. - * @param node_base Node which is inserted into the queue. - * @param insert_priority priority at which the node is inserted - */ - -void _Scheduler_strong_APA_Insert_ready( +static inline void _Scheduler_strong_APA_Insert_ready( Scheduler_Context *context, Scheduler_Node *node_base, Priority_Control insert_priority @@ -492,17 +387,7 @@ void _Scheduler_strong_APA_Insert_ready( _Chain_Append_unprotected( &self->allNodes, &node->Node ); } -/** - * @brief Allocates a processor for the node. - * - * Calls _Scheduler_SMP_Allocate_processor_exact() - * - * @param context The scheduler context. - * @param scheduled_base Node which is to be allocated the @victim_cpu. - * @param victim_base Node which was executing earlier on @victim_cpu - * @victim_cpu CPU on which the @scheduled_base would be allocated on - */ -void _Scheduler_strong_APA_Allocate_processor( +static inline void _Scheduler_strong_APA_Allocate_processor( Scheduler_Context *context, Scheduler_Node *scheduled_base, Scheduler_Node *victim_base, @@ -522,45 +407,7 @@ void _Scheduler_strong_APA_Allocate_processor( ); } -/** - * @brief Blocks a node - * - * Changes the state of the node and extracts it from the queue - * calls _Scheduler_SMP_Block(). - * - * @param context The scheduler control instance. - * @param thread Thread correspoding to the @node. 
- * @param node node which is to be blocked - */ -void _Scheduler_strong_APA_Block( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node -) -{ - Scheduler_Context *context = _Scheduler_Get_context( scheduler ); -//The extract from ready automatically removes the node from allNodes chain. - _Scheduler_SMP_Block( - context, - thread, - node, - _Scheduler_strong_APA_Extract_from_scheduled, - _Scheduler_strong_APA_Extract_from_ready, - _Scheduler_strong_APA_Get_highest_ready, - _Scheduler_strong_APA_Move_from_ready_to_scheduled, - _Scheduler_strong_APA_Allocate_processor - ); -} - -/** - * @brief Enqueues a node - * - * - * @param context The scheduler context. - * @param node node which is to be enqueued - * @param insert_priority priority at which the node should be enqueued. - */ -bool _Scheduler_strong_APA_Enqueue( +static inline bool _Scheduler_strong_APA_Enqueue( Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority @@ -579,15 +426,7 @@ bool _Scheduler_strong_APA_Enqueue( ); } -/** - * @brief Enqueues a node in the scheduled queue - * - * - * @param context The scheduler context. - * @param node node which is to be enqueued - * @param insert_priority priority at which the node should be enqueued. - */ -bool _Scheduler_strong_APA_Enqueue_scheduled( +static inline bool _Scheduler_strong_APA_Enqueue_scheduled( Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority @@ -607,18 +446,62 @@ bool _Scheduler_strong_APA_Enqueue_scheduled( ); } -/** - * @brief Unblocks a node - * - * Changes the state of the node and calls _Scheduler_SMP_Unblock(). - * - * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. 
- * @param node node which is to be unblocked - * @see _Scheduler_strong_APA_Enqueue() - * @see _Scheduler_strong_APA_Do_update() - */ -void _Scheduler_strong_APA_Unblock( +static inline bool _Scheduler_strong_APA_Do_ask_for_help( + Scheduler_Context *context, + Thread_Control *the_thread, + Scheduler_Node *node +) +{ + return _Scheduler_SMP_Ask_for_help( + context, + the_thread, + node, + _Scheduler_SMP_Priority_less_equal, + _Scheduler_strong_APA_Insert_ready, + _Scheduler_SMP_Insert_scheduled, + _Scheduler_strong_APA_Move_from_scheduled_to_ready, + _Scheduler_strong_APA_Get_lowest_scheduled, + _Scheduler_strong_APA_Allocate_processor + ); +} + +static inline void _Scheduler_strong_APA_Register_idle( + Scheduler_Context *context, + Scheduler_Node *idle_base, + Per_CPU_Control *cpu +) +{ + (void) context; + (void) idle_base; + (void) cpu; + //We do not maintain a variable to access the scheduled + //node for a CPU. So this function does nothing. +} + +static inline void _Scheduler_strong_APA_Do_set_affinity( + Scheduler_Context *context, + Scheduler_Node *node_base, + void *arg +) +{ + Scheduler_strong_APA_Node *node; + const Processor_mask *affinity; + + node = _Scheduler_strong_APA_Node_downcast( node_base ); + affinity = arg; + node->affinity = *affinity; +} + +void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) +{ + Scheduler_strong_APA_Context *self = + _Scheduler_strong_APA_Get_context( scheduler ); + + _Scheduler_SMP_Initialize( &self->Base ); + _Chain_Initialize_empty( &self->allNodes ); +} + +void _Scheduler_strong_APA_Yield( const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node @@ -626,65 +509,53 @@ void _Scheduler_strong_APA_Unblock( { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - _Scheduler_SMP_Unblock( + _Scheduler_SMP_Yield( context, thread, node, - _Scheduler_strong_APA_Do_update, - _Scheduler_strong_APA_Enqueue + _Scheduler_strong_APA_Extract_from_ready, + 
_Scheduler_strong_APA_Enqueue, + _Scheduler_strong_APA_Enqueue_scheduled ); } -static inline bool _Scheduler_strong_APA_Do_ask_for_help( - Scheduler_Context *context, - Thread_Control *the_thread, - Scheduler_Node *node +void _Scheduler_strong_APA_Block( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node ) { - return _Scheduler_SMP_Ask_for_help( + Scheduler_Context *context = _Scheduler_Get_context( scheduler ); +//The extract from ready automatically removes the node from allNodes chain. + _Scheduler_SMP_Block( context, - the_thread, + thread, node, - _Scheduler_SMP_Priority_less_equal, - _Scheduler_strong_APA_Insert_ready, - _Scheduler_SMP_Insert_scheduled, - _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_strong_APA_Get_lowest_scheduled, + _Scheduler_strong_APA_Extract_from_scheduled, + _Scheduler_strong_APA_Extract_from_ready, + _Scheduler_strong_APA_Get_highest_ready, + _Scheduler_strong_APA_Move_from_ready_to_scheduled, _Scheduler_strong_APA_Allocate_processor ); } -/** - * @brief Calls the smp Ask_for_help - * - * - * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node that asks for help. - * @param node node associated with @thread - */ -bool _Scheduler_strong_APA_Ask_for_help( +void _Scheduler_strong_APA_Unblock( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ) { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - return _Scheduler_strong_APA_Do_ask_for_help( + _Scheduler_SMP_Unblock( context, - the_thread, - node + thread, + node, + _Scheduler_strong_APA_Do_update, + _Scheduler_strong_APA_Enqueue ); } - -/** - * @brief Updates the priority of the node - * - * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. 
- * @param node Node whose priority has to be updated - */ void _Scheduler_strong_APA_Update_priority( const Scheduler_Control *scheduler, Thread_Control *thread, @@ -704,14 +575,22 @@ void _Scheduler_strong_APA_Update_priority( _Scheduler_strong_APA_Do_ask_for_help ); } -/** - * @brief To Reconsider the help request - * - * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. - * @param node Node corresponding to @thread which asks for - * reconsideration - */ + +bool _Scheduler_strong_APA_Ask_for_help( + const Scheduler_Control *scheduler, + Thread_Control *the_thread, + Scheduler_Node *node +) +{ + Scheduler_Context *context = _Scheduler_Get_context( scheduler ); + + return _Scheduler_strong_APA_Do_ask_for_help( + context, + the_thread, + node + ); +} + void _Scheduler_strong_APA_Reconsider_help_request( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -728,14 +607,6 @@ void _Scheduler_strong_APA_Reconsider_help_request( ); } -/** - * @brief Withdraws a node - * - * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. 
- * @param node Node that has to be withdrawn - * @param next_state the state that the node goes to - */ void _Scheduler_strong_APA_Withdraw_node( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -757,34 +628,51 @@ void _Scheduler_strong_APA_Withdraw_node( ); } -/** - * @brief To register an idle thread on a cpu - * - * For compatibility with SMP functions, - * does nothing in our implementation - */ -void _Scheduler_strong_APA_Register_idle( - Scheduler_Context *context, - Scheduler_Node *idle_base, - Per_CPU_Control *cpu +void _Scheduler_strong_APA_Pin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu ) { - (void) context; - (void) idle_base; + Scheduler_strong_APA_Node *node; + uint32_t pin_cpu; + + (void) scheduler; + node = _Scheduler_strong_APA_Node_downcast( node_base ); + pin_cpu = (uint32_t) _Per_CPU_Get_index( cpu ); + + _Assert( + _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED + ); + + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + _Processor_mask_Zero( &node->affinity ); + _Processor_mask_Set( &node->affinity, pin_cpu ); +} + +void _Scheduler_strong_APA_Unpin( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + struct Per_CPU_Control *cpu +) +{ + Scheduler_strong_APA_Node *node; + + (void) scheduler; (void) cpu; - //We do not maintain a variable to access the scheduled - //node for a CPU. So this function does nothing. + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + _Assert( + _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED + ); + + _Processor_mask_Zero( &node->affinity ); + _Processor_mask_Assign( &node->affinity, &node->unpin_affinity ); } -/** - * @brief Adds a processor to the scheduler instance - * - * and allocates an idle thread to the processor. - * - * @param scheduler The scheduler control instance. 
- * @param idle Idle thread to be allocated to the processor - * - */ void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle @@ -801,14 +689,6 @@ void _Scheduler_strong_APA_Add_processor( ); } - -/** - * @brief Removes a processor from the scheduler instance - * - * @param scheduler The scheduler control instance. - * @param cpu processor that is removed - * - */ Thread_Control *_Scheduler_strong_APA_Remove_processor( const Scheduler_Control *scheduler, Per_CPU_Control *cpu @@ -824,59 +704,19 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor( ); } - -/** - * @brief Called when a node yields the processor - * - * @param scheduler The scheduler control instance. - * @param thread Thread corresponding to @node - * @param node Node that yield the processor - * - */ -void _Scheduler_strong_APA_Yield( +void _Scheduler_strong_APA_Node_initialize( const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node -) -{ - Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - - _Scheduler_SMP_Yield( - context, - thread, - node, - _Scheduler_strong_APA_Extract_from_ready, - _Scheduler_strong_APA_Enqueue, - _Scheduler_strong_APA_Enqueue_scheduled - ); -} - -/** - * @brief Called by _Scheduler_strong_APA_Set_affinity() - * - */ -void _Scheduler_strong_APA_Do_set_affinity( - Scheduler_Context *context, - Scheduler_Node *node_base, - void *arg + Scheduler_Node *node, + Thread_Control *the_thread, + Priority_Control priority ) { - Scheduler_strong_APA_Node *node; - const Processor_mask *affinity; - - node = _Scheduler_strong_APA_Node_downcast( node_base ); - affinity = arg; - node->affinity = *affinity; + Scheduler_SMP_Node *smp_node; + + smp_node = _Scheduler_SMP_Node_downcast( node ); + _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority ); } -/** - * @brief Starts an idle thread on a CPU - * - * @param scheduler The scheduler control instance. 
- * @param idle Idle Thread - * @param cpu processor that gets the idle thread - * - */ void _Scheduler_strong_APA_Start_idle( const Scheduler_Control *scheduler, Thread_Control *idle, @@ -895,86 +735,6 @@ void _Scheduler_strong_APA_Start_idle( ); } -/** - * @brief Pins a node to a cpu - * - * @param scheduler The scheduler control instance. - * @param thread Thread corresponding to @node - * @param node_base node which gets pinned - * @param cpu processor that the node gets pinned to - * - */ -void _Scheduler_strong_APA_Pin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -) -{ - Scheduler_strong_APA_Node *node; - uint32_t pin_cpu; - - (void) scheduler; - node = _Scheduler_strong_APA_Node_downcast( node_base ); - pin_cpu = (uint32_t) _Per_CPU_Get_index( cpu ); - - _Assert( - _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED - ); - - node = _Scheduler_strong_APA_Node_downcast( node_base ); - - _Processor_mask_Zero( &node->affinity ); - _Processor_mask_Set( &node->affinity, pin_cpu ); -} - -/** - * @brief Unpins a node - * - * and sets it affinity back to normal. - * - * @param scheduler The scheduler control instance. 
- * @param thread Thread corresponding to @node - * @param node_base node which gets unpinned - * @param cpu processor that the node gets unpinned from : UNUSED - * - */ -void _Scheduler_strong_APA_Unpin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -) -{ - Scheduler_strong_APA_Node *node; - - (void) scheduler; - (void) cpu; - node = _Scheduler_strong_APA_Node_downcast( node_base ); - - _Assert( - _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED - ); - - _Processor_mask_Zero( &node->affinity ); - _Processor_mask_Assign( &node->affinity, &node->unpin_affinity ); -} - -/** - * @brief Checks if the processor set of the scheduler is the subset of the affinity set. - * - * Default implementation of the set affinity scheduler operation. - * - * @param scheduler This parameter is unused. - * @param thread This parameter is unused. - * @param node This parameter is unused. - * @param affinity The new processor affinity set for the thread. - * - * @see _Scheduler_strong_APA_Do_set_affinity() - * - * @retval true The processor set of the scheduler is a subset of the affinity set. - * @retval false The processor set of the scheduler is not a subset of the affinity set. - */ bool _Scheduler_strong_APA_Set_affinity( const Scheduler_Control *scheduler, Thread_Control *thread, @@ -997,10 +757,7 @@ bool _Scheduler_strong_APA_Set_affinity( if ( _Processor_mask_Is_equal( &node->affinity, affinity ) ) return true; //Nothing to do. Return true. 
- - _Processor_mask_Zero( &node->affinity ); - _Processor_mask_Zero( &node->unpin_affinity ); - + _Processor_mask_Assign( &node->affinity, &local_affinity ); _Processor_mask_Assign( &node->unpin_affinity, &local_affinity ); @@ -1019,3 +776,4 @@ bool _Scheduler_strong_APA_Set_affinity( return true; } + From 424c500086a6c8cc583d9b86edd2a0e560bdb451 Mon Sep 17 00:00:00 2001 From: richidubey Date: Wed, 5 Aug 2020 19:41:35 +0530 Subject: [PATCH 07/29] Started replacing the pseduo codes --- cpukit/include/rtems/scheduler.h | 2 +- .../include/rtems/score/schedulerstrongapa.h | 97 ++++---- cpukit/score/src/schedulerstrongapa.c | 227 ++++++++++-------- 3 files changed, 176 insertions(+), 150 deletions(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index 955a83cfb48..b84b29fd209 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -257,7 +257,7 @@ #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \ static struct { \ Scheduler_strong_APA_Context Base; \ - Chain_Control Ready[ ( prio_count ) ]; \ + Scheduler_strong_APA_CPU CPU[ CONFIGURE_MAXIMUM_PROCESSORS ]; } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index cae588a1570..1947585a19f 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -20,7 +20,7 @@ * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * http://www.rtems.org/license/LICENSE. */ #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H @@ -65,7 +65,24 @@ typedef struct { * @brief Chain of all the nodes present in * the system. Accounts for ready and scheduled nodes. 
*/ - Chain_Control all_nodes; + Chain_Control All_nodes; + + /** + * @brief Queue for this context + */ + Scheduler_strong_APA_Queue *queue; + + /** + * @brief Pointer to structure with array of + * boolean visited values + */ + Scheduler_strong_APA_Visited *visited; + + /** + * @brief Pointer to structure with array of + * caller corresponding to a CPU + */ + Scheduler_strong_APA_Caller *caller; } Scheduler_strong_APA_Context; /** @@ -86,30 +103,42 @@ typedef struct { * @brief The associated affinity set of this node. */ Processor_mask Affinity; - - /** - * @brief The associated affinity set of this node to be used while unpinning the node. - */ - Processor_mask Unpin_affinity; } Scheduler_strong_APA_Node; /** * @brief CPU structure to be used while traversing in the FIFO Queue */ -typedef struct Scheduler_strong_APA_CPU +typedef struct { /** - * @brief Chain node for - * _Scheduler_strong_APA_Get_highest_ready::Queue - * and _Scheduler_strong_APA_Get_lowest_scheduled::Queue - */ - Chain_Node node; + * @brief Array of Cpu to be used for the queue operations + */ + Per_CPU_Control Cpu[ RTEMS_ZERO_LENGTH_ARRAY ]; +} Scheduler_strong_APA_Queue; + +/** + * @brief Caller corresponding to a Cpu in Scheduler_strong_APA_Queue + */ +typedef struct +{ + /** + * @brief Array of caller each corresponding to the + * Scheduler_strong_APA_Queue::Cpu at the same index + */ + Scheduler_strong_APA_Node *caller[ RTEMS_ZERO_LENGTH_ARRAY ]; +} Scheduler_strong_APA_Caller; +/** + * @brief to a Cpu in Scheduler_strong_APA_Queue + */ +typedef struct +{ /** - * @brief cpu associated with the node + * @brief Array of boolean each corresponding to the visited status of + * Scheduler_strong_APA_Queue::Cpu at the same index */ - Per_CPU_Control cpu; -}Scheduler_strong_APA_CPU; + bool *visited[ RTEMS_ZERO_LENGTH_ARRAY ]; +} Scheduler_strong_APA_Visited; /** * @brief Entry points for the Strong APA Scheduler. 
@@ -127,8 +156,8 @@ typedef struct Scheduler_strong_APA_CPU _Scheduler_strong_APA_Ask_for_help, \ _Scheduler_strong_APA_Reconsider_help_request, \ _Scheduler_strong_APA_Withdraw_node, \ - _Scheduler_strong_APA_Pin, \ - _Scheduler_strong_APA_Unpin, \ + _Scheduler_default_Pin_or_unpin, \ + _Scheduler_default_Pin_or_unpin, \ _Scheduler_strong_APA_Add_processor, \ _Scheduler_strong_APA_Remove_processor, \ _Scheduler_strong_APA_Node_initialize, \ @@ -255,38 +284,6 @@ void _Scheduler_strong_APA_Withdraw_node( Thread_Scheduler_state next_state ); -/** - * @brief Pins a node to a cpu - * - * @param scheduler The scheduler control instance. - * @param thread Thread corresponding to @node - * @param node_base node which gets pinned - * @param cpu processor that the node gets pinned to - */ -void _Scheduler_strong_APA_Pin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -); - -/** - * @brief Unpins a node - * - * and sets it affinity back to normal. - * - * @param scheduler The scheduler control instance. - * @param thread Thread corresponding to @node - * @param node_base node which gets unpinned - * @param cpu processor that the node gets unpinned from : UNUSED - */ -void _Scheduler_strong_APA_Unpin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -); - /** * @brief Adds a processor to the scheduler instance * diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index d6c1ab03fea..4bf43ea563d 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -21,7 +21,7 @@ * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * http://www.rtems.org/license/LICENSE. 
*/ #ifdef HAVE_CONFIG_H @@ -68,25 +68,27 @@ static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) { Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self( context ); - bool ret; - const Chain_Node *tail; - Chain_Node *next; + bool ret; + const Chain_Node *tail; + Chain_Node *next; + Scheduler_strong_APA_Node *node; tail = _Chain_Immutable_tail( &self->allNodes ); next = _Chain_First( &self->allNodes ); - ret=false; + ret = false; while ( next != tail ) { - Scheduler_strong_APA_Node *node; - - node = (Scheduler_strong_APA_Node *) next; + node = (Scheduler_strong_APA_Node *) next; - if( _Scheduler_SMP_Node_state( &node->Base.Base ) - == SCHEDULER_SMP_NODE_READY ) { - ret=true; - break; - } + if ( + _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_READY + ) { + ret = true; + break; + } + + next = _Chain_Next( next ); } return ret; @@ -100,18 +102,17 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( //Plan for this function: (Pseudo Code): Scheduler_strong_APA_Context *self=_Scheduler_strong_APA_Get_self( context ); - CPU *Qcpu; - Thread_Control *thread; - Per_CPU_Control *thread_cpu; - Per_CPU_Control *curr_CPU; - Per_CPU_Control *assigned_cpu; - Scheduler_Node *ret; - Priority_Control max_priority; - Priority_Control curr_priority; - Chain_Control Queue; - bool visited[10]; //Temporary Compilation Fix - - + CPU *Qcpu; + Thread_Control *thread; + Per_CPU_Control *thread_cpu; + Per_CPU_Control *curr_CPU; + Per_CPU_Control *assigned_cpu; + Scheduler_Node *ret; + Priority_Control max_priority; + Priority_Control curr_priority; + Scheduler_SMP_Node_state curr_state; + Chain_Control Queue; + bool visited[10]; //Temporary Compilation Fix thread = filter->user; thread_cpu = _Thread_Get_CPU( thread ); @@ -119,14 +120,13 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( //Implement the BFS Algorithm for task departure //to get the highest ready task for a particular CPU 
- max_priority = _Scheduler_Node_get_priority( filter ); max_priority = SCHEDULER_PRIORITY_PURIFY( max_priority ); - ret=filter; + ret = filter; - const Chain_Node *tail; - Chain_Node *next; + const Chain_Node *tail; + Chain_Node *next; _Chain_Initialize_empty(&Queue); @@ -147,42 +147,41 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( while ( next != tail ) { Scheduler_strong_APA_Node *node; node = (Scheduler_strong_APA_Node *) next; + curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); - if( _Processor_mask_Is_set( &node->affinity, _Per_CPU_Get_index( curr_CPU ) ) ) { - //Checks if the thread_CPU is in the affinity set of the node + if ( + _Processor_mask_Is_set(&node->affinity, _Per_CPU_Get_index( curr_CPU)) + ) { + //Checks if the thread_CPU is in the affinity set of the node - if(_Scheduler_SMP_Node_state( &node->Base.Base ) - == SCHEDULER_SMP_NODE_SCHEDULED) { - - assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); + if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED) { + assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); - if(visited[ _Per_CPU_Get_index( assigned_cpu ) ] == false) { - Qcpu = rtems_malloc( sizeof(CPU) ); - //rtems_malloc does not return a errnum in case of failure - Qcpu->cpu=*assigned_cpu; + if ( visited[ _Per_CPU_Get_index( assigned_cpu ) ] == false) { + Qcpu = rtems_malloc( sizeof(CPU) ); + //rtems_malloc does not return a errnum in case of failure + Qcpu->cpu=*assigned_cpu; - _Chain_Initialize_node( &Qcpu->node ); - _Chain_Append_unprotected( &Queue, &Qcpu->node ); - //Insert thread_CPU in the Queue - visited[ _Per_CPU_Get_index (assigned_cpu) ]=true; - } - } - else if(_Scheduler_SMP_Node_state( &node->Base.Base ) - == SCHEDULER_SMP_NODE_READY) { - curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + _Chain_Initialize_node( &Qcpu->node ); + _Chain_Append_unprotected( &Queue, &Qcpu->node ); + //Insert thread_CPU in the 
Queue + visited[ _Per_CPU_Get_index (assigned_cpu) ]=true; + } + } else if ( curr_state == SCHEDULER_SMP_NODE_READY) { + curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - if(curr_priorityBase.Base; - } - } - } + if ( curr_priorityBase.Base; + } + } + } next = _Chain_Next( next ); } } - if( ret != filter) + if ( ret != filter) { //Backtrack on the path from //thread_cpu to ret, shifting along every task. @@ -204,43 +203,55 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( uint32_t cpu_max; uint32_t cpu_index; CPU *Qcpu; + Priority_Control filter_priority; + + Per_CPU_Control *curr_CPU; + Thread_Control *curr_thread; + Scheduler_Node *curr_node; + Scheduler_Node *ret; + Chain_Control Queue; + Priority_Control max_priority; + Priority_Control curr_priority; + bool *visited; + - Per_CPU_Control *curr_CPU; - Thread_Control *curr_thread; - Scheduler_Node *curr_node; - Scheduler_Node *ret; - Chain_Control Queue; - Priority_Control max_priority; - Priority_Control curr_priority; - bool visited[10]; //Temporary Compilation Fix + Scheduler_strong_APA_Context *self; + + self = _Scheduler_strong_APA_Get_self( context ); + + visited = self->visited->visited; Scheduler_strong_APA_Node *Scurr_node; //Current Strong_APA_Node Scheduler_strong_APA_Node *filter_node; - ret=NULL; //To remove compiler warning. + ret = NULL; //To remove compiler warning. //ret would always point to the node with the lowest priority //node unless the affinity of filter_base is NULL. filter_node = _Scheduler_strong_APA_Node_downcast( filter_base ); - max_priority = 300;//Max (Lowest) priority encountered so far. + max_priority_num = 0;//Max (Lowest) priority encountered so far. 
+ + _Assert( !_Processor_mask_Zero( &filter_node->affinity ) ); cpu_max = _SMP_Get_processor_maximum(); _Chain_Initialize_empty(&Queue); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - if( ( _Processor_mask_Is_set( &filter_node->affinity , cpu_index) - && visited[ cpu_index ] == false ) ) { - //Checks if the thread_CPU is in the affinity set of the node + visited[ cpu_index ] = false; + + //Checks if the thread_CPU is in the affinity set of the node + if ( _Processor_mask_Is_set( &filter_node->affinity, cpu_index)) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - if( _Per_CPU_Is_processor_online( cpu ) ) { + + if ( _Per_CPU_Is_processor_online( cpu ) ) { Qcpu = rtems_malloc( sizeof(CPU) ); //No errornum returned in case of failure - Qcpu->cpu=*cpu; + Qcpu->cpu = *cpu; _Chain_Initialize_node( &Qcpu->node ); _Chain_Append_unprotected( &Queue, &Qcpu->node ); - //Insert cpu in the Queue - visited[ cpu_index ]=true; + //Insert cpu in the Queue + visited[ cpu_index ] = true; } } } @@ -250,45 +261,46 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( curr_CPU = &Qcpu->cpu; curr_thread = curr_CPU->executing; - curr_node = (Scheduler_Node *) _Chain_First( &curr_thread->Scheduler.Scheduler_nodes ); - - //How to check if the thread is not participating - //in helping on this processor? 
+ curr_node = _Thread_Scheduler_get_home_node( curr_thread ); curr_priority = _Scheduler_Node_get_priority( curr_node ); curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - if(curr_priority < max_priority) { + if ( curr_priority > max_priority_num) { ret = curr_node; - max_priority = curr_priority; + max_priority_num = curr_priority; + + if( curr_priority > SCHEDULER_PRIORITY_PURIFY( _Scheduler_Node_get_priority( filter_base ) ) ) + { + cpu_to_preempt=curr_CPU; + } } - Scurr_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - if( !curr_thread->is_idle ) { + Scurr_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + if ( !curr_thread->is_idle ) { for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - if( _Processor_mask_Is_set( &Scurr_node->affinity , cpu_index ) ) { + if ( _Processor_mask_Is_set( &Scurr_node->affinity, cpu_index ) ) { //Checks if the thread_CPU is in the affinity set of the node Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - if( _Per_CPU_Is_processor_online( cpu ) && visited[ cpu_index ] == false ) { + if ( _Per_CPU_Is_processor_online( cpu ) && visited[ cpu_index ] == false ) { Qcpu = rtems_malloc( sizeof(CPU) ); - Qcpu->cpu=*cpu; + Qcpu->cpu = *cpu; _Chain_Initialize_node( &Qcpu->node ); _Chain_Append_unprotected( &Queue, &Qcpu->node ); //Insert the cpu in the affinty set of curr_thread in the Queue - visited[ cpu_index ]=true; + visited[ cpu_index ] = true; } } } } } - Priority_Control filter_priority; filter_priority = _Scheduler_Node_get_priority( filter_base ); filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); - if( ret->Priority.value < filter_priority ) { + if ( ret->Priority.value < filter_priority ) { //Lowest priority task found has higher priority // than filter_base. 
//So, filter_base remains unassigned @@ -307,8 +319,8 @@ static inline void _Scheduler_strong_APA_Extract_from_scheduled( Scheduler_Node *node_to_extract ) { - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Node *node; + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); @@ -322,8 +334,8 @@ static inline void _Scheduler_strong_APA_Extract_from_ready( Scheduler_Node *node_to_extract ) { - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Node *node; + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); @@ -370,14 +382,31 @@ static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( //Note: The node still stays in the allNodes chain } +static inline void _Scheduler_strong_APA_Set_scheduled( + _Scheduler_strong_APA_Context *self, + _Scheduler_strong_APA_Node *scheduled, + const Per_CPU_Control *cpu +) +{ + self->CPU[ _Per_CPU_Get_index( cpu ) ].scheduled = scheduled; +} + +static inline Scheduler_EDF_SMP_Node *_Scheduler_strong_APA_Get_scheduled( + const _Scheduler_strong_APA_Context *self, + uint8_t cpu +) +{ + return self->CPU[ cpu ].scheduled; +} + static inline void _Scheduler_strong_APA_Insert_ready( Scheduler_Context *context, Scheduler_Node *node_base, Priority_Control insert_priority ) { - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Node *node; + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_base ); @@ -394,7 +423,7 @@ static inline void _Scheduler_strong_APA_Allocate_processor( Per_CPU_Control *victim_cpu ) { - Scheduler_strong_APA_Node *scheduled; + Scheduler_strong_APA_Node *scheduled; (void) victim_base; scheduled = 
_Scheduler_strong_APA_Node_downcast( scheduled_base ); @@ -635,8 +664,8 @@ void _Scheduler_strong_APA_Pin( struct Per_CPU_Control *cpu ) { - Scheduler_strong_APA_Node *node; - uint32_t pin_cpu; + Scheduler_strong_APA_Node *node; + uint32_t pin_cpu; (void) scheduler; node = _Scheduler_strong_APA_Node_downcast( node_base ); @@ -742,9 +771,9 @@ bool _Scheduler_strong_APA_Set_affinity( const Processor_mask *affinity ) { - Scheduler_Context *context; - Scheduler_strong_APA_Node *node; - Processor_mask local_affinity; + Scheduler_Context *context; + Scheduler_strong_APA_Node *node; + Processor_mask local_affinity; context = _Scheduler_Get_context( scheduler ); _Processor_mask_And( &local_affinity, &context->Processors, affinity ); From f2d6e4256ab07fe6e7479af8eeb0dcdb574964f4 Mon Sep 17 00:00:00 2001 From: richidubey Date: Thu, 6 Aug 2020 20:29:51 +0530 Subject: [PATCH 08/29] Added get_lowest_scheduled --- cpukit/score/src/schedulerstrongapa.c | 206 +++++++++++++++----------- 1 file changed, 120 insertions(+), 86 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 4bf43ea563d..9dd0ea610a7 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -111,9 +111,21 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Priority_Control max_priority; Priority_Control curr_priority; Scheduler_SMP_Node_state curr_state; - Chain_Control Queue; - bool visited[10]; //Temporary Compilation Fix + const Chain_Node *tail; + Chain_Node *next; + Per_CPU_Control *queue; //Array of Cpu that serves as a queue + bool *visited; //Array of bool values each corresponding to a cpu + + //Denotes front and rear of the queue + uint32_t front; + uint32_t rear; + front = 0; + rear = -1; + + visited = self->visited->visited; + queue = self->queue->Cpu; + thread = filter->user; thread_cpu = _Thread_Get_CPU( thread ); @@ -124,23 +136,15 @@ static inline Scheduler_Node 
*_Scheduler_strong_APA_Get_highest_ready( max_priority = SCHEDULER_PRIORITY_PURIFY( max_priority ); ret = filter; - - const Chain_Node *tail; - Chain_Node *next; - - _Chain_Initialize_empty(&Queue); - - Qcpu = rtems_malloc( sizeof(CPU) ); //Does not return any errornum on failure - Qcpu->cpu=*thread_cpu; - _Chain_Initialize_node( &Qcpu->node ); - _Chain_Append_unprotected( &Queue, &Qcpu->node ); //Insert thread_CPU in the Queue + rear = rear + 1; + queue[ rear ] = thread_cpu; visited[ _Per_CPU_Get_index( thread_cpu ) ]=true; - - while( !_Chain_Is_empty( &Queue) ) { - Qcpu = (CPU*) _Chain_Get_first_unprotected( &Queue ); - curr_CPU = &Qcpu->cpu; + while( front <= rear ) { + curr_CPU = queue[ front ]; + front = front + 1; + tail = _Chain_Immutable_tail( &self->allNodes ); next = _Chain_First( &self->allNodes ); @@ -149,37 +153,38 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( node = (Scheduler_strong_APA_Node *) next; curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); - if ( + if ( _Processor_mask_Is_set(&node->affinity, _Per_CPU_Get_index( curr_CPU)) - ) { - //Checks if the thread_CPU is in the affinity set of the node - - if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED) { - assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); - - if ( visited[ _Per_CPU_Get_index( assigned_cpu ) ] == false) { - Qcpu = rtems_malloc( sizeof(CPU) ); - //rtems_malloc does not return a errnum in case of failure - Qcpu->cpu=*assigned_cpu; - - _Chain_Initialize_node( &Qcpu->node ); - _Chain_Append_unprotected( &Queue, &Qcpu->node ); - //Insert thread_CPU in the Queue - visited[ _Per_CPU_Get_index (assigned_cpu) ]=true; - } - } else if ( curr_state == SCHEDULER_SMP_NODE_READY) { - curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - - if ( curr_priorityBase.Base; - } - } - } + ) { + //Checks if the thread_CPU is in the affinity set of the node + + if ( curr_state == 
SCHEDULER_SMP_NODE_SCHEDULED) { + assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); + + if ( visited[ _Per_CPU_Get_index( assigned_cpu ) ] == false) { + Qcpu = rtems_malloc( sizeof(CPU) ); + //rtems_malloc does not return a errnum in case of failure + Qcpu->cpu=*assigned_cpu; + + _Chain_Initialize_node( &Qcpu->node ); + _Chain_Append_unprotected( &Queue, &Qcpu->node ); + //Insert thread_CPU in the Queue + visited[ _Per_CPU_Get_index (assigned_cpu) ]=true; + } + } + else if ( curr_state == SCHEDULER_SMP_NODE_READY) { + curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if ( curr_priorityBase.Base; + } + } + } next = _Chain_Next( next ); } - } + } if ( ret != filter) { @@ -202,27 +207,41 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( uint32_t cpu_max; uint32_t cpu_index; - CPU *Qcpu; Priority_Control filter_priority; Per_CPU_Control *curr_CPU; + Per_CPU_Control *next_cpu; + Per_CPU_Control *cpu_to_preempt; Thread_Control *curr_thread; Scheduler_Node *curr_node; Scheduler_Node *ret; - Chain_Control Queue; Priority_Control max_priority; Priority_Control curr_priority; - bool *visited; + + Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node + Scheduler_strong_APA_Node *filter_node; + Scheduler_strong_APA_Context *self; + Per_CPU_Control *queue; //Array of Cpu that serves as a queue + bool *visited; //Array of bool values each corresponding to a cpu. + //Node that has a CPU in its affinity set which gets used for backtracking. + Scheduler_strong_APA_Node *caller; //Caller node for a CPU. 
- Scheduler_strong_APA_Context *self; + //Denotes front and rear of the queue + uint32_t front; + uint32_t rear; + + front = 0; + rear = -1; - self = _Scheduler_strong_APA_Get_self( context ); - visited = self->visited->visited; + queue = self->queue->Cpu; + caller = Caller->caller; - Scheduler_strong_APA_Node *Scurr_node; //Current Strong_APA_Node - Scheduler_strong_APA_Node *filter_node; + filter_priority = _Scheduler_Node_get_priority( filter_base ); + filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); + + self = _Scheduler_strong_APA_Get_self( context ); ret = NULL; //To remove compiler warning. //ret would always point to the node with the lowest priority @@ -232,10 +251,11 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( max_priority_num = 0;//Max (Lowest) priority encountered so far. + //This assert makes sure that there always exist an element in the + // Queue when we start the queue traversal. _Assert( !_Processor_mask_Zero( &filter_node->affinity ) ); cpu_max = _SMP_Get_processor_maximum(); - _Chain_Initialize_empty(&Queue); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { visited[ cpu_index ] = false; @@ -245,71 +265,85 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) ) { - Qcpu = rtems_malloc( sizeof(CPU) ); //No errornum returned in case of failure - Qcpu->cpu = *cpu; - - _Chain_Initialize_node( &Qcpu->node ); - _Chain_Append_unprotected( &Queue, &Qcpu->node ); - //Insert cpu in the Queue + rear = rear + 1; + queue[ rear ] = cpu; visited[ cpu_index ] = true; + caller[ cpu_index ] = filter_base; } } } - while( !_Chain_Is_empty( &Queue) ) { - Qcpu = (CPU*) _Chain_Get_first_unprotected( &Queue ); - curr_CPU = &Qcpu->cpu; + while( front <= rear ) { + curr_CPU = queue[ front ]; + front = front + 1; + curr_thread = curr_CPU->executing; - curr_node = 
_Thread_Scheduler_get_home_node( curr_thread ); curr_priority = _Scheduler_Node_get_priority( curr_node ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); if ( curr_priority > max_priority_num) { ret = curr_node; max_priority_num = curr_priority; - if( curr_priority > SCHEDULER_PRIORITY_PURIFY( _Scheduler_Node_get_priority( filter_base ) ) ) + if( curr_priority > filter_priority) { - cpu_to_preempt=curr_CPU; + cpu_to_preempt = curr_CPU; } } - Scurr_node = _Scheduler_strong_APA_Node_downcast( curr_node ); if ( !curr_thread->is_idle ) { for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { if ( _Processor_mask_Is_set( &Scurr_node->affinity, cpu_index ) ) { //Checks if the thread_CPU is in the affinity set of the node Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) && visited[ cpu_index ] == false ) { - - Qcpu = rtems_malloc( sizeof(CPU) ); - Qcpu->cpu = *cpu; - - _Chain_Initialize_node( &Qcpu->node ); - _Chain_Append_unprotected( &Queue, &Qcpu->node ); - //Insert the cpu in the affinty set of curr_thread in the Queue + rear = rear + 1; + queue[ rear ] = cpu; visited[ cpu_index ] = true; + caller[ cpu_index ] = curr_node; } } } } - } + } - filter_priority = _Scheduler_Node_get_priority( filter_base ); - filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); - - if ( ret->Priority.value < filter_priority ) { - //Lowest priority task found has higher priority - // than filter_base. - //So, filter_base remains unassigned - //No task shifting. 
- } - - else { + if( ret->Priority.value > filter_priority ) { //Backtrack on the path from //_Thread_Get_CPU(ret->user) to ret, shifting along every task + + curr_node = caller[ _Per_CPU_Get_index(cpu_to_preempt) ]; + curr_cpu = _Thread_Get_CPU( curr_node->user ); + + curr_node = caller [ _Per_CPU_Get_index( curr_cpu ) ]; + curr_cpu = _Thread_Get_CPU( curr_node->user ); + + do{ + next_cpu = _Thread_Get_CPU( curr_node->user ); + + _Scheduler_SMP_Preempt( + context, + curr_node, + _Thread_Scheduler_get_home_node( curr_cpu->executing ), + _Scheduler_strong_APA_Allocate_processor + ); + + curr_cpu = _Per_CPU_Get_index( next_cpu ); + curr_node = caller[ curr_cpu ]; + + }while( curr_node != filter_base ); + + _Scheduler_SMP_Preempt( + context, + curr_node, + _Thread_Scheduler_get_home_node( curr_cpu->executing ), + _Scheduler_strong_APA_Allocate_processor + ); + + filter_base = caller[ _Per_CPU_Get_index(cpu_to_preempt) ]; } return ret; } From 9cc11fb283114f032c34602552c59fe51c1b0a58 Mon Sep 17 00:00:00 2001 From: richidubey Date: Thu, 6 Aug 2020 20:32:51 +0530 Subject: [PATCH 09/29] Added configuration definitions --- cpukit/include/rtems/scheduler.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index b84b29fd209..3ec9b3aeabb 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -257,7 +257,9 @@ #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \ static struct { \ Scheduler_strong_APA_Context Base; \ - Scheduler_strong_APA_CPU CPU[ CONFIGURE_MAXIMUM_PROCESSORS ]; + Scheduler_strong_APA_Queue Cpu[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ + Scheduler_strong_APA_Caller caller[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ + Scheduler_strong_APA_Visited visited[ CONFIGURE_MAXIMUM_PROCESSORS ]; } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ From 9fee2f44418bff9ae0d6e6541a61f9eb1b147067 Mon Sep 17 00:00:00 
2001 From: richidubey Date: Mon, 10 Aug 2020 18:01:46 +0530 Subject: [PATCH 10/29] Removed compiler warnings --- .../include/rtems/score/schedulerstrongapa.h | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 1947585a19f..92a7958e344 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -7,16 +7,16 @@ */ /* - * Copyright (c) 2013, 2018 embedded brains GmbH, 2020 Richi Dubey. - * All rights reserved. - * + * Copyright (c) 2020 Richi Dubey + * richidubey@gmail.com + * + * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. + * * embedded brains GmbH * Dornierstr. 4 * 82178 Puchheim * Germany * - * - * Richi Dubey: richidubey@gmail.com * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -48,43 +48,6 @@ extern "C" { * @{ */ - /** - * @brief Scheduler context for Strong APA scheduler. - * - * Has the structure for scheduler context - * and Node defintion for Strong APA scheduler - */ -typedef struct { - /** - * @brief SMP Context to refer to SMP implementation - * code. - */ - Scheduler_SMP_Context Base; - - /** - * @brief Chain of all the nodes present in - * the system. Accounts for ready and scheduled nodes. - */ - Chain_Control All_nodes; - - /** - * @brief Queue for this context - */ - Scheduler_strong_APA_Queue *queue; - - /** - * @brief Pointer to structure with array of - * boolean visited values - */ - Scheduler_strong_APA_Visited *visited; - - /** - * @brief Pointer to structure with array of - * caller corresponding to a CPU - */ - Scheduler_strong_APA_Caller *caller; -} Scheduler_strong_APA_Context; - /** * @brief Scheduler node specialization for Strong APA schedulers. 
*/ @@ -111,9 +74,9 @@ typedef struct { typedef struct { /** - * @brief Array of Cpu to be used for the queue operations + * @brief Array of Cpu pointers to be used for the queue operations */ - Per_CPU_Control Cpu[ RTEMS_ZERO_LENGTH_ARRAY ]; + Per_CPU_Control *Cpu[ RTEMS_ZERO_LENGTH_ARRAY ]; } Scheduler_strong_APA_Queue; /** @@ -122,10 +85,10 @@ typedef struct typedef struct { /** - * @brief Array of caller each corresponding to the - * Scheduler_strong_APA_Queue::Cpu at the same index + * @brief Array of caller pointers with each pointer pointing to the + * Scheduler_strong_APA_Queue::Cpu at the same index as the pointer */ - Scheduler_strong_APA_Node *caller[ RTEMS_ZERO_LENGTH_ARRAY ]; + Scheduler_Node *caller[ RTEMS_ZERO_LENGTH_ARRAY ]; } Scheduler_strong_APA_Caller; /** @@ -137,9 +100,46 @@ typedef struct * @brief Array of boolean each corresponding to the visited status of * Scheduler_strong_APA_Queue::Cpu at the same index */ - bool *visited[ RTEMS_ZERO_LENGTH_ARRAY ]; + bool visited[ RTEMS_ZERO_LENGTH_ARRAY ]; } Scheduler_strong_APA_Visited; + /** + * @brief Scheduler context for Strong APA scheduler. + * + * Has the structure for scheduler context + * and Node defintion for Strong APA scheduler + */ +typedef struct { + /** + * @brief SMP Context to refer to SMP implementation + * code. + */ + Scheduler_SMP_Context Base; + + /** + * @brief Chain of all the nodes present in + * the system. Accounts for ready and scheduled nodes. + */ + Chain_Control All_nodes; + + /** + * @brief Queue for this context + */ + Scheduler_strong_APA_Queue *queue; + + /** + * @brief Pointer to structure with array of + * boolean visited values + */ + Scheduler_strong_APA_Visited *visited; + + /** + * @brief Pointer to structure with array of + * caller corresponding to a CPU + */ + Scheduler_strong_APA_Caller *caller; +} Scheduler_strong_APA_Context; + /** * @brief Entry points for the Strong APA Scheduler. 
*/ From dc6477cda0d1dee8033717b83233cce5ad177e89 Mon Sep 17 00:00:00 2001 From: richidubey Date: Mon, 10 Aug 2020 18:07:33 +0530 Subject: [PATCH 11/29] Removed error from scheduler.h --- cpukit/include/rtems/scheduler.h | 2 +- cpukit/score/src/schedulerstrongapa.c | 252 ++++++++++---------------- 2 files changed, 97 insertions(+), 157 deletions(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index 3ec9b3aeabb..ea9119ba87d 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -259,7 +259,7 @@ Scheduler_strong_APA_Context Base; \ Scheduler_strong_APA_Queue Cpu[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ Scheduler_strong_APA_Caller caller[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ - Scheduler_strong_APA_Visited visited[ CONFIGURE_MAXIMUM_PROCESSORS ]; + Scheduler_strong_APA_Visited visited[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 9dd0ea610a7..417f366f6a8 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -7,17 +7,16 @@ */ /* - * Copyright (c) 2013, 2018 embedded brains GmbH, 2020 Richi Dubey. - * All rights reserved. - * + * Copyright (c) 2020 Richi Dubey + * richidubey@gmail.com + * + * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. + * * embedded brains GmbH * Dornierstr. 
4 * 82178 Puchheim * Germany * - * - * Richi Dubey - * * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at @@ -73,8 +72,8 @@ static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) Chain_Node *next; Scheduler_strong_APA_Node *node; - tail = _Chain_Immutable_tail( &self->allNodes ); - next = _Chain_First( &self->allNodes ); + tail = _Chain_Immutable_tail( &self->All_nodes ); + next = _Chain_First( &self->All_nodes ); ret = false; @@ -94,6 +93,26 @@ static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) return ret; } +static inline void _Scheduler_strong_APA_Allocate_processor( + Scheduler_Context *context, + Scheduler_Node *scheduled_base, + Scheduler_Node *victim_base, + Per_CPU_Control *victim_cpu +) +{ + Scheduler_strong_APA_Node *scheduled; + + (void) victim_base; + scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base ); + + _Scheduler_SMP_Allocate_processor_exact( + context, + &(scheduled->Base.Base), + NULL, + victim_cpu + ); +} + static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Context *context, Scheduler_Node *filter @@ -102,20 +121,24 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( //Plan for this function: (Pseudo Code): Scheduler_strong_APA_Context *self=_Scheduler_strong_APA_Get_self( context ); - CPU *Qcpu; Thread_Control *thread; Per_CPU_Control *thread_cpu; Per_CPU_Control *curr_CPU; Per_CPU_Control *assigned_cpu; + uint32_t index_assigned_cpu; Scheduler_Node *ret; - Priority_Control max_priority; + Priority_Control min_priority_num; Priority_Control curr_priority; Scheduler_SMP_Node_state curr_state; + Scheduler_Node *curr_node; const Chain_Node *tail; Chain_Node *next; - Per_CPU_Control *queue; //Array of Cpu that serves as a queue + Per_CPU_Control **queue; //Array of Cpu that serves as a queue bool *visited; //Array of bool values each corresponding to a cpu + //Node 
that has a CPU in its affinity set which gets used for backtracking. + Scheduler_Node **caller; //Caller node for a CPU. + //Denotes front and rear of the queue uint32_t front; uint32_t rear; @@ -125,6 +148,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( visited = self->visited->visited; queue = self->queue->Cpu; + caller = self->caller->caller; thread = filter->user; thread_cpu = _Thread_Get_CPU( thread ); @@ -132,8 +156,9 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( //Implement the BFS Algorithm for task departure //to get the highest ready task for a particular CPU - max_priority = _Scheduler_Node_get_priority( filter ); - max_priority = SCHEDULER_PRIORITY_PURIFY( max_priority ); + //Initialize the min_priority_num variable + min_priority_num = _Scheduler_Node_get_priority( filter ); + min_priority_num = SCHEDULER_PRIORITY_PURIFY( min_priority_num ); ret = filter; @@ -145,39 +170,37 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( curr_CPU = queue[ front ]; front = front + 1; - tail = _Chain_Immutable_tail( &self->allNodes ); - next = _Chain_First( &self->allNodes ); + tail = _Chain_Immutable_tail( &self->All_nodes ); + next = _Chain_First( &self->All_nodes ); while ( next != tail ) { Scheduler_strong_APA_Node *node; node = (Scheduler_strong_APA_Node *) next; + curr_node = (Scheduler_Node *) next; curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); if ( - _Processor_mask_Is_set(&node->affinity, _Per_CPU_Get_index( curr_CPU)) + _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU) ) ) { //Checks if the thread_CPU is in the affinity set of the node if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED) { assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); + index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); - if ( visited[ _Per_CPU_Get_index( assigned_cpu ) ] == false) { - Qcpu = rtems_malloc( sizeof(CPU) ); - //rtems_malloc does not return a errnum in case 
of failure - Qcpu->cpu=*assigned_cpu; - - _Chain_Initialize_node( &Qcpu->node ); - _Chain_Append_unprotected( &Queue, &Qcpu->node ); - //Insert thread_CPU in the Queue - visited[ _Per_CPU_Get_index (assigned_cpu) ]=true; + if ( visited[ index_assigned_cpu ] == false) { + rear = rear + 1; + queue[ rear ] = assigned_cpu; + visited[ index_assigned_cpu ] = true; + caller[ index_assigned_cpu ] = curr_node; } } else if ( curr_state == SCHEDULER_SMP_NODE_READY) { curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - if ( curr_priority<max_priority) { - max_priority = curr_priority; + if ( curr_priority<min_priority_num) { + min_priority_num = curr_priority; ret = &node->Base.Base; } } @@ -210,22 +233,22 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Priority_Control filter_priority; Per_CPU_Control *curr_CPU; - Per_CPU_Control *next_cpu; + Per_CPU_Control *next_CPU; Per_CPU_Control *cpu_to_preempt; Thread_Control *curr_thread; Scheduler_Node *curr_node; Scheduler_Node *ret; - Priority_Control max_priority; + Priority_Control max_priority_num; Priority_Control curr_priority; Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node Scheduler_strong_APA_Node *filter_node; Scheduler_strong_APA_Context *self; - Per_CPU_Control *queue; //Array of Cpu that serves as a queue + Per_CPU_Control **queue; //Array of Cpu that serves as a queue bool *visited; //Array of bool values each corresponding to a cpu. //Node that has a CPU in its affinity set which gets used for backtracking. - Scheduler_strong_APA_Node *caller; //Caller node for a CPU. + Scheduler_Node **caller; //Caller node for a CPU.
//Denotes front and rear of the queue uint32_t front; @@ -233,16 +256,16 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( front = 0; rear = -1; - + + self = _Scheduler_strong_APA_Get_self( context ); + visited = self->visited->visited; queue = self->queue->Cpu; - caller = Caller->caller; + caller = self->caller->caller; filter_priority = _Scheduler_Node_get_priority( filter_base ); filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); - - self = _Scheduler_strong_APA_Get_self( context ); - + ret = NULL; //To remove compiler warning. //ret would always point to the node with the lowest priority //node unless the affinity of filter_base is NULL. @@ -253,7 +276,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( //This assert makes sure that there always exist an element in the // Queue when we start the queue traversal. - _Assert( !_Processor_mask_Zero( &filter_node->affinity ) ); + _Assert( !_Processor_mask_Zero( &filter_node->Affinity ) ); cpu_max = _SMP_Get_processor_maximum(); @@ -261,7 +284,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( visited[ cpu_index ] = false; //Checks if the thread_CPU is in the affinity set of the node - if ( _Processor_mask_Is_set( &filter_node->affinity, cpu_index)) { + if ( _Processor_mask_Is_set( &filter_node->Affinity, cpu_index)) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) ) { @@ -297,7 +320,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( if ( !curr_thread->is_idle ) { for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - if ( _Processor_mask_Is_set( &Scurr_node->affinity, cpu_index ) ) { + if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { //Checks if the thread_CPU is in the affinity set of the node Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) && visited[ 
cpu_index ] == false ) { @@ -316,30 +339,30 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( //_Thread_Get_CPU(ret->user) to ret, shifting along every task curr_node = caller[ _Per_CPU_Get_index(cpu_to_preempt) ]; - curr_cpu = _Thread_Get_CPU( curr_node->user ); + curr_CPU = _Thread_Get_CPU( curr_node->user ); - curr_node = caller [ _Per_CPU_Get_index( curr_cpu ) ]; - curr_cpu = _Thread_Get_CPU( curr_node->user ); + curr_node = caller [ _Per_CPU_Get_index( curr_CPU ) ]; + curr_CPU = _Thread_Get_CPU( curr_node->user ); do{ - next_cpu = _Thread_Get_CPU( curr_node->user ); + next_CPU = _Thread_Get_CPU( curr_node->user ); _Scheduler_SMP_Preempt( context, curr_node, - _Thread_Scheduler_get_home_node( curr_cpu->executing ), + _Thread_Scheduler_get_home_node( curr_CPU->executing ), _Scheduler_strong_APA_Allocate_processor ); - curr_cpu = _Per_CPU_Get_index( next_cpu ); - curr_node = caller[ curr_cpu ]; + curr_CPU = next_CPU; + curr_node = caller[ _Per_CPU_Get_index( curr_CPU ) ]; }while( curr_node != filter_base ); _Scheduler_SMP_Preempt( context, curr_node, - _Thread_Scheduler_get_home_node( curr_cpu->executing ), + _Thread_Scheduler_get_home_node( curr_CPU->executing ), _Scheduler_strong_APA_Allocate_processor ); @@ -360,7 +383,7 @@ static inline void _Scheduler_strong_APA_Extract_from_scheduled( node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base ); - //Not removing it from allNodes since the node could go in the ready state. + //Not removing it from All_nodes since the node could go in the ready state. 
} static inline void _Scheduler_strong_APA_Extract_from_ready( @@ -374,13 +397,30 @@ static inline void _Scheduler_strong_APA_Extract_from_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); - _Assert( !_Chain_Is_empty(self->allNodes) ); + _Assert( !_Chain_Is_empty(self->All_nodes) ); _Assert( !_Chain_Is_node_off_chain( &node->Node ) ); - _Chain_Extract_unprotected( &node->Node ); //Removed from allNodes + _Chain_Extract_unprotected( &node->Node ); //Removed from All_nodes _Chain_Set_off_chain( &node->Node ); } +static inline void _Scheduler_strong_APA_Insert_ready( + Scheduler_Context *context, + Scheduler_Node *node_base, + Priority_Control insert_priority +) +{ + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; + + self = _Scheduler_strong_APA_Get_self( context ); + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + _Assert( !_Chain_Is_node_off_chain( &node->Node ) ); + + _Chain_Append_unprotected( &self->All_nodes, &node->Node ); +} + static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( Scheduler_Context *context, Scheduler_Node *scheduled_to_ready @@ -413,61 +453,7 @@ static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( ready_to_scheduled, insert_priority ); - //Note: The node still stays in the allNodes chain -} - -static inline void _Scheduler_strong_APA_Set_scheduled( - _Scheduler_strong_APA_Context *self, - _Scheduler_strong_APA_Node *scheduled, - const Per_CPU_Control *cpu -) -{ - self->CPU[ _Per_CPU_Get_index( cpu ) ].scheduled = scheduled; -} - -static inline Scheduler_EDF_SMP_Node *_Scheduler_strong_APA_Get_scheduled( - const _Scheduler_strong_APA_Context *self, - uint8_t cpu -) -{ - return self->CPU[ cpu ].scheduled; -} - -static inline void _Scheduler_strong_APA_Insert_ready( - Scheduler_Context *context, - Scheduler_Node *node_base, - Priority_Control insert_priority -) -{ - Scheduler_strong_APA_Context *self; - 
Scheduler_strong_APA_Node *node; - - self = _Scheduler_strong_APA_Get_self( context ); - node = _Scheduler_strong_APA_Node_downcast( node_base ); - - _Assert( !_Chain_Is_node_off_chain( &node->Node ) ); - - _Chain_Append_unprotected( &self->allNodes, &node->Node ); -} - -static inline void _Scheduler_strong_APA_Allocate_processor( - Scheduler_Context *context, - Scheduler_Node *scheduled_base, - Scheduler_Node *victim_base, - Per_CPU_Control *victim_cpu -) -{ - Scheduler_strong_APA_Node *scheduled; - - (void) victim_base; - scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base ); - - _Scheduler_SMP_Allocate_processor_exact( - context, - &(scheduled->Base.Base), - NULL, - victim_cpu - ); + //Note: The node still stays in the All_nodes chain } static inline bool _Scheduler_strong_APA_Enqueue( @@ -552,7 +538,7 @@ static inline void _Scheduler_strong_APA_Do_set_affinity( node = _Scheduler_strong_APA_Node_downcast( node_base ); affinity = arg; - node->affinity = *affinity; + node->Affinity = *affinity; } void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) @@ -561,7 +547,7 @@ void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) _Scheduler_strong_APA_Get_context( scheduler ); _Scheduler_SMP_Initialize( &self->Base ); - _Chain_Initialize_empty( &self->allNodes ); + _Chain_Initialize_empty( &self->All_nodes ); } void _Scheduler_strong_APA_Yield( @@ -589,7 +575,7 @@ void _Scheduler_strong_APA_Block( ) { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); -//The extract from ready automatically removes the node from allNodes chain. +//The extract from ready automatically removes the node from All_nodes chain. 
_Scheduler_SMP_Block( context, thread, @@ -691,51 +677,6 @@ void _Scheduler_strong_APA_Withdraw_node( ); } -void _Scheduler_strong_APA_Pin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -) -{ - Scheduler_strong_APA_Node *node; - uint32_t pin_cpu; - - (void) scheduler; - node = _Scheduler_strong_APA_Node_downcast( node_base ); - pin_cpu = (uint32_t) _Per_CPU_Get_index( cpu ); - - _Assert( - _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED - ); - - node = _Scheduler_strong_APA_Node_downcast( node_base ); - - _Processor_mask_Zero( &node->affinity ); - _Processor_mask_Set( &node->affinity, pin_cpu ); -} - -void _Scheduler_strong_APA_Unpin( - const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node_base, - struct Per_CPU_Control *cpu -) -{ - Scheduler_strong_APA_Node *node; - - (void) scheduler; - (void) cpu; - node = _Scheduler_strong_APA_Node_downcast( node_base ); - - _Assert( - _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED - ); - - _Processor_mask_Zero( &node->affinity ); - _Processor_mask_Assign( &node->affinity, &node->unpin_affinity ); -} - void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle @@ -818,11 +759,10 @@ bool _Scheduler_strong_APA_Set_affinity( node = _Scheduler_strong_APA_Node_downcast( node_base ); - if ( _Processor_mask_Is_equal( &node->affinity, affinity ) ) + if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) ) return true; //Nothing to do. Return true. 
- _Processor_mask_Assign( &node->affinity, &local_affinity ); - _Processor_mask_Assign( &node->unpin_affinity, &local_affinity ); + _Processor_mask_Assign( &node->Affinity, &local_affinity ); _Scheduler_SMP_Set_affinity( context, From 26444631846e7f1edf43ea8c9816e04518784537 Mon Sep 17 00:00:00 2001 From: richidubey Date: Thu, 13 Aug 2020 01:35:29 +0530 Subject: [PATCH 12/29] Finished logic for get_highest_ready --- cpukit/include/rtems/scheduler.h | 4 +- .../include/rtems/score/schedulerstrongapa.h | 10 ++- cpukit/score/src/schedulerstrongapa.c | 79 +++++++++++++------ 3 files changed, 65 insertions(+), 28 deletions(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index ea9119ba87d..1af55b28d69 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -266,9 +266,7 @@ { \ &SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Base.Base.Base, \ SCHEDULER_STRONG_APA_ENTRY_POINTS, \ - RTEMS_ARRAY_SIZE( \ - SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Ready \ - ) - 1, \ + SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY, \ ( obj_name ) \ SCHEDULER_CONTROL_IS_NON_PREEMPT_MODE_SUPPORTED( false ) \ } diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 92a7958e344..5169353e3f8 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -61,6 +61,12 @@ typedef struct { * @brief SMP scheduler node. */ Scheduler_SMP_Node Base; + + /** + * @brief CPU that invokes this node in the backtracking part of + * _Scheduler_strong_APA_Get_highest_ready. + */ + Per_CPU_Control *invoker; /** * @brief The associated affinity set of this node. 
@@ -135,11 +141,13 @@ typedef struct { /** * @brief Pointer to structure with array of - * caller corresponding to a CPU + * Scheduler_Node caller corresponding to a CPU */ Scheduler_strong_APA_Caller *caller; } Scheduler_strong_APA_Context; +#define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255 + /** * @brief Entry points for the Strong APA Scheduler. */ diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 417f366f6a8..5b9c9db2cf8 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -118,27 +118,27 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Node *filter ) //TODO { - //Plan for this function: (Pseudo Code): + //return the highest ready Scheduler_Node and Scheduler_Node filter here points + // to the victim node that is blocked. Scheduler_strong_APA_Context *self=_Scheduler_strong_APA_Get_self( context ); Thread_Control *thread; Per_CPU_Control *thread_cpu; Per_CPU_Control *curr_CPU; + Scheduler_strong_APA_Node *node; Per_CPU_Control *assigned_cpu; uint32_t index_assigned_cpu; - Scheduler_Node *ret; + Scheduler_Node *highest_ready; Priority_Control min_priority_num; Priority_Control curr_priority; Scheduler_SMP_Node_state curr_state; Scheduler_Node *curr_node; + Scheduler_Node *next_node; const Chain_Node *tail; Chain_Node *next; Per_CPU_Control **queue; //Array of Cpu that serves as a queue bool *visited; //Array of bool values each corresponding to a cpu - //Node that has a CPU in its affinity set which gets used for backtracking. - Scheduler_Node **caller; //Caller node for a CPU. 
- //Denotes front and rear of the queue uint32_t front; uint32_t rear; @@ -148,7 +148,6 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( visited = self->visited->visited; queue = self->queue->Cpu; - caller = self->caller->caller; thread = filter->user; thread_cpu = _Thread_Get_CPU( thread ); @@ -160,7 +159,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( min_priority_num = _Scheduler_Node_get_priority( filter ); min_priority_num = SCHEDULER_PRIORITY_PURIFY( min_priority_num ); - ret = filter; + highest_ready = filter; rear = rear + 1; queue[ rear ] = thread_cpu; @@ -174,7 +173,6 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( next = _Chain_First( &self->All_nodes ); while ( next != tail ) { - Scheduler_strong_APA_Node *node; node = (Scheduler_strong_APA_Node *) next; curr_node = (Scheduler_Node *) next; curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); @@ -182,26 +180,32 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( if ( _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU) ) ) { - //Checks if the thread_CPU is in the affinity set of the node + //Checks if the curr_CPU is in the affinity set of the node - if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED) { + if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); - if ( visited[ index_assigned_cpu ] == false) { + if ( visited[ index_assigned_cpu ] == false ) { + rear = rear + 1; queue[ rear ] = assigned_cpu; visited[ index_assigned_cpu ] = true; - caller[ index_assigned_cpu ] = curr_node; + // The curr CPU of the queue invoked this node to add its CPU + // that it is executing on to the queue. So this node might get + // preempted because of the invoker curr_CPU and this curr_CPU + // is the CPU that node should preempt in case this node + // gets preempted. 
+ node->invoker = curr_CPU; } } - else if ( curr_state == SCHEDULER_SMP_NODE_READY) { + else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - if ( curr_priority<min_priority_num) { + if ( curr_priority < min_priority_num ) { min_priority_num = curr_priority; - ret = &node->Base.Base; + highest_ready = &node->Base.Base; } } } @@ -209,16 +213,42 @@ } } - if ( ret != filter) - { + if ( highest_ready != filter ) { //Should always be true since highest ready + // node corresponding to an empty cpu must exist: TODO: See how. + //Backtrack on the path from - //thread_cpu to ret, shifting along every task. + //thread_cpu to highest_ready, shifting along every task. + + node = _Scheduler_strong_APA_Node_downcast( highest_ready ); + + if( node->invoker != thread_cpu ) { + // Highest ready is not just directly reachable from the victim cpu + // So there is need of task shifting + + do { + curr_node = &node->Base.Base; + //TODO: Put this in a module since it breaks line length + next_node = _Thread_Scheduler_get_home_node( node->invoker->executing ); - //After this, thread_cpu receives the ret task - // So the ready task ret gets scheduled as well.
+ + _Scheduler_SMP_Preempt( + context, + curr_node, + _Thread_Scheduler_get_home_node( node->invoker->executing ), + _Scheduler_strong_APA_Allocate_processor + ); + + node = _Scheduler_strong_APA_Node_downcast( next_node ); + }while( node->invoker != thread_cpu ); + //To save the last node so that the caller SMP_* function + //can do the allocation + + curr_node = &node->Base.Base; + highest_ready = curr_node; + } } - return ret; + return highest_ready; } static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( @@ -255,6 +285,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( uint32_t rear; front = 0; + rear = -1; self = _Scheduler_strong_APA_Get_self( context ); @@ -284,7 +315,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( visited[ cpu_index ] = false; //Checks if the thread_CPU is in the affinity set of the node - if ( _Processor_mask_Is_set( &filter_node->Affinity, cpu_index)) { + if ( _Processor_mask_Is_set( &filter_node->Affinity, cpu_index) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) ) { @@ -308,11 +339,11 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - if ( curr_priority > max_priority_num) { + if ( curr_priority > max_priority_num ) { ret = curr_node; max_priority_num = curr_priority; - if( curr_priority > filter_priority) + if( curr_priority > filter_priority ) { cpu_to_preempt = curr_CPU; } From 72781faf5ae14a997b55d60abcea602976e788d5 Mon Sep 17 00:00:00 2001 From: richidubey Date: Thu, 13 Aug 2020 01:51:20 +0530 Subject: [PATCH 13/29] Changed conf def in scheduler.h --- cpukit/include/rtems/scheduler.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index 1af55b28d69..b5c9328f5d4 100644 --- a/cpukit/include/rtems/scheduler.h +++ 
b/cpukit/include/rtems/scheduler.h @@ -257,9 +257,9 @@ #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \ static struct { \ Scheduler_strong_APA_Context Base; \ - Scheduler_strong_APA_Queue Cpu[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ - Scheduler_strong_APA_Caller caller[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ - Scheduler_strong_APA_Visited visited[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ + Per_CPU_Control *Cpu[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ + Scheduler_Node *caller[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ + bool visited[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ From b33f7a068dce03576dad4b77831a5e61a7854436 Mon Sep 17 00:00:00 2001 From: richidubey Date: Thu, 13 Aug 2020 13:54:23 +0530 Subject: [PATCH 14/29] Rename and remove whiteline --- cpukit/score/src/schedulerstrongapa.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 5b9c9db2cf8..813170c1964 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -146,7 +146,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( front = 0; rear = -1; - visited = self->visited->visited; + visited = self->visited->vis; queue = self->queue->Cpu; thread = filter->user; @@ -229,7 +229,6 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( curr_node = &node->Base.Base; //TODO: Put this in a module since it breaks line length next_node = _Thread_Scheduler_get_home_node( node->invoker->executing ); - _Scheduler_SMP_Preempt( context, @@ -290,9 +289,9 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( self = _Scheduler_strong_APA_Get_self( context ); - visited = self->visited->visited; + visited = self->visited->vis; queue = self->queue->Cpu; - caller = self->caller->caller; + caller = self->caller->call; filter_priority = 
_Scheduler_Node_get_priority( filter_base ); filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); From 03650c47c537c1acd584c5a9c3db588ab49634ef Mon Sep 17 00:00:00 2001 From: richidubey Date: Fri, 14 Aug 2020 15:14:09 +0530 Subject: [PATCH 15/29] New structure and definitions --- cpukit/include/rtems/scheduler.h | 4 +- .../include/rtems/score/schedulerstrongapa.h | 64 ++++++----------- cpukit/score/src/schedulerstrongapa.c | 70 +++++++++---------- 3 files changed, 56 insertions(+), 82 deletions(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index b5c9328f5d4..b6e1e83b3b4 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -257,9 +257,7 @@ #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \ static struct { \ Scheduler_strong_APA_Context Base; \ - Per_CPU_Control *Cpu[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ - Scheduler_Node *caller[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ - bool visited[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ + Scheduler_strong_APA_Struct Struct[ CONFIGURE_MAXIMUM_PROCESSORS ] \ } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 5169353e3f8..1b6ef93237d 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -8,7 +8,8 @@ /* * Copyright (c) 2020 Richi Dubey - * richidubey@gmail.com + * + * * * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. * @@ -52,16 +53,16 @@ extern "C" { * @brief Scheduler node specialization for Strong APA schedulers. */ typedef struct { - /** - * @brief Chain node for Scheduler_strong_APA_Context::allNodes - */ - Chain_Node Node; - /** * @brief SMP scheduler node. 
*/ Scheduler_SMP_Node Base; + /** + * @brief Chain node for Scheduler_strong_APA_Context::allNodes + */ + Chain_Node Node; + /** * @brief CPU that invokes this node in the backtracking part of * _Scheduler_strong_APA_Get_highest_ready. @@ -74,19 +75,9 @@ typedef struct { Processor_mask Affinity; } Scheduler_strong_APA_Node; -/** - * @brief CPU structure to be used while traversing in the FIFO Queue - */ -typedef struct -{ - /** - * @brief Array of Cpu pointers to be used for the queue operations - */ - Per_CPU_Control *Cpu[ RTEMS_ZERO_LENGTH_ARRAY ]; -} Scheduler_strong_APA_Queue; /** - * @brief Caller corresponding to a Cpu in Scheduler_strong_APA_Queue + * @brief Struct for each index of the different variable size arrays */ typedef struct { @@ -94,20 +85,19 @@ typedef struct * @brief Array of caller pointers with each pointer pointing to the * Scheduler_strong_APA_Queue::Cpu at the same index as the pointer */ - Scheduler_Node *caller[ RTEMS_ZERO_LENGTH_ARRAY ]; -} Scheduler_strong_APA_Caller; - -/** - * @brief to a Cpu in Scheduler_strong_APA_Queue - */ -typedef struct -{ - /** + Scheduler_Node *caller; + + /** + * @brief Array of Cpu pointers to be used for the queue operations + */ + Per_CPU_Control *Cpu; + + /** * @brief Array of boolean each corresponding to the visited status of * Scheduler_strong_APA_Queue::Cpu at the same index */ - bool visited[ RTEMS_ZERO_LENGTH_ARRAY ]; -} Scheduler_strong_APA_Visited; + bool visited; +} Scheduler_strong_APA_Struct; /** * @brief Scheduler context for Strong APA scheduler. @@ -127,23 +117,11 @@ typedef struct { * the system. Accounts for ready and scheduled nodes. 
*/ Chain_Control All_nodes; - + /** - * @brief Queue for this context + * @brief Struct with important variables for each cpu */ - Scheduler_strong_APA_Queue *queue; - - /** - * @brief Pointer to structure with array of - * boolean visited values - */ - Scheduler_strong_APA_Visited *visited; - - /** - * @brief Pointer to structure with array of - * Scheduler_Node caller corresponding to a CPU - */ - Scheduler_strong_APA_Caller *caller; + Scheduler_strong_APA_Struct Struct[ RTEMS_ZERO_LENGTH_ARRAY ]; } Scheduler_strong_APA_Context; #define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255 diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 813170c1964..93b29d8af90 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -6,9 +6,10 @@ * @brief Strong APA Scheduler Implementation */ -/* +/* * Copyright (c) 2020 Richi Dubey - * richidubey@gmail.com + * + * * * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. 
* @@ -136,18 +137,17 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Node *next_node; const Chain_Node *tail; Chain_Node *next; - Per_CPU_Control **queue; //Array of Cpu that serves as a queue - bool *visited; //Array of bool values each corresponding to a cpu + Scheduler_strong_APA_Struct *Struct; //Denotes front and rear of the queue uint32_t front; uint32_t rear; + uint32_t cpu_max; + uint32_t cpu_index; + front = 0; rear = -1; - - visited = self->visited->vis; - queue = self->queue->Cpu; thread = filter->user; thread_cpu = _Thread_Get_CPU( thread ); @@ -160,13 +160,19 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( min_priority_num = SCHEDULER_PRIORITY_PURIFY( min_priority_num ); highest_ready = filter; + Struct = self->Struct; + cpu_max = _SMP_Get_processor_maximum(); + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + Struct[ cpu_index ].visited = false; + } rear = rear + 1; - queue[ rear ] = thread_cpu; - visited[ _Per_CPU_Get_index( thread_cpu ) ]=true; + Struct[ rear ].Cpu = thread_cpu; + Struct[ _Per_CPU_Get_index( thread_cpu ) ].visited = true; while( front <= rear ) { - curr_CPU = queue[ front ]; + curr_CPU = Struct[ front ].Cpu; front = front + 1; tail = _Chain_Immutable_tail( &self->All_nodes ); @@ -186,11 +192,10 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); - if ( visited[ index_assigned_cpu ] == false ) { - + if ( Struct[ index_assigned_cpu ].visited == false ) { rear = rear + 1; - queue[ rear ] = assigned_cpu; - visited[ index_assigned_cpu ] = true; + Struct[ rear ].Cpu = assigned_cpu; + Struct[ index_assigned_cpu ].visited = true; // The curr CPU of the queue invoked this node to add its CPU // that it is executing on to the queue. 
So this node might get // preempted because of the invoker curr_CPU and this curr_CPU @@ -273,11 +278,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node Scheduler_strong_APA_Node *filter_node; Scheduler_strong_APA_Context *self; - Per_CPU_Control **queue; //Array of Cpu that serves as a queue - bool *visited; //Array of bool values each corresponding to a cpu. - - //Node that has a CPU in its affinity set which gets used for backtracking. - Scheduler_Node **caller; //Caller node for a CPU. + Scheduler_strong_APA_Struct *Struct; //Denotes front and rear of the queue uint32_t front; @@ -288,10 +289,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( rear = -1; self = _Scheduler_strong_APA_Get_self( context ); - - visited = self->visited->vis; - queue = self->queue->Cpu; - caller = self->caller->call; + Struct = self->Struct; filter_priority = _Scheduler_Node_get_priority( filter_base ); filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); @@ -311,7 +309,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( cpu_max = _SMP_Get_processor_maximum(); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - visited[ cpu_index ] = false; + Struct[ cpu_index ].visited = false; //Checks if the thread_CPU is in the affinity set of the node if ( _Processor_mask_Is_set( &filter_node->Affinity, cpu_index) ) { @@ -319,15 +317,15 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( if ( _Per_CPU_Is_processor_online( cpu ) ) { rear = rear + 1; - queue[ rear ] = cpu; - visited[ cpu_index ] = true; - caller[ cpu_index ] = filter_base; + Struct[ rear ].Cpu = cpu; + Struct[ cpu_index ].visited = true; + Struct[ cpu_index ].caller = filter_base; } } } while( front <= rear ) { - curr_CPU = queue[ front ]; + curr_CPU = Struct[ front ].Cpu; front = front + 1; curr_thread = curr_CPU->executing; @@ 
-353,11 +351,11 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { //Checks if the thread_CPU is in the affinity set of the node Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - if ( _Per_CPU_Is_processor_online( cpu ) && visited[ cpu_index ] == false ) { + if ( _Per_CPU_Is_processor_online( cpu ) && Struct[ cpu_index ].visited == false ) { rear = rear + 1; - queue[ rear ] = cpu; - visited[ cpu_index ] = true; - caller[ cpu_index ] = curr_node; + Struct[ rear ].Cpu = cpu; + Struct[ cpu_index ].visited = true; + Struct[ cpu_index ].caller = curr_node; } } } @@ -368,10 +366,10 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( //Backtrack on the path from //_Thread_Get_CPU(ret->user) to ret, shifting along every task - curr_node = caller[ _Per_CPU_Get_index(cpu_to_preempt) ]; + curr_node = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; curr_CPU = _Thread_Get_CPU( curr_node->user ); - curr_node = caller [ _Per_CPU_Get_index( curr_CPU ) ]; + curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; curr_CPU = _Thread_Get_CPU( curr_node->user ); do{ @@ -385,7 +383,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( ); curr_CPU = next_CPU; - curr_node = caller[ _Per_CPU_Get_index( curr_CPU ) ]; + curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; }while( curr_node != filter_base ); @@ -396,7 +394,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( _Scheduler_strong_APA_Allocate_processor ); - filter_base = caller[ _Per_CPU_Get_index(cpu_to_preempt) ]; + filter_base = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; } return ret; } From 4d93042539911d855699b7fb62c49f8807b5f2bd Mon Sep 17 00:00:00 2001 From: richidubey Date: Sat, 15 Aug 2020 14:41:23 +0530 Subject: [PATCH 16/29] Removing errors while trying to debug --- cpukit/score/src/schedulerstrongapa.c | 69 
+++++++++++++++++---------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 93b29d8af90..f403532ad64 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -120,7 +120,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( ) //TODO { //return the highest ready Scheduler_Node and Scheduler_Node filter here points - // to the victim node that is blocked. + // to the victim node that is blocked resulting which this function is called. Scheduler_strong_APA_Context *self=_Scheduler_strong_APA_Get_self( context ); Thread_Control *thread; @@ -184,8 +184,12 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); if ( - _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU) ) + _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU) || + //2nd condition is a hack for now. Since if it has no affinity (false case + // actually means has all affinity), then it has affinty for this cpu as well. Yay + _Processor_mask_Is_zero( &node->Affinity ) ) ) { + //Checks if the curr_CPU is in the affinity set of the node if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { @@ -369,32 +373,38 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( curr_node = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; curr_CPU = _Thread_Get_CPU( curr_node->user ); - curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; - curr_CPU = _Thread_Get_CPU( curr_node->user ); + //In case the lowest scheduled is on a processor which is directly + // reachable, there has to be no task shifting. 
+ if( curr_node != filter_base) { - do{ - next_CPU = _Thread_Get_CPU( curr_node->user ); - - _Scheduler_SMP_Preempt( - context, - curr_node, - _Thread_Scheduler_get_home_node( curr_CPU->executing ), - _Scheduler_strong_APA_Allocate_processor - ); - - curr_CPU = next_CPU; curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; + curr_CPU = _Thread_Get_CPU( curr_node->user ); + + do{ + next_CPU = _Thread_Get_CPU( curr_node->user ); + + _Scheduler_SMP_Preempt( + context, + curr_node, + _Thread_Scheduler_get_home_node( curr_CPU->executing ), + _Scheduler_strong_APA_Allocate_processor + ); + + curr_CPU = next_CPU; + curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; + + }while( curr_node != filter_base ); + + _Scheduler_SMP_Preempt( + context, + curr_node, + _Thread_Scheduler_get_home_node( curr_CPU->executing ), + _Scheduler_strong_APA_Allocate_processor + ); - }while( curr_node != filter_base ); - - _Scheduler_SMP_Preempt( - context, - curr_node, - _Thread_Scheduler_get_home_node( curr_CPU->executing ), - _Scheduler_strong_APA_Allocate_processor - ); - - filter_base = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; + filter_base = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; + } + } return ret; } @@ -744,9 +754,16 @@ void _Scheduler_strong_APA_Node_initialize( ) { Scheduler_SMP_Node *smp_node; + Scheduler_strong_APA_Node *strong_node; + + smp_node = _Scheduler_SMP_Node_downcast( node ); + strong_node = _Scheduler_strong_APA_Node_downcast( node ); - smp_node = _Scheduler_SMP_Node_downcast( node ); _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority ); + _Processor_mask_Assign( + &strong_node->Affinity, + _SMP_Get_online_processors() + ); } void _Scheduler_strong_APA_Start_idle( From c3162134d3fd7d4d46ec01609d81bdb62bc818af Mon Sep 17 00:00:00 2001 From: richidubey Date: Sun, 16 Aug 2020 23:00:00 +0530 Subject: [PATCH 17/29] Latest version Sun 16th Aug --- cpukit/score/src/schedulerstrongapa.c | 17 ++++++++++++----- 1 
file changed, 12 insertions(+), 5 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index f403532ad64..f64b854dec7 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -184,10 +184,10 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); if ( - _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU) || + _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU)) || //2nd condition is a hack for now. Since if it has no affinity (false case // actually means has all affinity), then it has affinty for this cpu as well. Yay - _Processor_mask_Is_zero( &node->Affinity ) ) + _Processor_mask_Is_zero( &node->Affinity ) ) { //Checks if the curr_CPU is in the affinity set of the node @@ -454,9 +454,8 @@ static inline void _Scheduler_strong_APA_Insert_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_base ); - _Assert( !_Chain_Is_node_off_chain( &node->Node ) ); - - _Chain_Append_unprotected( &self->All_nodes, &node->Node ); + if(_Chain_Is_node_off_chain( &node->Node ) ) + _Chain_Append_unprotected( &self->All_nodes, &node->Node ); } static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( @@ -760,10 +759,18 @@ void _Scheduler_strong_APA_Node_initialize( strong_node = _Scheduler_strong_APA_Node_downcast( node ); _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority ); + _Processor_mask_Assign( &strong_node->Affinity, _SMP_Get_online_processors() ); + + Scheduler_strong_APA_Context *self; + + self = _Scheduler_strong_APA_Get_self( scheduler->context ); + + if(_Chain_Is_node_off_chain( &strong_node->Node ) ) + _Chain_Append_unprotected( &self->All_nodes, &strong_node->Node ); } void _Scheduler_strong_APA_Start_idle( From e679ffa2044e264aad25c733300a5ae14a6b708d Mon Sep 17 00:00:00 2001 From: 
richidubey Date: Mon, 17 Aug 2020 14:11:21 +0530 Subject: [PATCH 18/29] Corrected error in getting node from the chain and getting its priority --- cpukit/score/src/schedulerstrongapa.c | 31 ++++++++++++++------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index f64b854dec7..7b12ff11735 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -79,7 +79,7 @@ static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) ret = false; while ( next != tail ) { - node = (Scheduler_strong_APA_Node *) next; + node = (Scheduler_strong_APA_Node *) RTEMS_CONTAINER_OF( next, Scheduler_strong_APA_Node, Chain ); if ( _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_READY @@ -146,6 +146,10 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( uint32_t cpu_max; uint32_t cpu_index; + //When the first task accessed has nothing to compare its priority against + // then it is the task with the highest priority witnessed so far! 
+ bool first_task = true; + front = 0; rear = -1; @@ -155,10 +159,6 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( //Implement the BFS Algorithm for task departure //to get the highest ready task for a particular CPU - //Initialize the min_priority_num variable - min_priority_num = _Scheduler_Node_get_priority( filter ); - min_priority_num = SCHEDULER_PRIORITY_PURIFY( min_priority_num ); - highest_ready = filter; Struct = self->Struct; cpu_max = _SMP_Get_processor_maximum(); @@ -179,7 +179,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( next = _Chain_First( &self->All_nodes ); while ( next != tail ) { - node = (Scheduler_strong_APA_Node *) next; + node = (Scheduler_strong_APA_Node*) RTEMS_CONTAINER_OF( next, Scheduler_strong_APA_Node, Chain ); curr_node = (Scheduler_Node *) next; curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); @@ -209,12 +209,13 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( } } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { - curr_priority = _Scheduler_Node_get_priority( (Scheduler_Node *) next ); + curr_priority = _Scheduler_Node_get_priority( &node->Base.Base ); curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - if ( curr_priority < min_priority_num ) { + if ( first_task == true || curr_priority < min_priority_num ) { min_priority_num = curr_priority; highest_ready = &node->Base.Base; + first_task = false; } } } @@ -436,10 +437,10 @@ static inline void _Scheduler_strong_APA_Extract_from_ready( node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); _Assert( !_Chain_Is_empty(self->All_nodes) ); - _Assert( !_Chain_Is_node_off_chain( &node->Node ) ); + _Assert( !_Chain_Is_node_off_chain( &node->Chain ) ); - _Chain_Extract_unprotected( &node->Node ); //Removed from All_nodes - _Chain_Set_off_chain( &node->Node ); + _Chain_Extract_unprotected( &node->Chain ); //Removed from All_nodes + _Chain_Set_off_chain( &node->Chain ); } static 
inline void _Scheduler_strong_APA_Insert_ready( @@ -454,8 +455,8 @@ static inline void _Scheduler_strong_APA_Insert_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_base ); - if(_Chain_Is_node_off_chain( &node->Node ) ) - _Chain_Append_unprotected( &self->All_nodes, &node->Node ); + if(_Chain_Is_node_off_chain( &node->Chain ) ) + _Chain_Append_unprotected( &self->All_nodes, &node->Chain ); } static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( @@ -769,8 +770,8 @@ void _Scheduler_strong_APA_Node_initialize( self = _Scheduler_strong_APA_Get_self( scheduler->context ); - if(_Chain_Is_node_off_chain( &strong_node->Node ) ) - _Chain_Append_unprotected( &self->All_nodes, &strong_node->Node ); + if(_Chain_Is_node_off_chain( &strong_node->Chain ) ) + _Chain_Append_unprotected( &self->All_nodes, &strong_node->Chain ); } void _Scheduler_strong_APA_Start_idle( From ae7945c935281013dd176d7cb0341a89f6e54e72 Mon Sep 17 00:00:00 2001 From: richidubey Date: Tue, 18 Aug 2020 17:51:16 +0530 Subject: [PATCH 19/29] Added missed semicolon --- cpukit/include/rtems/scheduler.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index b6e1e83b3b4..b101842ba7d 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -257,7 +257,7 @@ #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \ static struct { \ Scheduler_strong_APA_Context Base; \ - Scheduler_strong_APA_Struct Struct[ CONFIGURE_MAXIMUM_PROCESSORS ] \ + Scheduler_strong_APA_Struct Struct[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ From b94696c00c32bd9e5a74587af27a263794575919 Mon Sep 17 00:00:00 2001 From: richidubey Date: Tue, 18 Aug 2020 18:42:10 +0530 Subject: [PATCH 20/29] All errors resolved except assert for a task --- 
.../include/rtems/score/schedulerstrongapa.h | 2 +- cpukit/score/src/schedulerstrongapa.c | 61 ++++++------- testsuites/smptests/smpstrongapa01/init.c | 85 +++++++++++-------- 3 files changed, 81 insertions(+), 67 deletions(-) diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 1b6ef93237d..fd096f52864 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -61,7 +61,7 @@ typedef struct { /** * @brief Chain node for Scheduler_strong_APA_Context::allNodes */ - Chain_Node Node; + Chain_Node Chain; /** * @brief CPU that invokes this node in the backtracking part of diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 7b12ff11735..12d60afd808 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -216,6 +216,8 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( min_priority_num = curr_priority; highest_ready = &node->Base.Base; first_task = false; + //In case this task is directly reachable from thread_CPU + node->invoker = curr_CPU; } } } @@ -276,11 +278,13 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Per_CPU_Control *cpu_to_preempt; Thread_Control *curr_thread; Scheduler_Node *curr_node; + Scheduler_Node *next_node; Scheduler_Node *ret; Priority_Control max_priority_num; Priority_Control curr_priority; Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node + Scheduler_strong_APA_Node *next_strong_node; //Next Strong_APA_Node Scheduler_strong_APA_Node *filter_node; Scheduler_strong_APA_Context *self; Scheduler_strong_APA_Struct *Struct; @@ -372,41 +376,38 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( //_Thread_Get_CPU(ret->user) to ret, shifting along every task curr_node = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; - curr_CPU = _Thread_Get_CPU( 
curr_node->user ); //In case the lowest scheduled is on a processor which is directly // reachable, there has to be no task shifting. - if( curr_node != filter_base) { + //And ret gets directly preempted in smpimpl caller function of this function + if( curr_node != filter_base) { + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + curr_strong_node->invoker = cpu_to_preempt; + + do{ + curr_CPU = _Thread_Get_CPU( curr_node->user ); + curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + curr_strong_node->invoker = curr_CPU; + }while( curr_node != filter_base ); - curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; - curr_CPU = _Thread_Get_CPU( curr_node->user ); - - do{ - next_CPU = _Thread_Get_CPU( curr_node->user ); - - _Scheduler_SMP_Preempt( + while( curr_node != Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller ) { + next_node = _Thread_Scheduler_get_home_node( curr_strong_node->invoker->executing); + //curr_node preempts the next_node; + _Scheduler_SMP_Preempt( context, curr_node, - _Thread_Scheduler_get_home_node( curr_CPU->executing ), + next_node, _Scheduler_strong_APA_Allocate_processor ); - - curr_CPU = next_CPU; - curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; - - }while( curr_node != filter_base ); - - _Scheduler_SMP_Preempt( - context, - curr_node, - _Thread_Scheduler_get_home_node( curr_CPU->executing ), - _Scheduler_strong_APA_Allocate_processor - ); - - filter_base = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; + + curr_node = next_node; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); } - - } + + filter_base = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; + } + } return ret; } @@ -768,10 +769,10 @@ void _Scheduler_strong_APA_Node_initialize( Scheduler_strong_APA_Context *self; - self = _Scheduler_strong_APA_Get_self( scheduler->context ); - - if(_Chain_Is_node_off_chain( 
&strong_node->Chain ) ) - _Chain_Append_unprotected( &self->All_nodes, &strong_node->Chain ); +/* self = _Scheduler_strong_APA_Get_self( scheduler->context );*/ +/* */ +/* if(_Chain_Is_node_off_chain( &strong_node->Chain ) )*/ +/* _Chain_Append_unprotected( &self->All_nodes, &strong_node->Chain );*/ } void _Scheduler_strong_APA_Start_idle( diff --git a/testsuites/smptests/smpstrongapa01/init.c b/testsuites/smptests/smpstrongapa01/init.c index bf8bc052310..89cc6404b98 100644 --- a/testsuites/smptests/smpstrongapa01/init.c +++ b/testsuites/smptests/smpstrongapa01/init.c @@ -1,38 +1,40 @@ /* - * Copyright (c) 2016, 2017 embedded brains GmbH. All rights reserved. + * Copyright (c) 2020 Richi Dubey + * All rights reserved. * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * + * richidubey@gmail.com * * The license and distribution terms for this file may be * found in the file LICENSE in this distribution or at * http://www.rtems.org/license/LICENSE. */ - #ifdef HAVE_CONFIG_H #include "config.h" #endif -#include "tmacros.h" +#include #include const char rtems_test_name[] = "SMPSTRONGAPA 1"; -#define CPU_COUNT 4 +#define CPU_COUNT 3 -#define TASK_COUNT (3 * CPU_COUNT) +#define TASK_COUNT 4 #define P(i) (UINT32_C(2) + i) #define ALL ((UINT32_C(1) << CPU_COUNT) - 1) -#define IDLE UINT8_C(255) +#define A(cpu0, cpu1, cpu2) ((cpu2 << 2) | (cpu1 << 1) | cpu0) -#define NAME rtems_build_name('S', 'A', 'P', 'A') +typedef enum { + T0, + T1, + T2, + T3, + IDLE +} task_index; typedef struct { enum { @@ -43,7 +45,7 @@ typedef struct { KIND_UNBLOCK } kind; - size_t index; + task_index index; struct { rtems_task_priority priority; @@ -65,54 +67,62 @@ typedef struct { KIND_RESET, \ 0, \ { 0 }, \ - { IDLE, IDLE, IDLE, IDLE } \ + { IDLE, IDLE, IDLE} \ } -#define SET_PRIORITY(index, prio, cpu0, cpu1, cpu2, cpu3) \ +#define SET_PRIORITY(index, prio, cpu0, cpu1, cpu2) \ { \ KIND_SET_PRIORITY, \ index, \ { .priority = prio }, \ - { cpu0, cpu1, cpu2, cpu3 } \ + { 
cpu0, cpu1, cpu2 } \ } -#define SET_AFFINITY(index, aff, cpu0, cpu1, cpu2, cpu3) \ +#define SET_AFFINITY(index, aff, cpu0, cpu1, cpu2) \ { \ KIND_SET_AFFINITY, \ index, \ { .cpu_set = aff }, \ - { cpu0, cpu1, cpu2, cpu3 } \ + { cpu0, cpu1, cpu2 } \ } -#define BLOCK(index, cpu0, cpu1, cpu2, cpu3) \ +#define BLOCK(index, cpu0, cpu1, cpu2) \ { \ KIND_BLOCK, \ index, \ { 0 }, \ - { cpu0, cpu1, cpu2, cpu3 } \ + { cpu0, cpu1, cpu2 } \ } -#define UNBLOCK(index, cpu0, cpu1, cpu2, cpu3) \ +#define UNBLOCK(index, cpu0, cpu1, cpu2) \ { \ KIND_UNBLOCK, \ index, \ { 0 }, \ - { cpu0, cpu1, cpu2, cpu3 } \ + { cpu0, cpu1, cpu2 } \ } static const test_action test_actions[] = { RESET, - UNBLOCK( 0, 0, IDLE, IDLE, IDLE), - UNBLOCK( 1, 0, 1, IDLE, IDLE), - UNBLOCK( 2, 0, 1, 2, IDLE), - UNBLOCK( 3, 0, 1, 2, 3), - UNBLOCK( 5, 0, 1, 2, 3), - SET_PRIORITY( 3, P(4), 0, 1, 2, 3), - SET_PRIORITY( 5, P(3), 0, 1, 2, 5), - BLOCK( 5, 0, 1, 2, 3), - SET_AFFINITY( 5, ALL, 0, 1, 2, 3), - RESET, - UNBLOCK( 0, 0, IDLE, IDLE, IDLE), + UNBLOCK( T0, T0, IDLE, IDLE), + UNBLOCK( T1, T0, T1, IDLE), + UNBLOCK( T2, T0, T1, T2), + UNBLOCK( T3, T0, T1, T2), + SET_PRIORITY( T0, P(0), T0, T1, T2), + SET_PRIORITY( T1, P(1), T0, T1, T2), + SET_PRIORITY( T3, P(3), T0, T1, T2), + /* + * Introduce Task 2 intially with lowest priority to imitate late arrival + */ + SET_PRIORITY( T2, P(4), T0, T1, T3), + SET_AFFINITY( T0, ALL, T0, T1, T3), + SET_AFFINITY( T1, A(0, 1, 1), T0, T1, T3), + SET_AFFINITY( T2, A(1, 0, 0), T0, T1, T3), + SET_AFFINITY( T3, A(0, 1, 1), T0, T1, T3), + /* + * Show that higher priority task gets dislodged from its processor + */ + SET_PRIORITY( T2, P(2), T2, T1, T0), RESET }; @@ -182,7 +192,7 @@ static void check_cpu_allocations(test_context *ctx, const test_action *action) size_t i; for (i = 0; i < CPU_COUNT; ++i) { - size_t e; + task_index e; const Per_CPU_Control *c; const Thread_Control *h; @@ -279,7 +289,7 @@ static void test(void) for (i = 0; i < TASK_COUNT; ++i) { sc = rtems_task_create( - 
NAME, + rtems_build_name(' ', ' ', 'T', '0' + i), P(i), RTEMS_MINIMUM_STACK_SIZE, RTEMS_DEFAULT_MODES, @@ -292,7 +302,10 @@ static void test(void) rtems_test_assert(sc == RTEMS_SUCCESSFUL); } - sc = rtems_timer_create(NAME, &ctx->timer_id); + sc = rtems_timer_create( + rtems_build_name('A', 'C', 'T', 'N'), + &ctx->timer_id + ); rtems_test_assert(sc == RTEMS_SUCCESSFUL); sc = rtems_timer_fire_after(ctx->timer_id, 1, timer, ctx); From 6e455a5d77417dcbc2f00330ebc37a7a143c5384 Mon Sep 17 00:00:00 2001 From: richidubey Date: Wed, 19 Aug 2020 12:22:32 +0530 Subject: [PATCH 21/29] BWorking backtracking code for both the algorithms --- cpukit/score/src/schedulerstrongapa.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 12d60afd808..93012b60971 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -290,7 +290,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_strong_APA_Struct *Struct; //Denotes front and rear of the queue - uint32_t front; + uint32_t front; uint32_t rear; front = 0; @@ -405,7 +405,14 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); } - filter_base = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; + _Scheduler_SMP_Preempt( + context, + curr_node, + ret, + _Scheduler_strong_APA_Allocate_processor + ); + + ret = filter_base; } } return ret; From 03d08d02b3e61570f2022845caa44ec6a261f677 Mon Sep 17 00:00:00 2001 From: richidubey Date: Thu, 20 Aug 2020 18:56:59 +0530 Subject: [PATCH 22/29] Completed transformation of Get_lowest_scheduled --- cpukit/score/src/schedulerstrongapa.c | 325 ++++++++++++++------------ 1 file changed, 176 insertions(+), 149 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 
93012b60971..8067eaff7f3 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -267,155 +267,47 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_Node *filter_base ) { - //Idea: BFS Algorithm for task arrival + //Checks the lowest scheduled directly reachable task uint32_t cpu_max; uint32_t cpu_index; - Priority_Control filter_priority; - - Per_CPU_Control *curr_CPU; - Per_CPU_Control *next_CPU; - Per_CPU_Control *cpu_to_preempt; + Thread_Control *curr_thread; Scheduler_Node *curr_node; - Scheduler_Node *next_node; - Scheduler_Node *ret; + Scheduler_Node *lowest_scheduled; Priority_Control max_priority_num; Priority_Control curr_priority; + Scheduler_strong_APA_Node *filter_strong_node; - Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node - Scheduler_strong_APA_Node *next_strong_node; //Next Strong_APA_Node - Scheduler_strong_APA_Node *filter_node; - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Struct *Struct; - - //Denotes front and rear of the queue - uint32_t front; - uint32_t rear; - - front = 0; - - rear = -1; - - self = _Scheduler_strong_APA_Get_self( context ); - Struct = self->Struct; - - filter_priority = _Scheduler_Node_get_priority( filter_base ); - filter_priority = SCHEDULER_PRIORITY_PURIFY( filter_priority ); - - ret = NULL; //To remove compiler warning. - //ret would always point to the node with the lowest priority - //node unless the affinity of filter_base is NULL. - - filter_node = _Scheduler_strong_APA_Node_downcast( filter_base ); - + lowest_scheduled = NULL; //To remove compiler warning. max_priority_num = 0;//Max (Lowest) priority encountered so far. + filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base ); - //This assert makes sure that there always exist an element in the - // Queue when we start the queue traversal. 
- _Assert( !_Processor_mask_Zero( &filter_node->Affinity ) ); - + //lowest_scheduled is NULL if affinty of a node is 0 + _Assert( !_Processor_mask_Zero( &filter_strong_node->Affinity ) ); cpu_max = _SMP_Get_processor_maximum(); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - Struct[ cpu_index ].visited = false; - - //Checks if the thread_CPU is in the affinity set of the node - if ( _Processor_mask_Is_set( &filter_node->Affinity, cpu_index) ) { + + //Checks if the CPU is in the affinity set of filter_node + if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - if ( _Per_CPU_Is_processor_online( cpu ) ) { - rear = rear + 1; - Struct[ rear ].Cpu = cpu; - Struct[ cpu_index ].visited = true; - Struct[ cpu_index ].caller = filter_base; + if ( _Per_CPU_Is_processor_online( cpu ) ) { + curr_thread = cpu->executing; + curr_node = _Thread_Scheduler_get_home_node( curr_thread ); + curr_priority = _Scheduler_Node_get_priority( curr_node ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if ( curr_priority > max_priority_num ) { + lowest_scheduled = curr_node; + max_priority_num = curr_priority; + } } } } - while( front <= rear ) { - curr_CPU = Struct[ front ].Cpu; - front = front + 1; - - curr_thread = curr_CPU->executing; - curr_node = _Thread_Scheduler_get_home_node( curr_thread ); - - curr_priority = _Scheduler_Node_get_priority( curr_node ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - - curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - - if ( curr_priority > max_priority_num ) { - ret = curr_node; - max_priority_num = curr_priority; - - if( curr_priority > filter_priority ) - { - cpu_to_preempt = curr_CPU; - } - } - - if ( !curr_thread->is_idle ) { - for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { - //Checks if the thread_CPU is in 
the affinity set of the node - Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - if ( _Per_CPU_Is_processor_online( cpu ) && Struct[ cpu_index ].visited == false ) { - rear = rear + 1; - Struct[ rear ].Cpu = cpu; - Struct[ cpu_index ].visited = true; - Struct[ cpu_index ].caller = curr_node; - } - } - } - } - } - - if( ret->Priority.value > filter_priority ) { - //Backtrack on the path from - //_Thread_Get_CPU(ret->user) to ret, shifting along every task - - curr_node = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; - - //In case the lowest scheduled is on a processor which is directly - // reachable, there has to be no task shifting. - //And ret gets directly preempted in smpimpl caller function of this function - if( curr_node != filter_base) { - curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - curr_strong_node->invoker = cpu_to_preempt; - - do{ - curr_CPU = _Thread_Get_CPU( curr_node->user ); - curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; - curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - curr_strong_node->invoker = curr_CPU; - }while( curr_node != filter_base ); - - while( curr_node != Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller ) { - next_node = _Thread_Scheduler_get_home_node( curr_strong_node->invoker->executing); - //curr_node preempts the next_node; - _Scheduler_SMP_Preempt( - context, - curr_node, - next_node, - _Scheduler_strong_APA_Allocate_processor - ); - - curr_node = next_node; - curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - } - - _Scheduler_SMP_Preempt( - context, - curr_node, - ret, - _Scheduler_strong_APA_Allocate_processor - ); - - ret = filter_base; - } - } - return ret; + return lowest_scheduled; } static inline void _Scheduler_strong_APA_Extract_from_scheduled( @@ -507,18 +399,160 @@ static inline bool _Scheduler_strong_APA_Enqueue( Scheduler_Node *node, Priority_Control insert_priority ) -{//I'm hoping all this works on its own. 
- return _Scheduler_SMP_Enqueue( - context, - node, - insert_priority, - _Scheduler_SMP_Priority_less_equal, - _Scheduler_strong_APA_Insert_ready, - _Scheduler_SMP_Insert_scheduled, - _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_strong_APA_Get_lowest_scheduled, - _Scheduler_strong_APA_Allocate_processor - ); +{ + //Idea: BFS Algorithm for task arrival + //Enqueue node either in the scheduled chain or in the ready chain + //node is the newly arrived node and is not scheduled. + + uint32_t cpu_max; + uint32_t cpu_index; + + Per_CPU_Control *curr_CPU; + Per_CPU_Control *cpu_to_preempt; + Thread_Control *curr_thread; + Scheduler_Node *curr_node; + Scheduler_Node *next_node; + Scheduler_Node *lowest_reachable; + Priority_Control max_priority_num; + Priority_Control curr_priority; + Priority_Control node_priority; + Priority_Control lowest_priority; + + Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node + Scheduler_strong_APA_Node *filter_node; + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Struct *Struct; + bool needs_help; + + //Denotes front and rear of the queue + uint32_t front; + uint32_t rear; + + front = 0; + rear = -1; + + self = _Scheduler_strong_APA_Get_self( context ); + Struct = self->Struct; + + filter_node = _Scheduler_strong_APA_Node_downcast( node ); + + max_priority_num = 0;//Max (Lowest) priority encountered so far. + + //This assert makes sure that there always exist an element in the + // Queue when we start the queue traversal. 
+ _Assert( !_Processor_mask_Zero( &filter_node->Affinity ) ); + + cpu_max = _SMP_Get_processor_maximum(); + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + Struct[ cpu_index ].visited = false; + + //Checks if the thread_CPU is in the affinity set of the node + if ( _Processor_mask_Is_set( &filter_node->Affinity, cpu_index) ) { + Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); + + if ( _Per_CPU_Is_processor_online( cpu ) ) { + rear = rear + 1; + Struct[ rear ].Cpu = cpu; + Struct[ cpu_index ].visited = true; + Struct[ cpu_index ].caller = node; + } + } + } + + while( front <= rear ) { + curr_CPU = Struct[ front ].Cpu; + front = front + 1; + + curr_thread = curr_CPU->executing; + curr_node = _Thread_Scheduler_get_home_node( curr_thread ); + + curr_priority = _Scheduler_Node_get_priority( curr_node ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + + if ( curr_priority > max_priority_num ) { + lowest_reachable = curr_node; + max_priority_num = curr_priority; + cpu_to_preempt = curr_CPU; + } + + if ( !curr_thread->is_idle ) { + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { + //Checks if the thread_CPU is in the affinity set of the node + Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); + if ( _Per_CPU_Is_processor_online( cpu ) && Struct[ cpu_index ].visited == false ) { + rear = rear + 1; + Struct[ rear ].Cpu = cpu; + Struct[ cpu_index ].visited = true; + Struct[ cpu_index ].caller = curr_node; + } + } + } + } + } + + + node_priority = _Scheduler_Node_get_priority( node ); + node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); + + lowest_priority = _Scheduler_Node_get_priority( lowest_reachable ); + lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority ); + + if( lowest_priority > node_priority ) { + //Backtrack on the path from + 
//_Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting along every task + + curr_node = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + curr_strong_node->invoker = cpu_to_preempt; + + //Save which cpu to preempt in invoker value of the node + while( curr_node != node ) { + curr_CPU = _Thread_Get_CPU( curr_node->user ); + curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + curr_strong_node->invoker = curr_CPU; + } + + //Stop just before the last preemption + while( curr_node != Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller) { + next_node = _Thread_Scheduler_get_home_node( curr_strong_node->invoker->executing); + //curr_node preempts the next_node; + _Scheduler_SMP_Preempt( + context, + curr_node, + next_node, + _Scheduler_strong_APA_Allocate_processor + ); + + curr_node = next_node; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + } + + node_priority = _Scheduler_Node_get_priority( curr_node ); + node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); + + _Scheduler_SMP_Enqueue_to_scheduled( + context, + curr_node, + node_priority, + lowest_reachable, + _Scheduler_SMP_Insert_scheduled, + _Scheduler_strong_APA_Move_from_scheduled_to_ready, + _Scheduler_strong_APA_Allocate_processor + ); + + + needs_help = false; + } else { + _Scheduler_strong_APA_Insert_ready(context,node,insert_priority); + needs_help = true; + } + + return needs_help; } static inline bool _Scheduler_strong_APA_Enqueue_scheduled( @@ -773,13 +807,6 @@ void _Scheduler_strong_APA_Node_initialize( &strong_node->Affinity, _SMP_Get_online_processors() ); - - Scheduler_strong_APA_Context *self; - -/* self = _Scheduler_strong_APA_Get_self( scheduler->context );*/ -/* */ -/* if(_Chain_Is_node_off_chain( &strong_node->Chain ) )*/ -/* _Chain_Append_unprotected( &self->All_nodes, &strong_node->Chain );*/ 
 }
 
 void _Scheduler_strong_APA_Start_idle(

From 418d60c3ffb919dbf2284c01fd0ce2b625361d13 Mon Sep 17 00:00:00 2001
From: richidubey
Date: Sun, 23 Aug 2020 00:17:03 +0530
Subject: [PATCH 23/29] Version 1.3 release

---
 .../include/rtems/score/schedulerstrongapa.h  |  33 +-
 cpukit/score/src/schedulerstrongapa.c         | 380 ++++++++++--------
 2 files changed, 241 insertions(+), 172 deletions(-)

diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h
index fd096f52864..86d91688e86 100644
--- a/cpukit/include/rtems/score/schedulerstrongapa.h
+++ b/cpukit/include/rtems/score/schedulerstrongapa.h
@@ -35,6 +35,9 @@
 extern "C" {
 #endif /* __cplusplus */
 
+#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
+  RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Chain )
+
 /**
  * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler
  *
@@ -46,6 +49,10 @@
  * Cerqueira et al. in Linux's Processor Affinity API, Refined:
  * Shifting Real-Time Tasks Towards Higher Schedulability.
  *
+ * This is an implementation of the Strong APA scheduler defined by
+ * Cerqueira et al. in Linux's Processor Affinity API, Refined:
+ * Shifting Real-Time Tasks Towards Higher Schedulability.
+ *
  * @{
  */
 
@@ -59,15 +66,16 @@ typedef struct {
   Scheduler_SMP_Node Base;
 
   /**
-   * @brief Chain node for Scheduler_strong_APA_Context::allNodes
+   * @brief Chain node for Scheduler_strong_APA_Context::All_nodes
    */
   Chain_Node Chain;
 
   /**
-   * @brief CPU that invokes this node in the backtracking part of
-   * _Scheduler_strong_APA_Get_highest_ready.
+   * @brief CPU that this node would preempt in the backtracking part of
+   * _Scheduler_strong_APA_Get_highest_ready and
+   * _Scheduler_strong_APA_Do_Enqueue.
    */
-  Per_CPU_Control *invoker;
+  Per_CPU_Control *invoker;
 
   /**
    * @brief The associated affinity set of this node.
@@ -77,24 +85,27 @@ typedef struct { /** - * @brief Struct for each index of the different variable size arrays + * @brief Struct for each index of the variable size arrays */ typedef struct { /** - * @brief Array of caller pointers with each pointer pointing to the - * Scheduler_strong_APA_Queue::Cpu at the same index as the pointer + * @brief The node that called this CPU, i.e. a node which has + * the cpu at the index of Scheduler_strong_APA_Context::Struct in + * its affinity set. */ Scheduler_Node *caller; /** - * @brief Array of Cpu pointers to be used for the queue operations + * @brief Cpu at the index of Scheduler_strong_APA_Context::Struct + * in Queue implementation. */ - Per_CPU_Control *Cpu; + Per_CPU_Control *cpu; /** - * @brief Array of boolean each corresponding to the visited status of - * Scheduler_strong_APA_Queue::Cpu at the same index + * @brief Indicates if the CPU at the index of + * Scheduler_strong_APA_Context::Struct is already + * added to the Queue or not. */ bool visited; } Scheduler_strong_APA_Struct; diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 8067eaff7f3..7a09edeb2ad 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -79,8 +79,8 @@ static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) ret = false; while ( next != tail ) { - node = (Scheduler_strong_APA_Node *) RTEMS_CONTAINER_OF( next, Scheduler_strong_APA_Node, Chain ); - + node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next ); + if ( _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_READY ) { @@ -114,31 +114,101 @@ static inline void _Scheduler_strong_APA_Allocate_processor( ); } +static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( + Scheduler_strong_APA_Context *self, + uint32_t front, + uint32_t rear +) +{ + Scheduler_Node *highest_ready; + Scheduler_strong_APA_Struct *Struct; + const Chain_Node 
*tail; + Chain_Node *next; + uint32_t index_assigned_cpu; + Scheduler_strong_APA_Node *node; + Priority_Control min_priority_num; + Priority_Control curr_priority; + Per_CPU_Control *assigned_cpu; + Scheduler_SMP_Node_state curr_state; + Per_CPU_Control *curr_CPU; + bool first_task; + + Struct = self->Struct; + //When the first task accessed has nothing to compare its priority against + // So, it is the task with the highest priority witnessed so far! + first_task = true; + + while( front <= rear ) { + curr_CPU = Struct[ front ].cpu; + front = front + 1; + + tail = _Chain_Immutable_tail( &self->All_nodes ); + next = _Chain_First( &self->All_nodes ); + + while ( next != tail ) { + node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next ); + //Check if the curr_CPU is in the affinity set of the node + if ( + _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU)) + ) { + curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); + + if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { + assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); + index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); + + if ( Struct[ index_assigned_cpu ].visited == false ) { + rear = rear + 1; + Struct[ rear ].cpu = assigned_cpu; + Struct[ index_assigned_cpu ].visited = true; + // The curr CPU of the queue invoked this node to add its CPU + // that it is executing on to the queue. So this node might get + // preempted because of the invoker curr_CPU and this curr_CPU + // is the CPU that node should preempt in case this node + // gets preempted. 
+ node->invoker = curr_CPU; + } + } + else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { + curr_priority = _Scheduler_Node_get_priority( &node->Base.Base ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if ( first_task == true || curr_priority < min_priority_num ) { + min_priority_num = curr_priority; + highest_ready = &node->Base.Base; + first_task = false; + //In case this task is directly reachable from thread_CPU + node->invoker = curr_CPU; + } + } + } + next = _Chain_Next( next ); + } + } + + return highest_ready; +} + static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Context *context, Scheduler_Node *filter -) //TODO +) { + + //Implement the BFS Algorithm for task departure + //to get the highest ready task for a particular CPU //return the highest ready Scheduler_Node and Scheduler_Node filter here points // to the victim node that is blocked resulting which this function is called. - Scheduler_strong_APA_Context *self=_Scheduler_strong_APA_Get_self( context ); - - Thread_Control *thread; - Per_CPU_Control *thread_cpu; - Per_CPU_Control *curr_CPU; + Scheduler_strong_APA_Context *self; + + Per_CPU_Control *filter_cpu; Scheduler_strong_APA_Node *node; - Per_CPU_Control *assigned_cpu; - uint32_t index_assigned_cpu; Scheduler_Node *highest_ready; - Priority_Control min_priority_num; - Priority_Control curr_priority; - Scheduler_SMP_Node_state curr_state; Scheduler_Node *curr_node; Scheduler_Node *next_node; - const Chain_Node *tail; - Chain_Node *next; Scheduler_strong_APA_Struct *Struct; + self=_Scheduler_strong_APA_Get_self( context ); //Denotes front and rear of the queue uint32_t front; uint32_t rear; @@ -146,20 +216,10 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( uint32_t cpu_max; uint32_t cpu_index; - //When the first task accessed has nothing to compare its priority against - // then it is the task with the highest priority witnessed so far! 
- bool first_task = true; - front = 0; rear = -1; - thread = filter->user; - thread_cpu = _Thread_Get_CPU( thread ); - - //Implement the BFS Algorithm for task departure - //to get the highest ready task for a particular CPU - - highest_ready = filter; + filter_cpu = _Thread_Get_CPU( filter->user ); Struct = self->Struct; cpu_max = _SMP_Get_processor_maximum(); @@ -168,89 +228,38 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( } rear = rear + 1; - Struct[ rear ].Cpu = thread_cpu; - Struct[ _Per_CPU_Get_index( thread_cpu ) ].visited = true; - - while( front <= rear ) { - curr_CPU = Struct[ front ].Cpu; - front = front + 1; - - tail = _Chain_Immutable_tail( &self->All_nodes ); - next = _Chain_First( &self->All_nodes ); - - while ( next != tail ) { - node = (Scheduler_strong_APA_Node*) RTEMS_CONTAINER_OF( next, Scheduler_strong_APA_Node, Chain ); - curr_node = (Scheduler_Node *) next; - curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); - - if ( - _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU)) || - //2nd condition is a hack for now. Since if it has no affinity (false case - // actually means has all affinity), then it has affinty for this cpu as well. Yay - _Processor_mask_Is_zero( &node->Affinity ) - ) { - - //Checks if the curr_CPU is in the affinity set of the node - - if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { - assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); - index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); - - if ( Struct[ index_assigned_cpu ].visited == false ) { - rear = rear + 1; - Struct[ rear ].Cpu = assigned_cpu; - Struct[ index_assigned_cpu ].visited = true; - // The curr CPU of the queue invoked this node to add its CPU - // that it is executing on to the queue. So this node might get - // preempted because of the invoker curr_CPU and this curr_CPU - // is the CPU that node should preempt in case this node - // gets preempted. 
- node->invoker = curr_CPU; - } - } - else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { - curr_priority = _Scheduler_Node_get_priority( &node->Base.Base ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - - if ( first_task == true || curr_priority < min_priority_num ) { - min_priority_num = curr_priority; - highest_ready = &node->Base.Base; - first_task = false; - //In case this task is directly reachable from thread_CPU - node->invoker = curr_CPU; - } - } - } - next = _Chain_Next( next ); - } - } + Struct[ rear ].cpu = filter_cpu; + Struct[ _Per_CPU_Get_index( filter_cpu ) ].visited = true; - if ( highest_ready != filter ) { //Should always be true since highest ready - // node corresponding to an empty cpu must exist: TODO: See how. + highest_ready = _Scheduler_strong_APA_Find_highest_ready( + self, + front, + rear + ); + if ( highest_ready != filter ) { //Backtrack on the path from - //thread_cpu to highest_ready, shifting along every task. + //filter_cpu to highest_ready, shifting along every task. 
node = _Scheduler_strong_APA_Node_downcast( highest_ready ); - if( node->invoker != thread_cpu ) { + if( node->invoker != filter_cpu ) { // Highest ready is not just directly reachable from the victim cpu // So there is need of task shifting do { curr_node = &node->Base.Base; - //TODO: Put this in a module since it breaks line length - next_node = _Thread_Scheduler_get_home_node( node->invoker->executing ); + next_node = _Thread_Scheduler_get_home_node( node->invoker->heir ); _Scheduler_SMP_Preempt( context, curr_node, - _Thread_Scheduler_get_home_node( node->invoker->executing ), + _Thread_Scheduler_get_home_node( node->invoker->heir ), _Scheduler_strong_APA_Allocate_processor ); node = _Scheduler_strong_APA_Node_downcast( next_node ); - }while( node->invoker != thread_cpu ); + }while( node->invoker != filter_cpu ); //To save the last node so that the caller SMP_* function //can do the allocation @@ -289,12 +298,12 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - //Checks if the CPU is in the affinity set of filter_node + //Checks if the CPU is in the affinity set of filter_strong_node if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) ) { - curr_thread = cpu->executing; + curr_thread = cpu->heir; curr_node = _Thread_Scheduler_get_home_node( curr_thread ); curr_priority = _Scheduler_Node_get_priority( curr_node ); curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); @@ -394,77 +403,33 @@ static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( //Note: The node still stays in the All_nodes chain } -static inline bool _Scheduler_strong_APA_Enqueue( - Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control insert_priority +static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( + 
Scheduler_strong_APA_Context *self, + uint32_t front, + uint32_t rear, + Per_CPU_Control **cpu_to_preempt ) { - //Idea: BFS Algorithm for task arrival - //Enqueue node either in the scheduled chain or in the ready chain - //node is the newly arrived node and is not scheduled. - - uint32_t cpu_max; - uint32_t cpu_index; - - Per_CPU_Control *curr_CPU; - Per_CPU_Control *cpu_to_preempt; - Thread_Control *curr_thread; - Scheduler_Node *curr_node; - Scheduler_Node *next_node; - Scheduler_Node *lowest_reachable; - Priority_Control max_priority_num; - Priority_Control curr_priority; - Priority_Control node_priority; - Priority_Control lowest_priority; - - Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node - Scheduler_strong_APA_Node *filter_node; - Scheduler_strong_APA_Context *self; + Scheduler_Node *lowest_reachable; + Priority_Control max_priority_num; + uint32_t cpu_max; + uint32_t cpu_index; + Thread_Control *curr_thread; + Per_CPU_Control *curr_CPU; + Priority_Control curr_priority; + Scheduler_Node *curr_node; + Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node Scheduler_strong_APA_Struct *Struct; - bool needs_help; - - //Denotes front and rear of the queue - uint32_t front; - uint32_t rear; - - front = 0; - rear = -1; - - self = _Scheduler_strong_APA_Get_self( context ); - Struct = self->Struct; - - filter_node = _Scheduler_strong_APA_Node_downcast( node ); - + max_priority_num = 0;//Max (Lowest) priority encountered so far. - - //This assert makes sure that there always exist an element in the - // Queue when we start the queue traversal. 
- _Assert( !_Processor_mask_Zero( &filter_node->Affinity ) ); - + Struct = self->Struct; cpu_max = _SMP_Get_processor_maximum(); - for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - Struct[ cpu_index ].visited = false; - - //Checks if the thread_CPU is in the affinity set of the node - if ( _Processor_mask_Is_set( &filter_node->Affinity, cpu_index) ) { - Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - - if ( _Per_CPU_Is_processor_online( cpu ) ) { - rear = rear + 1; - Struct[ rear ].Cpu = cpu; - Struct[ cpu_index ].visited = true; - Struct[ cpu_index ].caller = node; - } - } - } - while( front <= rear ) { - curr_CPU = Struct[ front ].Cpu; + curr_CPU = Struct[ front ].cpu; front = front + 1; - curr_thread = curr_CPU->executing; + curr_thread = curr_CPU->heir; curr_node = _Thread_Scheduler_get_home_node( curr_thread ); curr_priority = _Scheduler_Node_get_priority( curr_node ); @@ -475,7 +440,7 @@ static inline bool _Scheduler_strong_APA_Enqueue( if ( curr_priority > max_priority_num ) { lowest_reachable = curr_node; max_priority_num = curr_priority; - cpu_to_preempt = curr_CPU; + *cpu_to_preempt = curr_CPU; } if ( !curr_thread->is_idle ) { @@ -485,7 +450,7 @@ static inline bool _Scheduler_strong_APA_Enqueue( Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) && Struct[ cpu_index ].visited == false ) { rear = rear + 1; - Struct[ rear ].Cpu = cpu; + Struct[ rear ].cpu = cpu; Struct[ cpu_index ].visited = true; Struct[ cpu_index ].caller = curr_node; } @@ -494,6 +459,30 @@ static inline bool _Scheduler_strong_APA_Enqueue( } } + return lowest_reachable; +} + +static inline bool _Scheduler_strong_APA_Do_enqueue( + Scheduler_Context *context, + Scheduler_Node *lowest_reachable, + Scheduler_Node *node, + Priority_Control insert_priority, + Per_CPU_Control *cpu_to_preempt +) +{ + bool needs_help; + Priority_Control node_priority; + Priority_Control lowest_priority; + Scheduler_strong_APA_Struct *Struct; 
+ Scheduler_Node *curr_node; + Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node + Per_CPU_Control *curr_CPU; + Thread_Control *next_thread; + Scheduler_strong_APA_Context *self; + Scheduler_Node *next_node; + + self = _Scheduler_strong_APA_Get_self( context ); + Struct = self->Struct; node_priority = _Scheduler_Node_get_priority( node ); node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); @@ -503,7 +492,8 @@ static inline bool _Scheduler_strong_APA_Enqueue( if( lowest_priority > node_priority ) { //Backtrack on the path from - //_Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting along every task + //_Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting + //along every task curr_node = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); @@ -519,7 +509,8 @@ static inline bool _Scheduler_strong_APA_Enqueue( //Stop just before the last preemption while( curr_node != Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller) { - next_node = _Thread_Scheduler_get_home_node( curr_strong_node->invoker->executing); + next_thread = curr_strong_node->invoker->heir; + next_node = _Thread_Scheduler_get_home_node( next_thread ); //curr_node preempts the next_node; _Scheduler_SMP_Preempt( context, @@ -544,17 +535,84 @@ static inline bool _Scheduler_strong_APA_Enqueue( _Scheduler_strong_APA_Move_from_scheduled_to_ready, _Scheduler_strong_APA_Allocate_processor ); - needs_help = false; } else { - _Scheduler_strong_APA_Insert_ready(context,node,insert_priority); needs_help = true; } + //Add it to All_nodes chain since it is now either scheduled or just ready. 
+ _Scheduler_strong_APA_Insert_ready(context,node,insert_priority); + return needs_help; } +static inline bool _Scheduler_strong_APA_Enqueue( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control insert_priority +) +{ + //Idea: BFS Algorithm for task arrival + //Enqueue node either in the scheduled chain or in the ready chain + //node is the newly arrived node and is not scheduled. + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Struct *Struct; + uint32_t cpu_max; + uint32_t cpu_index; + Per_CPU_Control *cpu_to_preempt; + Scheduler_Node *lowest_reachable; + Scheduler_strong_APA_Node *strong_node; + + //Denotes front and rear of the queue + uint32_t front; + uint32_t rear; + + front = 0; + rear = -1; + + self = _Scheduler_strong_APA_Get_self( context ); + Struct = self->Struct; + + strong_node = _Scheduler_strong_APA_Node_downcast( node ); + cpu_max = _SMP_Get_processor_maximum(); + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + Struct[ cpu_index ].visited = false; + + //Checks if the thread_CPU is in the affinity set of the node + if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index) ) { + Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); + + if ( _Per_CPU_Is_processor_online( cpu ) ) { + rear = rear + 1; + Struct[ rear ].cpu = cpu; + Struct[ cpu_index ].visited = true; + Struct[ cpu_index ].caller = node; + } + } + } + + //This assert makes sure that there always exist an element in the + // Queue when we start the queue traversal. 
+ _Assert( !_Processor_mask_Zero( &strong_node->Affinity ) ); + + lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable( + self, + front, + rear, + &cpu_to_preempt + ); + + return _Scheduler_strong_APA_Do_enqueue( + context, + lowest_reachable, + node, + insert_priority, + cpu_to_preempt + ); +} + static inline bool _Scheduler_strong_APA_Enqueue_scheduled( Scheduler_Context *context, Scheduler_Node *node, @@ -864,7 +922,7 @@ bool _Scheduler_strong_APA_Set_affinity( _Scheduler_strong_APA_Enqueue, _Scheduler_strong_APA_Allocate_processor ); - + return true; } From d1b3a98da2ae546b8a739a0ee595a3b59724bdf5 Mon Sep 17 00:00:00 2001 From: richidubey Date: Sun, 23 Aug 2020 01:34:58 +0530 Subject: [PATCH 24/29] Version 1.3 Updated --- cpukit/score/src/schedulerstrongapa.c | 35 +++++++++++++++------------ 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 7a09edeb2ad..ed86e2531a4 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -506,9 +506,27 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); curr_strong_node->invoker = curr_CPU; } + + next_thread = curr_strong_node->invoker->heir; + next_node = _Thread_Scheduler_get_home_node( next_thread ); + + node_priority = _Scheduler_Node_get_priority( curr_node ); + node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); + + _Scheduler_SMP_Enqueue_to_scheduled( + context, + curr_node, + node_priority, + next_node, + _Scheduler_SMP_Insert_scheduled, + _Scheduler_strong_APA_Move_from_scheduled_to_ready, + _Scheduler_strong_APA_Allocate_processor + ); - //Stop just before the last preemption - while( curr_node != Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller) { + curr_node = next_node; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + + while( curr_node != lowest_reachable) { 
next_thread = curr_strong_node->invoker->heir; next_node = _Thread_Scheduler_get_home_node( next_thread ); //curr_node preempts the next_node; @@ -523,19 +541,6 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); } - node_priority = _Scheduler_Node_get_priority( curr_node ); - node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); - - _Scheduler_SMP_Enqueue_to_scheduled( - context, - curr_node, - node_priority, - lowest_reachable, - _Scheduler_SMP_Insert_scheduled, - _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_strong_APA_Allocate_processor - ); - needs_help = false; } else { needs_help = true; From 9bb37330e2c91a2fbd5bf3236770acb06d3466b3 Mon Sep 17 00:00:00 2001 From: richidubey Date: Sun, 23 Aug 2020 01:48:22 +0530 Subject: [PATCH 25/29] Final update to version 1.3 --- cpukit/score/src/schedulerstrongapa.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index ed86e2531a4..92f28802388 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -541,6 +541,8 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); } + _Scheduler_strong_APA_Move_from_scheduled_to_ready(context, lowest_reachable); + needs_help = false; } else { needs_help = true; From c28b76a0183b2d2b108eefc7de1fb5b10838eb9f Mon Sep 17 00:00:00 2001 From: richidubey Date: Sun, 23 Aug 2020 02:32:34 +0530 Subject: [PATCH 26/29] Final update to v1.3 --- cpukit/score/src/schedulerstrongapa.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 92f28802388..c31dc1f34c9 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -246,8 +246,20 @@ static inline 
Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( if( node->invoker != filter_cpu ) { // Highest ready is not just directly reachable from the victim cpu // So there is need of task shifting + + curr_node = &node->Base.Base; + next_node = _Thread_Scheduler_get_home_node( node->invoker->heir ); - do { + _Scheduler_SMP_Preempt( + context, + curr_node, + _Thread_Scheduler_get_home_node( node->invoker->heir ), + _Scheduler_strong_APA_Allocate_processor + ); + + node = _Scheduler_strong_APA_Node_downcast( next_node ); + + while( node->invoker != filter_cpu ){ curr_node = &node->Base.Base; next_node = _Thread_Scheduler_get_home_node( node->invoker->heir ); @@ -256,10 +268,10 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( curr_node, _Thread_Scheduler_get_home_node( node->invoker->heir ), _Scheduler_strong_APA_Allocate_processor - ); + ); - node = _Scheduler_strong_APA_Node_downcast( next_node ); - }while( node->invoker != filter_cpu ); + node = _Scheduler_strong_APA_Node_downcast( next_node ); + } //To save the last node so that the caller SMP_* function //can do the allocation From 83b59a5aef901a5c6ecd916a9fc2d7a96470d088 Mon Sep 17 00:00:00 2001 From: richidubey Date: Mon, 24 Aug 2020 21:47:05 +0530 Subject: [PATCH 27/29] Indentation corrections and minor bug fix --- cpukit/score/src/schedulerstrongapa.c | 165 ++++++++++++-------------- 1 file changed, 79 insertions(+), 86 deletions(-) diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index c31dc1f34c9..a3f4c49cabb 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -68,9 +68,9 @@ static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) { Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self( context ); - bool ret; - const Chain_Node *tail; - Chain_Node *next; + bool ret; + const Chain_Node *tail; + Chain_Node *next; Scheduler_strong_APA_Node *node; tail = 
_Chain_Immutable_tail( &self->All_nodes ); @@ -150,43 +150,59 @@ static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( //Check if the curr_CPU is in the affinity set of the node if ( _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU)) - ) { - curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); + ) { + curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); - if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { - assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); - index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); + if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { + assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); + index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); - if ( Struct[ index_assigned_cpu ].visited == false ) { - rear = rear + 1; - Struct[ rear ].cpu = assigned_cpu; - Struct[ index_assigned_cpu ].visited = true; - // The curr CPU of the queue invoked this node to add its CPU - // that it is executing on to the queue. So this node might get - // preempted because of the invoker curr_CPU and this curr_CPU - // is the CPU that node should preempt in case this node - // gets preempted. - node->invoker = curr_CPU; - } - } - else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { - curr_priority = _Scheduler_Node_get_priority( &node->Base.Base ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - - if ( first_task == true || curr_priority < min_priority_num ) { - min_priority_num = curr_priority; - highest_ready = &node->Base.Base; - first_task = false; - //In case this task is directly reachable from thread_CPU - node->invoker = curr_CPU; - } - } + if ( Struct[ index_assigned_cpu ].visited == false ) { + rear = rear + 1; + Struct[ rear ].cpu = assigned_cpu; + Struct[ index_assigned_cpu ].visited = true; + // The curr CPU of the queue invoked this node to add its CPU + // that it is executing on to the queue. 
So this node might get + // preempted because of the invoker curr_CPU and this curr_CPU + // is the CPU that node should preempt in case this node + // gets preempted. + node->invoker = curr_CPU; + } + } + else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { + curr_priority = _Scheduler_Node_get_priority( &node->Base.Base ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if ( first_task == true || curr_priority < min_priority_num ) { + min_priority_num = curr_priority; + highest_ready = &node->Base.Base; + first_task = false; + //In case this task is directly reachable from thread_CPU + node->invoker = curr_CPU; + } } - next = _Chain_Next( next ); - } - } + } + next = _Chain_Next( next ); + } + } - return highest_ready; + return highest_ready; +} + +static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( + Scheduler_Context *context, + Scheduler_Node *ready_to_scheduled +) +{ + Priority_Control insert_priority; + + insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled ); + insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority ); + _Scheduler_SMP_Insert_scheduled( + context, + ready_to_scheduled, + insert_priority + ); } static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( @@ -194,28 +210,24 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Node *filter ) { - //Implement the BFS Algorithm for task departure //to get the highest ready task for a particular CPU //return the highest ready Scheduler_Node and Scheduler_Node filter here points // to the victim node that is blocked resulting which this function is called. 
Scheduler_strong_APA_Context *self; - - Per_CPU_Control *filter_cpu; - Scheduler_strong_APA_Node *node; - Scheduler_Node *highest_ready; - Scheduler_Node *curr_node; - Scheduler_Node *next_node; - Scheduler_strong_APA_Struct *Struct; + Per_CPU_Control *filter_cpu; + Scheduler_strong_APA_Node *node; + Scheduler_Node *highest_ready; + Scheduler_Node *curr_node; + Scheduler_Node *next_node; + Scheduler_strong_APA_Struct *Struct; + uint32_t front; + uint32_t rear; + uint32_t cpu_max; + uint32_t cpu_index; self=_Scheduler_strong_APA_Get_self( context ); //Denotes front and rear of the queue - uint32_t front; - uint32_t rear; - - uint32_t cpu_max; - uint32_t cpu_index; - front = 0; rear = -1; @@ -224,7 +236,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( cpu_max = _SMP_Get_processor_maximum(); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - Struct[ cpu_index ].visited = false; + Struct[ cpu_index ].visited = false; } rear = rear + 1; @@ -237,7 +249,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( rear ); - if ( highest_ready != filter ) { + if ( highest_ready != filter ) { //Backtrack on the path from //filter_cpu to highest_ready, shifting along every task. 
@@ -256,6 +268,8 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( _Thread_Scheduler_get_home_node( node->invoker->heir ), _Scheduler_strong_APA_Allocate_processor ); + + _Scheduler_strong_APA_Move_from_ready_to_scheduled(context, curr_node); node = _Scheduler_strong_APA_Node_downcast( next_node ); @@ -268,9 +282,9 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( curr_node, _Thread_Scheduler_get_home_node( node->invoker->heir ), _Scheduler_strong_APA_Allocate_processor - ); + ); - node = _Scheduler_strong_APA_Node_downcast( next_node ); + node = _Scheduler_strong_APA_Node_downcast( next_node ); } //To save the last node so that the caller SMP_* function //can do the allocation @@ -290,14 +304,13 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( { //Checks the lowest scheduled directly reachable task - uint32_t cpu_max; - uint32_t cpu_index; - - Thread_Control *curr_thread; - Scheduler_Node *curr_node; - Scheduler_Node *lowest_scheduled; - Priority_Control max_priority_num; - Priority_Control curr_priority; + uint32_t cpu_max; + uint32_t cpu_index; + Thread_Control *curr_thread; + Scheduler_Node *curr_node; + Scheduler_Node *lowest_scheduled; + Priority_Control max_priority_num; + Priority_Control curr_priority; Scheduler_strong_APA_Node *filter_strong_node; lowest_scheduled = NULL; //To remove compiler warning. 
@@ -309,7 +322,6 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( cpu_max = _SMP_Get_processor_maximum(); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - //Checks if the CPU is in the affinity set of filter_strong_node if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); @@ -397,24 +409,6 @@ static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( ); } -static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( - Scheduler_Context *context, - Scheduler_Node *ready_to_scheduled -) -{ - Priority_Control insert_priority; - - _Scheduler_strong_APA_Extract_from_ready( context, ready_to_scheduled ); - insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled ); - insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority ); - _Scheduler_SMP_Insert_scheduled( - context, - ready_to_scheduled, - insert_priority - ); - //Note: The node still stays in the All_nodes chain -} - static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( Scheduler_strong_APA_Context *self, uint32_t front, @@ -591,10 +585,9 @@ static inline bool _Scheduler_strong_APA_Enqueue( rear = -1; self = _Scheduler_strong_APA_Get_self( context ); - Struct = self->Struct; - strong_node = _Scheduler_strong_APA_Node_downcast( node ); cpu_max = _SMP_Get_processor_maximum(); + Struct = self->Struct; for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { Struct[ cpu_index ].visited = false; @@ -629,7 +622,7 @@ static inline bool _Scheduler_strong_APA_Enqueue( node, insert_priority, cpu_to_preempt - ); + ); } static inline bool _Scheduler_strong_APA_Enqueue_scheduled( @@ -691,7 +684,7 @@ static inline void _Scheduler_strong_APA_Do_set_affinity( ) { Scheduler_strong_APA_Node *node; - const Processor_mask *affinity; + const Processor_mask *affinity; node = _Scheduler_strong_APA_Node_downcast( node_base ); affinity = arg; @@ -732,7 +725,7 @@ 
void _Scheduler_strong_APA_Block( ) { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); -//The extract from ready automatically removes the node from All_nodes chain. + //The extract from ready automatically removes the node from All_nodes chain. _Scheduler_SMP_Block( context, thread, From 4bbf5d9abe1e16bd942b9806e0be7efcd0cbfe7f Mon Sep 17 00:00:00 2001 From: richidubey Date: Wed, 26 Aug 2020 21:02:12 +0530 Subject: [PATCH 28/29] Revisions on review --- cpukit/include/rtems/scheduler.h | 2 +- .../include/rtems/score/schedulerstrongapa.h | 258 ++++---- cpukit/score/src/schedulerstrongapa.c | 564 +++++++++--------- 3 files changed, 411 insertions(+), 413 deletions(-) diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index b101842ba7d..6a05c2798a1 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -257,7 +257,7 @@ #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \ static struct { \ Scheduler_strong_APA_Context Base; \ - Scheduler_strong_APA_Struct Struct[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ + Scheduler_strong_APA_CPU CPU[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 86d91688e86..2476984cc4a 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -5,25 +5,34 @@ * * @brief Strong APA Scheduler API */ - -/* - * Copyright (c) 2020 Richi Dubey + +/* SPDX-License-Identifier: BSD-2-Clause * - * + * Copyright (C) 2020 Richi Dubey + * Copyright (c) 2013, 2018 embedded brains GmbH * - * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ - + #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H @@ -36,7 +45,7 @@ extern "C" { #endif /* __cplusplus */ #define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \ - RTEMS_CONTAINER_OF( next, Scheduler_strong_APA_Node, Chain ) + RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Ready_node ) /** * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler @@ -49,10 +58,12 @@ extern "C" { * Cerqueira et al. in Linux's Processor Affinity API, Refined: * Shifting Real-Time Tasks Towards Higher Schedulability. 
* - * This is an implementation of the Strong APA scheduler defined by - * Cerqueira et al. in Linux's Processor Affinity API, Refined: - * Shifting Real-Time Tasks Towards Higher Schedulability. - * + * The scheduled and ready nodes are accessed via the + * Scheduler_strong_APA_Context::Ready which helps in backtracking when a + * node which is executing on a CPU gets blocked. New node is allocated to + * the cpu by checking all the executing nodes in the affinity set of the + * node and the subsequent nodes executing on the processors in its + * affinity set. * @{ */ @@ -66,16 +77,16 @@ typedef struct { Scheduler_SMP_Node Base; /** - * @brief Chain node for Scheduler_strong_APA_Context::All_nodes + * @brief Chain node for Scheduler_strong_APA_Context::Ready. */ - Chain_Node Chain; + Chain_Node Ready_node; /** * @brief CPU that this node would preempt in the backtracking part of * _Scheduler_strong_APA_Get_highest_ready and * _Scheduler_strong_APA_Do_Enqueue. */ - Per_CPU_Control *invoker; + Per_CPU_Control *cpu_to_preempt; /** * @brief The associated affinity set of this node. @@ -85,54 +96,51 @@ typedef struct { /** - * @brief Struct for each index of the variable size arrays + * @brief CPU related variables and a CPU_Control to implement BFS. */ typedef struct { - /** - * @brief The node that called this CPU, i.e. a node which has - * the cpu at the index of Scheduler_strong_APA_Context::Struct in - * its affinity set. + /** + * @brief CPU in a queue. */ - Scheduler_Node *caller; + Per_CPU_Control *cpu; - /** - * @brief Cpu at the index of Scheduler_strong_APA_Context::Struct - * in Queue implementation. + /** + * @brief The node that would preempt this CPU. */ - Per_CPU_Control *cpu; + Scheduler_Node *preempting_node; - /** - * @brief Indicates if the CPU at the index of - * Scheduler_strong_APA_Context::Struct is already - * added to the Queue or not. + /** + * @brief Whether or not this cpu has been added to the queue + * (visited in BFS). 
*/ bool visited; -} Scheduler_strong_APA_Struct; + + /** + * @brief The node currently executing on this cpu + */ + Scheduler_Node *executing; +} Scheduler_strong_APA_CPU; /** - * @brief Scheduler context for Strong APA scheduler. - * - * Has the structure for scheduler context - * and Node defintion for Strong APA scheduler + * @brief Scheduler context and node definition for Strong APA scheduler. */ typedef struct { /** - * @brief SMP Context to refer to SMP implementation - * code. + * @brief @see Scheduler_SMP_Context. */ Scheduler_SMP_Context Base; /** - * @brief Chain of all the nodes present in - * the system. Accounts for ready and scheduled nodes. + * @brief Chain of all the ready and scheduled nodes present in + * the Strong APA scheduler. */ - Chain_Control All_nodes; + Chain_Control Ready; /** - * @brief Struct with important variables for each cpu + * @brief Struct with important variables for each cpu. */ - Scheduler_strong_APA_Struct Struct[ RTEMS_ZERO_LENGTH_ARRAY ]; + Scheduler_strong_APA_CPU CPU[ RTEMS_ZERO_LENGTH_ARRAY ]; } Scheduler_strong_APA_Context; #define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255 @@ -162,90 +170,81 @@ typedef struct { _Scheduler_default_Release_job, \ _Scheduler_default_Cancel_job, \ _Scheduler_default_Tick, \ - _Scheduler_strong_APA_Start_idle, \ + _Scheduler_SMP_Start_idle, \ _Scheduler_strong_APA_Set_affinity \ } /** - * @brief Initializes the Strong_APA scheduler. - * - * Sets the chain containing all the nodes to empty - * and initializes the SMP scheduler. + * @brief Initializes the scheduler. * - * @param scheduler used to get reference to Strong APA scheduler context - * @retval void - * @see _Scheduler_strong_APA_Node_initialize() - */ -void _Scheduler_strong_APA_Initialize( - const Scheduler_Control *scheduler -); - + * @param scheduler The scheduler to initialize. 
+ */ +void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ); + /** - * @brief Called when a node yields the processor + * @brief Initializes the node with the given priority. * * @param scheduler The scheduler control instance. - * @param thread Thread corresponding to @node - * @param node Node that yield the processor + * @param[out] node The node to initialize. + * @param the_thread The thread of the node to initialize. + * @param priority The priority for @a node. */ -void _Scheduler_strong_APA_Yield( +void _Scheduler_strong_APA_Node_initialize( const Scheduler_Control *scheduler, - Thread_Control *thread, - Scheduler_Node *node + Scheduler_Node *node, + Thread_Control *the_thread, + Priority_Control priority ); /** - * @brief Blocks a node - * - * Changes the state of the node and extracts it from the queue - * calls _Scheduler_SMP_Block(). + * @brief Blocks the thread. * - * @param context The scheduler control instance. - * @param thread Thread correspoding to the @node. - * @param node node which is to be blocked - */ + * @param scheduler The scheduler control instance. + * @param[in, out] the_thread The thread to block. + * @param[in, out] node The node of the thread to block. + */ void _Scheduler_strong_APA_Block( const Scheduler_Control *scheduler, - Thread_Control *thread, + Thread_Control *the_thread, Scheduler_Node *node ); /** - * @brief Unblocks a node - * - * Changes the state of the node and calls _Scheduler_SMP_Unblock(). + * @brief Unblocks the thread. * * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. - * @param node node which is to be unblocked - * @see _Scheduler_strong_APA_Enqueue() - * @see _Scheduler_strong_APA_Do_update() - */ + * @param[in, out] the_thread The thread to unblock. + * @param[in, out] node The node of the thread to unblock. 
+ */ void _Scheduler_strong_APA_Unblock( const Scheduler_Control *scheduler, - Thread_Control *thread, + Thread_Control *the_thread, Scheduler_Node *node ); /** - * @brief Updates the priority of the node + * @brief Updates the priority of the node. * * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. - * @param node Node whose priority has to be updated - */ + * @param the_thread The thread for the operation. + * @param[in, out] node The node to update the priority of. + */ void _Scheduler_strong_APA_Update_priority( const Scheduler_Control *scheduler, - Thread_Control *thread, + Thread_Control *the_thread, Scheduler_Node *node ); /** - * @brief Calls the SMP Ask_for_help + * @brief Asks for help. * - * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node that asks for help. - * @param node node associated with @thread - */ + * @param scheduler The scheduler control instance. + * @param the_thread The thread that asks for help. + * @param node The node of @a the_thread. + * + * @retval true The request for help was successful. + * @retval false The request for help was not successful. + */ bool _Scheduler_strong_APA_Ask_for_help( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -253,13 +252,12 @@ bool _Scheduler_strong_APA_Ask_for_help( ); /** - * @brief To Reconsider the help request + * @brief Reconsiders help request. * * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. - * @param node Node corresponding to @thread which asks for - * reconsideration - */ + * @param the_thread The thread to reconsider the help request of. 
+ * @param[in, out] node The node of @a the_thread + */ void _Scheduler_strong_APA_Reconsider_help_request( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -267,13 +265,13 @@ void _Scheduler_strong_APA_Reconsider_help_request( ); /** - * @brief Withdraws a node + * @brief Withdraws the node. * * @param scheduler The scheduler control instance. - * @param thread Thread correspoding to the @node. - * @param node Node that has to be withdrawn - * @param next_state the state that the node goes to - */ + * @param[in, out] the_thread The thread to change the state to @a next_state. + * @param[in, out] node The node to withdraw. + * @param next_state The next state for @a the_thread. + */ void _Scheduler_strong_APA_Withdraw_node( const Scheduler_Control *scheduler, Thread_Control *the_thread, @@ -282,59 +280,48 @@ void _Scheduler_strong_APA_Withdraw_node( ); /** - * @brief Adds a processor to the scheduler instance - * - * and allocates an idle thread to the processor. + * @brief Adds the idle thread to a processor. * * @param scheduler The scheduler control instance. - * @param idle Idle thread to be allocated to the processor - */ + * @param[in, out] The idle thread to add to the processor. + */ void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle ); /** - * @brief Removes a processor from the scheduler instance + * @brief Removes an idle thread from the given cpu. * - * @param scheduler The scheduler control instance. - * @param cpu processor that is removed - */ + * @param scheduler The scheduler instance. + * @param cpu The cpu control to remove from @a scheduler. + * + * @return The idle thread of the processor. + */ Thread_Control *_Scheduler_strong_APA_Remove_processor( const Scheduler_Control *scheduler, - Per_CPU_Control *cpu + struct Per_CPU_Control *cpu ); /** - * @brief Initializes the node with the given priority. + * @brief Performs a yield operation. 
* * @param scheduler The scheduler control instance. - * @param[out] node The node to initialize. - * @param the_thread The thread of the node to initialize. - * @param priority The priority for @a node. + * @param the_thread The thread to yield. + * @param[in, out] node The node of @a the_thread. */ -void _Scheduler_strong_APA_Node_initialize( +void _Scheduler_strong_APA_Yield( const Scheduler_Control *scheduler, - Scheduler_Node *node, Thread_Control *the_thread, - Priority_Control priority + Scheduler_Node *node ); /** - * @brief Starts an idle thread on a CPU + * @brief Sets the affinity . * * @param scheduler The scheduler control instance. - * @param idle Idle Thread - * @param cpu processor that gets the idle thread - */ -void _Scheduler_strong_APA_Start_idle( - const Scheduler_Control *scheduler, - Thread_Control *idle, - Per_CPU_Control *cpu -); - -/** - * @brief Sets the affinity of the @node_base to @affinity + * @param the_thread The thread to yield. + * @param[in, out] node The node of @a the_thread. */ bool _Scheduler_strong_APA_Set_affinity( const Scheduler_Control *scheduler, @@ -342,6 +329,7 @@ bool _Scheduler_strong_APA_Set_affinity( Scheduler_Node *node_base, const Processor_mask *affinity ); + /** @} */ #ifdef __cplusplus diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index a3f4c49cabb..429bbacece6 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -6,24 +6,33 @@ * @brief Strong APA Scheduler Implementation */ -/* - * Copyright (c) 2020 Richi Dubey +/* SPDX-License-Identifier: BSD-2-Clause * - * + * Copyright (C) 2020 Richi Dubey + * Copyright (c) 2013, 2018 embedded brains GmbH * - * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * - * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ - + #ifdef HAVE_CONFIG_H #include "config.h" #endif @@ -31,7 +40,6 @@ #include #include #include -#include static inline Scheduler_strong_APA_Context * _Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler ) @@ -64,34 +72,36 @@ static inline void _Scheduler_strong_APA_Do_update( _Scheduler_SMP_Node_update_priority( smp_node, new_priority ); } -static inline bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) +/* + * Returns true if the Strong APA scheduler has ready nodes + * available for scheduling. 
+ */ +static inline bool + _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) { - Scheduler_strong_APA_Context *self = _Scheduler_strong_APA_Get_self( context ); - - bool ret; - const Chain_Node *tail; - Chain_Node *next; - Scheduler_strong_APA_Node *node; - - tail = _Chain_Immutable_tail( &self->All_nodes ); - next = _Chain_First( &self->All_nodes ); - - ret = false; - + Scheduler_strong_APA_Context *self; + const Chain_Node *tail; + Chain_Node *next; + Scheduler_strong_APA_Node *node; + + self = _Scheduler_strong_APA_Get_self( context ); + tail = _Chain_Immutable_tail( &self->Ready ); + next = _Chain_First( &self->Ready ); + while ( next != tail ) { node = (Scheduler_strong_APA_Node *) STRONG_SCHEDULER_NODE_OF_CHAIN( next ); - - if ( - _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_READY + + if ( + _Scheduler_SMP_Node_state( &node->Base.Base ) == + SCHEDULER_SMP_NODE_READY ) { - ret = true; - break; + return true; } - + next = _Chain_Next( next ); } - - return ret; + + return false; } static inline void _Scheduler_strong_APA_Allocate_processor( @@ -101,10 +111,15 @@ static inline void _Scheduler_strong_APA_Allocate_processor( Per_CPU_Control *victim_cpu ) { - Scheduler_strong_APA_Node *scheduled; - + Scheduler_strong_APA_Node *scheduled; + Scheduler_strong_APA_Context *self; + (void) victim_base; + scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base ); + self = _Scheduler_strong_APA_Get_self( context ); + + self->CPU[ _Per_CPU_Get_index( victim_cpu ) ].executing = scheduled_base; _Scheduler_SMP_Allocate_processor_exact( context, @@ -114,14 +129,19 @@ static inline void _Scheduler_strong_APA_Allocate_processor( ); } +/* + * Finds and returns the highest ready node present by accessing the + * _Strong_APA_Context->CPU with front and rear values. 
+ */ + static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( Scheduler_strong_APA_Context *self, - uint32_t front, + uint32_t front, uint32_t rear ) { Scheduler_Node *highest_ready; - Scheduler_strong_APA_Struct *Struct; + Scheduler_strong_APA_CPU *CPU; const Chain_Node *tail; Chain_Node *next; uint32_t index_assigned_cpu; @@ -132,60 +152,71 @@ static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( Scheduler_SMP_Node_state curr_state; Per_CPU_Control *curr_CPU; bool first_task; - - Struct = self->Struct; - //When the first task accessed has nothing to compare its priority against - // So, it is the task with the highest priority witnessed so far! + + CPU = self->CPU; + /* + * When the first task accessed has nothing to compare its priority against + * So, it is the task with the highest priority witnessed so far. + */ first_task = true; - + + //Assert rear < sizeof(Context->CPU) + _Assert( rear < CONFIGURE_MAXIMUM_PROCESSOR ); + while( front <= rear ) { - curr_CPU = Struct[ front ].cpu; + curr_CPU = CPU[ front ].cpu; front = front + 1; - tail = _Chain_Immutable_tail( &self->All_nodes ); - next = _Chain_First( &self->All_nodes ); - + tail = _Chain_Immutable_tail( &self->Ready ); + next = _Chain_First( &self->Ready ); + while ( next != tail ) { node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next ); - //Check if the curr_CPU is in the affinity set of the node + //Check if the curr_CPU is in the affinity set of the node. 
if ( _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU)) - ) { + ) { curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); - + if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); index_assigned_cpu = _Per_CPU_Get_index( assigned_cpu ); - - if ( Struct[ index_assigned_cpu ].visited == false ) { + + if ( CPU[ index_assigned_cpu ].visited == false ) { rear = rear + 1; - Struct[ rear ].cpu = assigned_cpu; - Struct[ index_assigned_cpu ].visited = true; - // The curr CPU of the queue invoked this node to add its CPU - // that it is executing on to the queue. So this node might get - // preempted because of the invoker curr_CPU and this curr_CPU - // is the CPU that node should preempt in case this node - // gets preempted. - node->invoker = curr_CPU; - } - } + CPU[ rear ].cpu = assigned_cpu; + CPU[ index_assigned_cpu ].visited = true; + /* + * The curr CPU of the queue invoked this node to add its CPU + * that it is executing on to the queue. So this node might get + * preempted because of the invoker curr_CPU and this curr_CPU + * is the CPU that node should preempt in case this node + * gets preempted. + */ + node->cpu_to_preempt = curr_CPU; + } + } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { curr_priority = _Scheduler_Node_get_priority( &node->Base.Base ); curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - + if ( first_task == true || curr_priority < min_priority_num ) { min_priority_num = curr_priority; highest_ready = &node->Base.Base; first_task = false; - //In case this task is directly reachable from thread_CPU - node->invoker = curr_CPU; + /* + * In case curr_CPU is filter_CPU, we need to store the + * cpu_to_preempt value so that we go back to SMP_* + * function, rather than preempting the node ourselves. 
+ */ + node->cpu_to_preempt = curr_CPU; } } } next = _Chain_Next( next ); } } - + return highest_ready; } @@ -204,134 +235,128 @@ static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( insert_priority ); } - + +/* + * Implement the BFS Algorithm for task departure to get the highest ready task + * for a particular CPU, returns the highest ready Scheduler_Node + * Scheduler_Node filter here pointst to the victim node that is blocked + * resulting which this function is called. + */ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Context *context, Scheduler_Node *filter ) { - //Implement the BFS Algorithm for task departure - //to get the highest ready task for a particular CPU - //return the highest ready Scheduler_Node and Scheduler_Node filter here points - // to the victim node that is blocked resulting which this function is called. Scheduler_strong_APA_Context *self; Per_CPU_Control *filter_cpu; Scheduler_strong_APA_Node *node; Scheduler_Node *highest_ready; Scheduler_Node *curr_node; Scheduler_Node *next_node; - Scheduler_strong_APA_Struct *Struct; + Scheduler_strong_APA_CPU *CPU; uint32_t front; uint32_t rear; uint32_t cpu_max; uint32_t cpu_index; - - self=_Scheduler_strong_APA_Get_self( context ); - //Denotes front and rear of the queue + + self = _Scheduler_strong_APA_Get_self( context ); + //Denotes front and rear of the queue. 
front = 0; - rear = -1; + rear = -1; filter_cpu = _Thread_Get_CPU( filter->user ); - Struct = self->Struct; + CPU = self->CPU; cpu_max = _SMP_Get_processor_maximum(); - - for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - Struct[ cpu_index ].visited = false; + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + CPU[ cpu_index ].visited = false; } - + rear = rear + 1; - Struct[ rear ].cpu = filter_cpu; - Struct[ _Per_CPU_Get_index( filter_cpu ) ].visited = true; - + CPU[ rear ].cpu = filter_cpu; + CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true; + highest_ready = _Scheduler_strong_APA_Find_highest_ready( self, front, rear ); - + if ( highest_ready != filter ) { - //Backtrack on the path from - //filter_cpu to highest_ready, shifting along every task. - + /* + * Backtrack on the path from + * filter_cpu to highest_ready, shifting along every task. + */ + node = _Scheduler_strong_APA_Node_downcast( highest_ready ); - - if( node->invoker != filter_cpu ) { - // Highest ready is not just directly reachable from the victim cpu - // So there is need of task shifting - + /* + * Highest ready is not just directly reachable from the victim cpu + * So there is need of task shifting . 
+ */ + while( node->cpu_to_preempt != filter_cpu ){ curr_node = &node->Base.Base; - next_node = _Thread_Scheduler_get_home_node( node->invoker->heir ); - + next_node = CPU[ _Per_CPU_Get_index( node->cpu_to_preempt ) ].executing; + _Scheduler_SMP_Preempt( context, curr_node, - _Thread_Scheduler_get_home_node( node->invoker->heir ), + next_node, _Scheduler_strong_APA_Allocate_processor ); - - _Scheduler_strong_APA_Move_from_ready_to_scheduled(context, curr_node); - - node = _Scheduler_strong_APA_Node_downcast( next_node ); - - while( node->invoker != filter_cpu ){ - curr_node = &node->Base.Base; - next_node = _Thread_Scheduler_get_home_node( node->invoker->heir ); - - _Scheduler_SMP_Preempt( - context, - curr_node, - _Thread_Scheduler_get_home_node( node->invoker->heir ), - _Scheduler_strong_APA_Allocate_processor - ); - - node = _Scheduler_strong_APA_Node_downcast( next_node ); + + if( curr_node == highest_ready) { + _Scheduler_strong_APA_Move_from_ready_to_scheduled(context, curr_node); } - //To save the last node so that the caller SMP_* function - //can do the allocation - + + node = _Scheduler_strong_APA_Node_downcast( next_node ); + } + /* + * To save the last node so that the caller SMP_* function + * can do the allocation + */ curr_node = &node->Base.Base; - highest_ready = curr_node; + highest_ready = curr_node; } - } - - return highest_ready; + + return highest_ready; } +/* + * Checks the lowest scheduled directly reachable task + */ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_Context *context, Scheduler_Node *filter_base ) -{ - //Checks the lowest scheduled directly reachable task - - uint32_t cpu_max; - uint32_t cpu_index; - Thread_Control *curr_thread; - Scheduler_Node *curr_node; - Scheduler_Node *lowest_scheduled; - Priority_Control max_priority_num; - Priority_Control curr_priority; - Scheduler_strong_APA_Node *filter_strong_node; - - lowest_scheduled = NULL; //To remove compiler warning. 
- max_priority_num = 0;//Max (Lowest) priority encountered so far. +{ + uint32_t cpu_max; + uint32_t cpu_index; + Scheduler_Node *curr_node; + Scheduler_Node *lowest_scheduled; + Priority_Control max_priority_num; + Priority_Control curr_priority; + Scheduler_strong_APA_Node *filter_strong_node; + Scheduler_strong_APA_Context *self; + + self = _Scheduler_strong_APA_Get_self( context ); + lowest_scheduled = NULL; // To remove compiler warning. + max_priority_num = 0; // Max (Lowest) priority encountered so far. filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base ); - + //lowest_scheduled is NULL if affinty of a node is 0 _Assert( !_Processor_mask_Zero( &filter_strong_node->Affinity ) ); cpu_max = _SMP_Get_processor_maximum(); - - for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { //Checks if the CPU is in the affinity set of filter_strong_node if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - - if ( _Per_CPU_Is_processor_online( cpu ) ) { - curr_thread = cpu->heir; - curr_node = _Thread_Scheduler_get_home_node( curr_thread ); + + if ( _Per_CPU_Is_processor_online( cpu ) ) { + curr_node = self->CPU[ _Per_CPU_Get_index( cpu ) ].executing; curr_priority = _Scheduler_Node_get_priority( curr_node ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + if ( curr_priority > max_priority_num ) { lowest_scheduled = curr_node; max_priority_num = curr_priority; @@ -339,7 +364,8 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( } } } - + + _Assert( lowest_scheduled != NULL ); return lowest_scheduled; } @@ -355,7 +381,7 @@ static inline void _Scheduler_strong_APA_Extract_from_scheduled( node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, 
&node->Base.Base ); - //Not removing it from All_nodes since the node could go in the ready state. + //Not removing it from Ready since the node could go in the READY state. } static inline void _Scheduler_strong_APA_Extract_from_ready( @@ -368,12 +394,12 @@ static inline void _Scheduler_strong_APA_Extract_from_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); - - _Assert( !_Chain_Is_empty(self->All_nodes) ); - _Assert( !_Chain_Is_node_off_chain( &node->Chain ) ); - - _Chain_Extract_unprotected( &node->Chain ); //Removed from All_nodes - _Chain_Set_off_chain( &node->Chain ); + + _Assert( !_Chain_Is_empty(self->Ready) ); + _Assert( !_Chain_Is_node_off_chain( &node->Ready_node ) ); + + _Chain_Extract_unprotected( &node->Ready_node ); //Removed from Ready + _Chain_Set_off_chain( &node->Ready_node ); } static inline void _Scheduler_strong_APA_Insert_ready( @@ -387,9 +413,10 @@ static inline void _Scheduler_strong_APA_Insert_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_base ); - - if(_Chain_Is_node_off_chain( &node->Chain ) ) - _Chain_Append_unprotected( &self->All_nodes, &node->Chain ); + + if( _Chain_Is_node_off_chain( &node->Ready_node ) ) { + _Chain_Append_unprotected( &self->Ready, &node->Ready_node ); + } } static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( @@ -401,7 +428,7 @@ static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready ); insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready ); - + _Scheduler_strong_APA_Insert_ready( context, scheduled_to_ready, @@ -411,7 +438,7 @@ static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( Scheduler_strong_APA_Context *self, - uint32_t front, + uint32_t front, uint32_t rear, Per_CPU_Control 
**cpu_to_preempt ) @@ -424,50 +451,50 @@ static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( Per_CPU_Control *curr_CPU; Priority_Control curr_priority; Scheduler_Node *curr_node; - Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node - Scheduler_strong_APA_Struct *Struct; - - max_priority_num = 0;//Max (Lowest) priority encountered so far. - Struct = self->Struct; + Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node + Scheduler_strong_APA_CPU *CPU; + + max_priority_num = 0; //Max (Lowest) priority encountered so far. + CPU = self->CPU; cpu_max = _SMP_Get_processor_maximum(); - + while( front <= rear ) { - curr_CPU = Struct[ front ].cpu; + curr_CPU = CPU[ front ].cpu; front = front + 1; - - curr_thread = curr_CPU->heir; - curr_node = _Thread_Scheduler_get_home_node( curr_thread ); - + + curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing; + curr_thread = curr_node->user; + curr_priority = _Scheduler_Node_get_priority( curr_node ); - curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); - - curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + if ( curr_priority > max_priority_num ) { lowest_reachable = curr_node; max_priority_num = curr_priority; *cpu_to_preempt = curr_CPU; } - + if ( !curr_thread->is_idle ) { for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { + if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { //Checks if the thread_CPU is in the affinity set of the node Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - if ( _Per_CPU_Is_processor_online( cpu ) && Struct[ cpu_index ].visited == false ) { + if ( _Per_CPU_Is_processor_online( cpu ) && CPU[ cpu_index ].visited == false ) { rear = rear + 1; - Struct[ rear ].cpu 
= cpu; - Struct[ cpu_index ].visited = true; - Struct[ cpu_index ].caller = curr_node; + CPU[ rear ].cpu = cpu; + CPU[ cpu_index ].visited = true; + CPU[ cpu_index ].preempting_node = curr_node; } - } + } } } } - + return lowest_reachable; } - + static inline bool _Scheduler_strong_APA_Do_enqueue( Scheduler_Context *context, Scheduler_Node *lowest_reachable, @@ -479,46 +506,45 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( bool needs_help; Priority_Control node_priority; Priority_Control lowest_priority; - Scheduler_strong_APA_Struct *Struct; + Scheduler_strong_APA_CPU *CPU; Scheduler_Node *curr_node; Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node Per_CPU_Control *curr_CPU; - Thread_Control *next_thread; Scheduler_strong_APA_Context *self; Scheduler_Node *next_node; - + self = _Scheduler_strong_APA_Get_self( context ); - Struct = self->Struct; - + CPU = self->CPU; + node_priority = _Scheduler_Node_get_priority( node ); - node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); - + node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); + lowest_priority = _Scheduler_Node_get_priority( lowest_reachable ); - lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority ); - + lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority ); + if( lowest_priority > node_priority ) { //Backtrack on the path from - //_Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting + //_Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting //along every task - - curr_node = Struct[ _Per_CPU_Get_index(cpu_to_preempt) ].caller; + + curr_node = CPU[ _Per_CPU_Get_index(cpu_to_preempt) ].preempting_node; curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - curr_strong_node->invoker = cpu_to_preempt; - - //Save which cpu to preempt in invoker value of the node - while( curr_node != node ) { + curr_strong_node->cpu_to_preempt = cpu_to_preempt; + + //Save which cpu to preempt in cpu_to_preempt value of 
the node + while ( curr_node != node ) { curr_CPU = _Thread_Get_CPU( curr_node->user ); - curr_node = Struct[ _Per_CPU_Get_index( curr_CPU ) ].caller; + curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node; curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - curr_strong_node->invoker = curr_CPU; + curr_strong_node->cpu_to_preempt = curr_CPU; } - - next_thread = curr_strong_node->invoker->heir; - next_node = _Thread_Scheduler_get_home_node( next_thread ); - + + curr_CPU = curr_strong_node->cpu_to_preempt; + next_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing; + node_priority = _Scheduler_Node_get_priority( curr_node ); - node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); - + node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); + _Scheduler_SMP_Enqueue_to_scheduled( context, curr_node, @@ -528,13 +554,13 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( _Scheduler_strong_APA_Move_from_scheduled_to_ready, _Scheduler_strong_APA_Allocate_processor ); - + curr_node = next_node; curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - - while( curr_node != lowest_reachable) { - next_thread = curr_strong_node->invoker->heir; - next_node = _Thread_Scheduler_get_home_node( next_thread ); + + while( curr_node != lowest_reachable ) { + curr_CPU = curr_strong_node->cpu_to_preempt; + next_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing; //curr_node preempts the next_node; _Scheduler_SMP_Preempt( context, @@ -542,80 +568,82 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( next_node, _Scheduler_strong_APA_Allocate_processor ); - + curr_node = next_node; curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); } - - _Scheduler_strong_APA_Move_from_scheduled_to_ready(context, lowest_reachable); - + + _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable ); + needs_help = false; } else { needs_help = true; } - - //Add it to All_nodes chain since it is now 
either scheduled or just ready. + + //Add it to Ready chain since it is now either scheduled or just ready. _Scheduler_strong_APA_Insert_ready(context,node,insert_priority); - + return needs_help; } +/* + * BFS Algorithm for task arrival + * Enqueue node either in the scheduled chain or in the ready chain. + * node is the newly arrived node and is currently not scheduled. + */ static inline bool _Scheduler_strong_APA_Enqueue( Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority ) { - //Idea: BFS Algorithm for task arrival - //Enqueue node either in the scheduled chain or in the ready chain - //node is the newly arrived node and is not scheduled. Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Struct *Struct; + Scheduler_strong_APA_CPU *CPU; uint32_t cpu_max; uint32_t cpu_index; Per_CPU_Control *cpu_to_preempt; Scheduler_Node *lowest_reachable; - Scheduler_strong_APA_Node *strong_node; + Scheduler_strong_APA_Node *strong_node; //Denotes front and rear of the queue - uint32_t front; + uint32_t front; uint32_t rear; - + front = 0; - rear = -1; + rear = -1; self = _Scheduler_strong_APA_Get_self( context ); strong_node = _Scheduler_strong_APA_Node_downcast( node ); cpu_max = _SMP_Get_processor_maximum(); - Struct = self->Struct; - - for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - Struct[ cpu_index ].visited = false; - + CPU = self->CPU; + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + CPU[ cpu_index ].visited = false; + //Checks if the thread_CPU is in the affinity set of the node - if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index) ) { + if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - + if ( _Per_CPU_Is_processor_online( cpu ) ) { rear = rear + 1; - Struct[ rear ].cpu = cpu; - Struct[ cpu_index ].visited = true; - Struct[ cpu_index ].caller = node; + CPU[ rear ].cpu = cpu; + CPU[ cpu_index ].visited = 
true; + CPU[ cpu_index ].preempting_node = node; } } } - - //This assert makes sure that there always exist an element in the - // Queue when we start the queue traversal. + + //This assert makes sure that there always exist an element in the + // Queue when we start the queue traversal. _Assert( !_Processor_mask_Zero( &strong_node->Affinity ) ); - + lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable( self, front, rear, &cpu_to_preempt ); - + return _Scheduler_strong_APA_Do_enqueue( context, lowest_reachable, @@ -630,7 +658,7 @@ static inline bool _Scheduler_strong_APA_Enqueue_scheduled( Scheduler_Node *node, Priority_Control insert_priority ) -{ +{ return _Scheduler_SMP_Enqueue_scheduled( context, node, @@ -673,8 +701,10 @@ static inline void _Scheduler_strong_APA_Register_idle( (void) context; (void) idle_base; (void) cpu; - //We do not maintain a variable to access the scheduled - //node for a CPU. So this function does nothing. + /* + * We do not maintain a variable to access the scheduled + * node for a CPU. So this function does nothing. 
+ */ } static inline void _Scheduler_strong_APA_Do_set_affinity( @@ -684,20 +714,18 @@ static inline void _Scheduler_strong_APA_Do_set_affinity( ) { Scheduler_strong_APA_Node *node; - const Processor_mask *affinity; node = _Scheduler_strong_APA_Node_downcast( node_base ); - affinity = arg; - node->Affinity = *affinity; + node->Affinity = *(( const Processor_mask *) arg); } void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) { Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_context( scheduler ); + _Scheduler_strong_APA_Get_context( scheduler ); _Scheduler_SMP_Initialize( &self->Base ); - _Chain_Initialize_empty( &self->All_nodes ); + _Chain_Initialize_empty( &self->Ready ); } void _Scheduler_strong_APA_Yield( @@ -725,7 +753,7 @@ void _Scheduler_strong_APA_Block( ) { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - //The extract from ready automatically removes the node from All_nodes chain. + //The extract from ready automatically removes the node from Ready chain. 
_Scheduler_SMP_Block( context, thread, @@ -839,7 +867,7 @@ void _Scheduler_strong_APA_Add_processor( idle, _Scheduler_strong_APA_Has_ready, _Scheduler_strong_APA_Enqueue_scheduled, - _Scheduler_strong_APA_Register_idle + _Scheduler_SMP_Do_nothing_register_idle ); } @@ -867,36 +895,18 @@ void _Scheduler_strong_APA_Node_initialize( { Scheduler_SMP_Node *smp_node; Scheduler_strong_APA_Node *strong_node; - - smp_node = _Scheduler_SMP_Node_downcast( node ); + + smp_node = _Scheduler_SMP_Node_downcast( node ); strong_node = _Scheduler_strong_APA_Node_downcast( node ); - + _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority ); - + _Processor_mask_Assign( &strong_node->Affinity, _SMP_Get_online_processors() ); } -void _Scheduler_strong_APA_Start_idle( - const Scheduler_Control *scheduler, - Thread_Control *idle, - Per_CPU_Control *cpu -) -{ - Scheduler_Context *context; - - context = _Scheduler_Get_context( scheduler ); - - _Scheduler_SMP_Do_start_idle( - context, - idle, - cpu, - _Scheduler_strong_APA_Register_idle - ); -} - bool _Scheduler_strong_APA_Set_affinity( const Scheduler_Control *scheduler, Thread_Control *thread, @@ -907,7 +917,7 @@ bool _Scheduler_strong_APA_Set_affinity( Scheduler_Context *context; Scheduler_strong_APA_Node *node; Processor_mask local_affinity; - + context = _Scheduler_Get_context( scheduler ); _Processor_mask_And( &local_affinity, &context->Processors, affinity ); @@ -928,13 +938,13 @@ bool _Scheduler_strong_APA_Set_affinity( node_base, &local_affinity, _Scheduler_strong_APA_Do_set_affinity, - _Scheduler_strong_APA_Extract_from_ready, - _Scheduler_strong_APA_Get_highest_ready, + _Scheduler_strong_APA_Extract_from_ready, + _Scheduler_strong_APA_Get_highest_ready, _Scheduler_strong_APA_Move_from_ready_to_scheduled, _Scheduler_strong_APA_Enqueue, _Scheduler_strong_APA_Allocate_processor ); - + return true; } From e542a76befcb0c2a3f638c395eb6404932c00447 Mon Sep 17 00:00:00 2001 From: richidubey Date: Fri, 28 Aug 2020 
19:25:22 +0530 Subject: [PATCH 29/29] Releasing final version --- .../include/rtems/score/schedulerstrongapa.h | 53 +++--- cpukit/score/src/schedulerstrongapa.c | 166 +++++++++++------- 2 files changed, 136 insertions(+), 83 deletions(-) diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 2476984cc4a..77b2883814d 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -5,8 +5,8 @@ * * @brief Strong APA Scheduler API */ - -/* SPDX-License-Identifier: BSD-2-Clause + +/* SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2020 Richi Dubey * Copyright (c) 2013, 2018 embedded brains GmbH @@ -55,14 +55,14 @@ extern "C" { * @brief Strong APA Scheduler * * This is an implementation of the Strong APA scheduler defined by - * Cerqueira et al. in Linux's Processor Affinity API, Refined: + * Cerqueira et al. in Linux's Processor Affinity API, Refined: * Shifting Real-Time Tasks Towards Higher Schedulability. * - * The scheduled and ready nodes are accessed via the - * Scheduler_strong_APA_Context::Ready which helps in backtracking when a + * The scheduled and ready nodes are accessed via the + * Scheduler_strong_APA_Context::Ready which helps in backtracking when a * node which is executing on a CPU gets blocked. New node is allocated to * the cpu by checking all the executing nodes in the affinity set of the - * node and the subsequent nodes executing on the processors in its + * node and the subsequent nodes executing on the processors in its * affinity set. * @{ */ @@ -75,12 +75,12 @@ typedef struct { * @brief SMP scheduler node. */ Scheduler_SMP_Node Base; - + /** * @brief Chain node for Scheduler_strong_APA_Context::Ready. */ Chain_Node Ready_node; - + /** * @brief CPU that this node would preempt in the backtracking part of * _Scheduler_strong_APA_Get_highest_ready and @@ -102,23 +102,23 @@ typedef struct { /** * @brief CPU in a queue. 
- */ + */ Per_CPU_Control *cpu; - + /** * @brief The node that would preempt this CPU. - */ + */ Scheduler_Node *preempting_node; - + /** * @brief Whether or not this cpu has been added to the queue * (visited in BFS). - */ + */ bool visited; - + /** - * @brief The node currently executing on this cpu - */ + * @brief The node currently executing on this cpu. + */ Scheduler_Node *executing; } Scheduler_strong_APA_CPU; @@ -130,13 +130,13 @@ typedef struct { * @brief @see Scheduler_SMP_Context. */ Scheduler_SMP_Context Base; - + /** * @brief Chain of all the ready and scheduled nodes present in * the Strong APA scheduler. */ Chain_Control Ready; - + /** * @brief Struct with important variables for each cpu. */ @@ -170,7 +170,7 @@ typedef struct { _Scheduler_default_Release_job, \ _Scheduler_default_Cancel_job, \ _Scheduler_default_Tick, \ - _Scheduler_SMP_Start_idle, \ + _Scheduler_strong_APA_Start_idle, \ _Scheduler_strong_APA_Set_affinity \ } @@ -238,7 +238,7 @@ void _Scheduler_strong_APA_Update_priority( /** * @brief Asks for help. * - * @param scheduler The scheduler control instance. + * @param scheduler The scheduler control instance. * @param the_thread The thread that asks for help. * @param node The node of @a the_thread. * @@ -316,6 +316,19 @@ void _Scheduler_strong_APA_Yield( Scheduler_Node *node ); +/** + * @brief Starts an idle thread. + * + * @param scheduler The scheduler instance. + * @param[in, out] the_thread An idle thread. + * @param cpu The cpu for the operation. + */ +void _Scheduler_strong_APA_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *idle, + struct Per_CPU_Control *cpu +); + /** * @brief Sets the affinity . 
* diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index 429bbacece6..7315ac27f34 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -104,6 +104,23 @@ static inline bool return false; } +static inline void _Scheduler_strong_APA_Set_scheduled( + Scheduler_strong_APA_Context *self, + Scheduler_Node *executing, + const Per_CPU_Control *cpu +) +{ + self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing; +} + +static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled( + const Scheduler_strong_APA_Context *self, + const Per_CPU_Control *cpu +) +{ + return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing; +} + static inline void _Scheduler_strong_APA_Allocate_processor( Scheduler_Context *context, Scheduler_Node *scheduled_base, @@ -118,12 +135,12 @@ static inline void _Scheduler_strong_APA_Allocate_processor( scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base ); self = _Scheduler_strong_APA_Get_self( context ); - - self->CPU[ _Per_CPU_Get_index( victim_cpu ) ].executing = scheduled_base; + + _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu ); _Scheduler_SMP_Allocate_processor_exact( context, - &(scheduled->Base.Base), + &( scheduled->Base.Base ), NULL, victim_cpu ); @@ -133,7 +150,6 @@ static inline void _Scheduler_strong_APA_Allocate_processor( * Finds and returns the highest ready node present by accessing the * _Strong_APA_Context->CPU with front and rear values. 
*/ - static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( Scheduler_strong_APA_Context *self, uint32_t front, @@ -145,6 +161,7 @@ static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( const Chain_Node *tail; Chain_Node *next; uint32_t index_assigned_cpu; + uint32_t index_curr_cpu; Scheduler_strong_APA_Node *node; Priority_Control min_priority_num; Priority_Control curr_priority; @@ -160,7 +177,6 @@ static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( */ first_task = true; - //Assert rear < sizeof(Context->CPU) _Assert( rear < CONFIGURE_MAXIMUM_PROCESSOR ); while( front <= rear ) { @@ -172,9 +188,10 @@ static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( while ( next != tail ) { node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next ); - //Check if the curr_CPU is in the affinity set of the node. + /* Check if the curr_CPU is in the affinity set of the node. */ + index_curr_cpu = _Per_CPU_Get_index( curr_CPU ); if ( - _Processor_mask_Is_set(&node->Affinity, _Per_CPU_Get_index(curr_CPU)) + _Processor_mask_Is_set( &node->Affinity, index_curr_cpu ) ) { curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); @@ -260,7 +277,7 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( uint32_t cpu_index; self = _Scheduler_strong_APA_Get_self( context ); - //Denotes front and rear of the queue. + /* Denotes front and rear of the queue */ front = 0; rear = -1; @@ -291,11 +308,12 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( node = _Scheduler_strong_APA_Node_downcast( highest_ready ); /* * Highest ready is not just directly reachable from the victim cpu - * So there is need of task shifting . + * So there is need for task shifting . 
*/ - while( node->cpu_to_preempt != filter_cpu ){ + while ( node->cpu_to_preempt != filter_cpu ) { curr_node = &node->Base.Base; - next_node = CPU[ _Per_CPU_Get_index( node->cpu_to_preempt ) ].executing; + next_node = + _Scheduler_strong_APA_Get_scheduled( self, node->cpu_to_preempt ); _Scheduler_SMP_Preempt( context, @@ -304,8 +322,8 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( _Scheduler_strong_APA_Allocate_processor ); - if( curr_node == highest_ready) { - _Scheduler_strong_APA_Move_from_ready_to_scheduled(context, curr_node); + if( curr_node == highest_ready ) { + _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node ); } node = _Scheduler_strong_APA_Node_downcast( next_node ); @@ -339,21 +357,21 @@ static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( Scheduler_strong_APA_Context *self; self = _Scheduler_strong_APA_Get_self( context ); - lowest_scheduled = NULL; // To remove compiler warning. - max_priority_num = 0; // Max (Lowest) priority encountered so far. 
+ lowest_scheduled = NULL; /* To remove compiler warning */ + max_priority_num = 0; /* Max (Lowest) priority encountered so far */ filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base ); - //lowest_scheduled is NULL if affinty of a node is 0 + /* lowest_scheduled is NULL if affinity of a node is 0 */ _Assert( !_Processor_mask_Zero( &filter_strong_node->Affinity ) ); cpu_max = _SMP_Get_processor_maximum(); for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { - //Checks if the CPU is in the affinity set of filter_strong_node - if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index) ) { + /* Checks if the CPU is in the affinity set of filter_strong_node */ + if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) ) { - curr_node = self->CPU[ _Per_CPU_Get_index( cpu ) ].executing; + curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu ); curr_priority = _Scheduler_Node_get_priority( curr_node ); curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); @@ -381,7 +399,7 @@ static inline void _Scheduler_strong_APA_Extract_from_scheduled( node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base ); - //Not removing it from Ready since the node could go in the READY state. 
+ /* Not removing it from Ready since the node could go in the READY state */ } static inline void _Scheduler_strong_APA_Extract_from_ready( @@ -395,10 +413,10 @@ static inline void _Scheduler_strong_APA_Extract_from_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); - _Assert( !_Chain_Is_empty(self->Ready) ); + _Assert( !_Chain_Is_empty( self->Ready ) ); _Assert( !_Chain_Is_node_off_chain( &node->Ready_node ) ); - _Chain_Extract_unprotected( &node->Ready_node ); //Removed from Ready + _Chain_Extract_unprotected( &node->Ready_node ); _Chain_Set_off_chain( &node->Ready_node ); } @@ -451,10 +469,10 @@ static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( Per_CPU_Control *curr_CPU; Priority_Control curr_priority; Scheduler_Node *curr_node; - Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node + Scheduler_strong_APA_Node *curr_strong_node; /* Current Strong_APA_Node */ Scheduler_strong_APA_CPU *CPU; - max_priority_num = 0; //Max (Lowest) priority encountered so far. 
+ max_priority_num = 0; /* Max (Lowest) priority encountered so far */ CPU = self->CPU; cpu_max = _SMP_Get_processor_maximum(); @@ -462,7 +480,7 @@ static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( curr_CPU = CPU[ front ].cpu; front = front + 1; - curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing; + curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU ); curr_thread = curr_node->user; curr_priority = _Scheduler_Node_get_priority( curr_node ); @@ -479,9 +497,12 @@ static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( if ( !curr_thread->is_idle ) { for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { - //Checks if the thread_CPU is in the affinity set of the node + /* Checks if the thread_CPU is in the affinity set of the node */ Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); - if ( _Per_CPU_Is_processor_online( cpu ) && CPU[ cpu_index ].visited == false ) { + if ( + _Per_CPU_Is_processor_online( cpu ) && + CPU[ cpu_index ].visited == false ) + { rear = rear + 1; CPU[ rear ].cpu = cpu; CPU[ cpu_index ].visited = true; @@ -508,7 +529,7 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( Priority_Control lowest_priority; Scheduler_strong_APA_CPU *CPU; Scheduler_Node *curr_node; - Scheduler_strong_APA_Node *curr_strong_node; //Current Strong_APA_Node + Scheduler_strong_APA_Node *curr_strong_node; /* Current Strong_APA_Node */ Per_CPU_Control *curr_CPU; Scheduler_strong_APA_Context *self; Scheduler_Node *next_node; @@ -523,15 +544,17 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority ); if( lowest_priority > node_priority ) { - //Backtrack on the path from - //_Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting - //along every task + /* + * Backtrack on the path from + * _Thread_Get_CPU(lowest_reachable->user) to 
lowest_reachable, shifting + * along every task + */ - curr_node = CPU[ _Per_CPU_Get_index(cpu_to_preempt) ].preempting_node; + curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node; curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); curr_strong_node->cpu_to_preempt = cpu_to_preempt; - //Save which cpu to preempt in cpu_to_preempt value of the node + /* Save which cpu to preempt in cpu_to_preempt value of the node */ while ( curr_node != node ) { curr_CPU = _Thread_Get_CPU( curr_node->user ); curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node; @@ -540,7 +563,7 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( } curr_CPU = curr_strong_node->cpu_to_preempt; - next_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing; + next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU ); node_priority = _Scheduler_Node_get_priority( curr_node ); node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); @@ -558,10 +581,10 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( curr_node = next_node; curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); - while( curr_node != lowest_reachable ) { + while ( curr_node != lowest_reachable ) { curr_CPU = curr_strong_node->cpu_to_preempt; - next_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].executing; - //curr_node preempts the next_node; + next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU ); + /* curr_node preempts the next_node; */ _Scheduler_SMP_Preempt( context, curr_node, @@ -580,8 +603,8 @@ static inline bool _Scheduler_strong_APA_Do_enqueue( needs_help = true; } - //Add it to Ready chain since it is now either scheduled or just ready. - _Scheduler_strong_APA_Insert_ready(context,node,insert_priority); + /* Add it to Ready chain since it is now either scheduled or just ready. 
*/ + _Scheduler_strong_APA_Insert_ready( context,node, insert_priority ); return needs_help; } @@ -605,7 +628,7 @@ static inline bool _Scheduler_strong_APA_Enqueue( Scheduler_Node *lowest_reachable; Scheduler_strong_APA_Node *strong_node; - //Denotes front and rear of the queue + /* Denotes front and rear of the queue */ uint32_t front; uint32_t rear; @@ -620,8 +643,8 @@ static inline bool _Scheduler_strong_APA_Enqueue( for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { CPU[ cpu_index ].visited = false; - //Checks if the thread_CPU is in the affinity set of the node - if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index) ) { + /* Checks if the thread_CPU is in the affinity set of the node */ + if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) { Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); if ( _Per_CPU_Is_processor_online( cpu ) ) { @@ -633,8 +656,10 @@ static inline bool _Scheduler_strong_APA_Enqueue( } } - //This assert makes sure that there always exist an element in the - // Queue when we start the queue traversal. + /* + * This assert makes sure that there always exist an element in the + * Queue when we start the queue traversal. + */ _Assert( !_Processor_mask_Zero( &strong_node->Affinity ) ); lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable( @@ -692,21 +717,6 @@ static inline bool _Scheduler_strong_APA_Do_ask_for_help( ); } -static inline void _Scheduler_strong_APA_Register_idle( - Scheduler_Context *context, - Scheduler_Node *idle_base, - Per_CPU_Control *cpu -) -{ - (void) context; - (void) idle_base; - (void) cpu; - /* - * We do not maintain a variable to access the scheduled - * node for a CPU. So this function does nothing. 
- */ -} - static inline void _Scheduler_strong_APA_Do_set_affinity( Scheduler_Context *context, Scheduler_Node *node_base, @@ -716,7 +726,7 @@ static inline void _Scheduler_strong_APA_Do_set_affinity( Scheduler_strong_APA_Node *node; node = _Scheduler_strong_APA_Node_downcast( node_base ); - node->Affinity = *(( const Processor_mask *) arg); + node->Affinity = *( (const Processor_mask *) arg ); } void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) @@ -753,7 +763,7 @@ void _Scheduler_strong_APA_Block( ) { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - //The extract from ready automatically removes the node from Ready chain. + /* The extract from ready automatically removes the node from Ready chain */ _Scheduler_SMP_Block( context, thread, @@ -855,6 +865,18 @@ void _Scheduler_strong_APA_Withdraw_node( ); } +static inline void _Scheduler_strong_APA_Register_idle( + Scheduler_Context *context, + Scheduler_Node *idle_base, + Per_CPU_Control *cpu +) +{ + Scheduler_strong_APA_Context *self; + self = _Scheduler_strong_APA_Get_self( context ); + + _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu ); +} + void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle @@ -867,7 +889,25 @@ void _Scheduler_strong_APA_Add_processor( idle, _Scheduler_strong_APA_Has_ready, _Scheduler_strong_APA_Enqueue_scheduled, - _Scheduler_SMP_Do_nothing_register_idle + _Scheduler_strong_APA_Register_idle + ); +} + +void _Scheduler_strong_APA_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *idle, + Per_CPU_Control *cpu +) +{ + Scheduler_Context *context; + + context = _Scheduler_Get_context( scheduler ); + + _Scheduler_SMP_Do_start_idle( + context, + idle, + cpu, + _Scheduler_strong_APA_Register_idle ); } @@ -928,7 +968,7 @@ bool _Scheduler_strong_APA_Set_affinity( node = _Scheduler_strong_APA_Node_downcast( node_base ); if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) ) - 
return true; //Nothing to do. Return true. + return true; /* Nothing to do. Return true. */ _Processor_mask_Assign( &node->Affinity, &local_affinity );