Draft pull request: changes from all 28 commits
2129e55  refactor(freertos/smp): Move critical sections inside xTaskPriorityIn… (Dazza0, Jun 15, 2024)
0926574  feat(freertos/smp): Allow vTaskPreemptionEnable() to be nested (Dazza0, Jun 15, 2024)
2c86281  feat(freertos/smp): Add granular locking port macros checks (Dazza0, Jun 15, 2024)
6850d88  feat(granular_locks): Add granular locking functions (Dazza0, Jun 15, 2024)
c08a323  change(freertos/smp): Update tasks.c locking (Dazza0, Jun 15, 2024)
9212425  change(freertos/smp): Update queue.c locking (Dazza0, Jun 15, 2024)
1db6c7c  change(freertos/smp): Update event_groups.c locking (Dazza0, Jun 15, 2024)
3e23312  change(freertos/smp): Update stream_buffer.c locking (Dazza0, Jun 15, 2024)
f2c560d  change(freertos/smp): Update timers.c locking (Dazza0, Jun 15, 2024)
97ffa4c  feat(freertos/smp): Add Granular Locking V4 proposal documents (Dazza0, Jun 17, 2024)
274fbb5  feat(freertos-smp): Light Weight Preemption Disable Locks (sudeep-mohanty, Aug 2, 2025)
4330d90  fix(freertos-smp): Fixed Lightweight Critical Sections for deferred s… (sudeep-mohanty, Aug 5, 2025)
60bca5e  fix(freertos-smp): Stop unconditional yielding in vTaskPreemptionEnable (sudeep-mohanty, Aug 2, 2025)
e3d92dd  fix(freertos-smp): Fix yielding decisions based on preemption state o… (sudeep-mohanty, Aug 5, 2025)
3502585  fix(freertos-smp): Miscellaneous fixes for granular locks (sudeep-mohanty, Aug 15, 2025)
1b14543  feat(freertos-smp): Create private function for task preemption enable (sudeep-mohanty, Aug 19, 2025)
c5667e3  feat(freertos-smp): Update event groups unlock to use taskDATA_GROUP_… (sudeep-mohanty, Aug 19, 2025)
829d8ba  fix(freertos-smp): Remove scheduler suspension from event_groups.c (sudeep-mohanty, Aug 20, 2025)
9000208  feat(freertos-smp): Remove xTaskUnlockCanYield() and make it inline (sudeep-mohanty, Aug 21, 2025)
592177f  remove(freertos-smp): Remove support for light-weight critical sections (sudeep-mohanty, Aug 21, 2025)
fbd37a2  feat(freertos-smp): Added xTaskRemoveFromEventListFromISR() (sudeep-mohanty, Aug 21, 2025)
f46aaa7  feat(freertos-smp): Reintroduce Light Weight Critical Sections (sudeep-mohanty, Aug 22, 2025)
49b89b2  feat(freertos-smp): Use light weight locks for preemption disable/enable (sudeep-mohanty, Aug 22, 2025)
1585b15  feat(freertos-smp): Update queueUNLOCK() to receive yield status (sudeep-mohanty, Aug 22, 2025)
5098b1a  feat(freertos-smp): Optimize prvLockQueue() and prvUnlockQueue() (sudeep-mohanty, Aug 22, 2025)
1b91c54  fix(freertos-smp): Always take ISR locks in vTaskEnter/ExitCriticalFr… (sudeep-mohanty, Aug 25, 2025)
a1cc3bd  fix(freertos-smp): Update critical nesting count in prvLock/UnlockQueue (sudeep-mohanty, Oct 13, 2025)
4ee7179  fix(freertos-smp): Stream Buffer task lists must be manipulated in cr… (sudeep-mohanty, Oct 13, 2025)
event_groups.c (87 changes: 70 additions & 17 deletions)
@@ -63,10 +63,30 @@
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif

+ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+     portSPINLOCK_TYPE xTaskSpinlock;
+     portSPINLOCK_TYPE xISRSpinlock;
+ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
} EventGroup_t;

/*-----------------------------------------------------------*/

+ /*
+  * Macros to mark the start and end of a critical code region.
+  */
+ #if ( portUSING_GRANULAR_LOCKS == 1 )
+     #define event_groupsENTER_CRITICAL( pxEventBits )                                 taskDATA_GROUP_ENTER_CRITICAL( &pxEventBits->xTaskSpinlock, &pxEventBits->xISRSpinlock )
+     #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, puxSavedInterruptStatus ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( &pxEventBits->xISRSpinlock, puxSavedInterruptStatus )
+     #define event_groupsEXIT_CRITICAL( pxEventBits )                                  taskDATA_GROUP_EXIT_CRITICAL( &pxEventBits->xTaskSpinlock, &pxEventBits->xISRSpinlock )
+     #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, &pxEventBits->xISRSpinlock )
+ #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+     #define event_groupsENTER_CRITICAL( pxEventBits )                                 taskENTER_CRITICAL();
+     #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, puxSavedInterruptStatus ) do { *( puxSavedInterruptStatus ) = taskENTER_CRITICAL_FROM_ISR(); } while( 0 )
+     #define event_groupsEXIT_CRITICAL( pxEventBits )                                  taskEXIT_CRITICAL();
+     #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+ #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
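Note: the taskDATA_GROUP_* macros referenced above are added to the kernel headers elsewhere in this PR. For readers reviewing this file in isolation, the sketch below shows one plausible shape for the port-level spinlock primitives they build on; the struct layout, the vPortSpinlockTake()/vPortSpinlockRelease() names, and the GCC atomics are illustrative assumptions, not this PR's actual port code.

```c
#include <stdint.h>

/* Hypothetical spinlock a granular-locking port might supply. A real
 * port additionally masks interrupts and tracks critical nesting
 * before spinning on an ISR lock. */
typedef struct
{
    volatile uint32_t ulLockCount; /* 0 = free, 1 = held. */
} portSPINLOCK_TYPE;

#define portINIT_SPINLOCK( pxLock )    ( ( pxLock )->ulLockCount = 0U )

static inline void vPortSpinlockTake( portSPINLOCK_TYPE * pxLock )
{
    /* Spin until the atomic exchange returns 0, i.e. the lock was free
     * and this core now owns it. */
    while( __atomic_exchange_n( &( pxLock->ulLockCount ), 1U, __ATOMIC_ACQUIRE ) != 0U )
    {
        /* Busy-wait. */
    }
}

static inline void vPortSpinlockRelease( portSPINLOCK_TYPE * pxLock )
{
    /* Publish writes made inside the critical region, then free the lock. */
    __atomic_store_n( &( pxLock->ulLockCount ), 0U, __ATOMIC_RELEASE );
}
```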

/*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@@ -79,6 +99,25 @@
const EventBits_t uxBitsToWaitFor,
const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;

+ /*-----------------------------------------------------------*/
+
+ /*
+  * Macros used to lock and unlock an event group. When a task locks an
+  * event group, the task has thread-safe, non-deterministic access to
+  * the event group:
+  * - Concurrent access from other tasks is blocked by the xTaskSpinlock.
+  * - Concurrent access from ISRs is pended.
+  *
+  * When the task unlocks the event group, all pended access attempts are
+  * handled.
+  */
+ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+     #define event_groupsLOCK( pxEventBits )      taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) )
+     #define event_groupsUNLOCK( pxEventBits )    taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) )
+ #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
+     #define event_groupsLOCK( pxEventBits )      vTaskSuspendAll()
+     #define event_groupsUNLOCK( pxEventBits )    xTaskResumeAll()
+ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
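Note: the comment above defines the lock/unlock protocol; the sketch below condenses how the rest of this file uses it. prvLockPatternSketch() is a hypothetical name, and the real functions interleave extra bookkeeping between lock and unlock.

```c
/* Minimal sketch of the pattern xEventGroupSync() and
 * xEventGroupWaitBits() now follow. Under granular locks, other tasks
 * are excluded via xTaskSpinlock while ISRs keep running; in the
 * fallback build this degenerates to vTaskSuspendAll()/xTaskResumeAll(). */
static void prvLockPatternSketch( EventGroup_t * pxEventBits,
                                  TickType_t xTicksToWait )
{
    BaseType_t xAlreadyYielded;

    event_groupsLOCK( pxEventBits );
    {
        /* Non-deterministic work, e.g. walking xTasksWaitingForBits,
         * is protected from concurrent task access here. */
    }
    xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

    if( ( xAlreadyYielded == pdFALSE ) && ( xTicksToWait != ( TickType_t ) 0 ) )
    {
        /* The calling task placed itself on an event list above and
         * must now actually switch out. */
        portYIELD_WITHIN_API();
    }
}
```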

/*-----------------------------------------------------------*/

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
@@ -122,6 +161,13 @@
}
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

+ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+ {
+     portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
+     portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
+ }
+ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

traceEVENT_GROUP_CREATE( pxEventBits );
}
else
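Note: this hunk and the matching one in the second creation path initialise the per-object spinlocks inside the kernel, so application code is unchanged. A minimal caller-side sketch (buffer and function names are illustrative):

```c
#include "FreeRTOS.h"
#include "event_groups.h"

/* Static storage for the event group control block. */
static StaticEventGroup_t xEventGroupBuffer;

void vCreateEventGroupExample( void )
{
    /* xTaskSpinlock and xISRSpinlock are initialised internally via
     * portINIT_SPINLOCK(); nothing extra is required here. */
    EventGroupHandle_t xGroup = xEventGroupCreateStatic( &xEventGroupBuffer );

    configASSERT( xGroup != NULL );
}
```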
@@ -167,6 +213,13 @@
}
#endif /* configSUPPORT_STATIC_ALLOCATION */

+ #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
+ {
+     portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
+     portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
+ }
+ #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */

traceEVENT_GROUP_CREATE( pxEventBits );
}
else
@@ -202,7 +255,7 @@
}
#endif

- vTaskSuspendAll();
+ event_groupsLOCK( pxEventBits );
{
uxOriginalBitValue = pxEventBits->uxEventBits;

@@ -245,7 +298,7 @@
}
}
}
- xAlreadyYielded = xTaskResumeAll();
+ xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

if( xTicksToWait != ( TickType_t ) 0 )
{
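Note: the two hunks above sit inside xEventGroupSync(); its external behaviour is unchanged. For context, a minimal two-task rendezvous using the API (bit assignments and task names are illustrative):

```c
#include "FreeRTOS.h"
#include "task.h"
#include "event_groups.h"

#define syncTASK_A_BIT    ( ( EventBits_t ) ( 1U << 0 ) )
#define syncTASK_B_BIT    ( ( EventBits_t ) ( 1U << 1 ) )

/* Task A sets its own bit, then blocks until task B's bit is also set. */
void vTaskA( void * pvParameters )
{
    EventGroupHandle_t xGroup = ( EventGroupHandle_t ) pvParameters;

    for( ;; )
    {
        ( void ) xEventGroupSync( xGroup,
                                  syncTASK_A_BIT,                  /* Bit to set. */
                                  syncTASK_A_BIT | syncTASK_B_BIT, /* Bits to wait for. */
                                  portMAX_DELAY );

        /* Both tasks have reached the rendezvous point here. */
    }
}
```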
@@ -267,7 +320,7 @@
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
- taskENTER_CRITICAL();
+ event_groupsENTER_CRITICAL( pxEventBits );
{
uxReturn = pxEventBits->uxEventBits;

@@ -284,7 +337,7 @@
mtCOVERAGE_TEST_MARKER();
}
}
- taskEXIT_CRITICAL();
+ event_groupsEXIT_CRITICAL( pxEventBits );

xTimeoutOccurred = pdTRUE;
}
@@ -333,7 +386,7 @@
}
#endif

- vTaskSuspendAll();
+ event_groupsLOCK( pxEventBits );
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

@@ -401,7 +454,7 @@
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
- xAlreadyYielded = xTaskResumeAll();
+ xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

if( xTicksToWait != ( TickType_t ) 0 )
{
@@ -422,7 +475,7 @@

if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
- taskENTER_CRITICAL();
+ event_groupsENTER_CRITICAL( pxEventBits );
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
@@ -447,7 +500,7 @@

xTimeoutOccurred = pdTRUE;
}
- taskEXIT_CRITICAL();
+ event_groupsEXIT_CRITICAL( pxEventBits );
}
else
{
@@ -482,7 +535,7 @@
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

- taskENTER_CRITICAL();
+ event_groupsENTER_CRITICAL( pxEventBits );
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );

@@ -493,7 +546,7 @@
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
- taskEXIT_CRITICAL();
+ event_groupsEXIT_CRITICAL( pxEventBits );

traceRETURN_xEventGroupClearBits( uxReturn );
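Note: only the critical-section macro changes in xEventGroupClearBits(); the return contract is untouched. Caller-side sketch (function name is illustrative):

```c
#include "FreeRTOS.h"
#include "event_groups.h"

EventBits_t uxClearExample( EventGroupHandle_t xGroup )
{
    /* xEventGroupClearBits() returns the bit value *before* clearing,
     * so the caller can tell which of the cleared bits were set. */
    return xEventGroupClearBits( xGroup, ( EventBits_t ) 0x03 );
}
```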

@@ -524,19 +577,19 @@
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
UBaseType_t uxSavedInterruptStatus;
- EventGroup_t const * const pxEventBits = xEventGroup;
+ EventGroup_t * const pxEventBits = xEventGroup;
EventBits_t uxReturn;

traceENTER_xEventGroupGetBitsFromISR( xEventGroup );

/* MISRA Ref 4.7.1 [Return value shall be checked] */
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
/* coverity[misra_c_2012_directive_4_7_violation] */
- uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
+ event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits, &uxSavedInterruptStatus );
{
uxReturn = pxEventBits->uxEventBits;
}
- taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
+ event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );

traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
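Note: pxEventBits loses its const qualifier above because the ISR critical-section macro now needs writable access to the object's xISRSpinlock. The call itself remains ISR-safe and unchanged for callers (handler name is illustrative):

```c
#include "FreeRTOS.h"
#include "event_groups.h"

extern EventGroupHandle_t xGroup; /* Created during system init. */

void vExampleInterruptHandler( void )
{
    /* Under granular locks this takes only the event group's ISR
     * spinlock; otherwise it is a plain ISR-safe critical section. */
    EventBits_t uxBits = xEventGroupGetBitsFromISR( xGroup );

    ( void ) uxBits; /* e.g. inspect bits to decide further action. */
}
```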

@@ -564,7 +617,7 @@

pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList );
- vTaskSuspendAll();
+ event_groupsLOCK( pxEventBits );
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

@@ -639,7 +692,7 @@
/* Snapshot resulting bits. */
uxReturnBits = pxEventBits->uxEventBits;
}
- ( void ) xTaskResumeAll();
+ ( void ) event_groupsUNLOCK( pxEventBits );

traceRETURN_xEventGroupSetBits( uxReturnBits );
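Note: xEventGroupSetBits() keeps the waiting-list walk under event_groupsLOCK() (task-level exclusion) rather than a full critical section, since the walk is non-deterministic. At the API surface nothing changes; a minimal producer/consumer pair (names and bit choice are illustrative):

```c
#include "FreeRTOS.h"
#include "event_groups.h"

#define exampleRX_BIT    ( ( EventBits_t ) ( 1U << 0 ) )

/* Producer task: wakes any task waiting on exampleRX_BIT. */
void vProducer( EventGroupHandle_t xGroup )
{
    ( void ) xEventGroupSetBits( xGroup, exampleRX_BIT );
}

/* Consumer task: blocks until the bit arrives, clearing it on exit. */
void vConsumer( EventGroupHandle_t xGroup )
{
    ( void ) xEventGroupWaitBits( xGroup,
                                  exampleRX_BIT,
                                  pdTRUE,          /* Clear on exit. */
                                  pdFALSE,         /* Any bit suffices. */
                                  portMAX_DELAY );
}
```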

@@ -658,7 +711,7 @@

pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

- vTaskSuspendAll();
+ event_groupsLOCK( pxEventBits );
{
traceEVENT_GROUP_DELETE( xEventGroup );

@@ -670,7 +723,7 @@
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
}
- ( void ) xTaskResumeAll();
+ ( void ) event_groupsUNLOCK( pxEventBits );

#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
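Closing note: exercising this file with granular locking enabled requires an SMP configuration and a port that provides the granular-locking hooks. A sketch of the relevant settings follows; which macro lives where reflects this PR's port-check commit, so verify against the port you actually use.

```c
/* FreeRTOSConfig.h fragment (illustrative). */
#define configNUMBER_OF_CORES    2    /* Granular locks apply to SMP builds only. */
#define configUSE_PREEMPTION     1

/* Expected from the port layer (e.g. portmacro.h) when granular locking
 * is supported: portUSING_GRANULAR_LOCKS, portSPINLOCK_TYPE and
 * portINIT_SPINLOCK(), on top of which the kernel defines the
 * taskDATA_GROUP_* lock and critical-section macros used above. */
```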