SuperTinyKernel™ RTOS 1.05.3
Lightweight, high-performance, deterministic, bare-metal C++ RTOS for resource-constrained embedded systems. MIT Open Source License.
Loading...
Searching...
No Matches
stk.h
Go to the documentation of this file.
1/*
2 * SuperTinyKernel(TM) RTOS: Lightweight High-Performance Deterministic C++ RTOS for Embedded Systems.
3 *
4 * Source: https://github.com/SuperTinyKernel-RTOS
5 *
6 * Copyright (c) 2022-2026 Neutron Code Limited <stk@neutroncode.com>. All Rights Reserved.
7 * License: MIT License, see LICENSE for a full text.
8 */
9
10#ifndef STK_H_
11#define STK_H_
12
13#include "stk_helper.h"
19
34
35namespace stk {
36
81template <uint8_t TMode, uint32_t TSize, class TStrategy, class TPlatform>
83{
84protected:
90
96
101 enum ERequest : uint8_t
102 {
105 };
106
118 class KernelTask : public IKernelTask
119 {
120 friend class Kernel;
121
127 {
131 };
132
133 public:
142 {
144 };
145
// Constructs a free (unbound) kernel task slot: no user task attached
// (m_user == nullptr), value-initialized stack descriptor, STATE_NONE,
// and no pending sleep (m_time_sleep == 0).
150 explicit KernelTask() : m_user(nullptr), m_stack(), m_state(STATE_NONE), m_time_sleep(0),
151 m_srt(), m_hrt(), m_rt_weight()
152 {
153 // bind to wait object
// In KERNEL_SYNC builds the per-task wait object is linked back to its
// owning task here so Wake()/wait bookkeeping can reach this KernelTask.
// NOTE(review): m_wait_obj's declaration is outside this view — presumably a
// conditionally-allocated member; confirm it is dereferenceable here.
154 if (IsSyncMode())
155 m_wait_obj->m_task = this;
156 }
157
161 ITask *GetUserTask() { return m_user; }
162
166 Stack *GetUserStack() { return &m_stack; }
167
171 bool IsBusy() const { return (m_user != nullptr); }
172
176 bool IsSleeping() const { return (m_time_sleep < 0); }
177
181 TId GetTid() const { return hw::PtrToWord(m_user); }
182
187 void Wake()
188 {
190
191 // wakeup on a next cycle
192 m_time_sleep = -1;
193 }
194
// Sets the task's current (runtime) scheduling weight. A no-op for
// strategies that do not expose the weight API.
198 void SetCurrentWeight(int32_t weight)
199 {
// WEIGHT_API is a compile-time strategy trait, so this branch folds away;
// m_rt_weight is only allocated when WEIGHT_API is set (see STK_ALLOCATE_COUNT).
200 if (TStrategy::WEIGHT_API)
201 m_rt_weight[0] = weight;
202 }
203
207 int32_t GetWeight() const { return (TStrategy::WEIGHT_API ? m_user->GetWeight() : 1); }
208
214 int32_t GetCurrentWeight() const { return (TStrategy::WEIGHT_API ? m_rt_weight[0] : 1); }
215
221 {
223
224 return (IsHrtMode() ? m_hrt[0].periodicity : 0);
225 }
226
233 {
235
236 return (IsHrtMode() ? m_hrt[0].deadline : 0);
237 }
238
245 {
248
249 return (IsHrtMode() ? (m_hrt[0].deadline - m_hrt[0].duration) : 0);
250 }
251
252 protected:
257 {}
258
264 struct SrtInfo
265 {
267 {}
268
271 void Clear()
272 {
273 add_task_req = nullptr;
274 }
275
282 };
283
288 struct HrtInfo
289 {
290 HrtInfo() : periodicity(0), deadline(0), duration(0), done(false)
291 {}
292
// Resets all hard-real-time accounting fields to their initial state
// (the same values the default constructor assigns).
295 void Clear()
296 {
297 periodicity = 0;
298 deadline = 0;
299 duration = 0;
300 done = false;
301 }
302
306 volatile bool done;
307 };
308
315 struct WaitObject : public IWaitObject
316 {
317 explicit WaitObject() : m_task(nullptr), m_sync_obj(nullptr), m_timeout(false), m_time_wait(0)
318 {}
319
324 {}
325
332 {
334 };
335
339 TId GetTid() const { return m_task->GetTid(); }
340
344 bool IsTimeout() const { return m_timeout; }
345
349 bool IsWaiting() const { return (m_sync_obj != nullptr); }
350
356 void Wake(bool timeout)
357 {
359
360 m_timeout = timeout;
361 m_time_wait = 0;
362
363 m_sync_obj->RemoveWaitObject(this);
364 m_sync_obj = nullptr;
365
366 return m_task->Wake();
367 }
368
375 bool Tick(Timeout elapsed_ticks)
376 {
378 {
379 m_time_wait -= elapsed_ticks;
380
381 if (m_time_wait <= 0)
382 m_timeout = true;
383 }
384
385 return !m_timeout;
386 }
387
395 void SetupWait(ISyncObject *sync_obj, Timeout timeout)
396 {
398
399 m_sync_obj = sync_obj;
400 m_time_wait = timeout;
401 m_timeout = false;
402
403 sync_obj->AddWaitObject(this);
404 }
405
408 volatile bool m_timeout;
410 };
411
// Binds a user task to this kernel task slot: records the stack access mode,
// optionally the task id (STK_NEED_TASK_ID), initializes the task's stack via
// the platform driver, and finally publishes the binding by setting m_user
// (after which IsBusy() returns true).
416 void Bind(TPlatform *platform, ITask *user_task)
417 {
418 // set access mode for this stack
419 m_stack.mode = user_task->GetAccessMode();
420
421 // set task id for tracking purpose
422 #if STK_NEED_TASK_ID
423 m_stack.tid = user_task->GetId();
424 #endif
425
426 // init stack of the user task
// InitStack failure is a fatal configuration error — assert rather than
// attempting recovery.
427 if (!platform->InitStack(STACK_USER_TASK, &m_stack, user_task, user_task))
428 {
429 STK_ASSERT(false);
430 }
431
432 // bind user task
// assigned last, after the stack is fully initialized
433 m_user = user_task;
434 }
435
439 void Unbind()
440 {
441 if (IsSyncMode())
442 {
443 // should be freed from waiting on task exit
444 STK_ASSERT(!m_wait_obj->IsWaiting());
445 }
446
447 m_user = nullptr;
448 m_stack = {};
450 m_time_sleep = 0;
451
452 if (IsHrtMode())
453 m_hrt[0].Clear();
454 else
455 m_srt->Clear();
456 }
457
461 {
462 // make this task sleeping to switch it out from scheduling process
464
465 // mark it as done HRT task
466 if (IsHrtMode())
468
469 // mark it as pending for removal
471 }
472
475 bool IsPendingRemoval() const { return ((m_state & STATE_REMOVE_PENDING) != 0U); }
476
// Returns true when the given stack-pointer value lies within the bound user
// task's stack memory range.
// NOTE(review): the upper bound is inclusive — SP == end (one word past the
// region) is accepted, consistent with a full-descending stack whose SP may
// rest at the very top; confirm against the platform's stack convention.
480 bool IsMemoryOfSP(Word SP) const
481 {
482 Word *start = m_user->GetStack();
483 Word *end = start + m_user->GetStackSize();
484
485 return (SP >= hw::PtrToWord(start)) && (SP <= hw::PtrToWord(end));
486 }
487
// Configures hard-real-time parameters for this task (all in ticks):
// periodicity_tc — activation period, deadline_tc — completion deadline,
// start_delay_tc — optional initial phase offset before the first activation.
494 void HrtInit(Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
495 {
// validate caller-supplied timing: period and deadline must be positive
// (delay may be 0) and must fit signed 32-bit Timeout arithmetic
496 STK_ASSERT(periodicity_tc > 0);
497 STK_ASSERT(deadline_tc > 0);
498 STK_ASSERT(start_delay_tc >= 0);
499 STK_ASSERT(periodicity_tc < INT32_MAX);
500 STK_ASSERT(deadline_tc < INT32_MAX);
501
502 m_hrt[0].periodicity = periodicity_tc;
503 m_hrt[0].deadline = deadline_tc;
504
// a positive phase offset is implemented as an initial sleep
505 if (start_delay_tc > 0)
506 ScheduleSleep(start_delay_tc);
507 }
508
514
520 void HrtOnSwitchedOut(IPlatform */*platform*/)
521 {
522 const Timeout duration = m_hrt[0].duration;
523
524 STK_ASSERT(duration >= 0);
525
526 Timeout sleep = m_hrt[0].periodicity - duration;
527 if (sleep > 0)
528 ScheduleSleep(sleep);
529
530 m_hrt[0].duration = 0;
531 m_hrt[0].done = false;
532 }
533
539 {
540 const Timeout duration = m_hrt[0].duration;
541
542 STK_ASSERT(duration >= 0);
544
545 m_user->OnDeadlineMissed(duration);
546 platform->ProcessHardFault();
547 }
548
553 {
554 m_hrt[0].done = true;
555 __stk_full_memfence();
556 }
557
561 bool HrtIsDeadlineMissed(Timeout duration) const { return (duration > m_hrt[0].deadline); }
562
573 {
574 STK_ASSERT(ticks > 0);
575
576 // set state first as kernel checks it when task IsSleeping
577 if (TStrategy::SLEEP_EVENT_API)
578 {
579 if (m_time_sleep >= 0)
581 }
582
583 m_time_sleep = -ticks;
584 __stk_full_memfence();
585 }
586
589 volatile uint32_t m_state;
593 int32_t m_rt_weight[STK_ALLOCATE_COUNT(TStrategy::WEIGHT_API, 1, 1, 0)];
595 };
596
605 {
606 friend class Kernel;
607
608 public:
613 TId GetTid() const
614 {
616
617 return m_platform->GetTid();
618 }
619
625
630 int32_t GetTickResolution() const { return m_platform->GetTickResolution(); }
631
638 {
640
641 Ticks now = GetTicks();
642 const Ticks deadline = now + ticks;
643 STK_ASSERT(deadline >= now);
644
645 for (; now < deadline; now = GetTicks())
646 {
648 }
649 }
650
657 {
659
660 if (!IsHrtMode())
661 {
662 m_platform->Sleep(ticks);
663 }
664 else
665 {
666 // sleeping is not supported in HRT mode, task will sleep according to its periodicity and workload
667 STK_ASSERT(false);
668 }
669 }
670
677 {
679
680 if (!IsHrtMode())
681 {
682 m_platform->SleepUntil(timestamp);
683 }
684 else
685 {
686 // sleeping is not supported in HRT mode, task will sleep according to its periodicity and workload
687 STK_ASSERT(false);
688 }
689 }
690
697 {
699
700 m_platform->SwitchToNext();
701 }
702
711 {
712 if (IsSyncMode())
713 {
714 return m_platform->Wait(sobj, mutex, ticks);
715 }
716 else
717 {
718 STK_ASSERT(false);
719 return nullptr;
720 }
721 }
722
723 private:
727 explicit KernelService() : m_platform(nullptr), m_ticks(0)
728 {}
729
735
741 void Initialize(IPlatform *platform)
742 {
743 m_platform = static_cast<TPlatform *>(platform);
744 }
745
749 void IncrementTicks(Ticks advance)
750 {
751 // using WriteVolatile64() to guarantee correct lockless reading order by ReadVolatile64
753 }
754
755 TPlatform *m_platform;
756 volatile Ticks m_ticks;
757 };
758
759public:
763 {
764 TASKS_MAX = TSize
765 };
766
776 {
777 #ifdef _DEBUG
778 // TPlatform must inherit IPlatform
779 IPlatform *platform = &m_platform;
780 (void)platform;
781
782 // TStrategy must inherit ITaskSwitchStrategy
783 ITaskSwitchStrategy *strategy = &m_strategy;
784 (void)strategy;
785 #endif
786
787 #if !STK_TICKLESS_IDLE
788 STK_STATIC_ASSERT_DESC(((TMode & KERNEL_TICKLESS) == 0U),
789 "STK_TICKLESS_IDLE must be defined to 1 for KERNEL_TICKLESS");
790 #endif
791 }
792
797 {}
798
806 {
807 STK_ASSERT(resolution_us != 0);
808 STK_ASSERT(resolution_us <= PERIODICITY_MAX);
810
811 // reinitialize key state variables
812 m_task_now = nullptr;
815
816 m_service.Initialize(&m_platform);
817
818 m_platform.Initialize(this, &m_service, resolution_us, (IsDynamicMode() ? &m_exit_trap[0].stack : nullptr));
819
820 // now ready to Start()
822 }
823
833 {
834 if (!IsHrtMode())
835 {
836 STK_ASSERT(user_task != nullptr);
838
839 // when started the operation must be serialized by switching out from processing until
840 // kernel processes this request
841 if (IsStarted())
842 {
843 if (IsDynamicMode())
844 {
845 RequestAddTask(user_task);
846 }
847 else
848 {
849 STK_ASSERT(false);
850 }
851 }
852 else
853 {
854 AllocateAndAddNewTask(user_task);
855 }
856 }
857 else
858 {
859 STK_ASSERT(false);
860 }
861 }
862
871 __stk_attr_noinline void AddTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
872 {
873 if (IsHrtMode())
874 {
875 STK_ASSERT(user_task != nullptr);
878
879 HrtAllocateAndAddNewTask(user_task, periodicity_tc, deadline_tc, start_delay_tc);
880 }
881 else
882 {
883 STK_ASSERT(false);
884 }
885 }
886
896 {
897 if (IsDynamicMode())
898 {
899 STK_ASSERT(user_task != nullptr);
901
902 KernelTask *task = FindTaskByUserTask(user_task);
903 if (task != nullptr)
904 RemoveTask(task);
905 }
906 else
907 {
908 // kernel operating mode must be KERNEL_DYNAMIC for tasks to be able to be removed
909 STK_ASSERT(false);
910 }
911 }
912
922 {
924
925 // stacks of the traps must be re-initialized on every subsequent Start
926 InitTraps();
927
928 // start tracing
929 #if STK_SEGGER_SYSVIEW
930 SEGGER_SYSVIEW_Start();
931 for (int32_t i = 0; i < TASKS_MAX; ++i)
932 {
933 KernelTask *task = &m_task_storage[i];
934 if (task->IsBusy())
935 SendTaskTraceInfo(task);
936 }
937 #endif
938
939 m_platform.Start();
940 }
941
946 bool IsStarted() const
947 {
948 return (m_task_now != nullptr);
949 }
950
955
960
963 EState GetState() const { return m_state; }
964
965protected:
979
992
996 {
997 return (state > FSM_STATE_NONE) &&
998 (state < FSM_STATE_MAX);
999 }
1000
1004 {
1005 // init stack for a Sleep trap
1006 {
1007 SleepTrapStack &sleep = m_sleep_trap[0];
1008
1009 SleepTrapStackMemory wrapper(&sleep.memory);
1011 #if STK_NEED_TASK_ID
1012 sleep.stack.tid = SYS_TASK_ID_SLEEP;
1013 #endif
1014
1015 m_platform.InitStack(STACK_SLEEP_TRAP, &sleep.stack, &wrapper, nullptr);
1016 }
1017
1018 // init stack for an Exit trap
1019 if (IsDynamicMode())
1020 {
1021 ExitTrapStack &exit = m_exit_trap[0];
1022
1023 ExitTrapStackMemory wrapper(&exit.memory);
1025 #if STK_NEED_TASK_ID
1026 exit.stack.tid = SYS_TASK_ID_EXIT;
1027 #endif
1028
1029 m_platform.InitStack(STACK_EXIT_TRAP, &exit.stack, &wrapper, nullptr);
1030 }
1031 }
1032
1038 {
1039 // look for a free kernel task
1040 KernelTask *new_task = nullptr;
1041 for (uint32_t i = 0; i < TASKS_MAX; ++i)
1042 {
1043 KernelTask *task = &m_task_storage[i];
1044 if (task->IsBusy())
1045 {
1046 // avoid task collision
1047 STK_ASSERT(task->m_user != user_task);
1048
1049 // avoid stack collision
1050 STK_ASSERT(task->m_user->GetStack() != user_task->GetStack());
1051 }
1052 else
1053 if (new_task == nullptr)
1054 {
1055 new_task = task;
1056 #if defined(NDEBUG) && !defined(_STK_ASSERT_REDIRECT)
1057 break; // break if assertions are inactive and do not try to validate collision with existing tasks
1058 #endif
1059 }
1060 }
1061
1062 // if nullptr - exceeded max supported kernel task count, application design failure
1063 STK_ASSERT(new_task != nullptr);
1064
1065 new_task->Bind(&m_platform, user_task);
1066
1067 return new_task;
1068 }
1069
1074 {
1075 #if STK_SEGGER_SYSVIEW
1076 // start tracing new task
1077 SEGGER_SYSVIEW_OnTaskCreate(task->GetUserStack()->tid);
1078 if (IsStarted())
1079 SendTaskTraceInfo(task);
1080 #endif
1081
1082 m_strategy.AddTask(task);
1083 }
1084
1090 {
1091 KernelTask *task = AllocateNewTask(user_task);
1092 STK_ASSERT(task != nullptr);
1093
1094 AddKernelTask(task);
1095 }
1096
1105 void HrtAllocateAndAddNewTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
1106 {
1107 KernelTask *task = AllocateNewTask(user_task);
1108 STK_ASSERT(task != nullptr);
1109
1110 task->HrtInit(periodicity_tc, deadline_tc, start_delay_tc);
1111
1112 AddKernelTask(task);
1113 }
1114
1120 {
1122
1123 KernelTask *caller = FindTaskBySP(m_platform.GetCallerSP());
1124 STK_ASSERT(caller != nullptr);
1125
1126 typename KernelTask::AddTaskRequest req = { .user_task = user_task };
1127 caller->m_srt[0].add_task_req = &req;
1128
1129 // notify kernel
1131
1132 // switch out and wait for completion (due to context switch request could be processed here)
1133 if (caller->m_srt[0].add_task_req != nullptr)
1134 m_service.SwitchToNext();
1135
1136 STK_ASSERT(caller->m_srt[0].add_task_req == nullptr);
1137 }
1138
1144 {
1145 for (uint32_t i = 0; i < TASKS_MAX; ++i)
1146 {
1147 KernelTask *task = &m_task_storage[i];
1148 if (task->GetUserTask() == user_task)
1149 return task;
1150 }
1151
1152 return nullptr;
1153 }
1154
1160 {
1161 for (uint32_t i = 0; i < TASKS_MAX; ++i)
1162 {
1163 KernelTask *task = &m_task_storage[i];
1164 if (task->GetUserStack() == stack)
1165 return task;
1166 }
1167
1168 return nullptr;
1169 }
1170
1176 {
1177 STK_ASSERT(m_task_now != nullptr);
1178
1179 if (m_task_now->IsMemoryOfSP(SP))
1180 return m_task_now;
1181
1182 for (uint32_t i = 0; i < TASKS_MAX; ++i)
1183 {
1184 KernelTask *task = &m_task_storage[i];
1185
1186 // skip finished tasks (applicable only for KERNEL_DYNAMIC mode)
1187 if (IsDynamicMode() && !task->IsBusy())
1188 continue;
1189
1190 if (task->IsMemoryOfSP(SP))
1191 return task;
1192 }
1193
1194 return nullptr;
1195 }
1196
1202 {
1203 STK_ASSERT(task != nullptr);
1204
1205 #if STK_SEGGER_SYSVIEW
1206 SEGGER_SYSVIEW_OnTaskTerminate(task->GetUserStack()->tid);
1207 #endif
1208
1209 m_strategy.RemoveTask(task);
1210 task->Unbind();
1211 }
1212
1224 {
1225 STK_ASSERT(m_strategy.GetSize() != 0);
1226
1227 // iterate tasks and generate OnTaskSleep for a strategy for all initially sleeping tasks
1228 if (TStrategy::SLEEP_EVENT_API)
1229 {
1230 for (uint32_t i = 0; i < TASKS_MAX; ++i)
1231 {
1232 KernelTask *task = &m_task_storage[i];
1233
1234 if (task->IsSleeping())
1235 {
1236 if ((task->m_state & KernelTask::STATE_SLEEP_PENDING) != 0U)
1237 {
1238 task->m_state &= ~KernelTask::STATE_SLEEP_PENDING;
1239
1240 // notify strategy that task is sleeping
1241 m_strategy.OnTaskSleep(task);
1242 }
1243 }
1244 }
1245 }
1246
1247 // get initial state and first task
1248 {
1250
1251 KernelTask *next = nullptr;
1253
1254 // expecting only SLEEPING or SWITCHING states
1256
1258 {
1259 m_task_now = next;
1260
1261 active = next->GetUserStack();
1262
1263 if (IsHrtMode())
1264 next->HrtOnSwitchedIn();
1265 }
1266 else
1268 {
1269 // MISRA 5-2-3 deviation: GetNext/GetFirst returns IKernelTask*, all objects in
1270 // the strategy pool are KernelTask instances - downcast is guaranteed safe.
1271 m_task_now = static_cast<KernelTask *>(m_strategy.GetFirst());
1272
1273 active = &m_sleep_trap[0].stack;
1274 }
1275 }
1276
1277 // is in running state
1279
1280 #if STK_SEGGER_SYSVIEW
1281 SEGGER_SYSVIEW_OnTaskStartExec(m_task_now->tid);
1282 #endif
1283 }
1284
1291 {
1292 if (IsDynamicMode())
1293 {
1295
1296 // is in stopped state, i.e. is ready to Start() again
1298 }
1299 }
1300
1317 bool OnTick(Stack *&idle, Stack *&active
1319 , Timeout &ticks
1320 #endif
1321 )
1322 {
1323 #if !STK_TICKLESS_IDLE
1324 // in non-tickless mode kernel is advancing strictly by 1 tick on every OnTick call
1325 enum { ticks = 1 };
1326 #endif
1327
1328 // advance internal timestamp
1329 m_service.IncrementTicks(ticks);
1330
1331 // consume elapsed and update to ticks to sleep
1332 #if STK_TICKLESS_IDLE
1333 ticks = (
1334 #else
1335 // notify compiler that we ignore a return value of UpdateTasks
1336 static_cast<void>(
1337 #endif
1338 UpdateTasks(ticks));
1339
1340 // decide on a context switch
1341 return UpdateFsmState(idle, active);
1342 }
1343
// Cooperative yield entry point: forces the task identified by caller_SP to
// be switched out on the next kernel tick.
1344 void OnTaskSwitch(Word caller_SP)
1345 {
1346 // yield with 2 ticks: 1 will be incremented on the next OnTick call by UpdateTasks
1347 // and remaining 1 will cause a context switch by UpdateFsmState when strategy detects
1348 // it as a sleeping task
1349 OnTaskSleep(caller_SP, 2);
1350 }
1351
1352 void OnTaskSleep(Word caller_SP, Timeout ticks)
1353 {
1354 KernelTask *task = FindTaskBySP(caller_SP);
1355 STK_ASSERT(task != nullptr);
1356
1357 // make change to HRT state and sleep time atomic
1358 {
1360
1361 if (IsHrtMode())
1362 task->HrtOnWorkCompleted();
1363
1364 task->ScheduleSleep(ticks);
1365 }
1366
1367 // note: we do not spin long here, kernel will switch this task out from scheduling on the next tick
1368 while (task->IsSleeping())
1369 {
1371 }
1372 }
1373
1374 void OnTaskSleepUntil(Word caller_SP, Ticks timestamp)
1375 {
1377
1378 KernelTask *task = FindTaskBySP(caller_SP);
1379 STK_ASSERT(task != nullptr);
1380
1381 // make change to HRT state and sleep time atomic
1382 {
1384
1385 Ticks ticks = timestamp - m_service.m_ticks;
1386
1387 // if provided timestamp expired, just ignore any sleep for this task
1388 if (ticks <= 0)
1389 return;
1390
1391 task->ScheduleSleep(ticks);
1392 }
1393
1394 // note: we do not spin long here, kernel will switch this task out from scheduling on the next tick
1395 while (task->IsSleeping())
1396 {
1398 }
1399 }
1400
1401 void OnTaskExit(Stack *stack)
1402 {
1403 if (IsDynamicMode())
1404 {
1405 KernelTask *task = FindTaskByStack(stack);
1406 STK_ASSERT(task != nullptr);
1407
1408 task->ScheduleRemoval();
1409 }
1410 else
1411 {
1412 // kernel operating mode must be KERNEL_DYNAMIC for tasks to be able to exit
1414 }
1415 }
1416
1417 IWaitObject *OnTaskWait(Word caller_SP, ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
1418 {
1419 if (IsSyncMode())
1420 {
1421 STK_ASSERT(timeout != 0); // API contract: caller must not be in ISR
1422 STK_ASSERT(sync_obj != nullptr); // API contract: ISyncObject instance must be provided
1423 STK_ASSERT(mutex != nullptr); // API contract: IMutex instance must be provided
1424 STK_ASSERT((sync_obj->GetHead() == nullptr) || (sync_obj->GetHead() == &m_sync_list[0]));
1425
1426 KernelTask *task = FindTaskBySP(caller_SP);
1427 STK_ASSERT(task != nullptr);
1428
1429 // configure waiting
1430 task->m_wait_obj->SetupWait(sync_obj, timeout);
1431
1432 // register ISyncObject if not yet
1433 if (sync_obj->GetHead() == nullptr)
1434 m_sync_list->LinkBack(sync_obj);
1435
1436 // start sleeping infinitely, we rely on a Wake call via WaitObject
1438
1439 // unlock mutex locked externally, so that we could wait in a busy-waiting loop
1440 mutex->Unlock();
1441
1442 // note: we do not spin long here, kernel will switch this task out from scheduling on the next tick
1443 while (task->IsSleeping())
1444 {
1446 }
1447
1448 // re-lock mutex when returning to the task's execution space
1449 mutex->Lock();
1450
1451 return task->m_wait_obj;
1452 }
1453 else
1454 {
1455 STK_ASSERT(false);
1456 return nullptr;
1457 }
1458 }
1459
// Resolves the task id of the calling task, identified by its stack pointer.
1460 TId OnGetTid(Word caller_SP)
1461 {
// locate the owner by checking which task's stack memory contains caller_SP
1462 KernelTask *task = FindTaskBySP(caller_SP);
1463 STK_ASSERT(task != nullptr);
1464
1465 return task->GetTid();
1466 }
1467
1470 Timeout UpdateTasks(const Timeout elapsed_ticks)
1471 {
1472 // sync objects are updated before UpdateTaskRequest which may add a new object (newly added object must become 1 tick older)
1473 if (IsSyncMode())
1474 UpdateSyncObjects(elapsed_ticks);
1475
1477
1478 return UpdateTaskState(elapsed_ticks);
1479 }
1480
1490 Timeout UpdateTaskState(const Timeout elapsed_ticks)
1491 {
1492 Timeout sleep_ticks = (IsTicklessMode() ? STK_TICKLESS_TICKS_MAX : 1);
1493
1494 for (uint32_t i = 0; i < TASKS_MAX; ++i)
1495 {
1496 KernelTask *task = &m_task_storage[i];
1497
1498 if (task->IsSleeping())
1499 {
1500 if (IsDynamicMode())
1501 {
1502 // task is pending removal, wait until it is switched out
1503 if (task->IsPendingRemoval())
1504 {
1505 if ((task != m_task_now) ||
1506 ((m_strategy.GetSize() == 1) && (m_fsm_state == FSM_STATE_SLEEPING)))
1507 {
1508 RemoveTask(task);
1509 continue;
1510 }
1511 }
1512 }
1513
1514 // deliver sleep event to strategy
1515 // note: only currently scheduled task can be pending to sleep
1516 if (TStrategy::SLEEP_EVENT_API)
1517 {
1518 if ((task->m_state & KernelTask::STATE_SLEEP_PENDING) != 0U)
1519 {
1520 task->m_state &= ~KernelTask::STATE_SLEEP_PENDING;
1521
1522 // notify strategy that task is sleeping
1523 m_strategy.OnTaskSleep(task);
1524 }
1525 }
1526
1527 // advance sleep time by a tick
1528 task->m_time_sleep += elapsed_ticks;
1529
1530 // deliver sleep event to strategy
1531 if (TStrategy::SLEEP_EVENT_API)
1532 {
1533 // notify strategy that task woke up
1534 if (task->m_time_sleep >= 0)
1535 m_strategy.OnTaskWake(task);
1536 }
1537 }
1538 else
1539 if (IsHrtMode())
1540 {
1541 // in HRT mode we trace how long task spent in active state (doing some work)
1542 if (task->IsBusy())
1543 {
1544 task->m_hrt[0].duration += elapsed_ticks;
1545
1546 // check if deadline is missed (HRT failure)
1547 if (task->HrtIsDeadlineMissed(task->m_hrt[0].duration))
1548 {
1549 bool can_recover = false;
1550
1551 // report deadline overrun to a strategy which supports overrun recovery
1552 if (TStrategy::DEADLINE_MISSED_API)
1553 can_recover = m_strategy.OnTaskDeadlineMissed(task);
1554
1555 // report failure if it could not be recovered by a scheduling strategy
1556 if (!can_recover)
1558 }
1559 }
1560 }
1561
1562 // get the number ticks the driver has to keep CPU in Idle
1563 if (IsTicklessMode() && (sleep_ticks > 1) && task->IsBusy())
1564 {
1565 // note: task sleep time is negative
1566 Timeout task_sleep = stk::Max<Timeout>(0, -task->m_time_sleep);
1567
1568 if (IsSyncMode())
1569 {
1570 // likely task is sleeping during sync operation (see Wait)
1571 if (task->m_wait_obj->IsWaiting())
1572 {
1573 // note: sync wait time is positive
1574 task_sleep = task->m_wait_obj->m_time_wait;
1575
1576 // we shall account for only valid time (when task is waiting during sync operation)
1577 if (task_sleep > 0)
1578 sleep_ticks = stk::Min(sleep_ticks, task_sleep);
1579 }
1580 else
1581 {
1582 sleep_ticks = stk::Min(sleep_ticks, task_sleep);
1583 }
1584 }
1585 else
1586 {
1587 sleep_ticks = stk::Min(sleep_ticks, task_sleep);
1588 }
1589
1590 // clamp to [1, STK_TICKLESS_TICKS_MAX] range
1591 sleep_ticks = stk::Max<Timeout>(sleep_ticks, 1);
1592 }
1593 }
1594
1595 return sleep_ticks;
1596 }
1597
1600 void UpdateSyncObjects(const Timeout elapsed_ticks)
1601 {
1603
1604 ISyncObject::ListEntryType *itr = m_sync_list->GetFirst();
1605
1606 while (itr != nullptr)
1607 {
1608 ISyncObject::ListEntryType *next = itr->GetNext();
1609
1610 // MISRA 5-2-3 deviation: GetNext/GetFirst returns ISyncObject*, all objects in
1611 // m_sync_list are ISyncObject instances - downcast is guaranteed safe.
1612 if (!static_cast<ISyncObject *>(itr)->Tick(elapsed_ticks))
1613 m_sync_list->Unlink(itr);
1614
1615 itr = next;
1616 }
1617 }
1618
1622 {
1623 if (m_request == REQUEST_NONE)
1624 return;
1625
1626 // process AddTask requests coming from tasks (KERNEL_DYNAMIC mode only, KERNEL_HRT is
1627 // excluded as we assume that HRT tasks must be known to the kernel before a Start())
1628 if (IsDynamicMode() && !IsHrtMode())
1629 {
1630 // process serialized AddTask request made from another active task, requesting process
1631 // is currently waiting due to SwitchToNext()
1632 if ((m_request & REQUEST_ADD_TASK) != 0U)
1633 {
1635
1636 for (uint32_t i = 0; i < TASKS_MAX; ++i)
1637 {
1638 KernelTask *task = &m_task_storage[i];
1639
1640 if (task->m_srt[0].add_task_req != nullptr)
1641 {
1643
1644 task->m_srt[0].add_task_req = nullptr;
1645 __stk_full_memfence();
1646 }
1647 }
1648 }
1649 }
1650 }
1651
1657 {
1659 KernelTask *itr = nullptr;
1660
1661 // check if no tasks left in KERNEL_DYNAMIC mode and exit, if KERNEL_DYNAMIC is not
1662 // set then 'is_empty' will always be false
1663 bool is_empty = IsDynamicMode() && (m_strategy.GetSize() == 0U);
1664
1665 if (!is_empty)
1666 {
1667 // MISRA 5-2-3 deviation: GetNext/GetFirst returns IKernelTask*, all objects in
1668 // the strategy pool are KernelTask instances - downcast is guaranteed safe.
1669 itr = static_cast<KernelTask *>(m_strategy.GetNext());
1670
1671 // sleep-aware strategy returns nullptr if no active tasks available, start sleeping
1672 if (itr == nullptr)
1673 {
1674 type = FSM_EVENT_SLEEP;
1675 }
1676 else
1677 {
1678 // strategy must provide active-only task
1679 STK_ASSERT(!itr->IsSleeping());
1680
1681 // if was sleeping, process wake event first
1683 }
1684 }
1685
1686 next = itr;
1687 return type;
1688 }
1689
1694#ifdef _STK_UNDER_TEST
1695 virtual
1696#endif
1702
1708 bool UpdateFsmState(Stack *&idle, Stack *&active)
1709 {
1710 KernelTask *now = m_task_now, *next = nullptr;
1711 bool switch_context = false;
1712
1713 EFsmState new_state = GetNewFsmState(next);
1714
1715 switch (new_state)
1716 {
1718 switch_context = StateSwitch(now, next, idle, active);
1719 break;
1720 case FSM_STATE_SLEEPING:
1721 switch_context = StateSleep(now, next, idle, active);
1722 break;
1723 case FSM_STATE_WAKING:
1724 switch_context = StateWake(now, next, idle, active);
1725 break;
1726 case FSM_STATE_EXITING:
1727 switch_context = StateExit(now, next, idle, active);
1728 break;
1729 case FSM_STATE_NONE:
1730 return switch_context; // valid intermittent non-persisting state: no-transition
1731 case FSM_STATE_MAX:
1732 default: // invalid state value
1734 break;
1735 }
1736
1737 m_fsm_state = new_state;
1738 return switch_context;
1739 }
1740
1748 bool StateSwitch(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
1749 {
1750 STK_ASSERT(now != nullptr);
1751 STK_ASSERT(next != nullptr);
1752
1753 // do not switch context because task did not change
1754 if (next == now)
1755 return false;
1756
1757 idle = now->GetUserStack();
1758 active = next->GetUserStack();
1759
1760 // if stack memory is exceeded these assertions will be hit
1761 if (now->IsBusy())
1762 {
1763 // current task could exit, thus we check it with IsBusy to avoid referencing nullptr returned by GetUserTask()
1765 }
1767
1768 m_task_now = next;
1769
1770 if ((IsHrtMode()))
1771 {
1772 if (now->m_hrt[0].done)
1773 {
1775 next->HrtOnSwitchedIn();
1776 }
1777 }
1778
1779 #if STK_SEGGER_SYSVIEW
1780 SEGGER_SYSVIEW_OnTaskStopReady(now->GetUserStack()->tid, TRACE_EVENT_SWITCH);
1781 SEGGER_SYSVIEW_OnTaskStartReady(next->GetUserStack()->tid);
1782 #endif
1783
1784 return true; // switch context
1785 }
1786
1794 bool StateWake(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
1795 {
1796 (void)now;
1797
1798 STK_ASSERT(next != nullptr);
1799
1800 idle = &m_sleep_trap[0].stack;
1801 active = next->GetUserStack();
1802
1803 // if stack memory is exceeded these assertions will be hit
1806
1807 m_task_now = next;
1808
1809 #if STK_SEGGER_SYSVIEW
1810 SEGGER_SYSVIEW_OnTaskStartReady(next->GetUserStack()->tid);
1811 #endif
1812
1813 if ((IsHrtMode()))
1814 next->HrtOnSwitchedIn();
1815
1816 return true; // switch context
1817 }
1818
1826 bool StateSleep(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
1827 {
1828 (void)next;
1829
1830 STK_ASSERT(now != nullptr);
1831 STK_ASSERT(m_sleep_trap[0].stack.SP != 0);
1832
1833 idle = now->GetUserStack();
1834 active = &m_sleep_trap[0].stack;
1835
1836 m_task_now = static_cast<KernelTask *>(m_strategy.GetFirst());
1837
1838 #if STK_SEGGER_SYSVIEW
1839 SEGGER_SYSVIEW_OnTaskStopReady(now->GetUserStack()->tid, TRACE_EVENT_SLEEP);
1840 #endif
1841
1842 if (IsHrtMode())
1843 {
1844 if (!now->IsPendingRemoval())
1846 }
1847
1848 return true; // switch context
1849 }
1850
// FSM handler for the Exit state (KERNEL_DYNAMIC: last task finished).
// Installs the Exit trap stack as the active context, clears the current-task
// pointer, and stops the platform driver. Returns false — no ordinary context
// switch is requested; execution leaves through the trap stack.
1859 bool StateExit(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
1860 {
1861 (void)now;
1862 (void)next;
1863
1864 if (IsDynamicMode())
1865 {
1866 // dynamic tasks are not supported if main process's stack memory is not provided in Start()
1867 STK_ASSERT(m_exit_trap[0].stack.SP != 0);
1868
1869 idle = nullptr;
1870 active = &m_exit_trap[0].stack;
1871
1872 m_task_now = nullptr;
1873
1874 m_platform.Stop();
1875 }
1876 else
1877 {
// non-dynamic kernels never reach Exit; outputs are intentionally untouched
1878 (void)idle;
1879 (void)active;
1880 }
1881
1882 return false;
1883 }
1884
1888 bool IsInitialized() const { return (m_state != STATE_INACTIVE); }
1889
1899
1900#if STK_SEGGER_SYSVIEW
// Publishes task metadata (id, trace name, stack base and size) to SEGGER
// SystemView so the host tool can label this task in the recorded trace.
1905 void SendTaskTraceInfo(KernelTask *task)
1906 {
// only a slot with a bound user task has valid metadata to report
1907 STK_ASSERT(task->IsBusy());
1908
1909 SEGGER_SYSVIEW_TASKINFO info =
1910 {
1911 .TaskID = task->GetUserStack()->tid,
1912 .sName = task->GetUserTask()->GetTraceName(),
// priority is not modeled by the kernel; report a fixed value
1913 .Prio = 0,
1914 .StackBase = hw::PtrToWord(task->GetUserTask()->GetStack()),
1915 .StackSize = task->GetUserTask()->GetStackSizeBytes()
1916 };
1917 SEGGER_SYSVIEW_SendTaskInfo(&info);
1918 }
1919#endif
1920
1921 // Kernel modes:
// Compile-time queries of the TMode template parameter. Each folds to a
// constant, so mode-dependent branches throughout the kernel are eliminated
// by the optimizer.
1922 static __stk_forceinline bool IsStaticMode() { return ((TMode & KERNEL_STATIC) != 0U); }
1923 static __stk_forceinline bool IsDynamicMode() { return ((TMode & KERNEL_DYNAMIC) != 0U); }
1924 static __stk_forceinline bool IsHrtMode() { return ((TMode & KERNEL_HRT) != 0U); }
1925 static __stk_forceinline bool IsSyncMode() { return ((TMode & KERNEL_SYNC) != 0U); }
1926 static __stk_forceinline bool IsTicklessMode() { return ((TMode & KERNEL_TICKLESS) != 0U); }
1927
1928 // If hit here: Kernel<N> expects at least 1 task, e.g. N > 0
1930
1931 // If hit here: Kernel mode must be assigned.
1932 STK_STATIC_ASSERT_N(KERNEL_MODE_MUST_BE_SET, (TMode != 0U));
1933
1934 // If hit here: KERNEL_STATIC and KERNEL_DYNAMIC can not be mixed, either one of these is possible.
1935 STK_STATIC_ASSERT_N(KERNEL_MODE_MIX_NOT_ALLOWED,
1936 (((TMode & KERNEL_STATIC) & (TMode & KERNEL_DYNAMIC)) == 0U));
1937
1938 // If hit here: KERNEL_HRT must accompany KERNEL_STATIC or KERNEL_DYNAMIC.
1939 STK_STATIC_ASSERT_N(KERNEL_MODE_HRT_ALONE, (((TMode & KERNEL_HRT) == 0U) ||
1940 ((((TMode & KERNEL_HRT) != 0U)) && (((TMode & KERNEL_STATIC) != 0U) || ((TMode & KERNEL_DYNAMIC) != 0U)))));
1941
1942 // if hit here: KERNEL_TICKLESS is incompatible with KERNEL_HRT. Tickless suppresses the timer,
1943 // which destroys the precise periodicity HRT depends on.
1944 STK_STATIC_ASSERT_N(TICKLESS_HRT_CONFLICT,
1945 (((TMode & KERNEL_TICKLESS) == 0U) || ((TMode & KERNEL_HRT) == 0U)));
1946
1951
1966
1982
1989
1991 TPlatform m_platform;
1992 TStrategy m_strategy;
1998 volatile uint8_t m_request;
1999 volatile EState m_state;
2001
2003 // FSM_EVENT_SWITCH FSM_EVENT_SLEEP FSM_EVENT_WAKE FSM_EVENT_EXIT
2008 };
2009
2011};
2012
2013} // namespace stk
2014
2015#endif /* STK_H_ */
#define STK_KERNEL_PANIC(id)
Called when the kernel detects an unrecoverable internal fault.
Definition stk_arch.h:63
#define STK_STATIC_ASSERT_N(NAME, X)
Compile-time assertion with a user-defined name suffix.
Definition stk_defs.h:359
#define __stk_forceinline
Forces compiler to always inline the decorated function, regardless of optimisation level.
Definition stk_defs.h:104
#define STK_TICKLESS_IDLE
Enables tickless (dynamic-tick) low-power operation during idle periods.
Definition stk_defs.h:36
#define STK_ASSERT(e)
Runtime assertion. Halts execution if the expression e evaluates to false.
Definition stk_defs.h:330
#define __stk_attr_noinline
Prevents compiler from inlining the decorated function (function prefix).
Definition stk_defs.h:185
#define STK_TICKLESS_TICKS_MAX
Maximum number of kernel ticks the hardware timer may be suppressed in one tickless idle interval whe...
Definition stk_defs.h:62
#define STK_STATIC_ASSERT_DESC(X, DESC)
Compile-time assertion with a custom error description. Produces a compilation error if X is false.
Definition stk_defs.h:350
#define STK_STACK_MEMORY_FILLER
Sentinel value written to the entire stack region at initialization (stack watermark pattern).
Definition stk_defs.h:377
#define STK_ALLOCATE_COUNT(MODE, FLAG, ONTRUE, ONFALSE)
Selects a static array element count at compile time based on a mode flag.
Definition stk_defs.h:485
Contains helper implementations which simplify user-side code.
Earliest Deadline First (EDF) task-switching strategy (stk::SwitchStrategyEDF).
Fixed-priority preemptive task-switching strategy with round-robin within each priority level (stk::S...
Rate-Monotonic (RM) and Deadline-Monotonic (DM) task-switching strategies (stk::SwitchStrategyMonoton...
Round-Robin task-switching strategy (stk::SwitchStrategyRoundRobin / stk::SwitchStrategyRR).
Smooth Weighted Round-Robin task-switching strategy (stk::SwitchStrategySmoothWeightedRoundRobin / st...
#define __stk_relax_cpu
Emits a CPU pipeline-relaxation hint for use inside hot busy-wait (spin) loops (in-code statement).
Definition stktest.h:33
Namespace of STK package.
uintptr_t Word
Native processor word type.
Definition stk_common.h:112
@ TRACE_EVENT_SLEEP
Definition stk_common.h:102
@ TRACE_EVENT_SWITCH
Definition stk_common.h:101
const Timeout WAIT_INFINITE
Timeout value: block indefinitely until the synchronization object is signaled.
Definition stk_common.h:139
int64_t Ticks
Ticks value.
Definition stk_common.h:150
int32_t Timeout
Timeout time (ticks).
Definition stk_common.h:133
@ STACK_SLEEP_TRAP
Stack of the Sleep trap.
Definition stk_common.h:72
@ STACK_USER_TASK
Stack of the user task.
Definition stk_common.h:71
@ STACK_EXIT_TRAP
Stack of the Exit trap.
Definition stk_common.h:73
constexpr T Max(T a, T b) noexcept
Compile-time maximum of two values.
Definition stk_defs.h:536
constexpr T Min(T a, T b) noexcept
Compile-time minimum of two values.
Definition stk_defs.h:530
@ PERIODICITY_DEFAULT
Default periodicity (microseconds), 1 millisecond.
Definition stk_common.h:82
@ PERIODICITY_MAX
Maximum periodicity (microseconds), 99 milliseconds (note: this value is the highest working on a rea...
Definition stk_common.h:81
@ SYS_TASK_ID_EXIT
Exit trap.
Definition stk_common.h:92
@ SYS_TASK_ID_SLEEP
Sleep trap.
Definition stk_common.h:91
Word TId
Definition stk_common.h:117
@ ACCESS_PRIVILEGED
Privileged access mode (access to hardware is fully unrestricted).
Definition stk_common.h:33
@ KERNEL_TICKLESS
Tickless mode. To use this mode STK_TICKLESS_IDLE must be defined to 1 in stk_config....
Definition stk_common.h:45
@ KERNEL_SYNC
Synchronization support (see Event).
Definition stk_common.h:44
@ KERNEL_HRT
Hard Real-Time (HRT) behavior (tasks are scheduled periodically and have an execution deadline,...
Definition stk_common.h:43
@ KERNEL_STATIC
All tasks are static and can not exit.
Definition stk_common.h:41
@ KERNEL_DYNAMIC
Tasks can be added or removed and therefore exit when done.
Definition stk_common.h:42
@ KERNEL_PANIC_BAD_MODE
Kernel is in bad/unsupported mode for the current operation.
Definition stk_common.h:62
@ KERNEL_PANIC_BAD_STATE
Kernel entered unexpected (bad) state.
Definition stk_common.h:61
__stk_forceinline void WriteVolatile64(volatile T *addr, T value)
Atomically write a 64-bit volatile value.
Definition stk_arch.h:411
__stk_forceinline Word PtrToWord(T *ptr) noexcept
Cast a pointer to a CPU register-width integer.
Definition stk_arch.h:94
bool IsInsideISR()
Check whether the CPU is currently executing inside a hardware interrupt service routine (ISR).
Definition stktest.cpp:103
__stk_forceinline T ReadVolatile64(volatile const T *addr)
Atomically read a 64-bit volatile value.
Definition stk_arch.h:357
volatile uint8_t m_request
Bitmask of pending ERequest flags from running tasks. Written by tasks, read/cleared by UpdateTaskReq...
Definition stk.h:1998
static bool IsStaticMode()
Definition stk.h:1922
bool UpdateFsmState(Stack *&idle, Stack *&active)
Update FSM state.
Definition stk.h:1708
TStrategy m_strategy
Task-switching strategy (determines which task runs next).
Definition stk.h:1992
KernelTask * AllocateNewTask(ITask *user_task)
Allocate new instance of KernelTask.
Definition stk.h:1037
static bool IsSyncMode()
Definition stk.h:1925
void OnStart(Stack *&active)
Called by platform driver immediately after a scheduler start (first tick).
Definition stk.h:1223
SyncObjectList m_sync_list[((((TMode) &(KERNEL_SYNC)) !=0U) ?(1) :(0))]
List of active sync objects. Zero-size (no memory) if KERNEL_SYNC is not set.
Definition stk.h:2000
EFsmState
Finite-state machine (FSM) state. Encodes what the kernel is currently doing between two consecutive ...
Definition stk.h:971
@ FSM_STATE_EXITING
All tasks exited (KERNEL_DYNAMIC only), executing the exit trap to return from Start().
Definition stk.h:976
@ FSM_STATE_SLEEPING
All tasks are sleeping, the sleep trap is executing (CPU in low-power state).
Definition stk.h:974
@ FSM_STATE_SWITCHING
Normal operation: switching between runnable tasks each tick.
Definition stk.h:973
@ FSM_STATE_NONE
Sentinel / uninitialized value. Set by the constructor, replaced by FSM_STATE_SWITCHING on the first ...
Definition stk.h:972
@ FSM_STATE_MAX
Sentinel: number of valid states (used to size the FSM table), denotes uninitialized state.
Definition stk.h:977
@ FSM_STATE_WAKING
At least one task woke up, transitioning from sleep trap back to a user task.
Definition stk.h:975
KernelTask * FindTaskByStack(const Stack *stack)
Find kernel task by the bound Stack instance.
Definition stk.h:1159
KernelTask TaskStorageType[TASKS_MAX]
KernelTask array type used as a storage for the KernelTask instances.
Definition stk.h:1950
volatile EState m_state
Current kernel state.
Definition stk.h:1999
SleepTrapStack m_sleep_trap[1]
Sleep trap (always present): executed when all tasks are sleeping.
Definition stk.h:1995
void OnTaskSwitch(Word caller_SP)
Called by the Thread process (via IKernelService::SwitchToNext) to switch to the next task.
Definition stk.h:1344
EFsmState GetNewFsmState(KernelTask *&next)
Get new FSM state.
Definition stk.h:1697
void RequestAddTask(ITask *user_task)
Request to add new task.
Definition stk.h:1119
void AddTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
Register a task for hard real-time (HRT) scheduling.
Definition stk.h:871
StackMemoryWrapper< STACK_SIZE_MIN > ExitTrapStackMemory
Stack memory wrapper type for the exit trap.
Definition stk.h:95
void Initialize(uint32_t resolution_us=PERIODICITY_DEFAULT)
Prepare kernel for use: reset state, configure the platform, and register the service singleton.
Definition stk.h:805
bool StateWake(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Wakes up after sleeping.
Definition stk.h:1794
bool OnTick(Stack *&idle, Stack *&active)
Process one scheduler tick. Called from the platform timer/tick ISR.
Definition stk.h:1317
EState GetState() const
Get kernel state.
Definition stk.h:963
KernelTask * FindTaskBySP(Word SP)
Find kernel task for a Stack Pointer (SP).
Definition stk.h:1175
void InitTraps()
Initialize stack of the traps.
Definition stk.h:1003
void OnTaskSleepUntil(Word caller_SP, Ticks timestamp)
Called by Thread process (via IKernelService::SleepUntil) for exclusion of the calling process from s...
Definition stk.h:1374
ISyncObject::ListHeadType SyncObjectList
Intrusive list of active ISyncObject instances registered with this kernel. Each sync object in this ...
Definition stk.h:1988
ExitTrapStack m_exit_trap[((((TMode) &(KERNEL_DYNAMIC)) !=0U) ?(1) :(0))]
Exit trap: zero-size in KERNEL_STATIC mode; one entry in KERNEL_DYNAMIC mode.
Definition stk.h:1996
EFsmEvent
Finite-state machine (FSM) event. Computed by FetchNextEvent() each tick based on strategy output and...
Definition stk.h:985
@ FSM_EVENT_EXIT
No tasks remain (KERNEL_DYNAMIC), exit scheduling and return from Start().
Definition stk.h:989
@ FSM_EVENT_WAKE
A task became runnable while the kernel was sleeping, wake from sleep trap.
Definition stk.h:988
@ FSM_EVENT_SLEEP
No runnable tasks, enter sleep trap.
Definition stk.h:987
@ FSM_EVENT_SWITCH
Strategy returned a runnable task, perform a context switch.
Definition stk.h:986
@ FSM_EVENT_MAX
Sentinel: number of valid events (used to size the FSM table).
Definition stk.h:990
IWaitObject * OnTaskWait(Word caller_SP, ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
Called from the Thread process when task needs to wait.
Definition stk.h:1417
ITaskSwitchStrategy * GetSwitchStrategy()
Get task-switching strategy instance owned by this kernel.
Definition stk.h:959
KernelTask * FindTaskByUserTask(const ITask *user_task)
Find kernel task by the bound ITask instance.
Definition stk.h:1143
static bool IsValidFsmState(EFsmState state)
Check if FSM state is valid.
Definition stk.h:995
TaskStorageType m_task_storage
Static pool of TSize KernelTask slots (free slots have m_user == nullptr).
Definition stk.h:1994
static bool IsHrtMode()
Definition stk.h:1924
void RemoveTask(KernelTask *task)
Remove kernel task.
Definition stk.h:1201
void Start()
Start the scheduler. This call does not return until all tasks have exited (KERNEL_DYNAMIC mode) or i...
Definition stk.h:921
void AddKernelTask(KernelTask *task)
Add kernel task to the scheduling strategy.
Definition stk.h:1073
static bool IsDynamicMode()
Definition stk.h:1923
KernelService m_service
Kernel service singleton exposed to running tasks via IKernelService::GetInstance().
Definition stk.h:1990
void RemoveTask(ITask *user_task)
Remove a previously added task from the kernel before Start().
Definition stk.h:895
ERequest
Bitmask flags for pending inter-task requests that must be processed by the kernel on the next tick (...
Definition stk.h:102
@ REQUEST_NONE
No pending requests.
Definition stk.h:103
@ REQUEST_ADD_TASK
An AddTask() request is pending from a running task (KERNEL_DYNAMIC only).
Definition stk.h:104
~Kernel()
Destructor.
Definition stk.h:796
bool StateExit(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Exits from scheduling.
Definition stk.h:1859
KernelTask * m_task_now
Currently executing task, or nullptr before Start() or after all tasks exit.
Definition stk.h:1993
TId OnGetTid(Word caller_SP)
Called from the Thread process to get the task/thread id of the calling process.
Definition stk.h:1460
void AllocateAndAddNewTask(ITask *user_task)
Allocate new instance of KernelTask and add it into the scheduling process.
Definition stk.h:1089
TPlatform m_platform
Platform driver (SysTick, PendSV, context switch implementation).
Definition stk.h:1991
void HrtAllocateAndAddNewTask(ITask *user_task, Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
Allocate new instance of KernelTask and add it into the HRT scheduling process.
Definition stk.h:1105
EFsmState m_fsm_state
Current FSM state. Drives context-switch decision on every tick.
Definition stk.h:1997
Timeout UpdateTasks(const Timeout elapsed_ticks)
Update tasks (sleep, requests).
Definition stk.h:1470
void ScheduleAddTask()
Signal the kernel to process a pending AddTask request on the next tick.
Definition stk.h:1894
bool IsInitialized() const
Check whether Initialize() has been called and completed successfully.
Definition stk.h:1888
IPlatform * GetPlatform()
Get platform driver instance owned by this kernel.
Definition stk.h:954
bool StateSwitch(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Switches contexts.
Definition stk.h:1748
StackMemoryWrapper<(32)> SleepTrapStackMemory
Stack memory wrapper type for the sleep trap.
Definition stk.h:89
static bool IsTicklessMode()
Definition stk.h:1926
void OnTaskExit(Stack *stack)
Called from the Thread process when a task finishes (its Run function returned).
Definition stk.h:1401
Kernel()
Construct the kernel with all storage zero-initialised and the request flag set to ~0 (indicating uni...
Definition stk.h:774
EConsts
Constants.
Definition stk.h:763
@ TASKS_MAX
Maximum number of concurrently registered tasks. Fixed at compile time. Exceeding this limit in AddTa...
Definition stk.h:764
void UpdateTaskRequest()
Update pending task requests.
Definition stk.h:1621
bool StateSleep(KernelTask *now, KernelTask *next, Stack *&idle, Stack *&active)
Enters into a sleeping mode.
Definition stk.h:1826
Timeout UpdateTaskState(const Timeout elapsed_ticks)
Update task state: process removals, advance sleep timers, and track HRT durations.
Definition stk.h:1490
void OnStop()
Called by the platform driver after a scheduler stop (all tasks have exited).
Definition stk.h:1290
void UpdateSyncObjects(const Timeout elapsed_ticks)
Update synchronization objects.
Definition stk.h:1600
void OnTaskSleep(Word caller_SP, Timeout ticks)
Called by Thread process (via IKernelService::Sleep) for exclusion of the calling process from schedu...
Definition stk.h:1352
bool IsStarted() const
Check whether scheduler is currently running.
Definition stk.h:946
const EFsmState m_fsm[FSM_STATE_MAX][FSM_EVENT_MAX]
Definition stk.h:2002
void AddTask(ITask *user_task)
Register task for a soft real-time (SRT) scheduling.
Definition stk.h:832
EFsmEvent FetchNextEvent(KernelTask *&next)
Fetch next event for the FSM.
Definition stk.h:1656
Internal per-slot kernel descriptor that wraps a user ITask instance.
Definition stk.h:119
KernelTask()
Construct a free (unbound) task slot. All fields set to zero/null.
Definition stk.h:150
void ScheduleSleep(Timeout ticks)
Put the task into a sleeping state for the specified number of ticks.
Definition stk.h:572
TId GetTid() const
Get task identifier.
Definition stk.h:181
int32_t m_rt_weight[((((TStrategy::WEIGHT_API) &(1)) !=0U) ?(1) :(0))]
Run-time weight for weighted-round-robin scheduling. Zero-size for unweighted strategies.
Definition stk.h:593
WaitObject m_wait_obj[((((TMode) &(KERNEL_SYNC)) !=0U) ?(1) :(0))]
Embedded wait object for synchronization. Zero-size (no memory) if KERNEL_SYNC is not set.
Definition stk.h:594
void SetCurrentWeight(int32_t weight)
Update the run-time scheduling weight (weighted strategies only).
Definition stk.h:198
friend class Kernel
Definition stk.h:120
bool HrtIsDeadlineMissed(Timeout duration) const
Check if deadline missed.
Definition stk.h:561
EStateFlags
Bitmask of transient state flags. Set by the task or the kernel and consumed (cleared) during UpdateT...
Definition stk.h:127
@ STATE_REMOVE_PENDING
Task returned from its Run function; slot will be freed on the next tick (KERNEL_DYNAMIC only).
Definition stk.h:129
@ STATE_SLEEP_PENDING
Task called Sleep/SleepUntil/Yield; strategy's OnTaskSleep() will be invoked on the next tick (sleep-...
Definition stk.h:130
@ STATE_NONE
No pending state flags.
Definition stk.h:128
void ScheduleRemoval()
Schedule the removal of the task from the kernel on next tick.
Definition stk.h:460
Timeout GetHrtRelativeDeadline() const
Get remaining HRT deadline (ticks left before the deadline expires).
Definition stk.h:244
Stack m_stack
Stack descriptor (SP register value + access mode + optional tid).
Definition stk.h:588
Stack * GetUserStack()
Get stack descriptor for this task slot.
Definition stk.h:166
void Bind(TPlatform *platform, ITask *user_task)
Bind this slot to a user task: set access mode, task ID, and initialise the stack.
Definition stk.h:416
void HrtHardFailDeadline(IPlatform *platform)
Hard-fail HRT task when it missed its deadline.
Definition stk.h:538
void HrtInit(Timeout periodicity_tc, Timeout deadline_tc, Timeout start_delay_tc)
Initialize task with HRT info.
Definition stk.h:494
volatile uint32_t m_state
Bitmask of EStateFlags. Written by task thread, read/cleared by kernel tick.
Definition stk.h:589
int32_t GetCurrentWeight() const
Get current (run-time) scheduling weight.
Definition stk.h:214
ITask * GetUserTask()
Get bound user task.
Definition stk.h:161
ITask * m_user
Bound user task, or NULL when slot is free.
Definition stk.h:587
~KernelTask()
Destructor.
Definition stk.h:256
bool IsBusy() const
Check whether this slot is bound to a user task.
Definition stk.h:171
void HrtOnWorkCompleted()
Called when the task process calls IKernelService::SwitchToNext to inform the Kernel that its work is completed.
Definition stk.h:552
void Wake()
Wake this task on the next scheduling tick.
Definition stk.h:187
HrtInfo m_hrt[((((TMode) &(KERNEL_HRT)) !=0U) ?(1) :(0))]
HRT metadata. Zero-size (no memory) in non-HRT mode.
Definition stk.h:592
Timeout GetHrtDeadline() const
Get absolute HRT deadline (ticks elapsed since task was activated).
Definition stk.h:232
SrtInfo m_srt[((((TMode) &(KERNEL_HRT)) !=0U) ?(0) :(1))]
SRT metadata. Zero-size (no memory) in KERNEL_HRT mode.
Definition stk.h:591
volatile Timeout m_time_sleep
Sleep countdown: negative while sleeping (absolute value = ticks remaining), zero when awake.
Definition stk.h:590
bool IsPendingRemoval() const
Check if task is pending removal.
Definition stk.h:475
void HrtOnSwitchedOut(IPlatform *)
Called when task is switched out from the scheduling process.
Definition stk.h:520
void Unbind()
Reset this slot to the free (unbound) state, clearing all scheduling metadata.
Definition stk.h:439
void HrtOnSwitchedIn()
Called when task is switched into the scheduling process.
Definition stk.h:513
bool IsMemoryOfSP(Word SP) const
Check if Stack Pointer (SP) belongs to this task.
Definition stk.h:480
int32_t GetWeight() const
Get static scheduling weight from the user task.
Definition stk.h:207
Timeout GetHrtPeriodicity() const
Get HRT scheduling periodicity.
Definition stk.h:220
bool IsSleeping() const
Check whether this task is currently sleeping (waiting for a tick or a wake event).
Definition stk.h:176
Payload for an in-flight AddTask() request issued by a running task.
Definition stk.h:142
ITask * user_task
User task to add. Must remain valid for the lifetime of its kernel slot.
Definition stk.h:143
Per-task soft real-time (SRT) metadata.
Definition stk.h:265
void Clear()
Clear all fields, ready for slot re-use.
Definition stk.h:271
AddTaskRequest * add_task_req
Definition stk.h:281
Per-task Hard Real-Time (HRT) scheduling metadata.
Definition stk.h:289
void Clear()
Clear all fields, ready for slot re-use or re-activation.
Definition stk.h:295
volatile bool done
Set to true when the task signals work completion (via Yield() or on exit). Triggers HrtOnSwitchedOut...
Definition stk.h:306
Timeout deadline
Maximum allowed active duration in ticks (relative to switch-in). Exceeding this triggers OnDeadlineM...
Definition stk.h:304
Timeout periodicity
Activation period in ticks: the task is re-activated every this many ticks.
Definition stk.h:303
Timeout duration
Ticks spent in the active (non-sleeping) state in the current period. Incremented by UpdateTaskState(...
Definition stk.h:305
Concrete implementation of IWaitObject, embedded in each KernelTask slot.
Definition stk.h:316
bool IsWaiting() const
Check if busy with waiting.
Definition stk.h:349
bool Tick(Timeout elapsed_ticks)
Advance the timeout countdown by one tick.
Definition stk.h:375
Timeout m_time_wait
Ticks remaining until timeout. Decremented each tick; WAIT_INFINITE means no timeout.
Definition stk.h:409
bool IsTimeout() const
Check whether the wait expired due to timeout.
Definition stk.h:344
void Wake(bool timeout)
Wake the waiting task (called by ISyncObject when it signals).
Definition stk.h:356
void SetupWait(ISyncObject *sync_obj, Timeout timeout)
Configure and arm this wait object for a new wait operation.
Definition stk.h:395
TId GetTid() const
Get the TId of the task that owns this wait object.
Definition stk.h:339
volatile bool m_timeout
true if the wait expired due to timeout rather than a Wake() signal.
Definition stk.h:408
ISyncObject * m_sync_obj
Sync object this wait is registered with, or NULL when not waiting.
Definition stk.h:407
KernelTask * m_task
Back-pointer to the owning KernelTask. Set once at construction; never changes.
Definition stk.h:406
Payload stored in the sync object's kernel-side list entry while a task is waiting.
Definition stk.h:332
ISyncObject * sync_obj
Sync object whose Tick() will be called each kernel tick.
Definition stk.h:333
Concrete implementation of IKernelService exposed to running tasks.
Definition stk.h:605
KernelService()
Construct an uninitialised service instance (m_platform = null, m_ticks = 0).
Definition stk.h:727
IWaitObject * Wait(ISyncObject *sobj, IMutex *mutex, Timeout ticks)
Block the calling task until a synchronization object signals or the timeout expires.
Definition stk.h:710
volatile Ticks m_ticks
Global tick counter. Written via hw::WriteVolatile64() by IncrementTick() (ISR context); read via hw:...
Definition stk.h:756
~KernelService()
Destructor.
Definition stk.h:733
friend class Kernel
Definition stk.h:606
void SwitchToNext()
Yield the CPU to the next runnable task.
Definition stk.h:696
Ticks GetTicks() const
Get the current tick count since kernel start.
Definition stk.h:624
void Sleep(Timeout ticks)
Yield the CPU for ticks, allowing the scheduler to run other tasks.
Definition stk.h:656
TPlatform * m_platform
Typed platform driver pointer, set at Initialize().
Definition stk.h:755
int32_t GetTickResolution() const
Get the tick resolution.
Definition stk.h:630
void SleepUntil(Ticks timestamp)
Yield the CPU till timestamp, allowing the scheduler to run other tasks.
Definition stk.h:676
TId GetTid() const
Get the TId of the calling task.
Definition stk.h:613
void Delay(Timeout ticks)
Busy-wait until ticks have elapsed.
Definition stk.h:637
void IncrementTicks(Ticks advance)
Increment counter by value.
Definition stk.h:749
void Initialize(IPlatform *platform)
Initialize instance.
Definition stk.h:741
Storage bundle for the sleep trap: a Stack descriptor paired with its backing memory.
Definition stk.h:1960
SleepTrapStackMemory::MemoryType Memory
Definition stk.h:1961
Memory memory
Backing stack memory array. Size: STK_SLEEP_TRAP_STACK_SIZE elements of Word.
Definition stk.h:1964
Stack stack
Stack descriptor (SP register value + access mode). Initialised by InitTraps() on every Start().
Definition stk.h:1963
Storage bundle for the exit trap: a Stack descriptor paired with its backing memory.
Definition stk.h:1976
Memory memory
Backing stack memory array. Size: STACK_SIZE_MIN elements of Word.
Definition stk.h:1980
ExitTrapStackMemory::MemoryType Memory
Definition stk.h:1977
Stack stack
Stack descriptor (SP register value + access mode). Initialised by InitTraps() on every Start().
Definition stk.h:1979
RAII guard that enters the critical section on construction and exits it on destruction.
Definition stk_arch.h:218
Stack descriptor.
Definition stk_common.h:181
EAccessMode mode
access mode
Definition stk_common.h:183
virtual Word * GetStack() const =0
Get pointer to the stack memory.
Wait object.
Definition stk_common.h:212
Synchronization object.
Definition stk_common.h:297
DLEntryType ListEntryType
List entry type of ISyncObject elements.
Definition stk_common.h:313
virtual void AddWaitObject(IWaitObject *wobj)
Called by kernel when a new task starts waiting on this event.
Definition stk_common.h:318
DLHeadType ListHeadType
List head type for ISyncObject elements.
Definition stk_common.h:308
Interface for mutex synchronization primitive.
Definition stk_common.h:381
virtual void Unlock()=0
Unlock the mutex.
virtual void Lock()=0
Lock the mutex.
Interface for a user task.
Definition stk_common.h:433
virtual EAccessMode GetAccessMode() const =0
Get hardware access mode of the user task.
virtual TId GetId() const =0
Get task Id set by application.
Scheduling-strategy-facing interface for a kernel task slot.
Definition stk_common.h:493
Interface for a platform driver.
Definition stk_common.h:575
virtual void ProcessHardFault()=0
Cause a hard fault of the system.
Interface for a back-end event handler.
Definition stk_common.h:583
Interface for a task switching strategy implementation.
Definition stk_common.h:782
Interface for the implementation of the kernel of the scheduler. It supports Soft and Hard Real-Time ...
Definition stk_common.h:854
EState
Kernel state.
Definition stk_common.h:860
@ STATE_INACTIVE
not ready, IKernel::Initialize() must be called
Definition stk_common.h:861
@ STATE_READY
ready to start, IKernel::Start() must be called
Definition stk_common.h:862
@ STATE_RUNNING
initialized and running, IKernel::Start() was called successfully
Definition stk_common.h:863
Interface for the kernel services exposed to the user processes during run-time when Kernel started s...
Definition stk_common.h:929
Adapts an externally-owned stack memory array to the IStackMemory interface.
Definition stk_helper.h:174
StackMemoryDef< _StackSize >::Type MemoryType
Definition stk_helper.h:179
DLHeadType * GetHead() const
Get the list head this entry currently belongs to.
DLEntryType * GetNext() const
Get the next entry in the list.