SuperTinyKernel™ RTOS 1.05.3
Lightweight, high-performance, deterministic, bare-metal C++ RTOS for resource-constrained embedded systems. MIT Open Source License.
Loading...
Searching...
No Matches
stktest_kernel.cpp
Go to the documentation of this file.
1/*
2 * SuperTinyKernel(TM) RTOS: Lightweight High-Performance Deterministic C++ RTOS for Embedded Systems.
3 *
4 * Source: https://github.com/SuperTinyKernel-RTOS
5 *
6 * Copyright (c) 2022-2026 Neutron Code Limited <stk@neutroncode.com>. All Rights Reserved.
7 * License: MIT License, see LICENSE for a full text.
8 */
9
10#include "stktest.h"
11
12namespace stk {
13namespace test {
14
15// ============================================================================ //
16// ============================== Kernel ====================================== //
17// ============================================================================ //
18
// Test double for Kernel: lets a test inject an arbitrary (possibly invalid) FSM
// state so that the panic path of Kernel::UpdateFsmState() can be exercised.
// NOTE(review): this is a Doxygen listing with gaps — listing lines 22, 25 and 30
// (typedefs/members such as m_fsm_state_mock's guard and the BaseType alias) are
// missing from this extraction; verify against the original source file.
19template <uint8_t TMode, uint32_t TSize, class TStrategy, class TPlatform>
20class KernelMock : public Kernel<TMode, TSize, TStrategy, TPlatform>
21{
23
24public:
26
 27 // Override the getter to bypass the ROM table during tests
 28 typename BaseType::EFsmState GetNewFsmState(typename BaseType::KernelTask *&next) override
 29 {
 31 return static_cast<typename BaseType::EFsmState>(m_fsm_state_mock);
 32
 33 return BaseType::GetNewFsmState(next);
 34 }
 35
 // Corrupts the mocked FSM state: max_val=true sets exactly FSM_STATE_MAX,
 // false sets FSM_STATE_MAX + 1 — both outside the valid state range.
 36 void ForceUpdateInvalidFsmState(bool max_val)
 37 {
 38 m_fsm_state_mock = KernelMock::FSM_STATE_MAX + (max_val ? 0 : 1);
 39
 40 Stack *idle = nullptr, *active = this->m_task_now->GetUserStack();
 41 KernelMock::UpdateFsmState(idle, active);
 42 }
43};
44
// CppUTest fixture for the Kernel test group: teardown() restores the global
// test context so a failed expectation in one test cannot leak into the next.
// NOTE(review): the TEST_GROUP(Kernel) header (listing line 45) and line 53 are
// missing from this extraction; verify against the original source file.
46{
 47 void setup() {}
 48 void teardown()
 49 {
 50 g_TestContext.RethrowAssertException(true);
 51 g_TestContext.ExpectAssert(false);
 52 g_TestContext.ExpectPanic(false);
 54 }
55};
56
// Verifies the configured task capacity is reported correctly.
// NOTE(review): listing lines 60-61 (kernel declaration and computation of
// 'result') are missing from this extraction; verify against the original.
57TEST(Kernel, MaxTasks)
58{
 59 const int32_t TASKS = 2;
62
 63 CHECK_EQUAL(TASKS, result);
64}
65
// Walks the kernel lifecycle: INACTIVE -> (Initialize) READY -> (Start) RUNNING,
// and checks the IKernelService singleton is published by the platform.
// NOTE(review): listing lines 68-69 (kernel/task declarations) are missing.
66TEST(Kernel, State)
67{
70
 71 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
72
 73 CHECK_TRUE(platform != NULL);
74
 75 CHECK_TRUE(kernel.GetState() == IKernel::STATE_INACTIVE);
76
 77 kernel.Initialize();
78
 79 CHECK_TRUE(kernel.GetState() == IKernel::STATE_READY);
80
 81 CHECK_TRUE(IKernelService::GetInstance() != NULL);
 82 CHECK_TRUE(IKernelService::GetInstance() == platform->m_service);
83
 84 kernel.AddTask(&task);
 85 kernel.Start();
86
 87 CHECK_TRUE(kernel.GetState() == IKernel::STATE_RUNNING);
88}
89
// A second Initialize() must assert (reported via TestAssertPassed).
// NOTE(review): listing line 92 (kernel declaration) is missing.
90TEST(Kernel, InitDoubleFail)
91{
93
 94 try
 95 {
 96 g_TestContext.ExpectAssert(true);
 97 kernel.Initialize();
 98 kernel.Initialize();
 99 CHECK_TEXT(false, "duplicate Kernel::Initialize() did not fail");
 100 }
 101 catch (TestAssertPassed &pass)
 102 {
 103 CHECK(true);
 104 g_TestContext.ExpectAssert(false);
 105 }
106}
107
// AddTask() before Initialize() must assert.
// NOTE(review): listing lines 110-111 (kernel/task declarations) are missing
// from this extraction; the same kind of gap recurs in the tests below.
108TEST(Kernel, AddTaskNoInit)
109{
112
 113 try
 114 {
 115 g_TestContext.ExpectAssert(true);
 116 kernel.AddTask(&task);
 117 CHECK_TEXT(false, "AddTask() did not fail");
 118 }
 119 catch (TestAssertPassed &pass)
 120 {
 121 CHECK(true);
 122 g_TestContext.ExpectAssert(false);
 123 }
124}
125
// Happy path: AddTask() registers exactly one kernel task wrapping the user task.
126TEST(Kernel, AddTask)
127{
 130 const ITaskSwitchStrategy *strategy = kernel.GetSwitchStrategy();
131
 132 kernel.Initialize();
133
 134 CHECK_EQUAL_TEXT(0, strategy->GetSize(), "Expecting none kernel tasks");
135
 136 kernel.AddTask(&task);
137
 138 CHECK_EQUAL_TEXT(1, strategy->GetSize(), "Expecting 1 kernel task");
139
 140 IKernelTask *ktask = strategy->GetFirst();
 141 CHECK_TRUE_TEXT(ktask != NULL, "Expecting one kernel task");
142
 143 CHECK_TRUE_TEXT(ktask->GetUserTask() == &task, "Expecting just added user task");
144}
145
// AddTask() must initialize the task's stack via the platform (SP points at the
// user task's stack memory).
146TEST(Kernel, AddTaskInitStack)
147{
 150 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
151
 152 kernel.Initialize();
 153 kernel.AddTask(&task);
154
 155 CHECK_EQUAL(&task, platform->m_stack_info[STACK_USER_TASK].task);
 156 CHECK_EQUAL((size_t)task.GetStack(), platform->m_stack_info[STACK_USER_TASK].stack->SP);
157}
158
// Adding more tasks than the kernel's compile-time capacity (2) must assert.
159TEST(Kernel, AddTaskFailMaxOut)
160{
 162 TaskMock<ACCESS_USER> task1, task2, task3;
163
 164 kernel.Initialize();
165
 166 try
 167 {
 168 g_TestContext.ExpectAssert(true);
 169 kernel.AddTask(&task1);
 170 kernel.AddTask(&task2);
 171 kernel.AddTask(&task3);
 172 CHECK_TEXT(false, "expecting to fail adding task because max is 2 but adding 3-rd");
 173 }
 174 catch (TestAssertPassed &pass)
 175 {
 176 CHECK(true);
 177 g_TestContext.ExpectAssert(false);
 178 }
179}
180
// Adding the same user task twice must assert.
181TEST(Kernel, AddTaskFailSameTask)
182{
185
 186 kernel.Initialize();
187
 188 try
 189 {
 190 g_TestContext.ExpectAssert(true);
 191 kernel.AddTask(&task);
 192 kernel.AddTask(&task);
 193 CHECK_TEXT(false, "expecting to fail adding the same task");
 194 }
 195 catch (TestAssertPassed &pass)
 196 {
 197 CHECK(true);
 198 g_TestContext.ExpectAssert(false);
 199 }
200}
201
// Relax-CPU helper context for AddTaskWhenStarted: each relax callback advances
// the mocked tick and, from the 2nd invocation on, asserts that the dynamically
// added task2 became visible to the switch strategy within one tick.
// NOTE(review): the struct header (listing line 202), the platform/strategy
// member declarations (212-213) and the trailing global definition (~228-233)
// are missing from this extraction; verify against the original source file.
203{
205 {
 206 counter = 0;
 207 platform = NULL;
 208 strategy = NULL;
 209 }
210
 211 uint32_t counter;
214
 215 void Process()
 216 {
 217 platform->ProcessTick();
218
 219 if (counter >= 1)
 220 {
 221 CHECK_EQUAL_TEXT(2, strategy->GetSize(), "task2 must be added within 1 tick");
 222 }
223
 224 ++counter;
 225 }
226}
228
233
// KERNEL_DYNAMIC: AddTask() after Start() must succeed, completing within the
// 2 ticks that Yield/SwitchToNext consumes.
234TEST(Kernel, AddTaskWhenStarted)
235{
 237 TaskMock<ACCESS_USER> task1, task2;
 238 const ITaskSwitchStrategy *strategy = kernel.GetSwitchStrategy();
239
 240 kernel.Initialize();
 241 kernel.AddTask(&task1);
 242 kernel.Start();
243
 244 CHECK_EQUAL_TEXT(1, strategy->GetSize(), "expecting task1 be added at this stage");
245
 247 g_AddTaskWhenStartedRelaxCpuContext.strategy = strategy;
249
 250 kernel.AddTask(&task2);
251
 252 CHECK_EQUAL_TEXT(2, strategy->GetSize(), "task2 must be added");
253
 254 // AddTask is calling Yield/SwitchToNext which takes 2 ticks
 255 CHECK_EQUAL_TEXT(2, g_AddTaskWhenStartedRelaxCpuContext.counter, "should complete within 2 ticks");
256}
257
// Non-dynamic (static) kernel: AddTask() after Start() must assert.
258TEST(Kernel, AddTaskFailStaticStarted)
259{
 261 TaskMock<ACCESS_USER> task1, task2;
262
 263 kernel.Initialize();
 264 kernel.AddTask(&task1);
 265 kernel.Start();
266
 267 try
 268 {
 269 g_TestContext.ExpectAssert(true);
 270 kernel.AddTask(&task2);
 271 CHECK_TEXT(false, "expecting to AddTask to fail when non KERNEL_DYNAMIC");
 272 }
 273 catch (TestAssertPassed &pass)
 274 {
 275 CHECK(true);
 276 g_TestContext.ExpectAssert(false);
 277 }
278}
279
// KERNEL_HRT: adding an HRT task after Start() must assert (HRT schedules are
// fixed once started).
280TEST(Kernel, AddTaskFailHrtStarted)
281{
 283 TaskMock<ACCESS_USER> task1, task2;
284
 285 kernel.Initialize();
 286 kernel.AddTask(&task1, 1, 1, 0);
 287 kernel.Start();
288
 289 try
 290 {
 291 g_TestContext.ExpectAssert(true);
 292 kernel.AddTask(&task2, 1, 1, 0);
 293 CHECK_TEXT(false, "expecting to AddTask to fail when KERNEL_HRT");
 294 }
 295 catch (TestAssertPassed &pass)
 296 {
 297 CHECK(true);
 298 g_TestContext.ExpectAssert(false);
 299 }
300}
301
// RemoveTask(): removes tasks before Start(); a duplicate removal attempt is a
// silent no-op, and removing all tasks empties the switch strategy.
// NOTE(review): per-test kernel/task declaration lines are missing throughout
// this group (Doxygen extraction gaps); verify against the original source.
302TEST(Kernel, RemoveTask)
303{
 305 TaskMock<ACCESS_USER> task1, task2;
 306 const ITaskSwitchStrategy *strategy = kernel.GetSwitchStrategy();
307
 308 kernel.Initialize();
 309 kernel.AddTask(&task1);
 310 kernel.AddTask(&task2);
311
 312 kernel.RemoveTask(&task1);
 313 CHECK_EQUAL_TEXT(&task2, strategy->GetFirst()->GetUserTask(), "Expecting task2 as first");
314
 315 kernel.RemoveTask(&task1);
 316 CHECK_EQUAL_TEXT(&task2, strategy->GetFirst()->GetUserTask(), "Expecting task2 as first (duplicate task1 removal attempt)");
317
 318 kernel.RemoveTask(&task2);
 319 CHECK_EQUAL_TEXT(0, strategy->GetSize(), "Expecting none tasks");
320}
321
// RemoveTask(NULL) must assert.
322TEST(Kernel, RemoveTaskFailNull)
323{
325
 326 kernel.Initialize();
327
 328 try
 329 {
 330 g_TestContext.ExpectAssert(true);
 331 kernel.RemoveTask((ITask *)NULL);
 332 CHECK_TEXT(false, "expecting to fail with NULL argument");
 333 }
 334 catch (TestAssertPassed &pass)
 335 {
 336 CHECK(true);
 337 g_TestContext.ExpectAssert(false);
 338 }
339}
340
// KERNEL_STATIC: RemoveTask() is unsupported and must assert.
341TEST(Kernel, RemoveTaskFailUnsupported)
342{
345
 346 kernel.Initialize();
 347 kernel.AddTask(&task);
348
 349 try
 350 {
 351 g_TestContext.ExpectAssert(true);
 352 kernel.RemoveTask(&task);
 353 CHECK_TEXT(false, "expecting to fail in KERNEL_STATIC mode");
 354 }
 355 catch (TestAssertPassed &pass)
 356 {
 357 CHECK(true);
 358 g_TestContext.ExpectAssert(false);
 359 }
360}
361
// RemoveTask() after Start() must assert.
362TEST(Kernel, RemoveTaskFailStarted)
363{
366
 367 kernel.Initialize();
 368 kernel.AddTask(&task);
 369 kernel.Start();
370
 371 try
 372 {
 373 g_TestContext.ExpectAssert(true);
 374 kernel.RemoveTask(&task);
 375 CHECK_TEXT(false, "expecting to fail when Kernel has started");
 376 }
 377 catch (TestAssertPassed &pass)
 378 {
 379 CHECK(true);
 380 g_TestContext.ExpectAssert(false);
 381 }
382}
383
// Initialize() must reject a periodicity of 0 and anything above PERIODICITY_MAX.
// NOTE(review): per-test kernel/task declaration lines are missing throughout
// this group (Doxygen extraction gaps); verify against the original source.
384TEST(Kernel, StartInvalidPeriodicity)
385{
388
 389 try
 390 {
 391 g_TestContext.ExpectAssert(true);
 392 kernel.Initialize(0);
 393 CHECK_TEXT(false, "expecting to fail with 0 periodicity");
 394 }
 395 catch (TestAssertPassed &pass)
 396 {
 397 CHECK(true);
 398 g_TestContext.ExpectAssert(false);
 399 }
400
 401 try
 402 {
 403 g_TestContext.ExpectAssert(true);
 404 kernel.Initialize(PERIODICITY_MAX + 1);
 405 CHECK_TEXT(false, "expecting to fail with too large periodicity");
 406 }
 407 catch (TestAssertPassed &pass)
 408 {
 409 CHECK(true);
 410 g_TestContext.ExpectAssert(false);
 411 }
412}
413
// Start() without a prior Initialize() must assert.
414TEST(Kernel, StartNotIntialized)
415{
417
 418 try
 419 {
 420 g_TestContext.ExpectAssert(true);
 421 kernel.Start();
 422 CHECK_TEXT(false, "expecting to fail when not initialized");
 423 }
 424 catch (TestAssertPassed &pass)
 425 {
 426 CHECK(true);
 427 g_TestContext.ExpectAssert(false);
 428 }
429}
430
// Start() with no tasks registered must assert.
431TEST(Kernel, StartNoTasks)
432{
434
 435 kernel.Initialize();
436
 437 try
 438 {
 439 g_TestContext.ExpectAssert(true);
 440 kernel.Start();
 441 CHECK_TEXT(false, "expecting to fail without tasks");
 442 }
 443 catch (TestAssertPassed &pass)
 444 {
 445 CHECK(true);
 446 g_TestContext.ExpectAssert(false);
 447 }
448}
449
// Successful Start(): platform started, service published, the first task's
// stack is active, and the chosen tick periodicity is forwarded to the platform.
// NOTE(review): the TEST(...) header itself (listing line 450) is missing here.
451{
 454 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 455 const uint32_t periodicity = PERIODICITY_MAX - 1;
456
 457 kernel.Initialize(periodicity);
 458 kernel.AddTask(&task);
459
 460 kernel.Start();
461
 462 CHECK_TRUE(platform->m_started);
 463 CHECK_TRUE(g_KernelService != NULL);
 464 CHECK_TRUE(platform->m_stack_active != NULL);
 465 CHECK_EQUAL((size_t)task.GetStack(), platform->m_stack_active->SP);
 466 CHECK_EQUAL(periodicity, platform->GetTickResolution());
467}
468
// Start() must request the first task's access mode from the platform ISR hook.
469TEST(Kernel, StartBeginISR)
470{
 473 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
474
 475 kernel.Initialize();
 476 kernel.AddTask(&task);
 477 kernel.Start();
478
 479 // expect that first task's access mode is requested by kernel
 480 CHECK_EQUAL(ACCESS_PRIVILEGED, platform->m_stack_active->mode);
481}
482
// Two tasks alternate on every simulated SysTick: active/idle stacks swap and
// the platform's context-switch counter increments once per tick.
// NOTE(review): kernel declaration lines are missing throughout this group
// (Doxygen extraction gaps); verify against the original source file.
483TEST(Kernel, ContextSwitchOnSysTickISR)
484{
 486 TaskMock<ACCESS_USER> task1, task2;
 487 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 488 Stack *&idle = platform->m_stack_idle, *&active = platform->m_stack_active;
489
 490 kernel.Initialize();
 491 kernel.AddTask(&task1);
 492 kernel.AddTask(&task2);
 493 kernel.Start();
494
 495 // ISR calls OnSysTick 1-st time
 496 {
 497 platform->ProcessTick();
498
 499 CHECK_TRUE(idle != NULL);
 500 CHECK_TRUE(active != NULL);
501
 502 // 1-st task is switched from Active and becomes Idle
 503 CHECK_EQUAL(idle->SP, (size_t)task1.GetStack());
504
 505 // 2-nd task becomes Active
 506 CHECK_EQUAL(active->SP, (size_t)task2.GetStack());
507
 508 // context switch requested
 509 CHECK_EQUAL(1, platform->m_context_switch_nr);
 510 }
511
 512 // ISR calls OnSysTick 2-nd time
 513 {
 514 platform->ProcessTick();
515
 516 CHECK_TRUE(idle != NULL);
 517 CHECK_TRUE(active != NULL);
518
 519 // 2-nd task is switched from Active and becomes Idle
 520 CHECK_EQUAL(idle->SP, (size_t)task2.GetStack());
521
 522 // 1-st task becomes Active
 523 CHECK_EQUAL(active->SP, (size_t)task1.GetStack());
524
 525 // context switch requested
 526 CHECK_EQUAL(2, platform->m_context_switch_nr);
 527 }
528}
529
// A context switch between tasks with different access levels must update the
// active stack's access mode accordingly.
530TEST(Kernel, ContextSwitchAccessModeChange)
531{
 535 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
536
 537 kernel.Initialize();
 538 kernel.AddTask(&task1);
 539 kernel.AddTask(&task2);
 540 kernel.Start();
541
 542 // 1-st task
 543 CHECK_EQUAL(ACCESS_USER, platform->m_stack_active->mode);
544
 545 // ISR calls OnSysTick
 546 platform->ProcessTick();
547
 548 // 2-nd task
 549 CHECK_EQUAL(ACCESS_PRIVILEGED, platform->m_stack_active->mode);
550}
551
// Injecting an out-of-range FSM state (via KernelMock) must trigger a kernel
// panic on the next tick (g_TestContext.ExpectPanic swallows it for the test).
// NOTE(review): listing lines 569-575 are partially missing — presumably the
// panic-id checks between the two ForceUpdateInvalidFsmState calls; verify.
552TEST(Kernel, ContextSwitchCorruptedFsmMode)
553{
 556 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
557
 558 kernel.Initialize();
 559 kernel.AddTask(&task);
 560 kernel.Start();
561
 562 // ISR calls OnSysTick
 563 platform->ProcessTick();
564
 565 g_TestContext.ExpectPanic(true);
566
 567 kernel.ForceUpdateInvalidFsmState(true);
 568 platform->ProcessTick();
570
572
 573 kernel.ForceUpdateInvalidFsmState(false);
 574 platform->ProcessTick();
576}
577
// With a single task a tick must not produce a context switch: no idle stack,
// active remains the user task's stack.
578TEST(Kernel, SingleTask)
579{
 582 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 583 Stack *&idle = platform->m_stack_idle, *&active = platform->m_stack_active;
584
 585 kernel.Initialize();
 586 kernel.AddTask(&task);
 587 kernel.Start();
588
 589 // ISR calls OnSysTick
 590 platform->ProcessTick();
591
 592 // expect that with single task nothing changes
 593 CHECK_EQUAL((Stack *)NULL, idle);
 594 CHECK_EQUAL((Stack *)platform->m_stack_info[STACK_USER_TASK].stack, active);
595}
596
// Shared scenario, parameterized by switch strategy: both tasks exit one after
// another; once the last task is removed, no idle stack remains and the active
// stack is the platform's exit trap (long jump back to Kernel::Start()).
// NOTE(review): listing line 600 (kernel declaration using _SwitchStrategy) is
// missing from this extraction; verify against the original source file.
597template <class _SwitchStrategy>
598static void TestTaskExit()
599{
 601 TaskMock<ACCESS_PRIVILEGED> task1, task2;
 602 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 603 Stack *&idle = platform->m_stack_idle, *&active = platform->m_stack_active;
604
 605 kernel.Initialize();
 606 kernel.AddTask(&task1);
 607 kernel.AddTask(&task2);
 608 kernel.Start();
609
 610 // ISR calls OnSysTick (task1 = idle, task2 = active)
 611 platform->ProcessTick();
612
 613 // task2 exited (will schedule its removal)
 614 platform->EventTaskExit(active);
615
 616 // ISR calls OnSysTick (task2 = idle, task1 = active)
 617 platform->ProcessTick();
618
 619 // task1 exited (will schedule its removal)
 620 platform->EventTaskExit(active);
621
 622 // ISR calls OnSysTick
 623 platform->ProcessTick();
624
 625 // last task is removed
 626 platform->ProcessTick();
627
 628 platform->ProcessTick();
629
 630 // no Idle tasks left
 631 CHECK_EQUAL((Stack *)NULL, idle);
632
 633 // Exit trap stack is provided for a long jump to the end of Kernel::Start()
 634 CHECK_EQUAL(platform->m_exit_trap, active);
635}
636
// Run the shared exit scenario under each supported switch strategy.
// NOTE(review): the TestTaskExit<...>() call lines (listing 639/644/649) are
// missing from this extraction; verify against the original source file.
637TEST(Kernel, OnTaskExitRR)
638{
640}
641
642TEST(Kernel, OnTaskExitSWRR)
643{
645}
646
647TEST(Kernel, OnTaskExitFP31)
648{
650}
651
// EventTaskExit() with a stack the kernel does not own, or NULL, must assert.
// NOTE(review): kernel/task declaration lines are missing throughout this group
// (Doxygen extraction gaps); verify against the original source file.
652TEST(Kernel, OnTaskExitUnknownOrNull)
653{
 656 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 657 Stack unk_stack;
658
 659 kernel.Initialize();
 660 kernel.AddTask(&task1);
 661 kernel.Start();
662
 663 // ISR calls OnSysTick (task1 = idle, task2 = active)
 664 platform->ProcessTick();
665
 666 try
 667 {
 668 g_TestContext.ExpectAssert(true);
 669 platform->EventTaskExit(&unk_stack);
 670 CHECK_TEXT(false, "expecting to fail on unknown stack");
 671 }
 672 catch (TestAssertPassed &pass)
 673 {
 674 CHECK(true);
 675 g_TestContext.ExpectAssert(false);
 676 }
677
 678 try
 679 {
 680 g_TestContext.ExpectAssert(true);
 681 platform->EventTaskExit(NULL);
 682 CHECK_TEXT(false, "expecting to fail on NULL");
 683 }
 684 catch (TestAssertPassed &pass)
 685 {
 686 CHECK(true);
 687 g_TestContext.ExpectAssert(false);
 688 }
689}
690
// A task exiting under a kernel mode that forbids exits must raise a panic.
// NOTE(review): listing lines 693-694 and 707 are missing — presumably the
// kernel declaration and a panic-id check; verify against the original.
691TEST(Kernel, OnTaskExitUnsupported)
692{
 695 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 696 Stack *&active = platform->m_stack_active;
697
 698 kernel.Initialize();
 699 kernel.AddTask(&task1);
 700 kernel.Start();
701
 702 // ISR calls OnSysTick
 703 platform->ProcessTick();
704
 705 g_TestContext.ExpectPanic(true);
 706 platform->EventTaskExit(active);
708}
709
// EventTaskSwitch() with a stack pointer that matches no task must assert.
710TEST(Kernel, OnTaskNotFoundBySP)
711{
 714 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
715
 716 kernel.Initialize();
 717 kernel.AddTask(&task1);
 718 kernel.Start();
719
 720 platform->ProcessTick();
721
 722 try
 723 {
 724 g_TestContext.ExpectAssert(true);
 725 platform->EventTaskSwitch(0xdeadbeef);
 726 CHECK_TEXT(false, "non existent task must not succeed");
 727 }
 728 catch (TestAssertPassed &pass)
 729 {
 730 CHECK(true);
 731 g_TestContext.ExpectAssert(false);
 732 }
733}
734
// FindTaskBySP() must skip the slot of an already-exited (freed) task instead
// of crashing while scanning for an unknown SP.
735TEST(Kernel, OnTaskSkipFreedTask)
736{
 738 TaskMock<ACCESS_PRIVILEGED> task1, task2;
 739 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 740 Stack *&active = platform->m_stack_active;
741
 742 kernel.Initialize();
 743 kernel.AddTask(&task1);
 744 kernel.AddTask(&task2);
 745 kernel.Start();
746
 747 // task1 exited (will schedule its removal)
 748 platform->EventTaskExit(active);
749
 750 // 2 ticks to remove exited task1 from scheduling (1st switched to task2, 2nd cleans up task1 exit)
 751 platform->ProcessTick();
 752 platform->ProcessTick();
753
 754 try
 755 {
 756 g_TestContext.ExpectAssert(true);
757
 758 // we loop through all tasks in attempt to find non existent SP (0xdeadbeef)
 759 // by this FindTaskBySP() is invoked and will loop through the exited task1's
 760 // slot
 761 platform->EventTaskSwitch(0xdeadbeef);
 762 CHECK_TEXT(false, "exited task must be successfully skipped by FindTaskBySP()");
 763 }
 764 catch (TestAssertPassed &pass)
 765 {
 766 CHECK(true);
 767 g_TestContext.ExpectAssert(false);
 768 }
769}
770
// HRT scheduling order: with deadlines (1,1,0) and (2,1,0) the strategy picks
// task2 first, then task1, alternating per tick.
// NOTE(review): the TEST(...) header (listing line 771) and the kernel
// declaration (773) are missing from this extraction; verify against original.
772{
 774 TaskMock<ACCESS_USER> task1, task2;
 775 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
776
 777 kernel.Initialize();
 778 kernel.AddTask(&task1, 1, 1, 0);
 779 kernel.AddTask(&task2, 2, 1, 0);
 780 kernel.Start();
781
 782 platform->ProcessTick();
 783 CHECK_EQUAL(platform->m_stack_active->SP, (size_t)task2.GetStack());
784
 785 platform->ProcessTick();
 786 CHECK_EQUAL(platform->m_stack_active->SP, (size_t)task1.GetStack());
787}
788
// HRT kernel: the non-HRT AddTask() overload must assert.
789TEST(Kernel, HrtAddNonHrt)
790{
793
 794 kernel.Initialize();
795
 796 try
 797 {
 798 g_TestContext.ExpectAssert(true);
 799 kernel.AddTask(&task);
 800 CHECK_TEXT(false, "non-HRT AddTask not supported in HRT mode");
 801 }
 802 catch (TestAssertPassed &pass)
 803 {
 804 CHECK(true);
 805 g_TestContext.ExpectAssert(false);
 806 }
807}
808
// Non-HRT kernel: the HRT AddTask() overload (deadline/periodicity/delay) must assert.
809TEST(Kernel, HrtAddNotAllowedForNonHrtMode)
810{
813
 814 kernel.Initialize();
815
 816 try
 817 {
 818 g_TestContext.ExpectAssert(true);
 819 kernel.AddTask(&task, 1, 1, 0);
 820 CHECK_TEXT(false, "HRT-related AddTask not supported in non-HRT mode");
 821 }
 822 catch (TestAssertPassed &pass)
 823 {
 824 CHECK(true);
 825 g_TestContext.ExpectAssert(false);
 826 }
827}
828
// HRT kernel: Sleep()/SleepUntil() are forbidden (they would break the
// deterministic HRT schedule) and must assert.
829TEST(Kernel, HrtSleepNotAllowed)
830{
833
 834 kernel.Initialize();
 835 kernel.AddTask(&task, 1, 1, 0);
 836 kernel.Start();
837
 838 try
 839 {
 840 g_TestContext.ExpectAssert(true);
 841 Sleep(1);
 842 CHECK_TEXT(false, "IKernelService::Sleep not allowed in HRT mode");
 843 }
 844 catch (TestAssertPassed &pass)
 845 {
 846 CHECK(true);
 847 g_TestContext.ExpectAssert(false);
 848 }
849
 850 try
 851 {
 852 g_TestContext.ExpectAssert(true);
 853 SleepUntil(GetTicks() + 1);
 854 CHECK_TEXT(false, "IKernelService::SleepUntil not allowed in HRT mode");
 855 }
 856 catch (TestAssertPassed &pass)
 857 {
 858 CHECK(true);
 859 g_TestContext.ExpectAssert(false);
 860 }
861}
862
// HRT kernel: when its only task exits, the kernel removes it and returns to
// STATE_READY.
863TEST(Kernel, HrtTaskCompleted)
864{
 867 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
 868 const ITaskSwitchStrategy *strategy = kernel.GetSwitchStrategy();
869
 870 kernel.Initialize();
 871 kernel.AddTask(&task, 1, 1, 0);
 872 kernel.Start();
873
 874 CHECK_TRUE(strategy->GetSize() != 0);
875
 876 platform->EventTaskExit(platform->m_stack_active);
 877 platform->ProcessTick();
878
 879 platform->ProcessTick();
880
 881 CHECK_EQUAL(0, strategy->GetSize());
882
 883 CHECK_EQUAL(IKernel::STATE_READY, kernel.GetState());
884}
885
// Relax-CPU helper context for the HRT deadline tests: each relax callback
// advances the mocked tick and counts invocations.
// NOTE(review): the struct header (listing line 886-888), the platform member
// declaration (895) and the HrtTaskDeadlineMissedRelaxCpu() wrapper plus global
// definition (903-909) are missing from this extraction; verify against source.
887{
889 {
 890 counter = 0;
 891 platform = NULL;
 892 }
893
 894 uint32_t counter;
896
 897 void Process()
 898 {
 899 platform->ProcessTick();
 900 ++counter;
 901 }
902}
904
909
// A task that never yields must miss its deadline on the 2nd tick, producing a
// hard fault on the platform and incrementing the task's missed-deadline count.
910TEST(Kernel, HrtTaskDeadlineMissedRR)
911{
 914 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
915
 916 kernel.Initialize();
 917 kernel.AddTask(&task, 2, 1, 0);
 918 kernel.Start();
919
 920 g_HrtTaskDeadlineMissedRelaxCpuContext.platform = platform;
922
 923 platform->ProcessTick();
924
 925 // task does not Yield() and thus next tick will overcome the deadline
 926 g_TestContext.ExpectAssert(true);
927
 928 // 2-nd tick goes outside the deadline
 929 platform->ProcessTick();
930
 931 CHECK_TRUE(platform->m_hard_fault);
 932 CHECK_EQUAL(2, task.m_deadline_missed);
933}
934
// Counter-case: a task that yields within its deadline produces no hard fault
// and no missed deadlines.
935TEST(Kernel, HrtTaskDeadlineNotMissedRR)
936{
 939 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
940
 941 kernel.Initialize();
 942 kernel.AddTask(&task, 2, 1, 0);
 943 kernel.Start();
944
 945 g_HrtTaskDeadlineMissedRelaxCpuContext.platform = platform;
947
 948 platform->ProcessTick();
949
 950 // task completes its work and yields to kernel, its workload is 1 ticks now that is within deadline 1
 951 Yield();
952
 953 // 2-nd tick continues scheduling normally
 954 platform->ProcessTick();
955
 956 CHECK_FALSE(platform->m_hard_fault);
 957 CHECK_EQUAL(0, task1.m_deadline_missed); should not be altered
 958 CHECK_EQUAL(0, task.m_deadline_missed);
959}
960
// RM scheduling: a task with a delayed start (sleeping) must be skipped until
// its release time; meanwhile the ready task runs without deadline misses.
960TEST(Kernel, HrtSkipSleepingNextRM)
961{
 963 TaskMock<ACCESS_USER> task1, task2;
 964 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
965
 966 kernel.Initialize();
 967 kernel.AddTask(&task1, 2, 2, 3);
 968 kernel.AddTask(&task2, 3, 3, 0);
 969 kernel.Start();
970
 971 g_HrtTaskDeadlineMissedRelaxCpuContext.platform = platform;
973
 974 CHECK_EQUAL(platform->m_stack_active->SP, (size_t)task2.GetStack());
 975 platform->ProcessTick();
 976 CHECK_EQUAL(platform->m_stack_active->SP, (size_t)task2.GetStack());
 977 platform->ProcessTick();
 978 Yield();
 979 CHECK_EQUAL(platform->m_stack_active->SP, (size_t)task1.GetStack());
980
 981 CHECK_FALSE(platform->m_hard_fault);
 982 CHECK_EQUAL(0, task1.m_deadline_missed);
 983 CHECK_EQUAL(0, task2.m_deadline_missed);
984}
985
986template <class _SwitchStrategy>
988{
990 TaskMock<ACCESS_USER> task1, task2;
991 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
992 const _SwitchStrategy *strategy = static_cast<const _SwitchStrategy *>(kernel.GetSwitchStrategy());
993
994 kernel.Initialize();
995 kernel.AddTask(&task1, 1, 2, 0);
996 kernel.AddTask(&task2, 1, 2, 2);
997 kernel.Start();
998
999 // task1 is the first
1000 CHECK_EQUAL((size_t)task1.GetStack(), platform->m_stack_active->SP);
1001
1002 // task returns (exiting) without calling SwitchToNext
1003 platform->EventTaskExit(platform->m_stack_active);
1004
1005 platform->ProcessTick(); // schedules task removal but task2 is still sleeping
1006
1007 // here scheduler is sleeping because task1 was sent to infinite sleep until removal and task2 is still pending
1008
1009 platform->ProcessTick(); // task2 is still sleeping
1010 platform->ProcessTick(); // switched to task2
1011
1012 CHECK_EQUAL((size_t)task2.GetStack(), platform->m_stack_active->SP);
1013
1014 CHECK_EQUAL(1, strategy->GetSize());
1015}
1016
1017TEST(Kernel, HrtTaskExitDuringSleepStateRR)
1018{
1020}
1021
1022TEST(Kernel, HrtTaskExitDuringSleepStateRM)
1023{
1025}
1026
1027TEST(Kernel, HrtTaskExitDuringSleepStateDM)
1028{
1030}
1031
1032TEST(Kernel, HrtTaskExitDuringSleepStateEDF)
1033{
1035}
1036
1037TEST(Kernel, HrtSleepingAwakeningStateChange)
1038{
1041 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
1042
1043 kernel.Initialize();
1044 kernel.AddTask(&task, 1, 1, 1);
1045 kernel.Start();
1046
1047 // due to 1 tick delayed start of the task Kernel enters into a SLEEPING state
1048 CHECK_EQUAL(platform->m_stack_active, platform->m_stack_info[STACK_SLEEP_TRAP].stack);
1049
1050 platform->ProcessTick();
1051
1052 // after a tick task become active and Kernel enters into a AWAKENING state
1053 CHECK_EQUAL(platform->m_stack_idle, platform->m_stack_info[STACK_SLEEP_TRAP].stack);
1054 CHECK_EQUAL(platform->m_stack_active->SP, (size_t)task.GetStack());
1055}
1056
1057TEST(Kernel, HrtOnlyAPI)
1058{
1061
1062 kernel.Initialize();
1063 kernel.AddTask(&task);
1064 kernel.Start();
1065
1066 // Obtain kernel task
1067 IKernelTask *ktask = kernel.GetSwitchStrategy()->GetFirst();
1068 CHECK_TRUE_TEXT(ktask != nullptr, "Kernel task must exist");
1069
1070 try
1071 {
1072 g_TestContext.ExpectAssert(true);
1073 ktask->GetHrtRelativeDeadline();
1074 CHECK_TEXT(false, "HRT API can't be called in non-HRT mode");
1075 }
1076 catch (TestAssertPassed &pass)
1077 {
1078 CHECK(true);
1079 g_TestContext.ExpectAssert(false);
1080 }
1081
1082 try
1083 {
1084 g_TestContext.ExpectAssert(true);
1085 ktask->GetHrtPeriodicity();
1086 CHECK_TEXT(false, "HRT API can't be called in non-HRT mode");
1087 }
1088 catch (TestAssertPassed &pass)
1089 {
1090 CHECK(true);
1091 g_TestContext.ExpectAssert(false);
1092 }
1093
1094 try
1095 {
1096 g_TestContext.ExpectAssert(true);
1097 ktask->GetHrtDeadline();
1098 CHECK_TEXT(false, "HRT API can't be called in non-HRT mode");
1099 }
1100 catch (TestAssertPassed &pass)
1101 {
1102 CHECK(true);
1103 g_TestContext.ExpectAssert(false);
1104 }
1105}
1106
// Without KERNEL_SYNC, Wait() (service API and platform event) must assert;
// with assertion rethrow disabled it must instead return NULL.
// NOTE(review): kernel/task/mutex declaration lines are missing throughout this
// group (Doxygen extraction gaps); verify against the original source file.
1107TEST(Kernel, SyncNotEnabledFailsOnWait)
1108{
 1110 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
1112
 1113 kernel.Initialize();
 1114 kernel.AddTask(&task);
 1115 kernel.Start();
1116
 1117 try
 1118 {
 1119 g_TestContext.ExpectAssert(true);
 1120 IKernelService::GetInstance()->Wait(nullptr, nullptr, 0);
 1121 CHECK_TEXT(false, "kernel does not support waiting without KERNEL_SYNC");
 1122 }
 1123 catch (TestAssertPassed &pass)
 1124 {
 1125 CHECK(true);
 1126 g_TestContext.ExpectAssert(false);
 1127 }
1128
 1129 // test return NULL
 1130 g_TestContext.ExpectAssert(true);
 1131 g_TestContext.RethrowAssertException(false);
 1132 IWaitObject *wo = IKernelService::GetInstance()->Wait(nullptr, nullptr, 0);
 1133 g_TestContext.RethrowAssertException(true);
 1134 g_TestContext.ExpectAssert(false);
 1135 CHECK_TRUE_TEXT(wo == nullptr, "expect NULL");
1136
 1137 try
 1138 {
 1139 g_TestContext.ExpectAssert(true);
 1140 platform->EventTaskWait(0, nullptr, nullptr, 0);
 1141 CHECK_TEXT(false, "kernel does not support waiting without KERNEL_SYNC");
 1142 }
 1143 catch (TestAssertPassed &pass)
 1144 {
 1145 CHECK(true);
 1146 g_TestContext.ExpectAssert(false);
 1147 }
1148
 1149 // test return NULL
 1150 g_TestContext.ExpectAssert(true);
 1151 g_TestContext.RethrowAssertException(false);
 1152 wo = platform->EventTaskWait(0, nullptr, nullptr, 0);
 1153 g_TestContext.RethrowAssertException(true);
 1154 g_TestContext.ExpectAssert(false);
 1155 CHECK_TRUE_TEXT(wo == nullptr, "expect NULL");
1156}
1157
// Wait() with a NULL sync object must assert.
1158TEST(Kernel, SyncNoNullSyncObj)
1159{
1162
1164
 1165 kernel.Initialize();
 1166 kernel.AddTask(&task);
 1167 kernel.Start();
1168
 1169 try
 1170 {
 1171 g_TestContext.ExpectAssert(true);
 1172 IKernelService::GetInstance()->Wait(nullptr, &mutex, 10);
 1173 CHECK_TEXT(false, "sync object must not be NULL");
 1174 }
 1175 catch (TestAssertPassed &pass)
 1176 {
 1177 CHECK(true);
 1178 g_TestContext.ExpectAssert(false);
 1179 }
1180}
1181
// Wait() with a NULL mutex must assert.
1182TEST(Kernel, SyncNoNullMutex)
1183{
1186
 1187 SyncObjectMock sobj;
1188
 1189 kernel.Initialize();
 1190 kernel.AddTask(&task);
 1191 kernel.Start();
1192
 1193 try
 1194 {
 1195 g_TestContext.ExpectAssert(true);
 1196 IKernelService::GetInstance()->Wait(&sobj, nullptr, 10);
 1197 CHECK_TEXT(false, "mutex must not be NULL");
 1198 }
 1199 catch (TestAssertPassed &pass)
 1200 {
 1201 CHECK(true);
 1202 g_TestContext.ExpectAssert(false);
 1203 }
1204}
1205
// Wait() with a zero timeout must assert.
1206TEST(Kernel, SyncNoZeroWait)
1207{
1210
 1212 SyncObjectMock sobj;
1213
 1214 kernel.Initialize();
 1215 kernel.AddTask(&task);
 1216 kernel.Start();
1217
 1218 try
 1219 {
 1220 g_TestContext.ExpectAssert(true);
 1221 IKernelService::GetInstance()->Wait(&sobj, &mutex, 0);
 1222 CHECK_TEXT(false, "must not be zero wait");
 1223 }
 1224 catch (TestAssertPassed &pass)
 1225 {
 1226 CHECK(true);
 1227 g_TestContext.ExpectAssert(false);
 1228 }
1229}
1230
// Wait() with an unlocked mutex must assert (condition-variable-style contract:
// the caller must hold the lock when it starts to wait).
1231TEST(Kernel, SyncMutexMustBeLocked)
1232{
1235
 1237 SyncObjectMock sobj;
1238
 1239 kernel.Initialize();
 1240 kernel.AddTask(&task);
 1241 kernel.Start();
1242
 1243 try
 1244 {
 1245 g_TestContext.ExpectAssert(true);
 1246 IKernelService::GetInstance()->Wait(&sobj, &mutex, 10);
 1247 CHECK_TEXT(false, "mutex must be locked");
 1248 }
 1249 catch (TestAssertPassed &pass)
 1250 {
 1251 CHECK(true);
 1252 g_TestContext.ExpectAssert(false);
 1253 }
1254}
1255
// A task may exit normally after having waited: kernel keeps RUNNING for one
// more tick, then removes the task and returns to STATE_READY.
// NOTE(review): kernel/task/mutex declaration lines are missing throughout this
// group (Doxygen extraction gaps); verify against the original source file.
1256TEST(Kernel, SyncTaskExitAfterWait)
1257{
 1259 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
1261
 1263 SyncObjectMock sobj;
1264
 1265 kernel.Initialize();
 1266 kernel.AddTask(&task);
 1267 kernel.Start();
1268
 1269 {
 1270 //MutexMock::ScopedLock guard(mutex);
1271
 1272 //IKernelService::GetInstance()->Wait(&sobj, &mutex, 10);
 1273 }
1274
 1275 // task1 exited (will schedule its removal)
 1276 platform->EventTaskExit(platform->m_stack_active);
1277
 1278 platform->ProcessTick();
1279
 1280 // should be still running here, next tick will result in task exit and kernel stop
 1281 CHECK_EQUAL(IKernel::STATE_RUNNING, kernel.GetState());
1282
 1283 platform->ProcessTick();
1284
 1285 // should be stopped here
 1286 CHECK_EQUAL(IKernel::STATE_READY, kernel.GetState());
1287}
1288
// Relax-CPU helper context for the Wait tests: advances the mocked tick per
// relax callback; when counter hits check_tickless it verifies that the wait
// timeout was converted into sleep ticks (tickless accounting), not task ticks.
// NOTE(review): the struct header (listing 1289-1291), platform/check_tickless
// member declarations (1304-1305) and the trailing global definition (1319) are
// missing from this extraction; verify against the original source file.
1290{
1292 {
 1293 Reset();
 1294 }
1295
 1296 void Reset()
 1297 {
 1298 counter = 0;
 1299 platform = NULL;
 1300 check_tickless = ~0;
 1301 }
1302
 1303 uint32_t counter;
1306
 1307 void Process()
 1308 {
 1309 platform->ProcessTick();
 1310 ++counter;
1311
 1312 // Wait object affects sleep_ticks, not a task
 1313 if (counter == check_tickless)
 1314 {
 1315 CHECK_EQUAL(2, platform->m_ticks_count);
 1316 }
 1317 }
1318}
1320
// __stk_relax_cpu hook: forwards to the shared wait-test context.
1321static void SyncWaitRelaxCpu()
1322{
 1323 g_SyncWaitRelaxCpuContext.Process();
1324}
1325
// Shared scenario (tickless vs ticking kernel): Wait() times out after 2 ticks
// and returns the caller's wait object with the mutex re-acquired.
// NOTE(review): the function header and the kernel/mutex declarations plus the
// Wait() call itself (listing 1327-1347) are missing from this extraction.
// NOTE(review): wo is dereferenced (wo->GetTid()) before the wo != nullptr
// check below — the null check should come first; flag in the original source.
1326template <bool TTickless>
1328{
 1330 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
1332
 1334 SyncObjectMock sobj;
1335
 1337 g_SyncWaitRelaxCpuContext.platform = platform;
1339
 1340 kernel.Initialize();
 1341 kernel.AddTask(&task);
 1342 kernel.Start();
1343
1345
1347
 1348 CHECK_EQUAL(GetTid(), wo->GetTid()); // expect the same thread id as WaitObject belongs to the caller's task
 1349 CHECK_TRUE(wo != nullptr); // expect wait object in return after timeout
 1350 CHECK_TRUE(wo->IsTimeout()); // expect timeout
 1351 CHECK_EQUAL(2, g_SyncWaitRelaxCpuContext.counter); // expect 2 ticks after timeout
 1352 CHECK_EQUAL(true, mutex.m_locked); // expect locked mutex after Wait return
1353}
1354
// Run the shared wait scenario with and without KERNEL_TICKLESS.
// NOTE(review): the TestSyncWait<...>() call lines (listing 1357/1362) are
// missing from this extraction; verify against the original source file.
1355TEST(Kernel, SyncWait)
1356{
1358}
1359
1360TEST(Kernel, SyncWaitTickless)
1361{
1363}
1364
// Tickless duration accounting: the wait timeout must be charged as sleep
// ticks (checked on the first relax callback) and the total elapsed ticks must
// include them.
1365TEST(Kernel, SyncWaitTicklessDuration)
1366{
 1368 PlatformTestMock *platform = static_cast<PlatformTestMock *>(kernel.GetPlatform());
1370
 1372 SyncObjectMock sobj;
1373
 1375 g_SyncWaitRelaxCpuContext.platform = platform;
 1376 g_SyncWaitRelaxCpuContext.check_tickless = 0; // check first tick
1378
 1379 kernel.Initialize();
 1380 kernel.AddTask(&task);
 1381 kernel.Start();
1382
1384
 1385 // sleep_ticks should be equal to 2 on first OnTick call
1387
 1388 // in total 4 ticks must be elapsed, including sleep ticks
 1389 CHECK_EQUAL(4, platform->m_ticks_count);
1390
 1391 // at this stage test should pass successfully by validating sleep_ticks in SyncWaitRelaxCpuContext::Process
1392
 1393 CHECK_EQUAL(GetTid(), wo->GetTid());
 1394 CHECK_TRUE(wo != nullptr);
 1395 CHECK_TRUE(wo->IsTimeout());
 1396 CHECK_EQUAL(3, g_SyncWaitRelaxCpuContext.counter);
 1397 CHECK_EQUAL(true, mutex.m_locked);
1398}
1399
1400} // namespace test
1401} // namespace stk
void(* g_RelaxCpuHandler)()
__stk_relax_cpu handler.
Definition stktest.cpp:17
Namespace of STK package.
void Sleep(uint32_t ticks)
Put calling process into a sleep state.
Definition stk_helper.h:298
Ticks GetTicks()
Get number of ticks elapsed since kernel start.
Definition stk_helper.h:248
@ STACK_SLEEP_TRAP
Stack of the Sleep trap.
Definition stk_common.h:72
@ STACK_USER_TASK
Stack of the user task.
Definition stk_common.h:71
void Yield()
Notify scheduler to switch to the next runnable task.
Definition stk_helper.h:331
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
Definition stk_helper.h:322
@ PERIODICITY_MAX
Maximum periodicity (microseconds), 99 milliseconds (note: this value is the highest working on a rea...
Definition stk_common.h:81
TId GetTid()
Get task/thread Id of the calling task.
Definition stk_helper.h:217
SwitchStrategyRoundRobin SwitchStrategyRR
Shorthand alias for SwitchStrategyRoundRobin.
@ ACCESS_USER
Unprivileged access mode (access to some hardware is restricted, see CPU manual for details).
Definition stk_common.h:32
@ ACCESS_PRIVILEGED
Privileged access mode (access to hardware is fully unrestricted).
Definition stk_common.h:33
@ KERNEL_TICKLESS
Tickless mode. To use this mode STK_TICKLESS_IDLE must be defined to 1 in stk_config....
Definition stk_common.h:45
@ KERNEL_SYNC
Synchronization support (see Event).
Definition stk_common.h:44
@ KERNEL_STATIC
All tasks are static and can not exit.
Definition stk_common.h:41
@ KERNEL_PANIC_BAD_MODE
Kernel is in bad/unsupported mode for the current operation.
Definition stk_common.h:62
@ KERNEL_PANIC_NONE
Panic is absent (no fault).
Definition stk_common.h:53
@ KERNEL_PANIC_BAD_STATE
Kernel entered unexpected (bad) state.
Definition stk_common.h:61
Namespace of the test inventory.
static struct stk::test::SyncWaitRelaxCpuContext g_SyncWaitRelaxCpuContext
TestContext g_TestContext
Global instance of the TestContext.
Definition stktest.cpp:16
static void SyncWaitRelaxCpu()
static struct stk::test::AddTaskWhenStartedRelaxCpuContext g_AddTaskWhenStartedRelaxCpuContext
IKernelService * g_KernelService
Definition stktest.cpp:18
static struct stk::test::HrtTaskDeadlineMissedRelaxCpuContext g_HrtTaskDeadlineMissedRelaxCpuContext
EKernelPanicId g_PanicValue
Panic value.
Definition stktest.cpp:20
static void HrtTaskDeadlineMissedRelaxCpu()
static void TestTaskExit()
TEST_GROUP(Kernel)
static void AddTaskWhenStartedRelaxCpu()
static void TestHrtTaskExitDuringSleepState()
TEST(Kernel, MaxTasks)
Namespace of Mutex test.
Concrete implementation of IKernel.
Definition stk.h:83
bool UpdateFsmState(Stack *&idle, Stack *&active)
Update FSM state.
Definition stk.h:1708
EFsmState
Finite-state machine (FSM) state. Encodes what the kernel is currently doing between two consecutive ...
Definition stk.h:971
@ FSM_STATE_NONE
Sentinel / uninitialized value. Set by the constructor, replaced by FSM_STATE_SWITCHING on the first ...
Definition stk.h:972
@ FSM_STATE_MAX
Sentinel: number of valid states (used to size the FSM table), denotes uninitialized state.
Definition stk.h:977
EFsmState GetNewFsmState(KernelTask *&next)
Get new FSM state.
Definition stk.h:1697
void Initialize(uint32_t resolution_us=PERIODICITY_DEFAULT)
Prepare kernel for use: reset state, configure the platform, and register the service singleton.
Definition stk.h:805
EState GetState() const
Get kernel state.
Definition stk.h:963
ITaskSwitchStrategy * GetSwitchStrategy()
Get task-switching strategy instance owned by this kernel.
Definition stk.h:959
void Start()
Start the scheduler. This call does not return until all tasks have exited (KERNEL_DYNAMIC mode) or i...
Definition stk.h:921
void RemoveTask(ITask *user_task)
Remove a previously added task from the kernel before Start().
Definition stk.h:895
KernelTask * m_task_now
Currently executing task, or nullptr before Start() or after all tasks exit.
Definition stk.h:1993
IPlatform * GetPlatform()
Get platform driver instance owned by this kernel.
Definition stk.h:954
Kernel()
Construct the kernel with all storage zero-initialised and the request flag set to ~0 (indicating uni...
Definition stk.h:774
@ TASKS_MAX
Maximum number of concurrently registered tasks. Fixed at compile time. Exceeding this limit in AddTa...
Definition stk.h:764
void AddTask(ITask *user_task)
Register task for a soft real-time (SRT) scheduling.
Definition stk.h:832
Internal per-slot kernel descriptor that wraps a user ITask instance.
Definition stk.h:119
Stack descriptor.
Definition stk_common.h:181
Word SP
Stack Pointer (SP) register (note: must be the first entry in this struct).
Definition stk_common.h:182
EAccessMode mode
access mode
Definition stk_common.h:183
Wait object.
Definition stk_common.h:212
virtual TId GetTid() const =0
Get thread Id of this task.
virtual bool IsTimeout() const =0
Check if task woke up due to a timeout.
Locks bound mutex within a scope of execution. Ensures the mutex is always unlocked when leaving the ...
Definition stk_common.h:389
Interface for a user task.
Definition stk_common.h:433
Scheduling-strategy-facing interface for a kernel task slot.
Definition stk_common.h:493
virtual Timeout GetHrtRelativeDeadline() const =0
Get HRT task's relative deadline.
virtual Timeout GetHrtDeadline() const =0
Get HRT task deadline (max allowed task execution time).
virtual Timeout GetHrtPeriodicity() const =0
Get HRT task execution periodicity.
virtual ITask * GetUserTask()=0
Get user task.
Interface for a task switching strategy implementation.
Definition stk_common.h:782
virtual size_t GetSize() const =0
Get number of tasks currently managed by this strategy.
virtual IKernelTask * GetFirst() const =0
Get first task.
@ STATE_INACTIVE
not ready, IKernel::Initialize() must be called
Definition stk_common.h:861
@ STATE_READY
ready to start, IKernel::Start() must be called
Definition stk_common.h:862
@ STATE_RUNNING
initialized and running, IKernel::Start() was called successfully
Definition stk_common.h:863
static IKernelService * GetInstance()
Get CPU-local instance of the kernel service.
Definition stktest.cpp:69
virtual IWaitObject * Wait(ISyncObject *sobj, IMutex *mutex, Timeout timeout)=0
Put calling process into a waiting state until synchronization object is signaled or timeout occurs.
Word * GetStack() const
Get pointer to the stack memory.
Definition stk_helper.h:54
BaseType::EFsmState GetNewFsmState(typename BaseType::KernelTask *&next) override
Kernel< TMode, TSize, TStrategy, TPlatform > BaseType
void ForceUpdateInvalidFsmState(bool max_val)
Throwable class for catching assertions from STK_ASSERT_HANDLER().
Definition stktest.h:67
IPlatform mock.
Definition stktest.h:75
void EventTaskSwitch(size_t caller_SP)
Definition stktest.h:213
void ProcessTick()
Process one tick.
Definition stktest.h:180
void EventTaskExit(Stack *stack)
Definition stktest.h:208
IWaitObject * EventTaskWait(size_t caller_SP, ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
Definition stktest.h:223
IKernelService * m_service
Definition stktest.h:238
StackInfo m_stack_info[STACK_EXIT_TRAP+1]
Definition stktest.h:250
uint32_t GetTickResolution() const
Get resolution of the system tick timer in microseconds. Resolution means a number of microseconds be...
Definition stktest.h:149
uint32_t m_deadline_missed
duration of workload if deadline is missed in HRT mode
Definition stktest.h:337