SuperTinyKernel™ RTOS 1.05.3
Lightweight, high-performance, deterministic, bare-metal C++ RTOS for resource-constrained embedded systems. MIT Open Source License.
Loading...
Searching...
No Matches
stk_arch_risc-v.cpp
Go to the documentation of this file.
1/*
2 * SuperTinyKernel(TM) RTOS: Lightweight High-Performance Deterministic C++ RTOS for Embedded Systems.
3 *
4 * Source: https://github.com/SuperTinyKernel-RTOS
5 *
6 * Copyright (c) 2022-2026 Neutron Code Limited <stk@neutroncode.com>. All Rights Reserved.
7 * License: MIT License, see LICENSE for a full text.
8 */
9
10// note: If missing, this header must be customized (get it in the root of the source folder) and
11// copied to the /include folder manually.
12#include "stk_config.h"
13
14#ifdef _STK_ARCH_RISC_V
15
16#include "stk_arch.h"
18#include "stk_helper.h"
19
20using namespace stk;
21
60//#define _STK_RISCV_USE_PENDSV
61
62// CLINT
63// Details: https://github.com/riscv/riscv-aclint/blob/main/riscv-aclint.adoc
64#ifndef STK_RISCV_CLINT_BASE_ADDR
65 #define STK_RISCV_CLINT_BASE_ADDR (0x2000000U)
66#endif
67#ifndef STK_RISCV_CLINT_MTIMECMP_ADDR
68 #define STK_RISCV_CLINT_MTIMECMP_ADDR (STK_RISCV_CLINT_BASE_ADDR + 0x4000U) // 8-byte value, 1 per hart
69#endif
70#ifndef STK_RISCV_CLINT_MTIME_ADDR
71 #define STK_RISCV_CLINT_MTIME_ADDR (STK_RISCV_CLINT_BASE_ADDR + 0xBFF8U) // 8-byte value, global
72#endif
73
78#define STK_RISCV_ISR_STACK_SIZE 256U
79
83#ifndef STK_TIMER_CLOCK_FREQUENCY
84 #define STK_TIMER_CLOCK_FREQUENCY 1000000U
85#endif
86
91#ifndef STK_RISCV_ISR_SECTION
92 #define STK_RISCV_ISR_SECTION
93#endif
94
99#define STK_RISCV_ISR extern "C" STK_RISCV_ISR_SECTION __attribute__ ((interrupt("machine")))
100
102#define STK_ASM_EXIT_FROM_HANDLER "mret"
103
109#ifndef STK_RISCV_CLINT_MTIMECMP_PER_HART
110 #define STK_RISCV_CLINT_MTIMECMP_PER_HART (1)
111#endif
112
117#ifndef STK_ARCH_GET_CPU_ID
118 #define STK_ARCH_GET_CPU_ID() read_csr(mhartid)
119#endif
120
122#if (__riscv_flen == 0)
123 #define STK_RISCV_FPU 0
124#else
125 #define STK_RISCV_FPU __riscv_flen
126#endif
127
128#define STR(x) #x
129#define XSTR(s) STR(s)
130
132#if (__riscv_xlen == 32)
133 #define REGBYTES XSTR(4)
134 #define LREG XSTR(lw)
135 #define SREG XSTR(sw)
136#elif (__riscv_xlen == 64)
137 #define REGBYTES XSTR(8)
138 #define LREG XSTR(ld)
139 #define SREG XSTR(sd)
140#else
141 #error Unsupported RISC-V platform!
142#endif
143
144#if (STK_RISCV_FPU == 32)
145 #define FREGBYTES XSTR(4)
146 #define FLREG XSTR(flw)
147 #define FSREG XSTR(fsw)
148#elif (STK_RISCV_FPU == 64)
149 #define FREGBYTES XSTR(8)
150 #define FLREG XSTR(fld)
151 #define FSREG XSTR(fsd)
152#elif (STK_RISCV_FPU != 0)
153#error Unsupported FP register count!
154#endif
155
156
157#if (__riscv_32e == 1)
158 #define STK_RISCV_REGISTER_COUNT (15 + (STK_RISCV_FPU != 0 ? 31 : 0))
159#else
160 #define STK_RISCV_REGISTER_COUNT (31 + (STK_RISCV_FPU != 0 ? 31 : 0))
161#endif
162
163#define STK_SERVICE_SLOTS 2 // (0) mepc, (1) mstatus
164
165#if (__riscv_32e == 1)
166 #define FOFFSET XSTR(68) // FP stack offset = (17 * 4)
167 #if (STK_RISCV_FPU == 0)
168 #define REGSIZE XSTR(((15 + STK_SERVICE_SLOTS) * 4)) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus
169 #else
170 #if (STK_RISCV_FPU == 32)
171 #define REGSIZE XSTR((((15 + STK_SERVICE_SLOTS) * 4) + (31 * 4))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
172 #elif (STK_RISCV_FPU == 64)
173 #define REGSIZE XSTR((((15 + STK_SERVICE_SLOTS) * 4) + (31 * 8))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
174 #endif
175 #endif
176#elif (__riscv_xlen == 32)
177 #define FOFFSET XSTR(132) // FP stack offset = (33 * 4)
178 #if (STK_RISCV_FPU == 0)
179 #define REGSIZE XSTR(((31 + STK_SERVICE_SLOTS) * 4)) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus
180 #else
181 #if (STK_RISCV_FPU == 32)
182 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 4) + (31 * 4))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
183 #elif (STK_RISCV_FPU == 64)
184 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 4) + (31 * 8))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
185 #endif
186 #endif
187#elif (__riscv_xlen == 64)
188 #define FOFFSET XSTR(264) // FP stack offset = (33 * 8)
189 #if (STK_RISCV_FPU == 0)
190 #define REGSIZE XSTR(((31 + STK_SERVICE_SLOTS) * 8)) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus
191 #else
192 #if (STK_RISCV_FPU == 32)
193 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 8) + (31 * 4))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
194 #elif (STK_RISCV_FPU == 64)
195 #define REGSIZE XSTR((((31 + STK_SERVICE_SLOTS) * 8) + (31 * 8))) // STK_RISCV_REGISTER_COUNT + 2 for mepc, mstatus + 32 fp registers
196 #endif
197 #endif
198#endif
199
200#if (__riscv_xlen == 32)
201 #define REGBYTES_LOG2 "2" // log2(4) - used for hart-index shift
202#elif (__riscv_xlen == 64)
203 #define REGBYTES_LOG2 "3" // log2(8)
204#endif
205
212#ifndef STK_SYSTICK_HANDLER
213 #define STK_SYSTICK_HANDLER riscv_mtvec_mti
214#endif
215
222#ifndef STK_SVC_HANDLER
223 #define STK_SVC_HANDLER riscv_mtvec_exception
224#endif
225
233#ifndef STK_MSI_HANDLER
234 #define STK_MSI_HANDLER riscv_mtvec_msi
235#endif
236
/*! \brief Layout of a task's saved CPU context as it sits on the task stack.

    The member offsets must match the STK_ASM_SAVE_CONTEXT/STK_ASM_LOAD_CONTEXT
    assembly exactly: slot 0 = mepc, slot 1 = mstatus (the STK_SERVICE_SLOTS),
    followed by one slot per general-purpose register. The x2 (sp) slot is
    reserved but never written by the asm, and on FPU builds the x3 (gp) slot
    is reused to hold fcsr. Do not reorder members. */
struct TaskFrame
{
    // Service slots (indices 0, 1) - sit at sp+0 and sp+REGBYTES
    Word MEPC;    // saved machine exception PC (task resume address)
    Word MSTATUS; // saved machine status (privilege, MIE/MPIE, FS state)

    // General-purpose register slots (indices 2..N), one per xN
    Word X1_RA;   // return address
    Word X2_SP;   // reserved slot - asm skips sp, the frame address itself is the sp
#if (STK_RISCV_FPU != 0)
    Word X3_FSR;  // gp slot reused for fcsr on FPU builds (see STK_ASM_SAVE_CONTEXT_FRCSR)
#else
    Word X3_GP;   // global pointer (slot reserved, asm skips gp)
#endif
    Word X4;
    Word X5;
    Word X6;
    Word X7;
    Word X8;
    Word X9;
    Word X10_A0;  // a0 - first argument / return value register
    Word X11;
    Word X12;
    Word X13;
    Word X14;
    Word X15;
#if (__riscv_32e != 1)
    // x16..x31 exist only on full RV32I/RV64I register files (not RV32E)
    Word X16;
    Word X17;
    Word X18;
    Word X19;
    Word X20;
    Word X21;
    Word X22;
    Word X23;
    Word X24;
    Word X25;
    Word X26;
    Word X27;
    Word X28;
    Word X29;
    Word X30;
    Word X31;
#endif
#if (STK_RISCV_FPU != 0)
    // FP register slots - at FOFFSET from frame base, immediately after integer slots.
    // FREGBYTES may differ from REGBYTES (32-bit FP on a 64-bit integer machine).
    // Declared as Word arrays for uniform struct sizing; the FP load/store
    // instructions address them by byte offset and tolerate the type mismatch.
    Word F[32];
#endif
};
307
/*! \brief Data synchronization barrier: orders all prior loads/stores before
    all subsequent ones (RISC-V `fence rw, rw`), plus a compiler barrier. */
static __stk_forceinline void __DSB()
{
    __asm volatile("fence rw, rw" ::: "memory");
}
314
/*! \brief Instruction synchronization barrier: flushes the instruction fetch
    pipeline via `fence.i` when the Zifencei extension is available; otherwise
    falls back to a full compiler/memory synchronization. */
static __stk_forceinline void __ISB()
{
#ifdef __riscv_zifencei
    __asm volatile("fence.i" ::: "memory");
#else
    __sync_synchronize();
#endif
}
325
/*! \brief Halt the hart until an interrupt becomes pending (Wait-For-Interrupt). */
static __stk_forceinline void __WFI()
{
    __asm volatile("wfi");
}
332
/*! \brief Kick off the scheduler by raising an environment-call exception
    (`ecall`), which is serviced by the kernel's exception handler
    (STK_SVC_HANDLER) to perform the first context switch. */
static __stk_forceinline void HW_StartScheduler()
{
    __asm volatile("ecall");
}
340
/*! \brief Get the current hart (hardware thread) index.
    \return Hart id as reported by STK_ARCH_GET_CPU_ID (mhartid by default),
            truncated to 8 bits. */
static __stk_forceinline uint8_t HW_GetHartId()
{
    return STK_ARCH_GET_CPU_ID();
}
347
/*! \brief Globally mask machine-mode interrupts by clearing mstatus.MIE.
    Uses csrrci (immediate form) - MSTATUS_MIE must fit in 5 bits. */
static __stk_forceinline void HW_DisableInterrupts()
{
    __asm volatile("csrrci zero, mstatus, %0"
                   : /* output: none */
                   : "i"(MSTATUS_MIE)
                   : /* clobbers: none */);
}
357
/*! \brief Globally unmask machine-mode interrupts by setting mstatus.MIE. */
static __stk_forceinline void HW_EnableInterrupts()
{
    __asm volatile("csrrsi zero, mstatus, %0"
                   : /* output: none */
                   : "i"(MSTATUS_MIE)
                   : /* clobbers: none */);
}
367
/*! \brief Disable machine interrupts and capture the previous mstatus.
    csrrci atomically reads mstatus into \a ses and clears the MIE bit.
    \return Previous mstatus value - pass it to HW_ExitCriticalSection()
            to restore the interrupt-enable state. */
static __stk_forceinline Word HW_EnterCriticalSection()
{
    Word ses;
    __asm volatile("csrrci %0, mstatus, %1"
                   : "=r"(ses)
                   : "i"(MSTATUS_MIE)
                   : /* clobbers: none */);

    return ses;
}
381
/*! \brief Restore the interrupt state captured by HW_EnterCriticalSection().
    csrrs ORs the saved mstatus back in; this re-enables MIE only if it was
    set before entry (a set-bits operation can never clear bits).
    \param ses mstatus value returned by HW_EnterCriticalSection(). */
static __stk_forceinline void HW_ExitCriticalSection(Word ses)
{
    __asm volatile("csrrs zero, mstatus, %0"
                   : /* output: none */
                   : "r"(ses)
                   : /* clobbers: none */);
}
392
/*! \brief Stop servicing the machine timer by masking its interrupt in mie.
    NOTE(review): the mask used is MIP_MTIP (a mip-register name); it occupies
    the same bit position as MTIE in mie, so clearing it here disables the
    machine timer interrupt enable - confirm the encoding header defines both
    at bit 7. */
static __stk_forceinline void HW_StopMTimer()
{
    clear_csr(mie, MIP_MTIP);
}
399
/*! \brief CPU core clock frequency in Hz.
    \return SystemCoreClock (provided by the BSP/startup code). */
static __stk_forceinline uint32_t HW_CoreClockFrequency()
{
    return SystemCoreClock; // CPU speed, e.g. 125/150 MHz
}
407
/*! \brief CLINT mtime counter frequency in Hz (build-time constant,
    overridable via STK_TIMER_CLOCK_FREQUENCY). */
static __stk_forceinline uint32_t HW_MtimeClockFrequency()
{
    return STK_TIMER_CLOCK_FREQUENCY; // Timer frequency, e.g. 1 MHz
}
416
/*! \brief Read the global 64-bit CLINT mtime counter.

    On RV64 a single aligned 64-bit MMIO load is sufficient. On RV32 the
    counter must be read as two 32-bit halves; the hi/lo/hi re-read loop
    guards against a carry from the low word between the two loads (torn
    read), per the RISC-V privileged specification's recommended sequence.

    \return Current mtime value in timer ticks. */
static __stk_forceinline uint64_t HW_GetMtime()
{
#if ( __riscv_xlen > 32)
    return *((volatile uint64_t *)STK_RISCV_CLINT_MTIME_ADDR);
#else
    volatile uint32_t *mtime_hi = ((uint32_t *)STK_RISCV_CLINT_MTIME_ADDR) + 1;
    volatile uint32_t *mtime_lo = ((uint32_t *)STK_RISCV_CLINT_MTIME_ADDR);

    uint32_t hi, lo;
    do
    {
        hi = (*mtime_hi);
        lo = (*mtime_lo);
    }
    while (hi != (*mtime_hi)); // make sure mtime_hi did not tick when read mtime_lo

    return ((uint64_t)hi << 32) | lo;
#endif
}
441
445static __stk_forceinline void HW_SetMtimecmp(uint64_t time_next)
446{
447#if STK_RISCV_CLINT_MTIMECMP_PER_HART
448 const uint8_t hart = HW_GetHartId();
449#else
450 const uint8_t hart = 0;
451#endif
452
453#if (__riscv_xlen == 64)
454 ((volatile uint64_t *)STK_RISCV_CLINT_MTIMECMP_ADDR)[hart] = next;
455#else
456 volatile uint32_t *mtime_lo = (uint32_t *)((uint64_t *)STK_RISCV_CLINT_MTIMECMP_ADDR + hart);
457 volatile uint32_t *mtime_hi = mtime_lo + 1;
458
459 // expecting 4-byte aligned memory
460 STK_ASSERT(((uintptr_t)mtime_lo & (4 - 1)) == 0);
461 STK_ASSERT(((uintptr_t)mtime_hi & (4 - 1)) == 0);
462
463 // prevent unexpected interrupt by setting some very large value to the high part
464 // details: https://riscv.org/wp-content/uploads/2017/05/riscv-privileged-v1.10.pdf, page 31
465 (*mtime_hi) = ~0;
466
467 (*mtime_lo) = (uint32_t)(time_next & 0xFFFFFFFF);
468 (*mtime_hi) = (uint32_t)(time_next >> 32);
469#endif
470}
471
/*! \brief Ticks elapsed since a previous mtime snapshot.
    \param since Earlier value returned by HW_GetMtime().
    \return Difference in timer ticks (unsigned wrap-safe subtraction). */
static __stk_forceinline uint64_t HW_GetMtimeElapsed(uint64_t since)
{
    return HW_GetMtime() - since;
}
481
/*! \brief Enable the mcycle counter by clearing mcountinhibit.CY (bit 0).
    The counter is inhibited out of reset on some cores, so this must run
    once per hart before HW_GetCycleCounter() is meaningful. */
static __stk_forceinline void HW_EnableCycleCounter()
{
    __asm volatile("csrci mcountinhibit, 0x1");
}
488
/*! \brief Read the 64-bit mcycle counter without a torn 32-bit read.

    Reads mcycleh / mcycle / mcycleh and retries if the high half changed,
    guaranteeing a consistent 64-bit snapshot.

    NOTE(review): mcycleh exists only on RV32 - this routine assumes an
    RV32 target when the sub-microsecond timer is enabled; confirm it is
    never compiled on RV64 (where a plain `csrr mcycle` would suffice).

    \return Consistent 64-bit cycle count. */
static __stk_forceinline Cycles HW_GetCycleCounter()
{
    uint32_t high, low, check;
    do
    {
        __asm volatile("csrr %0, mcycleh" : "=r"(high));
        __asm volatile("csrr %0, mcycle" : "=r"(low));
        __asm volatile("csrr %0, mcycleh" : "=r"(check));
    }
    while (high != check);

    return (static_cast<Cycles>(high) << 32) | low;
}
505
/*! \brief Read the current stack pointer register.
    \return Value of sp at the call site (the function is force-inlined,
            so no extra frame is interposed). */
static __stk_forceinline Word HW_GetCallerSP()
{
    Word sp;
    __asm volatile("mv %0, sp"
                   : "=r"(sp)
                   : /* input: none */
                   : /* clobbers: none */);

    return sp;
}
518
/*! \brief Enter a local (per-hart) critical section.
    \param ses Out: previous mstatus, to be passed to HW_CriticalSectionEnd().
    The barriers guarantee the interrupt disable takes effect before any
    code that follows the call. */
static __stk_forceinline void HW_CriticalSectionStart(Word &ses)
{
    ses = HW_EnterCriticalSection();

    // ensure the disable is recognized before subsequent code
    __DSB();
    __ISB();
}
529
/*! \brief Leave a local critical section and restore the interrupt state.
    \param ses Value captured by HW_CriticalSectionStart(). */
static __stk_forceinline void HW_CriticalSectionEnd(Word ses)
{
    // ensure all memory work is finished before re-enabling
    __DSB();

    HW_ExitCriticalSection(ses);

    // synchronization point: any pending interrupt can be serviced immediately at this boundary
    __ISB();
}
542
/*! \brief Attempt to acquire a spinlock without blocking.
    \param lock Lock flag; false = free, true = held.
    \return true if the lock was acquired (test-and-set observed it free),
            with acquire ordering on success. */
static __stk_forceinline bool HW_SpinLockTryLock(volatile bool &lock)
{
    return !__atomic_test_and_set(&lock, __ATOMIC_ACQUIRE);
}
561
578static __stk_forceinline void HW_SpinLockLock(volatile bool &lock)
579{
580 uint32_t timeout = 0xFFFFFF;
581 while (!HW_SpinLockTryLock(lock))
582 {
583 if (--timeout == 0)
584 {
585 // Invariant violated: the lock owner exited without releasing,
586 // Kernel state is suspect, enter defined safe state.
588 }
590 }
591}
592
/*! \brief Release a spinlock previously taken with HW_SpinLockLock()/TryLock().
    Panics if the lock is not currently held (double-release detection).
    \param lock Lock flag to release. */
static __stk_forceinline void HW_SpinLockUnlock(volatile bool &lock)
{
    if (!lock)
        STK_KERNEL_PANIC(KERNEL_PANIC_SPINLOCK_DEADLOCK); // release attempt of unowned lock

    // ensure all data writes (like scheduling metadata) are flushed before the lock is released:
    // __atomic_clear with __ATOMIC_RELEASE provides the required store-release barrier,
    // the explicit fence rw,w is retained for toolchains that do not lower __ATOMIC_RELEASE
    // to a full release fence on all RISC-V targets
    __asm volatile("fence rw, w" ::: "memory");

    __atomic_clear(&lock, __ATOMIC_RELEASE);
}
621
/*! \brief Request a deferred context switch for the given hart.
    \param hart Target hart index.
    In PendSV-style builds this pends a Machine Software Interrupt via the
    hart's CLINT msip register; otherwise the switch happens synchronously
    elsewhere and this call is a no-op. */
static __stk_forceinline void HW_ScheduleContextSwitch(uint8_t hart)
{
#ifdef _STK_RISCV_USE_PENDSV
    // Pend Machine Software Interrupt (MSI) - equivalent of ARM's PENDSVSET
    volatile uint32_t *msip = (volatile uint32_t *)(STK_RISCV_CLINT_BASE_ADDR);
    msip[hart] = 1; // set pending
    __DSB();
#else
    (void)hart;
#endif
}
635
637#ifndef _STK_SYSTEM_CORE_CLOCK_EXTERNAL
639#endif
640
643static volatile bool s_StkRiscvCsuLock = false;
644
652
656#ifdef _STK_RISCV_USE_PENDSV
657Stack * volatile s_StkRiscvStackIdle[STK_ARCH_CPU_COUNT] = {};
658
664volatile Word s_StkRiscvSpIsrInt[STK_ARCH_CPU_COUNT] = {};
665#endif
666
670Stack * volatile s_StkRiscvStackActive[STK_ARCH_CPU_COUNT] = {};
671
675Stack * volatile s_StkRiscvStackIsr[STK_ARCH_CPU_COUNT] = {};
676
678
698
/*! \brief Callee-saved register snapshot for SaveJmp()/RestoreJmp()
    (setjmp/longjmp-style non-local exit).

    Member order defines the asm offsets in SaveJmp()/RestoreJmp():
    ra at 0, sp at 1*REGBYTES, then s0..s11, optionally fcsr at
    14*REGBYTES on FPU builds. Do not reorder members. */
struct JmpFrame
{
    Word RA;   // return address to resume at
    Word SP;   // caller's stack pointer
    Word S0;   // callee-saved registers s0..s11
    Word S1;
    Word S2;
    Word S3;
    Word S4;
    Word S5;
    Word S6;
    Word S7;
    Word S8;
    Word S9;
    Word S10;
    Word S11;
#if (STK_RISCV_FPU != 0)
    Word FCSR; // FP control/status (rounding mode + exception flags)
#endif
};
728
/*! \brief Capture the callee-saved execution state into \a f (setjmp analogue).

    Naked: no prologue runs, so a0 still holds &f and ra/sp are untouched
    when the asm executes. Returns 0 on the direct call; a later
    RestoreJmp(f, val) resumes after this call returning \a val instead.

    \param f Frame receiving ra, sp, s0..s11 (and fcsr on FPU builds).
    \return 0 on direct return (nonzero only when resumed via RestoreJmp). */
__attribute__((naked))
int32_t SaveJmp(JmpFrame &/*f*/)
{
    __asm volatile(
        // a0 = &f - no prologue has touched sp or s0 yet
        SREG " ra, 0*" REGBYTES "(a0)  \n" // save return address
        SREG " sp, 1*" REGBYTES "(a0)  \n" // save caller's stack pointer
        SREG " s0, 2*" REGBYTES "(a0)  \n"
        SREG " s1, 3*" REGBYTES "(a0)  \n"
        SREG " s2, 4*" REGBYTES "(a0)  \n"
        SREG " s3, 5*" REGBYTES "(a0)  \n"
        SREG " s4, 6*" REGBYTES "(a0)  \n"
        SREG " s5, 7*" REGBYTES "(a0)  \n"
        SREG " s6, 8*" REGBYTES "(a0)  \n"
        SREG " s7, 9*" REGBYTES "(a0)  \n"
        SREG " s8, 10*" REGBYTES "(a0) \n"
        SREG " s9, 11*" REGBYTES "(a0) \n"
        SREG " s10, 12*" REGBYTES "(a0) \n"
        SREG " s11, 13*" REGBYTES "(a0) \n"
#if (STK_RISCV_FPU != 0)
        "frcsr t0                      \n" // read fcsr (rounding mode + flags)
        SREG " t0, 14*" REGBYTES "(a0) \n" // save to JmpFrame::FCSR
#endif
        "li a0, 0                      \n" // return 0
        "ret                           \n" // explicit return (naked)
    );
}
770
/*! \brief Resume execution at the state captured by SaveJmp() (longjmp analogue).

    Restores ra, sp, s0..s11 (and fcsr on FPU builds) from \a f, places
    \a val in a0 and returns through the saved ra - so SaveJmp's caller
    observes SaveJmp returning \a val. Never returns to this call site.

    \param f   Frame previously filled by SaveJmp().
    \param val Value to deliver as SaveJmp's return (should be nonzero). */
__attribute__((naked, noreturn))
void RestoreJmp(JmpFrame &/*f*/, int32_t /*val*/)
{
    __asm volatile(
        // a0 = &f, a1 = val
        LREG " ra, 0*" REGBYTES "(a0)  \n"
        LREG " sp, 1*" REGBYTES "(a0)  \n"
        LREG " s0, 2*" REGBYTES "(a0)  \n"
        LREG " s1, 3*" REGBYTES "(a0)  \n"
        LREG " s2, 4*" REGBYTES "(a0)  \n"
        LREG " s3, 5*" REGBYTES "(a0)  \n"
        LREG " s4, 6*" REGBYTES "(a0)  \n"
        LREG " s5, 7*" REGBYTES "(a0)  \n"
        LREG " s6, 8*" REGBYTES "(a0)  \n"
        LREG " s7, 9*" REGBYTES "(a0)  \n"
        LREG " s8, 10*" REGBYTES "(a0) \n"
        LREG " s9, 11*" REGBYTES "(a0) \n"
        LREG " s10, 12*" REGBYTES "(a0) \n"
        LREG " s11, 13*" REGBYTES "(a0) \n"
#if (STK_RISCV_FPU != 0)
        LREG " t0, 14*" REGBYTES "(a0) \n" // load saved fcsr into t0
        "fscsr t0                      \n" // restore rounding mode + flags
#endif
        "mv a0, a1                     \n" // return val to SaveJmp's caller
        "ret                           \n" // jump to saved RA
    );
}
815
817
819#if STK_SUBMICORSECOND_PRECISION_TIMER
820class HiResClockCYCLE
821{
822public:
823 static HiResClockCYCLE *GetInstance()
824 {
825 // keep declaration function-local to allow compiler stripping it from the binary if
826 // it is unused by the user code
827 static HiResClockCYCLE clock;
828 return &clock;
829 }
830
831 Cycles GetCycles()
832 {
833 return HW_GetCycleCounter();
834 }
835
836 uint32_t GetFrequency()
837 {
838 return HW_CoreClockFrequency();
839 }
840};
841typedef HiResClockCYCLE HiResClockImpl;
842#else
843class HiResClockMTIME
844{
845public:
846 static HiResClockMTIME *GetInstance()
847 {
848 // keep declaration function-local to allow compiler stripping it from the binary if
849 // it is unused by the user code
850 static HiResClockMTIME clock;
851 return &clock;
852 }
853
854 Cycles GetCycles()
855 {
856 return HW_GetMtime();
857 }
858
859 uint32_t GetFrequency()
860 {
861 return HW_MtimeClockFrequency();
862 }
863};
864typedef HiResClockMTIME HiResClockImpl;
865#endif // !STK_SUBMICORSECOND_PRECISION_TIMER
866
868static struct Context : public PlatformContext
869{
870 Context() : PlatformContext(), m_stack_main(), m_stack_isr(), m_stack_isr_mem(),
871 m_exit_buf(), m_overrider(nullptr), m_specific(nullptr), m_tick_period(0), m_last_mtime(0ULL),
873 m_sleep_ticks(0),
874 #endif
875 m_csu(0), m_csu_nesting(0),
876 m_starting(false), m_started(false), m_exiting(false)
877
878 {}
879
    /*! \brief Destructor - the context owns no dynamic resources. */
    ~Context()
    {}
885
    /*! \brief (Re)initialize this per-hart platform context.
        \param handler       Kernel event handler receiving tick callbacks.
        \param service       Kernel service interface.
        \param exit_trap     Stack descriptor used as the task-exit trap.
        \param resolution_us Scheduler tick period in microseconds. */
    void Initialize(IPlatform::IEventHandler *handler, IKernelService *service, Stack *exit_trap, int32_t resolution_us)
    {
        PlatformContext::Initialize(handler, service, exit_trap, resolution_us);

        // init ISR's stack
        {
            StackMemoryWrapper<STK_RISCV_ISR_STACK_SIZE> stack_isr_mem(&m_stack_isr_mem);
            m_stack_isr.SP = hw::PtrToWord(InitStackMemory(&stack_isr_mem));
            m_stack_isr.mode = ACCESS_PRIVILEGED;
        }

        // init Main stack (SP is captured later when the scheduler starts)
        {
            m_stack_main.SP = STK_STACK_MEMORY_FILLER;
            m_stack_main.mode = ACCESS_PRIVILEGED;
        }

        // reset critical-section, overrider and timing bookkeeping
        m_csu = 0;
        m_csu_nesting = 0;
        m_overrider = NULL;
        m_specific = NULL;
        m_tick_period = ConvertTimeUsToClockCycles(STK_TIMER_CLOCK_FREQUENCY, resolution_us);
        m_last_mtime = 0ULL;
        m_starting = false;
        m_started = false;
        m_exiting = false;

        // mcycle counter must be enabled per-core
        #if STK_SUBMICORSECOND_PRECISION_TIMER
        HW_EnableCycleCounter();
        #endif
    }
918
919 __stk_forceinline void OnTick()
920 {
921 // process tick - scheduler may update m_stack_active to point at a new task
922 Word cs;
923 HW_CriticalSectionStart(cs);
924
925 #if STK_TICKLESS_IDLE
926 Timeout ticks = m_sleep_ticks;
927 #endif
928
929 if (m_handler->OnTick(m_stack_idle, m_stack_active
931 , ticks
932 #endif
933 ))
934 {
935 // refresh ISR asm pointer cache so the naked ISR reads the correct
936 // (possibly new) active stack SP immediately when jal returns
937 // s_StkRiscvStackActive[hart] always points to Context::m_stack_active, the pointer
938 // itself is stable, but we reassign here so multi-core hart-indexed builds
939 // stay correct if the hart mapping ever changes in future,
940 // for single-core builds this is a simple store to a known address at index 0
941 const uint8_t hart = HW_GetHartId();
942 s_StkRiscvStackActive[hart] = m_stack_active;
943 #ifdef _STK_RISCV_USE_PENDSV
944 s_StkRiscvStackIdle[hart] = m_stack_idle;
945 #endif
946
947 HW_ScheduleContextSwitch(hart);
948 }
949
950 #if STK_TICKLESS_IDLE
951 m_sleep_ticks = ticks;
952 #endif
953
954 HW_CriticalSectionEnd(cs);
955 }
956
    /*! \brief Enter the kernel-wide critical section (re-entrant).
        Disables local interrupts, then - only on the outermost entry -
        takes the global cross-hart spinlock and remembers the interrupt
        state to restore when the nesting fully unwinds. */
    __stk_forceinline void EnterCriticalSection()
    {
        // disable local interrupts and save state
        Word current_ses;
        HW_CriticalSectionStart(current_ses);

        if (m_csu_nesting == 0)
        {
            // ONLY attempt the global spinlock if we aren't already nested
            HW_SpinLockLock(s_StkRiscvCsuLock);

            // store the hardware interrupt state to restore later
            m_csu = current_ses;
        }

        // increase nesting count within a limit
        if (++m_csu_nesting > STK_CRITICAL_SECTION_NESTINGS_MAX)
        {
            // invariant violated: exceeded max allowed number of recursions
            STK_KERNEL_PANIC(KERNEL_PANIC_CS_NESTING_OVERFLOW);
        }
    }
979
    /*! \brief Leave the kernel-wide critical section (re-entrant).
        On the outermost exit, releases the global spinlock first and only
        then restores the interrupt state captured on entry. */
    __stk_forceinline void ExitCriticalSection()
    {
        STK_ASSERT(m_csu_nesting != 0);
        --m_csu_nesting;

        if (m_csu_nesting == 0)
        {
            // capture the state before releasing lock
            Word ses_to_restore = m_csu;

            // release global lock
            HW_SpinLockUnlock(s_StkRiscvCsuLock);

            // restore hardware interrupts
            HW_CriticalSectionEnd(ses_to_restore);
        }
    }
997
    /*! \brief Duration of the previously-armed timer period, in mtime ticks.
        \return m_sleep_ticks * m_tick_period on tickless builds, otherwise
                exactly one tick period. Used by OnSwitchContext() to compute
                the timer latency error. */
    uint64_t GetSleepTicksPrev()
    {
        #if STK_TICKLESS_IDLE
        uint64_t ticks = (static_cast<uint64_t>(m_sleep_ticks) * static_cast<uint64_t>(m_tick_period));
        #else
        uint64_t ticks = (1U * static_cast<uint64_t>(m_tick_period));
        #endif
        return ticks;
    }
1007
    /*! \brief Timer-interrupt body: run the scheduler tick and rearm mtimecmp.
        Snapshots mtime at entry and subtracts the measured interrupt-latency
        error so the scheduling period does not drift regardless of how long
        OnTick() runs. */
    __stk_forceinline void OnSwitchContext()
    {
        // capture mtime at ISR entry as the absolute base for the next period;
        // this eliminates drift from time spent inside OnTick regardless of how
        // long the scheduler takes to run
        const uint64_t mtime_now = HW_GetMtime();
        const uint64_t error = (mtime_now - m_last_mtime) - GetSleepTicksPrev();
        __stk_compiler_barrier(); // avoid compiler reordering, we count ticks from this point

        // make sure timer is enabled by the Kernel::Start(), disable its start anywhere else
        STK_ASSERT(m_started);
        STK_ASSERT(m_handler != NULL);

        // process tick - scheduler may update m_stack_active and m_sleep_ticks
        OnTick();

        // rearm timer: use the ISR-entry mtime snapshot as the absolute base so
        // any CPU cycles consumed by OnTick do not accumulate as period drift
        #if STK_TICKLESS_IDLE
        // guard against overflow (theoretical at normal tick periods and CPU frequencies)
        STK_ASSERT((static_cast<uint64_t>(m_sleep_ticks) * static_cast<uint64_t>(m_tick_period)) <= (UINT64_MAX - mtime_now));
        const uint64_t next_time = (static_cast<uint64_t>(m_sleep_ticks) * static_cast<uint64_t>(m_tick_period));
        #else
        const uint64_t next_time = (1U * static_cast<uint64_t>(m_tick_period));
        #endif
        HW_SetMtimecmp(mtime_now + next_time - error);
        m_last_mtime = mtime_now;
    }
1036
1037 void Start();
1038 void OnStart();
1039 void OnStop();
1040
1041 typedef IPlatform::IEventOverrider eovrd_t;
1042 typedef PlatformRiscV::ISpecificEventHandler sehndl_t;
1043 typedef StackMemoryWrapper<STK_RISCV_ISR_STACK_SIZE>::MemoryType isrmem_t;
1044
1045 Stack m_stack_main;
1046 Stack m_stack_isr;
1047 isrmem_t m_stack_isr_mem;
1048 JmpFrame m_exit_buf;
1049 eovrd_t *m_overrider;
1050 sehndl_t *m_specific;
1051 uint32_t m_tick_period;
1052 uint64_t m_last_mtime;
1053#if STK_TICKLESS_IDLE
1054 Timeout m_sleep_ticks;
1055#endif
1056 Word m_csu;
1057 uint8_t m_csu_nesting;
1058 bool m_starting;
1059 bool m_started;
1060 volatile bool m_exiting;
1061}
1062s_StkPlatformContext[STK_ARCH_CPU_COUNT];
1063
1065{
1066#ifdef _STK_RISCV_USE_PENDSV
1067 Word cs;
1068 HW_CriticalSectionStart(cs);
1069
1070 GetContext().OnTick();
1071
1072 HW_CriticalSectionEnd(cs);
1073#else
1074 // unsupported scenario
1075 STK_ASSERT(false);
1076#endif
1077}
1078
1080static volatile EKernelPanicId g_LastPanicId = KERNEL_PANIC_NONE;
1081
1082__stk_attr_noinline // keep out of inlining to preserve stack frame
1083__stk_attr_noreturn // never returns - a trap
1085{
1086 g_LastPanicId = id;
1087
1088 // disable all maskable interrupts: this prevents scheduler from running again and corrupting state further
1089 HW_DisableInterrupts();
1090
1091 // spin forever: with a watchdog active this produces a clean reset, without a watchdog,
1092 // a debugger can attach and inspect 'id'
1093 for (;;)
1094 {
1096 }
1097}
1098
1099#define STK_ASM_SAVE_CONTEXT_BASE\
1100 SREG " x1, 2*" REGBYTES "(sp) \n"\
1101 /*SREG " x2, 3*" REGBYTES "(sp) \n" // skip saving sp, Stack pointer */\
1102 /*SREG " x3, 4*" REGBYTES "(sp) \n" // skip saving gp, Global pointer (note: slot is used by fscsr) */\
1103 SREG " x4, 5*" REGBYTES "(sp) \n"\
1104 SREG " x5, 6*" REGBYTES "(sp) \n"\
1105 SREG " x6, 7*" REGBYTES "(sp) \n"\
1106 SREG " x7, 8*" REGBYTES "(sp) \n"\
1107 SREG " x8, 9*" REGBYTES "(sp) \n"\
1108 SREG " x9, 10*" REGBYTES "(sp) \n"\
1109 SREG " x10, 11*" REGBYTES "(sp) \n"\
1110 SREG " x11, 12*" REGBYTES "(sp) \n"\
1111 SREG " x12, 13*" REGBYTES "(sp) \n"\
1112 SREG " x13, 14*" REGBYTES "(sp) \n"\
1113 SREG " x14, 15*" REGBYTES "(sp) \n"\
1114 SREG " x15, 16*" REGBYTES "(sp) \n"
1115
1116#if (__riscv_32e != 1)
1117#define STK_ASM_SAVE_CONTEXT_RV32I_EXT\
1118 SREG " x16, 17*" REGBYTES "(sp) \n"\
1119 SREG " x17, 18*" REGBYTES "(sp) \n"\
1120 SREG " x18, 19*" REGBYTES "(sp) \n"\
1121 SREG " x19, 20*" REGBYTES "(sp) \n"\
1122 SREG " x20, 21*" REGBYTES "(sp) \n"\
1123 SREG " x21, 22*" REGBYTES "(sp) \n"\
1124 SREG " x22, 23*" REGBYTES "(sp) \n"\
1125 SREG " x23, 24*" REGBYTES "(sp) \n"\
1126 SREG " x24, 25*" REGBYTES "(sp) \n"\
1127 SREG " x25, 26*" REGBYTES "(sp) \n"\
1128 SREG " x26, 27*" REGBYTES "(sp) \n"\
1129 SREG " x27, 28*" REGBYTES "(sp) \n"\
1130 SREG " x28, 29*" REGBYTES "(sp) \n"\
1131 SREG " x29, 30*" REGBYTES "(sp) \n"\
1132 SREG " x30, 31*" REGBYTES "(sp) \n"\
1133 SREG " x31, 32*" REGBYTES "(sp) \n"
1134#else
1135#define STK_ASM_SAVE_CONTEXT_RV32I_EXT
1136#endif
1137
1138#if (STK_RISCV_FPU != 0)
1139#define STK_ASM_SAVE_CONTEXT_FP\
1140 FSREG " f0, " FOFFSET "+0*" FREGBYTES "(sp) \n"\
1141 FSREG " f1, " FOFFSET "+1*" FREGBYTES "(sp) \n"\
1142 FSREG " f2, " FOFFSET "+2*" FREGBYTES "(sp) \n"\
1143 FSREG " f3, " FOFFSET "+3*" FREGBYTES "(sp) \n"\
1144 FSREG " f4, " FOFFSET "+4*" FREGBYTES "(sp) \n"\
1145 FSREG " f5, " FOFFSET "+5*" FREGBYTES "(sp) \n"\
1146 FSREG " f6, " FOFFSET "+6*" FREGBYTES "(sp) \n"\
1147 FSREG " f7, " FOFFSET "+7*" FREGBYTES "(sp) \n"\
1148 FSREG " f8, " FOFFSET "+8*" FREGBYTES "(sp) \n"\
1149 FSREG " f9, " FOFFSET "+9*" FREGBYTES "(sp) \n"\
1150 FSREG " f10, " FOFFSET "+10*" FREGBYTES "(sp) \n"\
1151 FSREG " f11, " FOFFSET "+11*" FREGBYTES "(sp) \n"\
1152 FSREG " f12, " FOFFSET "+12*" FREGBYTES "(sp) \n"\
1153 FSREG " f13, " FOFFSET "+13*" FREGBYTES "(sp) \n"\
1154 FSREG " f14, " FOFFSET "+14*" FREGBYTES "(sp) \n"\
1155 FSREG " f15, " FOFFSET "+15*" FREGBYTES "(sp) \n"\
1156 FSREG " f16, " FOFFSET "+16*" FREGBYTES "(sp) \n"\
1157 FSREG " f17, " FOFFSET "+17*" FREGBYTES "(sp) \n"\
1158 FSREG " f18, " FOFFSET "+18*" FREGBYTES "(sp) \n"\
1159 FSREG " f19, " FOFFSET "+19*" FREGBYTES "(sp) \n"\
1160 FSREG " f20, " FOFFSET "+20*" FREGBYTES "(sp) \n"\
1161 FSREG " f21, " FOFFSET "+21*" FREGBYTES "(sp) \n"\
1162 FSREG " f22, " FOFFSET "+22*" FREGBYTES "(sp) \n"\
1163 FSREG " f23, " FOFFSET "+23*" FREGBYTES "(sp) \n"\
1164 FSREG " f24, " FOFFSET "+24*" FREGBYTES "(sp) \n"\
1165 FSREG " f25, " FOFFSET "+25*" FREGBYTES "(sp) \n"\
1166 FSREG " f26, " FOFFSET "+26*" FREGBYTES "(sp) \n"\
1167 FSREG " f27, " FOFFSET "+27*" FREGBYTES "(sp) \n"\
1168 FSREG " f28, " FOFFSET "+28*" FREGBYTES "(sp) \n"\
1169 FSREG " f29, " FOFFSET "+29*" FREGBYTES "(sp) \n"\
1170 FSREG " f30, " FOFFSET "+30*" FREGBYTES "(sp) \n"\
1171 FSREG " f31, " FOFFSET "+31*" FREGBYTES "(sp) \n"
1172#else
1173#define STK_ASM_SAVE_CONTEXT_FP
1174#endif
1175
1176#define STK_ASM_SAVE_CONTEXT_PC_STATUS\
1177 "csrr t0, mepc \n"\
1178 "csrr t1, mstatus \n"\
1179 SREG " t0, 0*" REGBYTES "(sp) \n"\
1180 SREG " t1, 1*" REGBYTES "(sp) \n"
1181
1182#if (STK_RISCV_FPU != 0)
1183#define STK_ASM_SAVE_CONTEXT_FRCSR\
1184 "frcsr t0 \n"\
1185 SREG " t0, 4*" REGBYTES "(sp) \n" /* use stack memory slot of gp (see comment for x3 above) */
1186#else
1187#define STK_ASM_SAVE_CONTEXT_FRCSR
1188#endif
1189
1190#define STK_ASM_SAVE_CONTEXT\
1191 "addi sp, sp, -" REGSIZE " \n" /* allocate stack memory for registers */\
1192 STK_ASM_SAVE_CONTEXT_BASE\
1193 STK_ASM_SAVE_CONTEXT_RV32I_EXT\
1194 STK_ASM_SAVE_CONTEXT_FP\
1195 STK_ASM_SAVE_CONTEXT_PC_STATUS\
1196 STK_ASM_SAVE_CONTEXT_FRCSR
1197
1198#define STK_ASM_LOAD_CONTEXT_BASE\
1199 LREG " x1, 2*" REGBYTES "(sp) \n"\
1200 /*LREG " x2, 3*" REGBYTES "(sp) \n" skip loading sp, Stack pointer */\
1201 /*LREG " x3, 4*" REGBYTES "(sp) \n" skip loading gp, Global pointer (note: slot is used by fscsr) */\
1202 LREG " x4, 5*" REGBYTES "(sp) \n"\
1203 LREG " x5, 6*" REGBYTES "(sp) \n"\
1204 LREG " x6, 7*" REGBYTES "(sp) \n"\
1205 LREG " x7, 8*" REGBYTES "(sp) \n"\
1206 LREG " x8, 9*" REGBYTES "(sp) \n"\
1207 LREG " x9, 10*" REGBYTES "(sp) \n"\
1208 LREG " x10, 11*" REGBYTES "(sp) \n"\
1209 LREG " x11, 12*" REGBYTES "(sp) \n"\
1210 LREG " x12, 13*" REGBYTES "(sp) \n"\
1211 LREG " x13, 14*" REGBYTES "(sp) \n"\
1212 LREG " x14, 15*" REGBYTES "(sp) \n"\
1213 LREG " x15, 16*" REGBYTES "(sp) \n"
1214
1215#if (__riscv_32e != 1)
1216#define STK_ASM_LOAD_CONTEXT_RV32I_EXT\
1217 LREG " x16, 17*" REGBYTES "(sp) \n"\
1218 LREG " x17, 18*" REGBYTES "(sp) \n"\
1219 LREG " x18, 19*" REGBYTES "(sp) \n"\
1220 LREG " x19, 20*" REGBYTES "(sp) \n"\
1221 LREG " x20, 21*" REGBYTES "(sp) \n"\
1222 LREG " x21, 22*" REGBYTES "(sp) \n"\
1223 LREG " x22, 23*" REGBYTES "(sp) \n"\
1224 LREG " x23, 24*" REGBYTES "(sp) \n"\
1225 LREG " x24, 25*" REGBYTES "(sp) \n"\
1226 LREG " x25, 26*" REGBYTES "(sp) \n"\
1227 LREG " x26, 27*" REGBYTES "(sp) \n"\
1228 LREG " x27, 28*" REGBYTES "(sp) \n"\
1229 LREG " x28, 29*" REGBYTES "(sp) \n"\
1230 LREG " x29, 30*" REGBYTES "(sp) \n"\
1231 LREG " x30, 31*" REGBYTES "(sp) \n"\
1232 LREG " x31, 32*" REGBYTES "(sp) \n"
1233#else
1234#define STK_ASM_LOAD_CONTEXT_RV32I_EXT
1235#endif
1236
1237#if (STK_RISCV_FPU != 0)
1238#define STK_ASM_LOAD_CONTEXT_FP\
1239 FLREG " f0, " FOFFSET "+0*" FREGBYTES "(sp) \n"\
1240 FLREG " f1, " FOFFSET "+1*" FREGBYTES "(sp) \n"\
1241 FLREG " f2, " FOFFSET "+2*" FREGBYTES "(sp) \n"\
1242 FLREG " f3, " FOFFSET "+3*" FREGBYTES "(sp) \n"\
1243 FLREG " f4, " FOFFSET "+4*" FREGBYTES "(sp) \n"\
1244 FLREG " f5, " FOFFSET "+5*" FREGBYTES "(sp) \n"\
1245 FLREG " f6, " FOFFSET "+6*" FREGBYTES "(sp) \n"\
1246 FLREG " f7, " FOFFSET "+7*" FREGBYTES "(sp) \n"\
1247 FLREG " f8, " FOFFSET "+8*" FREGBYTES "(sp) \n"\
1248 FLREG " f9, " FOFFSET "+9*" FREGBYTES "(sp) \n"\
1249 FLREG " f10, " FOFFSET "+10*" FREGBYTES "(sp) \n"\
1250 FLREG " f11, " FOFFSET "+11*" FREGBYTES "(sp) \n"\
1251 FLREG " f12, " FOFFSET "+12*" FREGBYTES "(sp) \n"\
1252 FLREG " f13, " FOFFSET "+13*" FREGBYTES "(sp) \n"\
1253 FLREG " f14, " FOFFSET "+14*" FREGBYTES "(sp) \n"\
1254 FLREG " f15, " FOFFSET "+15*" FREGBYTES "(sp) \n"\
1255 FLREG " f16, " FOFFSET "+16*" FREGBYTES "(sp) \n"\
1256 FLREG " f17, " FOFFSET "+17*" FREGBYTES "(sp) \n"\
1257 FLREG " f18, " FOFFSET "+18*" FREGBYTES "(sp) \n"\
1258 FLREG " f19, " FOFFSET "+19*" FREGBYTES "(sp) \n"\
1259 FLREG " f20, " FOFFSET "+20*" FREGBYTES "(sp) \n"\
1260 FLREG " f21, " FOFFSET "+21*" FREGBYTES "(sp) \n"\
1261 FLREG " f22, " FOFFSET "+22*" FREGBYTES "(sp) \n"\
1262 FLREG " f23, " FOFFSET "+23*" FREGBYTES "(sp) \n"\
1263 FLREG " f24, " FOFFSET "+24*" FREGBYTES "(sp) \n"\
1264 FLREG " f25, " FOFFSET "+25*" FREGBYTES "(sp) \n"\
1265 FLREG " f26, " FOFFSET "+26*" FREGBYTES "(sp) \n"\
1266 FLREG " f27, " FOFFSET "+27*" FREGBYTES "(sp) \n"\
1267 FLREG " f28, " FOFFSET "+28*" FREGBYTES "(sp) \n"\
1268 FLREG " f29, " FOFFSET "+29*" FREGBYTES "(sp) \n"\
1269 FLREG " f30, " FOFFSET "+30*" FREGBYTES "(sp) \n"\
1270 FLREG " f31, " FOFFSET "+31*" FREGBYTES "(sp) \n"
1271#else
1272#define STK_ASM_LOAD_CONTEXT_FP
1273#endif
1274
1275#define STK_ASM_LOAD_CONTEXT_PC_STATUS\
1276 LREG " t0, 0*" REGBYTES "(sp) \n"\
1277 LREG " t1, 1*" REGBYTES "(sp) \n"\
1278 "csrw mepc, t0 \n"\
1279 "csrw mstatus, t1 \n"
1280
1281#if (STK_RISCV_FPU != 0)
1282#define STK_ASM_LOAD_CONTEXT_FRCSR\
1283 LREG " t0, 4*" REGBYTES "(sp) \n" /* use stack memory slot of gp (see comment for x3 below) */\
1284 "fscsr t0 \n"
1285#else
1286#define STK_ASM_LOAD_CONTEXT_FRCSR
1287#endif
1288
1289#define STK_ASM_LOAD_CONTEXT\
1290 STK_ASM_LOAD_CONTEXT_PC_STATUS\
1291 STK_ASM_LOAD_CONTEXT_FRCSR\
1292 STK_ASM_LOAD_CONTEXT_BASE\
1293 STK_ASM_LOAD_CONTEXT_RV32I_EXT\
1294 STK_ASM_LOAD_CONTEXT_FP\
1295 "addi sp, sp, " REGSIZE " \n" /* shrink stack memory of registers */
1296
/*! \brief Switch to the active task: load its SP, restore the full register
    frame and leave the handler via mret.
    Reads the first member (SP) of the active Stack descriptor through the
    "m" operand, makes it the current sp, then unwinds the TaskFrame that
    STK_ASM_SAVE_CONTEXT previously pushed. Never returns to the caller. */
static __stk_forceinline void HW_LoadContextAndExit()
{
    __asm volatile(
        LREG " t0, %0 \n" // load the first member (SP) into t0
        LREG " sp, 0(t0) \n" // sp = t0

        STK_ASM_LOAD_CONTEXT
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* output: none */
        : "m"(GetContext().m_stack_active)
        : "t0", "t1", "a2", "a3", "a4", "a5", "gp", "memory");
}
1310
/*! \brief Grant full FPU/extension-state access by setting the FS and XS
    fields in mstatus. No-op on builds without hardware FP. */
static __stk_forceinline void HW_EnableFullFpuAccess()
{
#if (STK_RISCV_FPU != 0)
    __asm volatile(
        "csrs mstatus, %0"
        : /* output: none */
        : "r"(MSTATUS_FS | MSTATUS_XS)
        : "memory" /* ensure no FP instructions are moved before this call */);
#endif
}
1321
/*! \brief Reset the FP control/status register (fcsr) to zero - clears the
    rounding mode and all accumulated exception flags. No-op without FP. */
static __stk_forceinline void HW_ClearFpuState()
{
#if (STK_RISCV_FPU != 0)
    __asm volatile(
        "fssr x0"
        : /* output: none */
        : /* input: none */
        : "memory" /* ensure flags are cleared before next FP op */);
#endif
}
1332
// Capture the current (main/startup) SP into Context::m_stack_main so the
// scheduler exit path can switch back to it (see HW_LoadMainSP).
static __stk_forceinline void HW_SaveMainSP()
{
    __asm volatile(
        SREG " sp, %0"
        : "=m"(GetContext().m_stack_main)
        : /* input: none */
        : "memory" /* protect against compiler reordering */ );
}
1341
// Restore SP from Context::m_stack_main, abandoning the current stack.
// Only valid when the subsequent control flow never returns through frames
// that lived on the abandoned stack (see OnSchedulerExit).
static __stk_forceinline void HW_LoadMainSP()
{
    __asm volatile(
        LREG " sp, %0"
        : /* output: none */
        : "m"(GetContext().m_stack_main)
        : "memory" /* protect against compiler reordering */ );
}
1350
1351static __stk_forceinline bool HW_IsHandlerMode()
1352{
1353 Word current_sp = HW_GetCallerSP();
1354
1355 // get the bounds of the ISR stack from our Context
1356 // note: STK uses StackMemoryWrapper, so we check against that memory block
1357 const Word isr_stack_base = (Word)&GetContext().m_stack_isr_mem;
1358 const Word isr_stack_top = isr_stack_base + STK_RISCV_ISR_STACK_SIZE;
1359
1360 return ((current_sp >= isr_stack_base) && (current_sp < isr_stack_top));
1361}
1362
// Enter (or re-enter) the scheduler's active task: restores its context and
// mret's into it. Never returns.
static __stk_forceinline void OnTaskStart()
{
    HW_LoadContextAndExit();
}
1367
// Scheduler entry point invoked by the naked tick/MSI ISRs via "jal" once SP
// has been switched to the private ISR stack.
// __stk_attr_used for LTO
extern "C" STK_RISCV_ISR_SECTION __stk_attr_used void TrySwitchContext()
{
    GetContext().OnSwitchContext();
}
1373
1374#ifdef _STK_RISCV_USE_PENDSV
// PendSV-style tick ISR: saves the interrupted task's full context, parks the
// task SP in s_StkRiscvSpIsrInt[hart] (a plain Word - the actual task switch
// is finished by STK_MSI_HANDLER), runs the scheduler on the private ISR
// stack, then restores the interrupted task and mret's.
// note: naked - the single asm statement below is the entire function body,
// see the DESIGN RULES comment for the non-PendSV handler variant
extern "C" STK_RISCV_ISR_SECTION __stk_attr_naked void STK_SYSTICK_HANDLER()
{
    __asm volatile(
        // 1. save full interrupted context onto the task stack
        STK_ASM_SAVE_CONTEXT

        // 2. store task SP into s_StkRiscvSpIsrInt[hart] directly (plain Word, no struct indirection)
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvSpIsrInt \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n" // t0 = hart * sizeof(Word)
        "add t1, t1, t0 \n" // t1 = &s_StkRiscvSpIsrInt[hart]
        SREG " sp, 0(t1) \n" // store sp directly - no pointer dereference
#else
        "la t1, s_StkRiscvSpIsrInt \n"
        SREG " sp, 0(t1) \n" // store sp directly - no pointer dereference
#endif

        // 3. switch to private ISR stack
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackIsr \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n"
#else
        "la t1, s_StkRiscvStackIsr \n"
        LREG " t1, 0(t1) \n"
#endif
        LREG " sp, 0(t1) \n" // sp = Stack::SP of ISR stack

        // 4. run scheduler
        "jal ra, TrySwitchContext \n"

        // 5. restore the interrupted task's SP from s_StkRiscvSpIsrInt[hart]
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvSpIsrInt \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " sp, 0(t1) \n" // sp = saved task SP - direct load, no struct
#else
        "la t1, s_StkRiscvSpIsrInt \n"
        LREG " sp, 0(t1) \n" // sp = saved task SP - direct load, no struct
#endif

        // 6. restore context
        STK_ASM_LOAD_CONTEXT

        // 7. exit ISR handler
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* outputs: none - naked, compiler emits nothing outside this asm */
        : /* inputs: all addresses loaded as linker symbols via "la" */
        : /* clobbers: none - the asm string owns all registers */);
}
// Machine software-interrupt ISR (PendSV-style deferred switch): saves the
// interrupted context into the idle slot, acknowledges the CLINT MSIP bit,
// then resumes whatever task s_StkRiscvStackActive[hart] designates.
// note: the [clint_msip_base] operand uses an "i" (immediate) constraint -
// it is folded at compile time and emits no code outside the asm string, so
// the "no compiler operands" design rule still holds
extern "C" STK_RISCV_ISR_SECTION __stk_attr_naked void STK_MSI_HANDLER()
{
    __asm volatile(
        // 1. save context
        STK_ASM_SAVE_CONTEXT

        // 2. store task SP into s_StkRiscvStackIdle[hart]->SP
        // all integer registers are now saved. t0/t1 are free to use as scratch.
        // "la" loads the address of the global array - a linker-time constant,
        // no compiler-generated runtime code, safe to use here
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackIdle \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n" // t0 = hart * sizeof(Stack*)
        "add t1, t1, t0 \n" // t1 = &s_StkRiscvStackIdle[hart]
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIdle[hart] (Stack*)
#else
        "la t1, s_StkRiscvStackIdle \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIdle[0] (Stack*)
#endif
        SREG " sp, 0(t1) \n" // Stack::SP = task's sp (SP is first member)

        // 3. clear exception: MSIP[hart] = 0
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "slli t0, t0, 2 \n" // t0 = hart * 4
        "li t1, %[clint_msip_base] \n"
        "add t0, t0, t1 \n" // t0 = &MSIP[hart]
#else
        "li t0, %[clint_msip_base] \n" // t0 = &MSIP[0]
#endif
        "sw zero, 0(t0) \n" // MSIP[hart] = 0
        "fence rw, rw \n" // fence rw,rw - ensure the write is visible before re-enable

        // 4. load SP from s_StkRiscvStackActive[hart]->SP
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackActive \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n"
#else
        "la t1, s_StkRiscvStackActive \n"
        LREG " t1, 0(t1) \n"
#endif
        LREG " sp, 0(t1) \n" // sp = active task's saved SP

        // 5. load context of the active task
        STK_ASM_LOAD_CONTEXT

        // 6. exit ISR handler
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* outputs: none - naked, compiler emits nothing outside this asm */
        : [clint_msip_base] "i" (STK_RISCV_CLINT_BASE_ADDR) /* other inputs: all addresses loaded as linker symbols via "la" */
        : /* clobbers: none - the asm string owns all registers */);
}
1488#else // !_STK_RISCV_USE_PENDSV
1489/* STK_SYSTICK_HANDLER
1490
1491RISC-V machine-timer ISR: Saves the interrupted task's full context, switches
1492to the private ISR stack, calls TrySwitchContext (which reschedules the timer
1493and runs the scheduler), then restores the (possibly new) task's context.
1494
1495DESIGN RULES - must be obeyed to work correctly at all optimisation levels:
1496
1497 1. Single asm volatile, no compiler operands.
1498 The function body is ONE __asm volatile("..." : : : ) with empty
1499 input/output/clobber lists. No "m" or "r" constraints are used because
1500 the compiler evaluates those as C expressions BEFORE emitting any asm
1501 text, i.e. before the register save - trashing uninitialized registers.
1502
1503 2. All addresses are linker symbols loaded via "la" inside the asm.
1504 s_StkRiscvStackActive and s_StkRiscvStackIsr are plain file-scope globals. "la reg, sym"
1505 emits a PC-relative load that is resolved at link time, it produces no
1506 compiler-generated code outside the asm string.
1507
1508 3. Stack pointer indexing uses sizeof(Stack*) == REGBYTES.
1509 For multi-hart builds the array index is hart * REGBYTES, which is a
1510 single left-shift by log2(REGBYTES): 2 for RV32 (4 bytes), 3 for RV64
1511 (8 bytes). REGBYTES_LOG2 is defined below accordingly.
1512
1513 4. s_StkRiscvStackActive[hart]->SP is updated by TrySwitchContext.
1514 The naked asm reads it fresh after the jal returns, so it always sees
1515 the task the scheduler has chosen - even if it changed.
1516
1517 Stack frame layout (offsets from sp after "addi sp,-REGSIZE"):
1518 [0*REGBYTES] mepc (service slot 0)
1519 [1*REGBYTES] mstatus (service slot 1)
1520 [2*REGBYTES] x1 / ra
1521 [3*REGBYTES] x2 / sp - SKIPPED, managed explicitly
1522 [4*REGBYTES] x3 / gp - SKIPPED, fixed register; slot reused for FCSR
1523 [5*REGBYTES] x4 / tp
1524 [6*REGBYTES] x5 / t0
1525 ...
1526 [32*REGBYTES] x31 / t6 (RV32I; absent on RV32E)
1527 [FOFFSET + n*FREGBYTES] fn (FP registers, if STK_RISCV_FPU != 0)
1528*/
// Machine-timer tick ISR (synchronous switch variant) - see the DESIGN RULES
// and stack-frame layout in the comment block directly above.
extern "C" STK_RISCV_ISR_SECTION __stk_attr_naked void STK_SYSTICK_HANDLER()
{
    __asm volatile(
        // 1. save context
        STK_ASM_SAVE_CONTEXT

        // 2. store task SP into s_StkRiscvStackActive[hart]->SP
        // all integer registers are now saved. t0/t1 are free to use as scratch.
        // "la" loads the address of the global array - a linker-time constant,
        // no compiler-generated runtime code, safe to use here
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackActive \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n" // t0 = hart * sizeof(Stack*)
        "add t1, t1, t0 \n" // t1 = &s_StkRiscvStackActive[hart]
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackActive[hart] (Stack*)
#else
        "la t1, s_StkRiscvStackActive \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackActive[0] (Stack*)
#endif
        SREG " sp, 0(t1) \n" // Stack::SP = task's sp (SP is first member)

        // 3. switch to private ISR stack
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackIsr \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIsr[hart] (Stack*)
#else
        "la t1, s_StkRiscvStackIsr \n"
        LREG " t1, 0(t1) \n" // t1 = s_StkRiscvStackIsr[0] (Stack*)
#endif
        LREG " sp, 0(t1) \n" // sp = Stack::SP of ISR stack

        // 4. call TrySwitchContext
        // runs on the ISR stack: reschedules timer, runs scheduler
        // (which may update m_stack_active to a new task), then updates
        // s_StkRiscvStackActive[hart] so step 5 below reads the correct new SP,
        // all caller-saved registers (a0-a7, t0-t6, ra) are trashed - expected
        "jal ra, TrySwitchContext \n"

        // 5. reload SP from s_StkRiscvStackActive[hart]->SP
        // TrySwitchContext updated s_StkRiscvStackActive[hart] before returning,
        // we re-read it fresh to pick up any task switch the scheduler made
#if (STK_ARCH_CPU_COUNT > 1)
        "csrr t0, mhartid \n"
        "la t1, s_StkRiscvStackActive \n"
        "slli t0, t0, " REGBYTES_LOG2 " \n"
        "add t1, t1, t0 \n"
        LREG " t1, 0(t1) \n"
#else
        "la t1, s_StkRiscvStackActive \n"
        LREG " t1, 0(t1) \n"
#endif
        LREG " sp, 0(t1) \n" // sp = active task's saved SP

        // 6. load context of the active task
        STK_ASM_LOAD_CONTEXT

        // 7. exit ISR handler
        STK_ASM_EXIT_FROM_HANDLER " \n"

        : /* outputs: none - naked, compiler emits nothing outside this asm */
        : /* inputs: none - all addresses loaded as linker symbols via "la" */
        : /* clobbers: none - the asm string owns all registers */
        );
}
1597#endif // !_STK_RISCV_USE_PENDSV
1598
// One-time scheduler bring-up on this hart, executed inside STK_SVC_HANDLER
// with interrupts disabled. Statement order matters: all state (m_started,
// ISR pointer caches) must be committed BEFORE the timer interrupt is enabled.
void Context::OnStart()
{
    const uint8_t hart = HW_GetHartId();

    // save SP of main stack to reuse it for scheduler exit
    HW_SaveMainSP();

    // enable FPU (if available)
    HW_EnableFullFpuAccess();

    // clear FPU usage status if FPU was used before kernel start
    HW_ClearFpuState();

    // notify kernel
    m_handler->OnStart(m_stack_active);

#if STK_TICKLESS_IDLE
    // reset sleep ticks if kernel was restarted
    m_sleep_ticks = 1;
#endif

    // start timer with default periodicity
    m_last_mtime = HW_GetMtime();
    HW_SetMtimecmp(m_last_mtime + m_tick_period);

    // change state before enabling interrupt
    m_started = true;
    m_starting = false;

    // initialize ISR asm pointer cache (read by the naked ISRs via "la")
    s_StkRiscvStackIsr[hart] = &m_stack_isr; // set once here, the ISR stack never moves
    s_StkRiscvStackActive[hart] = m_stack_active;
#ifdef _STK_RISCV_USE_PENDSV
    s_StkRiscvStackIdle[hart] = m_stack_idle;
#endif

    // enable timer interrupt
    set_csr(mie, MIP_MTIP
    #ifdef _STK_RISCV_USE_PENDSV
        | MIP_MSIP
    #endif
        );
}
1642
// Trap handler for synchronous exceptions/service calls: either starts the
// scheduler (first call, m_starting set by Context::Start) or forwards the
// exception to the user-specific handler.
// NOTE(review): mcause value 11 (IRQ_M_EXT) with the interrupt bit clear is
// also the "environment call from M-mode" exception code - presumably this
// matches the ecall issued by HW_StartScheduler; confirm against the header.
STK_RISCV_ISR void STK_SVC_HANDLER()
{
    Word cause;
    __asm volatile("csrr %0, mcause"
        : "=r"(cause)
        : /* input : none */
        : /* clobbers: none */);

    /*if (cause & (1UL << (__riscv_xlen - 1)))
    {
        cause &= ~(1UL << (__riscv_xlen - 1));

        if (cause == IRQ_M_TIMER)
        {

        }
    }*/

    if (cause == IRQ_M_EXT)
    {
        // not starting scheduler, then try to forward ecall to user
        if (!GetContext().m_starting)
        {
            // forward event to user
            if (GetContext().m_specific != NULL)
                GetContext().m_specific->OnException(cause);

            // switch to the next instruction of the caller space (PC) after the return
            write_csr(mepc, read_csr(mepc) + sizeof(Word));
        }
        else
        {
            // make sure interrupts do not interfere
            HW_DisableInterrupts();

            // configure scheduling
            GetContext().OnStart();

            // start first task (never returns)
            OnTaskStart();
        }
    }
    else
    {
        if (GetContext().m_specific != NULL)
        {
            // forward event to user
            GetContext().m_specific->OnException(cause);
        }
        else
        {
            // trap further execution
            // note: normally, if trapped here with cause 2 or 4 then check stack memory size of the
            // tasks, scheduler and ISR, they were likely overwritten if your code is 100% correct
            // NOTE(review): a statement (original line 1697, presumably a
            // STK_KERNEL_PANIC call) was lost in extraction - restore it from
            // the original source.
        }
    }
}
1701
// Trampoline installed as the first PC (MEPC) of every user task by
// InitStack; receives the ITask* in a0 and enters the task's entry point.
static void OnTaskRun(ITask *task)
{
    task->Run();
}
1706
// Return address (RA) installed for every user task by InitStack: runs when
// a task's Run() returns. Notifies the kernel under a critical section, then
// parks the CPU until the scheduler reclaims this time slot.
static void OnTaskExit()
{
    Word cs;
    HW_CriticalSectionStart(cs);

    GetContext().m_handler->OnTaskExit(GetContext().m_stack_active);

    HW_CriticalSectionEnd(cs);

    // never returns - this stack will be recycled by the scheduler
    for (;;)
    {
        __DSB(); // data barrier
        __WFI(); // enter standby mode until time slot expires
    }
}
1722
// Default idle trap (MEPC of the STACK_SLEEP_TRAP frame): spins in low-power
// WFI until an interrupt wakes the hart. Never returns.
static STK_RISCV_ISR_SECTION void OnSchedulerSleep()
{
#if STK_SEGGER_SYSVIEW
    SEGGER_SYSVIEW_OnIdle();
#endif

    for (;;)
    {
        __DSB(); // data barrier
        __WFI(); // enter sleep until interrupt
    }
}
1735
1736static STK_RISCV_ISR_SECTION void OnSchedulerSleepOverride()
1737{
1738 if (!GetContext().m_overrider->OnSleep())
1739 OnSchedulerSleep();
1740}
1741
// Exit trap (MEPC of the STACK_EXIT_TRAP frame): unwinds the kernel back to
// the code that called IKernel::Start().
static void OnSchedulerExit()
{
    // switch to main stack
    HW_LoadMainSP();

    // jump to the exit from the IKernel::Start()
    // note: longjmp-style transfer - nothing after this line ever executes
    RestoreJmp(GetContext().m_exit_buf, 0);
}
1750
// Forward platform initialization to the per-hart scheduler context.
// NOTE(review): this method receives (.., resolution_us, exit_trap) but
// forwards (.., exit_trap, resolution_us) - confirm Context::Initialize()
// indeed declares exit_trap before resolution_us.
void PlatformRiscV::Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us, Stack *exit_trap)
{
    GetContext().Initialize(event_handler, service, exit_trap, resolution_us);
}
1755
// Begin scheduling on this hart. Uses setjmp-style control flow: SaveJmp()
// records the exit trap location; when the scheduler later stops, execution
// resumes here with m_exiting set (see Context::OnStop / OnSchedulerExit).
void Context::Start()
{
    m_exiting = false;

    // save jump location of the Exit trap
    SaveJmp(m_exit_buf);
    if (m_exiting)
    {
        // second arrival (via RestoreJmp): notify kernel about a full stop
        m_handler->OnStop();
        return;
    }

    // enable FPU (if available)
    HW_EnableFullFpuAccess();

    // start (triggers the trap handled by STK_SVC_HANDLER)
    m_starting = true;
    HW_StartScheduler();
}
1776
1778{
1779 GetContext().Start();
1780}
1781
1782bool PlatformRiscV::InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
1783{
1784 // TaskFrame must map exactly onto the slot layout consumed by STK_ASM_SAVE_CONTEXT / STK_ASM_LOAD_CONTEXT - no padding allowed
1785 STK_STATIC_ASSERT_DESC(sizeof(TaskFrame) == (STK_RISCV_REGISTER_COUNT + STK_SERVICE_SLOTS) * sizeof(Word),
1786 "TaskFrame size must match REGSIZE: (REGISTER_COUNT + SERVICE_SLOTS) * REGBYTES");
1787
1788 STK_ASSERT(stack_memory->GetStackSize() > (STK_RISCV_REGISTER_COUNT + STK_SERVICE_SLOTS));
1789
1790 // initialize stack memory (fills all slots with STK_STACK_MEMORY_FILLER)
1791 Word *stack_top = PlatformContext::InitStackMemory(stack_memory);
1792
1793 // initialize Stack Pointer (SP): frame sits at the bottom of the register window
1794 stack->SP = hw::PtrToWord(stack_top - (STK_RISCV_REGISTER_COUNT + STK_SERVICE_SLOTS));
1795
1796 // place the task frame at SP directly at the base of the register window
1797 TaskFrame * const task_frame = reinterpret_cast<TaskFrame *>(stack->SP);
1798
1799 // initialize registers for the user task's first start
1800 switch (stack_type)
1801 {
1802 case STACK_USER_TASK: {
1803 task_frame->MEPC = hw::PtrToWord(&OnTaskRun);
1804 task_frame->X1_RA = hw::PtrToWord(&OnTaskExit);
1805 task_frame->X10_A0 = hw::PtrToWord(user_task);
1806 break; }
1807
1808 case STACK_SLEEP_TRAP: {
1809 task_frame->MEPC = hw::PtrToWord(GetContext().m_overrider != NULL ? &OnSchedulerSleepOverride : &OnSchedulerSleep);
1810 task_frame->X1_RA = STK_STACK_MEMORY_FILLER; // should not attempt to exit
1811 break; }
1812
1813 case STACK_EXIT_TRAP: {
1814 task_frame->MEPC = hw::PtrToWord(&OnSchedulerExit);
1815 task_frame->X1_RA = STK_STACK_MEMORY_FILLER; // should not attempt to exit
1816 break; }
1817
1818 default:
1819 return false;
1820 }
1821
1822 // mstatus: return to M-mode (MPP), interrupts enabled on mret (MPIE),
1823 // FPU/extension state initial (FS/XS) if FPU present
1824 task_frame->MSTATUS = MSTATUS_MPP | MSTATUS_MPIE | (STK_RISCV_FPU != 0 ? (MSTATUS_FS | MSTATUS_XS) : 0);
1825
1826#if (STK_RISCV_FPU != 0)
1827 task_frame->X3_FSR = 0; // FCSR = 0: round-to-nearest, no accrued exception flags
1828#endif
1829
1830 return true;
1831}
1832
// Tear down scheduling on this hart: stop the tick source, disable the MSI
// interrupt, flip the state flags read by Context::Start's SaveJmp resume.
void Context::OnStop()
{
    // stop timer
    HW_StopMTimer();

    // clear pending SV exception
    // NOTE(review): this clears the MSI *enable* bit in mie; the pending bit
    // lives in mip / CLINT MSIP - confirm the enable bit is the intent here.
#ifdef _STK_RISCV_USE_PENDSV
    clear_csr(mie, MIP_MSIP);
#endif

    m_started = false;
    m_exiting = true;

    // make sure all assignments are set and executed
    __DSB();
    __ISB();
}
1850
1852{
1853 GetContext().OnStop();
1854
1855 // load context of the Exit trap
1856 HW_DisableInterrupts();
1857 OnTaskStart();
1858}
1859
1860uint32_t PlatformRiscV::GetTickResolution() const
1861{
1862 return GetContext().m_tick_resolution;
1863}
1864
1866{
1867 GetContext().m_handler->OnTaskSwitch(HW_GetCallerSP());
1868}
1869
1870void PlatformRiscV::Sleep(Timeout ticks)
1871{
1872 GetContext().m_handler->OnTaskSleep(HW_GetCallerSP(), ticks);
1873}
1874
1875void PlatformRiscV::SleepUntil(Ticks timestamp)
1876{
1877 GetContext().m_handler->OnTaskSleepUntil(HW_GetCallerSP(), timestamp);
1878}
1879
1880IWaitObject *PlatformRiscV::Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
1881{
1882 return GetContext().m_handler->OnTaskWait(HW_GetCallerSP(), sync_obj, mutex, timeout);
1883}
1884
1886{
1887 return GetContext().m_handler->OnGetTid(HW_GetCallerSP());
1888}
1889
1891{
1892 if ((GetContext().m_overrider == NULL) || !GetContext().m_overrider->OnHardFault())
1893 {
1895 }
1896}
1897
1898void PlatformRiscV::SetEventOverrider(IEventOverrider *overrider)
1899{
1900 STK_ASSERT(!GetContext().m_started);
1901 GetContext().m_overrider = overrider;
1902}
1903
1905{
1906 return HW_GetCallerSP();
1907}
1908
1909void PlatformRiscV::SetSpecificEventHandler(ISpecificEventHandler *handler)
1910{
1911 STK_ASSERT(!GetContext().m_started);
1912 GetContext().m_specific = handler;
1913}
1914
1916{
1917 return GetContext().m_service;
1918}
1919
1921{
1922 GetContext().EnterCriticalSection();
1923}
1924
1926{
1927 GetContext().ExitCriticalSection();
1928}
1929
1931{
1932 HW_SpinLockLock(m_lock);
1933}
1934
1936{
1937 HW_SpinLockUnlock(m_lock);
1938}
1939
1941{
1942 return HW_SpinLockTryLock(m_lock);
1943}
1944
1946{
1947 return HW_IsHandlerMode();
1948}
1949
1951{
1952 return HiResClockImpl::GetInstance()->GetCycles();
1953}
1954
1956{
1957 uint32_t freq = HiResClockImpl::GetInstance()->GetFrequency();
1958 STK_ASSERT(freq != 0);
1959 return freq;
1960}
1961
1962#endif // _STK_ARCH_RISC_V
#define STK_SYSTEM_CORE_CLOCK_FREQUENCY
System clock frequency in Hz. Default: 150 MHz.
#define STK_SYSTEM_CORE_CLOCK_VAR
Definition of the system core clock variable holding frequency of the CPU in Hz.
volatile uint32_t SystemCoreClock
System clock frequency in Hz.
Contains common inventory for platform implementation.
#define STK_ARCH_GET_CPU_ID()
Get CPU core id of the caller, e.g. if called while running on core 0 then returned value must be 0.
#define GetContext()
Get platform's context.
Hardware Abstraction Layer (HAL) declarations for the stk::hw namespace.
void STK_PANIC_HANDLER_DEFAULT(stk::EKernelPanicId id)
Default panic handler: disable interrupts, record the id, and spin in a tight loop — a defined,...
Definition stktest.cpp:55
#define STK_KERNEL_PANIC(id)
Called when the kernel detects an unrecoverable internal fault.
Definition stk_arch.h:63
#define __stk_attr_used
Marks a symbol as used, preventing the linker from discarding it even if no references are visible (d...
Definition stk_defs.h:172
#define __stk_forceinline
Forces compiler to always inline the decorated function, regardless of optimisation level.
Definition stk_defs.h:104
#define STK_TICKLESS_IDLE
Enables tickless (dynamic-tick) low-power operation during idle periods.
Definition stk_defs.h:36
#define STK_ASSERT(e)
Runtime assertion. Halts execution if the expression e evaluates to false.
Definition stk_defs.h:330
#define __stk_attr_noinline
Prevents compiler from inlining the decorated function (function prefix).
Definition stk_defs.h:185
#define STK_CRITICAL_SECTION_NESTINGS_MAX
Maximum allowable recursion depth for critical section entry (default: 16).
Definition stk_defs.h:404
#define STK_ARCH_CPU_COUNT
Number of physical CPU cores available to the scheduler (default: 1).
Definition stk_defs.h:414
#define __stk_attr_naked
Suppresses compiler-generated function prologue and epilogue (function prefix).
Definition stk_defs.h:133
#define STK_STATIC_ASSERT_DESC(X, DESC)
Compile-time assertion with a custom error description. Produces a compilation error if X is false.
Definition stk_defs.h:350
#define STK_STACK_MEMORY_FILLER
Sentinel value written to the entire stack region at initialization (stack watermark pattern).
Definition stk_defs.h:377
#define __stk_attr_noreturn
Declares that function never returns to its caller (function prefix).
Definition stk_defs.h:146
Contains helper implementations which simplify user-side code.
#define __stk_relax_cpu
Emits a CPU pipeline-relaxation hint for use inside hot busy-wait (spin) loops (in-code statement).
Definition stktest.h:33
Namespace of STK package.
uintptr_t Word
Native processor word type.
Definition stk_common.h:112
int64_t Ticks
Ticks value.
Definition stk_common.h:150
int32_t Timeout
Timeout time (ticks).
Definition stk_common.h:133
EStackType
Stack type.
Definition stk_common.h:70
@ STACK_SLEEP_TRAP
Stack of the Sleep trap.
Definition stk_common.h:72
@ STACK_USER_TASK
Stack of the user task.
Definition stk_common.h:71
@ STACK_EXIT_TRAP
Stack of the Exit trap.
Definition stk_common.h:73
uint64_t Cycles
Cycles value.
Definition stk_common.h:155
Word TId
Definition stk_common.h:117
@ ACCESS_PRIVILEGED
Privileged access mode (access to hardware is fully unrestricted).
Definition stk_common.h:33
EKernelPanicId
Identifies the source of a kernel panic.
Definition stk_common.h:52
@ KERNEL_PANIC_HRT_HARD_FAULT
Kernel running in KERNEL_HRT mode reported deadline failure of the task.
Definition stk_common.h:57
@ KERNEL_PANIC_NONE
Panic is absent (no fault).
Definition stk_common.h:53
@ KERNEL_PANIC_CPU_EXCEPTION
CPU reported an exception and halted execution.
Definition stk_common.h:58
@ KERNEL_PANIC_SPINLOCK_DEADLOCK
Spin-lock timeout expired: lock owner never released.
Definition stk_common.h:54
__stk_forceinline Word PtrToWord(T *ptr) noexcept
Cast a pointer to a CPU register-width integer.
Definition stk_arch.h:94
bool IsInsideISR()
Check whether the CPU is currently executing inside a hardware interrupt service routine (ISR).
Definition stktest.cpp:103
void SetEventOverrider(IEventOverrider *overrider)
Set platform event overrider.
void Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us, Stack *exit_trap)
Initialize scheduler's context.
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
void SetSpecificEventHandler(ISpecificEventHandler *handler)
void Stop()
Stop scheduling.
void Sleep(Timeout ticks)
Put calling process into a sleep state.
void SwitchToNext()
Switch to a next task.
void ProcessTick()
Process one tick.
uint32_t GetTickResolution() const
Get resolution of the system tick timer in microseconds. Resolution means a number of microseconds be...
bool InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
Initialize stack memory of the user task.
void ProcessHardFault()
Cause a hard fault of the system.
IWaitObject * Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
Word GetCallerSP() const
Get caller's Stack Pointer (SP).
TId GetTid() const
Get thread Id.
void Start()
Start scheduling.
Base platform context for all platform implementations.
static Word * InitStackMemory(IStackMemory *memory)
Initialize stack memory by filling it with STK_STACK_MEMORY_FILLER.
static void Exit()
Exit a critical section.
Definition stktest.cpp:78
static void Enter()
Enter a critical section.
Definition stktest.cpp:74
bool TryLock()
Attempt to acquire SpinLock in a single non-blocking attempt.
void Lock()
Acquire SpinLock, blocking until it is available.
Definition stktest.cpp:85
void Unlock()
Release SpinLock, allowing another thread or core to acquire it.
Definition stktest.cpp:89
static uint32_t GetFrequency()
Get clock frequency.
static Cycles GetCycles()
Get number of clock cycles elapsed.
Stack descriptor.
Definition stk_common.h:181
Word SP
Stack Pointer (SP) register (note: must be the first entry in this struct).
Definition stk_common.h:182
Interface for a stack memory region.
Definition stk_common.h:193
virtual size_t GetStackSize() const =0
Get number of elements of the stack memory array.
Wait object.
Definition stk_common.h:212
Synchronization object.
Definition stk_common.h:297
Interface for mutex synchronization primitive.
Definition stk_common.h:381
Interface for a user task.
Definition stk_common.h:433
virtual void Run()=0
Entry point of the user task.
Interface for the kernel services exposed to the user processes during run-time when Kernel started s...
Definition stk_common.h:929
static IKernelService * GetInstance()
Get CPU-local instance of the kernel service.
Definition stktest.cpp:69
RISC-V specific event handler.