SuperTinyKernel™ RTOS 1.05.3
Lightweight, high-performance, deterministic, bare-metal C++ RTOS for resource-constrained embedded systems. MIT Open Source License.
Loading...
Searching...
No Matches
stk_arch_x86-win32.cpp
Go to the documentation of this file.
1/*
2 * SuperTinyKernel(TM) RTOS: Lightweight High-Performance Deterministic C++ RTOS for Embedded Systems.
3 *
4 * Source: https://github.com/SuperTinyKernel-RTOS
5 *
6 * Copyright (c) 2022-2026 Neutron Code Limited <stk@neutroncode.com>. All Rights Reserved.
7 * License: MIT License, see LICENSE for a full text.
8 */
9
10// note: If missing, this header must be customized (get it in the root of the source folder) and
11// copied to the /include folder manually.
12#include "stk_config.h"
13
14#ifdef _STK_ARCH_X86_WIN32
15
16#include "stk_arch.h"
18
19using namespace stk;
20
21#define WIN32_LEAN_AND_MEAN
22#include <windows.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <assert.h>
26#include <list>
27#include <vector>
28
29using namespace stk;
30
31#ifndef WINAPI
32#define WINAPI __stdcall
33#endif
34
// winmm.dll's timeBeginPeriod is resolved at run time (see Context::LoadWindowsAPI)
// to avoid a link-time dependency on winmm.lib; this file-local pointer deliberately
// shadows the API name so call sites read like the native API.
typedef UINT MMRESULT;
typedef MMRESULT (WINAPI * timeBeginPeriodF)(UINT uPeriod);
static timeBeginPeriodF timeBeginPeriod = nullptr;

// thin wrappers over the Win32 critical-section API
#define STK_X86_WIN32_CRITICAL_SECTION CRITICAL_SECTION
#define STK_X86_WIN32_CRITICAL_SECTION_INIT(SES) ::InitializeCriticalSection(SES)
#define STK_X86_WIN32_CRITICAL_SECTION_START(SES) ::EnterCriticalSection(SES)
#define STK_X86_WIN32_CRITICAL_SECTION_END(SES) ::LeaveCriticalSection(SES)
#define STK_X86_WIN32_MIN_RESOLUTION (1000) // coarsest-allowed tick resolution floor, microseconds (see Context::ConfigureTime)
#define STK_X86_WIN32_GET_SP(STACK) (STACK + 2) // +2 to overcome stack filler check inside Kernel (adjusting to +2 preserves 8-byte alignment)
// short aliases for the spin-lock state constants
#define SLK_UNLOCKED hw::SpinLock::UNLOCKED
#define SLK_LOCKED hw::SpinLock::LOCKED
47
50static __stk_forceinline bool HW_SpinLockTryLock(volatile LONG &lock)
51{
52 return (InterlockedCompareExchange(
53 reinterpret_cast<volatile LONG *>(&lock), SLK_LOCKED, SLK_UNLOCKED) == SLK_UNLOCKED);
54}
55
/// Acquire the spin-lock, blocking until available: CAS first, then a bounded
/// busy-wait watching for UNLOCKED, then yield via ::Sleep to avoid priority
/// inversion against the lock holder.
static __stk_forceinline void HW_SpinLockLock(volatile LONG &lock)
{
    uint8_t sleep_time = 0;      // alternates 0/1: Sleep(0) yields, Sleep(1) waits ~1 ms
    uint32_t timeout = 0xFFFFFF; // watchdog counter for a lock that is never released

test:
    while (!HW_SpinLockTryLock(lock))
    {
        if (--timeout == 0)
        {
            // invariant violated: the lock owner exited without releasing
            // NOTE(review): the statement for this path (presumably
            // STK_KERNEL_PANIC(KERNEL_PANIC_SPINLOCK_DEADLOCK), per the
            // cross-reference) was lost in this extraction -- restore it
            // from the repository copy.
        }

        // short bounded busy-wait before falling back to the OS scheduler
        for (volatile int32_t spin = 100; (spin != 0); spin--)
        {
            // NOTE(review): a CPU relaxation hint (__stk_relax_cpu) appears
            // to have been dropped here by the extraction -- confirm upstream.

            // check if became unlocked then try locking atomically again
            if (lock == SLK_UNLOCKED)
                goto test;
        }

        // avoid priority inversion
        ::Sleep(sleep_time);
        sleep_time ^= 1; // toggle between Sleep(0) and Sleep(1)
    }
}
86
89static __stk_forceinline void HW_SpinLockUnlock(volatile LONG &lock)
90{
91 InterlockedExchange(reinterpret_cast<volatile LONG *>(&lock), SLK_UNLOCKED);
92}
93
94struct Win32ScopedCriticalSection
95{
96 STK_X86_WIN32_CRITICAL_SECTION &m_sec;
97
98 explicit Win32ScopedCriticalSection(STK_X86_WIN32_CRITICAL_SECTION &sec) : m_sec(sec)
99 {
100 STK_X86_WIN32_CRITICAL_SECTION_START(&sec);
101 }
102 ~Win32ScopedCriticalSection()
103 {
104 STK_X86_WIN32_CRITICAL_SECTION_END(&m_sec);
105 }
106};
107
108class HiResClockQPC
109{
110 LARGE_INTEGER m_freq;
111 LARGE_INTEGER m_start;
112
113public:
114 explicit HiResClockQPC()
115 {
116 QueryPerformanceFrequency(&m_freq);
117 QueryPerformanceCounter(&m_start);
118 }
119
120 static HiResClockQPC *GetInstance()
121 {
122 // keep declaration function-local to allow compiler stripping it from the binary if
123 // it is unused by the user code
124 static HiResClockQPC clock;
125 return &clock;
126 }
127
128 Cycles GetCycles()
129 {
130 LARGE_INTEGER current;
131 QueryPerformanceCounter(&current);
132
133 // relative cycles since simulation start
134 return static_cast<Cycles>(current.QuadPart - m_start.QuadPart);
135 }
136
137 uint32_t GetFrequency()
138 {
139 return static_cast<uint32_t>(m_freq.QuadPart);
140 }
141};
142
//! Platform context of the Win32 simulation back-end.
//!
//! Each STK task is simulated by a dedicated Win32 thread created suspended
//! and suspended/resumed by the scheduler; a high-priority timer thread
//! (TimerThread) drives the periodic tick.
static struct Context : public PlatformContext
{
    Context()
        : m_overrider(nullptr),
        m_sleep_trap(nullptr),
        m_exit_trap(nullptr),
        m_winmm_dll(nullptr),
        m_timer_thread(nullptr),
        m_tls(TLS_OUT_OF_INDEXES),
        m_tasks(),
        m_task_threads(),
        m_timer_tid(0),
        m_cs(),
        m_csu_nesting(0),
        m_started(false),
        m_stop_signal(false)
    {}

    //! Initialize (or re-initialize) the context before the kernel starts.
    //! \param handler       kernel event sink receiving scheduler callbacks
    //! \param service       kernel service exposed to user tasks at run time
    //! \param exit_trap     stack descriptor of the Exit trap
    //! \param resolution_us requested tick resolution in microseconds
    void Initialize(IPlatform::IEventHandler *handler, IKernelService *service, Stack *exit_trap, int32_t resolution_us)
    {
        PlatformContext::Initialize(handler, service, exit_trap, resolution_us);

        m_overrider = nullptr;
        m_sleep_trap = nullptr; // set by Context::InitStack
        m_exit_trap = nullptr; // set by Context::InitStack
        m_winmm_dll = nullptr;
        m_timer_thread = nullptr;
        m_started = false;
        m_stop_signal = false;
        m_csu_nesting = 0;
        m_timer_tid = 0;

        // TLS slot backing stk::hw::GetTls()/SetTls()
        if ((m_tls = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        {
            assert(false);
            return;
        }

        STK_X86_WIN32_CRITICAL_SECTION_INIT(&m_cs);

        LoadWindowsAPI();
    }

    //! Release the TLS slot and the dynamically loaded winmm.dll.
    ~Context()
    {
        if (m_tls != TLS_OUT_OF_INDEXES)
            TlsFree(m_tls);

        UnloadWindowsAPI();
    }

    //! Resolve winmm.dll at run time (avoids a link-time dependency on
    //! winmm.lib) and raise the OS timer precision to 1 ms.
    void LoadWindowsAPI()
    {
        HMODULE winmm = GetModuleHandleA("Winmm");
        if (winmm == nullptr)
            m_winmm_dll = winmm = LoadLibraryA("Winmm.dll"); // remember handle so we can FreeLibrary later
        assert(winmm != nullptr);

        timeBeginPeriod = (timeBeginPeriodF)GetProcAddress(winmm, "timeBeginPeriod");
        assert(timeBeginPeriod != nullptr);

        timeBeginPeriod(1);
    }

    //! Unload winmm.dll if (and only if) this context loaded it itself.
    void UnloadWindowsAPI()
    {
        if (m_winmm_dll != nullptr)
        {
            FreeLibrary(m_winmm_dll);
            m_winmm_dll = nullptr;
        }
    }

    //! Binds a user task to the Win32 thread that simulates it.
    struct TaskContext
    {
        TaskContext() : m_task(nullptr), m_stack(nullptr), m_thread(nullptr), m_thread_id(0)
        { }

        //! Bind the task/stack pair and create the simulating thread.
        void Initialize(ITask *task, Stack *stack)
        {
            m_task = task;
            m_stack = stack;
            m_thread = nullptr;
            m_thread_id = 0;

            InitThread();
        }

        //! Create the simulating thread suspended; the scheduler resumes it
        //! when the task becomes active (see Context::StartActiveTask).
        void InitThread()
        {
            // simulate stack size limitation
            size_t stack_size = m_task->GetStackSize() * sizeof(Word);

            m_thread = CreateThread(nullptr, stack_size, &OnTaskRun, this, CREATE_SUSPENDED, &m_thread_id);
        }

        //! Thread entry point: runs the user task to completion.
        static DWORD WINAPI OnTaskRun(LPVOID param)
        {
            ((TaskContext *)param)->m_task->Run();
            return 0;
        }

        ITask *m_task;     //!< user task simulated by this thread
        Stack *m_stack;    //!< kernel stack descriptor of the task
        HANDLE m_thread;   //!< simulating Win32 thread handle
        DWORD m_thread_id; //!< Win32 thread id of m_thread
    };

    bool InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task);
    void ConfigureTime();
    void StartActiveTask();
    void CreateTimerThreadAndJoin();
    void Cleanup();
    void ProcessTick();
    void SwitchContext();
    void SwitchToNext();
    void Sleep(Timeout ticks);
    void SleepUntil(Ticks timestamp);
    IWaitObject *Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout);
    void Stop();
    Word GetCallerSP() const;
    TId GetTid() const;

    // NOTE(review): the signature line of this accessor (presumably
    // "__stk_forceinline Word GetTls()", matching the stk::hw::GetTls wrapper
    // below) was lost in this extraction -- restore from the repository copy.
    {
        return hw::PtrToWord(TlsGetValue(m_tls));
    }

    //! Store the raw thread-pointer word into this thread's TLS slot.
    __stk_forceinline void SetTls(Word tp)
    {
        TlsSetValue(m_tls, hw::WordToPtr<void>(tp));
    }

    //! Enter the global critical section. On the outermost entry the timer
    //! thread is suspended so no tick/context switch can preempt the caller.
    __stk_forceinline void EnterCriticalSection()
    {
        STK_X86_WIN32_CRITICAL_SECTION_START(&m_cs);

        if (m_csu_nesting == 0)
        {
            // avoid suspending self
            if (GetCurrentThreadId() != m_timer_tid)
                SuspendThread(m_timer_thread);
        }

        // increase nesting count within a limit
        if (++m_csu_nesting > STK_CRITICAL_SECTION_NESTINGS_MAX)
        {
            // invariant violated: exceeded max allowed number of recursions
            STK_KERNEL_PANIC(KERNEL_PANIC_CS_NESTING_OVERFLOW);
        }
    }

    //! Leave the global critical section; the timer thread is resumed when
    //! the outermost nesting level unwinds.
    __stk_forceinline void ExitCriticalSection()
    {
        STK_ASSERT(m_csu_nesting != 0);

        --m_csu_nesting;

        if (m_csu_nesting == 0)
        {
            // suspending self is not supported
            if (GetCurrentThreadId() != m_timer_tid)
                ResumeThread(m_timer_thread);
        }

        STK_X86_WIN32_CRITICAL_SECTION_END(&m_cs);
    }

    IPlatform::IEventOverrider *m_overrider; //!< optional event overrider (sleep/hard-fault hooks)
    Stack *m_sleep_trap;                     //!< stack descriptor of the Sleep trap
    Stack *m_exit_trap;                      //!< stack descriptor of the Exit trap
    HMODULE m_winmm_dll;                     //!< winmm.dll handle when loaded by us, else nullptr
    HANDLE m_timer_thread;                   //!< tick-driving timer thread
    DWORD m_tls;                             //!< TLS slot index (TLS_OUT_OF_INDEXES when absent)
    std::list<TaskContext *> m_tasks;        //!< contexts of all registered tasks
    std::vector<HANDLE> m_task_threads;      //!< handles of all task threads (wait list)
    DWORD m_timer_tid;                       //!< thread id of m_timer_thread
    STK_X86_WIN32_CRITICAL_SECTION m_cs;     //!< guards scheduler state
    uint8_t m_csu_nesting;                   //!< critical-section nesting depth
    bool m_started;                          //!< true while the kernel is running
    volatile bool m_stop_signal;             //!< set by Stop(), observed by TimerThread
}
s_StkPlatformContext[1]; // the single platform context instance
330
// Last panic id recorded by the default panic handler; volatile so an attached
// debugger (or another observer) reads the freshly stored value.
static volatile EKernelPanicId g_LastPanicId = KERNEL_PANIC_NONE;

__stk_attr_noinline // keep out of inlining to preserve stack frame
__stk_attr_noreturn // never returns - a trap
// NOTE(review): the function signature line (per the cross-reference:
// "void STK_PANIC_HANDLER_DEFAULT(stk::EKernelPanicId id)") was lost in this
// extraction -- restore it from the repository copy.
{
    g_LastPanicId = id;

    // spin forever: without a watchdog, a debugger can attach and inspect 'id'
    for (;;)
    {
    }
}
346
347static DWORD WINAPI TimerThread(LPVOID param)
348{
349 (void)param;
350
351 DWORD wait_ms = (1U * GetContext().m_tick_resolution) / 1000U;
352 GetContext().m_timer_tid = GetCurrentThreadId();
353
354 while (WaitForSingleObject(GetContext().m_timer_thread, wait_ms) == WAIT_TIMEOUT)
355 {
356 if (GetContext().m_stop_signal)
357 break;
358
359 GetContext().ProcessTick();
360 }
361
362 return 0;
363}
364
365void Context::ConfigureTime()
366{
367 // Windows timers are jittery, so make resolution more coarse
368 if (m_tick_resolution < STK_X86_WIN32_MIN_RESOLUTION)
369 m_tick_resolution = STK_X86_WIN32_MIN_RESOLUTION;
370
371 // increase precision of ticks to at least 1 ms (although Windows timers will still be quite coarse and have jitter of +1 ms)
372 timeBeginPeriod(1);
373}
374
375void Context::StartActiveTask()
376{
377 STK_ASSERT(m_stack_active != nullptr);
378 TaskContext *active_task = hw::WordToPtr<TaskContext>(m_stack_active->SP);
379 STK_ASSERT(active_task != nullptr);
380
381 ResumeThread(active_task->m_thread);
382}
383
/// Start scheduling: release the first task, spawn the high-priority tick
/// thread, then block reaping task threads as they exit; finally join the
/// timer thread (which exits when Stop() raises m_stop_signal).
void Context::CreateTimerThreadAndJoin()
{
    m_started = true;

    m_handler->OnStart(m_stack_active);

    // release the first scheduled task (its thread was created suspended)
    StartActiveTask();

    // create tick thread with highest priority
    m_timer_thread = CreateThread(nullptr, 0, &TimerThread, nullptr, 0, nullptr);
    STK_ASSERT(m_timer_thread != nullptr);
    SetThreadPriority(m_timer_thread, THREAD_PRIORITY_TIME_CRITICAL);

    // reap task threads one at a time as they finish
    // NOTE(review): WaitForMultipleObjects handles at most MAXIMUM_WAIT_OBJECTS
    // (64) handles -- confirm the kernel's task count stays below that limit.
    while (!m_task_threads.empty())
    {
        DWORD result = WaitForMultipleObjects((DWORD)m_task_threads.size(), m_task_threads.data(), FALSE, INFINITE);
        STK_ASSERT(result != WAIT_TIMEOUT);
        STK_ASSERT(result != WAIT_ABANDONED);
        STK_ASSERT(result != WAIT_FAILED);

        Win32ScopedCriticalSection __cs(m_cs);

        // the wait result encodes the signalled handle's index as (WAIT_OBJECT_0 + i)
        uint32_t i = 0;
        for (std::vector<HANDLE>::iterator itr = m_task_threads.begin(); itr != m_task_threads.end(); ++itr)
        {
            if (result == (WAIT_OBJECT_0 + i))
            {
                // map the exited thread handle back to its TaskContext
                TaskContext *exiting_task = nullptr;
                for (std::list<TaskContext *>::iterator titr = m_tasks.begin(); titr != m_tasks.end(); ++titr)
                {
                    if ((*titr)->m_thread == (*itr))
                    {
                        exiting_task = (*titr);
                        break;
                    }
                }
                STK_ASSERT(exiting_task != nullptr);

                if (exiting_task != nullptr)
                    m_handler->OnTaskExit(exiting_task->m_stack);

                m_task_threads.erase(itr);
                break;
            }

            ++i;
        }
    }

    // join (never returns to the caller from here unless thread is terminated, see KERNEL_DYNAMIC),
    // a stop signal is sent by IPlatform::Stop() by the last exiting task
    if (m_timer_thread != nullptr)
        WaitForSingleObject(m_timer_thread, INFINITE);
}
438
439void Context::Cleanup()
440{
441 // close thread handles of all tasks
442 for (std::list<TaskContext *>::iterator itr = m_tasks.begin(); itr != m_tasks.end(); ++itr)
443 {
444 if ((*itr)->m_thread != nullptr)
445 {
446 CloseHandle((*itr)->m_thread);
447 (*itr)->m_thread = nullptr;
448 }
449 }
450 m_tasks.clear();
451
452 // close timer thread
453 if (m_timer_thread != nullptr)
454 {
455 CloseHandle(m_timer_thread);
456 m_timer_thread = nullptr;
457 }
458
459 // reset stop signal
460 m_stop_signal = false;
461
462 // notify kernel about a full stop
463 m_handler->OnStop();
464}
465
466void Context::ProcessTick()
467{
468 Win32ScopedCriticalSection __cs(m_cs);
469
470 if (m_handler->OnTick(m_stack_idle, m_stack_active))
471 GetContext().SwitchContext();
472}
473
474void Context::SwitchContext()
475{
476 // suspend Idle thread
477 if ((m_stack_idle != m_sleep_trap) && (m_stack_idle != m_exit_trap))
478 {
479 TaskContext *idle_task = hw::WordToPtr<TaskContext>(m_stack_idle->SP);
480 STK_ASSERT(idle_task != nullptr);
481
482 SuspendThread(idle_task->m_thread);
483 }
484
485 // resume Active thread
486 if (m_stack_active == m_sleep_trap)
487 {
488 if ((m_overrider == nullptr) || !m_overrider->OnSleep())
489 {
490 // pass
491 }
492 }
493 else
494 if (m_stack_active == GetContext().m_exit_trap)
495 {
496 // pass
497 }
498 else
499 {
500 TaskContext *active_task = hw::WordToPtr<TaskContext>(m_stack_active->SP);
501 STK_ASSERT(active_task != nullptr);
502
503 ResumeThread(active_task->m_thread);
504 }
505}
506
507Word Context::GetCallerSP() const
508{
509 Word caller_sp = 0;
510 DWORD calling_tid = GetCurrentThreadId();
511
512 Win32ScopedCriticalSection __cs(const_cast<STK_X86_WIN32_CRITICAL_SECTION &>(m_cs));
513
514 for (std::list<TaskContext *>::const_iterator itr = m_tasks.begin(), end = m_tasks.end(); itr != end; ++itr)
515 {
516 if ((*itr)->m_thread_id == calling_tid)
517 {
518 caller_sp = hw::PtrToWord(STK_X86_WIN32_GET_SP((*itr)->m_task->GetStack()));
519 break;
520 }
521 }
522
523 // expect to find the calling task inside m_tasks
524 STK_ASSERT(caller_sp != 0);
525
526 return caller_sp;
527}
528
529TId Context::GetTid() const
530{
531 return m_handler->OnGetTid(GetCallerSP());
532}
533
534void Context::SwitchToNext()
535{
536 m_handler->OnTaskSwitch(GetCallerSP());
537}
538
539void Context::Sleep(Timeout ticks)
540{
541 m_handler->OnTaskSleep(GetCallerSP(), ticks);
542}
543
544void Context::SleepUntil(Ticks timestamp)
545{
546 m_handler->OnTaskSleepUntil(GetCallerSP(), timestamp);
547}
548
549IWaitObject *Context::Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
550{
551 return m_handler->OnTaskWait(GetCallerSP(), sync_obj, mutex, timeout);
552}
553
554void Context::Stop()
555{
556 m_stop_signal = true;
557 m_started = false;
558}
559
/// Prepare the stack descriptor for a user task or a trap stack. For user
/// tasks a TaskContext is placed at the base of the task's stack memory and
/// a suspended Win32 thread is created; the descriptor's SP field is
/// repurposed to carry the TaskContext pointer rather than a real stack
/// pointer (consumed by StartActiveTask/SwitchContext).
bool Context::InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
{
    InitStackMemory(stack_memory);

    // reuse the (simulated) stack memory itself to hold the TaskContext;
    // STK_X86_WIN32_GET_SP skips the kernel's stack-filler guard words
    TaskContext *ctx = reinterpret_cast<TaskContext *>(STK_X86_WIN32_GET_SP(stack_memory->GetStack()));

    switch (stack_type)
    {
    case STACK_USER_TASK: {
        // NOTE(review): Initialize() is invoked on raw memory without a
        // placement-new of TaskContext -- works for this trivially-assignable
        // struct, but confirm intent upstream.
        ctx->Initialize(user_task, stack);

        m_tasks.push_back(ctx);
        m_task_threads.push_back(ctx->m_thread);
        break; }

    case STACK_SLEEP_TRAP: {
        GetContext().m_sleep_trap = stack;
        break; }

    case STACK_EXIT_TRAP: {
        GetContext().m_exit_trap = stack;
        break; }
    }

    // kernel-visible SP carries the TaskContext pointer (traps carry the
    // base address too, letting SwitchContext recognize them by Stack*)
    stack->SP = hw::PtrToWord(ctx);

    return true;
}
588
589void PlatformX86Win32::Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us,
590 Stack *exit_trap)
591{
592 GetContext().Initialize(event_handler, service, exit_trap, resolution_us);
593}
594
596{
597 GetContext().ConfigureTime();
598 GetContext().CreateTimerThreadAndJoin();
599 GetContext().Cleanup();
600}
601
603{
604 GetContext().Stop();
605}
606
607bool PlatformX86Win32::InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
608{
609 return GetContext().InitStack(stack_type, stack, stack_memory, user_task);
610}
611
613{
614 return GetContext().m_tick_resolution;
615}
616
618{
619 GetContext().SwitchToNext();
620}
621
623{
624 GetContext().Sleep(ticks);
625}
626
628{
629 GetContext().SleepUntil(timestamp);
630}
631
633{
634 return GetContext().Wait(sync_obj, mutex, timeout);
635}
636
638{
639 GetContext().ProcessTick();
640}
641
643{
644 if ((GetContext().m_overrider == nullptr) || !GetContext().m_overrider->OnHardFault())
645 {
647 }
648}
649
650void PlatformX86Win32::SetEventOverrider(IEventOverrider *overrider)
651{
652 STK_ASSERT(!GetContext().m_started);
653 GetContext().m_overrider = overrider;
654}
655
657{
658 return GetContext().GetCallerSP();
659}
660
662{
663 return GetContext().GetTid();
664}
665
667{
668 return GetContext().GetTls();
669}
670
671void stk::hw::SetTls(Word tp)
672{
673 return GetContext().SetTls(tp);
674}
675
677{
678 return GetContext().m_service;
679}
680
682{
683 GetContext().EnterCriticalSection();
684}
685
687{
688 GetContext().ExitCriticalSection();
689}
690
692{
693 HW_SpinLockLock(m_lock);
694}
695
697{
698 HW_SpinLockUnlock(m_lock);
699}
700
702{
703 return HW_SpinLockTryLock(m_lock);
704}
705
707{
708 return false;
709}
710
712{
713 return HiResClockQPC::GetInstance()->GetCycles();
714}
715
717{
718 return HiResClockQPC::GetInstance()->GetFrequency();
719}
720
721#endif // _STK_ARCH_X86_WIN32
Contains common inventory for platform implementation.
#define GetContext()
Get platform's context.
Hardware Abstraction Layer (HAL) declarations for the stk::hw namespace.
void STK_PANIC_HANDLER_DEFAULT(stk::EKernelPanicId id)
Default panic handler: disable interrupts, record the id, and spin in a tight loop — a defined,...
Definition stktest.cpp:55
#define STK_KERNEL_PANIC(id)
Called when the kernel detects an unrecoverable internal fault.
Definition stk_arch.h:63
#define __stk_forceinline
Forces compiler to always inline the decorated function, regardless of optimisation level.
Definition stk_defs.h:104
#define STK_ASSERT(e)
Runtime assertion. Halts execution if the expression e evaluates to false.
Definition stk_defs.h:330
#define __stk_attr_noinline
Prevents compiler from inlining the decorated function (function prefix).
Definition stk_defs.h:185
#define STK_CRITICAL_SECTION_NESTINGS_MAX
Maximum allowable recursion depth for critical section entry (default: 16).
Definition stk_defs.h:404
#define __stk_attr_noreturn
Declares that function never returns to its caller (function prefix).
Definition stk_defs.h:146
#define __stk_relax_cpu
Emits a CPU pipeline-relaxation hint for use inside hot busy-wait (spin) loops (in-code statement).
Definition stktest.h:33
Namespace of STK package.
uintptr_t Word
Native processor word type.
Definition stk_common.h:112
void Sleep(uint32_t ticks)
Put calling process into a sleep state.
Definition stk_helper.h:298
int64_t Ticks
Ticks value.
Definition stk_common.h:150
int32_t Timeout
Timeout time (ticks).
Definition stk_common.h:133
void SetTls(Word tp)
Set thread-local storage (TLS).
EStackType
Stack type.
Definition stk_common.h:70
@ STACK_SLEEP_TRAP
Stack of the Sleep trap.
Definition stk_common.h:72
@ STACK_USER_TASK
Stack of the user task.
Definition stk_common.h:71
@ STACK_EXIT_TRAP
Stack of the Exit trap.
Definition stk_common.h:73
Word GetTls()
Get thread-local storage (TLS).
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
Definition stk_helper.h:322
TId GetTid()
Get task/thread Id of the calling task.
Definition stk_helper.h:217
uint64_t Cycles
Cycles value.
Definition stk_common.h:155
Word TId
Definition stk_common.h:117
EKernelPanicId
Identifies the source of a kernel panic.
Definition stk_common.h:52
@ KERNEL_PANIC_HRT_HARD_FAULT
Kernel running in KERNEL_HRT mode reported deadline failure of the task.
Definition stk_common.h:57
@ KERNEL_PANIC_NONE
Panic is absent (no fault).
Definition stk_common.h:53
@ KERNEL_PANIC_SPINLOCK_DEADLOCK
Spin-lock timeout expired: lock owner never released.
Definition stk_common.h:54
__stk_forceinline T * WordToPtr(Word value) noexcept
Cast a CPU register-width integer back to a pointer.
Definition stk_arch.h:111
__stk_forceinline Word PtrToWord(T *ptr) noexcept
Cast a pointer to a CPU register-width integer.
Definition stk_arch.h:94
void SetTls(Word tp)
Write raw thread-pointer (TP) register used as per-task TLS storage.
Word GetTls()
Read raw thread-pointer (TP) register used as per-task TLS storage.
bool IsInsideISR()
Check whether the CPU is currently executing inside a hardware interrupt service routine (ISR).
Definition stktest.cpp:103
Namespace of the test inventory.
Base platform context for all platform implementations.
bool InitStack(EStackType stack_type, Stack *stack, IStackMemory *stack_memory, ITask *user_task)
Initialize stack memory of the user task.
TId GetTid() const
Get thread Id.
uint32_t GetTickResolution() const
Get resolution of the system tick timer in microseconds. Resolution means a number of microseconds be...
void SetEventOverrider(IEventOverrider *overrider)
Set platform event overrider.
void Start()
Start scheduling.
void Stop()
Stop scheduling.
void SwitchToNext()
Switch to a next task.
void ProcessHardFault()
Cause a hard fault of the system.
void ProcessTick()
Process one tick.
IWaitObject * Wait(ISyncObject *sync_obj, IMutex *mutex, Timeout timeout)
void Sleep(Timeout ticks)
Put calling process into a sleep state.
void SleepUntil(Ticks timestamp)
Put calling process into a sleep state until the specified timestamp.
void Initialize(IEventHandler *event_handler, IKernelService *service, uint32_t resolution_us, Stack *exit_trap)
Initialize scheduler's context.
Word GetCallerSP() const
Get caller's Stack Pointer (SP).
static void Exit()
Exit a critical section.
Definition stktest.cpp:78
static void Enter()
Enter a critical section.
Definition stktest.cpp:74
bool TryLock()
Attempt to acquire SpinLock in a single non-blocking attempt.
void Lock()
Acquire SpinLock, blocking until it is available.
Definition stktest.cpp:85
void Unlock()
Release SpinLock, allowing another thread or core to acquire it.
Definition stktest.cpp:89
static uint32_t GetFrequency()
Get clock frequency.
static Cycles GetCycles()
Get number of clock cycles elapsed.
Stack descriptor.
Definition stk_common.h:181
Word SP
Stack Pointer (SP) register (note: must be the first entry in this struct).
Definition stk_common.h:182
Interface for a stack memory region.
Definition stk_common.h:193
virtual Word * GetStack() const =0
Get pointer to the stack memory.
Wait object.
Definition stk_common.h:212
Synchronization object.
Definition stk_common.h:297
Interface for mutex synchronization primitive.
Definition stk_common.h:381
Interface for a user task.
Definition stk_common.h:433
Interface for the kernel services exposed to the user processes during run-time when Kernel started s...
Definition stk_common.h:929
static IKernelService * GetInstance()
Get CPU-local instance of the kernel service.
Definition stktest.cpp:69
RISC-V specific event handler.