From 465a0c414d029754eca5961b6928f482dbdc37b7 Mon Sep 17 00:00:00 2001 From: Jakub Lisowski Date: Wed, 7 Jan 2026 21:40:44 +0100 Subject: [PATCH] Code rebase move --- kernel/arch/x86_64/src/hal/impl/spinlock.hpp | 38 +- kernel/arch/x86_64/src/include/thread.nasm | 3 +- kernel/src/mem/virt/addr_space.cpp | 4 +- kernel/src/mem/virt/page_fault.cpp | 2 +- kernel/src/mem/virt/vmm.cpp | 4 +- kernel/src/scheduling/error.hpp | 3 + kernel/src/scheduling/kworker.cpp | 21 +- kernel/src/scheduling/kworker.hpp | 2 + .../src/scheduling/policies/mlfq_policy.hpp | 6 +- .../policies/priority_queue_policy.hpp | 5 +- .../policies/round_robin_policy.hpp | 4 +- kernel/src/scheduling/policy.hpp | 11 + kernel/src/scheduling/process.hpp | 17 + kernel/src/scheduling/processes.cpp | 18 +- kernel/src/scheduling/processes.hpp | 12 +- kernel/src/scheduling/scheduler.cpp | 155 +++++++- kernel/src/scheduling/scheduler.hpp | 31 +- kernel/src/scheduling/task_mgr.cpp | 373 ++++++++++++++++-- kernel/src/scheduling/task_mgr.hpp | 20 +- kernel/src/scheduling/thread.hpp | 18 +- kernel/src/scheduling/threads.cpp | 30 ++ kernel/src/scheduling/threads.hpp | 18 +- kernel/src/scheduling/wait_queue.hpp | 89 +++++ kernel/src/sys/loader.cpp | 2 +- kernel/src/syscalls/calls/proc.hpp | 36 +- kernel/src/syscalls/syscalls.cpp | 2 + kernel/src/trace.cpp | 4 + kernel/src/video/window_manager.cpp | 2 +- libs/libc/src/abi/platform.h | 4 +- libs/libc/src/include/alkos/sys/proc.h | 6 +- libs/libc/src/include/alkos/syscall.h | 2 + libs/libc/src/internal/crt0.cpp | 9 +- libs/libc/src/internal/libc_backend.cpp | 4 +- .../data_structures/intrusive_linked_list.hpp | 291 +++++++++++--- .../maps/intrusive_rb_tree.hpp | 44 ++- .../priority_queues/bitmap_pq.hpp | 12 +- .../include/template/scope_guard.hpp | 2 +- scripts/config/feature_flags_schema.yaml | 7 + userspace/programs/shell/shell.cpp | 112 +++++- userspace/programs/shell/shell.hpp | 2 + 40 files changed, 1223 insertions(+), 202 deletions(-) create mode 100644 
kernel/src/scheduling/wait_queue.hpp diff --git a/kernel/arch/x86_64/src/hal/impl/spinlock.hpp b/kernel/arch/x86_64/src/hal/impl/spinlock.hpp index 1d061b94..55b26091 100644 --- a/kernel/arch/x86_64/src/hal/impl/spinlock.hpp +++ b/kernel/arch/x86_64/src/hal/impl/spinlock.hpp @@ -35,33 +35,33 @@ class alignas(kCacheLineSizeBytes) Spinlock : public SpinlockAPI FORCE_INLINE_F void Lock() { - if constexpr (FeatureEnabled) { - LockDebug_(); - return; - } - - while (__builtin_expect(__sync_lock_test_and_set(&lock_, 1), 0)) { - Pause_(); - } + // if constexpr (FeatureEnabled) { + // LockDebug_(); + // return; + // } + // + // while (__builtin_expect(__sync_lock_test_and_set(&lock_, 1), 0)) { + // Pause_(); + // } } FORCE_INLINE_F void Unlock() { - if constexpr (FeatureEnabled) { - UnlockDebug_(); - return; - } - - __sync_lock_release(&lock_); + // if constexpr (FeatureEnabled) { + // UnlockDebug_(); + // return; + // } + // + // __sync_lock_release(&lock_); } FORCE_INLINE_F NODISCARD bool TryLock() { - if constexpr (FeatureEnabled) { - return TryLockDebug_(); - } - - return !__sync_lock_test_and_set(&lock_, 1); + // if constexpr (FeatureEnabled) { + // return TryLockDebug_(); + // } + // + // return !__sync_lock_test_and_set(&lock_, 1); } FORCE_INLINE_F NODISCARD bool IsLocked() const { return lock_ != 0; } diff --git a/kernel/arch/x86_64/src/include/thread.nasm b/kernel/arch/x86_64/src/include/thread.nasm index b7e76cf8..38f97e74 100644 --- a/kernel/arch/x86_64/src/include/thread.nasm +++ b/kernel/arch/x86_64/src/include/thread.nasm @@ -3,13 +3,14 @@ ; ------------------------------------------------------------ struc Thread - .intrusive_data resb 88 + .intrusive_data resb 144 .tid: resq 1 .owner: resq 1 .flags resq 1 .state resq 1 .retval resq 1 + .wait_queue resq 1 .kernel_stack: resq 1 .kernel_stack_bottom: resq 1 diff --git a/kernel/src/mem/virt/addr_space.cpp b/kernel/src/mem/virt/addr_space.cpp index 0dc20237..fad3fe23 100644 --- 
a/kernel/src/mem/virt/addr_space.cpp +++ b/kernel/src/mem/virt/addr_space.cpp @@ -82,13 +82,13 @@ expected AS::AddArea(VMemArea *vma) pos->prev ? area_list_.InsertAfter(pos->prev, vma) : area_list_.PushFront(vma); RET_UNEXPECTED_IF(!new_node, MemError::OutOfMemory); - vma_guard.dismiss(); + vma_guard.Dismiss(); return {}; } } RET_UNEXPECTED_IF(!area_list_.PushBack(vma), MemError::OutOfMemory); - vma_guard.dismiss(); + vma_guard.Dismiss(); return {}; } diff --git a/kernel/src/mem/virt/page_fault.cpp b/kernel/src/mem/virt/page_fault.cpp index 9fbc3a90..7e552e26 100644 --- a/kernel/src/mem/virt/page_fault.cpp +++ b/kernel/src/mem/virt/page_fault.cpp @@ -68,7 +68,7 @@ void HandleUnresolvableFault(const PageFaultData &pfd, const hal::ExceptionData if (hal::IsInterruptFromUserSpace(data)) { auto pid = hardware::GetRunningPid(); TRACE_FATAL_GENERAL( - "Process %llu Segmentation Fault at %p (RIP=%p)", pid.id, pfd.faulting_ptr, + "Process %llu Segmentation Fault at %p (RIP=%p)", pid, pfd.faulting_ptr, data.isr_stack_frame.rip ); diff --git a/kernel/src/mem/virt/vmm.cpp b/kernel/src/mem/virt/vmm.cpp index 930d4ff2..2d105b21 100644 --- a/kernel/src/mem/virt/vmm.cpp +++ b/kernel/src/mem/virt/vmm.cpp @@ -69,7 +69,7 @@ expected, MemError> Vmm::CreateUserAddrSpace() auto res = as->AddArea(*kernel_sync_vma); RET_UNEXPECTED_IF_ERR(res); - as_guard.dismiss(); + as_guard.Dismiss(); return as; } @@ -197,7 +197,7 @@ expected, MemError> Vmm::MapUserBackbuffer( auto add_res = as->AddArea(vma); RET_UNEXPECTED_IF_ERR(add_res); - vma_guard.dismiss(); + vma_guard.Dismiss(); return gap_res->start; } diff --git a/kernel/src/scheduling/error.hpp b/kernel/src/scheduling/error.hpp index 6194fa89..773482d2 100644 --- a/kernel/src/scheduling/error.hpp +++ b/kernel/src/scheduling/error.hpp @@ -16,6 +16,7 @@ enum class Error { JoiningDetachedThread, SelfJoin, AlreadyJoined, + NoPermission, }; } // namespace Sched @@ -41,6 +42,8 @@ static constexpr const char *to_string(const Sched::Error &error) 
return "SelfJoin"; case Sched::Error::AlreadyJoined: return "AlreadyJoined"; + case Sched::Error::NoPermission: + return "NoPermission"; } return "unknown error"; diff --git a/kernel/src/scheduling/kworker.cpp b/kernel/src/scheduling/kworker.cpp index d470abc9..b8628692 100644 --- a/kernel/src/scheduling/kworker.cpp +++ b/kernel/src/scheduling/kworker.cpp @@ -24,7 +24,6 @@ void Sched::KWorkerMain() hal::Noop(); hal::Noop(); } - SchedulingModule::Get().GetScheduler().Yield(); } } @@ -39,6 +38,26 @@ void Sched::TraceDumperMain() } } +void Sched::ThreadRipperMain() +{ + TRACE_INFO_SCHEDULING("Created new ThreadRipper!"); + + while (true) { + SchedulingModule::Get().GetTaskMgr().ThreadRipperWork(); + SchedulingModule::Get().GetScheduler().Yield(); + } +} + +void Sched::ProcessRipperMain() +{ + TRACE_INFO_SCHEDULING("Created new ProcessRipper!"); + + while (true) { + SchedulingModule::Get().GetTaskMgr().ProcessRipperWork(); + SchedulingModule::Get().GetScheduler().Yield(); + } +} + void Sched::StdoutTracerMain(Pid pid) { TRACE_INFO_SCHEDULING("Created new StdoutTracer!"); diff --git a/kernel/src/scheduling/kworker.hpp b/kernel/src/scheduling/kworker.hpp index 425b7c14..c983ba79 100644 --- a/kernel/src/scheduling/kworker.hpp +++ b/kernel/src/scheduling/kworker.hpp @@ -7,6 +7,8 @@ struct Pid; void KWorkerMain(); void TraceDumperMain(); +void ThreadRipperMain(); +void ProcessRipperMain(); void FdHierarchyDumperMain(); void StdoutTracerMain(Pid pid); } // namespace Sched diff --git a/kernel/src/scheduling/policies/mlfq_policy.hpp b/kernel/src/scheduling/policies/mlfq_policy.hpp index c4787e10..5b7d877a 100644 --- a/kernel/src/scheduling/policies/mlfq_policy.hpp +++ b/kernel/src/scheduling/policies/mlfq_policy.hpp @@ -166,6 +166,8 @@ class MLFQPolicy : public PolicyImpl } } + void RemoveTask(Thread *thread) { queues_[thread->flags.priority].Delete(thread); } + // ------------------------------ // Private methods // ------------------------------ @@ -192,8 +194,8 @@ class 
MLFQPolicy : public PolicyImpl // Class fields // ------------------------------ - data_structures::IntrusiveRBTree queues_[kNumLevels]; - using HookT = data_structures::IntrusiveRBTree::HookT; + data_structures::IntrusiveRBTree queues_[kNumLevels]; + using HookT = data_structures::IntrusiveRBTree::HookT; u64 last_boost_time_ns_{0}; u64 min_vruntime_{0}; diff --git a/kernel/src/scheduling/policies/priority_queue_policy.hpp b/kernel/src/scheduling/policies/priority_queue_policy.hpp index d397ef11..f0737f9a 100644 --- a/kernel/src/scheduling/policies/priority_queue_policy.hpp +++ b/kernel/src/scheduling/policies/priority_queue_policy.hpp @@ -64,6 +64,8 @@ class PriorityQueuePolicy : public PolicyImpl return flags->priority >= kMaxPriority; } + void RemoveTask(Thread *thread) { priority_queue_.Remove(thread, thread->flags.priority); } + // ------------------------------ // Private methods // ------------------------------ @@ -73,7 +75,8 @@ class PriorityQueuePolicy : public PolicyImpl // Class fields // ------------------------------ - data_structures::BitmapPriorityQueue priority_queue_{}; + data_structures::BitmapPriorityQueue + priority_queue_{}; }; } // namespace Sched diff --git a/kernel/src/scheduling/policies/round_robin_policy.hpp b/kernel/src/scheduling/policies/round_robin_policy.hpp index d0cb540c..de047dde 100644 --- a/kernel/src/scheduling/policies/round_robin_policy.hpp +++ b/kernel/src/scheduling/policies/round_robin_policy.hpp @@ -52,6 +52,8 @@ class RoundRobinPolicy : public PolicyImpl NODISCARD bool ValidateThreadFlags(const ThreadFlags *) { return false; } + void RemoveTask(Thread *thread) { threads_.Remove(thread); } + // ------------------------------ // Private methods // ------------------------------ @@ -61,7 +63,7 @@ class RoundRobinPolicy : public PolicyImpl // Class fields // ------------------------------ - data_structures::IntrusiveList threads_{}; + data_structures::IntrusiveDoubleList threads_{}; }; } // namespace Sched diff --git 
a/kernel/src/scheduling/policy.hpp b/kernel/src/scheduling/policy.hpp index 3f9fd301..9d6f8176 100644 --- a/kernel/src/scheduling/policy.hpp +++ b/kernel/src/scheduling/policy.hpp @@ -37,6 +37,7 @@ struct Policy { bool (*validate_flags)(void *, const ThreadFlags *); void (*on_thread_yield)(void *, Thread *); void (*on_periodic_update)(void *, u64 current_time_ns); + void (*remove_task)(void *, Thread *); } cbs; void *self; }; @@ -51,6 +52,7 @@ struct PolicyImpl { NODISCARD u64 GetPreemptTime(Thread *) { R_FAIL_ALWAYS("NOT_IMPLEMENTED"); } NODISCARD bool IsFirstHigherPriority(Thread *, Thread *) { R_FAIL_ALWAYS("NOT_IMPLEMENTED"); } NODISCARD bool ValidateThreadFlags(const ThreadFlags *) { R_FAIL_ALWAYS("NOT_IMPLEMENTED"); } + void RemoveTask(Thread *) { R_FAIL_ALWAYS("NOT_IMPLEMENTED"); } // Event callbacks void OnThreadYield(Thread *) {} @@ -113,6 +115,14 @@ void OnPeriodicUpdateImpl(void *self, u64 current_time_ns) policy->OnPeriodicUpdate(current_time_ns); } +template + requires std::derived_from +void RemoveTaskImpl(void *self, Thread *thread) +{ + const auto policy = static_cast(self); + policy->RemoveTask(thread); +} + template requires std::derived_from NODISCARD FAST_CALL Policy PreparePolicy(T *self) @@ -127,6 +137,7 @@ NODISCARD FAST_CALL Policy PreparePolicy(T *self) policy.cbs.validate_flags = ValidateThreadFlagsImpl; policy.cbs.on_thread_yield = OnThreadYieldImpl; policy.cbs.on_periodic_update = OnPeriodicUpdateImpl; + policy.cbs.remove_task = RemoveTaskImpl; return policy; } diff --git a/kernel/src/scheduling/process.hpp b/kernel/src/scheduling/process.hpp index 4d6f4dc5..8c89bbde 100644 --- a/kernel/src/scheduling/process.hpp +++ b/kernel/src/scheduling/process.hpp @@ -9,6 +9,7 @@ #include "hal/tasks.hpp" #include "io/pipe.hpp" #include "mem/types.hpp" +#include "wait_queue.hpp" namespace Mem { @@ -23,6 +24,8 @@ class FdTable; namespace Sched { +struct Thread; + struct PACK Pid { u16 id; u64 count : 48; @@ -36,6 +39,14 @@ struct PACK ProcessFlags { 
}; static_assert(sizeof(ProcessFlags) == 1); +enum class ProcessState : u64 { + kReady = 0, + kWaitingForJoin, + kTerminated, + kLast, +}; +static_assert(sizeof(ProcessState) == sizeof(u64)); + struct Process : hal::Process { static constexpr size_t kMaxNameLength = vfs::kMaxComponentSize; @@ -43,6 +54,12 @@ struct Process : hal::Process { char name[kMaxNameLength]; Pid pid; ProcessFlags flags; + Thread *threads; + u64 live_threads; + u64 threads_to_clean; + ProcessState state; + WaitQueue *wait_queue; + int status; /* Process resources */ Mem::VPtr address_space; diff --git a/kernel/src/scheduling/processes.cpp b/kernel/src/scheduling/processes.cpp index 74dbedcf..9c18c652 100644 --- a/kernel/src/scheduling/processes.cpp +++ b/kernel/src/scheduling/processes.cpp @@ -38,8 +38,18 @@ std::expected Sched::Processes::PrepareProcess() // Initialize standard I/O pipes // ---------------------------------------------------------- + // Allocate wait queue + const auto wait_queue = Mem::KNew>(); + if (!wait_queue) { + return std::unexpected(Error::OutOfMemory); + } + template_lib::BatchedScopeGuard wait_queue_guard(dismiss, [&]() { + Mem::KDelete(wait_queue.value()); + }); + process->wait_queue = wait_queue.value(); + // Create the process's file descriptor table - auto fd_table_ptr = Mem::KNew(); + const auto fd_table_ptr = Mem::KNew(); RET_UNEXPECTED_IF(!fd_table_ptr, Error::OutOfMemory); auto *fd_table = process->fd_table = *fd_table_ptr; @@ -80,10 +90,12 @@ void Sched::Processes::CleanupProcess(Process *process) { ASSERT_NOT_NULL(process); - VideoModule::Get().GetWindowManager().ReleaseFocus(process->pid); - auto fd_table = process->fd_table; ASSERT_NOT_NULL(fd_table); Mem::KDelete(fd_table); + + ASSERT_NOT_NULL(process->wait_queue); + ASSERT_TRUE(process->wait_queue->IsEmpty()); + Mem::KDelete(process->wait_queue); } diff --git a/kernel/src/scheduling/processes.hpp b/kernel/src/scheduling/processes.hpp index 1266a5c0..55321c67 100644 --- 
a/kernel/src/scheduling/processes.hpp +++ b/kernel/src/scheduling/processes.hpp @@ -33,10 +33,9 @@ class Processes void CleanupProcess(Process *process); - NODISCARD FORCE_INLINE_F std::expected GetProcess(const Pid pid) + NODISCARD FORCE_INLINE_F std::expected GetProcess(const u32 id) { - const u16 id = pid.id; - auto ptr = processes_.Get(id); + auto ptr = processes_.Get(id); if (ptr == nullptr) { return std::unexpected(Error::ProcessNotFound); @@ -45,6 +44,11 @@ class Processes return ptr; } + NODISCARD FORCE_INLINE_F std::expected GetProcess(const Pid pid) + { + return GetProcess(pid.id); + } + NODISCARD FORCE_INLINE_F std::expected GetCurrentProcess() { const Pid pid = hardware::GetRunningPid(); @@ -61,6 +65,8 @@ class Processes CleanupProcess(processes_.Get(id)); processes_.Free(id); + + TRACE_INFO_SCHEDULING("Fully freed process with PID: %llu", pid); return {}; } diff --git a/kernel/src/scheduling/scheduler.cpp b/kernel/src/scheduling/scheduler.cpp index 858d71e2..4ff86ef0 100644 --- a/kernel/src/scheduling/scheduler.cpp +++ b/kernel/src/scheduling/scheduler.cpp @@ -30,11 +30,82 @@ Scheduler::Scheduler() policies_[static_cast(SchedulingPolicy::kUrgentTasks_PQ_P2)] = PreparePolicy(&policy2_); policies_[static_cast(SchedulingPolicy::kNormalTasks_MLFQ_P3)] = - PreparePolicy(&policy3_); + PreparePolicy(&policy3_); policies_[static_cast(SchedulingPolicy::kBackgroundTasks_RR_P4)] = PreparePolicy(&policy4_); } +void Scheduler::BlockOnWaitQueue(WaitQueue *wq) +{ + ASSERT_EQ(hardware::GetCoreLocalTcb()->state, ThreadState::kRunning); + ASSERT_NOT_NULL(wq); + + LocalCoreLock core_lock{}; + + if constexpr (FeatureEnabled) { + DebugTraceWaitQueue_(nullptr); + } + + wq->EnqueueLast(hardware::GetCoreLocalTcb()); + OnThreadYield_(hardware::GetCoreLocalTcb()); + hal::ContextSwitch(ScheduleAndUpdateThreads(true, ThreadState::kBlockedOnWaitQueue)); +} + +void Scheduler::ReleaseAndProcessAllBeforeProceeding( + WaitQueue *wq +) +{ + ASSERT_EQ(hardware::GetCoreLocalTcb()->state, 
ThreadState::kRunning); + ASSERT_NOT_NULL(wq); + + LocalCoreLock core_lock{}; + + while (!wq->IsEmpty()) { + /* Wake up all waiting processes before proceeding */ + + auto thread = wq->Dequeue(); + + if constexpr (FeatureEnabled) { + DebugTraceWaitQueue_(thread); + } + + hal::ContextSwitch(ScheduleAndUpdateThreads(false, ThreadState::kReady, thread)); + } +} + +void Scheduler::ReleaseAll(WaitQueue *wq) +{ + ASSERT_EQ(hardware::GetCoreLocalTcb()->state, ThreadState::kRunning); + ASSERT_NOT_NULL(wq); + + LocalCoreLock core_lock{}; + + while (!wq->IsEmpty()) { + auto thread = wq->Dequeue(); + thread->state = ThreadState::kReady; + AddReadyThread(thread); + } +} + +void Scheduler::RemoveThread(Thread *thread) +{ + ASSERT_NOT_NULL(thread); + ASSERT( + thread->state == ThreadState::kSleeping || thread->state == ThreadState::kReady || + thread->state == ThreadState::kBlockedOnWaitQueue + ); + + if (thread->state == ThreadState::kSleeping) { + ASSERT_TRUE(sleep_queue_.Contains(thread)); + sleep_queue_.Delete(thread); + } else if (thread->state == ThreadState::kReady) { + RemoveFromPolicy_(thread); + } else if (thread->state == ThreadState::kBlockedOnWaitQueue) { + using wq = WaitQueue; + wq::Remove(thread); + } +} + void Scheduler::InstallInterruptHandler() { HardwareModule::Get() @@ -74,7 +145,9 @@ Thread *Scheduler::Schedule() return nullptr; } -Thread *Scheduler::ScheduleAndUpdateThreads(const bool preempt, const ThreadState thread_state) +Thread *Scheduler::ScheduleAndUpdateThreads( + const bool preempt, const ThreadState thread_state, Thread *forced_next_thread +) { u64 min_time_ns = std::numeric_limits::max(); @@ -90,7 +163,23 @@ Thread *Scheduler::ScheduleAndUpdateThreads(const bool preempt, const ThreadStat // 3. 
Scheduling new thread if needed and update structs Thread *thread{}; u64 preempt_time_ns = GetPreemptTime_(hardware::GetCoreLocalTcb()); - if (preempt || force_preempt || ShouldPreempt_(preempt_time_ns)) { + if (forced_next_thread) { + /* Forced picked next thread */ + + forced_next_thread->state = ThreadState::kReady; + preempt_time_ns = GetPreemptTime_(forced_next_thread); + forced_next_thread->state = ThreadState::kRunning; + + ASSERT_NOT_NULL(hardware::GetCoreLocalTcb()); + ASSERT_EQ(hardware::GetCoreLocalTcb()->state, ThreadState::kRunning); + hardware::GetCoreLocalTcb()->state = thread_state; + + if (thread_state == ThreadState::kReady) { + AddReadyThread(hardware::GetCoreLocalTcb()); + } + + thread = forced_next_thread; + } else if (preempt || force_preempt || ShouldPreempt_(preempt_time_ns)) { auto next_thread = Schedule(); if (!next_thread && thread_state == ThreadState::kReady) { @@ -130,6 +219,10 @@ Thread *Scheduler::ScheduleAndUpdateThreads(const bool preempt, const ThreadStat ASSERT_GT(min_time_ns, kMinDelta); SetupNextTimeEvent_(min_time_ns); + if (FeatureEnabled && thread) { + DebugTraceContextSwitch_(thread); + } + return thread; } @@ -142,6 +235,10 @@ void Scheduler::Yield() const auto thread = ScheduleAndUpdateThreads(true, ThreadState::kReady); if (thread == nullptr) { + if constexpr (FeatureEnabled) { + DebugTraceOmitYield_(); + } + return; } @@ -214,6 +311,57 @@ void Scheduler::NanoSleepUntil(const u64 systime_ns) } } +void Scheduler::DebugTraceContextSwitch_(Thread *thread) +{ + ASSERT_NOT_NULL(thread); + + const auto curr_process = + SchedulingModule::Get().GetProcesses().GetProcess(hardware::GetCoreLocalTcb()->owner); + ASSERT_TRUE(curr_process); + + const auto next_process = SchedulingModule::Get().GetProcesses().GetProcess(thread->owner); + ASSERT_TRUE(next_process); + + DEBUG_FREQ_INFO_SCHEDULING( + "Switching context from TID: %llu (%s) to TID: %llu (%s)", hardware::GetCoreLocalTcb()->tid, + curr_process.value()->name, thread->tid, 
next_process.value()->name + ); +} + +void Scheduler::DebugTraceOmitYield_() +{ + const auto process = + SchedulingModule::Get().GetProcesses().GetProcess(hardware::GetCoreLocalTcb()->owner); + ASSERT_TRUE(process); + + DEBUG_FREQ_INFO_SCHEDULING( + "Omitting yield for TID: %llu (%s)", hardware::GetCoreLocalTcb()->tid, process.value()->name + ); +} + +void Scheduler::DebugTraceWaitQueue_(const Thread *popped_thread) +{ + if (popped_thread) { + const auto process = + SchedulingModule::Get().GetProcesses().GetProcess(popped_thread->owner); + ASSERT_TRUE(process); + + DEBUG_FREQ_INFO_SCHEDULING( + "Woken up thread from wait queue with TID: %llu (%s)", popped_thread->tid, + process.value()->name + ); + } else { + const auto process = + SchedulingModule::Get().GetProcesses().GetProcess(hardware::GetCoreLocalTcb()->owner); + ASSERT_TRUE(process); + + DEBUG_FREQ_INFO_SCHEDULING( + "Placing thread with TID: %llu (%s) on waiting queue", hardware::GetCoreLocalTcb()->tid, + process.value()->name + ); + } +} + void Scheduler::SetupNextTimeEvent_(const u64 time_ns) { ASSERT_TRUE(HardwareModule::Get().GetEventClockRegistry().IsSelectedPicked()); @@ -221,6 +369,7 @@ void Scheduler::SetupNextTimeEvent_(const u64 time_ns) auto &event_clock = HardwareModule::Get().GetEventClockRegistry().GetSelected(); ASSERT_TRUE(event_clock.flags.IsCoreLocal, "Scheduler supports only core local event clocks"); + event_clock.cbs.set_periodic(&event_clock); event_clock.cbs.next_event(&event_clock, time_ns); } diff --git a/kernel/src/scheduling/scheduler.hpp b/kernel/src/scheduling/scheduler.hpp index 6ca0d626..573fdd9b 100644 --- a/kernel/src/scheduling/scheduler.hpp +++ b/kernel/src/scheduling/scheduler.hpp @@ -16,6 +16,7 @@ #include "policies/mlfq_policy.hpp" #include "policies/priority_queue_policy.hpp" #include "policies/round_robin_policy.hpp" +#include "wait_queue.hpp" namespace Sched { @@ -36,13 +37,23 @@ class Scheduler // Class interaction // ------------------------------ + void 
BlockOnWaitQueue(WaitQueue *wq); + + void ReleaseAndProcessAllBeforeProceeding(WaitQueue *wq); + + void ReleaseAll(WaitQueue *wq); + + void RemoveThread(Thread *thread); + void InstallInterruptHandler(); void AddReadyThread(Thread *thread); NODISCARD Thread *Schedule(); - NODISCARD Thread *ScheduleAndUpdateThreads(bool preempt, ThreadState thread_state); + NODISCARD Thread *ScheduleAndUpdateThreads( + bool preempt, ThreadState thread_state, Thread *next_thread = nullptr + ); void Yield(); @@ -75,6 +86,12 @@ class Scheduler // ------------------------------ protected: + void DebugTraceContextSwitch_(Thread *thread); + + void DebugTraceOmitYield_(); + + void DebugTraceWaitQueue_(const Thread *popped_thread); + void PrepareNextTimerInterruptBeforeSwitchUnguarded_(Thread *next_thread); NODISCARD FORCE_INLINE_F const Policy &GetPolicy_(const ThreadFlags flags) const @@ -96,6 +113,12 @@ class Scheduler return policy.cbs.get_preempt_time(policy.self, thread); } + FORCE_INLINE_F void RemoveFromPolicy_(Thread *thread) const + { + const auto &policy = GetPolicy_(thread); + policy.cbs.remove_task(policy.self, thread); + } + void SetupNextTimeEvent_(u64 time_ns); NODISCARD FORCE_INLINE_F bool ShouldPreempt_(Thread *thread) const @@ -140,8 +163,8 @@ class Scheduler // ------------------------------ // Sleeping queue - data_structures::IntrusiveRBTree sleep_queue_{}; - using HookT = data_structures::IntrusiveRBTree::HookT; + data_structures::IntrusiveRBTree sleep_queue_{}; + using HookT = data_structures::IntrusiveRBTree::HookT; // Locking hal::Spinlock spinlock_{}; @@ -150,7 +173,7 @@ class Scheduler PriorityQueuePolicy policy0_{}; // kUberTask_PQ_P0 PriorityQueuePolicy policy1_{}; // kDrivers_PQ_P1 PriorityQueuePolicy policy2_{}; // kUrgentTasks_PQ_P2 - MLFQPolicy policy3_{}; // kNormalTasks_MLFQ_P3 + RoundRobinPolicy policy3_{}; // kNormalTasks_MLFQ_P3 RoundRobinPolicy policy4_{}; // kBackgroundTasks_RR_P4 // Abstraction diff --git a/kernel/src/scheduling/task_mgr.cpp 
b/kernel/src/scheduling/task_mgr.cpp index 343b741b..2559b808 100644 --- a/kernel/src/scheduling/task_mgr.cpp +++ b/kernel/src/scheduling/task_mgr.cpp @@ -28,27 +28,18 @@ void TaskMgr::InitializeMultitasking() SchedulingModule::Get().GetScheduler().InstallInterruptHandler(); // Spawn trace dumper - const auto result = + const auto result0 = SpawnKernelProcess("kworker-trace-dumper", {}, PrepareKThreadTask(TraceDumperMain)); - R_ASSERT_TRUE(static_cast(result), "Failed to spawn trace dumper process..."); - - // Spawn 3 Kernel Workers - static constexpr size_t kNumKWorkers = 3; - for (size_t i = 0; i < kNumKWorkers; ++i) { - char name[] = "kworker-0"; + R_ASSERT_TRUE(static_cast(result0), "Failed to spawn trace dumper process..."); - name[sizeof(name) - 2] = static_cast('0' + i); + // Spawn thread ripper + const auto result1 = + SpawnKernelProcess("kworker-thread-ripper", {}, PrepareKThreadTask(ThreadRipperMain)); + R_ASSERT_TRUE(static_cast(result1), "Failed to spawn thread ripper..."); - auto result = SpawnKernelProcess(name, {}, PrepareKThreadTask(KWorkerMain)); - R_ASSERT_TRUE( - static_cast(result), - "Failed to spawn kernel workers. 
Not enough resources for the system" - ); - - TRACE_INFO_SCHEDULING( - "Created initial Kernel Worker process with Pid: %llu", result.value().get<0>() - ); - } + const auto result2 = + SpawnKernelProcess("kworker-process-ripper", {}, PrepareKThreadTask(ProcessRipperMain)); + R_ASSERT_TRUE(static_cast(result2), "Failed to spawn process ripper..."); } std::expected TaskMgr::SpawnEmptyProcess(const char *name, const ProcessFlags flags) @@ -94,7 +85,7 @@ std::expected TaskMgr::SpawnEmptyProcess(const char *name, const Pro } process.value()->address_space = address_space; - process_guard.dismiss(); + process_guard.Dismiss(); return process.value()->pid; } @@ -128,9 +119,13 @@ std::expected TaskMgr::SpawnThread( } std::expected TaskMgr::SpawnThread( - const Process *process, const ThreadFlags flags, const Task &task + Process *process, const ThreadFlags flags, const Task &task ) { + ASSERT_NOT_NULL(process); + ASSERT_NEQ(process->state, ProcessState::kTerminated); + ASSERT_NEQ(process->state, ProcessState::kWaitingForJoin); + LocalCoreLock local_lock{}; bool dismiss = false; @@ -192,6 +187,13 @@ std::expected TaskMgr::SpawnThread( } hal::InitializeThreadStack(&thread.value()->kernel_stack, task); + + process->live_threads++; + data_structures::FronIntrusiveDoubleListView( + process->threads + ) + .PushFront(thread.value()); + dismiss = true; return thread.value(); } @@ -226,7 +228,7 @@ std::expected, Error> TaskMgr::SpawnKernelProcess( process.value(), name, thread.value()->tid ); - process_guard.dismiss(); + process_guard.Dismiss(); return std::make_tuple(process.value(), thread.value()->tid); } @@ -264,7 +266,7 @@ std::expected TaskMgr::ExecuteElf64(const Pid pid, const char *path) thread.value()->tid ); - process_guard.dismiss(); + process_guard.Dismiss(); return thread.value()->tid; } @@ -289,17 +291,190 @@ std::expected, Error> TaskMgr::ExecuteElf64( auto thread = ExecuteElf64(process.value(), path); RET_UNEXPECTED_IF_ERR(thread); - 
VideoModule::Get().GetWindowManager().SetFocus(process.value()); - - process_guard.dismiss(); + process_guard.Dismiss(); return std::make_tuple(process.value(), thread.value()); } -std::expected TaskMgr::CommitMurder(Pid) { R_FAIL_ALWAYS("NOT IMPLEMENTED"); } +std::expected TaskMgr::CommitMurder(const Tid tid) +{ + if (tid == hardware::GetCoreLocalTcb()->tid) { + ThreadExit(nullptr); + } + + LocalCoreLock local_lock{}; + + DEBUG_INFO_SCHEDULING( + "Thread with TID: %llu commiting murder on TID: %llu", hardware::GetCoreLocalTcb()->tid, tid + ); + + const auto thread = SchedulingModule::Get().GetThreads().GetThread(tid); + RET_UNEXPECTED_IF_ERR(thread); + + if (thread.value()->state == ThreadState::kTerminated) { + return {}; + } + + // 1. Remove from process thread list + const auto process = SchedulingModule::Get().GetProcesses().GetProcess(thread.value()->owner); + RET_UNEXPECTED_IF_ERR(process); + + data_structures::FronIntrusiveDoubleListView( + process.value()->threads + ) + .Remove(thread.value()); + + // 2. Remove thread from scheduler + if (thread.value()->state != ThreadState::kWaitingForJoin) { + SchedulingModule::Get().GetScheduler().RemoveThread(thread.value()); + } + + // 3. Wake up all joining threads + SchedulingModule::Get().GetScheduler().ReleaseAndProcessAllBeforeProceeding( + thread.value()->wait_queue + ); + + // 4. 
Mark for removal if not marked by any of waiting + if (thread.value()->state != ThreadState::kTerminated) { + thread.value()->state = ThreadState::kTerminated; + threads_to_clean_.Push(thread.value()->tid.id); + process.value()->threads_to_clean++; + process.value()->live_threads--; + } + + return {}; +} + +std::expected TaskMgr::CommitMurder(const Pid pid) +{ + if (hardware::GetRunningPid() == pid) { + ExitProcess(-2); + } + + LocalCoreLock local_lock{}; + const auto process = SchedulingModule::Get().GetProcesses().GetProcess(pid); + RET_UNEXPECTED_IF_ERR(process); + + const auto current_process = SchedulingModule::Get().GetProcesses().GetCurrentProcess(); + ASSERT_TRUE(static_cast(current_process)); + + if (!current_process.value()->flags.KernelSpaceOnly && process.value()->flags.KernelSpaceOnly) { + TRACE_INFO_SCHEDULING( + "Userspace process (PID: %llu) tried to murder kernel process (PID: %llu)", + current_process.value()->pid, process.value()->pid + ); + return std::unexpected(Error::NoPermission); + } + + DEBUG_INFO_SCHEDULING( + "Process with PID: %llu is commiting murder on PID: %llu", hardware::GetRunningPid(), pid + ); + + // 1. Kill all running threads + data_structures::FronIntrusiveDoubleListView threads( + process.value()->threads + ); + while (!threads.IsEmpty()) { + const auto result = CommitMurder(threads.Front()->tid); + ASSERT_TRUE(static_cast(result)); + } + + // 2. Wake up all joining threads + SchedulingModule::Get().GetScheduler().ReleaseAndProcessAllBeforeProceeding( + process.value()->wait_queue + ); + + // 3. 
Mark target as terminated if not waited by no one + if (process.value()->state != ProcessState::kTerminated) { + process.value()->state = ProcessState::kTerminated; + process.value()->status = -2; + processes_to_clean_.Push(process.value()->pid.id); + } + + return {}; +} + +void TaskMgr::CommitSuicide() +{ + LocalCoreLock lock{}; + + const auto process = + SchedulingModule::Get().GetProcesses().GetProcess(hardware::GetCoreLocalTcb()->owner); + ASSERT_TRUE(static_cast(process)); + + DEBUG_INFO_SCHEDULING("Process with PID: %llu exiting...", process.value()->pid); + + // 1. Kill all running threads except us + data_structures::FronIntrusiveDoubleListView threads( + process.value()->threads + ); + while (!threads.IsEmpty()) { + const auto thread = threads.PopFront(); + + if (thread == hardware::GetCoreLocalTcb()) { + continue; + } + + const auto result = CommitMurder(thread->tid); + ASSERT_TRUE(static_cast(result)); + } + threads.PushFront(hardware::GetCoreLocalTcb()); -void TaskMgr::CommitSuicide() { R_FAIL_ALWAYS("CommitSuicide NOT IMPLEMENTED"); } + // 2. Mark self as waiting + process.value()->state = ProcessState::kWaitingForJoin; + process.value()->status = -1; -std::expected TaskMgr::ExitProcess() { R_FAIL_ALWAYS("NOT IMPLEMENTED"); } + // 3. Wake up all joining threads + SchedulingModule::Get().GetScheduler().ReleaseAndProcessAllBeforeProceeding( + process.value()->wait_queue + ); + + // 4. Mark to delete if not joined + if (process.value()->state != ProcessState::kTerminated) { + process.value()->state = ProcessState::kTerminated; + process.value()->status = -1; + processes_to_clean_.Push(process.value()->pid.id); + } + + // 5. 
Kill current thread + ThreadExit(nullptr); +} + +void TaskMgr::ExitProcess(const int status) +{ + LocalCoreLock lock{}; + + const auto process = + SchedulingModule::Get().GetProcesses().GetProcess(hardware::GetCoreLocalTcb()->owner); + ASSERT_TRUE(static_cast(process)); + + DEBUG_INFO_SCHEDULING("Process with PID: %llu exiting...", process.value()->pid); + + // 1. Kill all running threads except us + data_structures::FronIntrusiveDoubleListView threads( + process.value()->threads + ); + while (!threads.IsEmpty()) { + const auto thread = threads.PopFront(); + + if (thread == hardware::GetCoreLocalTcb()) { + continue; + } + + const auto result = CommitMurder(thread->tid); + ASSERT_TRUE(static_cast(result)); + } + threads.PushFront(hardware::GetCoreLocalTcb()); + + // 2. Mark self as waiting + process.value()->state = ProcessState::kWaitingForJoin; + process.value()->status = status; + + // 3. Wake up all joining threads + SchedulingModule::Get().GetScheduler().ReleaseAll(process.value()->wait_queue); + + // 4. Kill current thread + ThreadExit(nullptr); +} std::expected TaskMgr::CreateThread(const ThreadFlags flags, const Task &task) { @@ -335,6 +510,11 @@ std::expected TaskMgr::DetachThread(const Tid tid) thread.value()->state = ThreadState::kTerminated; threads_to_clean_.Push(thread.value()->tid.id); + auto process = SchedulingModule::Get().GetProcesses().GetProcess(thread.value()->owner); + ASSERT_TRUE(static_cast(process)); + process.value()->threads_to_clean++; + process.value()->live_threads--; + return {}; } @@ -348,13 +528,33 @@ void TaskMgr::ThreadExit(void *retval) ASSERT_NOT_NULL(tcb); ASSERT_EQ(tcb->state, ThreadState::kRunning); + DEBUG_INFO_SCHEDULING("Thread with tid: %llu exiting...", tcb->tid); + tcb->retval = retval; LocalCoreLock core_lock{}; + + // 1. 
Remove from active thread list + const auto process = SchedulingModule::Get().GetProcesses().GetProcess(tcb->owner); + ASSERT_TRUE(static_cast(process)); + data_structures::FronIntrusiveDoubleListView( + process.value()->threads + ) + .Remove(tcb); + + // 2. Wake up all joining threads after exiting this one + SchedulingModule::Get().GetScheduler().ReleaseAll(tcb->wait_queue); + + // 3. Update thread state ThreadState state{}; - if (tcb->flags.detached) { + if (tcb->flags.detached || process.value()->live_threads == 1) { + TRACE_FATAL_ACPI("SELF CLEAN"); + ASSERT_NOT_ZERO(process.value()->live_threads); + state = ThreadState::kTerminated; threads_to_clean_.Push(tcb->tid.id); + process.value()->threads_to_clean++; + process.value()->live_threads--; } else { state = ThreadState::kWaitingForJoin; } @@ -373,27 +573,28 @@ std::expected TaskMgr::JoinThread(const Tid tid) LocalCoreLock lock{}; auto thread = SchedulingModule::Get().GetThreads().GetThread(tid); + RET_UNEXPECTED_IF_ERR(thread); - if (!thread) { - return std::unexpected(thread.error()); - } - - while (thread.value()->state != ThreadState::kWaitingForJoin || - thread.value()->state != ThreadState::kTerminated) { - static constexpr u64 kWaitTime = 50'000'000; // 50 ms - - SchedulingModule::Get().GetScheduler().NanoSleepUntil( - TimingModule::Get().GetSystemTime().ReadLifeTimeNs() + kWaitTime - ); + if (thread.value()->state != ThreadState::kWaitingForJoin && + thread.value()->state != ThreadState::kTerminated) { + SchedulingModule::Get().GetScheduler().BlockOnWaitQueue(thread.value()->wait_queue); } if (thread.value()->state == ThreadState::kWaitingForJoin) { thread.value()->state = ThreadState::kTerminated; threads_to_clean_.Push(thread.value()->tid.id); + const auto process = + SchedulingModule::Get().GetProcesses().GetProcess(thread.value()->owner); + ASSERT_TRUE(static_cast(process)); + + process.value()->threads_to_clean++; + process.value()->live_threads--; + + return {thread.value()->retval}; } + 
ASSERT_EQ(thread.value()->state, ThreadState::kTerminated); return std::unexpected(Error::AlreadyJoined); } @@ -411,6 +612,96 @@ std::expected TaskMgr::Exec(const char *path) return std::unexpected(result.error()); } - return std::get(result.value()); + const auto pid = std::get(result.value()); + return pid; +} + +std::expected TaskMgr::JoinProcess(const Pid pid) +{ + if (hardware::GetRunningPid() == pid) { + return std::unexpected(Error::SelfJoin); + } + + LocalCoreLock lock{}; + const auto process = SchedulingModule::Get().GetProcesses().GetProcess(pid); + RET_UNEXPECTED_IF_ERR(process); + + TRACE_FATAL_ACPI("WAITING ON PROC: %llu (%s)", process.value()->pid, process.value()->name); + if (process.value()->state != ProcessState::kWaitingForJoin && + process.value()->state != ProcessState::kTerminated) { + SchedulingModule::Get().GetScheduler().BlockOnWaitQueue(process.value()->wait_queue); + } + + if (process.value()->state == ProcessState::kWaitingForJoin) { + process.value()->state = ProcessState::kTerminated; + processes_to_clean_.Push(process.value()->pid.id); + + return {process.value()->status}; + } + + ASSERT_EQ(process.value()->state, ProcessState::kTerminated); + return std::unexpected(Error::AlreadyJoined); + } + +void TaskMgr::ThreadRipperWork() +{ + while (threads_to_clean_.Size() != 0) { + LocalCoreLock lock{}; + ThreadRipperClean_(threads_to_clean_.Pop()); + } +} + +void TaskMgr::ProcessRipperWork() +{ + while (processes_to_clean_.Size() != 0) { + LocalCoreLock lock{}; + ProcessRipperClean_(processes_to_clean_.Pop()); + } +} + +void TaskMgr::ThreadRipperClean_(const u32 id) +{ + const auto thread = SchedulingModule::Get().GetThreads().GetThread(id); + ASSERT_NOT_NULL(thread); + + DEBUG_INFO_SCHEDULING("ThreadRipper cleaning: %llu", thread.value()->tid); + + // Mem::KFreeAligned(thread.value()->kernel_stack_bottom); // TODO + + if (thread.value()->user_stack != nullptr) { + // TODO: remove + } + + const auto process =
SchedulingModule::Get().GetProcesses().GetProcess(thread.value()->owner); + ASSERT_NOT_ZERO(process.value()->threads_to_clean); + process.value()->threads_to_clean--; + + const auto result = SchedulingModule::Get().GetThreads().Free(thread.value()->tid); + ASSERT_TRUE(static_cast(result)); + + DEBUG_INFO_SCHEDULING("ThreadRipper cleaned: %llu", thread.value()->tid); + trace::TraceDumperTask(); +} + +void TaskMgr::ProcessRipperClean_(const u32 id) +{ + const auto process = SchedulingModule::Get().GetProcesses().GetProcess(id); + ASSERT_TRUE(static_cast(process)); + + DEBUG_INFO_SCHEDULING("ProcessRipper cleaning: %llu", process.value()->pid); + + while (process.value()->threads_to_clean != 0 && process.value()->live_threads != 0) { + SchedulingModule::Get().GetScheduler().Yield(); + } + + const auto result = + MemoryModule::Get().GetVmm().DestroyUserAddrSpace(process.value()->address_space); + ASSERT_TRUE(static_cast(result)); + + const auto proc_result = SchedulingModule::Get().GetProcesses().Free(process.value()->pid); + ASSERT_TRUE(static_cast(proc_result)); + + DEBUG_INFO_SCHEDULING("ProcessRipper cleaned: %llu", process.value()->pid); } } // namespace Sched +; diff --git a/kernel/src/scheduling/task_mgr.hpp b/kernel/src/scheduling/task_mgr.hpp index afe4e92b..26657658 100644 --- a/kernel/src/scheduling/task_mgr.hpp +++ b/kernel/src/scheduling/task_mgr.hpp @@ -37,7 +37,7 @@ class TaskMgr ); NODISCARD std::expected SpawnThread( - const Process *process, ThreadFlags flags, const Task &task + Process *process, ThreadFlags flags, const Task &task ); NODISCARD std::expected, Error> SpawnKernelProcess( @@ -60,11 +60,13 @@ class TaskMgr // Syscalls // ------------------------------ + NODISCARD std::expected CommitMurder(Tid tid); + NODISCARD std::expected CommitMurder(Pid pid); void CommitSuicide(); - NODISCARD std::expected ExitProcess(); + void ExitProcess(int status); NODISCARD std::expected CreateThread(ThreadFlags flags, const Task &task); @@ -80,16 +82,30 @@ 
class TaskMgr NODISCARD std::expected Exec(const char *path); + NODISCARD std::expected JoinProcess(Pid pid); + + // ------------------------------ + // Cleanups + // ------------------------------ + + void ThreadRipperWork(); + + void ProcessRipperWork(); + // ------------------------------ // Private methods // ------------------------------ protected: + void ThreadRipperClean_(u32 id); + void ProcessRipperClean_(u32 id); + // ------------------------------ // Class fields // ------------------------------ AtomicArraySingleTypeStaticStack threads_to_clean_{}; + AtomicArraySingleTypeStaticStack processes_to_clean_{}; }; } // namespace Sched diff --git a/kernel/src/scheduling/thread.hpp b/kernel/src/scheduling/thread.hpp index d18119fa..949b8485 100644 --- a/kernel/src/scheduling/thread.hpp +++ b/kernel/src/scheduling/thread.hpp @@ -10,6 +10,7 @@ #include "hal/tasks.hpp" #include "policy.hpp" #include "process.hpp" +#include "wait_queue.hpp" namespace Sched { @@ -52,21 +53,32 @@ enum class ThreadState : u64 { kReady = 0, kRunning, kSleeping, + kBlockedOnWaitQueue, kWaitingForJoin, kTerminated, kLast, }; static_assert(sizeof(ThreadState) == sizeof(u64)); -struct Thread : data_structures::IntrusiveRbNode, - data_structures::IntrusiveRbNode, - data_structures::IntrusiveListNode { +static constexpr int kSchedulingIntrusiveLevel = 0; +static constexpr int kSleepingIntrusiveLevel = 1; +static constexpr int kProcessListIntrusiveLevel = 2; +static constexpr int kWaitQueueIntrusiveLevel = 3; + +struct Thread : data_structures::IntrusiveRbNode, + data_structures::IntrusiveRbNode, + data_structures::IntrusiveListNode, + data_structures::IntrusiveListNode, + data_structures::IntrusiveDoubleListNode, + data_structures::IntrusiveDoubleListNode, + data_structures::IntrusiveDoubleListNode { /* Management */ Tid tid; Pid owner; ThreadFlags flags; ThreadState state; void *retval; + WaitQueue *wait_queue; /* Thread resources */ void *kernel_stack; diff --git 
a/kernel/src/scheduling/threads.cpp b/kernel/src/scheduling/threads.cpp index bbf2f654..4d1c681b 100644 --- a/kernel/src/scheduling/threads.cpp +++ b/kernel/src/scheduling/threads.cpp @@ -10,12 +10,16 @@ #include "modules/timing.hpp" #include "scheduling/local_lock.hpp" #include "sys/loader.hpp" +#include "template/scope_guard.hpp" namespace Sched { std::expected Threads::PrepareThread() { const size_t idx = threads_.Allocate(); + template_lib::ScopeGuard thread_guard([&]() { + threads_.Free(idx); + }); if (idx == std::numeric_limits::max()) { return std::unexpected(Error::ExceededMaxAllowedInstances); @@ -25,9 +29,35 @@ std::expected Threads::PrepareThread() ASSERT_LE(idx, std::numeric_limits::max()); thread->tid = AssignNewTid(static_cast(idx)); + // Allocate wait queue + const auto wait_queue = Mem::KNew>(); + if (!wait_queue) { + return std::unexpected(Error::OutOfMemory); + } + thread->wait_queue = wait_queue.value(); + + thread_guard.Dismiss(); return thread; } +std::expected Threads::Free(const Tid tid) +{ + const u16 id = tid.id; + + const auto thread = threads_.Get(id); + if (thread == nullptr) { + return std::unexpected(Error::ThreadNotFound); + } + + ASSERT_NOT_NULL(thread->wait_queue); + ASSERT_TRUE(thread->wait_queue->IsEmpty()); + Mem::KDelete(thread->wait_queue); + threads_.Free(id); + + TRACE_INFO_SCHEDULING("Fully freed thread with TID: %llu", tid); + return {}; +} + void KThreadEntrypoint(void (*f)()) { f(); diff --git a/kernel/src/scheduling/threads.hpp b/kernel/src/scheduling/threads.hpp index eaa672e3..2323887f 100644 --- a/kernel/src/scheduling/threads.hpp +++ b/kernel/src/scheduling/threads.hpp @@ -29,10 +29,9 @@ class Threads std::expected PrepareThread(); - NODISCARD FORCE_INLINE_F std::expected GetThread(const Tid tid) + NODISCARD FORCE_INLINE_F std::expected GetThread(const u32 id) { - const u16 id = tid.id; - auto ptr = threads_.Get(id); + auto ptr = threads_.Get(id); if (ptr == nullptr) { return std::unexpected(Error::ThreadNotFound); 
@@ -41,18 +40,13 @@ class Threads return ptr; } - FORCE_INLINE_F std::expected Free(const Tid tid) + NODISCARD FORCE_INLINE_F std::expected GetThread(const Tid tid) { - const u16 id = tid.id; - - if (threads_.Get(id) == nullptr) { - return std::unexpected(Error::ThreadNotFound); - } - - threads_.Free(id); - return {}; + return GetThread(tid.id); } + NODISCARD std::expected Free(Tid tid); + // ------------------------------ // Private methods // ------------------------------ diff --git a/kernel/src/scheduling/wait_queue.hpp b/kernel/src/scheduling/wait_queue.hpp new file mode 100644 index 00000000..eef55f25 --- /dev/null +++ b/kernel/src/scheduling/wait_queue.hpp @@ -0,0 +1,89 @@ +#ifndef KERNEL_SRC_SCHEDULING_WAIT_QUEUE_HPP_ +#define KERNEL_SRC_SCHEDULING_WAIT_QUEUE_HPP_ + +#include +#include +#include