Make Scheduler a singleton
This commit is contained in:
@@ -82,7 +82,7 @@ void Core::Run(float volumeL, float volumeR) {
 
     cycles += taken;
     frameCycles += taken;
-    scheduler.Tick(taken, mem);
+    Scheduler::GetInstance().Tick(taken, mem);
   }
 
   cycles -= mmio.vi.cyclesPerHalfline;
@@ -93,7 +93,7 @@ void Core::Run(float volumeL, float volumeR) {
     }
 
     mmio.ai.Step(frameCycles, volumeL, volumeR);
-    scheduler.Tick(frameCycles, mem);
+    Scheduler::GetInstance().Tick(frameCycles, mem);
   }
 }
 
@@ -2,8 +2,6 @@
 #include <core/Mem.hpp>
 #include <core/registers/Registers.hpp>
 
-Scheduler scheduler;
-
 void Scheduler::EnqueueRelative(const u64 t, const EventType type) { EnqueueAbsolute(t + ticks, type); }
 
 void Scheduler::EnqueueAbsolute(const u64 t, const EventType type) { events.push({t, type}); }
@@ -35,6 +35,11 @@ struct IterableEvents {
 struct Scheduler {
   Scheduler() { EnqueueAbsolute(std::numeric_limits<u64>::max(), IMPOSSIBLE); }
 
+  static Scheduler &GetInstance() {
+    static Scheduler instance;
+    return instance;
+  }
+
   void EnqueueRelative(u64, EventType);
   void EnqueueAbsolute(u64, EventType);
   [[nodiscard]] u64 Remove(EventType) const;
@@ -44,5 +49,3 @@ struct Scheduler {
   u64 ticks = 0;
   u8 index = 0;
 };
-
-extern Scheduler scheduler;
@@ -14,6 +14,7 @@ Mem::Mem(Registers &regs, ParallelRDP &parallel, JIT *jit) : mmio(*this, regs, p
|
||||
|
||||
void Mem::Reset() {
|
||||
std::ranges::fill(rom.cart, 0);
|
||||
std::ranges::fill(isviewer, 0);
|
||||
flash.Reset();
|
||||
if (saveData.is_mapped()) {
|
||||
std::error_code error;
|
||||
|
||||
@@ -31,7 +31,7 @@ bool PI::WriteLatch(u32 value) {
   } else {
     ioBusy = true;
     latch = value;
-    scheduler.EnqueueRelative(100, PI_BUS_WRITE_COMPLETE);
+    Scheduler::GetInstance().EnqueueRelative(100, PI_BUS_WRITE_COMPLETE);
     return true;
   }
 }
@@ -39,7 +39,7 @@ bool PI::WriteLatch(u32 value) {
 bool PI::ReadLatch() {
   if (ioBusy) [[unlikely]] {
     ioBusy = false;
-    regs.CpuStall(scheduler.Remove(PI_BUS_WRITE_COMPLETE));
+    regs.CpuStall(Scheduler::GetInstance().Remove(PI_BUS_WRITE_COMPLETE));
     return false;
   }
   return true;
@@ -506,7 +506,7 @@ void PI::DMA<false>() {
     cartAddr += 1;
 
   dmaBusy = true;
-  scheduler.EnqueueRelative(AccessTiming(GetDomain(cartAddr), rdLen), PI_DMA_COMPLETE);
+  Scheduler::GetInstance().EnqueueRelative(AccessTiming(GetDomain(cartAddr), rdLen), PI_DMA_COMPLETE);
 }
 
 // cart -> rdram
@@ -529,7 +529,7 @@ void PI::DMA<true>() {
     cartAddr += 1;
 
   dmaBusy = true;
-  scheduler.EnqueueRelative(AccessTiming(GetDomain(cartAddr), len), PI_DMA_COMPLETE);
+  Scheduler::GetInstance().EnqueueRelative(AccessTiming(GetDomain(cartAddr), len), PI_DMA_COMPLETE);
 }
 
 void PI::Write(u32 addr, u32 val) {
@@ -73,13 +73,13 @@ void SI::Write(u32 addr, u32 val) {
       pifAddr = val & 0x1FFFFFFF;
       status.dmaBusy = true;
       toDram = true;
-      scheduler.EnqueueRelative(SI_DMA_DELAY, SI_DMA);
+      Scheduler::GetInstance().EnqueueRelative(SI_DMA_DELAY, SI_DMA);
       break;
     case 0x04800010:
       pifAddr = val & 0x1FFFFFFF;
       status.dmaBusy = true;
       toDram = false;
-      scheduler.EnqueueRelative(SI_DMA_DELAY, SI_DMA);
+      Scheduler::GetInstance().EnqueueRelative(SI_DMA_DELAY, SI_DMA);
       break;
     case 0x04800018:
       mem.mmio.mi.InterruptLower(MI::Interrupt::SI);
Reference in New Issue
Block a user