Diffstat (limited to 'thirdparty/thorvg/src/renderer/tvgTaskScheduler.cpp')
-rw-r--r--  thirdparty/thorvg/src/renderer/tvgTaskScheduler.cpp | 112
1 file changed, 69 insertions(+), 43 deletions(-)
diff --git a/thirdparty/thorvg/src/renderer/tvgTaskScheduler.cpp b/thirdparty/thorvg/src/renderer/tvgTaskScheduler.cpp
index e3887c60fc..68a3dbe97a 100644
--- a/thirdparty/thorvg/src/renderer/tvgTaskScheduler.cpp
+++ b/thirdparty/thorvg/src/renderer/tvgTaskScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 - 2023 the ThorVG project. All rights reserved.
+ * Copyright (c) 2020 - 2024 the ThorVG project. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -20,21 +20,30 @@
* SOFTWARE.
*/
-#include <deque>
-#include <thread>
-#include <vector>
-#include <atomic>
-#include <condition_variable>
+#include "tvgArray.h"
+#include "tvgInlist.h"
#include "tvgTaskScheduler.h"
+#ifdef THORVG_THREAD_SUPPORT
+ #include <thread>
+ #include <atomic>
+#endif
+
/************************************************************************/
/* Internal Class Implementation */
/************************************************************************/
namespace tvg {
+struct TaskSchedulerImpl;
+static TaskSchedulerImpl* inst = nullptr;
+
+#ifdef THORVG_THREAD_SUPPORT
+
+static thread_local bool _async = true;
+
struct TaskQueue {
- deque<Task*> taskDeque;
+ Inlist<Task> taskDeque;
mutex mtx;
condition_variable ready;
bool done = false;
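The hunk above swaps std::deque for ThorVG's intrusive Inlist and pulls in std::thread/std::atomic only when THORVG_THREAD_SUPPORT is defined. tvgInlist.h is not part of this diff, so the following is only a sketch of the semantics the queue code appears to rely on: back() appends, front() detaches and returns the head (which would also explain why the explicit pop_front() calls disappear in later hunks), and empty() tests the list. The prev/next member names are an assumption, not ThorVG code.

// Illustrative sketch only; the real Inlist in tvgInlist.h may differ.
// An intrusive list keeps the links inside the element, so pushing a Task*
// allocates nothing.
template<typename T>
struct InlistSketch {
    T* head = nullptr;
    T* tail = nullptr;

    bool empty() const { return head == nullptr; }

    void back(T* e)                     // append to the tail
    {
        e->next = nullptr;
        e->prev = tail;
        if (tail) tail->next = e;
        else head = e;
        tail = e;
    }

    T* front()                          // detach and return the head
    {
        auto e = head;
        if (!e) return nullptr;
        head = e->next;
        if (head) head->prev = nullptr;
        else tail = nullptr;
        e->prev = e->next = nullptr;
        return e;
    }
};

// Hypothetical element type for the sketch: it carries its own links.
struct Node { Node* prev = nullptr; Node* next = nullptr; };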
@@ -44,8 +53,6 @@ struct TaskQueue {
unique_lock<mutex> lock{mtx, try_to_lock};
if (!lock || taskDeque.empty()) return false;
*task = taskDeque.front();
- taskDeque.pop_front();
-
return true;
}
@@ -54,18 +61,16 @@ struct TaskQueue {
{
unique_lock<mutex> lock{mtx, try_to_lock};
if (!lock) return false;
- taskDeque.push_back(task);
+ taskDeque.back(task);
}
-
ready.notify_one();
-
return true;
}
void complete()
{
{
- unique_lock<mutex> lock{mtx};
+ lock_guard<mutex> lock{mtx};
done = true;
}
ready.notify_all();
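The switch from unique_lock to lock_guard in push() and complete() works because those scopes only lock and unlock. Only code that waits on the condition variable needs a unique_lock, since condition_variable::wait() must release and re-acquire it, and tryPush()/tryPop() keep unique_lock because lock_guard has no try_to_lock form. A minimal sketch of that split, using std::queue rather than the Inlist above:

// Sketch of the locking split: lock_guard where the mutex is only held for
// the scope, unique_lock where wait() or try_to_lock is involved.
#include <condition_variable>
#include <mutex>
#include <queue>

struct QueueSketch {
    std::mutex mtx;
    std::condition_variable ready;
    std::queue<int> q;
    bool done = false;

    void push(int v)
    {
        {
            std::lock_guard<std::mutex> lock{mtx};   // scope-only lock
            q.push(v);
        }
        ready.notify_one();                          // notify after unlocking
    }

    bool pop(int* out)
    {
        std::unique_lock<std::mutex> lock{mtx};      // wait() needs unique_lock
        while (q.empty() && !done) ready.wait(lock);
        if (q.empty()) return false;
        *out = q.front();
        q.pop();
        return true;
    }

    void complete()
    {
        {
            std::lock_guard<std::mutex> lock{mtx};
            done = true;
        }
        ready.notify_all();
    }
};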
@@ -82,47 +87,52 @@ struct TaskQueue {
if (taskDeque.empty()) return false;
*task = taskDeque.front();
- taskDeque.pop_front();
-
return true;
}
void push(Task* task)
{
{
- unique_lock<mutex> lock{mtx};
- taskDeque.push_back(task);
+ lock_guard<mutex> lock{mtx};
+ taskDeque.back(task);
}
-
ready.notify_one();
}
-
};
-static thread_local bool _async = true; //toggle async tasking for each thread on/off
-
-
struct TaskSchedulerImpl
{
- uint32_t threadCnt;
- vector<thread> threads;
- vector<TaskQueue> taskQueues;
+ Array<thread*> threads;
+ Array<TaskQueue*> taskQueues;
atomic<uint32_t> idx{0};
- TaskSchedulerImpl(unsigned threadCnt) : threadCnt(threadCnt), taskQueues(threadCnt)
+ TaskSchedulerImpl(uint32_t threadCnt)
{
threads.reserve(threadCnt);
+ taskQueues.reserve(threadCnt);
- for (unsigned i = 0; i < threadCnt; ++i) {
- threads.emplace_back([&, i] { run(i); });
+ for (uint32_t i = 0; i < threadCnt; ++i) {
+ taskQueues.push(new TaskQueue);
+ threads.push(new thread);
+ }
+ for (uint32_t i = 0; i < threadCnt; ++i) {
+ *threads.data[i] = thread([&, i] { run(i); });
}
}
~TaskSchedulerImpl()
{
- for (auto& queue : taskQueues) queue.complete();
- for (auto& thread : threads) thread.join();
+ for (auto tq = taskQueues.data; tq < taskQueues.end(); ++tq) {
+ (*tq)->complete();
+ }
+ for (auto thread = threads.data; thread < threads.end(); ++thread) {
+ (*thread)->join();
+ delete(*thread);
+ }
+ for (auto tq = taskQueues.data; tq < taskQueues.end(); ++tq) {
+ delete(*tq);
+ }
}
void run(unsigned i)
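The constructor now builds in two passes: allocate every queue and thread object first, then launch the workers, so no worker can observe a partially filled taskQueues or threads array (both are read inside run()). The destructor mirrors this by completing all queues, joining all threads, and only then freeing the queues. A compilable sketch of the same ordering, using std::vector instead of ThorVG's Array purely for illustration:

#include <cstdint>
#include <thread>
#include <vector>

// Hypothetical minimal queue: complete() is the only member this teardown
// ordering needs.
struct DoneFlagQueue {
    void complete() { /* set done and notify all waiting workers */ }
};

struct PoolSketch {
    std::vector<DoneFlagQueue*> queues;
    std::vector<std::thread> workers;

    explicit PoolSketch(uint32_t n)
    {
        // Pass 1: every queue exists before any worker runs.
        for (uint32_t i = 0; i < n; ++i) queues.push_back(new DoneFlagQueue);
        workers.reserve(n);
        // Pass 2: start the workers; each one may index any queue.
        for (uint32_t i = 0; i < n; ++i) workers.emplace_back([this, i] { run(i); });
    }

    ~PoolSketch()
    {
        for (auto q : queues) q->complete();   // wake every blocked worker
        for (auto& t : workers) t.join();      // join before freeing the queues
        for (auto q : queues) delete q;
    }

    void run(uint32_t i) { (void)i; /* worker loop, see the next hunk */ }
};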
@@ -132,14 +142,14 @@ struct TaskSchedulerImpl
//Thread Loop
while (true) {
auto success = false;
- for (unsigned x = 0; x < threadCnt * 2; ++x) {
- if (taskQueues[(i + x) % threadCnt].tryPop(&task)) {
+ for (uint32_t x = 0; x < threads.count * 2; ++x) {
+ if (taskQueues[(i + x) % threads.count]->tryPop(&task)) {
success = true;
break;
}
}
- if (!success && !taskQueues[i].pop(&task)) break;
+ if (!success && !taskQueues[i]->pop(&task)) break;
(*task)(i + 1);
}
}
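run() is a work-stealing loop: worker i first probes every queue without blocking, starting at its own index and going around twice, then falls back to a blocking pop() on its own queue; the task is invoked with id i + 1, while the synchronous path in request() uses 0. The probe order, spelled out:

#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t threadCnt = 4, i = 1;        // example values, not from the diff
    for (uint32_t x = 0; x < threadCnt * 2; ++x) {
        printf("%u ", (i + x) % threadCnt);     // prints: 1 2 3 0 1 2 3 0
    }
    printf("\n");
    return 0;
}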
@@ -147,29 +157,45 @@ struct TaskSchedulerImpl
void request(Task* task)
{
//Async
- if (threadCnt > 0 && _async) {
+ if (threads.count > 0 && _async) {
task->prepare();
auto i = idx++;
- for (unsigned n = 0; n < threadCnt; ++n) {
- if (taskQueues[(i + n) % threadCnt].tryPush(task)) return;
+ for (uint32_t n = 0; n < threads.count; ++n) {
+ if (taskQueues[(i + n) % threads.count]->tryPush(task)) return;
}
- taskQueues[i % threadCnt].push(task);
+ taskQueues[i % threads.count]->push(task);
//Sync
} else {
task->run(0);
}
}
+
+ uint32_t threadCnt()
+ {
+ return threads.count;
+ }
};
-}
+#else //THORVG_THREAD_SUPPORT
-static TaskSchedulerImpl* inst = nullptr;
+static bool _async = true;
+
+struct TaskSchedulerImpl
+{
+ TaskSchedulerImpl(TVG_UNUSED uint32_t threadCnt) {}
+ void request(Task* task) { task->run(0); }
+ uint32_t threadCnt() { return 0; }
+};
+
+#endif //THORVG_THREAD_SUPPORT
+
+} //namespace
/************************************************************************/
/* External Class Implementation */
/************************************************************************/
-void TaskScheduler::init(unsigned threads)
+void TaskScheduler::init(uint32_t threads)
{
if (inst) return;
inst = new TaskSchedulerImpl(threads);
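request() distributes work with an atomically incremented starting index: it attempts a non-blocking tryPush() on each queue from that point, and only if every queue is momentarily contended does it fall back to a blocking push(); without thread support, or with async disabled, the task simply runs inline as run(0). A small sketch of how the atomic counter spreads submissions across queues:

#include <atomic>
#include <cstdint>
#include <cstdio>

int main()
{
    std::atomic<uint32_t> idx{0};
    const uint32_t threadCnt = 3;                    // example value, not from the diff
    for (int submission = 0; submission < 5; ++submission) {
        auto i = idx++;                              // same counter pattern as request()
        printf("submission %d starts at queue %u\n", submission, i % threadCnt);
    }
    return 0;
}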
@@ -178,7 +204,6 @@ void TaskScheduler::init(unsigned threads)
void TaskScheduler::term()
{
- if (!inst) return;
delete(inst);
inst = nullptr;
}
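Dropping the null check in term() is safe because deleting a null pointer is a no-op in C++, so term() can be called before init(), or more than once, without harm:

#include <cstdio>

int main()
{
    int* p = nullptr;
    delete p;            // well-defined: deleting a null pointer does nothing
    printf("ok\n");
    return 0;
}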
@@ -190,14 +215,15 @@ void TaskScheduler::request(Task* task)
}
-unsigned TaskScheduler::threads()
+uint32_t TaskScheduler::threads()
{
- if (inst) return inst->threadCnt;
+ if (inst) return inst->threadCnt();
return 0;
}
void TaskScheduler::async(bool on)
{
+ //toggle async tasking for each thread on/off
_async = on;
}
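Because _async is thread_local in the threaded build, TaskScheduler::async(false) only affects the calling thread: tasks requested from that thread bypass the queues and run inline via run(0). A hypothetical usage sketch (the helper name is made up, and it assumes tvgTaskScheduler.h is included and TaskScheduler is visible unqualified, as it is in this file):

#include "tvgTaskScheduler.h"

void runTasksInlineOnThisThread()      // hypothetical helper, not ThorVG API
{
    TaskScheduler::async(false);       // this thread only: dispatch synchronously
    // ... call code that goes through TaskScheduler::request() ...
    TaskScheduler::async(true);        // restore asynchronous dispatch
}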