Merge branch 'master' into cxx17

Stephen Seo 2021-09-07 18:16:29 +09:00
commit 155df42657
2 changed files with 288 additions and 185 deletions


@@ -58,7 +58,9 @@ namespace EC
EC::Manager<TypeList<C0, C1, C2>, TypeList<T0, T1>> manager;
\endcode
*/
template <typename ComponentsList, typename TagsList, unsigned int ThreadCount = 4>
template <typename ComponentsList,
typename TagsList,
unsigned int ThreadCount = 4>
struct Manager
{
public:
@@ -93,6 +95,72 @@ namespace EC
std::unique_ptr<ThreadPool<ThreadCount> > threadPool;
public:
// section for "temporary" structures {{{
struct TPFnDataStructZero {
std::array<std::size_t, 2> range;
Manager *manager;
EntitiesType *entities;
const BitsetType *signature;
void *userData;
};
template <typename Function>
struct TPFnDataStructOne {
std::array<std::size_t, 2> range;
Manager *manager;
EntitiesType *entities;
BitsetType *signature;
void *userData;
Function *fn;
};
struct TPFnDataStructTwo {
std::array<std::size_t, 2> range;
Manager *manager;
EntitiesType *entities;
void *userData;
const std::vector<std::size_t> *matching;
};
struct TPFnDataStructThree {
std::array<std::size_t, 2> range;
Manager *manager;
std::vector<std::vector<std::size_t> > *matchingV;
const std::vector<BitsetType*> *bitsets;
EntitiesType *entities;
std::mutex *mutex;
};
struct TPFnDataStructFour {
std::array<std::size_t, 2> range;
Manager *manager;
std::vector<std::vector<std::size_t> >*
multiMatchingEntities;
BitsetType *signatures;
std::mutex *mutex;
};
struct TPFnDataStructFive {
std::array<std::size_t, 2> range;
std::size_t index;
Manager *manager;
void *userData;
std::vector<std::vector<std::size_t> >*
multiMatchingEntities;
};
struct TPFnDataStructSix {
std::array<std::size_t, 2> range;
Manager *manager;
std::vector<std::vector<std::size_t> > *
multiMatchingEntities;
BitsetType *bitsets;
std::mutex *mutex;
};
template <typename Iterable>
struct TPFnDataStructSeven {
std::array<std::size_t, 2> range;
Manager *manager;
EntitiesType *entities;
Iterable *iterable;
void *userData;
};
// end section for "temporary" structures }}}
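The structs above replace the std::tuple payloads that the thread-pool lambdas previously received through their void* argument; named members make the call sites below much easier to read than positional std::get<N> access. A minimal standalone sketch of the before/after difference (ToyManager, the worker functions, and the range values are invented for illustration and are not part of this header):

#include <array>
#include <cstddef>
#include <iostream>
#include <tuple>

// Stand-in for the real Manager; only isAlive() is needed here.
struct ToyManager {
    bool isAlive(std::size_t) const { return true; }
};

// Before: a positional tuple smuggled through void*.
using TupleData = std::tuple<ToyManager*, std::array<std::size_t, 2>>;
void tupleWorker(void *ud) {
    auto *data = static_cast<TupleData*>(ud);
    for (std::size_t i = std::get<1>(*data)[0]; i < std::get<1>(*data)[1]; ++i) {
        if (std::get<0>(*data)->isAlive(i)) { std::cout << i << ' '; }
    }
}

// After: a named aggregate, mirroring TPFnDataStructZero above.
struct StructData {
    std::array<std::size_t, 2> range;
    ToyManager *manager;
};
void structWorker(void *ud) {
    auto *data = static_cast<StructData*>(ud);
    for (std::size_t i = data->range[0]; i < data->range[1]; ++i) {
        if (data->manager->isAlive(i)) { std::cout << i << ' '; }
    }
}

int main() {
    ToyManager m;
    std::array<std::size_t, 2> range{0, 3};
    TupleData tupleData{&m, range};
    StructData structData{range, &m};
    tupleWorker(&tupleData);   // prints: 0 1 2
    structWorker(&structData); // prints: 0 1 2
    std::cout << '\n';
}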
/*!
\brief Initializes the manager with a default capacity.
@@ -656,8 +724,7 @@ namespace EC
}
else
{
using TPFnDataType = std::tuple<Manager*, EntitiesType*, BitsetType*, std::array<std::size_t, 2>, void*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructZero, ThreadCount> fnDataAr;
std::size_t s = currentSize / ThreadCount;
for(std::size_t i = 0; i < ThreadCount; ++i) {
@@ -671,33 +738,37 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = &entities;
std::get<2>(fnDataAr.at(i)) = &signatureBitset;
std::get<3>(fnDataAr.at(i)) = {begin, end};
std::get<4>(fnDataAr.at(i)) = userData;
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].entities = &entities;
fnDataAr[i].signature = &signatureBitset;
fnDataAr[i].userData = userData;
threadPool->queueFn([&function] (void *ud) {
auto *data = static_cast<TPFnDataType*>(ud);
for(std::size_t i = std::get<3>(*data).at(0);
i < std::get<3>(*data).at(1);
auto *data = static_cast<TPFnDataStructZero*>(ud);
for(std::size_t i = data->range[0]; i < data->range[1];
++i) {
if(!std::get<0>(*data)->isAlive(i)) {
if(!data->manager->isAlive(i)) {
continue;
}
if((*std::get<2>(*data)
& std::get<BitsetType>(
std::get<1>(*data)->at(i)))
== *std::get<2>(*data)) {
Helper::call(i, *std::get<0>(*data), std::forward<Function>(function), std::get<4>(*data));
if(((*data->signature)
& std::get<BitsetType>(
data->entities->at(i)))
== *data->signature) {
Helper::call(i,
*data->manager,
std::forward<Function>(function),
data->userData);
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
}
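Every threaded branch in this file follows the same shape: split the entity range into ThreadCount chunks, fill one payload struct per chunk, queue a lambda that casts the void* back to the struct, then wake the pool and poll until the queue is empty and all workers are idle. A minimal standalone sketch of that shape, using plain std::thread in place of the repo's ThreadPool<ThreadCount> (whose internals are not part of this diff); the begin/end chunking math is an assumption, since those lines are elided from the hunks above:

#include <array>
#include <cstddef>
#include <iostream>
#include <thread>
#include <vector>

struct Payload {                        // analogous to TPFnDataStructZero
    std::array<std::size_t, 2> range;   // [begin, end)
    const std::vector<int> *items;
};

int main() {
    constexpr std::size_t ThreadCount = 4;
    std::vector<int> items(10, 1);

    std::array<Payload, ThreadCount> fnDataAr{};
    std::vector<std::thread> workers;
    const std::size_t s = items.size() / ThreadCount;

    for (std::size_t i = 0; i < ThreadCount; ++i) {
        // Assumed chunking: the last chunk absorbs the remainder.
        std::size_t begin = i * s;
        std::size_t end = (i + 1 == ThreadCount) ? items.size() : (i + 1) * s;
        if (begin == end) { continue; }
        fnDataAr[i].range = {begin, end};
        fnDataAr[i].items = &items;
        // The real code hands this lambda plus &fnDataAr[i] to
        // threadPool->queueFn(); a std::thread stands in for it here.
        workers.emplace_back([](void *ud) {
            auto *data = static_cast<Payload*>(ud);
            long sum = 0;
            for (std::size_t j = data->range[0]; j < data->range[1]; ++j) {
                sum += data->items->at(j);
            }
            std::cout << "chunk sum: " << sum << '\n';  // output may interleave
        }, static_cast<void*>(&fnDataAr[i]));
    }
    // The real code instead wakes the pool and polls isQueueEmpty() /
    // isAllThreadsWaiting() in a sleep loop; joining is the simple equivalent.
    for (auto &w : workers) { w.join(); }
}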
@@ -774,8 +845,7 @@ namespace EC
}
else
{
using TPFnDataType = std::tuple<Manager*, EntitiesType*, BitsetType*, std::array<std::size_t, 2>, void*, Function*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructOne<Function>, ThreadCount> fnDataAr;
std::size_t s = currentSize / ThreadCount;
for(std::size_t i = 0; i < ThreadCount; ++i) {
@@ -789,34 +859,37 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = &entities;
std::get<2>(fnDataAr.at(i)) = &signatureBitset;
std::get<3>(fnDataAr.at(i)) = {begin, end};
std::get<4>(fnDataAr.at(i)) = userData;
std::get<5>(fnDataAr.at(i)) = function;
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].entities = &entities;
fnDataAr[i].signature = &signatureBitset;
fnDataAr[i].userData = userData;
fnDataAr[i].fn = function;
threadPool->queueFn([] (void *ud) {
auto *data = static_cast<TPFnDataType*>(ud);
for(std::size_t i = std::get<3>(*data).at(0);
i < std::get<3>(*data).at(1);
auto *data = static_cast<TPFnDataStructOne<Function>*>(ud);
for(std::size_t i = data->range[0]; i < data->range[1];
++i) {
if(!std::get<0>(*data)->isAlive(i)) {
if(!data->manager->isAlive(i)) {
continue;
}
if((*std::get<2>(*data)
& std::get<BitsetType>(
std::get<1>(*data)->at(i)))
== *std::get<2>(*data)) {
Helper::callPtr(i, *std::get<0>(*data), std::get<5>(*data), std::get<4>(*data));
if(((*data->signature)
& std::get<BitsetType>(
data->entities->at(i)))
== *data->signature) {
Helper::callPtr(i,
*data->manager,
data->fn,
data->userData);
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
}
@@ -922,8 +995,7 @@ namespace EC
}
else
{
using TPFnDataType = std::tuple<Manager*, EntitiesType*, std::array<std::size_t, 2>, void*, const std::vector<std::size_t>*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructTwo, ThreadCount> fnDataAr;
std::size_t s = matching.size() / ThreadCount;
for(std::size_t i = 0; i < ThreadCount; ++i) {
@@ -937,30 +1009,33 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = &entities;
std::get<2>(fnDataAr.at(i)) = {begin, end};
std::get<3>(fnDataAr.at(i)) = userData;
std::get<4>(fnDataAr.at(i)) = &matching;
threadPool->queueFn([function, helper] (void* ud) {
auto *data = static_cast<TPFnDataType*>(ud);
for(std::size_t i = std::get<2>(*data).at(0);
i < std::get<2>(*data).at(1);
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].entities = &entities;
fnDataAr[i].userData = userData;
fnDataAr[i].matching = &matching;
threadPool->queueFn([&function, helper] (void* ud) {
auto *data = static_cast<TPFnDataStructTwo*>(ud);
for(std::size_t i = data->range[0];
i < data->range[1];
++i) {
if(std::get<0>(*data)->isAlive(std::get<4>(*data)->at(i))) {
if(data->manager->isAlive(
data->matching->at(i))) {
helper.callInstancePtr(
std::get<4>(*data)->at(i),
*std::get<0>(*data),
data->matching->at(i),
*data->manager,
&function,
std::get<3>(*data));
data->userData);
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
std::this_thread::sleep_for(
std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
})));
@@ -993,8 +1068,7 @@ namespace EC
}
else
{
using TPFnDataType = std::tuple<Manager*, std::array<std::size_t, 2>, std::vector<std::vector<std::size_t> >*, const std::vector<BitsetType*>*, EntitiesType*, std::mutex*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructThree, ThreadCount> fnDataAr;
std::size_t s = currentSize / ThreadCount;
std::mutex mutex;
@@ -1009,37 +1083,38 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = {begin, end};
std::get<2>(fnDataAr.at(i)) = &matchingV;
std::get<3>(fnDataAr.at(i)) = &bitsets;
std::get<4>(fnDataAr.at(i)) = &entities;
std::get<5>(fnDataAr.at(i)) = &mutex;
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].matchingV = &matchingV;
fnDataAr[i].bitsets = &bitsets;
fnDataAr[i].entities = &entities;
fnDataAr[i].mutex = &mutex;
threadPool->queueFn([] (void *ud) {
auto *data = static_cast<TPFnDataType*>(ud);
for(std::size_t i = std::get<1>(*data).at(0);
i < std::get<1>(*data).at(1);
auto *data = static_cast<TPFnDataStructThree*>(ud);
for(std::size_t i = data->range[0]; i < data->range[1];
++i) {
if(!std::get<0>(*data)->isAlive(i)) {
if(!data->manager->isAlive(i)) {
continue;
}
for(std::size_t j = 0;
j < std::get<3>(*data)->size();
for(std::size_t j = 0; j < data->bitsets->size();
++j) {
if(((*std::get<3>(*data)->at(j))
& std::get<BitsetType>(std::get<4>(*data)->at(i)))
== (*std::get<3>(*data)->at(j))) {
std::lock_guard<std::mutex> lock(*std::get<5>(*data));
std::get<2>(*data)->at(j).push_back(i);
if((*data->bitsets->at(j)
& std::get<BitsetType>(
data->entities->at(i)))
== *data->bitsets->at(j)) {
std::lock_guard<std::mutex> lock(
*data->mutex);
data->matchingV->at(j).push_back(i);
}
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
return matchingV;
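The matching test used throughout this header, (signature & entityBits) == signature, reads as "the entity's bitset contains every bit the signature requires." A tiny self-contained illustration with std::bitset (the real BitsetType is the library's combined component/tag bitset, not std::bitset):

#include <bitset>
#include <iostream>

int main() {
    std::bitset<4> signature("0011");  // requires bits 0 and 1
    std::bitset<4> entityA("0111");    // has bits 0, 1, 2 -> superset, matches
    std::bitset<4> entityB("0101");    // has bits 0, 2    -> missing bit 1

    std::cout << ((signature & entityA) == signature) << '\n';  // prints 1
    std::cout << ((signature & entityB) == signature) << '\n';  // prints 0
}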
@@ -1383,8 +1458,7 @@ namespace EC
}
else
{
using TPFnDataType = std::tuple<Manager*, std::array<std::size_t, 2>, std::vector<std::vector<std::size_t> >*, BitsetType*, std::mutex*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructFour, ThreadCount> fnDataAr;
std::mutex mutex;
std::size_t s = currentSize / ThreadCount;
@@ -1399,34 +1473,38 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = {begin, end};
std::get<2>(fnDataAr.at(i)) = &multiMatchingEntities;
std::get<3>(fnDataAr.at(i)) = signatureBitsets;
std::get<4>(fnDataAr.at(i)) = &mutex;
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].multiMatchingEntities = &multiMatchingEntities;
fnDataAr[i].signatures = signatureBitsets;
fnDataAr[i].mutex = &mutex;
threadPool->queueFn([] (void *ud) {
auto *data = static_cast<TPFnDataType*>(ud);
for(std::size_t i = std::get<1>(*data).at(0);
i < std::get<1>(*data).at(1);
auto *data = static_cast<TPFnDataStructFour*>(ud);
for(std::size_t i = data->range[0]; i < data->range[1];
++i) {
if(!std::get<0>(*data)->isAlive(i)) {
if(!data->manager->isAlive(i)) {
continue;
}
for(std::size_t j = 0; j < SigList::size; ++j) {
if((std::get<3>(*data)[j] & std::get<BitsetType>(std::get<0>(*data)->entities[i]))
== std::get<3>(*data)[j]) {
std::lock_guard<std::mutex> lock(*std::get<4>(*data));
std::get<2>(*data)->at(j).push_back(i);
if((data->signatures[j]
& std::get<BitsetType>(
data->manager->entities[i]))
== data->signatures[j]) {
std::lock_guard<std::mutex> lock(
*data->mutex);
data->multiMatchingEntities->at(j)
.push_back(i);
}
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
// call functions on matching entities
@@ -1450,8 +1528,7 @@ namespace EC
}
}
} else {
using TPFnType = std::tuple<Manager*, void*, std::array<std::size_t, 2>, std::vector<std::vector<std::size_t> > *, std::size_t>;
std::array<TPFnType, ThreadCount> fnDataAr;
std::array<TPFnDataStructFive, ThreadCount> fnDataAr;
std::size_t s = multiMatchingEntities[index].size()
/ ThreadCount;
for(unsigned int i = 0; i < ThreadCount; ++i) {
@@ -1465,30 +1542,35 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = userData;
std::get<2>(fnDataAr.at(i)) = {begin, end};
std::get<3>(fnDataAr.at(i)) = &multiMatchingEntities;
std::get<4>(fnDataAr.at(i)) = index;
fnDataAr[i].range = {begin, end};
fnDataAr[i].index = index;
fnDataAr[i].manager = this;
fnDataAr[i].userData = userData;
fnDataAr[i].multiMatchingEntities =
&multiMatchingEntities;
threadPool->queueFn([&func] (void *ud) {
auto *data = static_cast<TPFnType*>(ud);
for(std::size_t i = std::get<2>(*data).at(0);
i < std::get<2>(*data).at(1);
++i) {
if(std::get<0>(*data)->isAlive(std::get<3>(*data)->at(std::get<4>(*data)).at(i))) {
auto *data = static_cast<TPFnDataStructFive*>(ud);
for(std::size_t i = data->range[0];
i < data->range[1]; ++i) {
if(data->manager->isAlive(
data->multiMatchingEntities
->at(data->index).at(i))) {
Helper::call(
std::get<3>(*data)->at(std::get<4>(*data)).at(i),
*std::get<0>(*data),
data->multiMatchingEntities
->at(data->index).at(i),
*data->manager,
func,
std::get<1>(*data));
data->userData);
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
std::this_thread::sleep_for(
std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
}
);
@@ -1584,8 +1666,7 @@ namespace EC
}
else
{
using TPFnDataType = std::tuple<Manager*, std::array<std::size_t, 2>, std::vector<std::vector<std::size_t> >*, BitsetType*, std::mutex*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructSix, ThreadCount> fnDataAr;
std::mutex mutex;
std::size_t s = currentSize / ThreadCount;
@@ -1600,34 +1681,38 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = {begin, end};
std::get<2>(fnDataAr.at(i)) = &multiMatchingEntities;
std::get<3>(fnDataAr.at(i)) = signatureBitsets;
std::get<4>(fnDataAr.at(i)) = &mutex;
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].multiMatchingEntities = &multiMatchingEntities;
fnDataAr[i].bitsets = signatureBitsets;
fnDataAr[i].mutex = &mutex;
threadPool->queueFn([] (void *ud) {
auto *data = static_cast<TPFnDataType*>(ud);
for(std::size_t i = std::get<1>(*data).at(0);
i < std::get<1>(*data).at(1);
auto *data = static_cast<TPFnDataStructSix*>(ud);
for(std::size_t i = data->range[0]; i < data->range[1];
++i) {
if(!std::get<0>(*data)->isAlive(i)) {
if(!data->manager->isAlive(i)) {
continue;
}
for(std::size_t j = 0; j < SigList::size; ++j) {
if((std::get<3>(*data)[j] & std::get<BitsetType>(std::get<0>(*data)->entities[i]))
== std::get<3>(*data)[j]) {
std::lock_guard<std::mutex> lock(*std::get<4>(*data));
std::get<2>(*data)->at(j).push_back(i);
if((data->bitsets[j]
& std::get<BitsetType>(
data->manager->entities[i]))
== data->bitsets[j]) {
std::lock_guard<std::mutex> lock(
*data->mutex);
data->multiMatchingEntities->at(j)
.push_back(i);
}
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
// call functions on matching entities
@@ -1656,8 +1741,7 @@ namespace EC
}
else
{
using TPFnType = std::tuple<Manager*, void*, std::array<std::size_t, 2>, std::vector<std::vector<std::size_t> > *, std::size_t>;
std::array<TPFnType, ThreadCount> fnDataAr;
std::array<TPFnDataStructFive, ThreadCount> fnDataAr;
std::size_t s = multiMatchingEntities[index].size()
/ ThreadCount;
for(unsigned int i = 0; i < ThreadCount; ++i) {
@@ -1671,36 +1755,43 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = userData;
std::get<2>(fnDataAr.at(i)) = {begin, end};
std::get<3>(fnDataAr.at(i)) = &multiMatchingEntities;
std::get<4>(fnDataAr.at(i)) = index;
fnDataAr[i].range = {begin, end};
fnDataAr[i].index = index;
fnDataAr[i].manager = this;
fnDataAr[i].userData = userData;
fnDataAr[i].multiMatchingEntities =
&multiMatchingEntities;
threadPool->queueFn([&func] (void *ud) {
auto *data = static_cast<TPFnType*>(ud);
for(std::size_t i = std::get<2>(*data).at(0);
i < std::get<2>(*data).at(1);
++i) {
if(std::get<0>(*data)->isAlive(std::get<3>(*data)->at(std::get<4>(*data)).at(i))) {
auto *data = static_cast<TPFnDataStructFive*>(ud);
for(std::size_t i = data->range[0];
i < data->range[1]; ++i) {
if(data->manager->isAlive(
data->multiMatchingEntities
->at(data->index).at(i))) {
Helper::callPtr(
std::get<3>(*data)->at(std::get<4>(*data)).at(i),
*std::get<0>(*data),
data->multiMatchingEntities
->at(data->index).at(i),
*data->manager,
func,
std::get<1>(*data));
data->userData);
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
std::this_thread::sleep_for(
std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
}
);
}
typedef void ForMatchingFn(std::size_t, Manager<ComponentsList, TagsList>*, void*);
typedef void ForMatchingFn(std::size_t,
Manager<ComponentsList, TagsList>*,
void*);
/*!
\brief A simple version of forMatchingSignature()
@@ -1720,19 +1811,23 @@ namespace EC
may not have as great of a speed-up.
*/
template <typename Signature>
void forMatchingSimple(ForMatchingFn fn, void *userData = nullptr, const bool useThreadPool = false) {
const BitsetType signatureBitset = BitsetType::template generateBitset<Signature>();
void forMatchingSimple(ForMatchingFn fn,
void *userData = nullptr,
const bool useThreadPool = false) {
const BitsetType signatureBitset =
BitsetType::template generateBitset<Signature>();
if(!useThreadPool || !threadPool) {
for(std::size_t i = 0; i < currentSize; ++i) {
if(!std::get<bool>(entities[i])) {
continue;
} else if((signatureBitset & std::get<BitsetType>(entities[i])) == signatureBitset) {
} else if((signatureBitset
& std::get<BitsetType>(entities[i]))
== signatureBitset) {
fn(i, this, userData);
}
}
} else {
using TPFnDataType = std::tuple<Manager*, EntitiesType*, const BitsetType*, std::array<std::size_t, 2>, void*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructZero, ThreadCount> fnDataAr;
std::size_t s = currentSize / ThreadCount;
for(std::size_t i = 0; i < ThreadCount; ++i) {
@@ -1746,28 +1841,31 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = &entities;
std::get<2>(fnDataAr.at(i)) = &signatureBitset;
std::get<3>(fnDataAr.at(i)) = {begin, end};
std::get<4>(fnDataAr.at(i)) = userData;
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].entities = &entities;
fnDataAr[i].signature = &signatureBitset;
fnDataAr[i].userData = userData;
threadPool->queueFn([&fn] (void *ud) {
auto *data = static_cast<TPFnDataType*>(ud);
for(std::size_t i = std::get<3>(*data).at(0);
i < std::get<3>(*data).at(1);
auto *data = static_cast<TPFnDataStructZero*>(ud);
for(std::size_t i = data->range[0]; i < data->range[1];
++i) {
if(!std::get<0>(*data)->isAlive(i)) {
if(!data->manager->isAlive(i)) {
continue;
} else if((*std::get<2>(*data) & std::get<BitsetType>(std::get<1>(*data)->at(i))) == *std::get<2>(*data)) {
fn(i, std::get<0>(*data), std::get<4>(*data));
} else if((*data->signature
& std::get<BitsetType>(
data->entities->at(i)))
== *data->signature) {
fn(i, data->manager, data->userData);
}
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
}
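forMatchingSimple() accepts a plain function matching the ForMatchingFn typedef above instead of an arbitrary templated callable. A hedged usage sketch; the component and tag types, the counting callback, and the TypeList spelling (taken from the doc-comment example near the top of this diff, possibly EC::Meta::TypeList in the real headers) are all illustrative:

#include <cstddef>

struct C0 { int x; };
struct C1 { float y; };
struct T0 {};  // tag

using MyManager = EC::Manager<TypeList<C0, C1>, TypeList<T0>>;

// Matches ForMatchingFn: void(std::size_t, Manager*, void*).
void countEntity(std::size_t /*id*/, MyManager * /*manager*/, void *userData) {
    ++*static_cast<int*>(userData);
}

void countMatches(MyManager &manager) {
    int count = 0;
    // Single-threaded pass over live entities that have C0 and the T0 tag.
    manager.forMatchingSimple<TypeList<C0, T0>>(countEntity, &count);
    // Same pass, distributed across the manager's thread pool.
    manager.forMatchingSimple<TypeList<C0, T0>>(countEntity, &count, true);
}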
@@ -1790,7 +1888,10 @@ namespace EC
may not have as great of a speed-up.
*/
template <typename Iterable>
void forMatchingIterable(Iterable iterable, ForMatchingFn fn, void* userData = nullptr, const bool useThreadPool = false) {
void forMatchingIterable(Iterable iterable,
ForMatchingFn fn,
void* userData = nullptr,
const bool useThreadPool = false) {
if(!useThreadPool || !threadPool) {
bool isValid;
for(std::size_t i = 0; i < currentSize; ++i) {
@@ -1800,7 +1901,8 @@ namespace EC
isValid = true;
for(const auto& integralValue : iterable) {
if(!std::get<BitsetType>(entities[i]).getCombinedBit(integralValue)) {
if(!std::get<BitsetType>(entities[i]).getCombinedBit(
integralValue)) {
isValid = false;
break;
}
@@ -1810,8 +1912,7 @@ namespace EC
fn(i, this, userData);
}
} else {
using TPFnDataType = std::tuple<Manager*, EntitiesType*, Iterable*, std::array<std::size_t, 2>, void*>;
std::array<TPFnDataType, ThreadCount> fnDataAr;
std::array<TPFnDataStructSeven<Iterable>, ThreadCount> fnDataAr;
std::size_t s = currentSize / ThreadCount;
for(std::size_t i = 0; i < ThreadCount; ++i) {
@@ -1825,38 +1926,38 @@ namespace EC
if(begin == end) {
continue;
}
std::get<0>(fnDataAr.at(i)) = this;
std::get<1>(fnDataAr.at(i)) = &entities;
std::get<2>(fnDataAr.at(i)) = &iterable;
std::get<3>(fnDataAr.at(i)) = {begin, end};
std::get<4>(fnDataAr.at(i)) = userData;
fnDataAr[i].range = {begin, end};
fnDataAr[i].manager = this;
fnDataAr[i].entities = &entities;
fnDataAr[i].iterable = &iterable;
fnDataAr[i].userData = userData;
threadPool->queueFn([&fn] (void *ud) {
auto *data = static_cast<TPFnDataType*>(ud);
auto *data = static_cast<TPFnDataStructSeven<Iterable>*>(ud);
bool isValid;
for(std::size_t i = std::get<3>(*data).at(0);
i < std::get<3>(*data).at(1);
for(std::size_t i = data->range[0]; i < data->range[1];
++i) {
if(!std::get<0>(*data)->isAlive(i)) {
if(!data->manager->isAlive(i)) {
continue;
}
isValid = true;
for(const auto& integralValue : *std::get<2>(*data)) {
if(!std::get<BitsetType>(std::get<1>(*data)->at(i)).getCombinedBit(integralValue)) {
for(const auto& integralValue : *data->iterable) {
if(!std::get<BitsetType>(data->entities->at(i))
.getCombinedBit(integralValue)) {
isValid = false;
break;
}
}
if(!isValid) { continue; }
fn(i, std::get<0>(*data), std::get<4>(*data));
fn(i, data->manager, data->userData);
}
}, &fnDataAr.at(i));
}, &fnDataAr[i]);
}
threadPool->wakeThreads();
do {
std::this_thread::sleep_for(std::chrono::microseconds(200));
} while(!threadPool->isQueueEmpty() || !threadPool->isAllThreadsWaiting());
} while(!threadPool->isQueueEmpty()
|| !threadPool->isAllThreadsWaiting());
}
}
};
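forMatchingIterable() is the runtime counterpart of forMatchingSimple(): instead of a compile-time Signature it walks any iterable of combined bit indices and tests each one with getCombinedBit(). A hedged sketch continuing the previous one, before the diff moves on to the second changed file; the index values are illustrative, since in practice they depend on the manager's component/tag ordering:

#include <cstddef>
#include <vector>

void countMatchesAtRuntime(MyManager &manager) {
    int count = 0;
    // Combined bit indices the entity must have set (values assumed).
    std::vector<std::size_t> requiredBits{0, 2};
    manager.forMatchingIterable(requiredBits, countEntity, &count);
    // Thread-pooled variant.
    manager.forMatchingIterable(requiredBits, countEntity, &count, true);
}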


@@ -49,6 +49,8 @@ struct Base
{
return 0;
}
virtual ~Base() {}
};
struct Derived : public Base
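The only change to the second file is the virtual destructor added to Base above. That matters whenever a Derived object is destroyed through a Base pointer: without a virtual destructor the delete is undefined behavior and Derived's destructor never runs. A minimal standalone illustration (the member names here are invented, not the test's actual ones):

#include <iostream>
#include <memory>

struct Base {
    virtual int value() { return 0; }
    virtual ~Base() { std::cout << "~Base\n"; }  // virtual: safe polymorphic delete
};

struct Derived : public Base {
    int value() override { return 1; }
    ~Derived() override { std::cout << "~Derived\n"; }
};

int main() {
    std::unique_ptr<Base> p = std::make_unique<Derived>();
    std::cout << p->value() << '\n';  // prints 1
    // When p goes out of scope, ~Derived then ~Base run because ~Base is
    // virtual; with a non-virtual ~Base this would be undefined behavior.
}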