Refactor mutex locking during update

Stephen Seo 2020-01-05 15:05:23 +09:00
parent 7996bd5c36
commit 5c8480e5bc

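The diff below moves conMapMutex locking from around the whole update_impl() call (previously done by threadedUpdate() and UDPC_update()) into narrower std::lock_guard scopes inside update_impl() itself, taken only around the conMap accesses that need them. A minimal sketch of that before/after pattern follows; the names conMapMutex, conMap, update_impl, threadRunning, and threadedUpdate come from the diff, while the map type, the fixed 8 ms sleep, and the overall structure are simplified placeholders rather than the real UDPC implementation.

// Sketch only, not UDPC itself: update_impl() now takes conMapMutex in a
// narrow scope instead of relying on the caller to hold it for the whole call.
#include <atomic>
#include <chrono>
#include <map>
#include <mutex>
#include <thread>

struct Context {
    std::mutex conMapMutex;
    std::map<int, int> conMap;            // placeholder for the real connection map
    std::atomic_bool threadRunning{true};

    void update_impl() {
        {
            // Lock only while touching conMap, then release before other work.
            std::lock_guard<std::mutex> lock(conMapMutex);
            // ... insert/erase/look up connections in conMap here ...
        }
        // ... send/receive work that does not need conMap runs unlocked ...
    }
};

void threadedUpdate(Context *ctx) {
    while(ctx->threadRunning.load()) {
        // No outer lock_guard here anymore; update_impl() locks internally.
        ctx->update_impl();
        std::this_thread::sleep_for(std::chrono::milliseconds(8)); // placeholder interval
    }
}

Holding conMapMutex only around the map operations means other threads that take the same mutex are no longer blocked for the full duration of an update tick.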

@@ -317,6 +317,8 @@ void UDPC::Context::update_impl() {
}
newCon.sent = std::chrono::steady_clock::now() - UDPC::INIT_PKT_INTERVAL_DT;
+std::lock_guard<std::mutex> lock(conMapMutex);
if(conMap.find(optE->conId) == conMap.end()) {
conMap.insert(std::make_pair(
optE->conId,
@@ -349,6 +351,8 @@ void UDPC::Context::update_impl() {
}
break;
case UDPC_ET_REQUEST_DISCONNECT:
+{
+std::lock_guard<std::mutex> lock(conMapMutex);
if(optE->v.dropAllWithAddr != 0) {
// drop all connections with same address
auto addrConIter = addrConMap.find(optE->conId.addr);
@@ -369,6 +373,7 @@ void UDPC::Context::update_impl() {
deletionMap.insert(iter->first);
}
}
+}
break;
default:
assert(!"internalEvents got invalid type");
@@ -380,6 +385,7 @@ void UDPC::Context::update_impl() {
{
// check timed out, check good/bad mode with rtt, remove timed out
std::vector<UDPC_ConnectionId> removed;
+std::lock_guard<std::mutex> lock(conMapMutex);
for(auto iter = conMap.begin(); iter != conMap.end(); ++iter) {
temp_dt_fs = now - iter->second.received;
if(temp_dt_fs >= UDPC::CONNECTION_TIMEOUT) {
@@ -499,6 +505,7 @@ void UDPC::Context::update_impl() {
while(true) {
auto next = sendIter.current();
if(next) {
+std::lock_guard<std::mutex> lock(conMapMutex);
auto iter = conMap.find(next->receiver);
if(iter != conMap.end()) {
if(iter->second.sendPkts.size() >= UDPC_QUEUED_PKTS_MAX_SIZE) {
@@ -551,6 +558,8 @@ void UDPC::Context::update_impl() {
}
// update send (only if triggerSend flag is set)
{
+std::lock_guard<std::mutex> lock(conMapMutex);
for(auto iter = conMap.begin(); iter != conMap.end(); ++iter) {
auto delIter = deletionMap.find(iter->first);
if(!iter->second.flags.test(0) && delIter == deletionMap.end()) {
@@ -1018,9 +1027,11 @@ void UDPC::Context::update_impl() {
}
iter->second.sent = now;
}
}
// remove queued for deletion
for(auto delIter = deletionMap.begin(); delIter != deletionMap.end(); ++delIter) {
+std::lock_guard<std::mutex> lock(conMapMutex);
auto iter = conMap.find(*delIter);
if(iter != conMap.end()) {
if(iter->second.flags.test(4)) {
@@ -1166,6 +1177,7 @@ void UDPC::Context::update_impl() {
if(isConnect && !isPing) {
// is connect packet and is accepting new connections
+std::lock_guard<std::mutex> lock(conMapMutex);
if(!flags.test(1)
&& conMap.find(identifier) == conMap.end()
&& isAcceptNewConnections.load()) {
@@ -1397,6 +1409,7 @@ void UDPC::Context::update_impl() {
return;
}
+std::lock_guard<std::mutex> lock(conMapMutex);
auto iter = conMap.find(identifier);
if(iter == conMap.end() || iter->second.flags.test(3)
|| !iter->second.flags.test(4) || iter->second.id != conID) {
@@ -1780,10 +1793,7 @@ void UDPC::threadedUpdate(Context *ctx) {
decltype(now) nextNow;
while(ctx->threadRunning.load()) {
now = std::chrono::steady_clock::now();
-{
-std::lock_guard<std::mutex> lock(ctx->conMapMutex);
ctx->update_impl();
-}
nextNow = std::chrono::steady_clock::now();
std::this_thread::sleep_for(ctx->threadedSleepTime - (nextNow - now));
}
@@ -2066,7 +2076,6 @@ void UDPC_update(UDPC_HContext ctx) {
return;
}
-std::lock_guard<std::mutex> lock(c->conMapMutex);
c->update_impl();
}