Revert "Impl "RWLock" for use in TSLQueue"

This reverts commit cf27a6bb76.

The use of the shared ("read") lock in TSLQueue is unsafe for two reasons:

  - The TSLQueue iterator takes a "read" lock.
  - The TSLQueue iterator can erase the current element.

Erasing mutates the list, which requires exclusive access, so doing it while
holding only a "read" lock races with any other thread that holds the same
shared lock, as the sketch below illustrates.
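The sketch is illustration only: it uses std::shared_mutex as a stand-in for
the reverted SharedSpinLock, and the Node/UnsafeIter names are hypothetical,
not part of this codebase.

    #include <memory>
    #include <shared_mutex>

    struct Node {
        std::shared_ptr<Node> next;
        std::weak_ptr<Node> prev;
        int data = 0;
    };

    struct UnsafeIter {
        // Holds only a shared ("read") lock for its whole lifetime,
        // mirroring the reverted iterator.
        std::shared_lock<std::shared_mutex> readLock;
        std::shared_ptr<Node> current;

        UnsafeIter(std::shared_mutex &m, std::shared_ptr<Node> start) :
            readLock(m),
            current(std::move(start))
        {}

        // remove() mutates neighboring nodes while other threads may hold
        // their own read locks on the same list: a data race. Upgrading to
        // a write lock here would deadlock on this iterator's own read lock.
        void remove() {
            std::shared_ptr<Node> prevNode = current->prev.lock();
            prevNode->next = current->next;    // write while only read-locked
            current->next->prev = prevNode;
            current = prevNode->next;
        }
    };

Restoring the plain std::mutex (as this revert does) gives the iterator
exclusive access for its lifetime, so erasing through it is safe again.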
Author: Stephen Seo
Date:   2023-07-22 17:28:18 +09:00
Parent: ce092f81f9
Commit: 611287b377

5 changed files with 29 additions and 234 deletions

CMakeLists.txt

@@ -5,7 +5,6 @@ set(UDPC_VERSION 1.0)
set(UDPC_SOURCES
src/UDPConnection.cpp
src/CXX11_shared_spin_lock.cpp
)
add_compile_options(
@@ -63,7 +62,6 @@ if(CMAKE_BUILD_TYPE MATCHES "Debug")
find_package(GTest QUIET)
if(GTEST_FOUND)
set(UDPC_UnitTest_SOURCES
src/CXX11_shared_spin_lock.cpp
src/test/UDPC_UnitTest.cpp
src/test/TestTSLQueue.cpp
src/test/TestUDPC.cpp

src/CXX11_shared_spin_lock.cpp

@@ -1,74 +0,0 @@
#include "CXX11_shared_spin_lock.hpp"
UDPC::Badge::Badge() :
isValid(true)
{}
UDPC::SharedSpinLock::Ptr UDPC::SharedSpinLock::newInstance() {
Ptr sharedSpinLock = Ptr(new SharedSpinLock());
sharedSpinLock->selfWeakPtr = sharedSpinLock;
return sharedSpinLock;
}
UDPC::SharedSpinLock::SharedSpinLock() :
selfWeakPtr(),
mutex(),
read(0),
write(false)
{}
UDPC::LockObj<false> UDPC::SharedSpinLock::spin_read_lock() {
while (true) {
std::lock_guard<std::mutex> lock(mutex);
if (!write.load()) {
++read;
return LockObj<false>(selfWeakPtr, Badge{});
}
}
}
UDPC::LockObj<false> UDPC::SharedSpinLock::try_spin_read_lock() {
std::lock_guard<std::mutex> lock(mutex);
if (!write.load()) {
++read;
return LockObj<false>(selfWeakPtr, Badge{});
}
return LockObj<false>(Badge{});
}
void UDPC::SharedSpinLock::read_unlock(UDPC::Badge &&badge) {
if (badge.isValid) {
std::lock_guard<std::mutex> lock(mutex);
if (read.load() > 0) {
--read;
badge.isValid = false;
}
}
}
UDPC::LockObj<true> UDPC::SharedSpinLock::spin_write_lock() {
while (true) {
std::lock_guard<std::mutex> lock(mutex);
if (!write.load() && read.load() == 0) {
write.store(true);
return LockObj<true>(selfWeakPtr, Badge{});
}
}
}
UDPC::LockObj<true> UDPC::SharedSpinLock::try_spin_write_lock() {
std::lock_guard<std::mutex> lock(mutex);
if (!write.load() && read.load() == 0) {
write.store(true);
return LockObj<true>(selfWeakPtr, Badge{});
}
return LockObj<true>(Badge{});
}
void UDPC::SharedSpinLock::write_unlock(UDPC::Badge &&badge) {
if (badge.isValid) {
std::lock_guard<std::mutex> lock(mutex);
write.store(false);
badge.isValid = false;
}
}

src/CXX11_shared_spin_lock.hpp

@@ -1,128 +0,0 @@
#ifndef UDPC_CXX11_SHARED_SPIN_LOCK_H_
#define UDPC_CXX11_SHARED_SPIN_LOCK_H_
#include <memory>
#include <mutex>
#include <atomic>
namespace UDPC {
// Forward declaration for LockObj.
class SharedSpinLock;
class Badge {
public:
// Disallow copy.
Badge(const Badge&) = delete;
Badge& operator=(const Badge&) = delete;
// Allow move.
Badge(Badge&&) = default;
Badge& operator=(Badge&&) = default;
private:
friend class SharedSpinLock;
// Can only be created by SharedSpinLock.
Badge();
bool isValid;
};
template <bool IsWriteObj>
class LockObj {
public:
~LockObj();
// Disallow copy.
LockObj(const LockObj&) = delete;
LockObj& operator=(const LockObj&) = delete;
// Allow move.
LockObj(LockObj&&) = default;
LockObj& operator=(LockObj&&) = default;
bool isValid() const;
private:
friend class SharedSpinLock;
// Only can be created by SharedSpinLock.
LockObj(Badge &&badge);
LockObj(std::weak_ptr<SharedSpinLock> lockPtr, Badge &&badge);
std::weak_ptr<SharedSpinLock> weakPtrLock;
bool isLocked;
Badge badge;
};
class SharedSpinLock {
public:
using Ptr = std::shared_ptr<SharedSpinLock>;
using Weak = std::weak_ptr<SharedSpinLock>;
static Ptr newInstance();
// Disallow copy.
SharedSpinLock(const SharedSpinLock&) = delete;
SharedSpinLock& operator=(const SharedSpinLock&) = delete;
// Allow move.
SharedSpinLock(SharedSpinLock&&) = default;
SharedSpinLock& operator=(SharedSpinLock&&) = default;
LockObj<false> spin_read_lock();
LockObj<false> try_spin_read_lock();
void read_unlock(Badge&&);
LockObj<true> spin_write_lock();
LockObj<true> try_spin_write_lock();
void write_unlock(Badge&&);
private:
SharedSpinLock();
Weak selfWeakPtr;
std::mutex mutex;
std::atomic_uint read;
std::atomic_bool write;
};
template <bool IsWriteObj>
LockObj<IsWriteObj>::LockObj(Badge &&badge) :
weakPtrLock(),
isLocked(false),
badge(std::forward<Badge>(badge))
{}
template <bool IsWriteObj>
LockObj<IsWriteObj>::LockObj(SharedSpinLock::Weak lockPtr, Badge &&badge) :
weakPtrLock(lockPtr),
isLocked(true),
badge(std::forward<Badge>(badge))
{}
template <bool IsWriteObj>
LockObj<IsWriteObj>::~LockObj() {
if (!isLocked) {
return;
}
auto strongPtrLock = weakPtrLock.lock();
if (strongPtrLock) {
if (IsWriteObj) {
strongPtrLock->write_unlock(std::move(badge));
} else {
strongPtrLock->read_unlock(std::move(badge));
}
}
}
template <bool IsWriteObj>
bool LockObj<IsWriteObj>::isValid() const {
return isLocked;
}
} // namespace UDPC
#endif

src/TSLQueue.hpp

@@ -10,8 +10,6 @@
#include <list>
#include <type_traits>
#include "CXX11_shared_spin_lock.hpp"
template <typename T>
class TSLQueue {
public:
@@ -64,7 +62,7 @@ class TSLQueue {
class TSLQIter {
public:
TSLQIter(UDPC::SharedSpinLock::Weak sharedSpinLockWeak,
TSLQIter(std::mutex *mutex,
std::weak_ptr<TSLQNode> currentNode,
unsigned long *msize);
~TSLQIter();
@@ -79,8 +77,7 @@ class TSLQueue {
bool remove();
private:
UDPC::SharedSpinLock::Weak sharedSpinLockWeak;
UDPC::LockObj<false> readLock;
std::mutex *mutex;
std::weak_ptr<TSLQNode> currentNode;
unsigned long *const msize;
@@ -90,7 +87,7 @@ class TSLQueue {
TSLQIter begin();
private:
UDPC::SharedSpinLock::Ptr sharedSpinLock;
std::mutex mutex;
std::shared_ptr<TSLQNode> head;
std::shared_ptr<TSLQNode> tail;
unsigned long msize;
@@ -98,7 +95,7 @@ class TSLQueue {
template <typename T>
TSLQueue<T>::TSLQueue() :
sharedSpinLock(UDPC::SharedSpinLock::newInstance()),
mutex(),
head(std::shared_ptr<TSLQNode>(new TSLQNode())),
tail(std::shared_ptr<TSLQNode>(new TSLQNode())),
msize(0)
@@ -124,8 +121,8 @@ TSLQueue<T>::TSLQueue(TSLQueue &&other)
template <typename T>
TSLQueue<T> & TSLQueue<T>::operator=(TSLQueue &&other) {
auto selfWriteLock = sharedSpinLock->spin_write_lock();
auto otherWriteLock = other.sharedSpinLock->spin_write_lock();
std::lock_guard<std::mutex> lock(mutex);
std::lock_guard<std::mutex> otherLock(other.mutex);
head = std::move(other.head);
tail = std::move(other.tail);
msize = std::move(other.msize);
@@ -133,7 +130,7 @@ TSLQueue<T> & TSLQueue<T>::operator=(TSLQueue &&other) {
template <typename T>
void TSLQueue<T>::push(const T &data) {
auto writeLock = sharedSpinLock->spin_write_lock();
std::lock_guard<std::mutex> lock(mutex);
auto newNode = std::shared_ptr<TSLQNode>(new TSLQNode());
newNode->data = std::unique_ptr<T>(new T(data));
@@ -149,8 +146,7 @@ void TSLQueue<T>::push(const T &data) {
template <typename T>
bool TSLQueue<T>::push_nb(const T &data) {
auto writeLock = sharedSpinLock->try_spin_write_lock();
if(writeLock.isValid()) {
if(mutex.try_lock()) {
auto newNode = std::shared_ptr<TSLQNode>(new TSLQNode());
newNode->data = std::unique_ptr<T>(new T(data));
@@ -163,6 +159,7 @@ bool TSLQueue<T>::push_nb(const T &data) {
tail->prev = newNode;
++msize;
mutex.unlock();
return true;
} else {
return false;
@@ -171,7 +168,7 @@ bool TSLQueue<T>::push_nb(const T &data) {
template <typename T>
std::unique_ptr<T> TSLQueue<T>::top() {
auto readLock = sharedSpinLock->spin_read_lock();
std::lock_guard<std::mutex> lock(mutex);
std::unique_ptr<T> result;
if(head->next != tail) {
assert(head->next->data);
@@ -184,20 +181,20 @@ std::unique_ptr<T> TSLQueue<T>::top() {
template <typename T>
std::unique_ptr<T> TSLQueue<T>::top_nb() {
std::unique_ptr<T> result;
auto readLock = sharedSpinLock->try_spin_read_lock();
if(readLock.isValid()) {
if(mutex.try_lock()) {
if(head->next != tail) {
assert(head->next->data);
result = std::unique_ptr<T>(new T);
*result = *head->next->data;
}
mutex.unlock();
}
return result;
}
template <typename T>
bool TSLQueue<T>::pop() {
auto writeLock = sharedSpinLock->spin_write_lock();
std::lock_guard<std::mutex> lock(mutex);
if(head->next == tail) {
return false;
} else {
@@ -214,7 +211,7 @@ bool TSLQueue<T>::pop() {
template <typename T>
std::unique_ptr<T> TSLQueue<T>::top_and_pop() {
std::unique_ptr<T> result;
auto writeLock = sharedSpinLock->spin_write_lock();
std::lock_guard<std::mutex> lock(mutex);
if(head->next != tail) {
assert(head->next->data);
result = std::unique_ptr<T>(new T);
@@ -232,7 +229,7 @@ std::unique_ptr<T> TSLQueue<T>::top_and_pop() {
template <typename T>
std::unique_ptr<T> TSLQueue<T>::top_and_pop_and_empty(bool *isEmpty) {
std::unique_ptr<T> result;
auto writeLock = sharedSpinLock->spin_write_lock();
std::lock_guard<std::mutex> lock(mutex);
if(head->next == tail) {
if(isEmpty) {
*isEmpty = true;
@@ -258,7 +255,7 @@ std::unique_ptr<T> TSLQueue<T>::top_and_pop_and_empty(bool *isEmpty) {
template <typename T>
std::unique_ptr<T> TSLQueue<T>::top_and_pop_and_rsize(unsigned long *rsize) {
std::unique_ptr<T> result;
auto writeLock = sharedSpinLock->spin_write_lock();
std::lock_guard<std::mutex> lock(mutex);
if(head->next == tail) {
if(rsize) {
*rsize = 0;
@@ -283,7 +280,7 @@ std::unique_ptr<T> TSLQueue<T>::top_and_pop_and_rsize(unsigned long *rsize) {
template <typename T>
void TSLQueue<T>::clear() {
auto writeLock = sharedSpinLock->spin_write_lock();
std::lock_guard<std::mutex> lock(mutex);
head->next = tail;
tail->prev = head;
@@ -292,13 +289,13 @@ void TSLQueue<T>::clear() {
template <typename T>
bool TSLQueue<T>::empty() {
auto readLock = sharedSpinLock->spin_read_lock();
std::lock_guard<std::mutex> lock(mutex);
return head->next == tail;
}
template <typename T>
unsigned long TSLQueue<T>::size() {
auto readLock = sharedSpinLock->spin_read_lock();
std::lock_guard<std::mutex> lock(mutex);
return msize;
}
@@ -316,17 +313,20 @@ bool TSLQueue<T>::TSLQNode::isNormal() const {
}
template <typename T>
TSLQueue<T>::TSLQIter::TSLQIter(UDPC::SharedSpinLock::Weak lockWeak,
TSLQueue<T>::TSLQIter::TSLQIter(std::mutex *mutex,
std::weak_ptr<TSLQNode> currentNode,
unsigned long *msize) :
sharedSpinLockWeak(lockWeak),
readLock(lockWeak.lock()->spin_read_lock()),
mutex(mutex),
currentNode(currentNode),
msize(msize)
{}
{
mutex->lock();
}
template <typename T>
TSLQueue<T>::TSLQIter::~TSLQIter() {}
TSLQueue<T>::TSLQIter::~TSLQIter() {
mutex->unlock();
}
template <typename T>
std::unique_ptr<T> TSLQueue<T>::TSLQIter::current() {
@@ -389,7 +389,7 @@ bool TSLQueue<T>::TSLQIter::remove() {
template <typename T>
typename TSLQueue<T>::TSLQIter TSLQueue<T>::begin() {
return TSLQIter(sharedSpinLock, head->next, &msize);
return TSLQIter(&mutex, head->next, &msize);
}
#endif

src/test/TestTSLQueue.cpp

@@ -143,8 +143,7 @@ TEST(TSLQueue, Iterator) {
// test that lock is held by iterator
EXPECT_FALSE(q.push_nb(10));
op = q.top_nb();
// Getting top and iterator use the read lock, so this should be true.
EXPECT_TRUE(op);
EXPECT_FALSE(op);
// backwards iteration
EXPECT_TRUE(iter.prev());