Skip to content

Fix data race, GitHub #1085 #1088

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits on Mar 27, 2019
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 26 additions & 33 deletions Release/src/pplx/pplx.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,8 @@
#include "stdafx.h"

#if !defined(_WIN32) || CPPREST_FORCE_PPLX

#include "pplx/pplx.h"

// Disable false alarm code analyze warning
#if defined(_MSC_VER)
#pragma warning(disable : 26165 26110)
#endif
#include <atomic>

namespace pplx
{
Expand All @@ -36,24 +31,16 @@ class _Spin_lock

void lock()
{
if (details::atomic_compare_exchange(_M_lock, 1l, 0l) != 0l)
while (_M_lock.test_and_set())
{
do
{
pplx::details::platform::YieldExecution();

} while (details::atomic_compare_exchange(_M_lock, 1l, 0l) != 0l);
pplx::details::platform::YieldExecution();
}
}

void unlock()
{
// fence for release semantics
details::atomic_exchange(_M_lock, 0l);
}
void unlock() { _M_lock.clear(); }

private:
atomic_long _M_lock;
std::atomic_flag _M_lock;
};

typedef ::pplx::scoped_lock<_Spin_lock> _Scoped_spin_lock;
Expand All @@ -63,59 +50,65 @@ static struct _pplx_g_sched_t
{
typedef std::shared_ptr<pplx::scheduler_interface> sched_ptr;

_pplx_g_sched_t() { m_state = post_ctor; }
_pplx_g_sched_t() { m_state.store(post_ctor, std::memory_order_relaxed); }

~_pplx_g_sched_t() { m_state = post_dtor; }
~_pplx_g_sched_t() { m_state.store(post_dtor, std::memory_order_relaxed); }

sched_ptr get_scheduler()
{
switch (m_state)
sched_ptr result;
switch (m_state.load(std::memory_order_relaxed))
{
case post_ctor:
// This is the 99.9% case.

if (!m_scheduler)
{
::pplx::details::_Scoped_spin_lock lock(m_spinlock);
if (!m_scheduler)
{
m_scheduler = std::make_shared<::pplx::default_scheduler_t>();
}
}

return m_scheduler;
result = m_scheduler;
} // unlock

break;
default:
// This case means the global m_scheduler is not available.
// We spin off an individual scheduler instead.
return std::make_shared<::pplx::default_scheduler_t>();
result = std::make_shared<::pplx::default_scheduler_t>();
break;
}

return result;
}

void set_scheduler(sched_ptr scheduler)
{
if (m_state == pre_ctor || m_state == post_dtor)
const auto localState = m_state.load(std::memory_order_relaxed);
if (localState == pre_ctor || localState == post_dtor)
{
throw invalid_operation("Scheduler cannot be initialized now");
}

::pplx::details::_Scoped_spin_lock lock(m_spinlock);

if (m_scheduler != nullptr)
if (m_scheduler)
{
throw invalid_operation("Scheduler is already initialized");
}

m_scheduler = std::move(scheduler);
}

enum
enum m_state_values
{
pre_ctor = 0,
post_ctor = 1,
post_dtor = 2
} m_state;
pre_ctor,
post_ctor,
post_dtor
};

private:
std::atomic<m_state_values> m_state;
pplx::details::_Spin_lock m_spinlock;
sched_ptr m_scheduler;
} _pplx_g_sched;
Expand Down