#3488 Split mesh thread mutexes
Tracy shows that the mesh repo thread waits a lot on mutexes.
Split the queues across separate in and out mutexes, so that new work can
be scheduled by the main thread without blocking the handling of finished
results.
akleshchev committed Feb 3, 2025
1 parent 314ef53 commit 33303b9
Showing 2 changed files with 60 additions and 39 deletions.
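
The heart of the change, before the diff: LLMeshRepoThread previously pushed inbound requests and published finished results under the same mMutex, so the main thread scheduling new work contended with its own result handling. The commit gives the outbound queues a dedicated mLoadedMutex and the mPendingLOD bookkeeping a dedicated mPendingMutex. A minimal standalone sketch of the split-mutex pattern, not viewer code (WorkerQueues, Request and Result are illustrative stand-ins):

#include <condition_variable>
#include <deque>
#include <mutex>

struct Request { int id; };
struct Result  { int id; };

class WorkerQueues
{
public:
    // Main thread: schedule new work. Touches only the inbound mutex.
    void submit(const Request& req)
    {
        {
            std::lock_guard<std::mutex> lock(mInMutex);
            mInQ.push_back(req);
        }
        mSignal.notify_one(); // plays the role of LLCondition* mSignal
    }

    // Worker thread: drain inbound work, publish outbound results.
    void runOnce()
    {
        std::deque<Request> work;
        {
            std::lock_guard<std::mutex> lock(mInMutex);
            work.swap(mInQ); // release mInMutex before the heavy lifting
        }
        for (const Request& req : work)
        {
            Result res{ req.id }; // stand-in for decoding a mesh
            std::lock_guard<std::mutex> lock(mOutMutex);
            mOutQ.push_back(res);
        }
    }

    // Main thread: collect results. Touches only the outbound mutex, so it
    // never waits behind submit() the way a single shared mutex forces.
    void drainResults(std::deque<Result>& out)
    {
        std::lock_guard<std::mutex> lock(mOutMutex);
        out.swap(mOutQ);
    }

private:
    std::mutex mInMutex;  // guards mInQ, like mMutex guarding mHeaderReqQ/mLODReqQ
    std::mutex mOutMutex; // guards mOutQ, like the new mLoadedMutex
    std::deque<Request> mInQ;
    std::deque<Result>  mOutQ;
    std::condition_variable mSignal;
};

With one shared mutex, submit() and drainResults() serialize against each other even though they touch disjoint data; with the split, each side only ever contends with runOnce(), and only on the queue it actually uses.
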
95 changes: 57 additions & 38 deletions indra/newview/llmeshrepository.cpp
@@ -255,12 +255,12 @@
 //   mSkinInfoQ              mMutex        rw.repo.mMutex, rw.main.mMutex [5] (was: [0])
 //   mDecompositionRequests  mMutex        rw.repo.mMutex, ro.repo.none [5]
 //   mPhysicsShapeRequests   mMutex        rw.repo.mMutex, ro.repo.none [5]
-//   mDecompositionQ         mMutex        rw.repo.mMutex, rw.main.mMutex [5] (was: [0])
+//   mDecompositionQ         mMutex        rw.repo.mLoadedMutex, rw.main.mLoadedMutex [5] (was: [0])
 //   mHeaderReqQ             mMutex        ro.repo.none [5], rw.repo.mMutex, rw.any.mMutex
 //   mLODReqQ                mMutex        ro.repo.none [5], rw.repo.mMutex, rw.any.mMutex
-//   mUnavailableQ           mMutex        rw.repo.none [0], ro.main.none [5], rw.main.mMutex
-//   mLoadedQ                mMutex        rw.repo.mMutex, ro.main.none [5], rw.main.mMutex
-//   mPendingLOD             mMutex        rw.repo.mMutex, rw.any.mMutex
+//   mUnavailableQ           mMutex        rw.repo.none [0], ro.main.none [5], rw.main.mLoadedMutex
+//   mLoadedQ                mMutex        rw.repo.mLoadedMutex, ro.main.none [5], rw.main.mLoadedMutex
+//   mPendingLOD             mMutex        rw.repo.mPendingMutex, rw.any.mPendingMutex
 //   mGetMeshCapability      mMutex        rw.main.mMutex, ro.repo.mMutex (was: [0])
 //   mGetMesh2Capability     mMutex        rw.main.mMutex, ro.repo.mMutex (was: [0])
 //   mGetMeshVersion         mMutex        rw.main.mMutex, ro.repo.mMutex
@@ -850,6 +850,8 @@ LLMeshRepoThread::LLMeshRepoThread()
 
     mMutex = new LLMutex();
     mHeaderMutex = new LLMutex();
+    mLoadedMutex = new LLMutex();
+    mPendingMutex = new LLMutex();
     mSignal = new LLCondition();
     mHttpRequest = new LLCore::HttpRequest;
     mHttpOptions = LLCore::HttpOptions::ptr_t(new LLCore::HttpOptions);
@@ -893,6 +895,10 @@ LLMeshRepoThread::~LLMeshRepoThread()
     mMutex = NULL;
     delete mHeaderMutex;
     mHeaderMutex = NULL;
+    delete mLoadedMutex;
+    mLoadedMutex = NULL;
+    delete mPendingMutex;
+    mPendingMutex = NULL;
     delete mSignal;
     mSignal = NULL;
     delete[] mDiskCacheBuffer;
@@ -972,7 +978,7 @@ void LLMeshRepoThread::run()
                 }
                 else
                 {
-                    LLMutexLock locker(mMutex);
+                    LLMutexLock locker(mLoadedMutex);
                     mSkinUnavailableQ.push_back(req);
                     LL_DEBUGS() << "mSkinReqQ failed: " << req.mId << LL_ENDL;
                 }
@@ -1021,7 +1027,7 @@ void LLMeshRepoThread::run()
                 else
                 {
                     // too many fails
-                    LLMutexLock lock(mMutex);
+                    LLMutexLock lock(mLoadedMutex);
                     mUnavailableQ.push_back(req);
                     LL_WARNS() << "Failed to load " << req.mMeshParams << " , skip" << LL_ENDL;
                 }
@@ -1217,15 +1223,11 @@ void LLMeshRepoThread::lockAndLoadMeshLOD(const LLVolumeParams& mesh_params, S32
 void LLMeshRepoThread::loadMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
 { //could be called from any thread
     const LLUUID& mesh_id = mesh_params.getSculptID();
-    LLMutexLock lock(mMutex);
-    LLMutexLock header_lock(mHeaderMutex);
     loadMeshLOD(mesh_id, mesh_params, lod);
 }
 
 void LLMeshRepoThread::loadMeshLODs(const lod_list_t& list)
 { //could be called from any thread
-    LLMutexLock lock(mMutex);
-    LLMutexLock header_lock(mHeaderMutex);
     for (auto lod_pair : list)
     {
         const LLVolumeParams& mesh_params = lod_pair.first;
@@ -1237,19 +1239,25 @@ void LLMeshRepoThread::loadMeshLODs(const lod_list_t& list)
 
 void LLMeshRepoThread::loadMeshLOD(const LLUUID& mesh_id, const LLVolumeParams& mesh_params, S32 lod)
 {
-    // must be mutex locked by caller
-    mesh_header_map::iterator iter = mMeshHeader.find(mesh_id);
-    if (iter != mMeshHeader.end())
+    bool has_header = false;
+    {
+        LLMutexLock header_lock(mHeaderMutex);
+        mesh_header_map::iterator iter = mMeshHeader.find(mesh_id);
+        has_header = iter != mMeshHeader.end();
+    }
+    if (has_header)
     { //if we have the header, request LOD byte range
 
         LODRequest req(mesh_params, lod);
         {
+            LLMutexLock lock(mMutex);
             mLODReqQ.push(req);
             LLMeshRepository::sLODProcessing++;
         }
     }
     else
     {
+        LLMutexLock lock(mPendingMutex);
         HeaderRequest req(mesh_params);
         pending_lod_map::iterator pending = mPendingLOD.find(mesh_id);
 
@@ -1269,10 +1277,12 @@ void LLMeshRepoThread::loadMeshLOD(const LLUUID& mesh_id, const LLVolumeParams&
         else
         {
             //if no header request is pending, fetch header
-            mHeaderReqQ.push(req);
             auto& array = mPendingLOD[mesh_id];
             std::fill(array.begin(), array.end(), 0);
             array[lod]++;
+
+            LLMutexLock lock(mMutex);
+            mHeaderReqQ.push(req);
         }
     }
 }
@@ -1451,6 +1461,7 @@ bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id)
         U8* buffer = getDiskCacheBuffer(size);
         if (!buffer)
         {
+            LLMutexLock locker(mLoadedMutex);
            mSkinUnavailableQ.emplace_back(mesh_id);
            return true;
        }
@@ -1499,13 +1510,13 @@ bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id)
             }
             else
             {
-                LLMutexLock locker(mMutex);
+                LLMutexLock locker(mLoadedMutex);
                 mSkinUnavailableQ.emplace_back(mesh_id);
             }
         }
         else
         {
-            LLMutexLock locker(mMutex);
+            LLMutexLock locker(mLoadedMutex);
             mSkinUnavailableQ.emplace_back(mesh_id);
         }
     }
@@ -1884,7 +1895,7 @@ bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
         U8* buffer = getDiskCacheBuffer(size);
         if (!buffer)
         {
-            LLMutexLock lock(mMutex);
+            LLMutexLock lock(mLoadedMutex);
             mUnavailableQ.push_back(LODRequest(mesh_params, lod));
             return true;
         }
@@ -1938,13 +1949,13 @@ bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
             }
             else
             {
-                LLMutexLock lock(mMutex);
+                LLMutexLock lock(mLoadedMutex);
                 mUnavailableQ.push_back(LODRequest(mesh_params, lod));
             }
         }
         else
         {
-            LLMutexLock lock(mMutex);
+            LLMutexLock lock(mLoadedMutex);
             mUnavailableQ.push_back(LODRequest(mesh_params, lod));
         }
     }
@@ -2079,7 +2090,7 @@ EMeshProcessingResult LLMeshRepoThread::headerReceived(const LLVolumeParams& mes
     std::array<S32, LLModel::NUM_LODS> pending_lods;
     bool has_pending_lods = false;
     {
-        LLMutexLock lock(mMutex); // make sure only one thread access mPendingLOD at the same time.
+        LLMutexLock lock(mPendingMutex); // make sure only one thread access mPendingLOD at the same time.
         pending_lod_map::iterator iter = mPendingLOD.find(mesh_id);
         if (iter != mPendingLOD.end())
         {
@@ -2158,7 +2169,7 @@ EMeshProcessingResult LLMeshRepoThread::lodReceived(const LLVolumeParams& mesh_p
 
     LoadedMesh mesh(volume, mesh_params, lod);
     {
-        LLMutexLock lock(mMutex);
+        LLMutexLock lock(mLoadedMutex);
         mLoadedQ.push_back(mesh);
         // LLPointer is not thread safe, since we added this pointer into
         // threaded list, make sure counter gets decreased inside mutex lock
@@ -2216,7 +2227,7 @@ bool LLMeshRepoThread::skinInfoReceived(const LLUUID& mesh_id, U8* data, S32 dat
     {
         // Move the LLPointer in to the skin info queue to avoid reference
         // count modification after we leave the lock
-        LLMutexLock lock(mMutex);
+        LLMutexLock lock(mLoadedMutex);
         mSkinInfoQ.emplace_back(std::move(info));
     }
 }
@@ -2253,7 +2264,7 @@ bool LLMeshRepoThread::decompositionReceived(const LLUUID& mesh_id, U8* data, S3
         LLModel::Decomposition* d = new LLModel::Decomposition(decomp);
         d->mMeshID = mesh_id;
         {
-            LLMutexLock lock(mMutex);
+            LLMutexLock lock(mLoadedMutex);
             mDecompositionQ.push_back(d);
         }
     }
@@ -2303,7 +2314,7 @@ EMeshProcessingResult LLMeshRepoThread::physicsShapeReceived(const LLUUID& mesh_
     }
 
     {
-        LLMutexLock lock(mMutex);
+        LLMutexLock lock(mLoadedMutex);
         mDecompositionQ.push_back(d);
     }
     return MESH_OK;
@@ -3101,11 +3112,11 @@ void LLMeshRepoThread::notifyLoadedMeshes()
     {
         std::deque<LoadedMesh> loaded_queue;
 
-        mMutex->lock();
+        mLoadedMutex->lock();
         if (!mLoadedQ.empty())
         {
             loaded_queue.swap(mLoadedQ);
-            mMutex->unlock();
+            mLoadedMutex->unlock();
 
             update_metrics = true;
 
@@ -3123,17 +3134,21 @@ void LLMeshRepoThread::notifyLoadedMeshes()
                 }
             }
         }
+        else
+        {
+            mLoadedMutex->unlock();
+        }
     }
 
     if (!mUnavailableQ.empty())
     {
         std::deque<LODRequest> unavil_queue;
 
-        mMutex->lock();
+        mLoadedMutex->lock();
         if (!mUnavailableQ.empty())
         {
             unavil_queue.swap(mUnavailableQ);
-            mMutex->unlock();
+            mLoadedMutex->unlock();
 
             update_metrics = true;
 
@@ -3143,11 +3158,15 @@ void LLMeshRepoThread::notifyLoadedMeshes()
                 gMeshRepo.notifyMeshUnavailable(req.mMeshParams, req.mLOD);
             }
         }
+        else
+        {
+            mLoadedMutex->unlock();
+        }
     }
 
     if (!mSkinInfoQ.empty() || !mSkinUnavailableQ.empty() || ! mDecompositionQ.empty())
     {
-        if (mMutex->trylock())
+        if (mLoadedMutex->trylock())
         {
             std::deque<LLPointer<LLMeshSkinInfo>> skin_info_q;
             std::deque<UUIDBasedRequest> skin_info_unavail_q;
@@ -3168,7 +3187,7 @@ void LLMeshRepoThread::notifyLoadedMeshes()
                 decomp_q.swap(mDecompositionQ);
             }
 
-            mMutex->unlock();
+            mLoadedMutex->unlock();
 
             // Process the elements free of the lock
             while (! skin_info_q.empty())
@@ -3409,7 +3428,7 @@ void LLMeshHeaderHandler::processFailure(LLCore::HttpStatus status)
                        << LL_ENDL;
 
     // Can't get the header so none of the LODs will be available
-    LLMutexLock lock(gMeshRepo.mThread->mMutex);
+    LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
     for (int i(0); i < LLVolumeLODGroup::NUM_LODS; ++i)
     {
         gMeshRepo.mThread->mUnavailableQ.push_back(LLMeshRepoThread::LODRequest(mMeshParams, i));
@@ -3439,7 +3458,7 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * /* body */, S32 /* b
                            << LL_ENDL;
 
         // Can't get the header so none of the LODs will be available
-        LLMutexLock lock(gMeshRepo.mThread->mMutex);
+        LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
         for (int i(0); i < LLVolumeLODGroup::NUM_LODS; ++i)
         {
             gMeshRepo.mThread->mUnavailableQ.push_back(LLMeshRepoThread::LODRequest(mMeshParams, i));
@@ -3521,7 +3540,7 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * /* body */, S32 /* b
             gMeshRepo.mThread->mHeaderMutex->unlock();
 
             // headerReceived() parsed header, but header's data is invalid so none of the LODs will be available
-            LLMutexLock lock(gMeshRepo.mThread->mMutex);
+            LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
             for (int i(0); i < LLVolumeLODGroup::NUM_LODS; ++i)
             {
                 gMeshRepo.mThread->mUnavailableQ.push_back(LLMeshRepoThread::LODRequest(mMeshParams, i));
@@ -3550,7 +3569,7 @@ void LLMeshLODHandler::processFailure(LLCore::HttpStatus status)
                        << " (" << status.toTerseString() << "). Not retrying."
                        << LL_ENDL;
 
-    LLMutexLock lock(gMeshRepo.mThread->mMutex);
+    LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
     gMeshRepo.mThread->mUnavailableQ.push_back(LLMeshRepoThread::LODRequest(mMeshParams, mLOD));
 }
 
@@ -3610,7 +3629,7 @@ void LLMeshLODHandler::processData(LLCore::BufferArray * /* body */, S32 /* body
                                << " Data size: " << data_size
                                << " Not retrying."
                                << LL_ENDL;
-            LLMutexLock lock(gMeshRepo.mThread->mMutex);
+            LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
             gMeshRepo.mThread->mUnavailableQ.push_back(LLMeshRepoThread::LODRequest(mMeshParams, mLOD));
         }
     }
@@ -3621,7 +3640,7 @@ void LLMeshLODHandler::processData(LLCore::BufferArray * /* body */, S32 /* body
                            << " LOD: " << mLOD
                            << " Data size: " << data_size
                            << LL_ENDL;
-        LLMutexLock lock(gMeshRepo.mThread->mMutex);
+        LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
         gMeshRepo.mThread->mUnavailableQ.push_back(LLMeshRepoThread::LODRequest(mMeshParams, mLOD));
     }
 }
Expand All @@ -3641,7 +3660,7 @@ void LLMeshSkinInfoHandler::processFailure(LLCore::HttpStatus status)
<< ", Reason: " << status.toString()
<< " (" << status.toTerseString() << "). Not retrying."
<< LL_ENDL;
LLMutexLock lock(gMeshRepo.mThread->mMutex);
LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
gMeshRepo.mThread->mSkinUnavailableQ.emplace_back(mMeshID);
}

@@ -3697,7 +3716,7 @@ void LLMeshSkinInfoHandler::processData(LLCore::BufferArray * /* body */, S32 /*
         LL_WARNS(LOG_MESH) << "Error during mesh skin info processing. ID: " << mMeshID
                            << ", Unknown reason. Not retrying."
                            << LL_ENDL;
-        LLMutexLock lock(gMeshRepo.mThread->mMutex);
+        LLMutexLock lock(gMeshRepo.mThread->mLoadedMutex);
         gMeshRepo.mThread->mSkinUnavailableQ.emplace_back(mMeshID);
     }
 }
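One pattern in the notifyLoadedMeshes() hunks above is worth spelling out: swap the shared queue into a local container while holding the new mLoadedMutex, release it, then process the elements with no lock held; for the skin-info and decomposition queues a trylock lets the main thread skip a frame instead of stalling when the repo thread holds the lock. A standalone sketch under assumed names (gLoadedMutex and gLoadedQ stand in for the members, int for the payload):

#include <deque>
#include <mutex>

std::mutex      gLoadedMutex; // stands in for mLoadedMutex
std::deque<int> gLoadedQ;     // stands in for mLoadedQ

void drainLoaded()
{
    std::deque<int> local;
    if (gLoadedMutex.try_lock()) // don't stall the main thread if busy
    {
        local.swap(gLoadedQ);    // O(1) handoff of the whole queue
        gLoadedMutex.unlock();   // release before any per-item work
    }
    for (int item : local)
    {
        // Process outside the lock; the repo thread can keep publishing
        // into gLoadedQ concurrently without waiting on us.
        (void)item;
    }
}
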
4 changes: 3 additions & 1 deletion indra/newview/llmeshrepository.h
@@ -386,6 +386,8 @@ class LLMeshRepoThread : public LLThread
 
     LLMutex* mMutex;
     LLMutex* mHeaderMutex;
+    LLMutex* mLoadedMutex;
+    LLMutex* mPendingMutex;
     LLCondition* mSignal;
 
     //map of known mesh headers
@@ -574,7 +576,7 @@ class LLMeshRepoThread : public LLThread
                                       size_t offset, size_t len,
                                       const LLCore::HttpHandler::ptr_t &handler);
 
-    // Mutex: mMutex must be alerady locked when calling
+    // Mutex: acquires mPendingMutex, mMutex and mHeaderMutex as needed
     void loadMeshLOD(const LLUUID &mesh_id, const LLVolumeParams& mesh_params, S32 lod);
 
     // Threads: Repo thread only
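The comment change just above documents the reworked loadMeshLOD(): probe mMeshHeader briefly under mHeaderMutex, record the result in a local, release, and only then take the mutexes the chosen branch needs. A simplified sketch of that shape (all g-prefixed names are stand-ins, and the real function's per-LOD bookkeeping in mPendingLOD is reduced to a comment):

#include <map>
#include <mutex>
#include <queue>

std::mutex gHeaderMutex;        // stands in for mHeaderMutex
std::mutex gQueueMutex;         // stands in for mMutex (request queues)
std::mutex gPendingMutex;       // stands in for mPendingMutex
std::map<int, int> gMeshHeader; // mesh id -> parsed header
std::queue<int> gLODReqQ;       // meshes whose LOD bytes can be fetched
std::queue<int> gHeaderReqQ;    // meshes that still need a header fetch

void loadMeshLOD(int mesh_id)
{
    bool has_header = false;
    {
        std::lock_guard<std::mutex> lock(gHeaderMutex);
        has_header = gMeshHeader.count(mesh_id) != 0;
    } // gHeaderMutex is released before any other mutex is taken

    if (has_header)
    {
        // Header known: queue a LOD byte-range request.
        std::lock_guard<std::mutex> lock(gQueueMutex);
        gLODReqQ.push(mesh_id);
    }
    else
    {
        // No header yet: record the pending LOD, then queue a header fetch.
        std::lock_guard<std::mutex> pending_lock(gPendingMutex);
        // ... per-LOD pending bookkeeping would go here ...
        std::lock_guard<std::mutex> queue_lock(gQueueMutex);
        gHeaderReqQ.push(mesh_id);
    }
}

Because mHeaderMutex never nests with the queue mutexes, the loadMeshLOD()/loadMeshLODs() wrappers earlier in the diff could drop the blanket mMutex and mHeaderMutex locks they used to take for the whole call.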
