diff --git a/src/ripple/nodestore/backend/NuDBFactory.cpp b/src/ripple/nodestore/backend/NuDBFactory.cpp
index 74afe2ed7f1..3bb4253dbff 100644
--- a/src/ripple/nodestore/backend/NuDBFactory.cpp
+++ b/src/ripple/nodestore/backend/NuDBFactory.cpp
@@ -266,7 +266,7 @@ class NuDBBackend : public Backend, public BatchWriter::Callback
     void
     store(std::shared_ptr<NodeObject> const& no) override
     {
-        batch_.store(no);
+        batch_.store(no, j_);
     }
 
     void
diff --git a/src/ripple/nodestore/backend/RocksDBFactory.cpp b/src/ripple/nodestore/backend/RocksDBFactory.cpp
index b34560dba89..9443e91f233 100644
--- a/src/ripple/nodestore/backend/RocksDBFactory.cpp
+++ b/src/ripple/nodestore/backend/RocksDBFactory.cpp
@@ -343,7 +343,7 @@ class RocksDBBackend : public Backend, public BatchWriter::Callback
     void
     store(std::shared_ptr<NodeObject> const& object) override
     {
-        m_batch.store(object);
+        m_batch.store(object, m_journal);
     }
 
     void
diff --git a/src/ripple/nodestore/impl/BatchWriter.cpp b/src/ripple/nodestore/impl/BatchWriter.cpp
index 692032016cc..4ce6f62fad4 100644
--- a/src/ripple/nodestore/impl/BatchWriter.cpp
+++ b/src/ripple/nodestore/impl/BatchWriter.cpp
@@ -37,21 +37,31 @@ BatchWriter::~BatchWriter()
 }
 
 void
-BatchWriter::store(std::shared_ptr<NodeObject> const& object)
+BatchWriter::store(
+    std::shared_ptr<NodeObject> const& object,
+    beast::Journal const& j)
 {
     std::unique_lock<decltype(mWriteMutex)> sl(mWriteMutex);
 
-    // If the batch has reached its limit, we wait
-    // until the batch writer is finished
-    while (mWriteSet.size() >= batchWriteLimitSize)
-        mWriteCondition.wait(sl);
-
     mWriteSet.push_back(object);
 
     if (!mWritePending)
     {
         mWritePending = true;
 
+        // Log if the write batch size is too big. The trade-off here
+        // is that new ledgers can't be persisted if a limit is enforced,
+        // which causes desync. Versus eventual memory exhaustion if
+        // there are no limits. But we at least stay in sync longer in the
+        // latter case.
+        if (mWriteSet.size() >= batchWriteLimitSize)
+        {
+            JLOG(j.warn())
+                << "Pending write batch size " << mWriteSet.size()
+                << " exceeds threshold of " << batchWriteLimitSize
+                << ". This may be caused by slow i/o. More memory is consumed "
+                   " as pending write count increases.";
+        }
         m_scheduler.scheduleTask(*this);
     }
 }
diff --git a/src/ripple/nodestore/impl/BatchWriter.h b/src/ripple/nodestore/impl/BatchWriter.h
index 9ce4f120329..2c664e1411b 100644
--- a/src/ripple/nodestore/impl/BatchWriter.h
+++ b/src/ripple/nodestore/impl/BatchWriter.h
@@ -20,6 +20,7 @@
 #ifndef RIPPLE_NODESTORE_BATCHWRITER_H_INCLUDED
 #define RIPPLE_NODESTORE_BATCHWRITER_H_INCLUDED
 
+#include <ripple/beast/utility/Journal.h>
 #include <ripple/nodestore/Scheduler.h>
 #include <ripple/nodestore/Types.h>
 #include <condition_variable>
@@ -68,7 +69,7 @@ class BatchWriter : private Task
         write the batch out. */
     void
-    store(std::shared_ptr<NodeObject> const& object);
+    store(std::shared_ptr<NodeObject> const& object, beast::Journal const& j);
 
     /** Get an estimate of the amount of writing I/O pending. */
     int