From e883a0581b21638320bb7919242284187723cc77 Mon Sep 17 00:00:00 2001 From: Odysseas Georgoudis Date: Tue, 21 Mar 2023 00:30:51 +0000 Subject: [PATCH] add backend_thread_transit_events_hard_limit and backend_thread_transit_events_soft_limit --- CHANGELOG.md | 5 ++- README.md | 3 +- quill/include/quill/Config.h | 30 ++++++++++++-- .../quill/detail/backend/BackendWorker.h | 39 ++++++++++++------- 4 files changed, 57 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 836f2a72..9d90db81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,8 +63,11 @@ not a runtime option, you still need to recompile your target and pass `QUILL_USE_BOUNDED_QUEUE` as a flag. See [example_bounded_queue_message_dropping.cpp](https://github.com/odygrd/quill/blob/master/examples/example_bounded_queue_message_dropping.cpp) - Added `QUILL_USE_BOUNDED_BLOCKING_QUEUE` option that makes possible to use a bounded queue which blocks the hot - thread rather than dropping messages ([#270](https://github.com/odygrd/quill/pull/270)). + thread rather than dropping messages ([#270](https://github.com/odygrd/quill/pull/270)) See [example_bounded_queue_blocking.cpp](https://github.com/odygrd/quill/blob/master/examples/example_bounded_queue_blocking.cpp) +- Renamed `backend_thread_max_transit_events` to `backend_thread_transit_events_soft_limit` in + Config.h ([#270](https://github.com/odygrd/quill/pull/270)) +- Added `backend_thread_transit_events_hard_limit` in Config.h ([#270](https://github.com/odygrd/quill/pull/270)) - CMake: `QUILL_X86ARCH` and `QUILL_USE_BOUNDED_QUEUE` options have been removed. The users can decide on enabling these options on their side and quill doesn't need to be recompiled as a library. For example : ```cmake diff --git a/README.md b/README.md index ddda7f5f..9985a9ed 100644 --- a/README.md +++ b/README.md @@ -206,7 +206,8 @@ formatting, ordering the log messages from multiple hot threads and finally outp text. 
The logging thread always empties all the queues of the hot threads on the highest priority (to avoid allocating a new queue or dropping messages on the hot path). To achieve that, it internally buffers the log messages and then -writes them later when the hot thread queues are empty or when a limit is reached `backend_thread_max_transit_events`. +writes them later when the hot thread queues are empty or when a limit is +reached `backend_thread_transit_events_soft_limit`. I haven't found an easy way to compare the throughput against other logging libraries while doing asynchronous logging. For example some libraries will drop the log messages ending in producing much smaller log files than the expected, diff --git a/quill/include/quill/Config.h b/quill/include/quill/Config.h index 439c7650..4a3ddfcc 100644 --- a/quill/include/quill/Config.h +++ b/quill/include/quill/Config.h @@ -46,11 +46,33 @@ struct Config * However if the hot threads keep pushing messages to the queues * e.g logging in a loop then no logs can ever be processed. * - * This variable sets the maximum transit events number. - * When that number is reached then half of them will get flushed to the log files before - * continuing reading the SPSC queues + * When the soft limit is reached then this number of events (default 800) will be logged to the + * log files before continuing reading the SPSC queues + * + * The SPSC queues are emptied on each iteration. + * This means that the actual messages from the SPSC queues can be much more + * than the backend_thread_transit_events_soft_limit. + * + * @note This number represents a limit across ALL hot threads + */ + size_t backend_thread_transit_events_soft_limit = 800; + + /** + * The backend worker thread gives priority to reading the messages from SPSC queues from all + * the hot threads first and buffers them temporarily. 
+ *
+ * However, if the hot threads keep pushing messages to the queues
+ * e.g. logging in a loop then no logs can ever be processed.
+ *
+ * As the backend thread is buffering messages it can keep buffering forever if the hot
+ * threads keep pushing.
+ *
+ * This limit is the maximum size of the backend thread buffer. When reached, the backend worker
+ * thread will stop reading the SPSC queues until the buffer has space again.
+ *
+ * @note This is a limit PER hot thread
 */
- size_t backend_thread_max_transit_events = 800;
+ size_t backend_thread_transit_events_hard_limit = 25'000;
 /**
 * The backend worker thread pops all the SPSC queues log messages and buffers them to a local
diff --git a/quill/include/quill/detail/backend/BackendWorker.h b/quill/include/quill/detail/backend/BackendWorker.h
index 40fa2376..835d9af7 100644
--- a/quill/include/quill/detail/backend/BackendWorker.h
+++ b/quill/include/quill/detail/backend/BackendWorker.h
@@ -156,7 +156,8 @@ class BackendWorker
 std::atomic _rdtsc_clock{nullptr}; /** rdtsc clock if enabled **/
 std::chrono::nanoseconds _backend_thread_sleep_duration; /** backend_thread_sleep_duration from config **/
- size_t _max_transit_events; /** limit of transit events before start flushing, value from config */
+ size_t _transit_events_soft_limit; /** limit of transit events before start flushing, value from config */
+ size_t _thread_transit_events_hard_limit; /** limit for the transit event buffer, value from config */
 std::vector> _args; /** Format args tmp storage as member to avoid reallocation */
@@ -202,7 +203,8 @@ void BackendWorker::run()
 // enforce the user to configure a variable before the thread has started
 _backend_thread_sleep_duration = _config.backend_thread_sleep_duration;
 _backend_thread_yield = _config.backend_thread_yield;
- _max_transit_events = _config.backend_thread_max_transit_events;
+ _transit_events_soft_limit = _config.backend_thread_transit_events_soft_limit;
+ _thread_transit_events_hard_limit =
_config.backend_thread_transit_events_hard_limit;
 _empty_all_queues_before_exit = _config.backend_thread_empty_all_queues_before_exit;
 _strict_log_timestamp_order = _config.backend_thread_strict_log_timestamp_order;
 _rdtsc_resync_interval = _config.rdtsc_resync_interval;
@@ -333,10 +335,16 @@ uint32_t BackendWorker::_read_queue_messages_and_decode(QueueT& queue, ThreadCon
 std::byte* read_pos = queue.prepare_read();
- // read max of one full queue otherwise we can get stuck here forever if
- // the producer keeps producing
+ // read max of one full queue and up to the transit events hard limit, otherwise we can get stuck
+ // here forever if the producer keeps producing
 while ((total_bytes_read < queue_capacity) && read_pos)
 {
+ if (transit_event_buffer.size() == _thread_transit_events_hard_limit)
+ {
+ // stop reading the queue, we reached the transit buffer hard limit
+ return transit_event_buffer.size();
+ }
+
 std::byte* const read_begin = read_pos;
 // First we want to allocate a new TransitEvent or use an existing one
@@ -528,9 +536,15 @@ void BackendWorker::_process_transit_event(ThreadContextCollection::backend_thre
 }
 }
- assert(transit_buffer && "transit_buffer can never be a nullptr");
+ if (!transit_buffer)
+ {
+ // all buffers are empty
+ // nothing to process, return early
+ return;
+ }
 TransitEvent* transit_event = transit_buffer->front();
+ assert(transit_event && "transit_buffer is set only when transit_event is valid");
 std::pair const mf = transit_event->header.metadata_and_format_fn();
 MacroMetadata const& macro_metadata = mf.first;
@@ -668,10 +682,10 @@ void BackendWorker::_main_loop()
 if (QUILL_LIKELY(total_events != 0))
 {
 // there are buffered events to process
- if (total_events >= _max_transit_events)
+ if (total_events >= _transit_events_soft_limit)
 {
- // process half transit events
- for (size_t i = 0; i < total_events; ++i)
+ // process up to the soft limit of transit events
+ for (size_t i = 0; i < _transit_events_soft_limit; ++i)
 {
_process_transit_event(cached_thread_contexts); } @@ -684,8 +698,6 @@ void BackendWorker::_main_loop() } else { - // there was nothing to process - // None of the thread local queues had any events to process, this means we have processed // all messages in all queues We force flush all remaining messages _force_flush(); @@ -739,17 +751,16 @@ void BackendWorker::_exit() if (total_events != 0) { // there are events to process - if (total_events >= _max_transit_events) + if (total_events >= _transit_events_soft_limit) { - // process half transit events - for (size_t i = 0; i < total_events; ++i) + for (size_t i = 0; i < _transit_events_soft_limit; ++i) { _process_transit_event(cached_thread_contexts); } } else { - // process a single transit event, then give priority to the hot thread spsc queue again + // process a single transit event, then give priority to the hot thread SPSC queue again _process_transit_event(cached_thread_contexts); } }