From a94e1e9b6283821a4399071137d26cc1c1b0c191 Mon Sep 17 00:00:00 2001 From: Wangda Zhang Date: Tue, 5 Dec 2023 16:30:41 -0500 Subject: [PATCH] Add config to disable loggers and skip event logging. --- .gitignore | 8 ++++- include/constants.h | 2 ++ rlclientlib/live_model_impl.cc | 51 ++++++++++++++++++++++-------- rlclientlib/live_model_impl.h | 4 +-- unit_test/live_model_test.cc | 58 ++++++++++++++++++++++++++++++++++ 5 files changed, 106 insertions(+), 17 deletions(-) diff --git a/.gitignore b/.gitignore index 10b3dc37c..7636285ad 100644 --- a/.gitignore +++ b/.gitignore @@ -39,4 +39,10 @@ _build dist/ rl_client.egg-info/ -.cache \ No newline at end of file +.cache + +# Unit test outputs +interaction.fb.data +interaction.txt +observation.fb.data +observation.txt diff --git a/include/constants.h b/include/constants.h index 05014b471..719475c5b 100644 --- a/include/constants.h +++ b/include/constants.h @@ -28,6 +28,7 @@ const char* const HTTP_API_KEY = "http.api.key"; const char* const HTTP_API_HEADER_KEY_NAME = "http.api.header.key.name"; const char* const AUDIT_ENABLED = "audit.enabled"; const char* const AUDIT_OUTPUT_PATH = "audit.output.path"; +const char* const EVENT_LOGGING_ENABLED = "event.logging.enabled"; // Episode const char* const EPISODE_EH_HOST = "episode.eventhub.host"; @@ -146,6 +147,7 @@ const bool DEFAULT_MODEL_BACKGROUND_REFRESH = true; const int DEFAULT_VW_POOL_INIT_SIZE = 4; const int DEFAULT_PROTOCOL_VERSION = 1; const char* const DEFAULT_AUDIT_OUTPUT_PATH = "audit"; +const bool DEFAULT_EVENT_LOGGING_ENABLED = true; const char* get_default_episode_sender(); const char* get_default_observation_sender(); diff --git a/rlclientlib/live_model_impl.cc b/rlclientlib/live_model_impl.cc index ebd1c8c22..1c01f9bf3 100644 --- a/rlclientlib/live_model_impl.cc +++ b/rlclientlib/live_model_impl.cc @@ -55,7 +55,10 @@ int live_model_impl::init(api_status* status) RETURN_IF_FAIL(init_trace(status)); RETURN_IF_FAIL(init_model(status)); 
RETURN_IF_FAIL(init_model_mgmt(status)); - RETURN_IF_FAIL(init_loggers(status)); + if (_configuration.get_bool(name::EVENT_LOGGING_ENABLED, value::DEFAULT_EVENT_LOGGING_ENABLED)) + { + RETURN_IF_FAIL(init_loggers(status)); + } if (_protocol_version == 1) { @@ -104,7 +107,10 @@ int live_model_impl::choose_rank( RETURN_IF_FAIL(reset_action_order(response)); } - RETURN_IF_FAIL(_interaction_logger->log(context, flags, response, status, _learning_mode)); + if (_interaction_logger) + { + RETURN_IF_FAIL(_interaction_logger->log(context, flags, response, status, _learning_mode)); + } if (_learning_mode == APPRENTICE) { @@ -145,7 +151,10 @@ int live_model_impl::request_continuous_action(const char* event_id, string_view RETURN_IF_FAIL(_model->choose_continuous_action(context, action, pdf_value, model_version, status)); RETURN_IF_FAIL(populate_response( action, pdf_value, std::string(event_id), std::string(model_version), response, _trace_logger.get(), status)); - RETURN_IF_FAIL(_interaction_logger->log_continuous_action(context.data(), flags, response, status)); + if (_interaction_logger) + { + RETURN_IF_FAIL(_interaction_logger->log_continuous_action(context.data(), flags, response, status)); + } if (_watchdog.has_background_error_been_reported()) { @@ -209,8 +218,11 @@ int live_model_impl::request_decision( RETURN_IF_FAIL(_model->request_decision(event_ids, context_json, actions_ids, actions_pdfs, model_version, status)); RETURN_IF_FAIL(populate_response( actions_ids, actions_pdfs, event_ids, std::string(model_version), resp, _trace_logger.get(), status)); - RETURN_IF_FAIL(_interaction_logger->log_decisions( - event_ids, context_json, flags, actions_ids, actions_pdfs, model_version, status)); + if (_interaction_logger) + { + RETURN_IF_FAIL(_interaction_logger->log_decisions( + event_ids, context_json, flags, actions_ids, actions_pdfs, model_version, status)); + } // Check watchdog for any background errors. Do this at the end of function so that the work is still done. 
if (_watchdog.has_background_error_been_reported()) @@ -276,8 +288,11 @@ int live_model_impl::request_multi_slot_decision(const char* event_id, string_vi event_id, context_json, slot_ids, action_ids, action_pdfs, model_version, status)); RETURN_IF_FAIL(populate_multi_slot_response(action_ids, action_pdfs, std::string(event_id), std::string(model_version), slot_ids, resp, _trace_logger.get(), status)); - RETURN_IF_FAIL(_interaction_logger->log_decision(event_id, context_json, flags, action_ids, action_pdfs, - model_version, slot_ids, status, baseline_actions, _learning_mode)); + if (_interaction_logger) + { + RETURN_IF_FAIL(_interaction_logger->log_decision(event_id, context_json, flags, action_ids, action_pdfs, + model_version, slot_ids, status, baseline_actions, _learning_mode)); + } if (_learning_mode == APPRENTICE || _learning_mode == LOGGINGONLY) { @@ -322,8 +337,11 @@ int live_model_impl::request_multi_slot_decision(const char* event_id, string_vi RETURN_IF_FAIL(populate_multi_slot_response_detailed(action_ids, action_pdfs, std::string(event_id), std::string(model_version), slot_ids, resp, _trace_logger.get(), status)); - RETURN_IF_FAIL(_interaction_logger->log_decision(event_id, context_json, flags, action_ids, action_pdfs, - model_version, slot_ids, status, baseline_actions, _learning_mode)); + if (_interaction_logger) + { + RETURN_IF_FAIL(_interaction_logger->log_decision(event_id, context_json, flags, action_ids, action_pdfs, + model_version, slot_ids, status, baseline_actions, _learning_mode)); + } if (_learning_mode == APPRENTICE || _learning_mode == LOGGINGONLY) { @@ -346,7 +364,8 @@ int live_model_impl::report_action_taken(const char* event_id, api_status* statu // Clear previous errors if any api_status::try_clear(status); // Send the outcome event to the backend - return _outcome_logger->report_action_taken(event_id, status); + if (_outcome_logger) { return _outcome_logger->report_action_taken(event_id, status); } + return error_code::success; } int 
live_model_impl::report_action_taken(const char* primary_id, const char* secondary_id, api_status* status) @@ -354,7 +373,8 @@ int live_model_impl::report_action_taken(const char* primary_id, const char* sec // Clear previous errors if any api_status::try_clear(status); // Send the outcome event to the backend - return _outcome_logger->report_action_taken(primary_id, secondary_id, status); + if (_outcome_logger) { return _outcome_logger->report_action_taken(primary_id, secondary_id, status); } + return error_code::success; } int live_model_impl::report_outcome(const char* event_id, const char* outcome, api_status* status) @@ -658,10 +678,13 @@ int live_model_impl::request_episodic_decision(const char* event_id, const char* if (episode.size() == 1) { // Log the episode id when starting a new episode - RETURN_IF_FAIL(_episode_logger->log(episode.get_episode_id(), status)); + if (_episode_logger) { RETURN_IF_FAIL(_episode_logger->log(episode.get_episode_id(), status)); } + } + if (_interaction_logger) + { + RETURN_IF_FAIL( + _interaction_logger->log(episode.get_episode_id(), previous_id, context_patched.c_str(), flags, resp, status)); } - RETURN_IF_FAIL( - _interaction_logger->log(episode.get_episode_id(), previous_id, context_patched.c_str(), flags, resp, status)); return error_code::success; } diff --git a/rlclientlib/live_model_impl.h b/rlclientlib/live_model_impl.h index 8a5a0f237..c521ed0c8 100644 --- a/rlclientlib/live_model_impl.h +++ b/rlclientlib/live_model_impl.h @@ -135,7 +135,7 @@ int live_model_impl::report_outcome_internal(const char* event_id, D outcome, ap api_status::try_clear(status); // Send the outcome event to the backend - RETURN_IF_FAIL(_outcome_logger->log(event_id, outcome, status)); + if (_outcome_logger) { RETURN_IF_FAIL(_outcome_logger->log(event_id, outcome, status)); } // Check watchdog for any background errors. Do this at the end of function so that the work is still done. 
 if (_watchdog.has_background_error_been_reported())
@@ -153,7 +153,7 @@ int live_model_impl::report_outcome_internal(const char* primary_id, I secondary
   api_status::try_clear(status);
 
   // Send the outcome event to the backend
-  RETURN_IF_FAIL(_outcome_logger->log(primary_id, secondary_id, outcome, status));
+  if (_outcome_logger) { RETURN_IF_FAIL(_outcome_logger->log(primary_id, secondary_id, outcome, status)); }
 
   // Check watchdog for any background errors. Do this at the end of function so that the work is still done.
   if (_watchdog.has_background_error_been_reported())
diff --git a/unit_test/live_model_test.cc b/unit_test/live_model_test.cc
index e125a6162..6d861dbbd 100644
--- a/unit_test/live_model_test.cc
+++ b/unit_test/live_model_test.cc
@@ -923,6 +923,64 @@ BOOST_AUTO_TEST_CASE(live_model_logger_receive_data)
   BOOST_CHECK_GE(recorded_observations.size(), 1);
 }
 
+BOOST_AUTO_TEST_CASE(live_model_disable_event_logging)
+{
+  std::vector<std::string> recorded_interactions;
+  auto mock_interaction_sender = get_mock_sender(recorded_interactions);
+
+  std::vector<std::string> recorded_observations;
+  auto mock_observation_sender = get_mock_sender(recorded_observations);
+
+  auto mock_data_transport = get_mock_data_transport();
+  auto mock_model = get_mock_model(r::model_management::model_type_t::CB);
+
+  auto logger_factory = get_mock_sender_factory(mock_observation_sender.get(), mock_interaction_sender.get());
+  auto data_transport_factory = get_mock_data_transport_factory(mock_data_transport.get());
+  auto model_factory = get_mock_model_factory(mock_model.get());
+
+  u::configuration config;
+  cfg::create_from_json(JSON_CFG, config);
+  config.set(r::name::EH_TEST, "true");
+  config.set(r::name::EVENT_LOGGING_ENABLED, "false");  // Disable event logging.
+
+  auto const version_number = "1";
+
+  auto const event_id_1 = "event_id";
+  auto const event_id_2 = "event_id_2";
+
+  auto const expected_interaction_1 = u::concat(R"({"Version":")", version_number, R"(","EventId":")", event_id_1,
+      R"(","a":[1,2],"c":)", JSON_CONTEXT, R"(,"p":[0.500000,0.500000],"VWState":{"m":"N/A"}})");
+  auto const expected_observation_1 = u::concat(R"({"EventId":")", event_id_1, R"(","v":1.000000})");
+
+  auto const expected_interaction_2 = u::concat(R"({"Version":")", version_number, R"(","EventId":")", event_id_2,
+      R"(","a":[1,2],"c":)", JSON_CONTEXT, R"(,"p":[0.500000,0.500000],"VWState":{"m":"N/A"}})");
+  auto const expected_observation_2 = u::concat(R"({"EventId":")", event_id_2, R"(","v":1.000000})");
+  auto const num_iterations = 5;
+
+  {
+    r::cb_loop model = create_mock_live_model<r::cb_loop>(
+        config, data_transport_factory.get(), model_factory.get(), logger_factory.get());
+
+    r::api_status status;
+    BOOST_CHECK_EQUAL(model.init(&status), err::success);
+
+    r::ranking_response response;
+    for (auto i = 0; i < num_iterations; i++)
+    {
+      BOOST_CHECK_EQUAL(model.choose_rank(event_id_1, JSON_CONTEXT, response), err::success);
+      BOOST_CHECK_EQUAL(model.report_outcome(event_id_1, 1.0), err::success);
+
+      BOOST_CHECK_EQUAL(model.choose_rank(event_id_2, JSON_CONTEXT, response), err::success);
+      BOOST_CHECK_EQUAL(model.report_outcome(event_id_2, 1.0), err::success);
+    }
+
+    Verify(Method((*mock_interaction_sender), init)).Exactly(0);
+    Verify(Method((*mock_observation_sender), init)).Exactly(0);
+  }
+  BOOST_CHECK_EQUAL(recorded_interactions.size(), 0);
+  BOOST_CHECK_EQUAL(recorded_observations.size(), 0);
+}
+
 BOOST_AUTO_TEST_CASE(populate_response_same_size_test)
 {
   r::api_status status;