From 1bff9bf54e88b38165326fde5f1745f587c8702b Mon Sep 17 00:00:00 2001 From: wh002 Date: Thu, 19 Jan 2023 20:23:55 +0800 Subject: [PATCH 1/6] refactor(log): use LOG_ERROR_F instead of LOG_ERROR (2/2) --- src/block_service/fds/fds_service.cpp | 90 +++---- src/block_service/local/local_service.cpp | 21 +- src/client/partition_resolver.cpp | 8 +- src/client/partition_resolver_manager.cpp | 2 +- src/client/partition_resolver_simple.cpp | 30 +-- src/client/replication_ddl_client.cpp | 2 +- .../pegasus_client_factory_impl.cpp | 4 +- src/client_lib/pegasus_client_impl.cpp | 92 +++---- src/common/fs_manager.cpp | 7 +- src/failure_detector/failure_detector.cpp | 26 +- src/http/http_message_parser.cpp | 8 +- src/http/pprof_http_service.cpp | 8 +- src/meta/dump_file.h | 12 +- src/meta/meta_backup_service.cpp | 66 +++-- src/meta/meta_data.cpp | 31 ++- src/meta/meta_server_failure_detector.cpp | 8 +- src/meta/meta_service.cpp | 18 +- src/meta/meta_state_service_simple.cpp | 2 +- src/meta/partition_guardian.cpp | 4 +- src/meta/server_state.cpp | 43 ++- src/meta/server_state_restore.cpp | 5 +- src/meta/test/main.cpp | 2 +- src/nfs/nfs_client_impl.cpp | 40 +-- src/nfs/nfs_server_impl.cpp | 17 +- src/redis_protocol/proxy_lib/proxy_layer.cpp | 6 +- src/reporter/pegasus_counter_reporter.cpp | 12 +- src/runtime/env.sim.cpp | 2 +- src/runtime/fault_injector.cpp | 2 +- src/runtime/rpc/asio_net_provider.cpp | 41 ++- src/runtime/rpc/asio_rpc_session.cpp | 14 +- src/runtime/rpc/dsn_message_parser.cpp | 18 +- src/runtime/rpc/network.cpp | 10 +- src/runtime/rpc/network.sim.cpp | 2 +- src/runtime/rpc/thrift_message_parser.cpp | 8 +- src/runtime/service_api_c.cpp | 4 +- src/runtime/simulator.cpp | 4 +- src/runtime/task/task_spec.cpp | 8 +- src/runtime/task/task_worker.cpp | 4 +- src/runtime/test/test_utils.h | 2 +- src/server/available_detector.cpp | 40 ++- src/server/info_collector.cpp | 8 +- src/server/pegasus_server_impl.cpp | 254 ++++++++---------- src/server/result_writer.cpp | 14 +- src/shell/command_helper.h | 29 +- src/test/kill_test/data_verifier.cpp | 83 +++--- src/test/kill_test/kill_testor.cpp | 6 +- src/test/kill_test/partition_kill_testor.cpp | 2 +- src/test/kill_test/process_kill_testor.cpp | 4 +- src/utils/api_utilities.h | 4 +- src/utils/filesystem.cpp | 15 +- src/utils/shared_io_service.cpp | 2 +- .../distributed_lock_service_zookeeper.cpp | 8 +- src/zookeeper/lock_struct.cpp | 22 +- 53 files changed, 554 insertions(+), 620 deletions(-) diff --git a/src/block_service/fds/fds_service.cpp b/src/block_service/fds/fds_service.cpp index 73adeb3dde..45452c1359 100644 --- a/src/block_service/fds/fds_service.cpp +++ b/src/block_service/fds/fds_service.cpp @@ -161,26 +161,26 @@ error_code fds_service::initialize(const std::vector &args) #define FDS_EXCEPTION_HANDLE(ERR_REFERENCE, OPERATION, INPUT_PARAMETER) \ catch (const Poco::TimeoutException &ex) \ { \ - LOG_ERROR("fds %s timeout: parameter(%s), code(%d), msg(%s)", \ - OPERATION, \ - INPUT_PARAMETER, \ - ex.code(), \ - ex.message().c_str()); \ + LOG_ERROR_F("fds {} timeout: parameter({}), code({}), msg({})", \ + OPERATION, \ + INPUT_PARAMETER, \ + ex.code(), \ + ex.message()); \ ERR_REFERENCE = ERR_TIMEOUT; \ } \ catch (const Poco::Exception &ex) \ { \ - LOG_ERROR("fds %s get poco exception: parameter(%s), code(%d), msg(%s), what(%s)", \ - OPERATION, \ - INPUT_PARAMETER, \ - ex.code(), \ - ex.message().c_str(), \ - ex.what()); \ + LOG_ERROR_F("fds {} get poco exception: parameter({}), code({}), msg({}), what({})", \ + OPERATION, \ + INPUT_PARAMETER, \ + 
ex.code(), \ + ex.message(), \ + ex.what()); \ ERR_REFERENCE = ERR_FS_INTERNAL; \ } \ catch (...) \ { \ - LOG_ERROR("fds %s get unknown exception: parameter(%s)", OPERATION, INPUT_PARAMETER); \ + LOG_ERROR_F("fds {} get unknown exception: parameter({})", OPERATION, INPUT_PARAMETER); \ ERR_REFERENCE = ERR_FS_INTERNAL; \ } @@ -233,10 +233,10 @@ dsn::task_ptr fds_service::list_dir(const ls_request &req, } } } catch (const galaxy::fds::GalaxyFDSClientException &ex) { - LOG_ERROR("fds listObjects failed: parameter(%s), code(%d), msg(%s)", - req.dir_name.c_str(), - ex.code(), - ex.what()); + LOG_ERROR_F("fds listObjects failed: parameter({}), code({}), msg({})", + req.dir_name, + ex.code(), + ex.what()); resp.err = ERR_FS_INTERNAL; } FDS_EXCEPTION_HANDLE(resp.err, "listObject", req.dir_name.c_str()) @@ -245,19 +245,17 @@ dsn::task_ptr fds_service::list_dir(const ls_request &req, try { if (_client->doesObjectExist(_bucket_name, utils::path_to_fds(req.dir_name, false))) { - LOG_ERROR("fds list_dir failed: path not dir, parameter(%s)", - req.dir_name.c_str()); + LOG_ERROR_F("fds list_dir failed: path not dir, parameter({})", req.dir_name); resp.err = ERR_INVALID_PARAMETERS; } else { - LOG_ERROR("fds list_dir failed: path not found, parameter(%s)", - req.dir_name.c_str()); + LOG_ERROR_F("fds list_dir failed: path not found, parameter({})", req.dir_name); resp.err = ERR_OBJECT_NOT_FOUND; } } catch (const galaxy::fds::GalaxyFDSClientException &ex) { - LOG_ERROR("fds doesObjectExist failed: parameter(%s), code(%d), msg(%s)", - req.dir_name.c_str(), - ex.code(), - ex.what()); + LOG_ERROR_F("fds doesObjectExist failed: parameter({}), code({}), msg({})", + req.dir_name, + ex.code(), + ex.what()); resp.err = ERR_FS_INTERNAL; } FDS_EXCEPTION_HANDLE(resp.err, "doesObjectExist", req.dir_name.c_str()) @@ -333,24 +331,22 @@ dsn::task_ptr fds_service::remove_path(const remove_path_request &req, if (req.recursive) { should_remove_path = true; } else { - LOG_ERROR("fds remove_path failed: dir not empty, parameter(%s)", - req.path.c_str()); + LOG_ERROR_F("fds remove_path failed: dir not empty, parameter({})", req.path); resp.err = ERR_DIR_NOT_EMPTY; } } else { if (_client->doesObjectExist(_bucket_name, utils::path_to_fds(req.path, false))) { should_remove_path = true; } else { - LOG_ERROR("fds remove_path failed: path not found, parameter(%s)", - req.path.c_str()); + LOG_ERROR_F("fds remove_path failed: path not found, parameter({})", req.path); resp.err = ERR_OBJECT_NOT_FOUND; } } } catch (const galaxy::fds::GalaxyFDSClientException &ex) { - LOG_ERROR("fds remove_path failed: parameter(%s), code(%d), msg(%s)", - req.path.c_str(), - ex.code(), - ex.what()); + LOG_ERROR_F("fds remove_path failed: parameter({}), code({}), msg({})", + req.path, + ex.code(), + ex.what()); resp.err = ERR_FS_INTERNAL; } FDS_EXCEPTION_HANDLE(resp.err, "remove_path", req.path.c_str()); @@ -362,16 +358,16 @@ dsn::task_ptr fds_service::remove_path(const remove_path_request &req, if (deleting->countFailedObjects() <= 0) { resp.err = ERR_OK; } else { - LOG_ERROR("fds remove_path failed: countFailedObjects = %d, parameter(%s)", - deleting->countFailedObjects(), - req.path.c_str()); + LOG_ERROR_F("fds remove_path failed: countFailedObjects = {}, parameter({})", + deleting->countFailedObjects(), + req.path); resp.err = ERR_FS_INTERNAL; } } catch (const galaxy::fds::GalaxyFDSClientException &ex) { - LOG_ERROR("fds remove_path failed: parameter(%s), code(%d), msg(%s)", - req.path.c_str(), - ex.code(), - ex.what()); + LOG_ERROR_F("fds remove_path 
failed: parameter({}), code({}), msg({})", + req.path, + ex.code(), + ex.what()); resp.err = ERR_FS_INTERNAL; } FDS_EXCEPTION_HANDLE(resp.err, "remove_path", req.path.c_str()); @@ -505,10 +501,10 @@ error_code fds_file_object::get_content(uint64_t pos, transfered_bytes += utils::copy_stream(is, os, PIECE_SIZE); err = ERR_OK; } catch (const galaxy::fds::GalaxyFDSClientException &ex) { - LOG_ERROR("fds getObject error: remote_file(%s), code(%d), msg(%s)", - file_name().c_str(), - ex.code(), - ex.what()); + LOG_ERROR_F("fds getObject error: remote_file({}), code({}), msg({})", + file_name(), + ex.code(), + ex.what()); if (ex.code() == Poco::Net::HTTPResponse::HTTP_NOT_FOUND) { _has_meta_synced = true; _md5sum = ""; @@ -549,10 +545,10 @@ error_code fds_file_object::put_content(/*in-out*/ std::istream &is, try { c->putObject(_service->get_bucket_name(), _fds_path, is, galaxy::fds::FDSObjectMetadata()); } catch (const galaxy::fds::GalaxyFDSClientException &ex) { - LOG_ERROR("fds putObject error: remote_file(%s), code(%d), msg(%s)", - file_name().c_str(), - ex.code(), - ex.what()); + LOG_ERROR_F("fds putObject error: remote_file({}), code({}), msg({})", + file_name(), + ex.code(), + ex.what()); err = ERR_FS_INTERNAL; } FDS_EXCEPTION_HANDLE(err, "putObject", file_name().c_str()) diff --git a/src/block_service/local/local_service.cpp b/src/block_service/local/local_service.cpp index b22fcc539a..a3f259764c 100644 --- a/src/block_service/local/local_service.cpp +++ b/src/block_service/local/local_service.cpp @@ -116,7 +116,7 @@ dsn::task_ptr local_service::list_dir(const ls_request &req, resp.err = ERR_OBJECT_NOT_FOUND; } else { if (!::dsn::utils::filesystem::get_subfiles(dir_path, children, false)) { - LOG_ERROR("get files under directory: %s fail", dir_path.c_str()); + LOG_ERROR_F("get files under directory: {} fail", dir_path); resp.err = ERR_FS_INTERNAL; children.clear(); } else { @@ -137,7 +137,7 @@ dsn::task_ptr local_service::list_dir(const ls_request &req, children.clear(); if (!::dsn::utils::filesystem::get_subdirectories(dir_path, children, false)) { - LOG_ERROR("get subpaths under directory: %s fail", dir_path.c_str()); + LOG_ERROR_F("get subpaths under directory: {} fail", dir_path); resp.err = ERR_FS_INTERNAL; children.clear(); } else { @@ -492,9 +492,8 @@ dsn::task_ptr local_file_object::download(const download_request &req, resp.err = ERR_OK; std::string target_file = req.output_local_name; if (target_file.empty()) { - LOG_ERROR("download %s failed, because target name(%s) is invalid", - file_name().c_str(), - target_file.c_str()); + LOG_ERROR_F( + "download {} failed, because target name({}) is invalid", file_name(), target_file); resp.err = ERR_INVALID_PARAMETERS; } @@ -508,9 +507,9 @@ dsn::task_ptr local_file_object::download(const download_request &req, if (resp.err == ERR_OK) { std::ifstream fin(file_name(), std::ifstream::in); if (!fin.is_open()) { - LOG_ERROR("open block file(%s) failed, err(%s)", - file_name().c_str(), - utils::safe_strerror(errno).c_str()); + LOG_ERROR_F("open block file({}) failed, err({})", + file_name(), + utils::safe_strerror(errno)); resp.err = ERR_FS_INTERNAL; } @@ -518,9 +517,9 @@ dsn::task_ptr local_file_object::download(const download_request &req, if (!fout.is_open()) { if (fin.is_open()) fin.close(); - LOG_ERROR("open target file(%s) failed, err(%s)", - target_file.c_str(), - utils::safe_strerror(errno).c_str()); + LOG_ERROR_F("open target file({}) failed, err({})", + target_file, + utils::safe_strerror(errno)); resp.err = ERR_FILE_OPERATION_FAILED; 
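// Illustrative sketch, not part of this commit: the conversion pattern the patch applies.
// printf-style LOG_ERROR needs .c_str()/to_string() and explicit casts so each argument
// matches its % conversion specifier, while the fmt-style "{}" placeholders used by
// LOG_ERROR_F deduce the argument types. The log_error_f helper below is hypothetical and
// only stands in for the real macro; it assumes LOG_ERROR_F forwards to fmt-style
// formatting, as the {} placeholders throughout this patch suggest.
#include <string>
#include <utility>
#include <fmt/format.h>

template <typename... Args>
void log_error_f(fmt::format_string<Args...> fmt_str, Args &&... args)
{
    // Format the message once, then emit it on stderr as a stand-in for the logger.
    fmt::print(stderr, "ERROR: {}\n", fmt::format(fmt_str, std::forward<Args>(args)...));
}

void example(const std::string &dir_name)
{
    // old printf style: LOG_ERROR("get files under directory: %s fail", dir_name.c_str());
    // new fmt style: the call site no longer changes when the argument type changes.
    log_error_f("get files under directory: {} fail", dir_name);
}
// End of illustrative sketch.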
} diff --git a/src/client/partition_resolver.cpp b/src/client/partition_resolver.cpp index 9769f50221..d3c67bcf6e 100644 --- a/src/client/partition_resolver.cpp +++ b/src/client/partition_resolver.cpp @@ -88,10 +88,10 @@ void partition_resolver::call_task(const rpc_response_task_ptr &t) std::chrono::milliseconds(gap)); return; } else { - LOG_ERROR("service access failed (%s), no more time for further " - "tries, set error = ERR_TIMEOUT, trace_id = %016" PRIx64, - err.to_string(), - req->header->trace_id); + LOG_ERROR_F("service access failed ({}), no more time for further tries, set error " + "= ERR_TIMEOUT, trace_id = {:#018x}", + err, + req->header->trace_id); err = ERR_TIMEOUT; } } diff --git a/src/client/partition_resolver_manager.cpp b/src/client/partition_resolver_manager.cpp index c01cbd8e56..946a0f5a9f 100644 --- a/src/client/partition_resolver_manager.cpp +++ b/src/client/partition_resolver_manager.cpp @@ -66,7 +66,7 @@ partition_resolver_ptr partition_resolver_manager::find_or_create( dsn::rpc_address meta_group = ptr->get_meta_server(); const std::vector &existing_list = meta_group.group_address()->members(); if (!vector_equal(meta_list, existing_list)) { - LOG_ERROR("meta list not match for cluster(%s)", cluster_name); + LOG_ERROR_F("meta list not match for cluster({})", cluster_name); return nullptr; } return ptr; diff --git a/src/client/partition_resolver_simple.cpp b/src/client/partition_resolver_simple.cpp index f9e2467ace..3e41cd55d8 100644 --- a/src/client/partition_resolver_simple.cpp +++ b/src/client/partition_resolver_simple.cpp @@ -310,28 +310,28 @@ void partition_resolver_simple::query_config_reply(error_code err, } } } else if (resp.err == ERR_OBJECT_NOT_FOUND) { - LOG_ERROR("%s.client: query config reply, gpid = %d.%d, err = %s", - _app_name.c_str(), - _app_id, - partition_index, - resp.err.to_string()); + LOG_ERROR_F("{}.client: query config reply, gpid = {}.{}, err = {}", + _app_name, + _app_id, + partition_index, + resp.err); client_err = ERR_APP_NOT_EXIST; } else { - LOG_ERROR("%s.client: query config reply, gpid = %d.%d, err = %s", - _app_name.c_str(), - _app_id, - partition_index, - resp.err.to_string()); + LOG_ERROR_F("{}.client: query config reply, gpid = {}.{}, err = {}", + _app_name, + _app_id, + partition_index, + resp.err); client_err = resp.err; } } else { - LOG_ERROR("%s.client: query config reply, gpid = %d.%d, err = %s", - _app_name.c_str(), - _app_id, - partition_index, - err.to_string()); + LOG_ERROR_F("{}.client: query config reply, gpid = {}.{}, err = {}", + _app_name, + _app_id, + partition_index, + err); } // get specific or all partition update diff --git a/src/client/replication_ddl_client.cpp b/src/client/replication_ddl_client.cpp index 63d187ee25..ec67f37f28 100644 --- a/src/client/replication_ddl_client.cpp +++ b/src/client/replication_ddl_client.cpp @@ -393,7 +393,7 @@ dsn::error_code replication_ddl_client::list_apps(const dsn::app_status::type st std::vector partitions; r = list_app(info.app_name, app_id, partition_count, partitions); if (r != dsn::ERR_OK) { - LOG_ERROR("list app(%s) failed, err = %s", info.app_name.c_str(), r.to_string()); + LOG_ERROR_F("list app({}) failed, err = {}", info.app_name, r); return r; } CHECK_EQ(info.app_id, app_id); diff --git a/src/client_lib/pegasus_client_factory_impl.cpp b/src/client_lib/pegasus_client_factory_impl.cpp index b68b8b9acb..8e7c351206 100644 --- a/src/client_lib/pegasus_client_factory_impl.cpp +++ b/src/client_lib/pegasus_client_factory_impl.cpp @@ -52,11 +52,11 @@ pegasus_client 
*pegasus_client_factory_impl::get_client(const char *cluster_name const char *app_name) { if (cluster_name == nullptr || cluster_name[0] == '\0') { - LOG_ERROR("invalid parameter 'cluster_name'"); + LOG_ERROR_F("invalid parameter 'cluster_name'"); return nullptr; } if (app_name == nullptr || app_name[0] == '\0') { - LOG_ERROR("invalid parameter 'app_name'"); + LOG_ERROR_F("invalid parameter 'app_name'"); return nullptr; } diff --git a/src/client_lib/pegasus_client_impl.cpp b/src/client_lib/pegasus_client_impl.cpp index d88509a84a..623f67a4fa 100644 --- a/src/client_lib/pegasus_client_impl.cpp +++ b/src/client_lib/pegasus_client_impl.cpp @@ -92,8 +92,8 @@ void pegasus_client_impl::async_set(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, internal_info()); return; @@ -161,20 +161,20 @@ void pegasus_client_impl::async_multi_set(const std::string &hash_key, { // check params if (hash_key.size() == 0) { - LOG_ERROR("invalid hash key: hash key should not be empty for multi_set"); + LOG_ERROR_F("invalid hash key: hash key should not be empty for multi_set"); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, internal_info()); return; } if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, internal_info()); return; } if (kvs.empty()) { - LOG_ERROR("invalid kvs: kvs should not be empty"); + LOG_ERROR_F("invalid kvs: kvs should not be empty"); if (callback != nullptr) callback(PERR_INVALID_VALUE, internal_info()); return; @@ -249,8 +249,8 @@ void pegasus_client_impl::async_get(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, std::string(), internal_info()); return; @@ -323,14 +323,14 @@ void pegasus_client_impl::async_multi_get(const std::string &hash_key, { // check params if (hash_key.size() == 0) { - LOG_ERROR("invalid hash key: hash key should not be empty"); + LOG_ERROR_F("invalid hash key: hash key should not be empty"); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, std::map(), internal_info()); return; } if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, std::map(), internal_info()); return; @@ -419,14 +419,14 @@ void pegasus_client_impl::async_multi_get(const std::string &hash_key, { // check params if (hash_key.size() == 0) { - LOG_ERROR("invalid hash key: hash key should not be empty"); + LOG_ERROR_F("invalid hash key: hash key should not be empty"); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, std::map(), internal_info()); 
return; } if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, std::map(), internal_info()); return; @@ -506,14 +506,14 @@ void pegasus_client_impl::async_multi_get_sortkeys(const std::string &hash_key, { // check params if (hash_key.size() == 0) { - LOG_ERROR("invalid hash key: hash key should not be empty for multi_get_sortkeys"); + LOG_ERROR_F("invalid hash key: hash key should not be empty for multi_get_sortkeys"); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, std::set(), internal_info()); return; } if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, std::set(), internal_info()); return; @@ -570,12 +570,12 @@ int pegasus_client_impl::sortkey_count(const std::string &hash_key, { // check params if (hash_key.size() == 0) { - LOG_ERROR("invalid hash key: hash key should not be empty for sortkey_count"); + LOG_ERROR_F("invalid hash key: hash key should not be empty for sortkey_count"); return PERR_INVALID_HASH_KEY; } if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); return PERR_INVALID_HASH_KEY; } @@ -629,8 +629,8 @@ void pegasus_client_impl::async_del(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, internal_info()); return; @@ -692,20 +692,20 @@ void pegasus_client_impl::async_multi_del(const std::string &hash_key, { // check params if (hash_key.size() == 0) { - LOG_ERROR("invalid hash key: hash key should not be empty for multi_del"); + LOG_ERROR_F("invalid hash key: hash key should not be empty for multi_del"); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, 0, internal_info()); return; } if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, 0, internal_info()); return; } if (sort_keys.empty()) { - LOG_ERROR("invalid sort keys: should not be empty"); + LOG_ERROR_F("invalid sort keys: should not be empty"); if (callback != nullptr) callback(PERR_INVALID_VALUE, 0, internal_info()); return; @@ -780,14 +780,14 @@ void pegasus_client_impl::async_incr(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) 
callback(PERR_INVALID_HASH_KEY, 0, internal_info()); return; } if (ttl_seconds < -1) { - LOG_ERROR("invalid ttl seconds: should be no less than -1, but %d", ttl_seconds); + LOG_ERROR_F("invalid ttl seconds: should be no less than -1, but {}", ttl_seconds); if (callback != nullptr) callback(PERR_INVALID_ARGUMENT, 0, internal_info()); return; @@ -872,8 +872,8 @@ void pegasus_client_impl::async_check_and_set(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, check_and_set_results(), internal_info()); return; @@ -881,7 +881,7 @@ void pegasus_client_impl::async_check_and_set(const std::string &hash_key, if (dsn::apps::_cas_check_type_VALUES_TO_NAMES.find(check_type) == dsn::apps::_cas_check_type_VALUES_TO_NAMES.end()) { - LOG_ERROR("invalid check type: %d", (int)check_type); + LOG_ERROR_F("invalid check type: {}", check_type); if (callback != nullptr) callback(PERR_INVALID_ARGUMENT, check_and_set_results(), internal_info()); return; @@ -990,8 +990,8 @@ void pegasus_client_impl::async_check_and_mutate(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); if (callback != nullptr) callback(PERR_INVALID_HASH_KEY, check_and_mutate_results(), internal_info()); return; @@ -999,13 +999,13 @@ void pegasus_client_impl::async_check_and_mutate(const std::string &hash_key, if (dsn::apps::_cas_check_type_VALUES_TO_NAMES.find(check_type) == dsn::apps::_cas_check_type_VALUES_TO_NAMES.end()) { - LOG_ERROR("invalid check type: %d", (int)check_type); + LOG_ERROR_F("invalid check type: {}", check_type); if (callback != nullptr) callback(PERR_INVALID_ARGUMENT, check_and_mutate_results(), internal_info()); return; } if (mutations.is_empty()) { - LOG_ERROR("invalid mutations: mutations should not be empty."); + LOG_ERROR_F("invalid mutations: mutations should not be empty."); if (callback != nullptr) callback(PERR_INVALID_ARGUMENT, check_and_mutate_results(), internal_info()); return; @@ -1086,8 +1086,8 @@ int pegasus_client_impl::ttl(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); return PERR_INVALID_HASH_KEY; } @@ -1136,12 +1136,12 @@ int pegasus_client_impl::get_scanner(const std::string &hash_key, { // check params if (hash_key.size() >= UINT16_MAX) { - LOG_ERROR("invalid hash key: hash key length should be less than UINT16_MAX, but %d", - (int)hash_key.size()); + LOG_ERROR_F("invalid hash key: hash key length should be less than UINT16_MAX, but {}", + hash_key.size()); return PERR_INVALID_HASH_KEY; } if (hash_key.empty()) { - LOG_ERROR("invalid hash key: hash key cannot be empty when scan"); + LOG_ERROR_F("invalid hash key: hash key cannot be empty when scan"); return PERR_INVALID_HASH_KEY; } @@ -1201,8 +1201,8 @@ void pegasus_client_impl::async_get_unordered_scanners( // check params if 
(max_split_count <= 0) { - LOG_ERROR("invalid max_split_count: which should be greater than 0, but %d", - max_split_count); + LOG_ERROR_F("invalid max_split_count: which should be greater than 0, but {}", + max_split_count); callback(PERR_INVALID_SPLIT_COUNT, std::vector()); return; } @@ -1313,9 +1313,9 @@ const char *pegasus_client_impl::get_error_string(int error_code) const auto it = _server_error_to_client.find(server_error); if (it != _server_error_to_client.end()) return it->second; - LOG_ERROR("can't find corresponding client error definition, server error:[%d:%s]", - server_error, - ::dsn::error_code(server_error).to_string()); + LOG_ERROR_F("can't find corresponding client error definition, server error:[{}:{}]", + server_error, + ::dsn::error_code(server_error)); return PERR_UNKNOWN; } diff --git a/src/common/fs_manager.cpp b/src/common/fs_manager.cpp index 1215a088c7..9f611b69bd 100644 --- a/src/common/fs_manager.cpp +++ b/src/common/fs_manager.cpp @@ -202,11 +202,8 @@ void fs_manager::add_replica(const gpid &pid, const std::string &pid_dir) { dir_node *n = get_dir_node(pid_dir); if (nullptr == n) { - LOG_ERROR("%s: dir(%s) of gpid(%d.%d) haven't registered", - dsn_primary_address().to_string(), - pid_dir.c_str(), - pid.get_app_id(), - pid.get_partition_index()); + LOG_ERROR_F( + "{}: dir({}) of gpid({}) haven't registered", dsn_primary_address(), pid_dir, pid); } else { zauto_write_lock l(_lock); std::set &replicas_for_app = n->holding_replicas[pid.get_app_id()]; diff --git a/src/failure_detector/failure_detector.cpp b/src/failure_detector/failure_detector.cpp index 231bb08abc..7a87c82234 100644 --- a/src/failure_detector/failure_detector.cpp +++ b/src/failure_detector/failure_detector.cpp @@ -225,13 +225,13 @@ void failure_detector::check_all_records() is_time_greater_than(now, record.last_send_time_for_beacon_with_ack) && now + _check_interval_milliseconds - record.last_send_time_for_beacon_with_ack > _lease_milliseconds) { - LOG_ERROR("master %s disconnected, now=%" PRId64 ", last_send_time=%" PRId64 - ", now+check_interval-last_send_time=%" PRId64, - record.node.to_string(), - now, - record.last_send_time_for_beacon_with_ack, - now + _check_interval_milliseconds - - record.last_send_time_for_beacon_with_ack); + LOG_ERROR_F("master {} disconnected, now={:#018x}, last_send_time={:#018x}, " + "now+check_interval-last_send_time={:#018x}", + record.node, + now, + record.last_send_time_for_beacon_with_ack, + now + _check_interval_milliseconds - + record.last_send_time_for_beacon_with_ack); expire.push_back(record.node); record.is_alive = false; @@ -268,12 +268,12 @@ void failure_detector::check_all_records() // overflow if (record.is_alive && is_time_greater_than(now, record.last_beacon_recv_time) && now - record.last_beacon_recv_time > _grace_milliseconds) { - LOG_ERROR("worker %s disconnected, now=%" PRId64 ", last_beacon_recv_time=%" PRId64 - ", now-last_recv=%" PRId64, - record.node.to_string(), - now, - record.last_beacon_recv_time, - now - record.last_beacon_recv_time); + LOG_ERROR_F("worker {} disconnected, now={:#018x}, last_beacon_recv_time={:#018x}, " + "now-last_recv={:#018x}", + record.node, + now, + record.last_beacon_recv_time, + now - record.last_beacon_recv_time); expire.push_back(record.node); record.is_alive = false; diff --git a/src/http/http_message_parser.cpp b/src/http/http_message_parser.cpp index d4bea0b4aa..6771fed7d2 100644 --- a/src/http/http_message_parser.cpp +++ b/src/http/http_message_parser.cpp @@ -136,7 +136,7 @@ 
http_message_parser::http_message_parser() header->hdr_type = http_method::HTTP_METHOD_POST; header->context.u.is_request = 1; } else { - LOG_ERROR("invalid http type %d and method %d", parser->type, parser->method); + LOG_ERROR_F("invalid http type {} and method {}", parser->type, parser->method); return 1; } return 0; @@ -179,9 +179,9 @@ message_ex *http_message_parser::get_message_on_receive(message_reader *reader, // error handling if (_parser.http_errno != HPE_OK) { auto err = HTTP_PARSER_ERRNO(&_parser); - LOG_ERROR("failed on stage %s [%s]", - http_parser_stage_to_string(_stage), - http_errno_description(err)); + LOG_ERROR_F("failed on stage {} [{}]", + http_parser_stage_to_string(_stage), + http_errno_description(err)); read_next = -1; return nullptr; diff --git a/src/http/pprof_http_service.cpp b/src/http/pprof_http_service.cpp index b57773ba2d..a4b047af1a 100644 --- a/src/http/pprof_http_service.cpp +++ b/src/http/pprof_http_service.cpp @@ -76,7 +76,7 @@ static int extract_symbols_from_binary(std::map &addr_ma LOG_INFO_F("executing `{}`", cmd); const int rc = utils::pipe_execute(cmd.c_str(), ss); if (rc < 0) { - LOG_ERROR("fail to popen `%s`", cmd.c_str()); + LOG_ERROR_F("fail to popen `{}`", cmd); return -1; } std::string line; @@ -372,19 +372,19 @@ ssize_t read_command_line(char *buf, size_t len, bool with_args) { auto fd = open("/proc/self/cmdline", O_RDONLY); if (fd < 0) { - LOG_ERROR("Fail to open /proc/self/cmdline"); + LOG_ERROR_F("Fail to open /proc/self/cmdline"); return -1; } auto cleanup = defer([fd]() { close(fd); }); ssize_t nr = read(fd, buf, len); if (nr <= 0) { - LOG_ERROR("Fail to read /proc/self/cmdline"); + LOG_ERROR_F("Fail to read /proc/self/cmdline"); return -1; } if (with_args) { if ((size_t)nr == len) { - LOG_ERROR("buf is not big enough"); + LOG_ERROR_F("buf is not big enough"); return -1; } for (ssize_t i = 0; i < nr; ++i) { diff --git a/src/meta/dump_file.h b/src/meta/dump_file.h index 4c115e777f..79a758af05 100644 --- a/src/meta/dump_file.h +++ b/src/meta/dump_file.h @@ -53,7 +53,7 @@ #define log_error_and_return(buffer, length) \ do { \ ::dsn::utils::safe_strerror_r(errno, buffer, length); \ - LOG_ERROR("append file failed, reason(%s)", buffer); \ + LOG_ERROR_F("append file failed, reason({})", buffer); \ return -1; \ } while (0) @@ -131,8 +131,8 @@ class dump_file size_t cnt = fread(raw_mem + len, 1, hdr.length - len, _file_handle); if (len + cnt < hdr.length) { if (feof(_file_handle)) { - LOG_ERROR("unexpected file end, start offset of this block (%u)", - ftell(_file_handle) - len - sizeof(hdr)); + LOG_ERROR_F("unexpected file end, start offset of this block ({})", + ftell(_file_handle) - len - sizeof(hdr)); return -1; } else if (errno != EINTR) { log_error_and_return(msg_buffer, 128); @@ -142,9 +142,9 @@ class dump_file } _crc = dsn::utils::crc32_calc(raw_mem, len, _crc); if (_crc != hdr.crc32) { - LOG_ERROR("file %s data error, block offset(%ld)", - _filename.c_str(), - ftell(_file_handle) - hdr.length - sizeof(hdr)); + LOG_ERROR_F("file {} data error, block offset({})", + _filename, + ftell(_file_handle) - hdr.length - sizeof(hdr)); return -1; } diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index 2c7ba989ae..f24c6952a9 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -95,9 +95,9 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id) }) ->wait(); if (err != dsn::ERR_OK) { - LOG_ERROR("%s: create file %s failed, restart this backup later", - 
_backup_sig.c_str(), - create_file_req.file_name.c_str()); + LOG_ERROR_F("{}: create file {} failed, restart this backup later", + _backup_sig, + create_file_req.file_name); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, [this, app_id]() { @@ -202,9 +202,9 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id, ->wait(); if (err != ERR_OK) { - LOG_ERROR("%s: create file %s failed, restart this backup later", - _backup_sig.c_str(), - create_file_req.file_name.c_str()); + LOG_ERROR_F("{}: create file {} failed, restart this backup later", + _backup_sig, + create_file_req.file_name); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, [this, app_id, write_callback]() { @@ -318,9 +318,9 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info, ->wait(); if (err != ERR_OK) { - LOG_ERROR("%s: create file %s failed, restart this backup later", - _backup_sig.c_str(), - create_file_req.file_name.c_str()); + LOG_ERROR_F("{}: create file {} failed, restart this backup later", + _backup_sig, + create_file_req.file_name); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, [this, b_info, write_callback]() { @@ -625,10 +625,10 @@ void policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b LOG_WARNING_F("{}: empty callback", _policy.policy_name); } } else if (ERR_TIMEOUT == err) { - LOG_ERROR("%s: sync backup info(" PRId64 - ") to remote storage got timeout, retry it later", - _policy.policy_name.c_str(), - b_info.backup_id); + LOG_ERROR_F( + "{}: sync backup info({:#018x}) to remote storage got timeout, retry it later", + _policy.policy_name, + b_info.backup_id); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, [this, b_info, sync_callback, create_new_node]() { @@ -1002,8 +1002,8 @@ void policy_context::sync_remove_backup_info(const backup_info &info, dsn::task_ sync_callback->enqueue(); } } else if (err == ERR_TIMEOUT) { - LOG_ERROR("%s: sync remove backup info on remote storage got timeout, retry it later", - _policy.policy_name.c_str()); + LOG_ERROR_F("{}: sync remove backup info on remote storage got timeout, retry it later", + _policy.policy_name); tasking::enqueue( LPC_DEFAULT_CALLBACK, &_tracker, @@ -1053,8 +1053,7 @@ void backup_service::start_create_policy_meta_root(dsn::task_ptr callback) "create policy meta root({}) succeed, with err({})", _policy_meta_root, err); callback->enqueue(); } else if (err == dsn::ERR_TIMEOUT) { - LOG_ERROR("create policy meta root(%s) timeout, try it later", - _policy_meta_root.c_str()); + LOG_ERROR_F("create policy meta root({}) timeout, try it later", _policy_meta_root); dsn::tasking::enqueue( LPC_DEFAULT_CALLBACK, &_tracker, @@ -1084,7 +1083,7 @@ void backup_service::start_sync_policies() } _in_initialize.store(false); } else if (err == dsn::ERR_TIMEOUT) { - LOG_ERROR("sync policies got timeout, retry it later"); + LOG_ERROR_F("sync policies got timeout, retry it later"); dsn::tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, std::bind(&backup_service::start_sync_policies, this), @@ -1156,11 +1155,10 @@ error_code backup_service::sync_policies_from_remote_storage() } } else { err = ec; - LOG_ERROR( - "get backup info dirs fail from remote storage, backup_dirs_root = %s, " - "err = %s", - get_policy_path(policy_name).c_str(), - ec.to_string()); + LOG_ERROR_F("get backup info dirs fail from remote storage, backup_dirs_root = " + "{}, err = {}", + get_policy_path(policy_name), + ec); } }, &tracker); @@ -1188,9 +1186,8 @@ error_code backup_service::sync_policies_from_remote_storage() 
init_backup_info(policy_name); } else { err = ec; - LOG_ERROR("init policy fail, policy_path = %s, error_code = %s", - policy_path.c_str(), - ec.to_string()); + LOG_ERROR_F( + "init policy fail, policy_path = {}, error_code = {}", policy_path, ec); } }, &tracker); @@ -1207,8 +1204,7 @@ error_code backup_service::sync_policies_from_remote_storage() } } else { err = ec; - LOG_ERROR("get policy dirs from remote storage fail, error_code = %s", - ec.to_string()); + LOG_ERROR_F("get policy dirs from remote storage fail, error_code = {}", ec); } }, &tracker); @@ -1329,9 +1325,9 @@ void backup_service::do_add_policy(dsn::message_ex *req, } p->start(); } else if (err == ERR_TIMEOUT) { - LOG_ERROR("create backup policy on remote storage timeout, retry after %" PRId64 - "(ms)", - _opt.meta_retry_delay_ms.count()); + LOG_ERROR_F( + "create backup policy on remote storage timeout, retry after {:#018x} (ms)", + _opt.meta_retry_delay_ms.count()); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, std::bind(&backup_service::do_add_policy, this, req, p, hint_msg), @@ -1363,10 +1359,10 @@ void backup_service::do_update_policy_to_remote_storage( p.policy_name); p_context_ptr->set_policy(p); } else if (err == ERR_TIMEOUT) { - LOG_ERROR("update backup policy to remote storage failed, policy_name = %s, retry " - "after %" PRId64 "(ms)", - p.policy_name.c_str(), - _opt.meta_retry_delay_ms.count()); + LOG_ERROR_F("update backup policy to remote storage failed, policy_name = {}, " + "retry after {:#018x} (ms)", + p.policy_name, + _opt.meta_retry_delay_ms.count()); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, std::bind(&backup_service::do_update_policy_to_remote_storage, diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp index 282bd5306b..5e26fa4201 100644 --- a/src/meta/meta_data.cpp +++ b/src/meta/meta_data.cpp @@ -424,22 +424,21 @@ bool config_context::check_order() return true; for (unsigned int i = 0; i < dropped.size() - 1; ++i) { if (dropped_cmp(dropped[i], dropped[i + 1]) > 0) { - LOG_ERROR("check dropped order for gpid(%d.%d) failed, [%s,%llu,%lld,%lld,%lld@%d] vs " - "[%s,%llu,%lld,%lld,%lld@%d]", - config_owner->pid.get_app_id(), - config_owner->pid.get_partition_index(), - dropped[i].node.to_string(), - dropped[i].time, - dropped[i].ballot, - dropped[i].last_committed_decree, - dropped[i].last_prepared_decree, - i, - dropped[i].node.to_string(), - dropped[i].time, - dropped[i].ballot, - dropped[i].last_committed_decree, - dropped[i].last_prepared_decree, - i + 1); + LOG_ERROR_F("check dropped order for gpid({}) failed, [{},{},{},{},{}@{}] vs " + "[{},{},{},{},{}@{}]", + config_owner->pid, + dropped[i].node, + dropped[i].time, + dropped[i].ballot, + dropped[i].last_committed_decree, + dropped[i].last_prepared_decree, + i, + dropped[i].node, + dropped[i].time, + dropped[i].ballot, + dropped[i].last_committed_decree, + dropped[i].last_prepared_decree, + i + 1); return false; } } diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp index ff24bda18d..82e5be2cb3 100644 --- a/src/meta/meta_server_failure_detector.cpp +++ b/src/meta/meta_server_failure_detector.cpp @@ -149,10 +149,10 @@ void meta_server_failure_detector::acquire_leader_lock() // lease expire LPC_META_SERVER_LEADER_LOCK_CALLBACK, [](error_code ec, const std::string &owner, uint64_t version) { - LOG_ERROR("leader lock expired callback: err(%s), owner(%s), version(%llu)", - ec.to_string(), - owner.c_str(), - version); + LOG_ERROR_F("leader lock expired callback: err({}), owner({}), 
version({})", + ec, + owner, + version); // let's take the easy way right now dsn_exit(0); }, diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp index 0ed9bebd3e..b3712634d7 100644 --- a/src/meta/meta_service.cpp +++ b/src/meta/meta_service.cpp @@ -124,7 +124,7 @@ error_code meta_service::remote_storage_initialize() _meta_opts.meta_state_service_type.c_str(), PROVIDER_TYPE_MAIN); error_code err = storage->initialize(_meta_opts.meta_state_service_args); if (err != ERR_OK) { - LOG_ERROR("init meta_state_service failed, err = %s", err.to_string()); + LOG_ERROR_F("init meta_state_service failed, err = {}", err); return err; } _storage.reset(storage); @@ -139,8 +139,7 @@ error_code meta_service::remote_storage_initialize() _storage->create_node(current, LPC_META_CALLBACK, [&err](error_code ec) { err = ec; }); tsk->wait(); if (err != ERR_OK && err != ERR_NODE_ALREADY_EXIST) { - LOG_ERROR( - "create node failed, node_path = %s, err = %s", current.c_str(), err.to_string()); + LOG_ERROR_F("create node failed, node_path = {}, err = {}", current, err); return err; } } @@ -301,7 +300,7 @@ error_code meta_service::start() error_code err; err = remote_storage_initialize(); - dreturn_not_ok_logged(err, "init remote storage failed, err = %s", err.to_string()); + dreturn_not_ok_logged(err, "init remote storage failed, err = {}", err); LOG_INFO_F("remote storage is successfully initialized"); // start failure detector, and try to acquire the leader lock @@ -316,7 +315,7 @@ error_code meta_service::start() _opts.fd_grace_seconds, _meta_opts.enable_white_list); - dreturn_not_ok_logged(err, "start failure_detector failed, err = %s", err.to_string()); + dreturn_not_ok_logged(err, "start failure_detector failed, err = {}", err); LOG_INFO_F("meta service failure detector is successfully started {}", _meta_opts.enable_white_list ? 
"with whitelist enabled" : ""); @@ -368,8 +367,7 @@ error_code meta_service::start() "administrator should recover this cluster manually later"); return dsn::ERR_OK; } - LOG_ERROR("initialize server state from remote storage failed, err = %s, retry ...", - err.to_string()); + LOG_ERROR_F("initialize server state from remote storage failed, err = {}, retry ...", err); } _state->recover_from_max_replica_count_env(); @@ -797,7 +795,7 @@ void meta_service::on_add_backup_policy(dsn::message_ex *req) } if (_backup_handler == nullptr) { - LOG_ERROR("meta doesn't enable backup service"); + LOG_ERROR_F("meta doesn't enable backup service"); response.err = ERR_SERVICE_NOT_ACTIVE; reply(req, response); } else { @@ -816,7 +814,7 @@ void meta_service::on_query_backup_policy(query_backup_policy_rpc policy_rpc) auto &response = policy_rpc.response(); if (_backup_handler == nullptr) { - LOG_ERROR("meta doesn't enable backup service"); + LOG_ERROR_F("meta doesn't enable backup service"); response.err = ERR_SERVICE_NOT_ACTIVE; } else { tasking::enqueue( @@ -833,7 +831,7 @@ void meta_service::on_modify_backup_policy(configuration_modify_backup_policy_rp } if (_backup_handler == nullptr) { - LOG_ERROR("meta doesn't enable backup service"); + LOG_ERROR_F("meta doesn't enable backup service"); rpc.response().err = ERR_SERVICE_NOT_ACTIVE; } else { tasking::enqueue( diff --git a/src/meta/meta_state_service_simple.cpp b/src/meta/meta_state_service_simple.cpp index c060091910..2b297a69cd 100644 --- a/src/meta/meta_state_service_simple.cpp +++ b/src/meta/meta_state_service_simple.cpp @@ -284,7 +284,7 @@ error_code meta_state_service_simple::initialize(const std::vector _log = file::open(log_path.c_str(), O_RDWR | O_CREAT | O_BINARY, 0666); if (!_log) { - LOG_ERROR("open file failed: %s", log_path.c_str()); + LOG_ERROR_F("open file failed: {}", log_path); return ERR_FILE_OPERATION_FAILED; } return ERR_OK; diff --git a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp index 7be75a8f09..9852f8047d 100644 --- a/src/meta/partition_guardian.cpp +++ b/src/meta/partition_guardian.cpp @@ -236,8 +236,8 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi } if (action.node.is_invalid()) { - LOG_ERROR( - "all nodes for gpid(%s) are dead, waiting for some secondary to come back....", + LOG_ERROR_F( + "all nodes for gpid({}) are dead, waiting for some secondary to come back....", gpid_name); result = pc_status::dead; } else { diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp index 54530eecf6..593a11fb35 100644 --- a/src/meta/server_state.cpp +++ b/src/meta/server_state.cpp @@ -300,7 +300,7 @@ error_code server_state::dump_app_states(const char *local_path, { std::shared_ptr file = dump_file::open_file(local_path, true); if (file == nullptr) { - LOG_ERROR("open file failed, file(%s)", local_path); + LOG_ERROR_F("open file failed, file({})", local_path); return ERR_FILE_OPERATION_FAILED; } @@ -331,7 +331,7 @@ error_code server_state::dump_from_remote_storage(const char *local_path, bool s LOG_INFO_F("remote storage is empty, just stop the dump"); return ERR_OK; } else if (ec != ERR_OK) { - LOG_ERROR("sync from remote storage failed, err(%s)", ec.to_string()); + LOG_ERROR_F("sync from remote storage failed, err({})", ec); return ec; } else { spin_wait_staging(); @@ -374,7 +374,7 @@ error_code server_state::restore_from_local_storage(const char *local_path) std::shared_ptr file = dump_file::open_file(local_path, false); if (file == nullptr) { - LOG_ERROR("open file 
failed, file(%s)", local_path); + LOG_ERROR_F("open file failed, file({})", local_path); return ERR_FILE_OPERATION_FAILED; } @@ -499,7 +499,7 @@ error_code server_state::sync_apps_to_remote_storage() t->wait(); if (err != ERR_NODE_ALREADY_EXIST && err != ERR_OK) { - LOG_ERROR("create root node /apps in meta store failed, err = %s", err.to_string()); + LOG_ERROR_F("create root node /apps in meta store failed, err = {}", err); return err; } else { LOG_INFO_F("set {} to lock state in remote storage", _apps_root); @@ -553,9 +553,7 @@ error_code server_state::sync_apps_to_remote_storage() LOG_INFO_F("set {} to unlock state in remote storage", _apps_root); return err; } else { - LOG_ERROR("set %s to unlock state in remote storage failed, reason(%s)", - _apps_root.c_str(), - err.to_string()); + LOG_ERROR_F("set {} to unlock state in remote storage failed, reason({})", _apps_root, err); return err; } } @@ -627,7 +625,7 @@ dsn::error_code server_state::sync_apps_from_remote_storage() } } else { - LOG_ERROR("get partition node failed, reason(%s)", ec.to_string()); + LOG_ERROR_F("get partition node failed, reason({})", ec); err = ec; } }, @@ -666,9 +664,9 @@ dsn::error_code server_state::sync_apps_from_remote_storage() sync_partition(app, i, partition_path); } } else { - LOG_ERROR("get app info from meta state service failed, path = %s, err = %s", - app_path.c_str(), - ec.to_string()); + LOG_ERROR_F("get app info from meta state service failed, path = {}, err = {}", + app_path, + ec); err = ec; } }, @@ -706,9 +704,9 @@ dsn::error_code server_state::sync_apps_from_remote_storage() sync_app(_apps_root + "/" + appid_str); } } else { - LOG_ERROR("get app list from meta state service failed, path = %s, err = %s", - _apps_root.c_str(), - ec.to_string()); + LOG_ERROR_F("get app list from meta state service failed, path = {}, err = {}", + _apps_root, + ec); err = ec; } }, @@ -965,10 +963,10 @@ void server_state::query_configuration_by_index(const query_cfg_request &request std::shared_ptr &app = iter->second; if (app->status != app_status::AS_AVAILABLE) { - LOG_ERROR("invalid status(%s) in exist app(%s), app_id(%d)", - enum_to_string(app->status), - (app->app_name).c_str(), - app->app_id); + LOG_ERROR_F("invalid status({}) in exist app({}), app_id({})", + enum_to_string(app->status), + app->app_name, + app->app_id); switch (app->status) { case app_status::AS_CREATING: @@ -1097,7 +1095,7 @@ void server_state::create_app(dsn::message_ex *msg) auto level = _meta_svc->get_function_level(); if (level <= meta_function_level::fl_freezed) { - LOG_ERROR("current meta function level is freezed, since there are too few alive nodes"); + LOG_ERROR_F("current meta function level is freezed, since there are too few alive nodes"); response.err = ERR_STATE_FREEZED; will_create_app = false; } else if (request.options.partition_count <= 0 || @@ -2322,14 +2320,14 @@ server_state::sync_apps_from_replica_nodes(const std::vector & dsn::error_code err = construct_apps(query_app_responses, replica_nodes, hint_message); if (err != dsn::ERR_OK) { - LOG_ERROR("construct apps failed, err = %s", err.to_string()); + LOG_ERROR_F("construct apps failed, err = {}", err); return err; } err = construct_partitions( query_replica_responses, replica_nodes, skip_lost_partitions, hint_message); if (err != dsn::ERR_OK) { - LOG_ERROR("construct partitions failed, err = %s", err.to_string()); + LOG_ERROR_F("construct partitions failed, err = {}", err); return err; } @@ -2347,8 +2345,7 @@ void server_state::on_start_recovery(const 
configuration_recovery_request &req, resp.err = sync_apps_from_replica_nodes( req.recovery_set, req.skip_bad_nodes, req.skip_lost_partitions, resp.hint_message); if (resp.err != dsn::ERR_OK) { - LOG_ERROR("sync apps from replica nodes failed when do recovery, err = %s", - resp.err.to_string()); + LOG_ERROR_F("sync apps from replica nodes failed when do recovery, err = {}", resp.err); _all_apps.clear(); return; } diff --git a/src/meta/server_state_restore.cpp b/src/meta/server_state_restore.cpp index ffdd4bf087..07d1957e41 100644 --- a/src/meta/server_state_restore.cpp +++ b/src/meta/server_state_restore.cpp @@ -42,7 +42,7 @@ void server_state::sync_app_from_backup_media( _meta_svc->get_block_service_manager().get_or_create_block_filesystem( request.backup_provider_name); if (blk_fs == nullptr) { - LOG_ERROR("acquire block_filesystem(%s) failed", request.backup_provider_name.c_str()); + LOG_ERROR_F("acquire block_filesystem({}) failed", request.backup_provider_name); callback_tsk->enqueue_with(ERR_INVALID_PARAMETERS, dsn::blob()); return; } @@ -148,8 +148,7 @@ void server_state::restore_app(dsn::message_ex *msg) dsn::error_code ec = ERR_OK; // if err != ERR_OK, then sync_app_from_backup_media ecounter some error if (err != ERR_OK) { - LOG_ERROR("sync app_info_data from backup media failed with err(%s)", - err.to_string()); + LOG_ERROR_F("sync app_info_data from backup media failed with err({})", err); ec = err; } else { auto pair = restore_app_info(msg, request, app_info_data); diff --git a/src/meta/test/main.cpp b/src/meta/test/main.cpp index db61f17d22..cdfb97da19 100644 --- a/src/meta/test/main.cpp +++ b/src/meta/test/main.cpp @@ -84,7 +84,7 @@ dsn::error_code meta_service_test_app::start(const std::vector &arg (uint32_t)dsn_config_get_value_uint64("tools.simulator", "random_seed", 0, "random seed"); if (seed == 0) { seed = time(0); - LOG_ERROR("initial seed: %u", seed); + LOG_ERROR_F("initial seed: {}", seed); } srand(seed); diff --git a/src/nfs/nfs_client_impl.cpp b/src/nfs/nfs_client_impl.cpp index 515796b8d3..2230fd8d7f 100644 --- a/src/nfs/nfs_client_impl.cpp +++ b/src/nfs/nfs_client_impl.cpp @@ -147,20 +147,20 @@ void nfs_client_impl::end_get_file_size(::dsn::error_code err, const user_request_ptr &ureq) { if (err != ::dsn::ERR_OK) { - LOG_ERROR("{nfs_service} remote get file size failed, source = %s, dir = %s, err = %s", - ureq->file_size_req.source.to_string(), - ureq->file_size_req.source_dir.c_str(), - err.to_string()); + LOG_ERROR_F("[nfs_service] remote get file size failed, source = {}, dir = {}, err = {}", + ureq->file_size_req.source, + ureq->file_size_req.source_dir, + err); ureq->nfs_task->enqueue(err, 0); return; } err = dsn::error_code(resp.error); if (err != ::dsn::ERR_OK) { - LOG_ERROR("{nfs_service} remote get file size failed, source = %s, dir = %s, err = %s", - ureq->file_size_req.source.to_string(), - ureq->file_size_req.source_dir.c_str(), - err.to_string()); + LOG_ERROR_F("[nfs_service] remote get file size failed, source = {}, dir = {}, err = {}", + ureq->file_size_req.source, + ureq->file_size_req.source_dir, + err); ureq->nfs_task->enqueue(err, 0); return; } @@ -352,13 +352,13 @@ void nfs_client_impl::end_copy(::dsn::error_code err, else _copy_requests_low.push_retry(reqc); } else { - LOG_ERROR("{nfs_service} remote copy failed, source = %s, dir = %s, file = %s, " - "err = %s, retry_count = %d", - fc->user_req->file_size_req.source.to_string(), - fc->user_req->file_size_req.source_dir.c_str(), - fc->file_name.c_str(), - err.to_string(), - reqc->retry_count); 
+ LOG_ERROR_F("[nfs_service] remote copy failed, source = {}, dir = {}, file = {}, " + "err = {}, retry_count = {}", + fc->user_req->file_size_req.source, + fc->user_req->file_size_req.source_dir, + fc->file_name, + err, + reqc->retry_count); handle_completion(fc->user_req, err); } @@ -458,7 +458,7 @@ void nfs_client_impl::continue_write() if (!fc->file_holder->file_handle) { --_concurrent_local_write_count; - LOG_ERROR("open file %s failed", file_path.c_str()); + LOG_ERROR_F("open file {} failed", file_path); handle_completion(fc->user_req, ERR_FILE_OPERATION_FAILED); } else { zauto_lock l(reqc->lock); @@ -499,10 +499,10 @@ void nfs_client_impl::end_write(error_code err, size_t sz, const copy_request_ex if (err != ERR_OK) { _recent_write_fail_count->increment(); - LOG_ERROR("{nfs_service} local write failed, dir = %s, file = %s, err = %s", - fc->user_req->file_size_req.dst_dir.c_str(), - fc->file_name.c_str(), - err.to_string()); + LOG_ERROR_F("[nfs_service] local write failed, dir = {}, file = {}, err = {}", + fc->user_req->file_size_req.dst_dir, + fc->file_name, + err); completed = true; } else { _recent_write_data_size->add(sz); diff --git a/src/nfs/nfs_server_impl.cpp b/src/nfs/nfs_server_impl.cpp index ebd14a9532..f72b79f6a0 100644 --- a/src/nfs/nfs_server_impl.cpp +++ b/src/nfs/nfs_server_impl.cpp @@ -102,7 +102,7 @@ void nfs_service_impl::on_copy(const ::dsn::service::copy_request &request, "nfs: copy file {} [{}, {}]", file_path, request.offset, request.offset + request.size); if (dfile == nullptr) { - LOG_ERROR("{nfs_service} open file %s failed", file_path.c_str()); + LOG_ERROR_F("[nfs_service] open file {} failed", file_path); ::dsn::service::copy_response resp; resp.error = ERR_OBJECT_NOT_FOUND; reply(resp); @@ -148,8 +148,7 @@ void nfs_service_impl::internal_read_callback(error_code err, size_t sz, callbac } if (err != ERR_OK) { - LOG_ERROR( - "{nfs_service} read file %s failed, err = %s", cp.file_path.c_str(), err.to_string()); + LOG_ERROR_F("[nfs_service] read file {} failed, err = {}", cp.file_path, err); _recent_copy_fail_count->increment(); } else { _recent_copy_data_size->add(sz); @@ -176,11 +175,11 @@ void nfs_service_impl::on_get_file_size( if (request.file_list.size() == 0) // return all file size in the destination file folder { if (!dsn::utils::filesystem::directory_exists(folder)) { - LOG_ERROR("{nfs_service} directory %s not exist", folder.c_str()); + LOG_ERROR_F("[nfs_service] directory {} not exist", folder); err = ERR_OBJECT_NOT_FOUND; } else { if (!dsn::utils::filesystem::get_subfiles(folder, file_list, true)) { - LOG_ERROR("{nfs_service} get subfiles of directory %s failed", folder.c_str()); + LOG_ERROR_F("[nfs_service] get subfiles of directory {} failed", folder); err = ERR_FILE_OPERATION_FAILED; } else { for (auto &fpath : file_list) { @@ -188,7 +187,7 @@ void nfs_service_impl::on_get_file_size( // Done int64_t sz; if (!dsn::utils::filesystem::file_size(fpath, sz)) { - LOG_ERROR("{nfs_service} get size of file %s failed", fpath.c_str()); + LOG_ERROR_F("[nfs_service] get size of file {} failed", fpath); err = ERR_FILE_OPERATION_FAILED; break; } @@ -208,9 +207,9 @@ void nfs_service_impl::on_get_file_size( struct stat st; if (0 != ::stat(file_path.c_str(), &st)) { - LOG_ERROR("{nfs_service} get stat of file %s failed, err = %s", - file_path.c_str(), - strerror(errno)); + LOG_ERROR_F("[nfs_service] get stat of file {} failed, err = {}", + file_path, + strerror(errno)); err = ERR_OBJECT_NOT_FOUND; break; } diff --git 
a/src/redis_protocol/proxy_lib/proxy_layer.cpp b/src/redis_protocol/proxy_lib/proxy_layer.cpp index e6db673d8c..05d52cc3cc 100644 --- a/src/redis_protocol/proxy_lib/proxy_layer.cpp +++ b/src/redis_protocol/proxy_lib/proxy_layer.cpp @@ -130,11 +130,11 @@ void proxy_session::on_recv_request(dsn::message_ex *msg) // 2. as "on_recv_request" won't be called concurrently, it's not necessary to call // "parse" with a lock. a subclass may implement a lock inside parse if necessary if (!parse(msg)) { - LOG_ERROR("%s: got invalid message, try to remove proxy session from proxy stub", - _remote_address.to_string()); + LOG_ERROR_F("{}: got invalid message, try to remove proxy session from proxy stub", + _remote_address); _stub->remove_session(_remote_address); - LOG_ERROR("close the rpc session %s", _remote_address.to_string()); + LOG_ERROR_F("close the rpc session {}", _remote_address); ((dsn::message_ex *)_backup_one_request)->io_session->close(); } } diff --git a/src/reporter/pegasus_counter_reporter.cpp b/src/reporter/pegasus_counter_reporter.cpp index 2ab57f8eee..c33e49fd4a 100644 --- a/src/reporter/pegasus_counter_reporter.cpp +++ b/src/reporter/pegasus_counter_reporter.cpp @@ -325,9 +325,9 @@ void pegasus_counter_reporter::http_request_done(struct evhttp_request *req, voi { struct event_base *event = (struct event_base *)arg; if (req == nullptr) { - LOG_ERROR("http post request failed: unknown reason"); + LOG_ERROR_F("http post request failed: unknown reason"); } else if (req->response_code == 0) { - LOG_ERROR("http post request failed: connection refused"); + LOG_ERROR_F("http post request failed: connection refused"); } else if (req->response_code == HTTP_OK) { LOG_DEBUG_F("http post request succeed"); } else { @@ -336,10 +336,10 @@ void pegasus_counter_reporter::http_request_done(struct evhttp_request *req, voi char *tmp = (char *)alloca(len + 1); memcpy(tmp, evbuffer_pullup(buf, -1), len); tmp[len] = '\0'; - LOG_ERROR("http post request failed: code = %u, code_line = %s, input_buffer = %s", - req->response_code, - req->response_code_line, - tmp); + LOG_ERROR_F("http post request failed: code = {}, code_line = {}, input_buffer = {}", + req->response_code, + req->response_code_line, + tmp); } event_base_loopexit(event, 0); } diff --git a/src/runtime/env.sim.cpp b/src/runtime/env.sim.cpp index 660e2b9f15..373b94aeb6 100644 --- a/src/runtime/env.sim.cpp +++ b/src/runtime/env.sim.cpp @@ -63,7 +63,7 @@ sim_env_provider::sim_env_provider(env_provider *inner_provider) : env_provider( _seed = std::random_device{}(); } - LOG_ERROR("simulation.random seed for this round is %d", _seed); + LOG_ERROR_F("simulation.random seed for this round is {}", _seed); } } // namespace tools diff --git a/src/runtime/fault_injector.cpp b/src/runtime/fault_injector.cpp index e53da59801..9c441bc03d 100644 --- a/src/runtime/fault_injector.cpp +++ b/src/runtime/fault_injector.cpp @@ -221,7 +221,7 @@ static void corrupt_data(message_ex *request, const std::string &corrupt_type) replace_value(request->buffers, rand::next_u32(0, request->body_size() + sizeof(message_header) - 1)); else { - LOG_ERROR("try to inject an unknown data corrupt type: %s", corrupt_type.c_str()); + LOG_ERROR_F("try to inject an unknown data corrupt type: {}", corrupt_type); } } diff --git a/src/runtime/rpc/asio_net_provider.cpp b/src/runtime/rpc/asio_net_provider.cpp index ff6cd6517e..23f6679f09 100644 --- a/src/runtime/rpc/asio_net_provider.cpp +++ b/src/runtime/rpc/asio_net_provider.cpp @@ -107,23 +107,23 @@ error_code 
asio_network_provider::start(rpc_channel channel, int port, bool clie _acceptor.reset(new boost::asio::ip::tcp::acceptor(get_io_service())); _acceptor->open(endpoint.protocol(), ec); if (ec) { - LOG_ERROR("asio tcp acceptor open failed, error = %s", ec.message().c_str()); + LOG_ERROR_F("asio tcp acceptor open failed, error = {}", ec.message()); _acceptor.reset(); return ERR_NETWORK_INIT_FAILED; } _acceptor->set_option(boost::asio::socket_base::reuse_address(true)); _acceptor->bind(endpoint, ec); if (ec) { - LOG_ERROR("asio tcp acceptor bind failed, error = %s", ec.message().c_str()); + LOG_ERROR_F("asio tcp acceptor bind failed, error = {}", ec.message()); _acceptor.reset(); return ERR_NETWORK_INIT_FAILED; } int backlog = boost::asio::socket_base::max_connections; _acceptor->listen(backlog, ec); if (ec) { - LOG_ERROR("asio tcp acceptor listen failed, port = %u, error = %s", - _address.port(), - ec.message().c_str()); + LOG_ERROR_F("asio tcp acceptor listen failed, port = {}, error = {}", + _address.port(), + ec.message()); _acceptor.reset(); return ERR_NETWORK_INIT_FAILED; } @@ -148,7 +148,7 @@ void asio_network_provider::do_accept() if (!ec) { auto remote = socket->remote_endpoint(ec); if (ec) { - LOG_ERROR("failed to get the remote endpoint: %s", ec.message().data()); + LOG_ERROR_F("failed to get the remote endpoint: {}", ec.message().data()); } else { auto ip = remote.address().to_v4().to_ulong(); auto port = remote.port(); @@ -274,23 +274,22 @@ void asio_udp_provider::do_receive() [this, send_endpoint](const boost::system::error_code &error, std::size_t bytes_transferred) { if (!!error) { - LOG_ERROR( - "%s: asio udp read failed: %s", _address.to_string(), error.message().c_str()); + LOG_ERROR_F("{}: asio udp read failed: {}", _address, error.message()); do_receive(); return; } if (bytes_transferred < sizeof(uint32_t)) { - LOG_ERROR("%s: asio udp read failed: too short message", _address.to_string()); + LOG_ERROR_F("{}: asio udp read failed: too short message", _address); do_receive(); return; } auto hdr_format = message_parser::get_header_type(_recv_reader._buffer.data()); if (NET_HDR_INVALID == hdr_format) { - LOG_ERROR("%s: asio udp read failed: invalid header type '%s'", - _address.to_string(), - message_parser::get_debug_string(_recv_reader._buffer.data()).c_str()); + LOG_ERROR_F("{}: asio udp read failed: invalid header type '{}'", + _address, + message_parser::get_debug_string(_recv_reader._buffer.data())); do_receive(); return; } @@ -304,7 +303,7 @@ void asio_udp_provider::do_receive() message_ex *msg = parser->get_message_on_receive(&_recv_reader, read_next); if (msg == nullptr) { - LOG_ERROR("%s: asio udp read failed: invalid udp packet", _address.to_string()); + LOG_ERROR_F("{}: asio udp read failed: invalid udp packet", _address); do_receive(); return; } @@ -346,15 +345,15 @@ error_code asio_udp_provider::start(rpc_channel channel, int port, bool client_o _socket.reset(new ::boost::asio::ip::udp::socket(_io_service)); _socket->open(endpoint.protocol(), ec); if (ec) { - LOG_ERROR("asio udp socket open failed, error = %s", ec.message().c_str()); + LOG_ERROR_F("asio udp socket open failed, error = {}", ec.message()); _socket.reset(); continue; } _socket->bind(endpoint, ec); if (ec) { - LOG_ERROR("asio udp socket bind failed, port = %u, error = %s", - _address.port(), - ec.message().c_str()); + LOG_ERROR_F("asio udp socket bind failed, port = {}, error = {}", + _address.port(), + ec.message()); _socket.reset(); continue; } @@ -368,15 +367,15 @@ error_code 
asio_udp_provider::start(rpc_channel channel, int port, bool client_o _socket.reset(new ::boost::asio::ip::udp::socket(_io_service)); _socket->open(endpoint.protocol(), ec); if (ec) { - LOG_ERROR("asio udp socket open failed, error = %s", ec.message().c_str()); + LOG_ERROR_F("asio udp socket open failed, error = {}", ec.message()); _socket.reset(); return ERR_NETWORK_INIT_FAILED; } _socket->bind(endpoint, ec); if (ec) { - LOG_ERROR("asio udp socket bind failed, port = %u, error = %s", - _address.port(), - ec.message().c_str()); + LOG_ERROR_F("asio udp socket bind failed, port = {}, error = {}", + _address.port(), + ec.message()); _socket.reset(); return ERR_NETWORK_INIT_FAILED; } diff --git a/src/runtime/rpc/asio_rpc_session.cpp b/src/runtime/rpc/asio_rpc_session.cpp index 6fe8862440..0bb31176b4 100644 --- a/src/runtime/rpc/asio_rpc_session.cpp +++ b/src/runtime/rpc/asio_rpc_session.cpp @@ -91,9 +91,7 @@ void asio_rpc_session::do_read(int read_next) if (ec == boost::asio::error::make_error_code(boost::asio::error::eof)) { LOG_INFO_F("asio read from {} failed: {}", _remote_addr, ec.message()); } else { - LOG_ERROR("asio read from %s failed: %s", - _remote_addr.to_string(), - ec.message().c_str()); + LOG_ERROR_F("asio read from {} failed: {}", _remote_addr, ec.message()); } on_failure(); } else { @@ -115,7 +113,7 @@ void asio_rpc_session::do_read(int read_next) } if (read_next == -1) { - LOG_ERROR("asio read from %s failed", _remote_addr.to_string()); + LOG_ERROR_F("asio read from {} failed", _remote_addr); on_failure(); } else { start_read_next(read_next); @@ -142,8 +140,7 @@ void asio_rpc_session::send(uint64_t signature) boost::asio::async_write( *_socket, asio_wbufs, [this, signature](boost::system::error_code ec, std::size_t length) { if (ec) { - LOG_ERROR( - "asio write to %s failed: %s", _remote_addr.to_string(), ec.message().c_str()); + LOG_ERROR_F("asio write to {} failed: {}", _remote_addr, ec.message()); on_failure(true); } else { on_send_completed(signature); @@ -191,9 +188,8 @@ void asio_rpc_session::connect() on_send_completed(); start_read_next(); } else { - LOG_ERROR("client session connect to %s failed, error = %s", - _remote_addr.to_string(), - ec.message().c_str()); + LOG_ERROR_F( + "client session connect to {} failed, error = {}", _remote_addr, ec.message()); on_failure(true); } release_ref(); diff --git a/src/runtime/rpc/dsn_message_parser.cpp b/src/runtime/rpc/dsn_message_parser.cpp index 2c665b561a..c22c712e67 100644 --- a/src/runtime/rpc/dsn_message_parser.cpp +++ b/src/runtime/rpc/dsn_message_parser.cpp @@ -55,7 +55,7 @@ message_ex *dsn_message_parser::get_message_on_receive(message_reader *reader, if (buf_len >= sizeof(message_header)) { if (!_header_checked) { if (!is_right_header(buf_ptr)) { - LOG_ERROR("dsn message header check failed"); + LOG_ERROR_F("dsn message header check failed"); read_next = -1; return nullptr; } else { @@ -71,12 +71,12 @@ message_ex *dsn_message_parser::get_message_on_receive(message_reader *reader, message_ex *msg = message_ex::create_receive_message(msg_bb); if (!is_right_body(msg)) { message_header *header = (message_header *)buf_ptr; - LOG_ERROR("dsn message body check failed, id = %" PRIu64 ", trace_id = %016" PRIx64 - ", rpc_name = %s, from_addr = %s", - header->id, - header->trace_id, - header->rpc_name, - header->from_address.to_string()); + LOG_ERROR_F("dsn message body check failed, id = {}, trace_id = {:#018x}, rpc_name " + "= {}, from_addr = {}", + header->id, + header->trace_id, + header->rpc_name, + header->from_address); 
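                        // Illustrative aside, not part of this patch: the trace_id conversion just
                        // above maps the printf spec "%016" PRIx64 onto the fmt spec {:#018x}. In
                        // fmt, 'x' selects lowercase hex, '#' adds the "0x" prefix, and 018
                        // zero-pads to a total width of 18 characters ("0x" plus 16 hex digits),
                        // so the only visible change is the added "0x" prefix. Assuming the fmt
                        // library underneath LOG_ERROR_F:
                        //   fmt::format("{:#018x}", uint64_t(123456789)) == "0x00000000075bcd15"
                        //   // the old printf rendering of the same value: "00000000075bcd15"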
read_next = -1; delete msg; return nullptr; @@ -169,7 +169,7 @@ int dsn_message_parser::get_buffers_on_send(message_ex *msg, /*out*/ send_buf *b bool r = (crc32 == dsn::utils::crc32_calc(hdr, sizeof(message_header), 0)); *pcrc = crc32; if (!r) { - LOG_ERROR("dsn message header crc check failed"); + LOG_ERROR_F("dsn message header crc check failed"); } return r; } @@ -202,7 +202,7 @@ int dsn_message_parser::get_buffers_on_send(message_ex *msg, /*out*/ send_buf *b CHECK_EQ(len, header->body_length); bool r = (header->body_crc32 == crc32); if (!r) { - LOG_ERROR("dsn message body crc check failed"); + LOG_ERROR_F("dsn message body crc check failed"); } return r; } diff --git a/src/runtime/rpc/network.cpp b/src/runtime/rpc/network.cpp index 09cfff05b8..4dcb951be5 100644 --- a/src/runtime/rpc/network.cpp +++ b/src/runtime/rpc/network.cpp @@ -233,9 +233,9 @@ int rpc_session::prepare_parser() hdr_format = _net.unknown_msg_hdr_format(); if (hdr_format == NET_HDR_INVALID) { - LOG_ERROR("invalid header type, remote_client = %s, header_type = '%s'", - _remote_addr.to_string(), - message_parser::get_debug_string(_reader._buffer.data()).c_str()); + LOG_ERROR_F("invalid header type, remote_client = {}, header_type = '{}'", + _remote_addr, + message_parser::get_debug_string(_reader._buffer.data())); return -1; } } @@ -418,8 +418,8 @@ bool rpc_session::on_recv_message(message_ex *msg, int delay_ms) // - the remote address is not listened, which means the remote port is not occupied // - operating system chooses the remote port as client's ephemeral port if (is_client() && msg->header->from_address == _net.engine()->primary_address()) { - LOG_ERROR("self connection detected, address = %s", - msg->header->from_address.to_string()); + LOG_ERROR_F("self connection detected, address = {}", + msg->header->from_address.to_string()); CHECK_EQ_MSG(msg->get_count(), 0, "message should not be referenced by anybody so far"); delete msg; return false; diff --git a/src/runtime/rpc/network.sim.cpp b/src/runtime/rpc/network.sim.cpp index 01c1525d1f..eb591a8c04 100644 --- a/src/runtime/rpc/network.sim.cpp +++ b/src/runtime/rpc/network.sim.cpp @@ -91,7 +91,7 @@ void sim_client_session::send(uint64_t sig) sim_network_provider *rnet = nullptr; if (!s_switch[task_spec::get(msg->local_rpc_code)->rpc_call_channel][msg->hdr_format].get( remote_address(), rnet)) { - LOG_ERROR("cannot find destination node %s in simulator", remote_address().to_string()); + LOG_ERROR_F("cannot find destination node {} in simulator", remote_address()); // on_disconnected(); // disable this to avoid endless resending } else { auto server_session = rnet->get_server_session(_net.address()); diff --git a/src/runtime/rpc/thrift_message_parser.cpp b/src/runtime/rpc/thrift_message_parser.cpp index dd24ab6e37..eac3026fa4 100644 --- a/src/runtime/rpc/thrift_message_parser.cpp +++ b/src/runtime/rpc/thrift_message_parser.cpp @@ -114,7 +114,7 @@ static message_ex *create_message_from_request_blob(const blob &body_data) dsn_hdr->context.u.is_request = 1; } if (dsn_hdr->context.u.is_request != 1) { - LOG_ERROR("invalid message type: %d", mtype); + LOG_ERROR_F("invalid message type: {}", mtype); delete msg; /// set set rpc_read_stream::_msg to nullptr, /// to avoid the dstor to call read_commit of _msg, which is deleted here. 
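The hunks in this file and the ones that follow all apply the same mechanical conversion from the
printf-style LOG_ERROR to the fmt-based LOG_ERROR_F. The real macro definitions live in rDSN's
logging headers and are not part of this diff; the sketch below is only a minimal illustration,
under the assumption that the macro ultimately forwards to fmt::format, of why the call sites can
drop the %s/%d specifiers and the .c_str()/.to_string() conversions. The names log_error_sink and
SKETCH_LOG_ERROR_F are invented for this example.

#include <fmt/format.h>
#include <cstdio>
#include <string>

// Hypothetical sink; the real macro routes the formatted text into rDSN's logger, not stderr.
inline void log_error_sink(const char *file, const char *func, int line, const std::string &msg)
{
    std::fprintf(stderr, "E %s:%d %s: %s\n", file, line, func, msg.c_str());
}

// fmt::format matches the argument list against the "{}" placeholders, and user-defined types
// such as error_code or rpc_address can be passed directly as long as they provide fmt/ostream
// formatting support, which the converted call sites in this series rely on.
#define SKETCH_LOG_ERROR_F(fmt_str, ...)                                                           \
    log_error_sink(__FILE__, __FUNCTION__, __LINE__, fmt::format(fmt_str, ##__VA_ARGS__))

// Example conversion in the style of this patch:
//   LOG_ERROR("open file %s failed", file_path.c_str());    // before
//   SKETCH_LOG_ERROR_F("open file {} failed", file_path);   // after

One caveat when converting messages by hand: fmt treats "{" and "}" as placeholder delimiters, so
any literal brace in a legacy format string has to be doubled as "{{" or "}}" (or dropped from the
message) during this kind of migration.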
@@ -151,7 +151,7 @@ bool thrift_message_parser::parse_request_header(message_reader *reader, int &re // The first 4 bytes is "THFT" data_input input(buf); if (!utils::mequals(buf.data(), "THFT", 4)) { - LOG_ERROR("hdr_type mismatch %s", message_parser::get_debug_string(buf.data()).c_str()); + LOG_ERROR_F("hdr_type mismatch {}", message_parser::get_debug_string(buf.data())); read_next = -1; return false; } @@ -167,7 +167,7 @@ bool thrift_message_parser::parse_request_header(message_reader *reader, int &re uint32_t hdr_length = input.read_u32(); if (hdr_length != HEADER_LENGTH_V0) { - LOG_ERROR("hdr_length should be %u, but %u", HEADER_LENGTH_V0, hdr_length); + LOG_ERROR_F("hdr_length should be {}, but {}", HEADER_LENGTH_V0, hdr_length); read_next = -1; return false; } @@ -184,7 +184,7 @@ bool thrift_message_parser::parse_request_header(message_reader *reader, int &re _v1_specific_vars->_body_length = input.read_u32(); reader->consume_buffer(HEADER_LENGTH_V1); } else { - LOG_ERROR("invalid hdr_version %d", _header_version); + LOG_ERROR_F("invalid hdr_version {}", _header_version); read_next = -1; return false; } diff --git a/src/runtime/service_api_c.cpp b/src/runtime/service_api_c.cpp index fe4a91d696..53547e6db3 100644 --- a/src/runtime/service_api_c.cpp +++ b/src/runtime/service_api_c.cpp @@ -197,7 +197,7 @@ bool dsn_mimic_app(const char *app_role, int index) if (cnode->spec().role_name == std::string(app_role) && cnode->spec().index == index) { return true; } else { - LOG_ERROR("current thread is already attached to another rDSN app %s", name.c_str()); + LOG_ERROR_F("current thread is already attached to another rDSN app {}", name); return false; } } @@ -211,7 +211,7 @@ bool dsn_mimic_app(const char *app_role, int index) } } - LOG_ERROR("cannot find host app %s with index %d", app_role, index); + LOG_ERROR_F("cannot find host app {} with index {}", app_role, index); return false; } diff --git a/src/runtime/simulator.cpp b/src/runtime/simulator.cpp index abc5bfd127..3c1a8c5e4b 100644 --- a/src/runtime/simulator.cpp +++ b/src/runtime/simulator.cpp @@ -121,8 +121,8 @@ void simulator::install(service_spec &spec) void simulator::on_system_exit(sys_exit_type st) { - LOG_ERROR("system exits, you can replay this process using random seed %d", - sim_env_provider::seed()); + LOG_ERROR_F("system exits, you can replay this process using random seed {}", + sim_env_provider::seed()); } void simulator::run() diff --git a/src/runtime/task/task_spec.cpp b/src/runtime/task/task_spec.cpp index 06846e8ef8..f2d4939bc9 100644 --- a/src/runtime/task/task_spec.cpp +++ b/src/runtime/task/task_spec.cpp @@ -223,15 +223,15 @@ bool task_spec::init() if (spec->rpc_request_throttling_mode != TM_NONE) { if (spec->type != TASK_TYPE_RPC_REQUEST) { - LOG_ERROR("%s: only rpc request type can have non TM_NONE throttling_mode", - spec->name.c_str()); + LOG_ERROR_F("{}: only rpc request type can have non TM_NONE throttling_mode", + spec->name); return false; } } if (spec->rpc_call_channel == RPC_CHANNEL_UDP && !dsn::tools::FLAGS_enable_udp) { - LOG_ERROR("task rpc_call_channel RPC_CHANNEL_UCP need udp service, make sure " - "[network].enable_udp"); + LOG_ERROR_F("task rpc_call_channel RPC_CHANNEL_UCP need udp service, make sure " + "[network].enable_udp"); return false; } } diff --git a/src/runtime/task/task_worker.cpp b/src/runtime/task/task_worker.cpp index f19da169ec..d1478afbbf 100644 --- a/src/runtime/task/task_worker.cpp +++ b/src/runtime/task/task_worker.cpp @@ -188,8 +188,8 @@ void task_worker::run_internal() } else { 
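        // Background note, not part of this patch: worker_affinity_mask is a 64-bit bitmap with
        // one bit per logical core, so a host with more than 64 cores cannot be fully represented
        // and the configured value can come out as 0; the code below then falls back to
        // ~((uint64_t)0), i.e. allow every core. A rough sketch of how such a mask is typically
        // turned into a thread affinity, assuming a Linux/pthread build (the actual rDSN affinity
        // code is not shown in this hunk):
        //   cpu_set_t cpuset;
        //   CPU_ZERO(&cpuset);
        //   for (int core = 0; core < 64; ++core) {
        //       if (current_mask & (1ULL << core)) {
        //           CPU_SET(core, &cpuset);
        //       }
        //   }
        //   pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);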
uint64_t current_mask = pool_spec().worker_affinity_mask; if (0 == current_mask) { - LOG_ERROR("mask for %s is set to 0x0, mostly due to that #core > 64, set to 64 now", - pool_spec().name.c_str()); + LOG_ERROR_F("mask for {} is set to 0x0, mostly due to that #core > 64, set to 64 now", + pool_spec().name); current_mask = ~((uint64_t)0); } diff --git a/src/runtime/test/test_utils.h b/src/runtime/test/test_utils.h index 84280b5d05..962d982147 100644 --- a/src/runtime/test/test_utils.h +++ b/src/runtime/test/test_utils.h @@ -117,7 +117,7 @@ class test_client : public ::dsn::serverlet, public ::dsn::service_ } else if (command.substr(0, 5) == "echo ") { reply(message, command.substr(5)); } else { - LOG_ERROR("unknown command"); + LOG_ERROR_F("unknown command"); } } diff --git a/src/server/available_detector.cpp b/src/server/available_detector.cpp index 68297524f3..3e2b475d87 100644 --- a/src/server/available_detector.cpp +++ b/src/server/available_detector.cpp @@ -156,7 +156,7 @@ void available_detector::stop() { _tracker.cancel_outstanding_tasks(); } void available_detector::detect_available() { if (!generate_hash_keys()) { - LOG_ERROR("initialize hash_keys failed, do not detect available, retry after 60 seconds"); + LOG_ERROR_F("initialize hash_keys failed, do not detect available, retry after 60 seconds"); _detect_timer = ::dsn::tasking::enqueue(LPC_DETECT_AVAILABLE, &_tracker, @@ -305,11 +305,11 @@ void available_detector::on_detect(int32_t idx) _recent_day_fail_times.fetch_add(1); _recent_hour_fail_times.fetch_add(1); _recent_minute_fail_times.fetch_add(1); - LOG_ERROR("async_get partition[%d] fail, fail_count = %d, hash_key = %s, error = %s", - idx, - prev + 1, - _hash_keys[idx].c_str(), - _client->get_error_string(err)); + LOG_ERROR_F("async_get partition[{}] fail, fail_count = {}, hash_key = {}, error = {}", + idx, + prev + 1, + _hash_keys[idx], + _client->get_error_string(err)); check_and_send_email(&cnt, idx); } else { cnt.store(0); @@ -331,11 +331,11 @@ void available_detector::on_detect(int32_t idx) _recent_day_fail_times.fetch_add(1); _recent_hour_fail_times.fetch_add(1); _recent_minute_fail_times.fetch_add(1); - LOG_ERROR("async_set partition[%d] fail, fail_count = %d, hash_key = %s , error = %s", - idx, - prev + 1, - _hash_keys[idx].c_str(), - _client->get_error_string(err)); + LOG_ERROR_F("async_set partition[{}] fail, fail_count = {}, hash_key = {}, error = {}", + idx, + prev + 1, + _hash_keys[idx], + _client->get_error_string(err)); check_and_send_email(&cnt, idx); } else { LOG_DEBUG_F("async_set partition[{}] ok, hash_key = {}", idx, _hash_keys[idx]); @@ -371,10 +371,8 @@ void available_detector::check_and_send_email(std::atomic *cnt, int32_t idx if (r == 0) { LOG_INFO_F("send alert email done, partition_index = {}", idx); } else { - LOG_ERROR("send alert email failed, partition_index = %d, " - "command_return = %d", - idx, - r); + LOG_ERROR_F( + "send alert email failed, partition_index = {}, command_return = {}", idx, r); } } } @@ -419,12 +417,12 @@ void available_detector::on_day_report() detect_times, fail_times); } else { - LOG_ERROR("send availability email fail, date = %s, " - "total_detect_times = %u, total_fail_times = %u, command_return = %d", - _old_day.c_str(), - detect_times, - fail_times, - r); + LOG_ERROR_F("send availability email fail, date = {}, total_detect_times = {}, " + "total_fail_times = {}, command_return = {}", + _old_day, + detect_times, + fail_times, + r); } } diff --git a/src/server/info_collector.cpp b/src/server/info_collector.cpp index 
1cc830284d..47ef46ebb3 100644 --- a/src/server/info_collector.cpp +++ b/src/server/info_collector.cpp @@ -143,7 +143,7 @@ void info_collector::on_app_stat() LOG_INFO_F("start to stat apps"); std::map> all_rows; if (!get_app_partition_stat(_shell_context.get(), all_rows)) { - LOG_ERROR("call get_app_stat() failed"); + LOG_ERROR_F("call get_app_stat() failed"); return; } @@ -271,7 +271,8 @@ void info_collector::on_capacity_unit_stat(int remaining_retry_count) 0, std::chrono::seconds(_capacity_unit_retry_wait_seconds)); } else { - LOG_ERROR("get capacity unit stat failed, remaining_retry_count = 0, no retry anymore"); + LOG_ERROR_F( + "get capacity unit stat failed, remaining_retry_count = 0, no retry anymore"); } return; } @@ -318,7 +319,8 @@ void info_collector::on_storage_size_stat(int remaining_retry_count) 0, std::chrono::seconds(_storage_size_retry_wait_seconds)); } else { - LOG_ERROR("get storage size stat failed, remaining_retry_count = 0, no retry anymore"); + LOG_ERROR_F( + "get storage size stat failed, remaining_retry_count = 0, no retry anymore"); } return; } diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp index 5dc29b4755..d4465b5b40 100644 --- a/src/server/pegasus_server_impl.cpp +++ b/src/server/pegasus_server_impl.cpp @@ -99,8 +99,7 @@ void pegasus_server_impl::parse_checkpoints() LOG_INFO_PREFIX("invalid checkpoint directory {}, remove it", d); ::dsn::utils::filesystem::remove_path(d); if (!::dsn::utils::filesystem::remove_path(d)) { - LOG_ERROR( - "%s: remove invalid checkpoint directory %s failed", replica_name(), d.c_str()); + LOG_ERROR_PREFIX("remove invalid checkpoint directory {} failed", d); } } } @@ -207,9 +206,8 @@ void pegasus_server_impl::gc_checkpoints(bool force_reserve_one) if (::dsn::utils::filesystem::remove_path(cpt_dir)) { LOG_INFO_PREFIX("checkpoint directory {} removed by garbage collection", cpt_dir); } else { - LOG_ERROR("%s: checkpoint directory %s remove failed by garbage collection", - replica_name(), - cpt_dir.c_str()); + LOG_ERROR_PREFIX("checkpoint directory {} remove failed by garbage collection", + cpt_dir); put_back_list.push_back(del_d); } } else { @@ -287,9 +285,7 @@ void pegasus_server_impl::on_get(get_rpc rpc) if (check_if_record_expired(utils::epoch_now(), value)) { _pfc_recent_expire_count->increment(); if (_verbose_log) { - LOG_ERROR("%s: rocksdb data expired for get from %s", - replica_name(), - rpc.remote_address().to_string()); + LOG_ERROR_PREFIX("rocksdb data expired for get from {}", rpc.remote_address()); } status = rocksdb::Status::NotFound(); } @@ -299,18 +295,16 @@ void pegasus_server_impl::on_get(get_rpc rpc) if (_verbose_log) { ::dsn::blob hash_key, sort_key; pegasus_restore_key(key, hash_key, sort_key); - LOG_ERROR("%s: rocksdb get failed for get from %s: " - "hash_key = \"%s\", sort_key = \"%s\", error = %s", - replica_name(), - rpc.remote_address().to_string(), - ::pegasus::utils::c_escape_string(hash_key).c_str(), - ::pegasus::utils::c_escape_string(sort_key).c_str(), - status.ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb get failed for get from {}: hash_key = \"{}\", sort_key = " + "\"{}\", error = {}", + rpc.remote_address(), + ::pegasus::utils::c_escape_string(hash_key), + ::pegasus::utils::c_escape_string(sort_key), + status.ToString()); } else if (!status.IsNotFound()) { - LOG_ERROR("%s: rocksdb get failed for get from %s: error = %s", - replica_name(), - rpc.remote_address().to_string(), - status.ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb get failed for get from {}: error 
= {}", + rpc.remote_address(), + status.ToString()); } } @@ -365,11 +359,9 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc) } if (!is_filter_type_supported(request.sort_key_filter_type)) { - LOG_ERROR("%s: invalid argument for multi_get from %s: " - "sort key filter type %d not supported", - replica_name(), - rpc.remote_address().to_string(), - request.sort_key_filter_type); + LOG_ERROR_F("invalid argument for multi_get from {}: sort key filter type {} not supported", + rpc.remote_address(), + request.sort_key_filter_type); resp.error = rocksdb::Status::kInvalidArgument; _cu_calculator->add_multi_get_cu(req, resp.error, request.hash_key, resp.kvs); _pfc_multi_get_latency->set(dsn_now_ns() - start_time); @@ -618,20 +610,18 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc) if (!it->status().ok()) { // error occur if (_verbose_log) { - LOG_ERROR("%s: rocksdb scan failed for multi_get from %s: " - "hash_key = \"%s\", reverse = %s, error = %s", - replica_name(), - rpc.remote_address().to_string(), - ::pegasus::utils::c_escape_string(request.hash_key).c_str(), - request.reverse ? "true" : "false", - it->status().ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb scan failed for multi_get from {}: hash_key = \"{}\", " + "reverse = {}, error = {}", + rpc.remote_address(), + ::pegasus::utils::c_escape_string(request.hash_key), + request.reverse ? "true" : "false", + it->status().ToString()); } else { - LOG_ERROR("%s: rocksdb scan failed for multi_get from %s: " - "reverse = %s, error = %s", - replica_name(), - rpc.remote_address().to_string(), - request.reverse ? "true" : "false", - it->status().ToString().c_str()); + LOG_ERROR_PREFIX( + "rocksdb scan failed for multi_get from {}: reverse = {}, error = {}", + rpc.remote_address(), + request.reverse ? 
"true" : "false", + it->status().ToString()); } resp.kvs.clear(); } else if (it->Valid() && !complete) { @@ -668,18 +658,16 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc) // print log if (!status.ok()) { if (_verbose_log) { - LOG_ERROR("%s: rocksdb get failed for multi_get from %s: " - "hash_key = \"%s\", sort_key = \"%s\", error = %s", - replica_name(), - rpc.remote_address().to_string(), - ::pegasus::utils::c_escape_string(request.hash_key).c_str(), - ::pegasus::utils::c_escape_string(request.sort_keys[i]).c_str(), - status.ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb get failed for multi_get from {}: hash_key = \"{}\", " + "sort_key = \"{}\", error = {}", + rpc.remote_address(), + ::pegasus::utils::c_escape_string(request.hash_key), + ::pegasus::utils::c_escape_string(request.sort_keys[i]), + status.ToString()); } else if (!status.IsNotFound()) { - LOG_ERROR("%s: rocksdb get failed for multi_get from %s: error = %s", - replica_name(), - rpc.remote_address().to_string(), - status.ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb get failed for multi_get from {}: error = {}", + rpc.remote_address(), + status.ToString()); } } // check ttl @@ -688,9 +676,8 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc) if (expire_ts > 0 && expire_ts <= epoch_now) { expire_count++; if (_verbose_log) { - LOG_ERROR("%s: rocksdb data expired for multi_get from %s", - replica_name(), - rpc.remote_address().to_string()); + LOG_ERROR_PREFIX("rocksdb data expired for multi_get from {}", + rpc.remote_address()); } status = rocksdb::Status::NotFound(); } @@ -933,9 +920,8 @@ void pegasus_server_impl::on_sortkey_count(sortkey_count_rpc rpc) if (check_if_record_expired(epoch_now, it->value())) { expire_count++; if (_verbose_log) { - LOG_ERROR("%s: rocksdb data expired for sortkey_count from %s", - replica_name(), - rpc.remote_address().to_string()); + LOG_ERROR_PREFIX("rocksdb data expired for sortkey_count from {}", + rpc.remote_address()); } } else { resp.count++; @@ -950,17 +936,15 @@ void pegasus_server_impl::on_sortkey_count(sortkey_count_rpc rpc) if (!it->status().ok()) { // error occur if (_verbose_log) { - LOG_ERROR("%s: rocksdb scan failed for sortkey_count from %s: " - "hash_key = \"%s\", error = %s", - replica_name(), - rpc.remote_address().to_string(), - ::pegasus::utils::c_escape_string(hash_key).c_str(), - it->status().ToString().c_str()); + LOG_ERROR_PREFIX( + "rocksdb scan failed for sortkey_count from {}: hash_key = \"{}\", error = {}", + rpc.remote_address(), + ::pegasus::utils::c_escape_string(hash_key), + it->status().ToString()); } else { - LOG_ERROR("%s: rocksdb scan failed for sortkey_count from %s: error = %s", - replica_name(), - rpc.remote_address().to_string(), - it->status().ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb scan failed for sortkey_count from {}: error = {}", + rpc.remote_address(), + it->status().ToString()); } resp.count = 0; } else if (limiter->exceed_limit()) { @@ -1002,9 +986,7 @@ void pegasus_server_impl::on_ttl(ttl_rpc rpc) if (check_if_ts_expired(now_ts, expire_ts)) { _pfc_recent_expire_count->increment(); if (_verbose_log) { - LOG_ERROR("%s: rocksdb data expired for ttl from %s", - replica_name(), - rpc.remote_address().to_string()); + LOG_ERROR_PREFIX("rocksdb data expired for ttl from {}", rpc.remote_address()); } status = rocksdb::Status::NotFound(); } @@ -1014,18 +996,16 @@ void pegasus_server_impl::on_ttl(ttl_rpc rpc) if (_verbose_log) { ::dsn::blob hash_key, sort_key; pegasus_restore_key(key, hash_key, sort_key); - LOG_ERROR("%s: 
rocksdb get failed for ttl from %s: " - "hash_key = \"%s\", sort_key = \"%s\", error = %s", - replica_name(), - rpc.remote_address().to_string(), - ::pegasus::utils::c_escape_string(hash_key).c_str(), - ::pegasus::utils::c_escape_string(sort_key).c_str(), - status.ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb get failed for ttl from {}: hash_key = \"{}\", sort_key = " + "\"{}\", error = {}", + rpc.remote_address(), + ::pegasus::utils::c_escape_string(hash_key), + ::pegasus::utils::c_escape_string(sort_key), + status.ToString()); } else if (!status.IsNotFound()) { - LOG_ERROR("%s: rocksdb get failed for ttl from %s: error = %s", - replica_name(), - rpc.remote_address().to_string(), - status.ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb get failed for ttl from {}: error = {}", + rpc.remote_address(), + status.ToString()); } } @@ -1062,11 +1042,10 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc) } if (!is_filter_type_supported(request.hash_key_filter_type)) { - LOG_ERROR("%s: invalid argument for get_scanner from %s: " - "hash key filter type %d not supported", - replica_name(), - rpc.remote_address().to_string(), - request.hash_key_filter_type); + LOG_ERROR_PREFIX( + "invalid argument for get_scanner from {}: hash key filter type {} not supported", + rpc.remote_address(), + request.hash_key_filter_type); resp.error = rocksdb::Status::kInvalidArgument; _cu_calculator->add_scan_cu(req, resp.error, resp.kvs); _pfc_scan_latency->set(dsn_now_ns() - start_time); @@ -1074,11 +1053,10 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc) return; } if (!is_filter_type_supported(request.sort_key_filter_type)) { - LOG_ERROR("%s: invalid argument for get_scanner from %s: " - "sort key filter type %d not supported", - replica_name(), - rpc.remote_address().to_string(), - request.sort_key_filter_type); + LOG_ERROR_PREFIX( + "invalid argument for get_scanner from {}: sort key filter type {} not supported", + rpc.remote_address(), + request.sort_key_filter_type); resp.error = rocksdb::Status::kInvalidArgument; _cu_calculator->add_scan_cu(req, resp.error, resp.kvs); _pfc_scan_latency->set(dsn_now_ns() - start_time); @@ -1234,23 +1212,21 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc) if (!it->status().ok()) { // error occur if (_verbose_log) { - LOG_ERROR("%s: rocksdb scan failed for get_scanner from %s: " - "start_key = \"%s\" (%s), stop_key = \"%s\" (%s), " - "batch_size = %d, read_count = %d, error = %s", - replica_name(), - rpc.remote_address().to_string(), - ::pegasus::utils::c_escape_string(start).c_str(), - request.start_inclusive ? "inclusive" : "exclusive", - ::pegasus::utils::c_escape_string(stop).c_str(), - request.stop_inclusive ? "inclusive" : "exclusive", - batch_count, - count, - it->status().ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb scan failed for get_scanner from {}: start_key = \"{}\" " + "({}), stop_key = \"{}\" ({}), batch_size = {}, read_count = {}, " + "error = {}", + rpc.remote_address(), + ::pegasus::utils::c_escape_string(start), + request.start_inclusive ? "inclusive" : "exclusive", + ::pegasus::utils::c_escape_string(stop), + request.stop_inclusive ? 
"inclusive" : "exclusive", + batch_count, + count, + it->status().ToString()); } else { - LOG_ERROR("%s: rocksdb scan failed for get_scanner from %s: error = %s", - replica_name(), - rpc.remote_address().to_string(), - it->status().ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb scan failed for get_scanner from {}: error = {}", + rpc.remote_address(), + it->status().ToString()); } resp.kvs.clear(); } else if (limiter->exceed_limit()) { @@ -1407,22 +1383,19 @@ void pegasus_server_impl::on_scan(scan_rpc rpc) if (!it->status().ok()) { // error occur if (_verbose_log) { - LOG_ERROR("%s: rocksdb scan failed for scan from %s: " - "context_id= %" PRId64 ", stop_key = \"%s\" (%s), " - "batch_size = %d, read_count = %d, error = %s", - replica_name(), - rpc.remote_address().to_string(), - request.context_id, - ::pegasus::utils::c_escape_string(stop).c_str(), - stop_inclusive ? "inclusive" : "exclusive", - batch_count, - count, - it->status().ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb scan failed for scan from {}: context_id= {}, stop_key = " + "\"{}\" ({}), batch_size = {}, read_count = {}, error = {}", + rpc.remote_address(), + request.context_id, + ::pegasus::utils::c_escape_string(stop), + stop_inclusive ? "inclusive" : "exclusive", + batch_count, + count, + it->status().ToString()); } else { - LOG_ERROR("%s: rocksdb scan failed for scan from %s: error = %s", - replica_name(), - rpc.remote_address().to_string(), - it->status().ToString().c_str()); + LOG_ERROR_PREFIX("rocksdb scan failed for scan from {}: error = {}", + rpc.remote_address(), + it->status().ToString()); } resp.kvs.clear(); } else if (limiter->exceed_limit()) { @@ -1805,11 +1778,11 @@ ::dsn::error_code pegasus_server_impl::stop(bool clear_state) std::string chkpt_path = dsn::utils::filesystem::path_combine(data_dir(), chkpt_get_dir_name(*iter)); if (!dsn::utils::filesystem::remove_path(chkpt_path)) { - LOG_ERROR("%s: rmdir %s failed when stop app", replica_name(), chkpt_path.c_str()); + LOG_ERROR_PREFIX("rmdir {} failed when stop app", chkpt_path); } } if (!dsn::utils::filesystem::remove_path(data_dir())) { - LOG_ERROR("%s: rmdir %s failed when stop app", replica_name(), data_dir().c_str()); + LOG_ERROR_PREFIX("rmdir {} failed when stop app", data_dir()); return ::dsn::ERR_FILE_OPERATION_FAILED; } _pfc_rdb_sst_count->set(0); @@ -2129,14 +2102,14 @@ ::dsn::error_code pegasus_server_impl::get_checkpoint(int64_t learn_start, int64_t ci = last_durable_decree(); if (ci == 0) { - LOG_ERROR("%s: no checkpoint found", replica_name()); + LOG_ERROR_PREFIX("no checkpoint found"); return ::dsn::ERR_OBJECT_NOT_FOUND; } auto chkpt_dir = ::dsn::utils::filesystem::path_combine(data_dir(), chkpt_get_dir_name(ci)); state.files.clear(); if (!::dsn::utils::filesystem::get_subfiles(chkpt_dir, state.files, true)) { - LOG_ERROR("%s: list files in checkpoint dir %s failed", replica_name(), chkpt_dir.c_str()); + LOG_ERROR_PREFIX("list files in checkpoint dir {} failed", chkpt_dir); return ::dsn::ERR_FILE_OPERATION_FAILED; } @@ -2170,10 +2143,7 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode, set_last_durable_decree(ci); err = ::dsn::ERR_OK; } else { - LOG_ERROR("%s: rename directory %s to %s failed", - replica_name(), - learn_dir.c_str(), - chkpt_dir.c_str()); + LOG_ERROR_PREFIX("rename directory {} to {} failed", learn_dir, chkpt_dir); err = ::dsn::ERR_FILE_OPERATION_FAILED; } @@ -2183,14 +2153,14 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode, if (_is_open) { err = stop(true); if (err != ::dsn::ERR_OK) 
{ - LOG_ERROR("%s: close rocksdb %s failed, error = %s", replica_name(), err.to_string()); + LOG_ERROR_PREFIX("close rocksdb failed, error = {}", err); return err; } } // clear data dir if (!::dsn::utils::filesystem::remove_path(data_dir())) { - LOG_ERROR("%s: clear data directory %s failed", replica_name(), data_dir().c_str()); + LOG_ERROR_PREFIX("clear data directory {} failed", data_dir()); return ::dsn::ERR_FILE_OPERATION_FAILED; } @@ -2198,7 +2168,7 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode, if (state.files.size() > 0) { // create data dir if (!::dsn::utils::filesystem::create_directory(data_dir())) { - LOG_ERROR("%s: create data directory %s failed", replica_name(), data_dir().c_str()); + LOG_ERROR_PREFIX("create data directory {} failed", data_dir()); return ::dsn::ERR_FILE_OPERATION_FAILED; } @@ -2206,10 +2176,7 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode, std::string learn_dir = ::dsn::utils::filesystem::remove_file_name(state.files[0]); std::string new_dir = ::dsn::utils::filesystem::path_combine(data_dir(), "rdb"); if (!::dsn::utils::filesystem::rename_path(learn_dir, new_dir)) { - LOG_ERROR("%s: rename directory %s to %s failed", - replica_name(), - learn_dir.c_str(), - new_dir.c_str()); + LOG_ERROR_PREFIX("rename directory {} to {} failed", learn_dir, new_dir); return ::dsn::ERR_FILE_OPERATION_FAILED; } @@ -2220,7 +2187,7 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode, } if (err != ::dsn::ERR_OK) { - LOG_ERROR("%s: open rocksdb failed, error = %s", replica_name(), err.to_string()); + LOG_ERROR_PREFIX("open rocksdb failed, error = {}", err); return err; } @@ -2274,7 +2241,7 @@ range_iteration_state pegasus_server_impl::validate_key_value_for_scan( { if (check_if_record_expired(epoch_now, value)) { if (_verbose_log) { - LOG_ERROR("%s: rocksdb data expired for scan", replica_name()); + LOG_ERROR_PREFIX("rocksdb data expired for scan"); } return range_iteration_state::kExpired; } @@ -2298,14 +2265,14 @@ range_iteration_state pegasus_server_impl::validate_key_value_for_scan( if (hash_key_filter_type != ::dsn::apps::filter_type::FT_NO_FILTER && !validate_filter(hash_key_filter_type, hash_key_filter_pattern, hash_key)) { if (_verbose_log) { - LOG_ERROR("%s: hash key filtered for scan", replica_name()); + LOG_ERROR_PREFIX("hash key filtered for scan"); } return range_iteration_state::kFiltered; } if (sort_key_filter_type != ::dsn::apps::filter_type::FT_NO_FILTER && !validate_filter(sort_key_filter_type, sort_key_filter_pattern, sort_key)) { if (_verbose_log) { - LOG_ERROR("%s: sort key filtered for scan", replica_name()); + LOG_ERROR_PREFIX("sort key filtered for scan"); } return range_iteration_state::kFiltered; } @@ -2353,7 +2320,7 @@ range_iteration_state pegasus_server_impl::append_key_value_for_multi_get( { if (check_if_record_expired(epoch_now, value)) { if (_verbose_log) { - LOG_ERROR("%s: rocksdb data expired for multi get", replica_name()); + LOG_ERROR_PREFIX("rocksdb data expired for multi get"); } return range_iteration_state::kExpired; } @@ -2368,7 +2335,7 @@ range_iteration_state pegasus_server_impl::append_key_value_for_multi_get( if (sort_key_filter_type != ::dsn::apps::filter_type::FT_NO_FILTER && !validate_filter(sort_key_filter_type, sort_key_filter_pattern, sort_key)) { if (_verbose_log) { - LOG_ERROR("%s: sort key filtered for multi get", replica_name()); + LOG_ERROR_PREFIX("sort key filtered for multi get"); } return range_iteration_state::kFiltered; } @@ -2977,7 +2944,7 @@ bool 
pegasus_server_impl::set_usage_scenario(const std::string &usage_scenario) new_options["max_write_buffer_number"] = std::to_string(std::max(_data_cf_opts.max_write_buffer_number, 6)); } else { - LOG_ERROR("%s: invalid usage scenario: %s", replica_name(), usage_scenario.c_str()); + LOG_ERROR_PREFIX("invalid usage scenario: {}", usage_scenario); return false; } if (set_options(new_options)) { @@ -3122,10 +3089,7 @@ bool pegasus_server_impl::set_options( LOG_INFO_PREFIX("rocksdb set options returns {}: {}", status.ToString(), oss.str()); return true; } else { - LOG_ERROR("%s: rocksdb set options returns %s: {%s}", - replica_name(), - status.ToString().c_str(), - oss.str().c_str()); + LOG_ERROR_PREFIX("rocksdb set options returns {}: {}", status.ToString(), oss.str()); return false; } } diff --git a/src/server/result_writer.cpp b/src/server/result_writer.cpp index a88ce284ff..c3a666600a 100644 --- a/src/server/result_writer.cpp +++ b/src/server/result_writer.cpp @@ -55,13 +55,13 @@ void result_writer::set_result(const std::string &hash_key, 0, std::chrono::minutes(1)); } else { - LOG_ERROR("set_result fail, hash_key = %s, sort_key = %s, value = %s, " - "error = %s, left_try_count = %d, do not try again", - hash_key.c_str(), - sort_key.c_str(), - value.c_str(), - _client->get_error_string(err), - new_try_count); + LOG_ERROR_F("set_result fail, hash_key = {}, sort_key = {}, value = {}, error = " + "{}, left_try_count = {}, do not try again", + hash_key, + sort_key, + value, + _client->get_error_string(err), + new_try_count); } } else { LOG_DEBUG_F("set_result succeed, hash_key = {}, sort_key = {}, value = {}", diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h index afbba849ff..d6985b4c92 100644 --- a/src/shell/command_helper.h +++ b/src/shell/command_helper.h @@ -992,11 +992,11 @@ inline bool get_apps_and_nodes(shell_context *sc, { dsn::error_code err = sc->ddl_client->list_apps(dsn::app_status::AS_AVAILABLE, apps); if (err != dsn::ERR_OK) { - LOG_ERROR("list apps failed, error = %s", err.to_string()); + LOG_ERROR_F("list apps failed, error = {}", err); return false; } if (!fill_nodes(sc, "replica-server", nodes)) { - LOG_ERROR("get replica server node list failed"); + LOG_ERROR_F("get replica server node list failed"); return false; } return true; @@ -1013,7 +1013,7 @@ get_app_partitions(shell_context *sc, dsn::error_code err = sc->ddl_client->list_app( app.app_name, app_id, partition_count, app_partitions[app.app_id]); if (err != ::dsn::ERR_OK) { - LOG_ERROR("list app %s failed, error = %s", app.app_name.c_str(), err.to_string()); + LOG_ERROR_F("list app {} failed, error = {}", app.app_name, err); return false; } CHECK_EQ(app_id, app.app_id); @@ -1027,20 +1027,19 @@ inline bool decode_node_perf_counter_info(const dsn::rpc_address &node_addr, dsn::perf_counter_info &info) { if (!result.first) { - LOG_ERROR("query perf counter info from node %s failed", node_addr.to_string()); + LOG_ERROR_F("query perf counter info from node {} failed", node_addr); return false; } dsn::blob bb(result.second.data(), 0, result.second.size()); if (!dsn::json::json_forwarder::decode(bb, info)) { - LOG_ERROR("decode perf counter info from node %s failed, result = %s", - node_addr.to_string(), - result.second.c_str()); + LOG_ERROR_F( + "decode perf counter info from node {} failed, result = {}", node_addr, result.second); return false; } if (info.result != "OK") { - LOG_ERROR("query perf counter info from node %s returns error, error = %s", - node_addr.to_string(), - info.result.c_str()); + 
LOG_ERROR_F("query perf counter info from node {} returns error, error = {}", + node_addr, + info.result); return false; } return true; @@ -1133,7 +1132,7 @@ get_app_stat(shell_context *sc, const std::string &app_name, std::vectorddl_client->list_app(app_name, app_id, partition_count, partitions); if (err != ::dsn::ERR_OK) { - LOG_ERROR("list app %s failed, error = %s", app_name.c_str(), err.to_string()); + LOG_ERROR_F("list app {} failed, error = {}", app_name, err); return false; } CHECK_EQ(app_id, app_info->app_id); @@ -1253,7 +1252,7 @@ inline bool get_capacity_unit_stat(shell_context *sc, { std::vector nodes; if (!fill_nodes(sc, "replica-server", nodes)) { - LOG_ERROR("get replica server node list failed"); + LOG_ERROR_F("get replica server node list failed"); return false; } @@ -1307,13 +1306,13 @@ inline bool get_storage_size_stat(shell_context *sc, app_storage_size_stat &st_s std::vector<::dsn::app_info> apps; std::vector nodes; if (!get_apps_and_nodes(sc, apps, nodes)) { - LOG_ERROR("get apps and nodes failed"); + LOG_ERROR_F("get apps and nodes failed"); return false; } std::map> app_partitions; if (!get_app_partitions(sc, apps, app_partitions)) { - LOG_ERROR("get app partitions failed"); + LOG_ERROR_F("get app partitions failed"); return false; } for (auto &kv : app_partitions) { diff --git a/src/test/kill_test/data_verifier.cpp b/src/test/kill_test/data_verifier.cpp index 07cd835f12..9b6ed4f852 100644 --- a/src/test/kill_test/data_verifier.cpp +++ b/src/test/kill_test/data_verifier.cpp @@ -151,17 +151,17 @@ void do_set(int thread_id) last_time = cur_time; try_count = 0; } else { - LOG_ERROR("SetThread[%d]: set failed: id=%lld, try=%d, ret=%d, error=%s (gpid=%d.%d, " - "decree=%lld, server=%s)", - thread_id, - id, - try_count, - ret, - client->get_error_string(ret), - info.app_id, - info.partition_index, - info.decree, - info.server.c_str()); + LOG_ERROR_F("SetThread[{}]: set failed: id={}, try={}, ret={}, error={} (gpid={}.{}, " + "decree={}, server={})", + thread_id, + id, + try_count, + ret, + client->get_error_string(ret), + info.app_id, + info.partition_index, + info.decree, + info.server); try_count++; if (try_count > 3) { sleep(1); @@ -262,17 +262,17 @@ void do_get_range(int thread_id, int round_id, long long start_id, long long end try_count = 0; id++; } else { - LOG_ERROR("GetThread[%d]: round(%d): get failed: id=%lld, try=%d, ret=%d, error=%s " - "(gpid=%d.%d, server=%s)", - thread_id, - round_id, - id, - try_count, - ret, - client->get_error_string(ret), - info.app_id, - info.partition_index, - info.server.c_str()); + LOG_ERROR_F("GetThread[{}]: round({}): get failed: id={}, try={}, ret={}, error={} " + "(gpid={}.{}, server={})", + thread_id, + round_id, + id, + try_count, + ret, + client->get_error_string(ret), + info.app_id, + info.partition_index, + info.server); try_count++; if (try_count > 3) { sleep(1); @@ -326,13 +326,13 @@ void do_check(int thread_count) range_end); break; } else { - LOG_ERROR("CheckThread: round(%d): update \"%s\" failed: check_max=%lld, ret=%d, " - "error=%s", - round_id, - check_max_key, - range_end, - ret, - client->get_error_string(ret)); + LOG_ERROR_F( + "CheckThread: round({}): update \"{}\" failed: check_max={}, ret={}, error={}", + round_id, + check_max_key, + range_end, + ret, + client->get_error_string(ret)); } } @@ -364,11 +364,11 @@ void do_mark() cur_time - last_time); old_id = new_id; } else { - LOG_ERROR("MarkThread: update \"%s\" failed: set_next=%lld, ret=%d, error=%s", - set_next_key, - new_id, - ret, - 
client->get_error_string(ret)); + LOG_ERROR_F("MarkThread: update \"{}\" failed: set_next={}, ret={}, error={}", + set_next_key, + new_id, + ret, + client->get_error_string(ret)); } } } @@ -385,12 +385,12 @@ void verifier_initialize(const char *config_file) pegasus_cluster_name = dsn_config_get_value_string(section, "pegasus_cluster_name", "", "pegasus cluster name"); if (pegasus_cluster_name.empty()) { - LOG_ERROR("Should config the cluster name for verifier"); + LOG_ERROR_F("Should config the cluster name for verifier"); exit(-1); } client = pegasus_client_factory::get_client(pegasus_cluster_name.c_str(), app_name.c_str()); if (client == nullptr) { - LOG_ERROR("Initialize the _client failed"); + LOG_ERROR_F("Initialize the _client failed"); exit(-1); } @@ -411,9 +411,8 @@ void verifier_start() if (ret == PERR_OK) { long long i = atoll(set_next_value.c_str()); if (i == 0 && !set_next_value.empty()) { - LOG_ERROR("MainThread: read \"%s\" failed: value_str=%s", - set_next_key, - set_next_value.c_str()); + LOG_ERROR_F( + "MainThread: read \"{}\" failed: value_str={}", set_next_key, set_next_value); exit(-1); } LOG_INFO_F("MainThread: read \"{}\" succeed: value={}", set_next_key, i); @@ -424,9 +423,9 @@ void verifier_start() set_next.store(0); break; } else { - LOG_ERROR("MainThread: read \"%s\" failed: error=%s", - set_next_key, - client->get_error_string(ret)); + LOG_ERROR_F("MainThread: read \"{}\" failed: error={}", + set_next_key, + client->get_error_string(ret)); } } set_thread_setting_id.resize(set_thread_count); diff --git a/src/test/kill_test/kill_testor.cpp b/src/test/kill_test/kill_testor.cpp index 66c0400d6c..674ca8a331 100644 --- a/src/test/kill_test/kill_testor.cpp +++ b/src/test/kill_test/kill_testor.cpp @@ -57,7 +57,7 @@ kill_testor::kill_testor(const char *config_file) pegasus_cluster_name = dsn_config_get_value_string(section, "pegasus_cluster_name", "", "pegasus cluster name"); if (pegasus_cluster_name.empty()) { - LOG_ERROR("Should config the cluster name for killer"); + LOG_ERROR_F("Should config the cluster name for killer"); exit(-1); } @@ -66,13 +66,13 @@ kill_testor::kill_testor(const char *config_file) dsn::replication::replica_helper::load_meta_servers( meta_list, PEGASUS_CLUSTER_SECTION_NAME.c_str(), pegasus_cluster_name.c_str()); if (meta_list.empty()) { - LOG_ERROR("Should config the meta address for killer"); + LOG_ERROR_F("Should config the meta address for killer"); exit(-1); } ddl_client.reset(new replication_ddl_client(meta_list)); if (ddl_client == nullptr) { - LOG_ERROR("Initialize the _ddl_client failed"); + LOG_ERROR_F("Initialize the _ddl_client failed"); exit(-1); } diff --git a/src/test/kill_test/partition_kill_testor.cpp b/src/test/kill_test/partition_kill_testor.cpp index 99a53888b1..5c4384c3ef 100644 --- a/src/test/kill_test/partition_kill_testor.cpp +++ b/src/test/kill_test/partition_kill_testor.cpp @@ -100,7 +100,7 @@ void partition_kill_testor::run() } if (failed > 0) { - LOG_ERROR("call replica.kill_partition failed"); + LOG_ERROR_F("call replica.kill_partition failed"); } } } // namespace test diff --git a/src/test/kill_test/process_kill_testor.cpp b/src/test/kill_test/process_kill_testor.cpp index 20972969be..0fdcc1a5ef 100644 --- a/src/test/kill_test/process_kill_testor.cpp +++ b/src/test/kill_test/process_kill_testor.cpp @@ -233,14 +233,14 @@ bool process_kill_testor::check_coredump() // make sure all generated core are logged for (int i = 1; i <= _total_meta_count; ++i) { if (_killer_handler->has_meta_dumped_core(i)) { - LOG_ERROR("meta 
server %d generate core dump", i); + LOG_ERROR_F("meta server {} generate core dump", i); has_core = true; } } for (int i = 1; i <= _total_replica_count; ++i) { if (_killer_handler->has_replica_dumped_core(i)) { - LOG_ERROR("replica server %d generate core dump", i); + LOG_ERROR_F("replica server {} generate core dump", i); has_core = true; } } diff --git a/src/utils/api_utilities.h b/src/utils/api_utilities.h index eca751d278..6a54772307 100644 --- a/src/utils/api_utilities.h +++ b/src/utils/api_utilities.h @@ -87,12 +87,10 @@ extern void dsn_coredump(); dsn_logf(__FILENAME__, __FUNCTION__, __LINE__, level, __VA_ARGS__); \ } while (false) -#define LOG_ERROR(...) dlog(LOG_LEVEL_ERROR, __VA_ARGS__) - #define dreturn_not_ok_logged(err, ...) \ do { \ if (dsn_unlikely((err) != dsn::ERR_OK)) { \ - LOG_ERROR(__VA_ARGS__); \ + LOG_ERROR_F(__VA_ARGS__); \ return err; \ } \ } while (0) diff --git a/src/utils/filesystem.cpp b/src/utils/filesystem.cpp index d1e005d39f..207190d85d 100644 --- a/src/utils/filesystem.cpp +++ b/src/utils/filesystem.cpp @@ -696,8 +696,7 @@ bool get_disk_space_info(const std::string &path, disk_space_info &info) boost::system::error_code ec; boost::filesystem::space_info in = boost::filesystem::space(path, ec); if (ec) { - LOG_ERROR( - "get disk space info failed: path = %s, err = %s", path.c_str(), ec.message().c_str()); + LOG_ERROR_F("get disk space info failed: path = {}, err = {}", path, ec.message()); return false; } else { info.capacity = in.capacity; @@ -722,13 +721,13 @@ error_code md5sum(const std::string &file_path, /*out*/ std::string &result) result.clear(); // if file not exist, we return ERR_OBJECT_NOT_FOUND if (!::dsn::utils::filesystem::file_exists(file_path)) { - LOG_ERROR("md5sum error: file %s not exist", file_path.c_str()); + LOG_ERROR_F("md5sum error: file {} not exist", file_path); return ERR_OBJECT_NOT_FOUND; } FILE *fp = fopen(file_path.c_str(), "rb"); if (fp == nullptr) { - LOG_ERROR("md5sum error: open file %s failed", file_path.c_str()); + LOG_ERROR_F("md5sum error: open file {} failed", file_path); return ERR_FILE_OPERATION_FAILED; } @@ -747,10 +746,10 @@ error_code md5sum(const std::string &file_path, /*out*/ std::string &result) break; } else { int err = ferror(fp); - LOG_ERROR("md5sum error: read file %s failed: errno = %d (%s)", - file_path.c_str(), - err, - safe_strerror(err).c_str()); + LOG_ERROR_F("md5sum error: read file {} failed: errno = %d ({})", + file_path, + err, + safe_strerror(err)); fclose(fp); MD5_Final(out, &c); return ERR_FILE_OPERATION_FAILED; diff --git a/src/utils/shared_io_service.cpp b/src/utils/shared_io_service.cpp index 751786e2e0..3f4df9685f 100644 --- a/src/utils/shared_io_service.cpp +++ b/src/utils/shared_io_service.cpp @@ -29,7 +29,7 @@ DSN_DEFINE_uint32(core, "the number of threads for timer service"); DSN_DEFINE_validator(timer_service_worker_count, [](uint32_t worker_count) -> bool { if (worker_count < kMinTimerServiceWorkerCount) { - LOG_ERROR( + LOG_ERROR_F( "timer_service_worker_count should be at least 3, where one thread is used to " "collect all metrics from registery for monitoring systems, and another two threads " "are used to compute percentiles."); diff --git a/src/zookeeper/distributed_lock_service_zookeeper.cpp b/src/zookeeper/distributed_lock_service_zookeeper.cpp index c81cb1fa34..8c060f37e8 100644 --- a/src/zookeeper/distributed_lock_service_zookeeper.cpp +++ b/src/zookeeper/distributed_lock_service_zookeeper.cpp @@ -90,7 +90,7 @@ void distributed_lock_service_zookeeper::erase(const lock_key 
&key) error_code distributed_lock_service_zookeeper::initialize(const std::vector &args) { if (args.empty()) { - LOG_ERROR("need parameters: "); + LOG_ERROR_F("need parameters: "); return ERR_INVALID_PARAMETERS; } const char *lock_root = args[0].c_str(); @@ -128,7 +128,7 @@ error_code distributed_lock_service_zookeeper::initialize(const std::vectorvisit(op); e.wait(); if (zerr != ZOK && zerr != ZNODEEXISTS) { - LOG_ERROR("create zk node failed, path = %s, err = %s", current.c_str(), zerror(zerr)); + LOG_ERROR_F("create zk node failed, path = {}, err = {}", current, zerror(zerr)); return from_zerror(zerr); } } @@ -278,8 +278,8 @@ void distributed_lock_service_zookeeper::on_zoo_session_evt(lock_srv_ptr _this, } if (ZOO_EXPIRED_SESSION_STATE == zoo_state || ZOO_AUTH_FAILED_STATE == zoo_state) { - LOG_ERROR("get zoo state: %s, which means the session is expired", - zookeeper_session::string_zoo_state(zoo_state)); + LOG_ERROR_F("get zoo state: {}, which means the session is expired", + zookeeper_session::string_zoo_state(zoo_state)); _this->dispatch_zookeeper_session_expire(); } else { LOG_WARNING_F("get zoo state: {}, ignore it", diff --git a/src/zookeeper/lock_struct.cpp b/src/zookeeper/lock_struct.cpp index fbd96cd2ae..5bf1f69733 100644 --- a/src/zookeeper/lock_struct.cpp +++ b/src/zookeeper/lock_struct.cpp @@ -247,9 +247,9 @@ void lock_struct::after_remove_duplicated_locknode(lock_struct_ptr _this, _this->_lock_id, *path); } else { - LOG_ERROR("lock struct(%s), myself(%s) got session expire", - _this->_lock_dir.c_str(), - _this->_myself._node_seq_name.c_str()); + LOG_ERROR_F("lock struct({}), myself({}) got session expire", + _this->_lock_dir, + _this->_myself._node_seq_name); _this->on_expire(); } } @@ -330,9 +330,9 @@ void lock_struct::after_get_lock_owner(lock_struct_ptr _this, _this->_myself._node_value); } } else { - LOG_ERROR("lock_dir(%s), myself(%s), sessin expired", - _this->_lock_dir.c_str(), - _this->_myself._node_seq_name.c_str()); + LOG_ERROR_F("lock_dir({}), myself({}), sessin expired", + _this->_lock_dir, + _this->_myself._node_seq_name); _this->on_expire(); } } @@ -446,7 +446,7 @@ void lock_struct::after_get_lockdir_nodes(lock_struct_ptr _this, return; } if (ZINVALIDSTATE == ec) { - LOG_ERROR("get lockdir(%s) children got session expired", _this->_lock_dir.c_str()); + LOG_ERROR_F("get lockdir({}) children got session expired", _this->_lock_dir); _this->on_expire(); return; } @@ -564,9 +564,9 @@ void lock_struct::after_create_locknode(lock_struct_ptr _this, return; } if (ZINVALIDSTATE == ec) { - LOG_ERROR("create seq/ephe node (%s) in dir(%s) got session expired", - distributed_lock_service_zookeeper::LOCK_NODE_PREFIX.c_str(), - _this->_lock_dir.c_str()); + LOG_ERROR_F("create seq/ephe node ({}) in dir({}) got session expired", + distributed_lock_service_zookeeper::LOCK_NODE_PREFIX, + _this->_lock_dir); _this->on_expire(); return; } @@ -629,7 +629,7 @@ void lock_struct::after_create_lockdir(lock_struct_ptr _this, int ec) return; } if (ZINVALIDSTATE == ec) { - LOG_ERROR("create lock dir failed got session expire, _path(%s)", _this->_lock_dir.c_str()); + LOG_ERROR_F("create lock dir failed got session expire, _path({})", _this->_lock_dir); _this->_lock_dir.clear(); _this->on_expire(); return; From e08ea8e0e8b0e320891df3cb8a988de77af15fd2 Mon Sep 17 00:00:00 2001 From: wh002 Date: Thu, 19 Jan 2023 16:37:25 +0800 Subject: [PATCH 2/6] cr1 --- src/client/partition_resolver_simple.cpp | 20 +++++--------------- src/failure_detector/failure_detector.cpp | 16 ++++++++-------- 
src/meta/meta_backup_service.cpp | 12 +++++------- src/nfs/nfs_server_impl.cpp | 2 +- src/runtime/rpc/asio_net_provider.cpp | 2 +- 5 files changed, 20 insertions(+), 32 deletions(-) diff --git a/src/client/partition_resolver_simple.cpp b/src/client/partition_resolver_simple.cpp index 3e41cd55d8..7ccd63dbc5 100644 --- a/src/client/partition_resolver_simple.cpp +++ b/src/client/partition_resolver_simple.cpp @@ -310,28 +310,18 @@ void partition_resolver_simple::query_config_reply(error_code err, } } } else if (resp.err == ERR_OBJECT_NOT_FOUND) { - LOG_ERROR_F("{}.client: query config reply, gpid = {}.{}, err = {}", - _app_name, - _app_id, - partition_index, - resp.err); + LOG_ERROR_PREFIX( + "query config reply, gpid = {}.{}, err = {}", _app_id, partition_index, resp.err); client_err = ERR_APP_NOT_EXIST; } else { - LOG_ERROR_F("{}.client: query config reply, gpid = {}.{}, err = {}", - _app_name, - _app_id, - partition_index, - resp.err); + LOG_ERROR_PREFIX( + "query config reply, gpid = {}.{}, err = {}", _app_id, partition_index, resp.err); client_err = resp.err; } } else { - LOG_ERROR_F("{}.client: query config reply, gpid = {}.{}, err = {}", - _app_name, - _app_id, - partition_index, - err); + LOG_ERROR_F("query config reply, gpid = {}.{}, err = {}", _app_id, partition_index, err); } // get specific or all partition update diff --git a/src/failure_detector/failure_detector.cpp b/src/failure_detector/failure_detector.cpp index 7a87c82234..cb78e0e2f5 100644 --- a/src/failure_detector/failure_detector.cpp +++ b/src/failure_detector/failure_detector.cpp @@ -225,8 +225,8 @@ void failure_detector::check_all_records() is_time_greater_than(now, record.last_send_time_for_beacon_with_ack) && now + _check_interval_milliseconds - record.last_send_time_for_beacon_with_ack > _lease_milliseconds) { - LOG_ERROR_F("master {} disconnected, now={:#018x}, last_send_time={:#018x}, " - "now+check_interval-last_send_time={:#018x}", + LOG_ERROR_F("master {} disconnected, now={}, last_send_time={}, " + "now+check_interval-last_send_time={}", record.node, now, record.last_send_time_for_beacon_with_ack, @@ -268,12 +268,12 @@ void failure_detector::check_all_records() // overflow if (record.is_alive && is_time_greater_than(now, record.last_beacon_recv_time) && now - record.last_beacon_recv_time > _grace_milliseconds) { - LOG_ERROR_F("worker {} disconnected, now={:#018x}, last_beacon_recv_time={:#018x}, " - "now-last_recv={:#018x}", - record.node, - now, - record.last_beacon_recv_time, - now - record.last_beacon_recv_time); + LOG_ERROR_F( + "worker {} disconnected, now={}, last_beacon_recv_time={}, now-last_recv={}", + record.node, + now, + record.last_beacon_recv_time, + now - record.last_beacon_recv_time); expire.push_back(record.node); record.is_alive = false; diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index f24c6952a9..e2bb3c2eb9 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -625,10 +625,9 @@ void policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b LOG_WARNING_F("{}: empty callback", _policy.policy_name); } } else if (ERR_TIMEOUT == err) { - LOG_ERROR_F( - "{}: sync backup info({:#018x}) to remote storage got timeout, retry it later", - _policy.policy_name, - b_info.backup_id); + LOG_ERROR_F("{}: sync backup info({}) to remote storage got timeout, retry it later", + _policy.policy_name, + b_info.backup_id); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, [this, b_info, sync_callback, create_new_node]() { @@ 
-1325,9 +1324,8 @@ void backup_service::do_add_policy(dsn::message_ex *req, } p->start(); } else if (err == ERR_TIMEOUT) { - LOG_ERROR_F( - "create backup policy on remote storage timeout, retry after {:#018x} (ms)", - _opt.meta_retry_delay_ms.count()); + LOG_ERROR_F("create backup policy on remote storage timeout, retry after {} (ms)", + _opt.meta_retry_delay_ms.count()); tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, std::bind(&backup_service::do_add_policy, this, req, p, hint_msg), diff --git a/src/nfs/nfs_server_impl.cpp b/src/nfs/nfs_server_impl.cpp index f72b79f6a0..6ab22b2543 100644 --- a/src/nfs/nfs_server_impl.cpp +++ b/src/nfs/nfs_server_impl.cpp @@ -209,7 +209,7 @@ void nfs_service_impl::on_get_file_size( if (0 != ::stat(file_path.c_str(), &st)) { LOG_ERROR_F("[nfs_service] get stat of file {} failed, err = {}", file_path, - strerror(errno)); + dsn::utils::safe_strerror(errno)); err = ERR_OBJECT_NOT_FOUND; break; } diff --git a/src/runtime/rpc/asio_net_provider.cpp b/src/runtime/rpc/asio_net_provider.cpp index 23f6679f09..1bf0786f34 100644 --- a/src/runtime/rpc/asio_net_provider.cpp +++ b/src/runtime/rpc/asio_net_provider.cpp @@ -148,7 +148,7 @@ void asio_network_provider::do_accept() if (!ec) { auto remote = socket->remote_endpoint(ec); if (ec) { - LOG_ERROR_F("failed to get the remote endpoint: {}", ec.message().data()); + LOG_ERROR_F("failed to get the remote endpoint: {}", ec.message()); } else { auto ip = remote.address().to_v4().to_ulong(); auto port = remote.port(); From 64e6c045f8d3280658c35b990311d1230df2b9f5 Mon Sep 17 00:00:00 2001 From: wh002 Date: Thu, 19 Jan 2023 16:49:59 +0800 Subject: [PATCH 3/6] cr1 --- src/nfs/nfs_server_impl.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nfs/nfs_server_impl.cpp b/src/nfs/nfs_server_impl.cpp index 6ab22b2543..d4e5453cc2 100644 --- a/src/nfs/nfs_server_impl.cpp +++ b/src/nfs/nfs_server_impl.cpp @@ -33,6 +33,7 @@ #include "aio/disk_engine.h" #include "runtime/task/async_calls.h" +#include "utils/safe_strerror_posix.h" #include "utils/filesystem.h" #include "utils/string_conv.h" From 12c7842ef4e1b273ed338f44c2732900ed8335c9 Mon Sep 17 00:00:00 2001 From: wh002 Date: Thu, 19 Jan 2023 20:17:23 +0800 Subject: [PATCH 4/6] cr2 --- src/client/partition_resolver_simple.cpp | 3 ++- src/meta/meta_backup_service.cpp | 4 ++-- src/server/pegasus_server_impl.cpp | 7 ++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/client/partition_resolver_simple.cpp b/src/client/partition_resolver_simple.cpp index 7ccd63dbc5..f589b72858 100644 --- a/src/client/partition_resolver_simple.cpp +++ b/src/client/partition_resolver_simple.cpp @@ -321,7 +321,8 @@ void partition_resolver_simple::query_config_reply(error_code err, client_err = resp.err; } } else { - LOG_ERROR_F("query config reply, gpid = {}.{}, err = {}", _app_id, partition_index, err); + LOG_ERROR_PREFIX( + "query config reply, gpid = {}.{}, err = {}", _app_id, partition_index, err); } // get specific or all partition update diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index e2bb3c2eb9..7777b34a35 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -1324,7 +1324,7 @@ void backup_service::do_add_policy(dsn::message_ex *req, } p->start(); } else if (err == ERR_TIMEOUT) { - LOG_ERROR_F("create backup policy on remote storage timeout, retry after {} (ms)", + LOG_ERROR_F("create backup policy on remote storage timeout, retry after {} ms", _opt.meta_retry_delay_ms.count()); 
tasking::enqueue(LPC_DEFAULT_CALLBACK, &_tracker, @@ -1358,7 +1358,7 @@ void backup_service::do_update_policy_to_remote_storage( p_context_ptr->set_policy(p); } else if (err == ERR_TIMEOUT) { LOG_ERROR_F("update backup policy to remote storage failed, policy_name = {}, " - "retry after {:#018x} (ms)", + "retry after {:#018x} ms", p.policy_name, _opt.meta_retry_delay_ms.count()); tasking::enqueue(LPC_DEFAULT_CALLBACK, diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp index d4465b5b40..e584aa0d99 100644 --- a/src/server/pegasus_server_impl.cpp +++ b/src/server/pegasus_server_impl.cpp @@ -359,9 +359,10 @@ void pegasus_server_impl::on_multi_get(multi_get_rpc rpc) } if (!is_filter_type_supported(request.sort_key_filter_type)) { - LOG_ERROR_F("invalid argument for multi_get from {}: sort key filter type {} not supported", - rpc.remote_address(), - request.sort_key_filter_type); + LOG_ERROR_PREFIX( + "invalid argument for multi_get from {}: sort key filter type {} not supported", + rpc.remote_address(), + request.sort_key_filter_type); resp.error = rocksdb::Status::kInvalidArgument; _cu_calculator->add_multi_get_cu(req, resp.error, request.hash_key, resp.kvs); _pfc_multi_get_latency->set(dsn_now_ns() - start_time); From ff83215e63080ddb5b2c0e10a80fd82fd8bece09 Mon Sep 17 00:00:00 2001 From: wh002 Date: Thu, 19 Jan 2023 22:27:09 +0800 Subject: [PATCH 5/6] cr3 --- src/meta/meta_data.cpp | 10 +++++----- src/nfs/nfs_server_impl.cpp | 2 +- src/runtime/rpc/network.cpp | 3 +-- src/utils/filesystem.cpp | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp index 5e26fa4201..652bdd5c40 100644 --- a/src/meta/meta_data.cpp +++ b/src/meta/meta_data.cpp @@ -433,11 +433,11 @@ bool config_context::check_order() dropped[i].last_committed_decree, dropped[i].last_prepared_decree, i, - dropped[i].node, - dropped[i].time, - dropped[i].ballot, - dropped[i].last_committed_decree, - dropped[i].last_prepared_decree, + dropped[i + 1].node, + dropped[i + 1].time, + dropped[i + 1].ballot, + dropped[i + 1].last_committed_decree, + dropped[i + 1].last_prepared_decree, i + 1); return false; } diff --git a/src/nfs/nfs_server_impl.cpp b/src/nfs/nfs_server_impl.cpp index d4e5453cc2..71723b17b5 100644 --- a/src/nfs/nfs_server_impl.cpp +++ b/src/nfs/nfs_server_impl.cpp @@ -33,8 +33,8 @@ #include "aio/disk_engine.h" #include "runtime/task/async_calls.h" -#include "utils/safe_strerror_posix.h" #include "utils/filesystem.h" +#include "utils/safe_strerror_posix.h" #include "utils/string_conv.h" namespace dsn { diff --git a/src/runtime/rpc/network.cpp b/src/runtime/rpc/network.cpp index 4dcb951be5..7835d72ebf 100644 --- a/src/runtime/rpc/network.cpp +++ b/src/runtime/rpc/network.cpp @@ -418,8 +418,7 @@ bool rpc_session::on_recv_message(message_ex *msg, int delay_ms) // - the remote address is not listened, which means the remote port is not occupied // - operating system chooses the remote port as client's ephemeral port if (is_client() && msg->header->from_address == _net.engine()->primary_address()) { - LOG_ERROR_F("self connection detected, address = {}", - msg->header->from_address.to_string()); + LOG_ERROR_F("self connection detected, address = {}", msg->header->from_address); CHECK_EQ_MSG(msg->get_count(), 0, "message should not be referenced by anybody so far"); delete msg; return false; diff --git a/src/utils/filesystem.cpp b/src/utils/filesystem.cpp index 207190d85d..719eaf20cf 100644 --- a/src/utils/filesystem.cpp +++ 
b/src/utils/filesystem.cpp @@ -746,7 +746,7 @@ error_code md5sum(const std::string &file_path, /*out*/ std::string &result) break; } else { int err = ferror(fp); - LOG_ERROR_F("md5sum error: read file {} failed: errno = %d ({})", + LOG_ERROR_F("md5sum error: read file {} failed: errno = {} ({})", file_path, err, safe_strerror(err)); From 6151c72d52f2035df94c52e64a337f40c998ac1c Mon Sep 17 00:00:00 2001 From: wh002 Date: Thu, 19 Jan 2023 23:30:46 +0800 Subject: [PATCH 6/6] cr4 --- src/meta/meta_backup_service.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index 7777b34a35..259b130b5f 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -1358,7 +1358,7 @@ void backup_service::do_update_policy_to_remote_storage( p_context_ptr->set_policy(p); } else if (err == ERR_TIMEOUT) { LOG_ERROR_F("update backup policy to remote storage failed, policy_name = {}, " - "retry after {:#018x} ms", + "retry after {} ms", p.policy_name, _opt.meta_retry_delay_ms.count()); tasking::enqueue(LPC_DEFAULT_CALLBACK,
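
For reference, a minimal standalone sketch of the printf-style to fmt-style conversion these patches apply, written against libfmt directly rather than the project's LOG_ERROR/LOG_ERROR_F macros (illustrative only, not part of the patch; the macro definitions live elsewhere in the tree, and LOG_ERROR_F appears to forward its arguments to fmt-style formatting). It shows why the `.c_str()` calls can be dropped, and what the `{:#018x}` specifier that several hunks replace with plain `{}` actually renders. The related strerror(errno) -> dsn::utils::safe_strerror(errno) change swaps in a thread-safe wrapper around the errno-to-string lookup.

// sketch.cpp -- illustrative only, assumes libfmt is available
#include <cstdint>
#include <cstdio>
#include <string>
#include <fmt/format.h>

int main()
{
    std::string path = "/fds/pegasus/checkpoint";
    uint64_t now = 1674137035123ULL;

    // printf-style: %s needs a const char*, hence the .c_str() calls removed by the series.
    std::printf("old: remove_path failed: parameter(%s)\n", path.c_str());

    // fmt-style: {} formats std::string (and types with fmt formatters) directly.
    fmt::print("new: remove_path failed: parameter({})\n", path);

    // {:#018x} prints a 0x-prefixed value zero-padded to width 18; the series switches
    // decimal quantities such as timestamps, backup ids and retry delays to plain {}.
    fmt::print("hex: now={:#018x}  decimal: now={}\n", now, now);
    return 0;
}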