Skip to content

Commit

Permalink
llama : set metal log callback correctly (#4204)
Browse files Browse the repository at this point in the history
  • Loading branch information
slaren authored Nov 24, 2023
1 parent 8a052c1 commit e9c13ff
Showing 1 changed file with 9 additions and 2 deletions.
11 changes: 9 additions & 2 deletions llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1118,6 +1118,12 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_
//

struct llama_state {
llama_state() {
#ifdef GGML_USE_METAL
ggml_metal_log_set_callback(log_callback, log_callback_user_data);
#endif
}

// We save the log callback globally
ggml_log_callback log_callback = llama_log_callback_default;
void * log_callback_user_data = nullptr;
Expand Down Expand Up @@ -8569,8 +8575,6 @@ struct llama_context * llama_new_context_with_model(

#ifdef GGML_USE_METAL
if (model->n_gpu_layers > 0) {
ggml_metal_log_set_callback(llama_log_callback_default, NULL);

ctx->ctx_metal = ggml_metal_init(1);
if (!ctx->ctx_metal) {
LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
Expand Down Expand Up @@ -9706,6 +9710,9 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal
// Install a user-provided log callback and its context pointer.
// Passing a NULL callback restores the default llama logger.
void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    g_state.log_callback_user_data = user_data;
    if (log_callback) {
        g_state.log_callback = log_callback;
    } else {
        g_state.log_callback = llama_log_callback_default;
    }
#ifdef GGML_USE_METAL
    // keep the Metal backend's logger in sync with the one just installed
    ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
#endif
}

static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
Expand Down

0 comments on commit e9c13ff

Please sign in to comment.