Remove llava printf() statements
Fixes #346
jart committed Apr 19, 2024
1 parent e5d53ac commit ff9decc
Showing 2 changed files with 12 additions and 12 deletions.
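
Both files apply the same pattern: error messages that precede a return false move from printf to fprintf(stderr, ...), while informational status lines (prompts, tokenization dumps, quantization sizes) move to tinylogf. Either way, nothing is written to stdout anymore. Below is a minimal sketch of the distinction, not code from this repository: tinylogf is assumed to be llamafile's logging helper and is modeled here as a plain stderr printf so the sketch compiles (the real helper may additionally honor logging options).

    #include <cstdio>

    // Stand-in for llamafile's tinylogf; an assumption, not the real definition.
    #define tinylogf(...) fprintf(stderr, __VA_ARGS__)

    int main() {
        // Before this commit, diagnostics were interleaved with model output on stdout:
        // printf("user_prompt: %s\n", "describe the image");

        // After: hard errors go straight to stderr...
        fprintf(stderr, "This gguf file seems to have no vision encoder\n");

        // ...and status lines go through the logging helper.
        tinylogf("user_prompt: %s\n", "describe the image");
        return 0;
    }

With diagnostics on stderr, a redirection such as llava-cli ... > out.txt should capture only model output, which matches the commit's aim of keeping llava's printf noise out of program output.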
14 changes: 7 additions & 7 deletions llama.cpp/llava/clip.cpp
@@ -1753,7 +1753,7 @@ int clip_n_patches(const struct clip_ctx * ctx) {
 
 bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
     if (!ctx->has_vision_encoder) {
-        printf("This gguf file seems to have no vision encoder\n");
+        fprintf(stderr, "This gguf file seems to have no vision encoder\n");
         return false;
     }
 
@@ -1765,7 +1765,7 @@ bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
 
 bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec) {
     if (!ctx->has_vision_encoder) {
-        printf("This gguf file seems to have no vision encoder\n");
+        fprintf(stderr, "This gguf file seems to have no vision encoder\n");
         return false;
     }
 
@@ -1946,7 +1946,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
                 f32_data = (float *)conv_buf.data();
                 break;
             default:
-                printf("Please use an input file in f32 or f16\n");
+                fprintf(stderr, "Please use an input file in f32 or f16\n");
                 gguf_free(ctx_out);
                 return false;
             }
@@ -1973,8 +1973,8 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
             fout.put(0);
         }
 
-        printf("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
-               orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
+        tinylogf("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
+                 orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
     }
 
     // go back to beginning of file and write the updated metadata
@@ -1989,8 +1989,8 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
     gguf_free(ctx_out);
 
     {
-        printf("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
-        printf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
+        tinylogf("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
+        tinylogf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
     }
 
     return true;
10 changes: 5 additions & 5 deletions llama.cpp/llava/llava-cli.cpp
@@ -169,18 +169,18 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         // new templating mode: Provide the full prompt including system message and use <image> as a placeholder for the image
         system_prompt = prompt.substr(0, image_pos);
         user_prompt = prompt.substr(image_pos + std::string("<image>").length());
-        printf("system_prompt: %s\n", system_prompt.c_str());
+        tinylogf("system_prompt: %s\n", system_prompt.c_str());
         if (params->verbose_prompt) {
             auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
-                printf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+                tinylogf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }
         }
-        printf("user_prompt: %s\n", user_prompt.c_str());
+        tinylogf("user_prompt: %s\n", user_prompt.c_str());
         if (params->verbose_prompt) {
             auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
-                printf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+                tinylogf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }
         }
     } else {
@@ -190,7 +190,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         if (params->verbose_prompt) {
             auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
             for (int i = 0; i < (int) tmp.size(); i++) {
-                printf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
+                tinylogf("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
             }
         }
     }
