llama : avoid using "optional" keyword (ggerganov#4283)
ggerganov committed Dec 1, 2023
1 parent d5a1cbd commit 5a7d312
Showing 1 changed file with 6 additions and 6 deletions.

llama.cpp:
@@ -1991,11 +1991,11 @@ struct llama_model_loader {
         return tensor;
     }

-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());

         if (cur == NULL) {
-            if (optional) {
+            if (!required) {
                 return NULL;
             }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
@@ -2816,10 +2816,10 @@ static void llm_load_tensors(
                         layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);

                         // optional bias tensors
-                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true);
-                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true);
-                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true);
-                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true);
+                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, false);
+                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, false);
+                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, false);
+                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, false);

                         layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);

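For readers skimming the diff: the flag's polarity flips along with its name, so call sites that previously passed optional = true now pass required = false, and the unchanged call sites keep the default, which still means "the tensor must exist". Below is a minimal standalone sketch of the pattern (illustrative only: the map-based loader, the tensor struct, and the names are stand-ins, not the real ggml/llama.cpp API):

    // Sketch of the renamed flag's semantics; all names here are
    // hypothetical stand-ins for the real loader.
    #include <cstdio>
    #include <map>
    #include <stdexcept>
    #include <string>

    struct tensor { std::string name; };

    // Pretend model file: the attention Q weight exists, its bias does not.
    static std::map<std::string, tensor> g_tensors = {
        { "blk.0.attn_q.weight", { "blk.0.attn_q.weight" } },
    };

    // Mirrors the commit: a `required = true` default replaces the old
    // `optional = false`, so absence is an error unless the caller opts out.
    static tensor * create_tensor(const std::string & name, bool required = true) {
        auto it = g_tensors.find(name);
        if (it == g_tensors.end()) {
            if (!required) {
                return nullptr; // optional tensor: absence is not an error
            }
            throw std::runtime_error("tensor '" + name + "' not found");
        }
        return &it->second;
    }

    int main() {
        tensor * wq = create_tensor("blk.0.attn_q.weight");      // must exist
        tensor * bq = create_tensor("blk.0.attn_q.bias", false); // may be null

        std::printf("wq: %s\n", wq->name.c_str());
        std::printf("bq: %s\n", bq ? bq->name.c_str() : "(absent, bias skipped)");
        return 0;
    }

As in the real loader, any tensor requested with required = false must be null-checked before it is used; the bias tensors above are only added to the compute graph when they are present.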
