
Merge branch 'master' into concedo_experimental
# Conflicts:
#	README.md
#	build.zig
#	flake.nix
#	tests/test-grad0.c
#	tests/test-sampling.cpp
#	tests/test-tokenizer-0.cpp
LostRuins committed Jun 25, 2023
2 parents 8342fe8 + 66a2555 commit d2034ce
Showing 19 changed files with 346 additions and 149 deletions.
4 changes: 2 additions & 2 deletions convert.py
@@ -998,9 +998,9 @@ def write_vocab(self, vocab: Vocab) -> None:
 def write_vocab_only(fname_out: Path, vocab: Vocab) -> None:
     of = OutputFile(fname_out)
     params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0,
-                    n_head=1, n_layer=0, file_type=GGMLFileType.AllF32)
+                    n_head=1, n_layer=0)
     of = OutputFile(fname_out)
-    of.write_file_header(params)
+    of.write_file_header(params, file_type=GGMLFileType.AllF32)
     of.write_vocab(vocab)
     of.fout.close()

22 changes: 15 additions & 7 deletions examples/common.cpp
@@ -536,7 +536,7 @@ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::s
     return res;
 }
 
-struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
+std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params) {
     auto lparams = llama_context_default_params();
 
     lparams.n_ctx = params.n_ctx;
@@ -552,25 +552,33 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     lparams.logits_all = params.perplexity;
     lparams.embedding = params.embedding;
 
-    llama_context * lctx = llama_init_from_file(params.model.c_str(), lparams);
+    llama_model * model = llama_load_model_from_file(params.model.c_str(), lparams);
+    if (model == NULL) {
+        fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
+        return std::make_tuple(nullptr, nullptr);
+    }
 
+    llama_context * lctx = llama_new_context_with_model(model, lparams);
     if (lctx == NULL) {
-        fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
-        return NULL;
+        fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
+        llama_free_model(model);
+        return std::make_tuple(nullptr, nullptr);
     }
 
     if (!params.lora_adapter.empty()) {
-        int err = llama_apply_lora_from_file(lctx,
+        int err = llama_model_apply_lora_from_file(model,
                                              params.lora_adapter.c_str(),
                                              params.lora_base.empty() ? NULL : params.lora_base.c_str(),
                                              params.n_threads);
         if (err != 0) {
             fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
-            return NULL;
+            llama_free(lctx);
+            llama_free_model(model);
+            return std::make_tuple(nullptr, nullptr);
         }
     }
 
-    return lctx;
+    return std::make_tuple(model, lctx);
 }
 
 void console_init(console_state & con_st) {
3 changes: 2 additions & 1 deletion examples/common.h
@@ -9,6 +9,7 @@
 #include <random>
 #include <thread>
 #include <unordered_map>
+#include <tuple>
 
 #if !defined (_WIN32)
 #include <stdio.h>
@@ -95,7 +96,7 @@ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::s
 // Model utils
 //
 
-struct llama_context * llama_init_from_gpt_params(const gpt_params & params);
+std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params);
 
 //
 // Console utils
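For illustration only, a minimal sketch of how a caller might use the updated llama_init_from_gpt_params declared above; the wrapper name run_once and the elided evaluation step are assumptions, everything else comes from the API shown in this diff:

#include <tuple>
#include "common.h"
#include "llama.h"

// Hypothetical caller: obtain model and context in one call, free both on exit.
static int run_once(const gpt_params & params) {
    llama_model * model = nullptr;
    llama_context * ctx = nullptr;
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == nullptr) {
        return 1; // on failure both pointers come back as nullptr
    }
    // ... tokenize and evaluate with ctx here ...
    llama_free(ctx);         // free the context first,
    llama_free_model(model); // then the model it was created from
    return 0;
}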
6 changes: 4 additions & 2 deletions examples/embedding/embedding.cpp
@@ -37,11 +37,12 @@ int main(int argc, char ** argv) {
 
     llama_init_backend();
 
+    llama_model * model;
     llama_context * ctx;
 
     // load the model
-    ctx = llama_init_from_gpt_params(params);
-    if (ctx == NULL) {
+    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    if (model == NULL) {
         fprintf(stderr, "%s: error: unable to load model\n", __func__);
         return 1;
     }
@@ -90,6 +91,7 @@ int main(int argc, char ** argv) {
 
     llama_print_timings(ctx);
     llama_free(ctx);
+    llama_free_model(model);
 
     return 0;
 }
8 changes: 6 additions & 2 deletions examples/main/main.cpp
@@ -107,12 +107,13 @@ int main(int argc, char ** argv) {
 
     llama_init_backend();
 
+    llama_model * model;
     llama_context * ctx;
     g_ctx = &ctx;
 
     // load the model and apply lora adapter, if any
-    ctx = llama_init_from_gpt_params(params);
-    if (ctx == NULL) {
+    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    if (model == NULL) {
         fprintf(stderr, "%s: error: unable to load model\n", __func__);
         return 1;
     }
@@ -139,6 +140,7 @@ int main(int argc, char ** argv) {
 
         llama_print_timings(ctx);
         llama_free(ctx);
+        llama_free_model(model);
 
         return 0;
     }
@@ -147,6 +149,7 @@ int main(int argc, char ** argv) {
     if (params.export_cgraph) {
         llama_eval_export(ctx, "llama.ggml");
         llama_free(ctx);
+        llama_free_model(model);
 
         return 0;
     }
@@ -666,6 +669,7 @@ int main(int argc, char ** argv) {
 
     llama_print_timings(ctx);
     llama_free(ctx);
+    llama_free_model(model);
 
     return 0;
 }
6 changes: 4 additions & 2 deletions examples/perplexity/perplexity.cpp
@@ -149,11 +149,12 @@ int main(int argc, char ** argv) {
 
     llama_init_backend();
 
+    llama_model * model;
     llama_context * ctx;
 
     // load the model and apply lora adapter, if any
-    ctx = llama_init_from_gpt_params(params);
-    if (ctx == NULL) {
+    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    if (model == NULL) {
         fprintf(stderr, "%s: error: unable to load model\n", __func__);
         return 1;
     }
@@ -169,6 +170,7 @@ int main(int argc, char ** argv) {
 
     llama_print_timings(ctx);
     llama_free(ctx);
+    llama_free_model(model);
 
     return 0;
 }
15 changes: 13 additions & 2 deletions examples/quantize-stats/quantize-stats.cpp
@@ -320,6 +320,7 @@ int main(int argc, char ** argv) {
     fprintf(stderr, "Loading model\n");
 
     const int64_t t_main_start_us = ggml_time_us();
+    llama_model * model;
     llama_context * ctx;
 
     {
@@ -330,12 +331,20 @@ int main(int argc, char ** argv) {
         lparams.f16_kv = false;
         lparams.use_mlock = false;
 
-        ctx = llama_init_from_file(params.model.c_str(), lparams);
+        model = llama_load_model_from_file(params.model.c_str(), lparams);
 
-        if (ctx == NULL) {
+        if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
             return 1;
         }
+
+        ctx = llama_new_context_with_model(model, lparams);
+
+        if (ctx == NULL) {
+            fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
+            llama_free_model(model);
+            return 1;
+        }
     }
 
     const auto &tensors = llama_internal_get_tensor_map(ctx);
@@ -357,6 +366,7 @@ int main(int argc, char ** argv) {
                 fprintf(stderr, "%s: error: Quantization should be tested with a float model, "
                     "this model contains already quantized layers (%s is type %d)\n", __func__, kv_tensor.first.c_str(), kv_tensor.second->type);
                 llama_free(ctx);
+                llama_free_model(model);
                 return 1;
             }
             included_layers++;
@@ -415,6 +425,7 @@ int main(int argc, char ** argv) {
 
 
     llama_free(ctx);
+    llama_free_model(model);
     // report timing
     {
         const int64_t t_main_end_us = ggml_time_us();
29 changes: 25 additions & 4 deletions examples/save-load-state/save-load-state.cpp
@@ -35,12 +35,22 @@ int main(int argc, char ** argv) {
     auto last_n_tokens_data = std::vector<llama_token>(params.repeat_last_n, 0);
 
     // init
-    auto ctx = llama_init_from_file(params.model.c_str(), lparams);
+    auto model = llama_load_model_from_file(params.model.c_str(), lparams);
+    if (model == nullptr) {
+        return 1;
+    }
+    auto ctx = llama_new_context_with_model(model, lparams);
+    if (ctx == nullptr) {
+        llama_free_model(model);
+        return 1;
+    }
     auto tokens = std::vector<llama_token>(params.n_ctx);
     auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
 
     if (n_prompt_tokens < 1) {
         fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
+        llama_free(ctx);
+        llama_free_model(model);
         return 1;
     }
 
@@ -84,30 +94,36 @@ int main(int argc, char ** argv) {
         printf("%s", next_token_str);
         if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
+            llama_free(ctx);
+            llama_free_model(model);
             return 1;
         }
         n_past += 1;
     }
 
     printf("\n\n");
 
-    // free old model
+    // free old context
     llama_free(ctx);
 
-    // load new model
-    auto ctx2 = llama_init_from_file(params.model.c_str(), lparams);
+    // make new context
+    auto ctx2 = llama_new_context_with_model(model, lparams);
 
     // Load state (rng, logits, embedding and kv_cache) from file
     {
         FILE *fp_read = fopen("dump_state.bin", "rb");
         if (state_size != llama_get_state_size(ctx2)) {
             fprintf(stderr, "\n%s : failed to validate state size\n", __func__);
+            llama_free(ctx2);
+            llama_free_model(model);
             return 1;
         }
 
         const size_t ret = fread(state_mem, 1, state_size, fp_read);
         if (ret != state_size) {
             fprintf(stderr, "\n%s : failed to read state\n", __func__);
+            llama_free(ctx2);
+            llama_free_model(model);
             return 1;
         }
 
@@ -138,12 +154,17 @@ int main(int argc, char ** argv) {
         printf("%s", next_token_str);
         if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
+            llama_free(ctx2);
+            llama_free_model(model);
             return 1;
         }
         n_past += 1;
     }
 
     printf("\n\n");
 
+    llama_free(ctx2);
+    llama_free_model(model);
+
     return 0;
 }
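The point of the model/context split is visible in this example: ctx and ctx2 are created from the same llama_model, and the model is freed only once at the end. A hedged sketch of that pattern, with the prompt and state handling elided and the model_path parameter introduced only for illustration:

#include "llama.h"

// Sketch: one loaded model backing two contexts in sequence.
static int reuse_model_demo(const char * model_path) {
    llama_context_params lparams = llama_context_default_params();

    llama_model * model = llama_load_model_from_file(model_path, lparams);
    if (model == nullptr) {
        return 1;
    }

    llama_context * ctx = llama_new_context_with_model(model, lparams);
    // ... evaluate a prompt and save the state, then drop this context ...
    llama_free(ctx);

    llama_context * ctx2 = llama_new_context_with_model(model, lparams); // same weights, fresh state
    // ... restore the saved state into ctx2 and continue ...
    llama_free(ctx2);

    llama_free_model(model); // freed once, after every context that used it
    return 0;
}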
9 changes: 7 additions & 2 deletions examples/server/server.cpp
@@ -115,6 +115,7 @@ struct llama_server_context {
     std::vector<llama_token> embd;
     std::vector<llama_token> last_n_tokens;
 
+    llama_model * model = nullptr;
     llama_context * ctx = nullptr;
     gpt_params params;
 
@@ -130,6 +131,10 @@ struct llama_server_context {
             llama_free(ctx);
             ctx = nullptr;
         }
+        if (model) {
+            llama_free_model(model);
+            model = nullptr;
+        }
     }
 
     void rewind() {
@@ -150,8 +155,8 @@ struct llama_server_context {
 
     bool loadModel(const gpt_params & params_) {
         params = params_;
-        ctx = llama_init_from_gpt_params(params);
-        if (ctx == nullptr) {
+        std::tie(model, ctx) = llama_init_from_gpt_params(params);
+        if (model == nullptr) {
             LOG_ERROR("unable to load model", { { "model", params_.model } });
             return false;
         }
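The server change above keeps both pointers as long-lived members and releases them in the order every other file in this commit uses: context first, then model. A small illustrative holder showing that ownership pattern; the struct name ctx_holder is hypothetical, not from the diff:

#include "llama.h"

struct ctx_holder {
    llama_model *   model = nullptr;
    llama_context * ctx   = nullptr;

    ~ctx_holder() {
        if (ctx)   { llama_free(ctx);         ctx   = nullptr; } // context first...
        if (model) { llama_free_model(model); model = nullptr; } // ...then the model it references
    }
};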
8 changes: 5 additions & 3 deletions examples/simple/simple.cpp
@@ -68,11 +68,12 @@ int main(int argc, char ** argv)
 
     llama_init_backend();
 
-    llama_context * ctx ;
+    llama_model * model;
+    llama_context * ctx;
 
-    ctx = llama_init_from_gpt_params( params );
+    std::tie(model, ctx) = llama_init_from_gpt_params( params );
 
-    if ( ctx == NULL )
+    if ( model == NULL )
     {
         fprintf( stderr , "%s: error: unable to load model\n" , __func__ );
         return 1;
@@ -170,6 +171,7 @@ int main(int argc, char ** argv)
     } // wend of main loop
 
     llama_free( ctx );
+    llama_free_model( model );
 
     return 0;
 }
5 changes: 4 additions & 1 deletion examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -3054,7 +3054,8 @@ int main(int argc, char ** argv) {
     struct llama_context_params llama_params = llama_context_default_params();
     llama_params.vocab_only = true;
 
-    struct llama_context * lctx = llama_init_from_file(params.fn_vocab_model, llama_params);
+    struct llama_model * lmodel = llama_load_model_from_file(params.fn_vocab_model, llama_params);
+    struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_params);
 
     struct llama_vocab vocab;
     {
@@ -3395,6 +3396,8 @@ int main(int argc, char ** argv) {
     delete[] compute_addr;
     delete[] compute_buf_0;
     delete[] compute_buf_1;
+    llama_free(lctx);
+    llama_free_model(lmodel);
     ggml_free(model.ctx);
 
     return 0;
2 changes: 1 addition & 1 deletion ggml-cuda.cu
@@ -2635,7 +2635,7 @@ void ggml_cuda_free_scratch() {
 bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
     ggml_cuda_func_t func;
     const bool any_on_device = tensor->backend == GGML_BACKEND_GPU
-        || tensor->src0->backend == GGML_BACKEND_GPU || tensor->src0->backend == GGML_BACKEND_GPU_SPLIT
+        || (tensor->src0 != nullptr && (tensor->src0->backend == GGML_BACKEND_GPU || tensor->src0->backend == GGML_BACKEND_GPU_SPLIT))
        || (tensor->src1 != nullptr && tensor->src1->backend == GGML_BACKEND_GPU);
 
     switch (tensor->op) {
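The one-line fix above guards src0 the same way src1 was already guarded, since some ops have no first source tensor. A hypothetical helper, not in the diff, that expresses the same short-circuit check:

#include "ggml.h"

// Illustration only: test the pointer before reading its backend field.
static bool tensor_on_gpu(const struct ggml_tensor * t) {
    return t != nullptr && (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT);
}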
