Skip to content

Commit

Permalink
feat: sync llama.cpp
Browse files Browse the repository at this point in the history
  • Loading branch information
jhen0409 committed Jan 18, 2024
1 parent cca3877 commit aef603d
Show file tree
Hide file tree
Showing 17 changed files with 2,072 additions and 1,737 deletions.
22 changes: 22 additions & 0 deletions cpp/common.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,24 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
if (params.n_threads_batch <= 0) {
params.n_threads_batch = std::thread::hardware_concurrency();
}
} else if (arg == "-td" || arg == "--threads-draft") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.n_threads_draft = std::stoi(argv[i]);
if (params.n_threads_draft <= 0) {
params.n_threads_draft = std::thread::hardware_concurrency();
}
} else if (arg == "-tbd" || arg == "--threads-batch-draft") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.n_threads_batch_draft = std::stoi(argv[i]);
if (params.n_threads_batch_draft <= 0) {
params.n_threads_batch_draft = std::thread::hardware_concurrency();
}
} else if (arg == "-p" || arg == "--prompt") {
if (++i >= argc) {
invalid_param = true;
Expand Down Expand Up @@ -851,6 +869,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" -t N, --threads N number of threads to use during generation (default: %d)\n", params.n_threads);
printf(" -tb N, --threads-batch N\n");
printf(" number of threads to use during batch and prompt processing (default: same as --threads)\n");
printf("  -td N, --threads-draft N\n");
printf("                        number of threads to use during generation (default: same as --threads)\n");
printf(" -tbd N, --threads-batch-draft N\n");
printf(" number of threads to use during batch and prompt processing (default: same as --threads-draft)\n");
printf(" -p PROMPT, --prompt PROMPT\n");
printf(" prompt to start generation with (default: empty)\n");
printf("  -e, --escape          process prompt escape sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
Expand Down
2 changes: 2 additions & 0 deletions cpp/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,9 @@ struct gpt_params {
uint32_t seed = -1; // RNG seed

int32_t n_threads = get_num_physical_cores();
int32_t n_threads_draft = -1;
int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
int32_t n_threads_batch_draft = -1;
int32_t n_predict = -1; // new tokens to predict
int32_t n_ctx = 512; // context size
int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
Expand Down
60 changes: 30 additions & 30 deletions cpp/ggml-backend-impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,14 @@ extern "C" {
typedef void * lm_ggml_backend_buffer_type_context_t;

struct lm_ggml_backend_buffer_type_i {
const char * (*get_name) (lm_ggml_backend_buffer_type_t buft);
lm_ggml_backend_buffer_t (*alloc_buffer) (lm_ggml_backend_buffer_type_t buft, size_t size);
size_t (*get_alignment) (lm_ggml_backend_buffer_type_t buft); // tensor alignment
size_t (*get_alloc_size) (lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
bool (*supports_backend)(lm_ggml_backend_buffer_type_t buft, lm_ggml_backend_t backend); // check if the buffer type is usable by the backend
const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_type_t buft);
lm_ggml_backend_buffer_t (*LM_GGML_CALL alloc_buffer) (lm_ggml_backend_buffer_type_t buft, size_t size);
size_t (*LM_GGML_CALL get_alignment) (lm_ggml_backend_buffer_type_t buft); // tensor alignment
size_t (*LM_GGML_CALL get_alloc_size) (lm_ggml_backend_buffer_type_t buft, const struct lm_ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
bool (*LM_GGML_CALL supports_backend)(lm_ggml_backend_buffer_type_t buft, lm_ggml_backend_t backend); // check if the buffer type is usable by the backend
// check if tensor data is in host memory
// should be equivalent to supports_backend(buft, lm_ggml_backend_cpu_init())
bool (*is_host) (lm_ggml_backend_buffer_type_t buft);
bool (*LM_GGML_CALL is_host) (lm_ggml_backend_buffer_type_t buft);
};

struct lm_ggml_backend_buffer_type {
Expand All @@ -35,15 +35,15 @@ extern "C" {
typedef void * lm_ggml_backend_buffer_context_t;

struct lm_ggml_backend_buffer_i {
const char * (*get_name) (lm_ggml_backend_buffer_t buffer);
void (*free_buffer)(lm_ggml_backend_buffer_t buffer);
void * (*get_base) (lm_ggml_backend_buffer_t buffer);
void (*init_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
void (*set_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
void (*get_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
bool (*cpy_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
void (*clear) (lm_ggml_backend_buffer_t buffer, uint8_t value);
void (*reset) (lm_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
const char * (*LM_GGML_CALL get_name) (lm_ggml_backend_buffer_t buffer);
void (*LM_GGML_CALL free_buffer)(lm_ggml_backend_buffer_t buffer);
void * (*LM_GGML_CALL get_base) (lm_ggml_backend_buffer_t buffer);
void (*LM_GGML_CALL init_tensor)(lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor);
void (*LM_GGML_CALL set_tensor) (lm_ggml_backend_buffer_t buffer, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
void (*LM_GGML_CALL get_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
bool (*LM_GGML_CALL cpy_tensor) (lm_ggml_backend_buffer_t buffer, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
void (*LM_GGML_CALL clear) (lm_ggml_backend_buffer_t buffer, uint8_t value);
void (*LM_GGML_CALL reset) (lm_ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
};

struct lm_ggml_backend_buffer {
Expand All @@ -54,7 +54,7 @@ extern "C" {
enum lm_ggml_backend_buffer_usage usage;
};

lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
LM_GGML_CALL lm_ggml_backend_buffer_t lm_ggml_backend_buffer_init(
lm_ggml_backend_buffer_type_t buft,
struct lm_ggml_backend_buffer_i iface,
lm_ggml_backend_buffer_context_t context,
Expand All @@ -70,31 +70,31 @@ extern "C" {
typedef void * lm_ggml_backend_context_t;

struct lm_ggml_backend_i {
const char * (*get_name)(lm_ggml_backend_t backend);
const char * (*LM_GGML_CALL get_name)(lm_ggml_backend_t backend);

void (*free)(lm_ggml_backend_t backend);
void (*LM_GGML_CALL free)(lm_ggml_backend_t backend);

// buffer allocation
lm_ggml_backend_buffer_type_t (*get_default_buffer_type)(lm_ggml_backend_t backend);
lm_ggml_backend_buffer_type_t (*LM_GGML_CALL get_default_buffer_type)(lm_ggml_backend_t backend);

// (optional) asynchronous tensor data access
void (*set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
void (*get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
bool (*cpy_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);
void (*LM_GGML_CALL set_tensor_async)(lm_ggml_backend_t backend, struct lm_ggml_tensor * tensor, const void * data, size_t offset, size_t size);
void (*LM_GGML_CALL get_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * tensor, void * data, size_t offset, size_t size);
bool (*LM_GGML_CALL cpy_tensor_async)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * src, struct lm_ggml_tensor * dst);

// (optional) complete all pending operations
void (*synchronize)(lm_ggml_backend_t backend);
void (*LM_GGML_CALL synchronize)(lm_ggml_backend_t backend);

// compute graph with a plan
lm_ggml_backend_graph_plan_t (*graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
void (*graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
void (*graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
lm_ggml_backend_graph_plan_t (*LM_GGML_CALL graph_plan_create) (lm_ggml_backend_t backend, const struct lm_ggml_cgraph * cgraph);
void (*LM_GGML_CALL graph_plan_free) (lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);
void (*LM_GGML_CALL graph_plan_compute)(lm_ggml_backend_t backend, lm_ggml_backend_graph_plan_t plan);

// compute graph without a plan (async)
bool (*graph_compute)(lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);
bool (*LM_GGML_CALL graph_compute)(lm_ggml_backend_t backend, struct lm_ggml_cgraph * cgraph);

// check if the backend supports an operation
bool (*supports_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
bool (*LM_GGML_CALL supports_op)(lm_ggml_backend_t backend, const struct lm_ggml_tensor * op);
};

struct lm_ggml_backend {
Expand All @@ -107,9 +107,9 @@ extern "C" {
// Backend registry
//

typedef lm_ggml_backend_t (*lm_ggml_backend_init_fn)(const char * params, void * user_data);
typedef lm_ggml_backend_t (*LM_GGML_CALL lm_ggml_backend_init_fn)(const char * params, void * user_data);

void lm_ggml_backend_register(const char * name, lm_ggml_backend_init_fn init_fn, lm_ggml_backend_buffer_type_t default_buffer_type, void * user_data);
LM_GGML_CALL void lm_ggml_backend_register(const char * name, lm_ggml_backend_init_fn init_fn, lm_ggml_backend_buffer_type_t default_buffer_type, void * user_data);

#ifdef __cplusplus
}
Expand Down
Loading

0 comments on commit aef603d

Please sign in to comment.