From ac12509e4365e83faabaef3a74d66c9fbb22a1cf Mon Sep 17 00:00:00 2001
From: gdedrouas
Date: Sat, 20 May 2023 13:57:20 +0200
Subject: [PATCH 1/2] update llamacpp submodule

---
 vendor/llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vendor/llama.cpp b/vendor/llama.cpp
index c238b5873..ea600071c 160000
--- a/vendor/llama.cpp
+++ b/vendor/llama.cpp
@@ -1 +1 @@
-Subproject commit c238b5873a1ea496db03ffcfe124c9d0d83afbc6
+Subproject commit ea600071cb005267e9e8f2629c1e406dd5fde083

From 3330d7d7e4098cd1016c035899ba344fe784cf84 Mon Sep 17 00:00:00 2001
From: gdedrouas
Date: Sat, 20 May 2023 16:42:27 +0200
Subject: [PATCH 2/2] remove n_parts from llama_context struct

---
 llama_cpp/llama_cpp.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index 0dcb16c75..5d3598122 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -113,7 +113,6 @@ class llama_token_data_array(Structure):
 class llama_context_params(Structure):
     _fields_ = [
         ("n_ctx", c_int),  # text context
-        ("n_parts", c_int),  # -1 for default
         ("n_gpu_layers", c_int),  # number of layers to store in VRAM
         ("seed", c_int),  # RNG seed, 0 for random
         ("f16_kv", c_bool),  # use fp16 for KV cache