add lowvram parameter
YellowRoseCx committed Jun 21, 2023
1 parent 222cbbb commit 665cc11
Showing 4 changed files with 8 additions and 3 deletions.

Makefile (2 additions & 2 deletions)
@@ -160,8 +160,8 @@ ifdef LLAMA_HIPBLAS
    CC := $(ROCM_PATH)/llvm/bin/clang
    CXX := $(ROCM_PATH)/llvm/bin/clang++
    GPU_TARGETS = gfx900 gfx906 gfx908 gfx90a gfx1030
-   LLAMA_CUDA_DMMV_X ?= 128
-   LLAMA_CUDA_DMMV_Y ?= 4
+   LLAMA_CUDA_DMMV_X ?= 64
+   LLAMA_CUDA_DMMV_Y ?= 2
    CFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS $(shell $(ROCM_PATH)/bin/hipconfig -C)
    CXXFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS $(shell $(ROCM_PATH)/bin/hipconfig -C)
    LDFLAGS += -L/opt/rocm/lib -Wl,-rpath=$(ROCM_PATH)/lib -lhipblas -lamdhip64
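
Both variables use Make's conditional assignment operator (?=), so the halved defaults only take effect when nothing else defines them; the previous values can still be restored per build, without editing the Makefile, from the command line (hypothetical invocation):

make LLAMA_HIPBLAS=1 LLAMA_CUDA_DMMV_X=128 LLAMA_CUDA_DMMV_Y=4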

expose.h (1 addition & 0 deletions)
@@ -8,6 +8,7 @@ struct load_model_inputs
    const int max_context_length;
    const int batch_size;
    const bool f16_kv;
+   const bool low_vram;
    const char * executable_path;
    const char * model_filename;
    const char * lora_filename;
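
Because low_vram is inserted into the middle of load_model_inputs rather than appended, anything that mirrors the struct by memory layout must add the field at the same position, which is exactly what the ctypes definition in koboldcpp.py below does. A reduced Python sketch of why declaration order, not field name, is what matters:

import ctypes

class load_model_inputs(ctypes.Structure):
    # ctypes computes each field's byte offset from its position in
    # _fields_, so this order must mirror the members in expose.h exactly
    # (reduced to three fields for illustration; not the full struct)
    _fields_ = [("f16_kv", ctypes.c_bool),
                ("low_vram", ctypes.c_bool),
                ("executable_path", ctypes.c_char_p)]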

gpttype_adapter.cpp (1 addition & 0 deletions)
@@ -371,6 +371,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
    //llama_ctx_paran_parts = -1;
    llama_ctx_params.seed = -1;
    llama_ctx_params.f16_kv = inputs.f16_kv;
+   llama_ctx_params.low_vram = inputs.low_vram;
    llama_ctx_params.logits_all = false;
    llama_ctx_params.use_mmap = inputs.use_mmap;
    llama_ctx_params.use_mlock = inputs.use_mlock;
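
For context: in upstream llama.cpp as of mid-2023, enabling low_vram in llama_context_params told the CUDA (and, via hipBLAS, the ROCm) backend not to allocate VRAM scratch buffers and to keep the KV cache in system memory, trading some speed for a smaller GPU footprint. The adapter itself just forwards the flag from the shared input struct.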

koboldcpp.py (4 additions & 1 deletion)
@@ -16,6 +16,7 @@ class load_model_inputs(ctypes.Structure):
    ("max_context_length", ctypes.c_int),
    ("batch_size", ctypes.c_int),
    ("f16_kv", ctypes.c_bool),
+   ("low_vram", ctypes.c_bool),
    ("executable_path", ctypes.c_char_p),
    ("model_filename", ctypes.c_char_p),
    ("lora_filename", ctypes.c_char_p),
@@ -150,6 +151,7 @@ def load_model(model_filename):
    inputs.batch_size = 8
    inputs.max_context_length = maxctx #initial value to use for ctx, can be overwritten
    inputs.threads = args.threads
+   inputs.low_vram = args.lowvram
    inputs.blasthreads = args.blasthreads
    inputs.f16_kv = True
    inputs.use_mmap = (not args.nommap)
@@ -646,7 +648,7 @@ def onDropdownChange(event):
    #load all the vars
    args.threads = int(threads_var.get())
    args.gpulayers = int(gpu_layers_var.get())
-
+
    args.stream = (stream.get()==1)
    args.smartcontext = (smartcontext.get()==1)
    args.launch = (launchbrowser.get()==1)
@@ -861,6 +863,7 @@ def main(args):
    parser.add_argument("--hordeconfig", help="Sets the display model name to something else, for easy use on AI Horde. Optional additional parameters set the horde max genlength and max ctxlen.",metavar=('[hordename]', '[hordelength] [hordectx]'), nargs='+')
    compatgroup = parser.add_mutually_exclusive_group()
    compatgroup.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
+   parser.add_argument("--lowvram", help="Do not keep scratch memory in VRAM for CUDA", action='store_true')
    compatgroup.add_argument("--useclblast", help="Use CLBlast instead of OpenBLAS for prompt ingestion. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
    parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using CLBlast. Requires CLBlast.",metavar=('[GPU layers]'), type=int, default=0)
    args = parser.parse_args()
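
Note that --lowvram is added to the parser directly rather than to the mutually exclusive compatgroup, so it can be combined with either --noblas or --useclblast. As an action='store_true' flag it defaults to False; a launch that enables it might look like this (hypothetical model path):

python koboldcpp.py mymodel.ggml --lowvram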
