example_config.toml
# User-specific configuration; adjust according to hardware capabilities and preferences.

# Model selection
# - "mistral-7b-instruct": smaller, faster model (~5GB RAM); quick processing with reduced
#   output quality.
# - "mixtral-8x7b-instruct": larger model (~25GB RAM); enhanced reasoning and accuracy for
#   complex code analysis tasks.
# Note: disabling memory mapping might be necessary for "mixtral-8x7b-instruct" on machines
# that cannot provide the required amount of RAM.
model_identifier = "mistral-7b-instruct"
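# To switch to the larger model instead, replace the line above with:
# model_identifier = "mixtral-8x7b-instruct"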
# Optimize performance with memory mapping: parts of the model are loaded on demand instead of
# reading the entire model into memory, which can reduce memory usage and improve loading times
# (requires ~5GB RAM for the default model).
use_mmap = true
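# Per the note above, if the larger model exhausts available RAM, memory mapping may need to
# be disabled:
# use_mmap = false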
# Number of CPU threads to use; set to 0 to prioritize GPU usage over CPU
n_threads = 0
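# Example (hypothetical value): to pin work to a fixed number of CPU threads instead of
# prioritizing the GPU:
# n_threads = 8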
# Number of model layers to offload to the GPU for faster processing on a strong GPU
n_gpu_layers = 99
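# Example (assuming 0 means no layers are offloaded): to run entirely on the CPU:
# n_gpu_layers = 0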
# Ensure deterministic model outputs by specifying a seed
seed = 0
# Toggle verbose logging of model configurations
verbose = false