Commit

Create mergekit.py
suparious authored May 11, 2024
1 parent 7eddd98 commit 19c96c5
Showing 1 changed file with 30 additions and 0 deletions.
30 changes: 30 additions & 0 deletions mergekit.py
@@ -0,0 +1,30 @@
# CLI equivalent: mergekit-yaml somefile.yaml . --cuda --low-cpu-memory --out-shard-size "2B"
import torch
import yaml
import mergekit

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

OUTPUT_PATH = "/opt/openbet/inference/data" # folder to store the result in
LORA_MERGE_CACHE = "" # change if you want to keep these for some reason
CONFIG_YML = "/opt/openbet/inference/KatyTestHistorical-SultrySilicon-7B-V2.yaml" # merge configuration file
COPY_TOKENIZER = True # you want a tokenizer? yeah, that's what i thought
LAZY_UNPICKLE = False # experimental low-memory model loader
LOW_CPU_MEMORY = False # enable if you somehow have more VRAM than RAM+swap

with open(CONFIG_YML, "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path=OUTPUT_PATH,
    options=MergeOptions(
        lora_merge_cache=LORA_MERGE_CACHE,
        cuda=torch.cuda.is_available(),
        copy_tokenizer=COPY_TOKENIZER,
        lazy_unpickle=LAZY_UNPICKLE,
        low_cpu_memory=LOW_CPU_MEMORY,
    ),
)
print("Done!")

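For context, the file that CONFIG_YML points to is a mergekit merge recipe. Below is a minimal sketch of what such a recipe could look like, written as a small Python snippet that emits the YAML; the model names, the slerp method, and the interpolation factor are illustrative assumptions only, not the actual KatyTestHistorical-SultrySilicon-7B-V2 configuration.

# Illustrative only: writes a hypothetical slerp merge recipe to example-merge.yaml.
# The model names and parameters are placeholders, not the configuration used above.
import yaml

example_config = {
    "merge_method": "slerp",
    "base_model": "org/model-a-7b",  # placeholder base model
    "slices": [
        {
            "sources": [
                {"model": "org/model-a-7b", "layer_range": [0, 32]},
                {"model": "org/model-b-7b", "layer_range": [0, 32]},
            ]
        }
    ],
    "parameters": {"t": 0.5},  # slerp interpolation factor between the two models
    "dtype": "bfloat16",
}

with open("example-merge.yaml", "w", encoding="utf-8") as fp:
    yaml.safe_dump(example_config, fp, sort_keys=False)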