fix: utilities to post process checkpoint for LoRA #338

Merged
merged 39 commits on Sep 25, 2024
Changes from 3 commits
Commits (39)
fa42c73
utilities to post process checkpoint for LoRA
Ssukriti Sep 10, 2024
e5e4c27
Merge branch 'main' into utility_to_post-process_LoRA
Ssukriti Sep 10, 2024
0fa3dac
improve code comments
Ssukriti Sep 10, 2024
fa97871
Add unit test and fix some lint errors
aluu317 Sep 17, 2024
4c9bb95
lint: fix more fmt errors
aluu317 Sep 17, 2024
af191d1
feat: Add post_process_vLLM_adapters_new_tokens function to main
willmj Sep 18, 2024
fb1dcc9
Merge remote-tracking branch 'origin/main' into utility_to_post-proce…
willmj Sep 18, 2024
bcc17b1
fmt
willmj Sep 18, 2024
57cadc3
fix: Add post processing flag so post processing is only done for vLLM
willmj Sep 18, 2024
36a554c
fix: get num_added_tokens from resize function (#344)
Ssukriti Sep 19, 2024
0d34b1f
Merge branch 'main' into utility_to_post-process_LoRA
Ssukriti Sep 19, 2024
4380c5b
Ran fmt and also removed unneccessary files from test artifact
aluu317 Sep 19, 2024
146e9f1
fix: unit tests
Ssukriti Sep 19, 2024
0022da3
fix: Adding tokens in special_tokens_dict
Abhishek-TAMU Sep 20, 2024
e6a2bc8
Merge branch 'main' into utility_to_post-process_LoRA
Ssukriti Sep 20, 2024
0d077ea
fix: Add additional arg to tests to reflect new flag post_process_vllm
willmj Sep 20, 2024
c8d8f98
fmt
willmj Sep 20, 2024
80fae90
feat: Refactor post-processing of adapters (#345)
Ssukriti Sep 23, 2024
fcdfa29
add test for LoRA tuning from main
Ssukriti Sep 23, 2024
e5406e5
fix formatting
Ssukriti Sep 23, 2024
6ae8f36
correcting post processing script
Ssukriti Sep 23, 2024
a93e902
fix:post-process in place
Ssukriti Sep 23, 2024
7f864d0
update documentation for post-processing
Ssukriti Sep 23, 2024
3de588a
fix:formatting
Ssukriti Sep 23, 2024
f38361c
fix:linting
Ssukriti Sep 23, 2024
3966fef
more warnings /exceptions in script
Ssukriti Sep 24, 2024
2b73e63
check for no tokens added
Ssukriti Sep 24, 2024
e4dd9b2
fix:linting
Ssukriti Sep 24, 2024
9caef81
additional unit test
Ssukriti Sep 24, 2024
820222c
add more tests
Ssukriti Sep 25, 2024
5a8aca0
fix:tokenizer test
Ssukriti Sep 25, 2024
8f92b90
fix:linting and docstrings
Ssukriti Sep 25, 2024
48321e3
fix:return type of trainer
Ssukriti Sep 25, 2024
85f623b
test: enable tests and fix copytree
anhuong Sep 25, 2024
7531836
use copy function from build
Ssukriti Sep 25, 2024
3eb0e54
fix:linting and formatting
Ssukriti Sep 25, 2024
f8fd164
make build a module
Ssukriti Sep 25, 2024
3aaae3c
Merge branch 'main' into utility_to_post-process_LoRA
Ssukriti Sep 25, 2024
2b92881
add back old copy function
Ssukriti Sep 25, 2024
tuning/utils/merge_model_utils.py (74 additions, 0 deletions)
@@ -102,3 +102,77 @@ def fetch_base_model_from_checkpoint(checkpoint_model: str) -> str:
"Base model adapter config exists, but has no base_model_name_or_path!"
)
return adapter_dict["base_model_name_or_path"]

def _copy_files_to_directory(src: str, dest: str, exclude_files: list = None):
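    """Copy regular files from src into dest, skipping any file names listed in exclude_files."""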
    import shutil

    if exclude_files is None:
        exclude_files = []
    src_files = os.listdir(src)
    for file_name in src_files:
        if file_name in exclude_files:
            continue
        full_file_name = os.path.join(src, file_name)
        # copy only regular files; sub-directories are skipped
        if os.path.isfile(full_file_name):
            shutil.copy(full_file_name, dest)

def post_process_vLLM_adapters_new_tokens(
    path_to_checkpoint: str, modified_checkpoint_path: str = None
):
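    """Post-process a LoRA checkpoint so adapters with resized embeddings can be loaded by vLLM.

    The embedding rows of newly added tokens are split out of lm_head / embed_tokens into a
    separate new_embeddings.safetensors file, and the remaining adapter weights are re-saved
    as adapter_model.safetensors under modified_checkpoint_path. If modified_checkpoint_path
    is not set, the original checkpoint is modified in place.
    """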

    from safetensors import safe_open
    from safetensors.torch import save_file

    # if not set, original checkpoint will be modified in place
    if not modified_checkpoint_path:
        modified_checkpoint_path = path_to_checkpoint

    # Get the vocabulary indexes of all newly added tokens
    sorted_token_indexes = []
    if os.path.isfile(os.path.join(path_to_checkpoint, "added_tokens.json")):
        with open(os.path.join(path_to_checkpoint, "added_tokens.json"), "r") as fp:
            added_tokens = json.load(fp)
            sorted_token_indexes = sorted(added_tokens.values())

    with safe_open(
        os.path.join(path_to_checkpoint, "adapter_model.safetensors"), framework="pt"
    ) as f:
        new_embeddings = {}
        adapters = {}
        embeddings_weights_in_adapters = False
        # Quickly check if post-processing is needed by checking adapters file for weights
        for k in f.keys():
            if "lm_head.weight" in k or "embed_tokens.weight" in k:
                embeddings_weights_in_adapters = True
                if not sorted_token_indexes:
                    raise NotImplementedError(
                        "Seems like embeddings are resized without adding new tokens. "
                        "Cannot be post-processed to load on vLLM. Try setting "
                        "parameter `embedding_size_multiple_of` to 1"
                    )

        # Post-processing is needed to copy out new vectors
        if embeddings_weights_in_adapters:
            for k in f.keys():
                if "lm_head.weight" in k:
                    lm_head = f.get_tensor(k)
                    # pull out tensor values of new tokens (end index includes the last added token)
                    if len(sorted_token_indexes) == 1:
                        new_output_embeddings = lm_head[sorted_token_indexes[0] : sorted_token_indexes[0] + 1]
                    elif len(sorted_token_indexes) > 1:
                        new_output_embeddings = lm_head[sorted_token_indexes[0] : sorted_token_indexes[-1] + 1]
                    # vLLM requires renaming to output_embeddings
                    new_embeddings["output_embeddings"] = new_output_embeddings

                elif "embed_tokens.weight" in k:
                    embed_tokens = f.get_tensor(k)
                    # pull out tensor values of new tokens (end index includes the last added token)
                    if len(sorted_token_indexes) == 1:
                        new_input_embeddings = embed_tokens[sorted_token_indexes[0] : sorted_token_indexes[0] + 1]
                    elif len(sorted_token_indexes) > 1:
                        new_input_embeddings = embed_tokens[sorted_token_indexes[0] : sorted_token_indexes[-1] + 1]
                    # vLLM requires renaming to input_embeddings
                    new_embeddings["input_embeddings"] = new_input_embeddings
                else:
                    # Retain all other weights in adapter_model.safetensors
                    adapters[k] = f.get_tensor(k)

            save_file(new_embeddings, os.path.join(modified_checkpoint_path, "new_embeddings.safetensors"))
            save_file(adapters, os.path.join(modified_checkpoint_path, "adapter_model.safetensors"))

    # copy out remaining files to desired path
    if modified_checkpoint_path != path_to_checkpoint:
        _copy_files_to_directory(
            path_to_checkpoint, modified_checkpoint_path, exclude_files=["adapter_model.safetensors"]
        )


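For reference, a minimal usage sketch of the new utility (not part of this PR's diff). The checkpoint paths are placeholders, and it assumes a LoRA checkpoint directory containing adapter_model.safetensors and added_tokens.json:

from tuning.utils.merge_model_utils import post_process_vLLM_adapters_new_tokens

# Hypothetical paths for illustration only.
checkpoint_dir = "/data/tuning/lora/checkpoint-35"
output_dir = "/data/tuning/lora/checkpoint-35-post-processed"

# Splits the rows for newly added tokens into new_embeddings.safetensors and rewrites
# adapter_model.safetensors without them, so the adapter can be loaded by vLLM.
post_process_vLLM_adapters_new_tokens(
    path_to_checkpoint=checkpoint_dir,
    modified_checkpoint_path=output_dir,
)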