
Commit: Don't actually add a duplicate

janeyx99 committed Jul 28, 2023
1 parent 5262644 · commit 4f19a13
Showing 5 changed files with 4 additions and 38 deletions.
15 changes: 0 additions & 15 deletions torchbenchmark/models/llama_v2_7b_8h/__init__.py

This file was deleted.

9 changes: 0 additions & 9 deletions torchbenchmark/models/llama_v2_7b_8h/install.py

This file was deleted.

12 changes: 0 additions & 12 deletions torchbenchmark/models/llama_v2_7b_8h/metadata.yaml

This file was deleted.

2 changes: 0 additions & 2 deletions torchbenchmark/util/framework/huggingface/model_factory.py
@@ -30,8 +30,6 @@
     'hf_Whisper': (1024, 1024, 'WhisperConfig()', 'AutoModelForAudioClassification'),
     # default num_hidden_layers=32 but that OOMs, feel free to change this config to something more real
     'llama_v2_7b_16h' : (512,512, 'LlamaConfig(num_hidden_layers=16)', 'AutoModelForCausalLM'),
-    # an even smaller model since 16h OOMs for our optimizer benchmarks
-    'llama_v2_7b_8h' : (512,512, 'LlamaConfig(num_hidden_layers=8)', 'AutoModelForCausalLM'),
 }

 cpu_input_slice = {
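For context: each entry in this dict maps a benchmark name to two sequence lengths, a config-constructor string, and a model-class string. Below is a minimal sketch of how such an entry could be turned into a runnable model with Hugging Face transformers; the tuple-field meanings and the use of eval() are assumptions for illustration, not code taken from model_factory.py.

    # Sketch only (assumed semantics): build a model from a class_models-style entry.
    from transformers import AutoModelForCausalLM, LlamaConfig  # noqa: F401, used via eval

    entry = (512, 512, 'LlamaConfig(num_hidden_layers=16)', 'AutoModelForCausalLM')
    train_len, eval_len, config_src, model_cls_src = entry

    # num_hidden_layers=16 halves Llama-2's default 32 layers so the model
    # fits in memory, per the comment in the diff above.
    config = eval(config_src)
    model = eval(model_cls_src).from_config(config)
    print(f"{sum(p.numel() for p in model.parameters()):,} parameters")

Shrinking num_hidden_layers further (the deleted 8h entry used 8) reduces the model again; that config knob is exactly what the removed duplicate entry was turning.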
4 changes: 4 additions & 0 deletions userbenchmark/optim/run.py
@@ -249,6 +249,10 @@ def get_unstable_models() -> Set[str]:
         # Skip models deemed unstable by torch-nightly
         {'model': m} for m in unstable_models
     ] + [
+        # 16h currently OOMs, but once it supports train, we should remove this line
+        # See tracker https://github.com/pytorch/benchmark/issues/1793
+        {'model': 'llama_v2_7b_16h'}
+    ] +[
         # SparseAdam does not support dense gradients
         {'optim': 'SparseAdam', 'model': m} for m in DENSE_MODELS
     ] + [
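The skip list concatenated here holds partial-key dicts: an entry with only a 'model' key skips that model for every optimizer, while an entry with both keys skips a single (model, optim) pair. A minimal sketch of subset-style matching under that assumption follows; the helper name and entries are hypothetical, and run.py's actual filtering may differ.

    from typing import Dict, List

    # Hypothetical skip entries mirroring the shapes used in the diff above.
    SKIPS: List[Dict[str, str]] = [
        {'model': 'llama_v2_7b_16h'},                    # all optimizers: model OOMs
        {'optim': 'SparseAdam', 'model': 'hf_Whisper'},  # one (model, optim) pair
    ]

    def is_skipped(run_cfg: Dict[str, str]) -> bool:
        # A run is skipped when every key/value of some skip entry matches it,
        # so entries with fewer keys act as wildcards over the missing ones.
        return any(all(run_cfg.get(k) == v for k, v in skip.items()) for skip in SKIPS)

    print(is_skipped({'model': 'llama_v2_7b_16h', 'optim': 'Adam'}))  # True
    print(is_skipped({'model': 'hf_Whisper', 'optim': 'Adam'}))       # False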
