Commit ed94e4f
[Bugfix][Model] Jamba assertions and no chunked prefill by default for Jamba (#6784)
tomeras91 authored Jul 27, 2024
1 parent 3c30123 commit ed94e4f
Showing 2 changed files with 10 additions and 1 deletion.
6 changes: 5 additions & 1 deletion vllm/engine/arg_utils.py
@@ -754,10 +754,14 @@ def create_engine_config(self, ) -> EngineConfig:
                 use_sliding_window = (model_config.get_sliding_window()
                                       is not None)
                 use_spec_decode = self.speculative_model is not None
+                has_seqlen_agnostic_layers = (
+                    model_config.contains_seqlen_agnostic_layers(
+                        parallel_config))
                 if (is_gpu and not use_sliding_window and not use_spec_decode
                         and not self.enable_lora
                         and not self.enable_prompt_adapter
-                        and not self.enable_prefix_caching):
+                        and not self.enable_prefix_caching
+                        and not has_seqlen_agnostic_layers):
                     self.enable_chunked_prefill = True
                     logger.warning(
                         "Chunked prefill is enabled by default for models with "
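
The new has_seqlen_agnostic_layers guard means models containing layers whose state is not indexed per token like a KV cache (Jamba's Mamba layers, for example) no longer get chunked prefill switched on by default. A minimal standalone sketch of the resulting decision; the function below is illustrative, with the feature flags passed in as plain parameters rather than read from ModelConfig/EngineArgs as in vLLM's actual code:

# Illustrative sketch of the updated default-on heuristic from
# create_engine_config; names mirror the diff, but this is a
# simplification, not vLLM's implementation.
def default_chunked_prefill(is_gpu: bool,
                            use_sliding_window: bool,
                            use_spec_decode: bool,
                            enable_lora: bool,
                            enable_prompt_adapter: bool,
                            enable_prefix_caching: bool,
                            has_seqlen_agnostic_layers: bool) -> bool:
    # Chunked prefill is only defaulted on when no incompatible
    # feature or layer type is present.
    return (is_gpu and not use_sliding_window and not use_spec_decode
            and not enable_lora and not enable_prompt_adapter
            and not enable_prefix_caching
            and not has_seqlen_agnostic_layers)

# Jamba interleaves attention and Mamba layers, so the new guard keeps
# chunked prefill off by default:
print(default_chunked_prefill(True, False, False, False, False, False,
                              has_seqlen_agnostic_layers=True))  # False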
5 changes: 5 additions & 0 deletions vllm/model_executor/models/jamba.py
@@ -644,6 +644,11 @@ def __init__(
         lora_config: Optional[LoRAConfig] = None,
         scheduler_config: Optional[SchedulerConfig] = None,
     ) -> None:
+        assert not scheduler_config.chunked_prefill_enabled, \
+            "Jamba currently does not support chunked prefill"
+        assert not cache_config.enable_prefix_caching, \
+            "Jamba currently does not support prefix caching"
+
         super().__init__()
         self.config = config
         self.scheduler_config = scheduler_config
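
With these assertions, explicitly requesting an unsupported feature fails fast at model construction instead of misbehaving later at runtime. An illustrative (untested) invocation, assuming the standard vllm.LLM entry point forwards engine arguments as usual:

# Hypothetical usage showing the fail-fast behavior added above;
# the model name and kwargs are illustrative.
from vllm import LLM

try:
    llm = LLM(model="ai21labs/Jamba-v0.1", enable_chunked_prefill=True)
except AssertionError as err:
    print(err)  # Jamba currently does not support chunked prefill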
