Refactor logits processor handling in LlamaCppLLM
- Replaced the `_set_logits_processor` method with direct assignment of `_logits_processor` from `_prepare_structured_output`.
- Simplified how the logits processor is set in both the `load` and `generate` methods, improving code clarity and maintainability (see the usage sketch below).
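
For context, a minimal usage sketch of the code path this commit touches, assuming distilabel's documented public API (`LlamaCppLLM`, `load`, and `generate_outputs`); the model path and schema below are hypothetical:

    from pydantic import BaseModel

    from distilabel.models.llms import LlamaCppLLM


    class Character(BaseModel):
        name: str
        description: str


    # With `structured_output` set, `load` now assigns `_logits_processor`
    # directly from `_prepare_structured_output`.
    llm = LlamaCppLLM(
        model_path="./model.Q4_K_M.gguf",  # hypothetical local GGUF file
        structured_output={"format": "json", "schema": Character},
    )
    llm.load()

    outputs = llm.generate_outputs(
        inputs=[[{"role": "user", "content": "Create an RPG character."}]],
    )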
davidberenstein1957 committed Jan 10, 2025
1 parent 995e4d4 · commit 5960441
Showing 1 changed file with 6 additions and 16 deletions.
22 changes: 6 additions & 16 deletions src/distilabel/models/llms/llamacpp.py
@@ -194,7 +194,9 @@ def load(self) -> None:
             )
 
         if self.structured_output:
-            self._set_logits_processor(self.structured_output)
+            self._logits_processor = self._prepare_structured_output(
+                self.structured_output
+            )
 
         if self.use_magpie_template or self.magpie_pre_query_template:
             if not self.tokenizer_id:
@@ -221,19 +223,6 @@ def load(self) -> None:
         # out of the model name, which won't be available until the `Llama` instance is created.
         super().load()

-    def _set_logits_processor(
-        self, structured_output: Optional[OutlinesStructuredOutputType] = None
-    ) -> None:
-        from distilabel.steps.tasks.structured_outputs.outlines import (
-            outlines_below_0_1_0,
-        )
-
-        processor = self._prepare_structured_output(structured_output)
-        if outlines_below_0_1_0:
-            self._logits_processor = processor
-        else:
-            self._logits_processor = [processor]
-
     @property
     def model_name(self) -> str:
         """Returns the model name used for the LLM."""
@@ -352,8 +341,9 @@ def generate(  # type: ignore
         # after each generation, so subsequent calls yield nothing. This is a workaround
         # until it is fixed in the `llama_cpp` or `outlines` libraries.
         if structured_output:
-            self._set_logits_processor(structured_output)
-
+            self._logits_processor = self._prepare_structured_output(
+                structured_output
+            )
         if self.tokenizer_id is None:
             completion = self._generate_chat_completion(
                 input,
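As background on the mechanism involved: the removed helper imported `outlines_below_0_1_0` and wrapped the processor in a list for newer `outlines` releases, so the prepared processor comes from the `outlines` integration, and llama-cpp-python consumes such processors as callables that rewrite the token score array before sampling. A toy, self-contained sketch of that interface (not distilabel's actual structured-output processor; the model path is hypothetical):

    import numpy as np
    from llama_cpp import Llama, LogitsProcessorList


    def suppress_token_zero(input_ids: np.ndarray, scores: np.ndarray) -> np.ndarray:
        # A logits processor receives the token ids generated so far and the
        # raw scores for the next token, and returns modified scores; here we
        # simply make token id 0 unsampleable.
        scores[0] = -np.inf
        return scores


    llm = Llama(model_path="./model.Q4_K_M.gguf")  # hypothetical local GGUF file
    completion = llm.create_chat_completion(
        messages=[{"role": "user", "content": "Say hi."}],
        logits_processor=LogitsProcessorList([suppress_token_zero]),
    )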
