diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py
index 8602d69a4..45ca6b21b 100644
--- a/google/generativeai/types/generation_types.py
+++ b/google/generativeai/types/generation_types.py
@@ -85,6 +85,8 @@ class GenerationConfigDict(TypedDict, total=False):
     temperature: float
     response_mime_type: str
     response_schema: protos.Schema | Mapping[str, Any]  # fmt: off
+    presence_penalty: float
+    frequency_penalty: float
 
 
 @dataclasses.dataclass
@@ -144,8 +146,6 @@ class GenerationConfig:
             Note: The default value varies by model, see the
             `Model.top_k` attribute of the `Model` returned the
             `genai.get_model` function.
-        seed:
-            Optional. Seed used in decoding. If not set, the request uses a randomly generated seed.
         response_mime_type:
             Optional. Output response mimetype of the generated candidate text.
 
@@ -161,10 +161,6 @@ class GenerationConfig:
             Optional.
         frequency_penalty:
             Optional.
-        response_logprobs:
-            Optional. If true, export the `logprobs` results in response.
-        logprobs:
-            Optional. Number of candidates of log probabilities to return at each step of decoding.
     """
 
     candidate_count: int | None = None
@@ -173,13 +169,10 @@ class GenerationConfig:
     temperature: float | None = None
     top_p: float | None = None
     top_k: int | None = None
-    seed: int | None = None
     response_mime_type: str | None = None
     response_schema: protos.Schema | Mapping[str, Any] | type | None = None
     presence_penalty: float | None = None
     frequency_penalty: float | None = None
-    response_logprobs: bool | None = None
-    logprobs: int | None = None
 
 
 GenerationConfigType = Union[protos.GenerationConfig, GenerationConfigDict, GenerationConfig]
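
Usage note (illustrative, not part of the patch): a minimal sketch of how the two penalty fields added to GenerationConfigDict/GenerationConfig flow into a request. The model name and the generate_content call below are assumptions for demonstration only; seed, response_logprobs, and logprobs are deliberately absent because this change removes them.

    # Sketch only: assumes the public google.generativeai surface that
    # re-exports GenerationConfig; the model name is a placeholder.
    import google.generativeai as genai
    from google.generativeai.types import GenerationConfig

    config = GenerationConfig(
        temperature=0.7,
        presence_penalty=0.6,   # new optional field from this diff
        frequency_penalty=0.3,  # new optional field from this diff
    )

    model = genai.GenerativeModel("gemini-1.5-flash")  # placeholder model
    response = model.generate_content(
        "Write a haiku about the sea.",
        generation_config=config,
    )
    print(response.text)

Because GenerationConfigDict is a TypedDict with total=False, the same two keys may also be passed as a plain dict (e.g. generation_config={"presence_penalty": 0.6}) instead of constructing a GenerationConfig instance.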