diff --git a/botorch/acquisition/knowledge_gradient.py b/botorch/acquisition/knowledge_gradient.py
index 100a7d54be..333648854a 100644
--- a/botorch/acquisition/knowledge_gradient.py
+++ b/botorch/acquisition/knowledge_gradient.py
@@ -27,7 +27,7 @@
 from __future__ import annotations
 
 from copy import deepcopy
-from typing import Callable, Optional, Tuple, Union
+from typing import Any, Callable, Optional, Tuple, Union
 
 import torch
 from botorch import settings
@@ -71,6 +71,7 @@ def __init__(
         inner_sampler: Optional[MCSampler] = None,
         X_pending: Optional[Tensor] = None,
         current_value: Optional[Tensor] = None,
+        **kwargs: Any,
     ) -> None:
         r"""q-Knowledge Gradient (one-shot optimization).
 
@@ -227,6 +228,7 @@ def __init__(
         cost_aware_utility: Optional[CostAwareUtility] = None,
         project: Callable[[Tensor], Tensor] = lambda X: X,
         expand: Callable[[Tensor], Tensor] = lambda X: X,
+        **kwargs: Any,
     ) -> None:
         r"""Multi-Fidelity q-Knowledge Gradient (one-shot optimization).
 
diff --git a/botorch/acquisition/monte_carlo.py b/botorch/acquisition/monte_carlo.py
index d9eb79b704..68c33c43b5 100644
--- a/botorch/acquisition/monte_carlo.py
+++ b/botorch/acquisition/monte_carlo.py
@@ -22,7 +22,7 @@
 
 import math
 from abc import ABC, abstractmethod
-from typing import Optional, Union
+from typing import Any, Optional, Union
 
 import torch
 from botorch.acquisition.acquisition import AcquisitionFunction
@@ -116,6 +116,7 @@ def __init__(
         sampler: Optional[MCSampler] = None,
         objective: Optional[MCAcquisitionObjective] = None,
         X_pending: Optional[Tensor] = None,
+        **kwargs: Any,
     ) -> None:
         r"""q-Expected Improvement.
 
@@ -188,6 +189,7 @@ def __init__(
         objective: Optional[MCAcquisitionObjective] = None,
         X_pending: Optional[Tensor] = None,
         prune_baseline: bool = False,
+        **kwargs: Any,
     ) -> None:
         r"""q-Noisy Expected Improvement.
 
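
Illustration (not part of the diff): adding **kwargs: Any to these constructors lets each acquisition function silently absorb keyword arguments it does not use, so the same argument dict can be passed to different MC acquisition functions. The sketch below is a minimal usage example under that assumption; the data, model, and the keys in shared_kwargs are made up for illustration.

# Minimal sketch assuming the **kwargs additions above; keyword names and
# data are illustrative, not taken from the PR.
import torch
from botorch.models import SingleTaskGP
from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
from botorch.acquisition.monte_carlo import qExpectedImprovement

train_X = torch.rand(10, 2)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

# One argument dict shared by both constructors: each picks out the
# arguments it recognizes and absorbs the rest via **kwargs.
shared_kwargs = {
    "best_f": train_Y.max(),  # used by qExpectedImprovement, ignored by qKG
    "num_fantasies": 8,       # used by qKnowledgeGradient, ignored by qEI
}

qei = qExpectedImprovement(model=model, **shared_kwargs)
qkg = qKnowledgeGradient(model=model, **shared_kwargs)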