Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Migrating Langchain's openai-python usage to v1.0.0 #13855

2 changes: 1 addition & 1 deletion docs/docs/expression_language/how_to/fallbacks.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@
"source": [
"from unittest.mock import patch\n",
"\n",
"from openai.error import RateLimitError"
"from openai import RateLimitError"
]
},
{
Expand Down
2 changes: 1 addition & 1 deletion docs/docs/guides/fallbacks.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@
"source": [
"from unittest.mock import patch\n",
"\n",
"from openai.error import RateLimitError"
"from openai import RateLimitError"
]
},
{
Expand Down
11 changes: 7 additions & 4 deletions libs/langchain/langchain/chains/moderation.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,12 +47,15 @@ def validate_environment(cls, values: Dict) -> Dict:
default="",
)
try:
import openai
from openai import OpenAI

openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["client"] = openai.Moderation
client = OpenAI(
organization=openai_organization, api_key=openai_api_key
)
else:
client = OpenAI(api_key=openai_api_key)
values["client"] = client
except ImportError:
raise ImportError(
"Could not import openai python package. "
Expand Down
20 changes: 10 additions & 10 deletions libs/langchain/langchain/chat_models/jinachat.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,11 +66,11 @@ def _create_retry_decorator(llm: JinaChat) -> Callable[[Any], Any]:
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
JoshuaConcon marked this conversation as resolved.
Show resolved Hide resolved
retry_if_exception_type(openai.Timeout)
| retry_if_exception_type(openai.APIError)
| retry_if_exception_type(openai.APIConnectionError)
| retry_if_exception_type(openai.RateLimitError)
| retry_if_exception_type(openai.APIStatusError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
Expand Down Expand Up @@ -262,11 +262,11 @@ def _create_retry_decorator(self) -> Callable[[Any], Any]:
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
retry_if_exception_type(openai.Timeout)
| retry_if_exception_type(openai.APIError)
| retry_if_exception_type(openai.APIConnectionError)
| retry_if_exception_type(openai.RateLimitError)
| retry_if_exception_type(openai.APIStatusError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
Expand Down
15 changes: 8 additions & 7 deletions libs/langchain/langchain/chat_models/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,11 +80,11 @@ def _create_retry_decorator(
import openai

errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
openai.Timeout,
openai.APIError,
openai.APIConnectionError,
openai.RateLimitError,
openai.APIStatusError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
Expand Down Expand Up @@ -536,9 +536,10 @@ def _client_params(self) -> Dict[str, Any]:
}
)
if self.openai_proxy:
import openai
from openai import OpenAI

openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
OpenAI(proxy={"http": self.openai_proxy, "https": self.openai_proxy})
# type: ignore[assignment] # noqa: E501
return {**self._default_params, **openai_creds}

def _get_invocation_params(
Expand Down
8 changes: 5 additions & 3 deletions libs/langchain/langchain/document_loaders/parsers/audio.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
import io

try:
import openai
from openai import OpenAI
except ImportError:
raise ImportError(
"openai package not found, please install it with "
Expand All @@ -38,7 +38,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:

# Set the API key if provided
if self.api_key:
openai.api_key = self.api_key
client = OpenAI(api_key=self.api_key)

# Audio file from disk
audio = AudioSegment.from_file(blob.path)
Expand All @@ -63,7 +63,9 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
attempts = 0
while attempts < 3:
try:
transcript = openai.Audio.transcribe("whisper-1", file_obj)
transcript = client.audio.transcriptions.create(
"whisper-1", file_obj
)
break
except Exception as e:
attempts += 1
Expand Down
36 changes: 19 additions & 17 deletions libs/langchain/langchain/embeddings/localai.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,11 +44,11 @@ def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], An
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
retry_if_exception_type(openai.Timeout)
| retry_if_exception_type(openai.APIError)
| retry_if_exception_type(openai.APIConnectionError)
| retry_if_exception_type(openai.RateLimitError)
| retry_if_exception_type(openai.APIStatusError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
Expand All @@ -66,11 +66,11 @@ def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any:
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
retry_if_exception_type(openai.Timeout)
| retry_if_exception_type(openai.APIError)
| retry_if_exception_type(openai.APIConnectionError)
| retry_if_exception_type(openai.RateLimitError)
| retry_if_exception_type(openai.APIStatusError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
Expand All @@ -91,7 +91,7 @@ def _check_response(response: dict) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]):
import openai

raise openai.error.APIError("LocalAI API returned an empty embedding")
raise openai.APIError("LocalAI API returned an empty embedding")
return response


Expand Down Expand Up @@ -251,12 +251,14 @@ def _invocation_params(self) -> Dict:
**self.model_kwargs,
}
if self.openai_proxy:
import openai

openai.proxy = {
"http": self.openai_proxy,
"https": self.openai_proxy,
} # type: ignore[assignment] # noqa: E501
from openai import OpenAI

OpenAI(
proxy={
"http": self.openai_proxy,
"https": self.openai_proxy,
}
) # type: ignore[assignment] # noqa: E501
return openai_args

def _embedding_func(self, text: str, *, engine: str) -> List[float]:
Expand Down
29 changes: 13 additions & 16 deletions libs/langchain/langchain/embeddings/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,11 +50,11 @@ def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
retry_if_exception_type(openai.Timeout)
| retry_if_exception_type(openai.APIError)
| retry_if_exception_type(openai.APIConnectionError)
| retry_if_exception_type(openai.RateLimitError)
| retry_if_exception_type(openai.APIStatusError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
Expand All @@ -72,11 +72,11 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
retry_if_exception_type(openai.Timeout)
| retry_if_exception_type(openai.APIError)
| retry_if_exception_type(openai.APIConnectionError)
| retry_if_exception_type(openai.RateLimitError)
| retry_if_exception_type(openai.APIStatusError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
Expand All @@ -97,7 +97,7 @@ def _check_response(response: dict, skip_empty: bool = False) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]) and not skip_empty:
import openai

raise openai.error.APIError("OpenAI API returned an empty embedding")
raise openai.APIError("OpenAI API returned an empty embedding")
return response


Expand Down Expand Up @@ -364,17 +364,14 @@ def _invocation_params(self) -> Dict[str, Any]:
# TODO: Look into proxy with openai v1.
if self.openai_proxy:
try:
import openai
from openai import OpenAI
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)

openai.proxy = {
"http": self.openai_proxy,
"https": self.openai_proxy,
} # type: ignore[assignment] # noqa: E501
OpenAI(proxy={"http": self.openai_proxy, "https": self.openai_proxy}) # type: ignore[assignment] # noqa: E501
return openai_args

# please refer to
Expand Down
11 changes: 7 additions & 4 deletions libs/langchain/langchain/llms/gooseai.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,11 +96,14 @@ def validate_environment(cls, values: Dict) -> Dict:
)
values["gooseai_api_key"] = gooseai_api_key
try:
import openai
from openai import OpenAI

openai.api_key = gooseai_api_key.get_secret_value()
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
client = OpenAI(
api_base="https://api.goose.ai/v1",
api_key=gooseai_api_key.get_secret_value(),
)

values["client"] = client
except ImportError:
raise ImportError(
"Could not import openai python package. "
Expand Down
30 changes: 17 additions & 13 deletions libs/langchain/langchain/llms/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,11 +94,11 @@ def _create_retry_decorator(
import openai

errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
openai.Timeout,
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this code is only reached if openai<1 is installed, so don't think we want to update

Copy link
Author

@JoshuaConcon JoshuaConcon Nov 27, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you for reviewing this PR. Just to confirm: are you suggesting that we should not update to openai v1.0.0? This would mean that the error we are receiving that is documented in #11755, which is solved by updating to openai v1.0.0 (openai/openai-python#574), will still persist. Is there an alternative approach to solving this issue? @baskaryan

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hello @baskaryan , any updates? Just wanted to confirm that, as you mentioned, langchain is not looking to update to openai v1.0.0.

openai.APIError,
openai.APIConnectionError,
openai.RateLimitError,
openai.APIStatusError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
Expand Down Expand Up @@ -597,9 +597,9 @@ def _invocation_params(self) -> Dict[str, Any]:
}
)
if self.openai_proxy:
import openai
from openai import OpenAI

openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
OpenAI(proxy={"http": self.openai_proxy, "https": self.openai_proxy}) # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}

@property
Expand Down Expand Up @@ -1018,22 +1018,26 @@ def validate_environment(cls, values: Dict) -> Dict:
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
)
try:
import openai
from openai import OpenAI

params = {}

openai.api_key = openai_api_key
if openai_api_key:
params["api_key"] = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
params["api_base"] = openai_api_base
if openai_organization:
openai.organization = openai_organization
params["organization"] = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
params["proxy"] = {"http": openai_proxy, "https": openai_proxy}
client = OpenAI(**params)
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
values["client"] = client
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
Expand Down
24 changes: 16 additions & 8 deletions libs/langchain/tests/integration_tests/adapters/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,11 @@


def _test_no_stream(**kwargs: Any) -> None:
import openai
from openai import OpenAI

result = openai.ChatCompletion.create(**kwargs)
client = OpenAI()

result = client.chat.completions.create(**kwargs)
lc_result = lcopenai.ChatCompletion.create(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
Expand All @@ -17,10 +19,12 @@ def _test_no_stream(**kwargs: Any) -> None:


def _test_stream(**kwargs: Any) -> None:
import openai
from openai import OpenAI

client = OpenAI()

result = []
for c in openai.ChatCompletion.create(**kwargs):
for c in client.chat.completions.create(**kwargs):
result.append(c["choices"][0]["delta"].to_dict_recursive())

lc_result = []
Expand All @@ -30,9 +34,11 @@ def _test_stream(**kwargs: Any) -> None:


async def _test_async(**kwargs: Any) -> None:
import openai
from openai import AsyncOpenAI

result = await openai.ChatCompletion.acreate(**kwargs)
aclient = AsyncOpenAI()

result = await aclient.chat.completions.create(**kwargs)
lc_result = await lcopenai.ChatCompletion.acreate(**kwargs)
if isinstance(lc_result, dict):
if isinstance(result, dict):
Expand All @@ -43,10 +49,12 @@ async def _test_async(**kwargs: Any) -> None:


async def _test_astream(**kwargs: Any) -> None:
import openai
from openai import AsyncOpenAI

aclient = AsyncOpenAI()

result = []
async for c in await openai.ChatCompletion.acreate(**kwargs):
async for c in await aclient.chat.completions.create(**kwargs):
result.append(c["choices"][0]["delta"].to_dict_recursive())

lc_result = []
Expand Down
Loading
Loading