From 9cd608efb3f7a63af4590c3142c435fd6100a562 Mon Sep 17 00:00:00 2001 From: maang-h <55082429+maang-h@users.noreply.github.com> Date: Mon, 12 Aug 2024 04:20:16 +0800 Subject: [PATCH] docs: Standardize OpenAI Docs (#25280) - **Description:** Standardize OpenAI Docs - **Issue:** #24803 --------- Co-authored-by: Chester Curme --- .../openai/langchain_openai/llms/base.py | 102 ++++++++++++++++-- 1 file changed, 93 insertions(+), 9 deletions(-) diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py index e1d0938f01871..0584b52a751fa 100644 --- a/libs/partners/openai/langchain_openai/llms/base.py +++ b/libs/partners/openai/langchain_openai/llms/base.py @@ -605,21 +605,105 @@ def max_tokens_for_prompt(self, prompt: str) -> int: class OpenAI(BaseOpenAI): - """OpenAI large language models. + """OpenAI completion model integration. + + Setup: + Install ``langchain-openai`` and set environment variable ``OPENAI_API_KEY``. + + .. code-block:: bash + + pip install -U langchain-openai + export OPENAI_API_KEY="your-api-key" + + Key init args — completion params: + model: str + Name of OpenAI model to use. + temperature: float + Sampling temperature. + max_tokens: Optional[int] + Max number of tokens to generate. + logprobs: Optional[bool] + Whether to return logprobs. + stream_options: Dict + Configure streaming outputs, like whether to return token usage when + streaming (``{"include_usage": True}``). + + Key init args — client params: + timeout: Union[float, Tuple[float, float], Any, None] + Timeout for requests. + max_retries: int + Max number of retries. + api_key: Optional[str] + OpenAI API key. If not passed in will be read from env var OPENAI_API_KEY. + base_url: Optional[str] + Base URL for API requests. Only specify if using a proxy or service + emulator. + organization: Optional[str] + OpenAI organization ID. If not passed in will be read from env + var OPENAI_ORG_ID.
+ + See full list of supported init args and their descriptions in the params section. + + Instantiate: + .. code-block:: python - To use, you should have the environment variable ``OPENAI_API_KEY`` - set with your API key, or pass it as a named parameter to the constructor. + from langchain_openai import OpenAI - Any parameters that are valid to be passed to the openai.create call can be passed - in, even if not explicitly saved on this class. + llm = OpenAI( + model="gpt-3.5-turbo-instruct", + temperature=0, + max_retries=2, + # api_key="...", + # base_url="...", + # organization="...", + # other params... + ) - Example: + Invoke: .. code-block:: python - from langchain_openai import OpenAI + input_text = "The meaning of life is " + llm.invoke(input_text) - model = OpenAI(model_name="gpt-3.5-turbo-instruct") - """ + .. code-block:: none + + "a philosophical question that has been debated by thinkers and scholars for centuries." + + Stream: + .. code-block:: python + + for chunk in llm.stream(input_text): + print(chunk, end="|") + + .. code-block:: none + + a| philosophical| question| that| has| been| debated| by| thinkers| and| scholars| for| centuries|. + + .. code-block:: python + + "".join(llm.stream(input_text)) + + .. code-block:: none + + "a philosophical question that has been debated by thinkers and scholars for centuries." + + Async: + .. code-block:: python + + await llm.ainvoke(input_text) + + # stream: + # async for chunk in (await llm.astream(input_text)): + # print(chunk) + + # batch: + # await llm.abatch([input_text]) + + .. code-block:: none + + "a philosophical question that has been debated by thinkers and scholars for centuries." + + """ # noqa: E501 @classmethod def get_lc_namespace(cls) -> List[str]: