
OpenAILLMClient

dbally.llm_client.openai_client.OpenAIClient

OpenAIClient(model_name: str = 'gpt-3.5-turbo', api_key: Optional[str] = None)

Bases: LLMClient

OpenAIClient is a class designed to interact with OpenAI's language model (LLM) endpoints, in particular the GPT models.

PARAMETER DESCRIPTION
model_name

Name of the OpenAI model to be used; defaults to "gpt-3.5-turbo".

TYPE: str DEFAULT: 'gpt-3.5-turbo'

api_key

OpenAI API key. If None, the OPENAI_API_KEY environment variable will be used.

TYPE: Optional[str] DEFAULT: None

Source code in src/dbally/llm_client/openai_client.py
def __init__(self, model_name: str = "gpt-3.5-turbo", api_key: Optional[str] = None) -> None:
    try:
        from openai import AsyncOpenAI  # pylint: disable=import-outside-toplevel
    except ImportError as exc:
        raise ImportError("You need to install openai package to use GPT models") from exc

    super().__init__(model_name)
    self._client = AsyncOpenAI(api_key=api_key)
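A minimal instantiation sketch (assumes the openai package is installed and, in the first case, that OPENAI_API_KEY is set in the environment):

import os

from dbally.llm_client.openai_client import OpenAIClient

# With api_key left as None, the client falls back to the OPENAI_API_KEY environment variable
client = OpenAIClient(model_name="gpt-3.5-turbo")

# Or pass the key explicitly
client = OpenAIClient(model_name="gpt-3.5-turbo", api_key=os.environ["OPENAI_API_KEY"])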

model_name instance-attribute

model_name = model_name

text_generation async

text_generation(template: PromptTemplate, fmt: dict, *, event_tracker: Optional[EventTracker] = None, frequency_penalty: Optional[float] = 0.0, max_tokens: Optional[int] = 128, n: Optional[int] = 1, presence_penalty: Optional[float] = 0.0, seed: Optional[int] = None, stop: Optional[Union[str, List[str]]] = None, temperature: Optional[float] = 1.0, top_p: Optional[float] = 1.0) -> str

For a given PromptTemplate and format dict, builds a prompt and returns the response from the LLM.

RETURNS DESCRIPTION
str

Text response from LLM.

Source code in src/dbally/llm_client/base.py
async def text_generation(  # pylint: disable=R0913
    self,
    template: PromptTemplate,
    fmt: dict,
    *,
    event_tracker: Optional[EventTracker] = None,
    frequency_penalty: Optional[float] = 0.0,
    max_tokens: Optional[int] = 128,
    n: Optional[int] = 1,
    presence_penalty: Optional[float] = 0.0,
    seed: Optional[int] = None,
    stop: Optional[Union[str, List[str]]] = None,
    temperature: Optional[float] = 1.0,
    top_p: Optional[float] = 1.0,
) -> str:
    """
    For a given PromptTemplate and format dict, builds a prompt and
    returns the response from the LLM.

    Returns:
        Text response from LLM.
    """

    options = LLMOptions(
        frequency_penalty=frequency_penalty,
        max_tokens=max_tokens,
        n=n,
        presence_penalty=presence_penalty,
        seed=seed,
        stop=stop,
        temperature=temperature,
        top_p=top_p,
    )

    prompt = self._prompt_builder.build(template, fmt)

    event = LLMEvent(prompt=prompt, type=type(template).__name__)

    event_tracker = event_tracker or EventTracker()
    async with event_tracker.track_event(event) as span:
        event.response = await self.call(prompt, template.response_format, options, event)
        span(event)

    return event.response
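A hedged usage sketch. Only the text_generation signature above is taken from the source; the PromptTemplate import path and constructor shown here are assumptions and may need adjusting to the actual prompt module:

import asyncio

from dbally.llm_client.openai_client import OpenAIClient
# Assumption: PromptTemplate is importable like this and accepts a chat-format
# tuple of {"role": ..., "content": ...} dicts whose {placeholders} are filled from fmt.
from dbally.prompts import PromptTemplate

template = PromptTemplate(
    (
        {"role": "system", "content": "You translate {language} into English."},
        {"role": "user", "content": "{text}"},
    )
)

async def main() -> None:
    client = OpenAIClient()
    answer = await client.text_generation(
        template,
        fmt={"language": "Polish", "text": "Dzień dobry"},
        temperature=0.0,
        max_tokens=64,
    )
    print(answer)

asyncio.run(main())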

call async

call(prompt: Union[str, ChatFormat], response_format: Optional[Dict[str, str]], options: LLMOptions, event: LLMEvent) -> str

Calls the OpenAI API endpoint.

PARAMETER DESCRIPTION
prompt

Prompt as an OpenAI client style list.

TYPE: Union[str, ChatFormat]

response_format

Optional argument passed to the OpenAI API, used to force JSON output.

TYPE: Optional[Dict[str, str]]

options

Additional settings used by the LLM.

TYPE: LLMOptions

event

Container with the prompt, LLM response, and call metrics.

TYPE: LLMEvent

RETURNS DESCRIPTION
str

Response string from LLM.

Source code in src/dbally/llm_client/openai_client.py
async def call(
    self,
    prompt: Union[str, ChatFormat],
    response_format: Optional[Dict[str, str]],
    options: LLMOptions,
    event: LLMEvent,
) -> str:
    """
    Calls the OpenAI API endpoint.

    Args:
        prompt: Prompt as an OpenAI client style list.
        response_format: Optional argument passed to the OpenAI API, used to force JSON output.
        options: Additional settings used by the LLM.
        event: Container with the prompt, LLM response, and call metrics.

    Returns:
        Response string from LLM.
    """

    # only "turbo" models support response_format argument
    # https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format
    if "turbo" not in self.model_name:
        response_format = None

    response = await self._client.chat.completions.create(
        messages=prompt, model=self.model_name, response_format=response_format, **options.dict()  # type: ignore
    )

    event.completion_tokens = response.usage.completion_tokens
    event.prompt_tokens = response.usage.prompt_tokens
    event.total_tokens = response.usage.total_tokens

    return response.choices[0].message.content  # type: ignore
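For context, response_format is the OpenAI chat-completions JSON-mode switch. A small illustrative sketch of the two arguments as call expects them (the prompt contents and model names are examples only; call is normally reached through text_generation, which forwards the template's response_format):

# Chat-format prompt in the OpenAI client style expected by `call`
prompt = [
    {"role": "system", "content": "Respond with a JSON object."},
    {"role": "user", "content": "Give the capital of France."},
]

# Forces JSON output; per the check above it is only forwarded for "turbo"
# models such as "gpt-3.5-turbo" or "gpt-4-turbo-preview", otherwise it is reset to None.
response_format = {"type": "json_object"}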