Skip to content

Commit

Permalink
Merge pull request #257 from Mirascope/fix-256
Browse files Browse the repository at this point in the history
fix: unnecessary prevention of tool calls for finish reason
  • Loading branch information
willbakst committed May 18, 2024
2 parents b74ed17 + a3bb545 commit e6b4fbe
Show file tree
Hide file tree
Showing 8 changed files with 78 additions and 49 deletions.
2 changes: 1 addition & 1 deletion mirascope/cohere/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ def tools(self) -> Optional[list[CohereTool]]:
if self.response.finish_reason == "MAX_TOKENS":
raise RuntimeError(
"Generation stopped with MAX_TOKENS finish reason. This means that the "
"response hit the token limit before completion, "
"response hit the token limit before completion."
)

extracted_tools = []
Expand Down
37 changes: 21 additions & 16 deletions mirascope/groq/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,22 +139,15 @@ def tools(self) -> Optional[list[GroqTool]]:
if not self.tool_types:
return None

if self.response_format != ResponseFormat(type="json_object"):
if not self.tool_calls:
return None

if self.choices[0].finish_reason not in ["tool_calls", "function_call"]:
raise RuntimeError(
"Finish reason was not `tool_calls` or `function_call`, indicating "
"no or failed tool use. This is likely due to a limit on output "
"tokens that is too low. Note that this could also indicate no "
"tool is beind called, so we recommend that you check the output "
"of the call to confirm. "
f"Finish Reason: {self.choices[0].finish_reason}"
)
else:
# Note: we only handle single tool calls in JSON mode.
tool_type = self.tool_types[0]
if self.choice.finish_reason == "length":
raise RuntimeError(
"Finish reason was `length`, indicating the model ran out of tokens "
"(and likely could not complete the tool call if trying to)"
)

def reconstruct_tools_from_content() -> list[GroqTool]:
# Note: we only handle single tool calls in this case
tool_type = self.tool_types[0] # type: ignore
return [
tool_type.from_tool_call(
ChoiceMessageToolCall(
Expand All @@ -167,6 +160,18 @@ def tools(self) -> Optional[list[GroqTool]]:
)
]

if self.response_format == ResponseFormat(type="json_object"):
return reconstruct_tools_from_content()

if not self.tool_calls:
# Let's see if we got an assistant message back instead and try to
# reconstruct a tool call in this case. We'll assume if it starts with
# an open curly bracket that we got a tool call assistant message.
if "{" == self.content[0]:
# Note: we only handle single tool calls in JSON mode.
return reconstruct_tools_from_content()
return None

extracted_tools = []
for tool_call in self.tool_calls:
for tool_type in self.tool_types:
Expand Down
10 changes: 4 additions & 6 deletions mirascope/mistral/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,13 +105,11 @@ def tools(self) -> Optional[list[MistralTool]]:
if not self.tool_types or not self.tool_calls or len(self.tool_calls) == 0:
return None

if self.choices[0].finish_reason != "tool_calls":
if self.choice.finish_reason in ["length", "error"]:
raise RuntimeError(
"Finish reason was not `tool_call`, indicating no or failed tool use."
"This is likely due to a limit on output tokens that is too low. "
"Note that this could also indicate no tool is beind called, so we "
"recommend that you check the output of the call to confirm. "
f"Finish Reason: {self.choices[0].finish_reason}"
f"Finish reason was {self.choice.finish_reason}, indicating the model "
"ran out of tokens or failed (and could not complete the tool call if "
"trying to)."
)

extracted_tools = []
Expand Down
36 changes: 16 additions & 20 deletions mirascope/openai/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,12 @@ def tools(self) -> Optional[list[OpenAITool]]:
if not self.tool_types:
return None

if self.choice.finish_reason == "length":
raise RuntimeError(
"Finish reason was `length`, indicating the model ran out of tokens "
"(and could not complete the tool call if trying to)"
)

def reconstruct_tools_from_content() -> list[OpenAITool]:
# Note: we only handle single tool calls in this case
tool_type = self.tool_types[0] # type: ignore
Expand All @@ -160,28 +166,18 @@ def reconstruct_tools_from_content() -> list[OpenAITool]:
)
]

if self.response_format != ResponseFormat(type="json_object"):
if not self.tool_calls:
# Let's see if we got an assistant message back instead and try to
# reconstruct a tool call in this case. We'll assume if it starts with
# an open curly bracket that we got a tool call assistant message.
if "{" == self.content[0]:
# Note: we only handle single tool calls in JSON mode.
return reconstruct_tools_from_content()
return None

if self.choices[0].finish_reason not in ["tool_calls", "function_call"]:
raise RuntimeError(
"Finish reason was not `tool_calls` or `function_call`, indicating "
"no or failed tool use. This is likely due to a limit on output "
"tokens that is too low. Note that this could also indicate no "
"tool is beind called, so we recommend that you check the output "
"of the call to confirm. "
f"Finish Reason: {self.choices[0].finish_reason}"
)
else:
if self.response_format == ResponseFormat(type="json_object"):
return reconstruct_tools_from_content()

if not self.tool_calls:
# Let's see if we got an assistant message back instead and try to
# reconstruct a tool call in this case. We'll assume if it starts with
# an open curly bracket that we got a tool call assistant message.
if "{" == self.content[0]:
# Note: we only handle single tool calls in JSON mode.
return reconstruct_tools_from_content()
return None

extracted_tools = []
for tool_call in self.tool_calls:
for tool_type in self.tool_types:
Expand Down
20 changes: 17 additions & 3 deletions tests/groq/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,11 +111,12 @@ def fixture_expected_book_tool_instance() -> BookTool:
title="The Name of the Wind",
author="Patrick Rothfuss",
tool_call=ChoiceMessageToolCall(
id="null",
id="id",
function=ChoiceMessageToolCallFunction(
name="BookTool",
arguments='{"title": "The Name of the Wind","author": "Patrick Rothfuss"}',
),
type="function",
),
)

Expand All @@ -136,11 +137,12 @@ def fixture_chat_completion_response_with_tools() -> ChatCompletion:
content="",
tool_calls=[
ChoiceMessageToolCall(
id="null",
id="id",
function=ChoiceMessageToolCallFunction(
name="BookTool",
arguments='{"title": "The Name of the Wind","author": "Patrick Rothfuss"}',
),
type="function",
)
],
),
Expand All @@ -152,12 +154,24 @@ def fixture_chat_completion_response_with_tools() -> ChatCompletion:
)


@pytest.fixture()
def fixture_chat_completion_response_with_assistant_message_tool(
fixture_chat_completion_response: ChatCompletion,
) -> ChatCompletion:
"""Returns a `ChatCompletion` with an assistant message tool call (just in case)."""
fixture_chat_completion_copy = fixture_chat_completion_response.model_copy()
fixture_chat_completion_copy.choices[
0
].message.content = '{"title": "The Name of the Wind","author": "Patrick Rothfuss"}'
return fixture_chat_completion_copy


@pytest.fixture()
def fixture_chat_completion_response_with_tools_bad_stop_sequence(
fixture_chat_completion_response_with_tools: ChatCompletion,
) -> ChatCompletion:
"""Returns a `ChatCompletion` with tools but a bad `length` finish reason."""
fixture_chat_completion_response_with_tools.choices[0].finish_reason = "stop" # type: ignore
fixture_chat_completion_response_with_tools.choices[0].finish_reason = "length" # type: ignore
return fixture_chat_completion_response_with_tools


Expand Down
18 changes: 17 additions & 1 deletion tests/groq/test_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ def test_groq_call_response(
assert response.output_tokens is not None


def test_groq_call_response_no_usageq(
def test_groq_call_response_no_usage(
fixture_chat_completion_response_no_usage: ChatCompletion,
) -> None:
"""Tests the `GroqCallResponse` class."""
Expand Down Expand Up @@ -80,6 +80,22 @@ def test_groq_call_response_with_tools(
assert tools[0] == expected_tool


def test_groq_call_response_with_assistant_message_tool(
fixture_chat_completion_response_with_assistant_message_tool: ChatCompletion,
fixture_book_tool: Type[GroqTool],
fixture_expected_book_tool_instance: GroqTool,
):
"""Tests that `GroqCallResponse` returns a tool when it's an assistant message."""
response = GroqCallResponse(
response=fixture_chat_completion_response_with_assistant_message_tool,
tool_types=[fixture_book_tool],
start_time=0,
end_time=0,
)
assert response.tools is not None
assert response.tools[0].tool_call == fixture_expected_book_tool_instance.tool_call


def test_groq_call_response_with_tools_bad_stop_sequence(
fixture_chat_completion_response_with_tools_bad_stop_sequence: ChatCompletion,
fixture_book_tool: Type[GroqTool],
Expand Down
2 changes: 1 addition & 1 deletion tests/mistral/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ def fixture_chat_completion_response_with_tools_bad_stop_sequence(
fixture_chat_completion_response_with_tools: ChatCompletionResponse,
) -> ChatCompletionResponse:
"""Returns a `ChatCompletionResponse` with tools but a bad `length` finish reason."""
fixture_chat_completion_response_with_tools.choices[0].finish_reason = "stop" # type: ignore
fixture_chat_completion_response_with_tools.choices[0].finish_reason = "length" # type: ignore
return fixture_chat_completion_response_with_tools


Expand Down
2 changes: 1 addition & 1 deletion tests/openai/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ def fixture_chat_completion_with_tools_bad_stop_sequence(
fixture_chat_completion_with_tools: ChatCompletion,
) -> ChatCompletion:
"""Returns a chat completion with tool calls but a bad stop sequence."""
fixture_chat_completion_with_tools.choices[0].finish_reason = "stop"
fixture_chat_completion_with_tools.choices[0].finish_reason = "length"
return fixture_chat_completion_with_tools


Expand Down

0 comments on commit e6b4fbe

Please sign in to comment.