2 changes: 1 addition & 1 deletion src/strands/models/litellm.py
@@ -194,7 +194,7 @@ def format_request_messages(
        formatted_messages = cls._format_system_messages(system_prompt, system_prompt_content=system_prompt_content)
        formatted_messages.extend(cls._format_regular_messages(messages))

-        return [message for message in formatted_messages if message["content"] or "tool_calls" in message]
+        return [message for message in formatted_messages if "content" in message or "tool_calls" in message]

    @override
    def format_chunk(self, event: dict[str, Any], **kwargs: Any) -> StreamEvent:
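Why this one-line change matters: with the companion openai.py change below, an assistant message that carries only tool calls no longer gets a "content" key at all, so the old predicate message["content"] would raise KeyError on it (and an empty content value is falsy, which would drop the message). The new key-presence check keeps any message that has either field. A minimal sketch of the difference, using hypothetical standalone message dicts rather than the library's own types:

messages = [
    {"role": "user", "content": "Use the calculator"},
    {"role": "assistant", "tool_calls": [{"id": "c1"}]},  # no "content" key at all
]

# Old predicate: messages[1]["content"] raises KeyError before the
# "tool_calls" membership test is ever reached.
# New predicate: plain key-presence checks, safe for both messages.
kept = [m for m in messages if "content" in m or "tool_calls" in m]
assert len(kept) == 2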
4 changes: 2 additions & 2 deletions src/strands/models/openai.py
@@ -360,7 +360,7 @@ def _format_regular_messages(cls, messages: Messages, **kwargs: Any) -> list[dic

        formatted_message = {
            "role": message["role"],
-            "content": formatted_contents,
+            **({"content": formatted_contents} if formatted_contents else {}),
            **({"tool_calls": formatted_tool_calls} if formatted_tool_calls else {}),
        }
        formatted_messages.append(formatted_message)
@@ -398,7 +398,7 @@ def format_request_messages(
        formatted_messages = cls._format_system_messages(system_prompt, system_prompt_content=system_prompt_content)
        formatted_messages.extend(cls._format_regular_messages(messages))

-        return [message for message in formatted_messages if message["content"] or "tool_calls" in message]
+        return [message for message in formatted_messages if "content" in message or "tool_calls" in message]

    def format_request(
        self,
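The **({...} if value else {}) pattern above conditionally adds a key only when its value is non-empty. A minimal sketch of the idiom in isolation (illustrative values only, not the library's real inputs):

formatted_contents = []  # e.g. no renderable content blocks
formatted_tool_calls = [{"id": "c1", "type": "function"}]

formatted_message = {
    "role": "assistant",
    # Spreading an empty dict adds nothing, so each key appears only when truthy:
    **({"content": formatted_contents} if formatted_contents else {}),
    **({"tool_calls": formatted_tool_calls} if formatted_tool_calls else {}),
}
assert "content" not in formatted_message
assert formatted_message["tool_calls"][0]["id"] == "c1"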
55 changes: 55 additions & 0 deletions tests/strands/models/test_litellm.py
@@ -812,3 +812,58 @@ def __init__(self, usage):
    assert metadata_events[0]["metadata"]["usage"]["inputTokens"] == 10
    assert metadata_events[0]["metadata"]["usage"]["outputTokens"] == 5
    assert metadata_events[0]["metadata"]["usage"]["totalTokens"] == 15


def test_format_request_messages_with_tool_calls_no_content():
    """Test that messages with tool calls but no content are properly formatted."""
    messages = [
        {"role": "user", "content": [{"text": "Use the calculator"}]},
        {
            "role": "assistant",
            "content": [
                {
                    "toolUse": {
                        "input": {"expression": "2+2"},
                        "name": "calculator",
                        "toolUseId": "c1",
                    },
                },
            ],
        },
    ]

    result = LiteLLMModel.format_request_messages(messages)

    # Assistant message should have tool_calls but no content field
    assert len(result) == 2
    assert result[1]["role"] == "assistant"
    assert "tool_calls" in result[1]
    assert "content" not in result[1]
    assert result[1]["tool_calls"][0]["id"] == "c1"


def test_format_request_messages_filters_tool_only_messages():
    """Test that messages with only tool calls (no content) are not filtered out of the output."""
    messages = [
        {"role": "user", "content": [{"text": "test"}]},
        {
            "role": "assistant",
            "content": [
                {
                    "toolUse": {
                        "input": {},
                        "name": "tool1",
                        "toolUseId": "t1",
                    },
                },
            ],
        },
    ]

    result = LiteLLMModel.format_request_messages(messages)

    # Both messages should be included
    assert len(result) == 2
    assert result[0]["role"] == "user"
    assert result[1]["role"] == "assistant"
    assert "tool_calls" in result[1]
55 changes: 55 additions & 0 deletions tests/strands/models/test_openai.py
@@ -1311,3 +1311,58 @@ def test_format_request_filters_location_source_document(model, caplog):
    assert len(formatted_content) == 1
    assert formatted_content[0]["type"] == "text"
    assert "Location sources are not supported by OpenAI" in caplog.text


def test_format_request_messages_with_tool_calls_no_content():
    """Test that messages with tool calls but no content are properly formatted."""
    messages = [
        {"role": "user", "content": [{"text": "Use the calculator"}]},
        {
            "role": "assistant",
            "content": [
                {
                    "toolUse": {
                        "input": {"expression": "2+2"},
                        "name": "calculator",
                        "toolUseId": "c1",
                    },
                },
            ],
        },
    ]

    result = OpenAIModel.format_request_messages(messages)

    # Assistant message should have tool_calls but no content field
    assert len(result) == 2
    assert result[1]["role"] == "assistant"
    assert "tool_calls" in result[1]
    assert "content" not in result[1]
    assert result[1]["tool_calls"][0]["id"] == "c1"


def test_format_request_messages_filters_tool_only_messages():
    """Test that messages with only tool calls (no content) are not filtered out of the output."""
    messages = [
        {"role": "user", "content": [{"text": "test"}]},
        {
            "role": "assistant",
            "content": [
                {
                    "toolUse": {
                        "input": {},
                        "name": "tool1",
                        "toolUseId": "t1",
                    },
                },
            ],
        },
    ]

    result = OpenAIModel.format_request_messages(messages)

    # Both messages should be included
    assert len(result) == 2
    assert result[0]["role"] == "user"
    assert result[1]["role"] == "assistant"
    assert "tool_calls" in result[1]
26 changes: 13 additions & 13 deletions tests_integ/models/test_model_openai.py
@@ -1,5 +1,4 @@
import os
-import unittest.mock

import pydantic
import pytest
@@ -205,21 +204,22 @@ def test_rate_limit_throttling_integration_no_retries(model):
    the model properly raises a ModelThrottledException. We disable retries
    to avoid waiting for the exponential backoff during testing.
    """
-    # Patch the event loop constants to disable retries for this test
-    with unittest.mock.patch("strands.event_loop.event_loop.MAX_ATTEMPTS", 1):
-        agent = Agent(model=model)
+    from strands.event_loop._retry import ModelRetryStrategy

-        # Create a message that's very long to trigger token-per-minute rate limits
-        # This should be large enough to exceed TPM limits immediately
-        very_long_text = "Really long text " * 20000
+    # Create agent with retry strategy that doesn't retry
+    agent = Agent(model=model, retry_strategy=ModelRetryStrategy(max_attempts=1))

-        # This should raise ModelThrottledException without retries
-        with pytest.raises(ModelThrottledException) as exc_info:
-            agent(very_long_text)
+    # Create a message that's very long to trigger token-per-minute rate limits
+    # This should be large enough to exceed TPM limits immediately
+    very_long_text = "Really long text " * 20000

-        # Verify it's a rate limit error
-        error_message = str(exc_info.value).lower()
-        assert "rate limit" in error_message or "tokens per min" in error_message
+    # This should raise ModelThrottledException without retries
+    with pytest.raises(ModelThrottledException) as exc_info:
+        agent(very_long_text)

+    # Verify it's a rate limit error
+    error_message = str(exc_info.value).lower()
+    assert "rate limit" in error_message or "tokens per min" in error_message


def test_content_blocks_handling(model):
1 change: 1 addition & 0 deletions tests_integ/test_multiagent_swarm.py
@@ -113,6 +113,7 @@ def capture_first_node(self, event):
    return VerifyHook()


+@pytest.mark.timeout(120)
def test_swarm_execution_with_string(researcher_agent, analyst_agent, writer_agent, hook_provider):
    """Test swarm execution with string input."""
    # Create the swarm
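The new marker caps this swarm test at 120 seconds. Assuming the marker comes from the pytest-timeout plugin (the usual provider of pytest.mark.timeout), a minimal self-contained usage sketch:

import time

import pytest

@pytest.mark.timeout(2)
def test_completes_within_budget():
    time.sleep(0.1)  # finishes well under the 2-second limit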