⚠ This page is served via a proxy. Original site: https://github.com
This service does not collect credentials or authentication data.
Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 12 additions & 10 deletions src/strands/event_loop/streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,16 +288,8 @@ def handle_content_block_stop(state: dict[str, Any]) -> dict[str, Any]:
content.append({"toolUse": tool_use})
state["current_tool_use"] = {}

elif text:
Copy link
Member

@pgrayy pgrayy Jan 13, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This problem might actually be specific to Gemini and will need to be addressed in a similar manner as #947. Gemini does not mark the start and stop of the reasoning content blocks which is why it gets ignored. We saw that same problem with LiteLLM.

if citations_content:
citations_block: CitationsContentBlock = {"citations": citations_content, "content": [{"text": text}]}
content.append({"citationsContent": citations_block})
state["citationsContent"] = []
else:
content.append({"text": text})
state["text"] = ""

elif reasoning_text:
# Handle reasoning content - checked independently of text so both can be captured
if reasoning_text:
content_block: ContentBlock = {
"reasoningContent": {
"reasoningText": {
Expand All @@ -315,6 +307,16 @@ def handle_content_block_stop(state: dict[str, Any]) -> dict[str, Any]:
content.append({"reasoningContent": {"redactedContent": redacted_content}})
state["redactedContent"] = b""

# Handle text content - checked after reasoning so both can be captured in the same block
if text:
if citations_content:
citations_block: CitationsContentBlock = {"citations": citations_content, "content": [{"text": text}]}
content.append({"citationsContent": citations_block})
state["citationsContent"] = []
else:
content.append({"text": text})
state["text"] = ""

return state


Expand Down
167 changes: 167 additions & 0 deletions tests/strands/event_loop/test_streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -481,6 +481,90 @@ def test_handle_content_block_delta(event: ContentBlockDeltaEvent, event_type, s
"redactedContent": b"",
},
),
# Reasoning AND Text - both should be captured (Gemini thinking mode)
(
{
"content": [],
"current_tool_use": {},
"text": "2 + 2 = 4",
"reasoningText": "Let me calculate this simple math problem.",
"citationsContent": [],
"redactedContent": b"",
},
{
"content": [
{"reasoningContent": {"reasoningText": {"text": "Let me calculate this simple math problem."}}},
{"text": "2 + 2 = 4"},
],
"current_tool_use": {},
"text": "",
"reasoningText": "",
"citationsContent": [],
"redactedContent": b"",
},
),
# Reasoning AND Text with signature - both should be captured
(
{
"content": [],
"current_tool_use": {},
"text": "The answer is 4",
"reasoningText": "Thinking about the math",
"signature": "test-sig",
"citationsContent": [],
"redactedContent": b"",
},
{
"content": [
{
"reasoningContent": {
"reasoningText": {"text": "Thinking about the math", "signature": "test-sig"}
}
},
{"text": "The answer is 4"},
],
"current_tool_use": {},
"text": "",
"reasoningText": "",
"signature": "test-sig",
"citationsContent": [],
"redactedContent": b"",
},
),
# Reasoning AND Text with Citations - all should be captured
(
{
"content": [],
"current_tool_use": {},
"text": "According to the source",
"reasoningText": "I need to cite the source",
"citationsContent": [
{"location": {"documentChar": {"documentIndex": 0, "start": 0, "end": 10}}, "title": "Source"}
],
"redactedContent": b"",
},
{
"content": [
{"reasoningContent": {"reasoningText": {"text": "I need to cite the source"}}},
{
"citationsContent": {
"citations": [
{
"location": {"documentChar": {"documentIndex": 0, "start": 0, "end": 10}},
"title": "Source",
}
],
"content": [{"text": "According to the source"}],
}
},
],
"current_tool_use": {},
"text": "",
"reasoningText": "",
"citationsContent": [],
"redactedContent": b"",
},
),
],
)
def test_handle_content_block_stop(state, exp_updated_state):
Expand Down Expand Up @@ -982,6 +1066,89 @@ def _get_message_from_event(event: ModelStopReason) -> Message:
return cast(Message, event["stop"][1])


@pytest.mark.asyncio
async def test_process_stream_reasoning_and_text_same_block(agenerator, alist):
    """Reasoning and text deltas arriving in the same content block (Gemini
    thinking mode) must both be captured in the final assistant message."""
    events = [
        {"messageStart": {"role": "assistant"}},
        {"contentBlockStart": {"start": {}}},
        {
            "contentBlockDelta": {
                "delta": {"reasoningContent": {"text": "Let me calculate this..."}},
                "contentBlockIndex": 0,
            }
        },
        {"contentBlockDelta": {"delta": {"text": "2 + 2 = 4"}, "contentBlockIndex": 0}},
        {"contentBlockStop": {"contentBlockIndex": 0}},
        {"messageStop": {"stopReason": "end_turn"}},
        {
            "metadata": {
                "usage": {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30},
                "metrics": {"latencyMs": 100},
            }
        },
    ]

    collected = await alist(strands.event_loop.streaming.process_stream(agenerator(events)))
    message = _get_message_from_event(cast(ModelStopReason, collected[-1]))

    # The single source block must split into reasoning + text blocks, in that order.
    assert len(message["content"]) == 2
    assert message["content"][0]["reasoningContent"]["reasoningText"]["text"] == "Let me calculate this..."
    assert message["content"][1]["text"] == "2 + 2 = 4"


@pytest.mark.asyncio
async def test_process_stream_reasoning_and_text_same_block_with_signature(agenerator, alist):
    """Reasoning text, a reasoning signature, and plain text in one content
    block must all survive into the final assistant message."""

    def delta(payload):
        # Helper: wrap a delta payload as a block-0 contentBlockDelta event.
        return {"contentBlockDelta": {"delta": payload, "contentBlockIndex": 0}}

    events = [
        {"messageStart": {"role": "assistant"}},
        {"contentBlockStart": {"start": {}}},
        delta({"reasoningContent": {"text": "Thinking about this..."}}),
        delta({"reasoningContent": {"signature": "test-signature"}}),
        delta({"text": "The answer is 42"}),
        {"contentBlockStop": {"contentBlockIndex": 0}},
        {"messageStop": {"stopReason": "end_turn"}},
        {
            "metadata": {
                "usage": {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30},
                "metrics": {"latencyMs": 100},
            }
        },
    ]

    collected = await alist(strands.event_loop.streaming.process_stream(agenerator(events)))
    message = _get_message_from_event(cast(ModelStopReason, collected[-1]))

    assert len(message["content"]) == 2
    reasoning = message["content"][0]["reasoningContent"]["reasoningText"]
    assert reasoning["text"] == "Thinking about this..."
    assert reasoning["signature"] == "test-signature"
    assert message["content"][1]["text"] == "The answer is 42"


@pytest.mark.asyncio
async def test_process_stream_with_no_signature(agenerator, alist):
response = [
Expand Down
28 changes: 28 additions & 0 deletions tests_integ/models/test_model_anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,3 +182,31 @@ def test_input_and_max_tokens_exceed_context_limit():

with pytest.raises(ContextWindowOverflowException):
agent(messages)


def test_agent_invoke_reasoning():
    """Test reasoning content handling with thinking mode.

    Verifies the first content block of the response carries reasoning text,
    then runs a follow-up turn to confirm replaying a reasoning block back to
    the model does not raise.
    """
    reasoning_model = AnthropicModel(
        client_args={
            "api_key": os.getenv("ANTHROPIC_API_KEY"),
        },
        # Direct Anthropic API model IDs carry no Bedrock-style "-v1:0" suffix.
        model_id="claude-3-7-sonnet-20250219",
        # Anthropic requires max_tokens to be greater than thinking.budget_tokens;
        # 512 < 2000 would be rejected by the API.
        max_tokens=4096,
        additional_request_fields={
            "thinking": {
                "type": "enabled",
                "budget_tokens": 2000,
            }
        },
    )

    agent = Agent(model=reasoning_model, load_tools_from_directory=False)
    result = agent("Please reason about the equation 2+2.")

    assert "reasoningContent" in result.message["content"][0]
    assert result.message["content"][0]["reasoningContent"]["reasoningText"]["text"]

    # Test multi-turn to validate we don't throw an exception
    result2 = agent("What was my previous question about?")
    # Just validate we get a response without throwing an exception
    assert len(str(result2)) > 0
5 changes: 5 additions & 0 deletions tests_integ/models/test_model_bedrock.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,11 @@ def test_redacted_content_handling():
assert "redactedContent" in result.message["content"][0]["reasoningContent"]
assert isinstance(result.message["content"][0]["reasoningContent"]["redactedContent"], bytes)

# Test multi-turn to validate we don't throw an exception
result2 = agent("What was my previous question about?")
# Just validate we get a response without throwing an exception
assert len(str(result2)) > 0


def test_multi_prompt_system_content():
"""Test multi-prompt system content blocks."""
Expand Down
25 changes: 25 additions & 0 deletions tests_integ/models/test_model_gemini.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,3 +202,28 @@ def test_agent_with_gemini_code_execution_tool(gemini_tool_model):

result_turn2 = agent("Summarize that into a single number")
assert "5117" in str(result_turn2)


def test_agent_invoke_reasoning():
    """Test reasoning content handling with thinking mode."""
    reasoning_model = GeminiModel(
        client_args={"api_key": os.getenv("GOOGLE_API_KEY")},
        model_id="gemini-2.5-flash",
        params={
            "thinking_config": {
                "thinking_budget": 1024,
                "include_thoughts": True,
            },
        },
    )

    agent = Agent(model=reasoning_model, load_tools_from_directory=False)
    result = agent("Please reason about the equation 2+2.")

    # The first content block should hold the model's reasoning text.
    first_block = result.message["content"][0]
    assert "reasoningContent" in first_block
    assert first_block["reasoningContent"]["reasoningText"]["text"]

    # Multi-turn follow-up: only validates that no exception is raised.
    followup = agent("What was my previous question about?")
    assert len(str(followup)) > 0
5 changes: 5 additions & 0 deletions tests_integ/models/test_model_litellm.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,11 @@ def test_agent_invoke_reasoning(agent, model):
assert "reasoningContent" in result.message["content"][0]
assert result.message["content"][0]["reasoningContent"]["reasoningText"]["text"]

# Test multi-turn to validate we don't throw an exception
result2 = agent("What was my previous question about?")
# Just validate we get a response without throwing an exception
assert len(str(result2)) > 0


def test_structured_output(agent, weather):
tru_weather = agent.structured_output(type(weather), "The time is 12:00 and the weather is sunny")
Expand Down
Loading