5 changes: 3 additions & 2 deletions src/google/adk/agents/run_config.py
@@ -30,7 +30,6 @@

logger = logging.getLogger('google_adk.' + __name__)


class StreamingMode(Enum):
"""Streaming modes for agent execution.

@@ -160,7 +159,6 @@ class StreamingMode(Enum):
For bidirectional streaming, use runner.run_live() instead of run_async().
"""


class RunConfig(BaseModel):
"""Configs for runtime behavior of agents.

@@ -175,6 +173,9 @@ class RunConfig(BaseModel):
speech_config: Optional[types.SpeechConfig] = None
"""Speech configuration for the live agent."""

http_options: Optional[types.HttpOptions] = None
"""HTTP options for the agent execution (e.g. custom headers)."""

response_modalities: Optional[list[str]] = None
"""The output modalities. If not set, it's default to AUDIO."""

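For context, a minimal sketch of how a caller might use the new field. The header name is illustrative, and the `Runner.run_async` invocation shape is assumed from the surrounding ADK API rather than confirmed by this diff:

```python
from google.adk.agents.run_config import RunConfig
from google.genai import types

# Per-invocation HTTP options; the header name below is illustrative.
run_config = RunConfig(
    http_options=types.HttpOptions(
        headers={"x-request-source": "nightly-eval"},
    )
)

# Passed per invocation, so different runs can carry different headers
# without touching the agent's generate_content_config.
# (Assumes the usual ADK Runner keyword arguments.)
# events = runner.run_async(
#     user_id="user", session_id="session",
#     new_message=message, run_config=run_config,
# )
```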
17 changes: 16 additions & 1 deletion src/google/adk/flows/llm_flows/base_llm_flow.py
@@ -67,7 +67,6 @@
# Statistics configuration
DEFAULT_ENABLE_CACHE_STATISTICS = False


class BaseLlmFlow(ABC):
"""A basic flow that calls the LLM in a loop until a final response is generated.

@@ -483,6 +482,22 @@ async def _preprocess_async(
f'Expected agent to be an LlmAgent, but got {type(agent)}'
)

# Propagate http_options from RunConfig to LlmRequest as defaults.
# Request-level settings (from callbacks/processors) take precedence.
if (
invocation_context.run_config
and invocation_context.run_config.http_options
):
run_opts = invocation_context.run_config.http_options
if not llm_request.config.http_options:
# Deep-copy to avoid mutating the user's RunConfig across steps.
llm_request.config.http_options = run_opts.model_copy(deep=True)
elif run_opts.headers:
# Merge headers: request-level headers win (use setdefault).
if not llm_request.config.http_options.headers:
llm_request.config.http_options.headers = {}
for key, value in run_opts.headers.items():
llm_request.config.http_options.headers.setdefault(key, value)
Comment on lines +492 to +500
Contributor

medium

The `elif` block starting on line 495 appears to be unreachable: `llm_request` is newly instantiated in `_run_one_step_async`, so `llm_request.config.http_options` will be `None`. This means the `if` condition on line 492 is always true, and the `elif` branch never executes, which makes the code confusing.

You can simplify this logic to just assign the `http_options` from `run_config`:

      # Since LlmRequest is newly created, llm_request.config.http_options is always None here.
      # We can directly assign it after a deep copy to avoid mutating the user's RunConfig.
      llm_request.config.http_options = run_opts.model_copy(deep=True)

# Runs processors.
for processor in self.request_processors:
async with Aclosing(
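As a standalone illustration of the intended precedence, the `setdefault` merge lets request-level headers win while RunConfig only fills in missing keys; the header names and values below are made up:

```python
# Headers already set at the request level (e.g. by a callback) are kept;
# RunConfig headers only fill in keys that are not yet present.
request_headers = {"x-trace-id": "from-callback"}
run_config_headers = {"x-trace-id": "from-run-config", "x-team": "ml-infra"}

for key, value in run_config_headers.items():
    request_headers.setdefault(key, value)

assert request_headers == {
    "x-trace-id": "from-callback",  # request-level value preserved
    "x-team": "ml-infra",           # filled in from RunConfig
}
```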
34 changes: 31 additions & 3 deletions src/google/adk/flows/llm_flows/basic.py
@@ -17,7 +17,6 @@
from __future__ import annotations

from typing import AsyncGenerator
from typing import Generator

from google.genai import types
from typing_extensions import override
@@ -28,7 +27,6 @@
from ...utils.output_schema_utils import can_use_output_schema_with_tools
from ._base_llm_processor import BaseLlmRequestProcessor


class _BasicLlmRequestProcessor(BaseLlmRequestProcessor):

@override
@@ -38,11 +36,42 @@ async def run_async(
agent = invocation_context.agent
model = agent.canonical_model
llm_request.model = model if isinstance(model, str) else model.model

# Preserve http_options propagated from RunConfig
run_config_http_options = llm_request.config.http_options

llm_request.config = (
agent.generate_content_config.model_copy(deep=True)
if agent.generate_content_config
else types.GenerateContentConfig()
)

if run_config_http_options:
# Merge RunConfig http_options back, overriding agent config
if not llm_request.config.http_options:
llm_request.config.http_options = run_config_http_options
else:
# Merge headers
if run_config_http_options.headers:
if not llm_request.config.http_options.headers:
llm_request.config.http_options.headers = {}
llm_request.config.http_options.headers.update(
run_config_http_options.headers
)

# Merge other http_options fields if present in RunConfig.
# RunConfig values override agent defaults.
# Note: base_url, api_version, base_url_resource_scope are intentionally
# excluded as they are configuration-time settings, not request-time.
for field in [
'timeout',
'retry_options',
'extra_body',
]:
val = getattr(run_config_http_options, field, None)
if val is not None:
setattr(llm_request.config.http_options, field, val)

# Only set output_schema if no tools are specified. As of now, models don't
# support output_schema and tools together. We have a workaround to support
# both output_schema and tools at the same time. See
@@ -84,5 +113,4 @@ async def run_async(
return
yield # Generator requires yield statement in function body.


request_processor = _BasicLlmRequestProcessor()
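To make the merge in `_BasicLlmRequestProcessor` concrete, here is a self-contained sketch of the same header-and-field merge over a stand-in dataclass; `FakeHttpOptions` is a simplification for illustration, not the real `types.HttpOptions`:

```python
from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class FakeHttpOptions:
    """Illustrative stand-in for google.genai's types.HttpOptions."""
    headers: Optional[dict[str, str]] = None
    timeout: Optional[int] = None
    retry_options: Optional[Any] = None
    extra_body: Optional[dict[str, Any]] = None

agent_opts = FakeHttpOptions(headers={"x-env": "agent"}, timeout=10)
run_opts = FakeHttpOptions(headers={"x-env": "run"}, extra_body={"k": "v"})

# Headers: RunConfig wins on conflicts (dict.update semantics).
if run_opts.headers:
    agent_opts.headers = agent_opts.headers or {}
    agent_opts.headers.update(run_opts.headers)

# Other fields: copied only when RunConfig provides a value, so agent
# defaults survive. base_url and friends are deliberately left out,
# mirroring the exclusion noted in the diff.
for name in ("timeout", "retry_options", "extra_body"):
    val = getattr(run_opts, name, None)
    if val is not None:
        setattr(agent_opts, name, val)

assert agent_opts.headers == {"x-env": "run"}   # RunConfig overrode agent
assert agent_opts.timeout == 10                 # agent default kept
assert agent_opts.extra_body == {"k": "v"}      # RunConfig value applied
```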