Documentation Index
Fetch the complete documentation index at: https://agno-v2-docs-align-with-readme.mintlify.app/llms.txt
Use this file to discover all available pages before exploring further.
"""
Cache Team Response
=============================
Demonstrates two-layer caching for team leader and member responses.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
# Research specialist: gathers information for the team. Its model sets
# `cache_response=True`, which (per the module docstring) provides the
# member-level layer of the two-layer caching demo.
researcher = Agent(
    name="Researcher",
    role="Research and gather information",
    model=OpenAIResponses(id="gpt-5.2", cache_response=True),
)
# Writing specialist: turns gathered research into readable content.
# Uses the same cached model configuration as the other member.
writer = Agent(
    name="Writer",
    role="Write clear and engaging content",
    model=OpenAIResponses(id="gpt-5.2", cache_response=True),
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
# Team leader that coordinates both members. Its own model also enables
# `cache_response=True`, giving the leader-level (second) caching layer on
# top of the members' caches. `debug_mode=True` emits verbose run details.
content_team = Team(
    members=[researcher, writer],
    model=OpenAIResponses(id="gpt-5.2", cache_response=True),
    markdown=True,
    debug_mode=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): the original prompt read "a very very very explanation",
    # which is missing its adjective — fixed to ask for a short explanation.
    # Re-running this script should demonstrate cache hits at both the
    # leader and member layers.
    content_team.print_response(
        "Write a very short explanation of caching in software"
    )
Run the Example
# Clone and setup repo
git clone https://github.com/agno-agi/agno.git
cd agno/cookbook/03_teams/01_quickstart/caching
# Create and activate virtual environment
./scripts/demo_setup.sh
source .venvs/demo/bin/activate
python cache_team_response.py