from agno.agent import Agent from agno.db.sqlite import SqliteDb from agno.models.openai import OpenAIChat from agno.os import AgentOS from agno.tools.mcp import MCPTools
# Create the Agent
agno_agent = Agent(
    name="Agno Agent",
    model=OpenAIChat(id="gpt-4o", base_url="https://api.zhizengzeng.com/v1"),
    # Add a database to the Agent
    db=SqliteDb(db_file="agno.db"),
    # Add the Agno MCP server to the Agent
    tools=[MCPTools(transport="streamable-http", url="https://docs.agno.com/mcp")],
    # Add the previous session history to the context
    add_history_to_context=True,
    markdown=True,
)
# Create the AgentOS
agent_os = AgentOS(agents=[agno_agent])

# Get the FastAPI app for the AgentOS
app = agent_os.get_app()
from typing import Iterator from agno.agent import Agent, RunOutput, RunOutputEvent, RunEvent from agno.models.openai import OpenAIChat from agno.tools.hackernews import HackerNewsTools from agno.utils.pprint import pprint_run_response
from agno.agent import Agent, RunOutput from agno.models.openai import OpenAIChat from agno.tools.hackernews import HackerNewsTools from agno.utils.pprint import pprint_run_response
from typing import Iterator from agno.agent import Agent, RunOutputEvent, RunEvent from agno.models.openai import OpenAIChat from agno.tools.hackernews import HackerNewsTools
from agno.agent import Agent from agno.db.sqlite import SqliteDb from agno.models.openai import OpenAI from agno.tools.hackernews import HackerNewsTools
from agno.agent import Agent from agno.session import SessionSummaryManager from agno.models.openai import OpenAIChat from agno.db.sqlite import SqliteDb
from agno.agent import Agent from agno.models.openai import OpenAIChat from agno.tools.duckduckgo import DuckDuckGoTools from agno.db.sqlite import SqliteDb
from typing import List

from pydantic import BaseModel, Field
from rich.pretty import pprint

from agno.agent import Agent
from agno.models.openai import OpenAIChat
from typing import List

from pydantic import BaseModel, Field

from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.hackernews import HackerNewsTools
from typing import List

from pydantic import BaseModel, Field
from rich.pretty import pprint

from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.tools.hackernews import HackerNewsTools
class ResearchTopic(BaseModel):
    """Input schema for a research request."""

    # The topic to research.
    topic: str
    # Number of sources the researcher should gather (default 5).
    sources_required: int = 5
# Agent that researches HackerNews posts, with typed input/output schemas.
hn_researcher_agent = Agent(
    model=Claude(id="claude-sonnet-4-0"),
    tools=[HackerNewsTools()],
    input_schema=ResearchTopic,
    output_schema=ResearchOutput,
    instructions="Research hackernews posts for a given topic",
)
from agno.agent import Agent from agno.models.openai import OpenAIChat
agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    description="You are a famous short story writer asked to write for a magazine",
    instructions=["Always write 2 sentence stories."],
    markdown=True,
    debug_mode=True,  # Set to True to view detailed logs and the system message content
)
agent.print_response("Tell me a horror story.", stream=True)
该代码将生成以下系统消息:
1 2 3 4 5 6 7 8
You are a famous short story writer asked to write for a magazine <instructions> - Always write 2 sentence stories. </instructions> <additional_information> - Use markdown to format your answer </additional_information>
# Agent demonstrating every add_*_to_context flag plus additional_context.
agent = Agent(
    name="Helpful Assistant",
    role="Assistant",
    description="You are a helpful assistant",
    instructions=["Help the user with their question"],
    additional_context="""
    Here is an example of how to answer the user's question:

    Request: What is the capital of France?
    Response: The capital of France is Paris.
    """,
    expected_output="You should format your response with `Response: <response>`",
    markdown=True,
    add_datetime_to_context=True,
    add_location_to_context=True,
    add_name_to_context=True,
    add_session_summary_to_context=True,
    add_memories_to_context=True,
    add_session_state_to_context=True,
)
You are a helpful assistant <your_role> Assistant </your_role>
<instructions> Help the user with their question </instructions>
<additional_information> Use markdown to format your answers. The current time is 2025-09-30 12:00:00. Your approximate location is: New York, NY, USA. Your name is: Helpful Assistant. </additional_information>
<expected_output> You should format your response with `Response: <response>` </expected_output>
Here is an example of how to answer the user's question: Request: What is the capital of France? Response: The capital of France is Paris.
You have access to memories from previous interactions with the user that you can use:
<memories_from_previous_interactions> - User really likes Digimon and Japan. - User really likes Japan. - User likes coffee. </memories_from_previous_interactions>
Note: this information is from previous interactions and may be updated in this conversation. You should always prefer information from this conversation over the past memories.
Here is a brief summary of your previous interactions:
<summary_of_previous_interactions> The user asked about information about Digimon and Japan. </summary_of_previous_interactions>
Note: this information is from previous interactions and may be outdated. You should ALWAYS prefer information from this conversation over the past summary.
agent = Agent(
    model=LangDB(id="llama3-1-70b-instruct-v1.0"),
    tools=[duckdb_tools],
    markdown=True,
    # Tell the model which tables the DuckDB tools can query.
    additional_context=dedent("""\
        You have access to the following tables:
        - movies: contains information about movies from IMDB.
    """),
)
agent.print_response("What is the average rating of movies?", stream=True)
from agno.agent import Agent from agno.tools.slack import SlackTools
slack_tools = SlackTools(
    instructions=["Use `send_message` to send a message to the user. If the user specifies a thread, use `send_message_thread` to send a message to the thread."],
    # Append the instructions above to the agent's system message.
    add_instructions=True,
)
agent = Agent(
    tools=[slack_tools],
)
<updating_user_memories> - You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories. - If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database. - Memories should include details that could personalize ongoing interactions with the user. - Use this tool to add new memories or update existing memories that you identify in the conversation. - Use this tool if the user asks to update their memory, delete a memory, or clear all memories. - If you use the `update_user_memory` tool, remember to pass on the response to the user. </updating_user_memories>
The knowledge base contains documents with these metadata filters: [filter1, filter2, filter3]. Always use filters when the user query indicates specific metadata.
Examples: 1. If the user asks about a specific person like "Jordan Mitchell", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like user_id>': '<valid value based on the user query>'}}. 2. If the user asks about a specific document type like "contracts", you MUST use the search_knowledge_base tool with the filters parameter set to {{'document_type': 'contract'}}. 3. If the user asks about a specific location like "documents from New York", you MUST use the search_knowledge_base tool with the filters parameter set to {{'<valid key like location>': 'New York'}}.
General Guidelines: - Always analyze the user query to identify relevant metadata. - Use the most specific filter(s) possible to narrow down results. - If multiple filters are relevant, combine them in the filters parameter (e.g., {{'name': 'Jordan Mitchell', 'document_type': 'contract'}}). - Ensure the filter keys match the valid metadata filters: [filter1, filter2, filter3].
You can use the search_knowledge_base tool to search the knowledge base and get the most relevant documents. Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUCTURE STRICTLY.
from agno.agent import Agent

agent = Agent(add_knowledge_to_context=True, add_dependencies_to_context=True)
agent.print_response("What is the capital of France?", dependencies={"name": "John Doe"})
发送给模型的用户消息如下:
1 2 3 4 5 6 7 8 9 10 11
What is the capital of France?
Use the following references from the knowledge base if it helps: <references> - Reference 1 - Reference 2 </references>
from agno.agent.agent import Agent from agno.db.postgres import PostgresDb from agno.models.openai import OpenAIChat
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)

agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    db=db,
    session_id="chat_history",
    instructions="You are a helpful assistant that can answer questions about space and oceans.",
    add_history_to_context=True,
    num_history_runs=2,  # Optional: limit how many history runs are added to the context
)

agent.print_response("Where is the sea of tranquility?")
agent.print_response("What was my first question?")
agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    tools=[get_weather_for_city],
    db=SqliteDb(db_file="tmp/agent.db"),
    add_history_to_context=True,
    max_tool_calls_from_history=3,  # Only keep the 3 most recent tool calls
)
agent.print_response("What's the weather in Tokyo?")
agent.print_response("What's the weather in Paris?")
agent.print_response("What's the weather in London?")
agent.print_response("What's the weather in Berlin?")
agent.print_response("What's the weather in Mumbai?")
agent.print_response("What's the weather in Miami?")
agent.print_response("What's the weather in New York?")
agent.print_response("What's the weather in above cities?")
from agno.agent import Agent from agno.models.message import Message from agno.models.openai.chat import OpenAIChat
# Few-shot examples
support_examples = [
    Message(role="user", content="I forgot my password and can't log in"),
    Message(
        role="assistant",
        content="""I'll help you reset your password right away... """,
    ),
    ...
]
# Support agent seeded with few-shot examples via additional_input.
agent = Agent(
    name="Customer Support Specialist",
    model=OpenAIChat(id="gpt-5-mini"),
    add_name_to_context=True,
    additional_input=support_examples,
    instructions=[
        "You are an expert customer support specialist.",
        "Always be empathetic, professional, and solution-oriented.",
        "Provide clear, actionable steps to resolve customer issues.",
        "Follow the established patterns for consistent, high-quality support.",
    ],
    markdown=True,
)
from agno.agent import Agent from agno.models.openai import OpenAIChat
# Dependencies are interpolated into the {name} placeholder in the instructions.
agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    dependencies={"name": "John Doe"},
    instructions="You are a story writer. The current user is {name}.",
)

agent.print_response("Write a 5 second short story about {name}")
import json from textwrap import dedent import httpx from agno.agent import Agent from agno.models.openai import OpenAIChat
def get_top_hackernews_stories(num_stories: int = 5) -> str:
    """Fetch and return the top stories from HackerNews.

    Args:
        num_stories: Number of top stories to fetch (default: 5).

    Returns:
        A JSON string containing each story's title, URL, score, etc.
    """
    # Fetch the top story ids, then each story's details, dropping the
    # "kids" field (the comment tree) to keep the payload small.
    stories = [
        {
            k: v
            for k, v in httpx.get(
                f"https://hacker-news.firebaseio.com/v0/item/{id}.json"
            )
            .json()
            .items()
            if k != "kids"  # exclude the comments
        }
        for id in httpx.get(
            "https://hacker-news.firebaseio.com/v0/topstories.json"
        ).json()[:num_stories]
    ]
    return json.dumps(stories, indent=4)
agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    # Each dependency function is evaluated automatically when the agent runs.
    # Think of it as dependency injection for the Agent.
    dependencies={"top_hackernews_stories": get_top_hackernews_stories},
    # You can also add the dependencies to the instructions manually.
    instructions=dedent("""\
        You are an insightful tech trend observer! 📰

        Here are the top stories on HackerNews:
        {top_hackernews_stories}\
    """),
    markdown=True,
)

# Example usage
agent.print_response(
    "Summarize the top stories on HackerNews and identify any interesting trends.",
    stream=True,
)
agent.print_response(
    "Get the user profile for the user with ID 123 and tell me about their experience level.",
    stream=True,
)

# You can also pass dependencies when calling print_response
# agent.print_response(
#     "Get the user profile for the user with ID 123 and tell me about their experience level.",
#     dependencies={"user_profile": get_user_profile},
#     stream=True,
# )
这会将整个依赖项字典插入到用户消息中,位于 `<additional context>` 标签之间。
新的用户消息看起来如下:
1 2 3 4 5 6 7
Get the user profile for the user with ID 123 and tell me about their experience level. <additional context> { "user_profile": "{\n \"name\": \"John Doe\",\n \"experience_level\": \"senior\"\n}" } </additional context>
from agno.agent import Agent from agno.db.sqlite import SqliteDb from agno.models.openai import OpenAIChat
# Define a tool that adds an item to the shopping list
def add_item(session_state, item: str) -> str:
    """Append *item* to the shopping list and report the updated list.

    Args:
        session_state: Session-state dict holding the "shopping_list" key.
        item: The item to add.

    Returns:
        A message showing the updated shopping list.
    """
    session_state["shopping_list"].append(item)
    return f"The shopping list is now {session_state['shopping_list']}"
from textwrap import dedent from agno.agent import Agent from agno.db.sqlite import SqliteDb from agno.models.openai import OpenAIChat
# Define tools for managing the shopping list
def add_item(session_state, item: str) -> str:
    """Add *item* to the shopping list, skipping case-insensitive duplicates.

    Args:
        session_state: Session-state dict holding the "shopping_list" key.
        item: The item to add.

    Returns:
        A message saying whether the item was added or already present.
    """
    if item.lower() not in [i.lower() for i in session_state["shopping_list"]]:
        session_state["shopping_list"].append(item)
        return f"Added '{item}' to the shopping list"
    else:
        return f"'{item}' is already in the shopping list"
def remove_item(session_state, item: str) -> str:
    """Remove the first case-insensitive match of *item* from the shopping list.

    Args:
        session_state: Session-state dict holding the "shopping_list" key.
        item: The item name to remove (case-insensitive).

    Returns:
        A message saying whether the item was removed or not found.
    """
    for i, list_item in enumerate(session_state["shopping_list"]):
        if list_item.lower() == item.lower():
            session_state["shopping_list"].pop(i)
            return f"Removed '{list_item}' from the shopping list"
    return f"'{item}' was not found in the shopping list"
def list_items(session_state) -> str:
    """List all items currently on the shopping list.

    Args:
        session_state: Session-state dict holding the "shopping_list" key.

    Returns:
        A bulleted listing of the items, or a message if the list is empty.
    """
    shopping_list = session_state["shopping_list"]
    if not shopping_list:
        return "The shopping list is empty."
    items_text = "\n".join([f"- {item}" for item in shopping_list])
    return f"Current shopping list:\n{items_text}"
# Create a stateful shopping-list manager Agent
agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    session_state={"shopping_list": []},
    db=SqliteDb(db_file="tmp/example.db"),
    tools=[add_item, remove_item, list_items],
    # {shopping_list} is interpolated from session_state at run time.
    instructions=dedent("""\
        Your job is to manage a shopping list.

        The shopping list starts empty. You can add items, remove items by name, and list all items.

        Current shopping list: {shopping_list}
    """),
    markdown=True,
)

# Example usage
agent.print_response("Add milk, eggs, and bread to the shopping list", stream=True)
print(f"Session state: {agent.get_session_state()}")
agent.print_response("I got bread", stream=True)
agent.print_response("I need apples and oranges", stream=True)
agent.print_response("whats on my list?", stream=True)
agent.print_response("Clear everything and start with bananas and yogurt", stream=True)
from agno.agent import Agent from agno.models.openai import OpenAIChat from agno.db.sqlite import SqliteDb
agent = Agent(
    db=SqliteDb(db_file="tmp/agents.db"),
    model=OpenAIChat(id="gpt-5-mini"),
    instructions="Users name is {user_name} and age is {age}",
)

# User 1
agent.print_response("What is my name?", session_id="user_1_session_1", user_id="user_1", session_state={"user_name": "John", "age": 30})
agent.print_response("How old am I?", session_id="user_1_session_1", user_id="user_1")

# User 2
agent.print_response("What is my name?", session_id="user_2_session_1", user_id="user_2", session_state={"user_name": "Jane", "age": 25})
agent.print_response("How old am I?", session_id="user_2_session_1", user_id="user_2")
agent.print_response(
    "Can you tell me what's in your session_state?",
    session_state={"shopping_list": ["Potatoes"]},
    stream=True,
)
print(f"Stored session state: {agent.get_session_state()}")
agent.print_response(
    "Can you tell me what is in your session_state?",
    session_state={"secret_number": 43},
    stream=True,
)
print(f"Stored session state: {agent.get_session_state()}")
agent.print_response("What was my last question?")
agent.print_response("What is the capital of France?")
agent.print_response("What was my last question?")
pprint(agent.get_messages_for_session())
首次运行时,智能体无法回答 “What was my last question?”, 但再次运行后,它就能回答正确。 因为我们为其设置了固定的 session_id,智能体会在每次运行时延续相同的会话上下文。
# First interaction
memory_agent.print_response(
    "My name is Ava and I like to ski.",
    user_id=user_id,
    stream=True,
    stream_events=True,
)
print("关于 Ava 的记忆:")
pprint(memory_agent.get_user_memories(user_id=user_id))

# Second interaction
memory_agent.print_response(
    "I live in san francisco, where should i move within a 4 hour drive?",
    user_id=user_id,
    stream=True,
    stream_events=True,
)
print("关于 Ava 的记忆:")
pprint(memory_agent.get_user_memories(user_id=user_id))
import asyncio from agno.agent import Agent from agno.db.postgres.postgres import PostgresDb from agno.knowledge.embedder.openai import OpenAIEmbedder from agno.knowledge.knowledge import Knowledge from agno.vectordb.pgvector import PgVector
# Postgres-backed store for knowledge contents.
db = PostgresDb(
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    knowledge_table="knowledge_contents",
)
# image_agent.py from agno.agent import Agent from agno.media import Image from agno.models.openai import OpenAIChat from agno.tools.duckduckgo import DuckDuckGoTools
# audio_agent.py import requests from agno.agent import Agent from agno.media import Audio from agno.models.openai import OpenAIChat from agno.utils.audio import write_audio_to_file
from agno.agent import Agent from agno.models.openai import OpenAIChat from agno.exceptions import CheckTrigger, OutputCheckError from agno.run.agent import RunOutput
from agno.agent import Agent from agno.models.google import Gemini from agno.tools.duckduckgo import DuckDuckGoTools from agno.db.sqlite import SqliteDb from rich.pretty import pprint
for chunk in agent.run( "Write a very long story about a dragon who learns to code. " "Make it at least 2000 words with detailed descriptions and dialogue. " "Take your time and be very thorough.", stream=True, # 启用流式输出以便实时取消 ): if"run_id"notin run_id_container and chunk.run_id: run_id_container["run_id"] = chunk.run_id
if chunk.event == RunEvent.run_content: print(chunk.content, end="", flush=True) content_pieces.append(chunk.content) elif chunk.event == RunEvent.run_cancelled: print(f"\n🚫 Run was cancelled: {chunk.run_id}") run_id_container["result"] = { "status": "cancelled", "run_id": chunk.run_id, "cancelled": True, "content": "".join(content_pieces)[:200] + "..."if content_pieces else"No content before cancellation", } return elifhasattr(chunk, "status") and chunk.status == RunStatus.completed: final_response = chunk
# 正常结束 if final_response: run_id_container["result"] = { "status": final_response.status.value if final_response.status else"completed", "run_id": final_response.run_id, "cancelled": final_response.status == RunStatus.cancelled, "content": ("".join(content_pieces)[:200] + "...") if content_pieces else"No content", }
def cancel_after_delay(agent: Agent, run_id_container: dict, delay_seconds: int = 3):
    """Cancel the tracked run after a delay.

    Args:
        agent: The agent whose run should be cancelled.
        run_id_container: Shared dict the streaming loop fills with "run_id".
        delay_seconds: How long to wait before attempting cancellation.
    """
    print(f"⏰ Will cancel run in {delay_seconds} seconds...")
    time.sleep(delay_seconds)

    run_id = run_id_container.get("run_id")
    if run_id:
        print(f"🚫 Cancelling run: {run_id}")
        success = agent.cancel_run(run_id)
        if success:
            print(f"✅ Run {run_id} marked for cancellation")
        else:
            print(f"❌ Failed to cancel run {run_id} (may not exist or already completed)")
    else:
        print("⚠️ No run_id found to cancel")
result = run_id_container.get("result")
if result:
    print(f"Status: {result['status']}")
    print(f"Run ID: {result['run_id']}")
    print(f"Was Cancelled: {result['cancelled']}")
    print(f"Content Preview: {result['content']}")

    if result["cancelled"]:
        print("\n✅ SUCCESS: Run was successfully cancelled!")
    else:
        print("\n⚠️ WARNING: Run completed before cancellation")
else:
    print("❌ No result obtained - check if cancellation happened during streaming")