## Basic Usage

```python
from observee_agents import chat_with_tools

result = chat_with_tools(
    message="Search for recent AI news",
    provider="anthropic",
    observee_api_key="obs_your_key_here"
)

print(result["content"])
```

## Streaming Responses

```python
import asyncio
from observee_agents import chat_with_tools_stream

async def stream_example():
    async for chunk in chat_with_tools_stream(
        message="What's the weather like today?",
        provider="anthropic",
        observee_api_key="obs_your_key_here"
    ):
        if chunk["type"] == "content":
            print(chunk["content"], end="", flush=True)
        elif chunk["type"] == "final_content":
            print(chunk["content"], end="", flush=True)
        elif chunk["type"] == "tool_result":
            print(f"\n🔧 [Tool: {chunk['tool_name']}]")

asyncio.run(stream_example())
```

## Parameters

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `message` | `str` | required | Your message or query |
| `provider` | `str` | `"anthropic"` | LLM provider: `"anthropic"`, `"openai"`, `"gemini"` |
| `model` | `str` | `None` | Specific model (auto-detected if not provided) |
| `observee_api_key` | `str` | `None` | Your Observee API key |
| `enable_filtering` | `bool` | `True` | Whether to filter tools |
| `filter_type` | `str` | `"bm25"` | Filter method: `"bm25"`, `"local_embedding"`, `"cloud"` |
| `max_tools` | `int` | `20` | Maximum tools to provide |
| `temperature` | `float` | `0.7` | LLM temperature |
| `max_tokens` | `int` | `1000` | Maximum response tokens |
| `session_id` | `str` | `None` | Session ID for conversation history |
| `system_prompt` | `str` | `None` | Custom system prompt |
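
As a quick reference, here is a sketch that combines several of these parameters in one call; the specific values shown are illustrative, not recommendations.

```python
import os

from observee_agents import chat_with_tools

# Combine filtering, sampling, and session parameters in one call.
result = chat_with_tools(
    message="Find my unread emails and summarize them",
    provider="anthropic",
    model=None,                       # auto-detected when omitted
    observee_api_key=os.getenv("OBSERVEE_API_KEY"),
    enable_filtering=True,
    filter_type="bm25",               # fast keyword-based tool filtering
    max_tools=10,
    temperature=0.3,
    max_tokens=1500,
    session_id="docs_example",        # enables conversation history
    system_prompt="You are a concise email assistant."
)
print(result["content"])
```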

## Response Formats

### Standard Response

```python
{
    "content": "AI response text",
    "tool_calls": [{"name": "tool_name", "input": {...}}],
    "tool_results": [{"tool": "tool_name", "result": "..."}],
    "filtered_tools_count": 5,
    "used_filtering": True
}
```
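
A short sketch of reading these fields from a call's return value, assuming `tool_calls` and `tool_results` are parallel lists:

```python
import os

from observee_agents import chat_with_tools

result = chat_with_tools(
    message="Search for recent AI news",
    observee_api_key=os.getenv("OBSERVEE_API_KEY")
)

print(result["content"])
if result["used_filtering"]:
    print(f"Tools provided after filtering: {result['filtered_tools_count']}")

# Assumes tool_calls and tool_results line up one-to-one.
for call, outcome in zip(result["tool_calls"], result["tool_results"]):
    print(f"{call['name']} -> {outcome['result']}")
```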

### Streaming Chunk Types

```python
# Content chunk
{
    "type": "content",
    "content": "streaming text content"
}

# Tool result chunk
{
    "type": "tool_result",
    "tool_name": "tool_name",
    "result": "tool output"
}

# Error chunk
{
    "type": "error",
    "error": "error message"
}

# Session metadata chunk
{
    "type": "done",
    "session_id": "session_123",  # When using conversation history
    "final_response": {...}
}
```
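
A single dispatch loop can cover all of these chunk types, plus the `final_content` chunks seen in the streaming examples; a minimal sketch:

```python
import asyncio
import os

from observee_agents import chat_with_tools_stream

async def handle_stream(message: str) -> None:
    # Dispatch on the documented chunk types.
    async for chunk in chat_with_tools_stream(
        message=message,
        observee_api_key=os.getenv("OBSERVEE_API_KEY")
    ):
        kind = chunk["type"]
        if kind in ("content", "final_content"):
            print(chunk["content"], end="", flush=True)
        elif kind == "tool_result":
            print(f"\n🔧 [Tool: {chunk['tool_name']}] {chunk['result']}")
        elif kind == "error":
            print(f"\n⚠️ {chunk['error']}")
        elif kind == "done":
            print(f"\nSession: {chunk.get('session_id')}")

asyncio.run(handle_stream("What's in the news today?"))
```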

## Examples

### Different Providers

```python
# Anthropic Claude
result = chat_with_tools(
    message="Analyze this data",
    provider="anthropic",
    model="claude-sonnet-4-20250514"
)

# OpenAI GPT
result = chat_with_tools(
    message="Write a summary",
    provider="openai",
    model="gpt-4o"
)

# Google Gemini
result = chat_with_tools(
    message="Search for information",
    provider="gemini",
    model="gemini-2.5-pro"
)
```

### Tool Filtering

```python
# Fast keyword filtering (default)
result = chat_with_tools(
    message="Gmail email management",
    filter_type="bm25"
)

# Semantic filtering
result = chat_with_tools(
    message="Help me be productive",
    filter_type="local_embedding"
)

# No filtering - all tools
result = chat_with_tools(
    message="What can you do?",
    enable_filtering=False
)
```
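
To see the effect of filtering, the filtering fields from the standard response (documented above) can be compared across calls; a rough sketch:

```python
import os

from observee_agents import chat_with_tools

key = os.getenv("OBSERVEE_API_KEY")

filtered = chat_with_tools(
    message="Gmail email management",
    filter_type="bm25",
    observee_api_key=key
)
unfiltered = chat_with_tools(
    message="Gmail email management",
    enable_filtering=False,
    observee_api_key=key
)

print(f"Filtered: used_filtering={filtered['used_filtering']}, "
      f"tools provided={filtered['filtered_tools_count']}")
print(f"Unfiltered: used_filtering={unfiltered['used_filtering']}")
```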

## Conversation Features

### Conversation History

```python
import asyncio
from observee_agents import chat_with_tools_stream, get_conversation_history

async def conversation_example():
    # Create a conversation with memory
    async for chunk in chat_with_tools_stream(
        message="Search my emails",
        session_id="my_assistant",
        provider="anthropic"
    ):
        if chunk["type"] == "content":
            print(chunk["content"], end="", flush=True)

    # Follow-up - remembers context!
    async for chunk in chat_with_tools_stream(
        message="Summarize the first email you found",
        session_id="my_assistant"  # Same session = memory
    ):
        if chunk["type"] == "content":
            print(chunk["content"], end="", flush=True)

    # Check conversation history
    history = get_conversation_history("my_assistant")
    print(f"\nConversation has {len(history)} messages")

asyncio.run(conversation_example())
```

### Streaming with Conversation History

```python
import asyncio
import os

from observee_agents import chat_with_tools_stream, get_conversation_history

async def conversational_streaming():
    session_id = "streaming_assistant"
    custom_prompt = "You are a helpful assistant. Stream responses naturally."

    print("💬 First streaming message:")
    async for chunk in chat_with_tools_stream(
        message="Search for emails about meetings",
        provider="anthropic",
        session_id=session_id,  # 🆕 Session for memory
        system_prompt=custom_prompt,  # 🆕 Custom system prompt
        observee_api_key=os.getenv("OBSERVEE_API_KEY")
    ):
        if chunk["type"] == "content":
            print(chunk["content"], end="", flush=True)
        elif chunk["type"] == "tool_result":
            print(f"\n🔧 [Tool: {chunk['tool_name']}]")
        elif chunk["type"] == "done":
            print(f"\n✅ Session: {chunk.get('session_id')}")

    print("\n" + "=" * 40 + "\n💬 Follow-up streaming (remembers context):")

    async for chunk in chat_with_tools_stream(
        message="What was the subject of the first meeting?",
        session_id=session_id,  # Same session = memory!
        observee_api_key=os.getenv("OBSERVEE_API_KEY")
    ):
        if chunk["type"] == "content":
            print(chunk["content"], end="", flush=True)
        elif chunk["type"] == "final_content":
            print(chunk["content"], end="", flush=True)

    # Check conversation history
    history = get_conversation_history(session_id)
    print(f"\n📊 Total messages in conversation: {len(history)}")

asyncio.run(conversational_streaming())
```

### Custom System Prompts

```python
# Create specialized assistants
result = chat_with_tools(
    message="Help me organize my tasks",
    system_prompt="You are an expert productivity coach. Focus on actionable advice.",
    provider="anthropic"
)

# Different assistant for different tasks
result = chat_with_tools(
    message="Analyze this data",
    system_prompt="You are a data scientist. Provide technical insights.",
    provider="anthropic"
)
```

## Custom Configuration

```python
result = chat_with_tools(
    message="Creative writing task",
    provider="openai",
    temperature=0.9,
    max_tokens=2000,
    max_tools=10
)
```

## Next Steps