End-to-End Implementation Walkthrough: Build a FastAPI Server and Client

Before you begin, ensure you have the following prerequisites and technical requirements in place to successfully implement the FastAPI server and client:

  1. Install Visual Studio Code: https://code.visualstudio.com/

  2. Install Python: https://www.python.org/downloads/

Detailed Step-by-Step Implementation Guide

  1. Open a terminal at your C:\ drive and create a new directory named "openai-agents" by running the command mkdir openai-agents.

  2. Open Visual Studio Code, click File, and select Open Folder.

  3. Select the folder "openai-agents".

  1. Select "New Terminal" from the Menu.

  5. Type the following commands in your terminal, pressing Enter after each:

PS C:\openai-agents> python -m venv .venv

PS C:\openai-agents> .venv\Scripts\activate (Windows) or source .venv/bin/activate (macOS/Linux)

(.venv) PS C:\openai-agents> pip install fastapi uvicorn httpx pydantic openai openai-agents notion-client

Explanation:

  • fastapi — Web server framework

  • uvicorn — ASGI server to run FastAPI

  • httpx — For HTTP requests (Perplexity, Alpha Vantage)

  • pydantic — Data validation/schemas (FastAPI uses it)

  • openai — OpenAI Python SDK

  • openai-agents — OpenAI Agents SDK (agents package)

  • notion-client — Official Notion Python API
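
To confirm the installation succeeded, you can run a quick import check before moving on (a minimal sketch; the file name "check_install.py" is just a suggestion):

# check_install.py — optional sanity check that the required packages import
import fastapi, httpx, openai, pydantic, uvicorn
import agents          # installed by the openai-agents package
import notion_client   # installed by the notion-client package

print("fastapi", fastapi.__version__)
print("openai", openai.__version__)
print("All required packages imported successfully.")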

  1. Create a new file called "server_fastapi.py" and paste the following Python code into Visual Studio Code.

# server_fastapi.py

import os
import textwrap
from datetime import datetime, timezone
import httpx
import uvicorn

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from openai import OpenAI
from agents import Agent, Runner, function_tool
from notion_client import Client as NotionClient

# ====== ENVIRONMENT ======

os.environ.setdefault("OPENAI_API_KEY", "YOUR_OPENAI_APIKEY") # Replace with your actual OPENAI_APIKEY
os.environ.setdefault("PERPLEXITY_API_KEY", "YOUR_PERPEXITY_APIKEY") # Replace with your actual PERPEXITY_APIKEY
os.environ.setdefault("ALPHA_VANTAGE_API_KEY", "YOUR_ALPHA_VANTAGE_APIKEY") # Replace with your actual ALPHA_VANTAGE_APIKEY
os.environ.setdefault("NOTION_TOKEN", "YOUR_NOTION_TOKEN") # Replace with your actual NOTION_TOKEN
os.environ.setdefault("NOTION_DATABASE_ID", "YOUR_NOTION_DATABASE_ID") # Replace with your actual NOTION_DATABASE_ID

notion = NotionClient(auth=os.environ["NOTION_TOKEN"])
DATABASE_ID = os.environ["NOTION_DATABASE_ID"]
client = OpenAI()  # OpenAI client, used by the formatting helper and the optional LLM tool


# ====== TOOLS ======

@function_tool
def perplexity_search(query: str) -> str:
    """Query Perplexity AI and return a markdown summary with citations."""
    headers = {
        "Authorization": f"Bearer {os.environ['PERPLEXITY_API_KEY']}",
        "Content-Type": "application/json"
    }
    messages = [
        {"role": "system", "content": "You are a search assistant. Provide markdown-formatted results with citations."},
        {"role": "user", "content": query}
    ]
    payload = {
        "model": os.getenv("PERPLEXITY_MODEL", "sonar-pro"),
        "messages": messages,
        "max_tokens": 4096,
        "temperature": 0.2,
        "top_p": 0.9,
        "stream": False
    }
    try:
        resp = httpx.post("https://api.perplexity.ai/chat/completions", headers=headers, json=payload, timeout=60)
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"ERROR: Perplexity search failed → {e}"

@function_tool
def alpha_vantage_finance(symbol: str) -> dict:
    """Return *raw* JSON time-series for the given ticker from Alpha Vantage."""
    api_key = os.environ["ALPHA_VANTAGE_API_KEY"]
    url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={api_key}"
    try:
        resp = httpx.get(url, timeout=30)
        resp.raise_for_status()
        return resp.json()
    except Exception as e:
        return {"error": f"Alpha Vantage API call failed → {e}"}

# Utility function for formatting with OpenAI (not a tool!)
def openai_llm_format(prompt: str) -> str:
    try:
        resp = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )
        return resp.choices[0].message.content.strip()
    except Exception as e:
        return f"OpenAI LLM Error: {e}"

@function_tool
def notion_save_answer(question: str, answer: str) -> str:
    formatted_answer = openai_llm_format(
        f"""Format the following answer as a professional stock/market summary suitable for business readers. 
- Use a bold headline.
- Add concise paragraphs.
- Insert section headers (like 'Key Statistics', 'Dividend Information').
- Present statistics in a bulleted list.
- Add source attribution at the end if provided.

Content:
{answer}
"""
    )
    def to_blocks(markdown: str, chunk: int = 2000):
        # Notion caps each rich text "content" string at 2,000 characters,
        # so chunks must stay at or below that limit.
        paragraphs = textwrap.wrap(markdown, width=chunk, break_long_words=False, break_on_hyphens=False)
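        # Note: textwrap.wrap collapses newlines, so markdown line breaks in the
        # formatted answer are flattened into plain paragraphs; split the text on
        # newlines before wrapping if you need to preserve them.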
        return [
            {
                "object": "block",
                "type": "paragraph",
                "paragraph": {
                    "rich_text": [{"type": "text", "text": {"content": p}}]
                },
            }
            for p in paragraphs
        ]
    try:
        page = notion.pages.create(
            parent={"database_id": DATABASE_ID},
            properties={
                "Title": {"title": [{"text": {"content": question}}]},
                "Saved at": {"date": {"start": datetime.now(timezone.utc).isoformat()}}
            },
            children=to_blocks(formatted_answer),
        )
        return f"✅ Saved to Notion: {page['url']}"
    except Exception as e:
        return f"❌ Failed to save to Notion → {e}"

@function_tool
def openai_llm_tool(prompt: str) -> str:
    """Use OpenAI's GPT to reason/answer directly (optional)."""
    try:
        resp = client.chat.completions.create(
            model="gpt-4o",  # or your preferred model
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )
        return resp.choices[0].message.content.strip()
    except Exception as e:
        return f"OpenAI LLM Error: {e}"

# ====== AGENTS (with Handoffs) ======

knowledge_digger = Agent(
    name="Knowledge Digger",
    instructions="""
    • Gather information with Perplexity and, if needed, alpha_vantage_finance.
    • When you have a complete answer, call notion_save_answer(question, answer) yourself.
      - If you need deeper synthesis first, hand off to Strategic Brain instead.
    • End with exactly one call:
        - transfer_to_strategic_brain()  → for further synthesis
        - *(no hand-off if you already called notion_save_answer)*
    """,
    tools=[perplexity_search, alpha_vantage_finance, notion_save_answer],
)
strategic_brain = Agent(
    name="Strategic Brain",
    instructions="Refine Knowledge Digger’s findings into a concise, actionable insight paragraph. Use OpenAI LLM if needed.",
    tools=[perplexity_search, openai_llm_tool],
)
orchestrator = Agent(
    name="Orchestrator",
    instructions="""
    You control a two-step pipeline and must move one step at a time:
      1. transfer_to_knowledge_digger()
      2. transfer_to_strategic_brain()  (only after Knowledge Digger replies)

    Output requirements:
    • Produce exactly one transfer_to_<agent>() call and nothing else.
    • If you accidentally think of multiple calls, choose only the next step in sequence.
    """,
    handoffs=[knowledge_digger, strategic_brain],
)
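
# To smoke-test the agent pipeline without the HTTP layer, you can invoke the
# runner directly from a plain script that imports orchestrator, for example:
#   result = Runner.run_sync(orchestrator, input="Summarize AAPL's latest close")
#   print(result.final_output)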

# ====== FASTAPI SERVER ======

app = FastAPI()

class InputReq(BaseModel):
    input: str

@app.post("/run")
async def run_agents(req: InputReq):
    result = await Runner.run(orchestrator, input=req.input)
    return {"output": result.final_output}

@app.get("/")
def root():
    return {"status": "ok", "message": "OpenAI Agents SDK multi-agent FastAPI server"}

@app.exception_handler(Exception)
async def exception_handler(request: Request, exc: Exception):
    import traceback
    tb = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
    print("EXCEPTION:", tb)  # Will show in your server terminal
    # Exception handlers must return a Response object, so wrap the payload in JSONResponse.
    return JSONResponse(status_code=500, content={"error": "Internal server error", "details": tb})

if __name__ == "__main__":
    uvicorn.run("server_fastapi:app", host="127.0.0.1", port=8080, reload=True)
  1. Create a new file called "client_fastapi.py" and paste the following Python code into Visual Studio Code.

# client_fastapi.py

import asyncio
import httpx
import os

SERVER_URL = "http://127.0.0.1:8080/run"


async def query_server(client: httpx.AsyncClient, prompt: str) -> str:
    """Send a prompt to the FastAPI server and return the assistant's reply."""
    payload = {"input": prompt}
    response = await client.post(SERVER_URL, json=payload)
    response.raise_for_status()
    return response.json()["output"]

async def main() -> None:
    """Interactively query the FastAPI server until the user types "exit"/"quit"."""
    async with httpx.AsyncClient(timeout=120) as client:
        while True:
            prompt = input("Prompt (type 'exit' to quit): ").strip()
            if prompt.lower() in {"exit", "quit"}:
                print("Goodbye!")
                break
            try:
                answer = await query_server(client, prompt)
                print("\n=== RESPONSE ===\n", answer, "\n")
            except Exception as exc:
                print("ERROR:", exc)

if __name__ == "__main__":
    asyncio.run(main())
    
  8. Make sure to save all the files that you have created.
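
  9. Run the server by typing the following command in your terminal (with the virtual environment still active):

(.venv) PS C:\openai-agents> python server_fastapi.py

Uvicorn should report that it is running on http://127.0.0.1:8080. Keep this terminal open.

  10. Open a second terminal, activate the virtual environment again, and start the client:

(.venv) PS C:\openai-agents> python client_fastapi.py

Type a question at the "Prompt (type 'exit' to quit):" line. The server routes it through the Orchestrator → Knowledge Digger → Strategic Brain pipeline, and the client prints the final answer under "=== RESPONSE ===". Note that if the placeholder API keys in server_fastapi.py were not replaced with real keys, the tools will return error messages instead of results.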