# server.py
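# Minimal FastAPI backend that forwards chat requests to the Dedalus runner and
# relays the model's streamed output back to the browser as Server-Sent Events (SSE).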
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from dotenv import load_dotenv
from dedalus_labs import AsyncDedalus
from dedalus_labs.lib.runner import DedalusRunner

# Load environment variables (API keys, etc.) from a local .env file.
load_dotenv()

app = FastAPI()

# Allow the browser frontend on localhost:3000 to call this API directly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],
    allow_methods=["POST"],
    allow_headers=["*"],
)

client = AsyncDedalus()
runner = DedalusRunner(client)
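

# POST /api/chat: expects a JSON body with an OpenAI-style "messages" list and an
# optional "model" override, for example (illustrative shape, not an exact schema):
#   {"messages": [{"role": "user", "content": "Hello"}], "model": "openai/gpt-5.2"}
# The model's output is relayed back to the caller as Server-Sent Events.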
@app.post("/api/chat")
async def chat(request: Request):
    body = await request.json()
    messages = body.get("messages", [])
    model = body.get("model", "openai/gpt-5.2")

    # Start a streaming run against the requested model.
    stream = runner.run(
        messages=messages,
        model=model,
        stream=True,
    )

    # Wrap each chunk in SSE framing and finish with a [DONE] sentinel
    # so the client knows the stream is complete.
    async def generate():
        async for chunk in stream:
            yield f"data: {chunk.model_dump_json()}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(
        generate(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
        },
    )


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
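
# Run with `python server.py` (or `uvicorn server:app --reload --port 8000`) and
# exercise the stream with, for example:
#   curl -N http://localhost:8000/api/chat \
#     -H 'Content-Type: application/json' \
#     -d '{"messages": [{"role": "user", "content": "Hello"}]}'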