MCP’s three primitives (tools, resources, prompts) are building blocks. Combine them creatively.

Sequential Thinking

Keep the model on track during complex reasoning. The model calls this tool repeatedly, building a chain of thoughts it can revise or branch.
from dataclasses import dataclass, field
from dedalus_mcp import MCPServer, tool

@dataclass
class ThinkingState:
    thoughts: list[dict] = field(default_factory=list)
    branches: dict[str, list[dict]] = field(default_factory=dict)

state = ThinkingState()

@tool(description="""Step-by-step reasoning with revision support.

Use when:
- Breaking down complex problems
- Planning that might need course correction
- Analysis where the full scope isn't clear initially

You can revise previous thoughts, branch into alternatives, or extend
beyond your initial estimate.""")
def think(
    thought: str,
    thought_number: int,
    total_thoughts: int,
    next_thought_needed: bool,
    is_revision: bool = False,
    revises_thought: int | None = None,
    branch_id: str | None = None,
) -> dict:
    entry = {
        "number": thought_number,
        "thought": thought,
        "is_revision": is_revision,
        "revises": revises_thought,
    }

    if branch_id:
        state.branches.setdefault(branch_id, []).append(entry)
    else:
        state.thoughts.append(entry)

    return {
        "thought_number": thought_number,
        "total_thoughts": total_thoughts,
        "next_thought_needed": next_thought_needed,
        "history_length": len(state.thoughts),
        "branches": list(state.branches.keys()),
    }

server = MCPServer("reasoning")
server.collect(think)
The model decides when to think, revise, or branch. You just provide the infrastructure.
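For illustration, a hypothetical sequence of calls the model might make (a first pass, a revision, and a branch):
# Hypothetical call sequence: in practice the model issues these, not your code.
think("Break the migration into schema, data, and cutover phases.",
      thought_number=1, total_thoughts=3, next_thought_needed=True)
think("Cutover needs a rollback plan. Revising the original breakdown.",
      thought_number=2, total_thoughts=4, next_thought_needed=True,
      is_revision=True, revises_thought=1)
think("Explore a zero-downtime alternative.",
      thought_number=3, total_thoughts=4, next_thought_needed=True,
      branch_id="zero-downtime")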

Context Rehydration

Persist important context to a database. After context compaction (when the context window fills up and earlier turns are summarized away), fetch it back instantly.
from datetime import datetime
from dedalus_mcp import MCPServer, tool, resource

# Could be Redis, SQLite, Postgres, etc.
memory_store: dict[str, dict] = {}

@tool(description="Save important context for later retrieval")
def remember(key: str, content: str, tags: list[str] | None = None) -> dict:
    memory_store[key] = {
        "content": content,
        "tags": tags or [],
        "saved_at": datetime.now().isoformat(),
    }
    return {"saved": key}

@tool(description="Retrieve previously saved context")
def recall(key: str) -> dict:
    if key not in memory_store:
        return {"error": f"No memory for key: {key}"}
    return memory_store[key]

@tool(description="Find memories by tag")
def search_memories(tag: str) -> list[dict]:
    return [
        {"key": k, **v}
        for k, v in memory_store.items()
        if tag in v.get("tags", [])
    ]

@resource(uri="memory://index", description="All saved memory keys")
def memory_index() -> dict:
    return {
        "keys": list(memory_store.keys()),
        "count": len(memory_store),
    }

server = MCPServer("memory")
server.collect(remember, recall, search_memories, memory_index)
Start conversations with: “Check memory://index for context from previous sessions.”
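A minimal round trip, calling the tools directly (in practice the model issues these calls):
remember("project-goals", "Migrate billing to the new API by Q3",
         tags=["billing", "planning"])

# After compaction, a fresh context rehydrates in one call:
recall("project-goals")
# -> {"content": "Migrate billing to the new API by Q3", "tags": [...], "saved_at": "..."}
search_memories("billing")
# -> [{"key": "project-goals", "content": "...", "tags": [...], "saved_at": "..."}]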

Live Data Feeds

Resources can push updates. Build dashboards, monitoring, or real-time collaboration.
from dedalus_mcp import MCPServer, resource
from datetime import datetime
import asyncio

metrics = {"cpu": 0.0, "memory": 0.0, "requests": 0}

@resource(uri="system://metrics", description="Live system metrics")
def get_metrics() -> dict:
    return {"timestamp": datetime.now().isoformat(), **metrics}

server = MCPServer("monitoring")
server.collect(get_metrics)

async def update_metrics():
    while True:
        # get_cpu_usage / get_memory_usage / get_request_count stand in
        # for your own collectors (psutil, a metrics endpoint, etc.)
        metrics["cpu"] = get_cpu_usage()
        metrics["memory"] = get_memory_usage()
        metrics["requests"] = get_request_count()
        await server.notify_resource_updated("system://metrics")
        await asyncio.sleep(5)
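Note that update_metrics is defined but never scheduled; it has to run alongside the server. One sketch, assuming the server exposes an async serve() entry point (verify against your library version):
async def main():
    # Keep a reference so the background task isn't garbage-collected.
    poller = asyncio.create_task(update_metrics())
    await server.serve()  # assumed entry point; check the actual API

if __name__ == "__main__":
    asyncio.run(main())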
Subscribed clients receive a notifications/resources/updated message whenever the data changes.

Persona Switching

Prompts define behavior. Let users switch the model’s persona on demand.
from dedalus_mcp import MCPServer, prompt

@prompt("persona/architect", description="Senior software architect")
def architect_persona(args):
    return [
        ("assistant", """You are a senior software architect with 20 years of experience.
You think in systems, not features. You ask clarifying questions before proposing solutions.
You consider maintainability, scalability, and team dynamics."""),
    ]

@prompt("persona/reviewer", description="Strict code reviewer")
def reviewer_persona(args):
    return [
        ("assistant", """You are a meticulous code reviewer.
You catch bugs others miss. You insist on tests.
You're constructive but don't let things slide."""),
    ]

@prompt("persona/rubber-duck", description="Patient debugging companion")
def rubber_duck_persona(args):
    return [
        ("assistant", """You help by asking questions, not giving answers.
When someone explains their problem, ask what they've tried.
Help them think through it systematically."""),
    ]

server = MCPServer("personas")
server.collect(architect_persona, reviewer_persona, rubber_duck_persona)
Clients call prompts/get with the name persona/architect to shift behavior mid-conversation.
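On the client side, a persona is fetched like any other prompt. A sketch using the official MCP Python SDK's ClientSession (session setup elided; get_prompt is the standard client call):
# Sketch: assumes `session` is an established ClientSession from the official mcp SDK.
async def show_persona(session):
    result = await session.get_prompt("persona/architect")
    for message in result.messages:
        print(message.role, message.content)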

Guardrails

Tools can validate and constrain model behavior.
from dedalus_mcp import MCPServer, tool
import os

ALLOWED_PATHS = ["/app/data", "/app/config"]
BLOCKED_PATTERNS = ["password", "secret", "api_key"]

@tool(description="Read a file (with safety checks)")
def safe_read(path: str) -> dict:
    # Path validation
    if not any(path.startswith(allowed) for allowed in ALLOWED_PATHS):
        return {"error": f"Access denied: {path}"}

    content = open(path).read()

    # Content filtering
    for pattern in BLOCKED_PATTERNS:
        if pattern in content.lower():
            return {"error": "Content contains sensitive data"}

    return {"content": content}

@tool(description="Execute a command (restricted)")
def safe_exec(command: str) -> dict:
    allowed = ["ls", "cat", "grep", "find"]
    cmd = command.split()[0]

    if cmd not in allowed:
        return {"error": f"Command not allowed: {cmd}"}

    # Execute safely...
    return {"output": "..."}
The model can only do what you allow.
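Calling the tools directly shows the constraints in action (the config path is a hypothetical example):
safe_read("/etc/passwd")           # -> {"error": "Access denied: /etc/passwd"}
safe_read("/app/config/app.toml")  # -> {"content": "..."} if the filter passes
safe_exec("rm -rf /")              # -> {"error": "Command not allowed: rm"}
safe_exec("ls /app/data")          # -> {"output": "..."}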

Workflow Orchestration

Chain tools into multi-step workflows with checkpoints.
from dedalus_mcp import MCPServer, tool, resource
from enum import Enum

class WorkflowStatus(Enum):
    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"

workflows: dict[str, dict] = {}

@tool(description="Start a new workflow")
def start_workflow(workflow_id: str, steps: list[str]) -> dict:
    workflows[workflow_id] = {
        "status": WorkflowStatus.RUNNING.value,
        "steps": steps,
        "current_step": 0,
        "results": [],
    }
    return {"workflow_id": workflow_id, "status": "started"}

@tool(description="Complete current step and advance")
def complete_step(workflow_id: str, result: str) -> dict:
    wf = workflows.get(workflow_id)
    if not wf:
        return {"error": "Workflow not found"}

    wf["results"].append(result)
    wf["current_step"] += 1

    if wf["current_step"] >= len(wf["steps"]):
        wf["status"] = WorkflowStatus.COMPLETED.value

    return {
        "step_completed": wf["current_step"],
        "next_step": wf["steps"][wf["current_step"]] if wf["current_step"] < len(wf["steps"]) else None,
        "status": wf["status"],
    }

@resource(uri="workflows://active", description="All active workflows")
def active_workflows() -> dict:
    return {
        wid: wf for wid, wf in workflows.items()
        if wf["status"] == WorkflowStatus.RUNNING.value
    }
The model manages complex multi-step processes with clear state.
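A walk through one workflow, calling the tools directly:
start_workflow("deploy-42", ["build", "test", "release"])
complete_step("deploy-42", "build ok")
# -> {"step_completed": 1, "next_step": "test", "status": "running"}
complete_step("deploy-42", "tests green")
complete_step("deploy-42", "released")
# -> {"step_completed": 3, "next_step": None, "status": "completed"}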

Audit Trail

Log everything the model does for compliance or debugging.
from dedalus_mcp import MCPServer, tool, resource
from datetime import datetime

audit_log: list[dict] = []

def log_action(action: str, details: dict):
    audit_log.append({
        "timestamp": datetime.now().isoformat(),
        "action": action,
        **details,
    })

@tool(description="Perform a sensitive operation")
def sensitive_operation(operation: str, target: str) -> dict:
    log_action("sensitive_operation", {"operation": operation, "target": target})
    # ... do the thing ...
    return {"status": "completed"}

@resource(uri="audit://log", description="Complete audit trail")
def get_audit_log() -> list[dict]:
    return audit_log

@resource(uri="audit://recent", description="Last 10 actions")
def recent_actions() -> list[dict]:
    return audit_log[-10:]
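To log every tool without instrumenting each one by hand, one option is a small decorator stacked under @tool. A sketch: delete_record is a hypothetical example, and whether @tool's schema generation follows the wrapped signature is something to verify against your library version.
import functools

def audited(fn):
    # Record every invocation before delegating to the real tool function.
    @functools.wraps(fn)  # preserves the metadata @tool inspects
    def wrapper(*args, **kwargs):
        log_action(fn.__name__, {"args": args, "kwargs": kwargs})
        return fn(*args, **kwargs)
    return wrapper

@tool(description="Delete a record (audited)")
@audited
def delete_record(record_id: str) -> dict:  # hypothetical tool
    # ... perform the deletion ...
    return {"deleted": record_id}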

Mix and Match

The real power is combining patterns:
  • Sequential thinking + memory: Save reasoning chains for later reference
  • Guardrails + audit trail: Log blocked attempts (sketched after this list)
  • Live feeds + workflows: Monitor workflow progress in real-time
  • Personas + prompts: Context-aware behavior switching
Build what your use case needs.
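For example, the guardrails + audit trail bullet above takes only a few lines: wrap the guarded tool and record refusals. A sketch: safe_read_audited is a hypothetical name, and it assumes the @tool-decorated safe_read remains directly callable.
@tool(description="Read a file (with safety checks, audited)")
def safe_read_audited(path: str) -> dict:
    result = safe_read(path)  # assumes the decorated function stays callable
    if "error" in result:
        # Blocked attempts are exactly what compliance reviews want to see.
        log_action("blocked_read", {"path": path, "reason": result["error"]})
    return result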