Integration Examples
Ready-to-use recipes for common AI frameworks. Copy, adapt, and ship.
LangChain (Python)
Wrap LangChain's message pipeline with Guard. Inspect every HumanMessage and tool result before it reaches the LLM.
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_openai import ChatOpenAI
from scandar_guard import Guard
guard = Guard(api_key="sk-your-key")
def safe_invoke(messages: list, session_id: str) -> AIMessage:
    """Inspect every message with Guard, invoke the LLM, then inspect the reply.

    Raises:
        ValueError: if Guard blocks an incoming message or the model response.
    """
    for msg in messages:
        # Map the LangChain message class onto Guard's role vocabulary.
        # (The original mapped AIMessage to "tool" — AI turns are "assistant".)
        if isinstance(msg, HumanMessage):
            role = "user"
        elif isinstance(msg, ToolMessage):
            role = "tool"
        else:
            role = "assistant"
        result = guard.inspect_message(
            role=role,
            content=str(msg.content),
            session_id=session_id,
        )
        if result.should_block:
            # findings may be empty even when should_block is set; avoid IndexError.
            title = result.findings[0].title if result.findings else "security policy"
            raise ValueError(f"Blocked: {title}")
    llm = ChatOpenAI(model="gpt-4o")
    response = llm.invoke(messages)
    # Inspect the model's response for data leakage
    verdict = guard.inspect_message(
        role="assistant",
        content=response.content,
        session_id=session_id,
    )
    if verdict.should_block:
        # Act on the verdict so a leaky response never reaches the caller
        # (the original discarded this result).
        title = verdict.findings[0].title if verdict.findings else "security policy"
        raise ValueError(f"Blocked: {title}")
    return response
# Usage — raises ValueError when Guard blocks the message.
response = safe_invoke(
    [HumanMessage(content="Help me with my account")],
    session_id="sess_user_123",
)

With tool calls
from langchain.tools import tool
from scandar_guard import Guard
guard = Guard(api_key="sk-your-key")
@tool
def fetch_customer_data(customer_id: str) -> dict:
    """Fetch customer record from database."""
    # Inspect the tool call before executing.
    # NOTE(review): `current_session_id` must be defined elsewhere in the
    # application — confirm it is in scope where this tool runs.
    result = guard.inspect_tool_call(
        tool_name="fetch_customer_data",
        args={"customer_id": customer_id},
        session_id=current_session_id,
    )
    if result.should_block:
        # Return an error payload instead of raising so the agent can recover.
        return {"error": "Tool call blocked by security policy"}
    # Execute and inspect the result
    data = db.get_customer(customer_id)
    # NOTE(review): the verdict from this inspection is discarded; consider
    # checking should_block before returning the data to the agent.
    guard.inspect_tool_result(
        tool_name="fetch_customer_data",
        result=data,
        session_id=current_session_id,
    )
    return data

Vercel AI SDK (TypeScript)
Use Guard as middleware in the Vercel AI SDK streaming pipeline.
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";
import { Guard } from "scandar-guard";
const guard = new Guard({ apiKey: process.env.SCANDAR_API_KEY! });
export async function POST(req: Request) {
const { messages, sessionId } = await req.json();
// Inspect the latest user message
const lastMessage = messages[messages.length - 1];
if (lastMessage.role === "user") {
const result = await guard.inspectMessage({
role: "user",
content: lastMessage.content,
sessionId,
});
if (result.shouldBlock) {
return Response.json(
{ error: "Request blocked", findings: result.findings },
{ status: 403 }
);
}
}
// Stream the response
const result = streamText({
model: openai("gpt-4o"),
messages,
onFinish: async ({ text }) => {
// Inspect the completed response for PII/exfil
await guard.inspectMessage({
role: "assistant",
content: text,
sessionId,
});
},
});
return result.toDataStreamResponse();
}

FastAPI (Python)
Add Guard as a FastAPI middleware or dependency to protect every chat endpoint.
from fastapi import FastAPI, HTTPException, Depends
from pydantic import BaseModel
from scandar_guard import Guard
app = FastAPI()
guard = Guard(api_key="sk-your-key")
class ChatRequest(BaseModel):
    """Incoming chat payload; inspected by Guard before the LLM is called."""
    message: str     # user-supplied text, forwarded to Guard as content
    session_id: str  # forwarded to Guard as session_id
class ChatResponse(BaseModel):
    """Reply from the /chat endpoint."""
    response: str      # LLM output
    threat_score: int  # Guard threat score (this example handler returns 0)
async def check_guard(req: ChatRequest) -> ChatRequest:
    """FastAPI dependency that runs Guard inspection.

    Returns the request unchanged when it passes; raises 403 otherwise.
    """
    verdict = guard.inspect_message(
        role="user",
        content=req.message,
        session_id=req.session_id,
    )
    if not verdict.should_block:
        return req
    first_finding = verdict.findings[0].title if verdict.findings else None
    raise HTTPException(
        status_code=403,
        detail={
            "error": "Request blocked by security policy",
            "threat_score": verdict.threat_score,
            "finding": first_finding,
        },
    )
@app.post("/chat", response_model=ChatResponse)
async def chat(
    req: ChatRequest = Depends(check_guard),   # Guard inspection runs first
    llm_client = Depends(get_llm_client),
) -> ChatResponse:
    # check_guard already raised 403 for blocked input, so this body only
    # runs for requests Guard allowed through.
    response = await llm_client.chat(req.message)
    return ChatResponse(response=response, threat_score=0)

Tip
Use
Depends(check_guard) on every route that accepts user-supplied content. This keeps security logic out of your business handlers.

GitHub Actions — Full Pipeline
Scan all AI artifacts on every PR. Upload SARIF results to GitHub Code Scanning. Block merges if critical findings are present.
# .github/workflows/ai-security.yml
# Scans AI artifacts on every pull request and publishes findings to
# GitHub Code Scanning in SARIF format.
name: AI Security Gate

on:
  pull_request:
    # Only trigger when AI-related files change.
    paths:
      - "prompts/**"
      - "skills/**"
      - "agents/**"
      - "*.md"

jobs:
  security-scan:
    runs-on: ubuntu-latest
    permissions:
      security-events: write  # required to upload SARIF to Code Scanning
      contents: read
    steps:
      - uses: actions/checkout@v4

      - name: Scan AI artifacts
        uses: scandar/security-gate@v1
        with:
          path: "."
          include: "prompts/**,skills/**,agents/**"
          threshold: 70  # NOTE(review): presumably a 0-100 score cutoff — confirm
          fail-on: "critical,high"
          format: "sarif"
          output: "scandar-results.sarif"
        env:
          SCANDAR_API_KEY: ${{ secrets.SCANDAR_API_KEY }}

      - name: Upload to GitHub Code Scanning
        if: always()  # upload results even when the scan step failed the gate
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: scandar-results.sarif
          category: "scandar-ai-security"

      # Optional: require this check to pass before merge
# Set up branch protection rules in GitHub repo settings

CrewAI
Wrap CrewAI tasks and tool executions with Guard to monitor multi-agent pipelines.
from crewai import Agent, Task, Crew, Process
from crewai.tools import BaseTool
from scandar_guard import Guard
from functools import wraps
guard = Guard(api_key="sk-your-key")
def guarded_tool(session_id: str):
    """Decorator factory that wraps a CrewAI tool method with Guard inspection.

    The call is inspected before execution (and short-circuited with a
    [BLOCKED] string when policy denies it), and the tool output is inspected
    before it is fed back to the agent.
    """
    def decorator(tool_func):
        @wraps(tool_func)
        def wrapper(self, *args, **kwargs):
            # Accept positional arguments too: the original **kwargs-only
            # wrapper raised TypeError when the tool was invoked positionally.
            reported = dict(kwargs)
            if args:
                reported["*args"] = list(args)  # surface positionals to Guard
            # Inspect tool call before execution
            result = guard.inspect_tool_call(
                tool_name=self.name,
                args=reported,
                session_id=session_id,
            )
            if result.should_block:
                return f"[BLOCKED] Security policy blocked this tool call: {self.name}"
            output = tool_func(self, *args, **kwargs)
            # Inspect tool output before feeding back to agent
            guard.inspect_tool_result(
                tool_name=self.name,
                result=output,
                session_id=session_id,
            )
            return output
        return wrapper
    return decorator
# Apply to your tools
class FetchDataTool(BaseTool):
    """CrewAI tool whose execution is wrapped by Guard inspection."""
    name: str = "fetch_data"
    description: str = "Fetches data from the database"

    # NOTE(review): the session id is fixed at class-definition time; in real
    # code derive it per run instead of hard-coding a constant.
    @guarded_tool(session_id="crew_session_001")
    def _run(self, query: str) -> str:
        return db.execute(query)
# Create your crew as normal — the guarded tool needs no special wiring.
research_agent = Agent(
    role="Researcher",
    tools=[FetchDataTool()],
    verbose=True,
)

Go HTTP Middleware
Use Guard as HTTP middleware in Go to inspect incoming AI-related requests before they reach your handler.
package main
import (
"net/http"
"os"
"github.com/scandar-ai/scandar-guard-go/guard"
)
// guardMiddleware returns standard net/http middleware that inspects the
// "message" form value of each request with Guard before calling the next
// handler, returning 403 when Guard says to block.
func guardMiddleware(g *guard.Guard) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Inspect incoming AI-related requests
			result, err := g.InspectMessage(r.Context(), guard.Message{
				Role:    "user",
				Content: r.FormValue("message"),
			})
			// NOTE(review): fails open — if the Guard call errors, the request
			// is allowed through. Confirm that is the intended policy.
			if err == nil && result.ShouldBlock {
				http.Error(w, "Request blocked by security policy", http.StatusForbidden)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}
// main wires the guard middleware around the whole mux so every route,
// including /chat, is inspected before its handler runs.
func main() {
	g := guard.New(guard.Config{
		APIKey: os.Getenv("SCANDAR_API_KEY"),
	})
	mux := http.NewServeMux()
	mux.HandleFunc("/chat", handleChat)
	protected := guardMiddleware(g)(mux)
	// NOTE(review): ListenAndServe's error is ignored; log it in real code.
	http.ListenAndServe(":8080", protected)
}

Tip
The Go Guard SDK has zero external dependencies and works with any
net/http compatible router, including Chi, Gorilla Mux, and Gin.

Alert Webhooks
Receive real-time security alerts in Slack, PagerDuty, or any webhook endpoint.
Slack
# In Overwatch → Settings → Alerts → Add Webhook
# Set type: slack, paste your Incoming Webhook URL
# Or via API (the Content-Type header is required: without it, curl -d sends
# application/x-www-form-urlencoded and the JSON body will be rejected):
curl -X POST https://api.scandar.ai/v1/overwatch/alerts/webhooks \
  -H "Authorization: Bearer sk-your-key" \
  -H "Content-Type: application/json" \
  -d '{
    "name": "Security Slack Channel",
    "url": "https://hooks.slack.com/services/T.../B.../...",
    "type": "slack",
    "events": ["critical", "high"],
    "environments": ["production"]
}'

Custom webhook handler
// Express.js webhook receiver with signature verification
import express from "express";
import crypto from "crypto";

const app = express();
// Keep the raw body: the HMAC must be computed over the exact bytes sent.
app.use(express.raw({ type: "application/json" }));

app.post("/scandar-webhook", (req, res) => {
  const sig = req.headers["x-scandar-signature"] as string | undefined;
  const expected = crypto
    .createHmac("sha256", process.env.SCANDAR_WEBHOOK_SECRET!)
    .update(req.body)
    .digest("hex");
  // Compare in constant time — a plain !== leaks signature bytes via timing,
  // and also guard against a missing header.
  const expectedBuf = Buffer.from(`sha256=${expected}`);
  const sigBuf = Buffer.from(sig ?? "");
  if (
    sigBuf.length !== expectedBuf.length ||
    !crypto.timingSafeEqual(sigBuf, expectedBuf)
  ) {
    return res.status(401).send("Invalid signature");
  }
  const alert = JSON.parse(req.body.toString());
  if (alert.finding.severity === "critical") {
    // Page on-call
    pagerduty.trigger(alert);
  }
  // Log to your SIEM
  siem.ingest(alert);
  res.status(200).send("ok");
});

Warning
Always verify the
X-Scandar-Signature header before processing webhook payloads. Use your webhook secret from Dashboard → Settings → Webhooks.