Next.js Integration Guide
Complete guide to integrating IndigiArmor prompt scanning into a Next.js application with the indigiarmor SDK (server-side) and indigiarmor-react components (client-side).
1. Installation
npm install indigiarmor indigiarmor-react

- indigiarmor — server-side SDK for scanning prompts via the IndigiArmor API.
- indigiarmor-react — pre-built React components for displaying scan results.
2. Environment Variables
Add to your .env.local (or Vercel/hosting environment):
# Required — your IndigiArmor API key
INDIGIARMOR_API_KEY=ia_sk_your_key_here
# Optional — override the default API base URL
# Default: https://indigiarmor.com
# IMPORTANT: Must match the final domain (see "Common Pitfalls" section)
INDIGIARMOR_BASE_URL=https://www.indigiarmor.com

Important
Do NOT prefix with NEXT_PUBLIC_ — the API key must stay server-side only.
3. Server-Side Client Setup
Create a shared client module that all API routes import:
import { IndigiArmor } from 'indigiarmor';
import type { GuardResult, ScanResult } from 'indigiarmor';
// Singleton client — created once per server process and reused across requests.
let client: IndigiArmor | null = null;
// Guards the missing-key warning so it is logged once per process, not per request.
let warnedMissingKey = false;

/**
 * Lazily creates and caches the shared IndigiArmor client.
 *
 * Returns null (treated by callers as "scanning disabled") when:
 *  - INDIGIARMOR_API_KEY is not set, or
 *  - the IndigiArmor constructor throws.
 */
function getClient(): IndigiArmor | null {
  if (client) return client;

  const apiKey = process.env.INDIGIARMOR_API_KEY;
  if (!apiKey) {
    if (!warnedMissingKey) {
      console.warn('[IndigiArmor] INDIGIARMOR_API_KEY not set — scanning disabled');
      warnedMissingKey = true;
    }
    return null;
  }

  try {
    // Optional override; must be the final, post-redirect domain — see
    // "Common Pitfalls: Auth Header Stripped on Redirects".
    const baseUrl = process.env.INDIGIARMOR_BASE_URL;
    client = new IndigiArmor(apiKey, {
      timeout: 10_000, // 10s cap on scan API calls
      ...(baseUrl ? { baseUrl } : {}),
    });
    return client;
  } catch (error) {
    console.error('[IndigiArmor] Failed to initialize client:', error);
    return null;
  }
}
/** Outcome of scanning one piece of user input via scanInput(). */
export interface ScanInputResult {
  /** True when the text may proceed to the AI call. */
  allowed: boolean;
  /** Text to forward — PII-redacted for yellow-tier results, original text otherwise. */
  sanitizedText: string;
  /** Human-readable reason, present when the input was blocked. */
  reason?: string;
  /** Scan tier reported by IndigiArmor, when a scan actually ran. */
  tier?: 'green' | 'yellow' | 'red';
  /** Full scan payload — returned on block so the client can render WarningOverlay. */
  scanResult?: ScanResult;
}
// Once an auth error proves the key invalid, scanning stays off for the rest
// of the process lifetime. (Clearing only the cached client, as before, meant
// getClient() recreated it with the same invalid key on the next request, so
// every request re-attempted a doomed scan and re-logged the error.)
let scanningDisabledByAuthError = false;

/**
 * Scans user input through IndigiArmor and decides whether it may proceed.
 *
 * Policy:
 *  - No API key configured, or an auth error already seen → fail open (allow unscanned).
 *  - Safe result → allow, forwarding the (possibly sanitized) prompt.
 *  - Flagged result → block, returning reason/tier/scanResult for the UI.
 *  - Transient failure (network, timeout, rate limit) → fail closed (block).
 */
export async function scanInput(text: string): Promise<ScanInputResult> {
  if (scanningDisabledByAuthError) {
    return { allowed: true, sanitizedText: text };
  }

  const armorClient = getClient();
  if (!armorClient) {
    // No key configured — the app works without IndigiArmor.
    return { allowed: true, sanitizedText: text };
  }

  try {
    const guard: GuardResult = await armorClient.guard(text, {
      // Yellow-tier results are auto-redacted and returned as guard.prompt.
      yellowStrategy: 'sanitize',
    });

    if (guard.safe) {
      return {
        allowed: true,
        sanitizedText: guard.prompt,
        tier: guard.result.tier,
      };
    }

    return {
      allowed: false,
      sanitizedText: text,
      reason: guard.result.explanation || 'Content flagged by security scan',
      tier: guard.result.tier,
      scanResult: guard.result,
    };
  } catch (error) {
    // Invalid/revoked keys fail OPEN so a fake key cannot buy the blocking UI —
    // see "Invalid Key vs Transient Failure" below.
    const isAuthError = error instanceof Error && (
      error.message.includes('Invalid API key') ||
      error.message.includes('401') ||
      error.name === 'AuthenticationError'
    );
    if (isAuthError) {
      console.error('[IndigiArmor] Invalid API key, disabling scanning');
      scanningDisabledByAuthError = true;
      client = null;
      return { allowed: true, sanitizedText: text };
    }

    // Anything else (network, timeout, 429) fails CLOSED to protect data.
    return {
      allowed: false,
      sanitizedText: text,
      reason: 'Safety scan unavailable — please try again shortly',
      tier: 'red',
    };
  }
}

Key design decisions:
- Singleton client — created once and reused across requests
- Graceful skip — if the API key is not set, scanning is disabled (app works without IndigiArmor)
- yellowStrategy: 'sanitize' — yellow-tier results auto-redact sensitive content and return the sanitized prompt
- Full scanResult returned — needed by the client to render the WarningOverlay
4. Scanning API Routes
Add scanning to any Next.js App Router route that accepts user text input:
import { scanInput } from '@/lib/indigiarmor';
/**
 * Example App Router handler with IndigiArmor scanning.
 *
 * Note: `prompt` is destructured with `const`, so the original snippet's
 * `prompt = scan.sanitizedText` was a compile error ("Cannot assign to
 * 'prompt' because it is a constant"). The sanitized text is carried in a
 * separate `safePrompt` variable instead, matching the full example below.
 */
export async function POST(req: Request) {
  const { prompt, skipArmorScan } = await req.json();

  // Auth check first...

  // Scan user input (skip if user chose "Send Anyway")
  let safePrompt = prompt;
  if (!skipArmorScan) {
    const scan = await scanInput(prompt);
    if (!scan.allowed) {
      return Response.json(
        { error: 'content_flagged', reason: scan.reason, scanResult: scan.scanResult },
        { status: 400 }
      );
    }
    // Use the sanitized text (PII redacted for yellow-tier)
    safePrompt = scan.sanitizedText;
  }

  // Proceed with AI call
  const result = await callYourAI(safePrompt);
  return Response.json(result);
}

When content is flagged, return this structure so the client can render the WarningOverlay:
{
"error": "content_flagged",
"reason": "Blocked: SSN detected (dashed format)",
"scanResult": {
"tier": "red",
"action": "block",
"risk_score": 8,
"signals": [
{
"domain": "pii",
"type": "ssn",
"description": "SSN detected (dashed format)",
"confidence": 0.95,
"weight": 8,
"severity": "critical"
}
],
"explanation": "Blocked: SSN detected (dashed format)",
"sanitized_prompt": null,
"active_domains": ["pii"],
"latency_ms": 34
}
}

Use the machine-readable "content_flagged" error code so the client can reliably detect scan blocks vs other errors.
5. Client-Side WarningOverlay
Import WarningOverlay from indigiarmor-react and render it when the API returns a scan block:
'use client';

import { useState } from 'react';
import { WarningOverlay } from 'indigiarmor-react';
import type { ScanResult } from 'indigiarmor-react';

export default function MyPage() {
  // Non-null while a scan block is being shown; drives the WarningOverlay below.
  const [scanResult, setScanResult] = useState<ScanResult | null>(null);

  const handleSubmit = async (prompt: string) => {
    const response = await fetch('/api/your-route', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ prompt }),
    });

    if (!response.ok) {
      const data = await response.json();
      // Detect IndigiArmor scan block via the machine-readable error code
      // the server route returns alongside the full scanResult.
      if (data.error === 'content_flagged' && data.scanResult) {
        setScanResult(data.scanResult);
        return;
      }
      throw new Error(data.error || 'Request failed');
    }

    // Process successful response...
  };

  return (
    <div>
      {/* Your page content */}
      {scanResult && (
        <WarningOverlay
          scanResult={scanResult}
          onEditPrompt={() => setScanResult(null)}
          onAutoRedact={() => setScanResult(null)}
          onSendAnyway={() => setScanResult(null)}
          onDismiss={() => setScanResult(null)}
        />
      )}
    </div>
  );
}

The WarningOverlay automatically adapts its styling based on scanResult.tier — red border for blocked, yellow border for flagged.
Advanced Patterns
6. Send Anyway Bypass Flow
To let users bypass the scan after seeing the warning, store the blocked request context and re-submit with a bypass flag:
const [scanResult, setScanResult] = useState<ScanResult | null>(null);
const [blockedPrompt, setBlockedPrompt] = useState('');
const handleSubmit = async (prompt: string, skipArmorScan?: boolean) => {
const response = await fetch('/api/your-route', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
prompt,
...(skipArmorScan ? { skipArmorScan: true } : {}),
}),
});
if (!response.ok) {
const data = await response.json();
if (data.error === 'content_flagged' && data.scanResult) {
setScanResult(data.scanResult);
setBlockedPrompt(prompt);
return;
}
throw new Error(data.error);
}
// Success...
};
// In JSX:
{scanResult && (
<WarningOverlay
scanResult={scanResult}
onEditPrompt={() => {
setScanResult(null);
setInputValue(blockedPrompt);
}}
onAutoRedact={() => {
setScanResult(null);
if (scanResult.sanitized_prompt) {
setInputValue(scanResult.sanitized_prompt);
}
}}
onSendAnyway={() => {
const prompt = blockedPrompt;
setScanResult(null);
setBlockedPrompt('');
handleSubmit(prompt, true);
}}
onDismiss={() => {
setScanResult(null);
setBlockedPrompt('');
}}
/>
)}

On the server, the matching route honors the bypass flag:

export async function POST(req: Request) {
const { prompt, skipArmorScan } = await req.json();
if (!skipArmorScan) {
const scan = await scanInput(prompt);
if (!scan.allowed) {
return Response.json(
{ error: 'content_flagged', reason: scan.reason, scanResult: scan.scanResult },
{ status: 400 }
);
}
}
// Proceed with AI call...
}

7. Edit Prompt and Auto Redact
When the user clicks "Edit Prompt", put their original message back in the input field — not any internal wrapper/template you may have added around it.
// Bad — stores the wrapped prompt
setBlockedPrompt(wrappedPromptWithSystemInstructions);
// Good — stores only what the user typed
setBlockedPrompt(originalUserInput);

The scanResult.sanitized_prompt field contains the prompt with sensitive data redacted (e.g., SSNs replaced with [REDACTED]). This is only available for yellow-tier results. For red-tier, sanitized_prompt is null.
8. Chat UI — Removing Blocked Messages
If your app adds user messages to the chat UI before sending to the server (optimistic updates), remove the blocked message to prevent sensitive data from lingering in the chat history:
// Message was added optimistically before the API call
setMessages(prev => [...prev, { role: 'user', content: prompt }]);
// If blocked — remove the message with sensitive data
if (data.error === 'content_flagged') {
setMessages(prev => prev.filter(m => m.content !== prompt));
setScanResult(data.scanResult);
return;
}

Error Handling
9. Fail-Closed vs Fail-Open
When IndigiArmor is unreachable (network error, timeout, server down), you must choose:
| Strategy | Behavior | Use When |
|---|---|---|
Fail-open | Allow the request through | Development, non-sensitive applications |
Fail-closed | Block the request | Production, sensitive data protection |
Recommended: Fail-closed when a valid API key is configured, skip entirely when no key is set.
10. Invalid Key vs Transient Failure
Not all errors should be treated the same:
| Error Type | Cause | Action |
|---|---|---|
AuthenticationError (401) | Invalid, expired, or revoked API key | Fail open — disable scanning |
Network/Timeout | Server unreachable, DNS failure | Fail closed — block to protect data |
RateLimitError (429) | Too many requests | Fail closed — temporary, resolves shortly |
ValidationError (400) | Bad request payload | Fail open — likely a code bug |
Why invalid keys fail open: If invalid keys caused blocking, someone could set INDIGIARMOR_API_KEY=fake and get the blocking UI without a valid subscription. By failing open on auth errors, invalid keys simply disable scanning. Only valid, paying keys get real protection.
11. Multi-Route Integration
For apps with multiple API routes that accept user input, apply the same pattern to each route:
Each server route gets:
import { scanInput } from '@/lib/indigiarmor'- Scan check after auth, before processing
skipArmorScansupport for "Send Anyway"scanResultin the 400 response
Each client page/component gets:
import { WarningOverlay } from 'indigiarmor-react'useState<ScanResult | null>for the scan result- Error handler that detects
content_flagged - WarningOverlay rendered with all four callbacks
Common Pitfalls
Auth Header Stripped on Redirects
If the IndigiArmor server redirects (e.g., indigiarmor.com → www.indigiarmor.com), the Authorization header is stripped during the redirect per HTTP spec. This causes 401 Missing or malformed Authorization header.
Fix: Set INDIGIARMOR_BASE_URL to the final domain (after all redirects):
# Wrong — redirects to www, auth header lost
INDIGIARMOR_BASE_URL=https://indigiarmor.com
# Correct — no redirect, auth header preserved
INDIGIARMOR_BASE_URL=https://www.indigiarmor.com

Wrapped Prompts Leaking to Users
If your app wraps user prompts in templates (system instructions, conversation history), make sure "Edit Prompt" returns only the original user input, not the full wrapped text.
// Store raw user input BEFORE wrapping
const rawUserPrompt = userInput;
const wrappedPrompt = `SYSTEM: ${systemPrompt}\n\nUSER: ${userInput}`;
// When blocked, save the raw input for "Edit Prompt"
setBlockedPrompt(rawUserPrompt); // Not wrappedPrompt

Sensitive Data Lingering in Chat
If messages are added to the chat UI before the API call (optimistic updates), blocked messages will display the sensitive data until explicitly removed. Always remove blocked messages from the chat state.
Scanning in the Wrong Layer
The scan must happen server-side, not client-side:
- Client-side scanning exposes the API key in browser DevTools
- Client-side scanning can be bypassed by calling the API directly
- Server-side scanning gates ALL access to the AI, regardless of client
Full Working Example
Server — app/api/chat/route.ts
import { NextRequest } from 'next/server';
import { scanInput } from '@/lib/indigiarmor';
/**
 * Chat endpoint with IndigiArmor input scanning.
 *
 * Blocked input returns 400 with the machine-readable "content_flagged"
 * code plus the full scanResult so the client can render WarningOverlay.
 * "Send Anyway" resubmissions set skipArmorScan to bypass the scan.
 */
export async function POST(req: NextRequest) {
  const { prompt, skipArmorScan } = await req.json();

  if (!prompt) {
    return Response.json({ error: 'Prompt is required' }, { status: 400 });
  }

  // TODO: Add your auth check here

  // Scan user input (unless bypassed via "Send Anyway")
  let safePrompt = prompt;
  if (!skipArmorScan) {
    const scan = await scanInput(prompt);
    if (!scan.allowed) {
      return Response.json(
        { error: 'content_flagged', reason: scan.reason, scanResult: scan.scanResult },
        { status: 400 }
      );
    }
    // Yellow-tier scans return the prompt with PII redacted — forward that version.
    safePrompt = scan.sanitizedText;
  }

  // Safe to call AI
  const result = await yourAICall(safePrompt);
  return Response.json(result);
}

Client — app/chat/page.tsx
'use client';

import { useState } from 'react';
import { WarningOverlay } from 'indigiarmor-react';
import type { ScanResult } from 'indigiarmor-react';

export default function ChatPage() {
  const [input, setInput] = useState('');
  const [messages, setMessages] = useState<Array<{ role: string; content: string }>>([]);
  // Non-null while the WarningOverlay is showing a scan block.
  const [scanResult, setScanResult] = useState<ScanResult | null>(null);
  // Raw text of the blocked message — restored by "Edit Prompt", resent by "Send Anyway".
  const [blockedPrompt, setBlockedPrompt] = useState('');

  const sendMessage = async (prompt: string, skipScan?: boolean) => {
    // Optimistic update: show the user's message immediately.
    setMessages(prev => [...prev, { role: 'user', content: prompt }]);
    setInput('');

    const response = await fetch('/api/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        prompt,
        ...(skipScan ? { skipArmorScan: true } : {}),
      }),
    });

    if (!response.ok) {
      const data = await response.json();
      if (data.error === 'content_flagged' && data.scanResult) {
        // Remove the optimistic message so sensitive data does not linger in the chat.
        setMessages(prev => prev.filter(m => m.content !== prompt));
        setScanResult(data.scanResult);
        setBlockedPrompt(prompt);
        return;
      }
      throw new Error(data.error);
    }

    const data = await response.json();
    setMessages(prev => [...prev, { role: 'assistant', content: data.reply }]);
  };

  return (
    <div>
      {messages.map((msg, i) => (
        <div key={i}>{msg.role}: {msg.content}</div>
      ))}
      <input
        value={input}
        onChange={e => setInput(e.target.value)}
        onKeyDown={e => e.key === 'Enter' && sendMessage(input)}
      />
      {scanResult && (
        <WarningOverlay
          scanResult={scanResult}
          onEditPrompt={() => {
            // Put the original text back so the user can revise it.
            setScanResult(null);
            setInput(blockedPrompt);
          }}
          onAutoRedact={() => {
            // sanitized_prompt is only present for yellow-tier results; red-tier is null.
            setScanResult(null);
            if (scanResult.sanitized_prompt) {
              setInput(scanResult.sanitized_prompt);
            }
          }}
          onSendAnyway={() => {
            // Re-submit with skipScan=true so the server bypasses the scan.
            const prompt = blockedPrompt;
            setScanResult(null);
            setBlockedPrompt('');
            sendMessage(prompt, true);
          }}
          onDismiss={() => {
            setScanResult(null);
            setBlockedPrompt('');
          }}
        />
      )}
    </div>
  );
}

Detection Domains
| Domain | What It Detects |
|---|---|
PII | SSNs, phone numbers, email addresses, credit card numbers, physical addresses |
FERPA | Student education records, grades, enrollment status, disciplinary records |
Cultural | Indigenous cultural knowledge, sacred/ceremonial information, traditional practices |
Re-identification | Combinations of quasi-identifiers (age + zip + gender) that could identify someone |
Injection | Prompt injection attacks, jailbreak attempts, instruction override patterns |
What gets blocked vs flagged depends on your policy configuration.