TypeScript SDK
Complete API reference for the Vettly TypeScript SDK.
Installation
npm install @nextauralabs/vettly-sdk
Quick Start
import { ModerationClient } from '@nextauralabs/vettly-sdk'
const client = new ModerationClient({
apiKey: process.env.VETTLY_API_KEY!
})
const result = await client.check({
content: 'Text to moderate',
policyId: 'moderate',
contentType: 'text'
})
console.log('Safe:', result.safe)
ModerationClient
Constructor
new ModerationClient(config: ModerationClientConfig)
Config Options
interface ModerationClientConfig {
apiKey: string // Required: Your Vettly API key
apiUrl?: string // Optional: API base URL (default: https://api.vettly.dev)
timeout?: number // Optional: Request timeout in ms (default: 30000)
maxRetries?: number // Optional: Max retries for failed requests (default: 3)
retryDelay?: number // Optional: Base delay for exponential backoff in ms (default: 1000)
}
The SDK automatically retries failed requests with exponential backoff:
- Retries on rate limits (429) and server errors (5xx)
- Uses exponential backoff (1s, 2s, 4s, ...)
- Respects Retry-After headers from the API
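With the defaults, the delay before retry attempt n is roughly retryDelay * 2 ** (n - 1), assuming no Retry-After override. A sketch of the schedule (the helper is illustrative, not part of the SDK):
// Illustrative backoff schedule
const backoffDelay = (attempt: number, retryDelay = 1000): number =>
  retryDelay * 2 ** (attempt - 1)
console.log([1, 2, 3].map(n => backoffDelay(n))) // [1000, 2000, 4000]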
Example
const client = new ModerationClient({
apiKey: 'sk_live_xxxxx',
apiUrl: 'https://api.vettly.dev',
timeout: 30000, // 30 seconds
maxRetries: 3, // Retry up to 3 times
retryDelay: 1000 // Start with 1s delay
})
Content Moderation
check()
Check content for moderation violations.
async check(request: CheckRequest): Promise<CheckResponse>
Request
interface CheckRequest {
content: string // Content to check (text, base64 image)
policyId: string // Policy ID to use
contentType: 'text' | 'image' | 'video'
requestId?: string // Optional: Idempotency key for deduplication
metadata?: { // Optional metadata
userId?: string
ip?: string
userAgent?: string
[key: string]: any
}
}
Idempotency: Use requestId to prevent duplicate processing. If you retry a request with the same requestId, you'll get the cached result instead of processing the content again.
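For example, reusing a requestId across a retry returns the original decision instead of re-running moderation. A minimal sketch (randomUUID is just one way to mint a key):
import { randomUUID } from 'node:crypto'
const requestId = randomUUID()
// First call processes the content
const first = await client.check({
  content: 'Text to moderate',
  policyId: 'moderate',
  contentType: 'text',
  requestId
})
// Retry with the same requestId: served from the cached decision
const retry = await client.check({
  content: 'Text to moderate',
  policyId: 'moderate',
  contentType: 'text',
  requestId
})
console.log(first.decisionId === retry.decisionId) // expected: true if the cached decision is returned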
Response
interface CheckResponse {
safe: boolean // True if content is safe
flagged: boolean // True if any category triggered
action: 'allow' | 'warn' | 'flag' | 'block'
categories: Category[] // Category results
decisionId: string // Unique decision ID
provider: string // AI provider used
latency: number // Response time in ms
cost: number // Cost in USD
}
interface Category {
category: string // Category name
score: number // Confidence score (0-1)
threshold: number // Policy threshold
triggered: boolean // True if score >= threshold
}
Example
const result = await client.check({
content: 'Hello, world!',
policyId: 'moderate',
contentType: 'text',
metadata: {
userId: 'user_123',
ip: '192.168.1.1'
}
})
if (result.safe) {
console.log('Content is safe!')
} else {
console.log('Flagged categories:', result.categories.filter(c => c.triggered))
}
checkImage()
Convenience method for image moderation.
async checkImage(
imageUrl: string,
options?: {
policyId?: string
requestId?: string
metadata?: Record<string, unknown>
}
): Promise<CheckResponse>
Example
// From URL
const urlResult = await client.checkImage(
'https://example.com/image.jpg',
{ policyId: 'strict' }
)
// From base64
const result = await client.checkImage(
'data:image/jpeg;base64,/9j/4AAQ...',
{ policyId: 'moderate' }
)
if (result.action === 'block') {
console.log('Image blocked')
}
dryRun()
Test a policy without making actual AI provider calls.
async dryRun(
policyId: string,
mockScores?: Record<string, number>
): Promise<DryRunResponse>
Example
const result = await client.dryRun('strict', {
violence: 0.8, // Mock: 80% violence score
sexual: 0.3, // Mock: 30% sexual score
hate: 0.95 // Mock: 95% hate score
})
console.log('Would be blocked:', result.action === 'block')
console.log('Triggered categories:', result.categories.filter(c => c.triggered))
batchCheck()
Check multiple items in a single synchronous request.
async batchCheck(request: {
policyId: string
items: Array<{
id: string
content: string
contentType?: 'text' | 'image' | 'video'
metadata?: Record<string, unknown>
}>
}): Promise<BatchCheckResponse>
Example
const results = await client.batchCheck({
policyId: 'moderate',
items: [
{ id: 'comment_1', content: 'Great post!' },
{ id: 'comment_2', content: 'Inappropriate content...' },
{ id: 'comment_3', content: 'Thanks for sharing!' }
]
})
results.items.forEach(item => {
console.log(`${item.id}: ${item.safe ? 'Safe' : 'Flagged'}`)
})
batchCheckAsync()
Check multiple items asynchronously with webhook delivery.
async batchCheckAsync(request: {
policyId: string
items: Array<{
id: string
content: string
contentType?: 'text' | 'image' | 'video'
metadata?: Record<string, unknown>
}>
webhookUrl: string
}): Promise<{ batchId: string }>
Example
const batch = await client.batchCheckAsync({
policyId: 'moderate',
items: [
{ id: '1', content: 'Comment 1' },
{ id: '2', content: 'Comment 2' },
// ... 1000 items
],
webhookUrl: 'https://myapp.com/webhooks/moderation'
})
console.log('Batch ID:', batch.batchId)
// Results will be sent to the webhook when ready
Policy Management
createPolicy()
Create or update a moderation policy.
async createPolicy(
policyId: string,
yamlContent: string
): Promise<Policy>
Example
const yamlContent = `
name: My Custom Policy
categories:
violence:
threshold: 0.7
action: block
sexual:
threshold: 0.8
action: warn
`
const policy = await client.createPolicy(
'my_custom_policy',
yamlContent
)
console.log('Policy created:', policy.policyId)
getPolicy()
Get details of a specific policy.
async getPolicy(policyId: string): Promise<Policy>
Example
const policy = await client.getPolicy('moderate')
console.log('Policy name:', policy.name)
console.log('Categories:', policy.categories)
listPolicies()
List all available policies.
async listPolicies(): Promise<{ policies: Policy[] }>
Example
const { policies } = await client.listPolicies()
policies.forEach(policy => {
console.log(`${policy.policyId}: ${policy.name}`)
})
Decision Tracking
getDecision()
Get details of a specific moderation decision.
async getDecision(decisionId: string): Promise<Decision>
Example
const decision = await client.getDecision('dec_abc123')
console.log('Content:', decision.content)
console.log('Safe:', decision.safe)
console.log('Timestamp:', decision.timestamp)
listDecisions()
List recent moderation decisions.
async listDecisions(options?: {
limit?: number
offset?: number
}): Promise<{ decisions: Decision[] }>
Example
const { decisions } = await client.listDecisions({
limit: 100,
offset: 0
})
decisions.forEach(d => {
console.log(`${d.decisionId}: ${d.safe ? 'Safe' : 'Flagged'}`)
})
replayDecision()
Replay a past decision with a different policy.
async replayDecision(
decisionId: string,
policyId: string
): Promise<CheckResponse>
Example
// Original decision used 'moderate' policy
const original = await client.getDecision('dec_abc123')
// Replay with 'strict' policy
const replayed = await client.replayDecision('dec_abc123', 'strict')
console.log('Original action:', original.action)
console.log('Replayed action:', replayed.action)
getCurlCommand()
Get a cURL command to reproduce a decision.
async getCurlCommand(decisionId: string): Promise<string>
Example
const curl = await client.getCurlCommand('dec_abc123')
console.log('To reproduce this decision, run:')
console.log(curl)
Output:
curl -X POST https://api.vettly.dev/v1/check \
-H "Authorization: Bearer vettly_xxxxx" \
-H "Content-Type: application/json" \
-d '{"content":"...","policyId":"moderate","contentType":"text"}'Webhooks
registerWebhook()
Register a webhook endpoint for events.
async registerWebhook(request: {
url: string
events: string[]
description?: string
}): Promise<Webhook>
Example
const webhook = await client.registerWebhook({
url: 'https://myapp.com/webhooks/vettly',
events: ['moderation.completed', 'moderation.failed'],
description: 'Production webhook'
})
console.log('Webhook ID:', webhook.id)
listWebhooks()
List all registered webhooks.
async listWebhooks(): Promise<{ webhooks: Webhook[] }>
Example
const { webhooks } = await client.listWebhooks()
webhooks.forEach(hook => {
console.log(`${hook.id}: ${hook.url}`)
})
getWebhook()
Get details of a specific webhook.
async getWebhook(webhookId: string): Promise<Webhook>
Example
const webhook = await client.getWebhook('wh_abc123')
console.log('URL:', webhook.url)
console.log('Events:', webhook.events)
console.log('Enabled:', webhook.enabled)
updateWebhook()
Update webhook configuration.
async updateWebhook(
webhookId: string,
updates: {
url?: string
events?: string[]
description?: string
enabled?: boolean
}
): Promise<Webhook>
Example
const updated = await client.updateWebhook('wh_abc123', {
events: ['moderation.completed'], // Remove 'failed' event
enabled: true
})
deleteWebhook()
Delete a webhook endpoint.
async deleteWebhook(webhookId: string): Promise<void>
Example
await client.deleteWebhook('wh_abc123')
console.log('Webhook deleted')
testWebhook()
Send a test event to a webhook.
async testWebhook(
webhookId: string,
eventType: string
): Promise<{ success: boolean }>
Example
const result = await client.testWebhook(
'wh_abc123',
'moderation.completed'
)
console.log('Test successful:', result.success)
getWebhookDeliveries()
Get delivery logs for a webhook.
async getWebhookDeliveries(
webhookId: string,
options?: { limit?: number }
): Promise<{ deliveries: Delivery[] }>
Example
const { deliveries } = await client.getWebhookDeliveries('wh_abc123', {
limit: 50
})
deliveries.forEach(d => {
console.log(`${d.timestamp}: ${d.status} (${d.responseCode})`)
})
Webhook Signature Verification
Verify webhook signatures to ensure authenticity:
verifyWebhookSignature()
function verifyWebhookSignature(
payload: string,
signature: string,
secret: string
): boolean
constructWebhookEvent()
function constructWebhookEvent(payload: string): WebhookEvent
Example
import express from 'express'
import {
verifyWebhookSignature,
constructWebhookEvent
} from '@nextauralabs/vettly-sdk'
const app = express()
app.post('/webhooks/vettly',
express.raw({ type: 'application/json' }),
(req, res) => {
const payload = req.body.toString()
const signature = req.headers['x-vettly-signature'] as string
const webhookSecret = process.env.VETTLY_WEBHOOK_SECRET!
if (!verifyWebhookSignature(payload, signature, webhookSecret)) {
return res.status(401).send('Invalid signature')
}
const event = constructWebhookEvent(payload)
switch (event.type) {
case 'decision.blocked':
console.log('Content blocked:', event.data.id)
break
case 'decision.flagged':
console.log('Content flagged:', event.data.id)
break
}
res.status(200).send('OK')
}
)
The signature format is t=timestamp,v1=signature. The SDK:
- Validates the timestamp is within 5 minutes
- Uses constant-time comparison to prevent timing attacks
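If you need to verify a signature by hand, the check looks roughly like the sketch below. Note the signing scheme (HMAC-SHA256 over "timestamp.payload") is an assumption based on the header format; confirm it against verifyWebhookSignature before relying on it:
import { createHmac, timingSafeEqual } from 'node:crypto'
function verifySignatureSketch(payload: string, header: string, secret: string): boolean {
  // Parse "t=timestamp,v1=signature"
  const parts = Object.fromEntries(
    header.split(',').map(p => p.split('=') as [string, string])
  )
  const timestamp = Number(parts['t'])
  const signature = parts['v1']
  if (!timestamp || !signature) return false
  // Reject events older than 5 minutes (mirrors the SDK's tolerance)
  if (Math.abs(Date.now() / 1000 - timestamp) > 300) return false
  // Assumed signing scheme: HMAC-SHA256 over "timestamp.payload"
  const expected = createHmac('sha256', secret)
    .update(`${timestamp}.${payload}`)
    .digest('hex')
  // Constant-time comparison to prevent timing attacks
  const a = Buffer.from(expected)
  const b = Buffer.from(signature)
  return a.length === b.length && timingSafeEqual(a, b)
}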
Express Middleware
moderateContent()
Express.js middleware for automatic content moderation.
function moderateContent(options: {
client: ModerationClient
policyId: string
field?: string
onFlagged?: (req, res, result: CheckResponse) => void
}): ExpressMiddleware
Example
import express from 'express'
import { ModerationClient, moderateContent } from '@nextauralabs/vettly-sdk'
const app = express()
const client = new ModerationClient({ apiKey: 'vettly_xxxxx' })
app.post('/api/comments',
moderateContent({
client,
policyId: 'moderate',
field: 'body.content', // Check req.body.content
onFlagged: (req, res, result) => {
// Custom handling
res.status(400).json({
error: 'Content flagged',
categories: result.categories.filter(c => c.triggered)
})
}
}),
async (req, res) => {
// Only reaches here if content is safe
const comment = await saveComment(req.body)
res.json(comment)
}
)
Default Behavior
If onFlagged is not provided:
- action: 'block' → 403 response with error
- action: 'warn' or 'flag' → Continue to next middleware
- action: 'allow' → Continue to next middleware
Nested Fields
app.post('/api/posts',
moderateContent({
client,
policyId: 'moderate',
field: 'body.post.content' // Checks req.body.post.content
}),
handler
)
Error Handling
The SDK provides typed exceptions for better error handling:
import {
ModerationClient,
VettlyAuthError,
VettlyRateLimitError,
VettlyQuotaError,
VettlyValidationError,
VettlyError,
} from '@nextauralabs/vettly-sdk'
const client = new ModerationClient({ apiKey: 'sk_live_...' })
try {
const result = await client.check({
content: 'Text',
policyId: 'moderate',
contentType: 'text'
})
} catch (error) {
if (error instanceof VettlyAuthError) {
console.error('Invalid API key')
} else if (error instanceof VettlyRateLimitError) {
console.error(`Rate limited. Retry after ${error.retryAfter}s`)
} else if (error instanceof VettlyQuotaError) {
console.error(`Quota exceeded: ${error.quota}`)
} else if (error instanceof VettlyValidationError) {
console.error(`Invalid request: ${error.errors}`)
} else if (error instanceof VettlyError) {
console.error(`API error: ${error.message}`)
}
}
Error Types
| Error Class | HTTP Status | Description |
|---|---|---|
| VettlyAuthError | 401 | Invalid or expired API key |
| VettlyValidationError | 422 | Invalid request parameters |
| VettlyRateLimitError | 429 | Too many requests (has retryAfter) |
| VettlyQuotaError | 402 | Usage quota exceeded (has quota) |
| VettlyError | 5xx | Server error (auto-retried by SDK) |
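Because VettlyRateLimitError exposes retryAfter (in seconds, as in the example above), you can add one last application-level retry once the SDK's built-in retries are exhausted. A minimal sketch, assuming client, CheckRequest, and CheckResponse from earlier on this page:
async function checkWithRateLimitRetry(request: CheckRequest): Promise<CheckResponse> {
  try {
    return await client.check(request)
  } catch (error) {
    if (error instanceof VettlyRateLimitError) {
      // Wait out the advertised window, then try once more
      await new Promise(resolve => setTimeout(resolve, error.retryAfter * 1000))
      return client.check(request)
    }
    throw error
  }
}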
Fail Open Strategy
In production, decide explicitly how to handle moderation failures. The example below fails open, allowing content through when the check itself errors:
async function moderateContent(content: string): Promise<boolean> {
try {
const result = await client.check({
content,
policyId: 'moderate',
contentType: 'text'
})
return result.action !== 'block'
} catch (error) {
// Fail open: allow content if moderation fails
console.error('Moderation failed:', error)
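// Or fail closed: return false here instead to block content when moderation is unavailable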
return true
}
}
TypeScript Types
All types are exported from the SDK:
import type {
// Request/Response
CheckRequest,
CheckResponse,
Category,
ModerationClientConfig,
// Resources
Policy,
Decision,
Webhook,
WebhookEvent,
} from '@nextauralabs/vettly-sdk'
// Error classes (not types, actual classes)
import {
VettlyError,
VettlyAuthError,
VettlyRateLimitError,
VettlyQuotaError,
VettlyValidationError,
} from '@nextauralabs/vettly-sdk'
const request: CheckRequest = {
content: 'Text to check',
policyId: 'moderate',
contentType: 'text'
}
const handleResult = (response: CheckResponse) => {
console.log('Safe:', response.safe)
}
Advanced Usage
Rate Limiting
import pLimit from 'p-limit'
const limit = pLimit(10) // Max 10 concurrent requests
const results = await Promise.all(
comments.map(comment =>
limit(() =>
client.check({
content: comment.text,
policyId: 'moderate',
contentType: 'text'
})
)
)
)
Caching Results
const cache = new Map<string, CheckResponse>()
async function checkWithCache(
client: ModerationClient,
content: string,
policyId: string
): Promise<CheckResponse> {
const cacheKey = `${policyId}:${content}`
if (cache.has(cacheKey)) {
return cache.get(cacheKey)!
}
const result = await client.check({
content,
policyId,
contentType: 'text'
})
cache.set(cacheKey, result)
return result
}
Multi-Policy Check
async function checkMultiplePolicies(
client: ModerationClient,
content: string,
policyIds: string[]
): Promise<Map<string, CheckResponse>> {
const results = await Promise.all(
policyIds.map(policyId =>
client.check({ content, policyId, contentType: 'text' })
)
)
return new Map(
policyIds.map((policyId, i) => [policyId, results[i]])
)
}
const results = await checkMultiplePolicies(
client,
'Hello world',
['lenient', 'moderate', 'strict']
)
console.log('Lenient:', results.get('lenient')?.safe)
console.log('Moderate:', results.get('moderate')?.safe)
console.log('Strict:', results.get('strict')?.safe)
See Also
- REST API - Direct HTTP API reference
- Webhooks - Webhook event reference
- Express Integration - Express.js guide
- Next.js Integration - Next.js guide