Python Integration
Add content moderation to your Python app with a simple SDK.
Installation
pip install vettly

Quick Start
from vettly import ModerationClient
client = ModerationClient(api_key="sk_live_...")
result = client.check(
content="Hello, this is a friendly message!",
policy_id="moderate"
)
if result.action == "block":
print("Content blocked:", result.categories)
else:
print("Content allowed")

Configuration
from vettly import ModerationClient
client = ModerationClient(
api_key="sk_live_...",
api_url="https://api.vettly.dev", # Optional: custom API URL
timeout=30.0, # Request timeout in seconds (default: 30)
max_retries=3, # Max retries for failures (default: 3)
retry_delay=1.0, # Base delay for backoff in seconds (default: 1)
)

The SDK automatically retries failed requests with exponential backoff:
- Retries on rate limits (429) and server errors (5xx)
- Uses exponential backoff (1s, 2s, 4s, ...)
- Respects `Retry-After` headers from the API
Text Moderation
result = client.check(
content="User-generated text",
policy_id="moderate"
)
print(result.action) # 'allow' | 'flag' | 'block'
print(result.safe) # True if content is safe
print(result.flagged) # True if any category triggered
print(result.id) # Decision ID for audit trail
for category in result.categories:
print(f"{category.category}: {category.score} (triggered: {category.triggered})")

Image Moderation
Use the convenience method for images:
# From URL
result = client.check_image(
image_url="https://example.com/image.jpg",
policy_id="strict"
)
# From base64
result = client.check_image(
image_url="data:image/jpeg;base64,/9j/4AAQ...",
policy_id="strict"
)
if result.action == "block":
print("Image blocked")

Or use the general check() method:
import base64
with open("image.jpg", "rb") as f:
image_base64 = base64.b64encode(f.read()).decode()
result = client.check(
content=f"data:image/jpeg;base64,{image_base64}",
policy_id="strict",
content_type="image"
)

Idempotency
Prevent duplicate processing with request IDs:
result = client.check(
content="Hello",
policy_id="default",
request_id="unique-request-id-123" # Idempotency key
)

If you retry the same request with the same request_id, you'll get the cached result instead of processing again.
Error Handling
The SDK provides typed exceptions for better error handling:
from vettly import (
ModerationClient,
VettlyAuthError,
VettlyRateLimitError,
VettlyQuotaError,
VettlyValidationError,
VettlyServerError,
)
client = ModerationClient(api_key="sk_live_...")
try:
result = client.check(content="test", policy_id="default")
except VettlyAuthError:
print("Invalid API key")
except VettlyRateLimitError as e:
print(f"Rate limited. Retry after {e.retry_after}s")
except VettlyQuotaError as e:
print(f"Quota exceeded: {e.quota}")
except VettlyValidationError as e:
print(f"Invalid request: {e.errors}")
except VettlyServerError:
print("Server error - retry later")

Fail Open Strategy
For production, decide how to handle moderation failures:
def moderate_content(content: str) -> bool:
"""Returns True if content should be allowed."""
try:
result = client.check(content=content, policy_id="moderate")
return result.action != "block"
except Exception as e:
# Fail open: allow content if moderation fails
print(f"Moderation failed: {e}")
return True

Webhook Signature Verification
Verify webhook signatures to ensure authenticity:
from vettly import verify_webhook_signature, construct_webhook_event
# Flask example
@app.route("/webhooks/vettly", methods=["POST"])
def handle_webhook():
payload = request.get_data(as_text=True)
signature = request.headers.get("X-Vettly-Signature")
webhook_secret = "whsec_..." # From dashboard
if not verify_webhook_signature(payload, signature, webhook_secret):
return "Invalid signature", 401
event = construct_webhook_event(payload)
if event["type"] == "decision.blocked":
decision = event["data"]
print(f"Content blocked: {decision['id']}")
return "OK", 200

The signature format is `t=timestamp,v1=signature`. The SDK:
- Validates the timestamp is within 5 minutes
- Uses constant-time comparison to prevent timing attacks
Async Support
For async applications, use AsyncModerationClient:
from vettly import AsyncModerationClient
async def moderate_content():
async with AsyncModerationClient(api_key="sk_live_...") as client:
result = await client.check(
content="Check this message",
policy_id="moderate"
)
return result.action
# Or without context manager
client = AsyncModerationClient(api_key="sk_live_...")
result = await client.check(content="Hello", policy_id="moderate")
await client.close()

Flask Example
from flask import Flask, request, jsonify
from vettly import ModerationClient, VettlyError
app = Flask(__name__)
client = ModerationClient(api_key="sk_live_...")
@app.route('/api/comments', methods=['POST'])
def create_comment():
data = request.json
content = data.get('content')
try:
result = client.check(content=content, policy_id="moderate")
if result.action == "block":
return jsonify({
"error": "Content blocked",
"categories": [c.category for c in result.categories if c.triggered]
}), 403
# Save to database
return jsonify({"success": True})
except VettlyError as e:
# Fail open on moderation errors
print(f"Moderation error: {e}")
return jsonify({"success": True})
if __name__ == '__main__':
app.run()

FastAPI Example
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from vettly import AsyncModerationClient, VettlyError
app = FastAPI()
client = AsyncModerationClient(api_key="sk_live_...")
class Comment(BaseModel):
content: str
@app.post("/api/comments")
async def create_comment(comment: Comment):
try:
result = await client.check(
content=comment.content,
policy_id="moderate"
)
if result.action == "block":
raise HTTPException(
status_code=403,
detail="Content blocked by moderation"
)
return {"success": True}
except VettlyError:
# Fail open
return {"success": True}
@app.on_event("shutdown")
async def shutdown():
await client.close()

Django Example
# views.py
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from vettly import ModerationClient, VettlyError
import json
client = ModerationClient(api_key="sk_live_...")
@csrf_exempt
@require_POST
def create_comment(request):
data = json.loads(request.body)
content = data.get('content')
try:
result = client.check(content=content, policy_id="moderate")
if result.action == "block":
return JsonResponse({"error": "Content blocked"}, status=403)
# Save comment
return JsonResponse({"success": True})
except VettlyError as e:
# Fail open
return JsonResponse({"success": True})

Batch Moderation
For multiple items, use the batch endpoint:
from vettly import ModerationClient, BatchItem
client = ModerationClient(api_key="sk_live_...")
items = [
BatchItem(id="1", content="First message"),
BatchItem(id="2", content="Second message"),
BatchItem(id="3", content="Third message"),
]
result = client.batch_check(policy_id="moderate", items=items)
print(f"Batch ID: {result.batch_id}")
print(f"Total: {result.total}, Completed: {result.completed}")
for item_result in result.results:
print(f"{item_result.id}: {item_result.action}")

Custom Policies
Use your custom policies by ID:
result = client.check(
content="Message to check",
policy_id="my-custom-policy" # Your policy ID from dashboard
)

Built-in policies: lenient, moderate, strict
Response Structure
result = client.check(content="Hello", policy_id="moderate")
# CheckResponse attributes
result.id # str: Unique decision ID
result.safe # bool: True if content is safe
result.flagged # bool: True if any category triggered
result.action # Action: 'allow' | 'flag' | 'block'
result.categories # List[CategoryResult]: Category scores
result.latency_ms # int: Response time in milliseconds
result.policy_id # str: Policy used
result.provider # str: AI provider used (e.g., 'hive')
# CategoryResult attributes
for cat in result.categories:
cat.category # str: Category name
cat.score # float: Score 0-1
cat.threshold # float: Threshold for triggering
cat.triggered # bool: Whether threshold was exceeded

Environment Variables
export VETTLY_API_KEY=sk_live_...

import os
from vettly import ModerationClient
client = ModerationClient(api_key=os.environ["VETTLY_API_KEY"])

Next Steps
- Custom Policies - Define moderation rules
- Image & Video - Moderate visual content
- Error Handling - Production error strategies
- Webhooks - Real-time notifications