Code Examples
Working code examples in popular programming languages to get you started quickly.
cURL Examples
Command Line Examples
# Basic text moderation
# POST the text as JSON; authentication is a Bearer token in the header.
curl -X POST https://api.moder8r.app/v1/moderate/text \
  -H "Authorization: Bearer m8r_sk_your_key_here" \
  -H "Content-Type: application/json" \
  -d '{
    "content": "This is some text to moderate for harmful content."
  }'

# Check usage statistics
curl -X GET https://api.moder8r.app/v1/usage \
  -H "Authorization: Bearer m8r_sk_your_key_here"

Node.js / JavaScript
Basic Implementation
// Basic moderation function: POSTs the text to the moderation endpoint
// and returns the parsed JSON result. Throws on any non-2xx response.
async function moderateText(content) {
  const endpoint = 'https://api.moder8r.app/v1/moderate/text';
  const requestInit = {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${process.env.MODER8R_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ content }),
  };

  const response = await fetch(endpoint, requestInit);
  if (!response.ok) {
    throw new Error(`HTTP error! status: ${response.status}`);
  }
  return response.json();
}
// Example usage with error handling.
// Returns { allowed, ... }: flagged content is rejected with its categories,
// while any API failure fails open (content allowed, manual review flagged).
async function moderateUserPost(postContent) {
  try {
    const { id, result: verdict } = await moderateText(postContent);

    if (verdict.flagged) {
      console.log('Content flagged:', verdict.categories);
      return {
        allowed: false,
        reason: verdict.recommendation,
        details: verdict.categories,
      };
    }

    return { allowed: true, moderationId: id };
  } catch (error) {
    console.error('Moderation failed:', error);
    // Fail safe - allow content but log for manual review
    return {
      allowed: true,
      requiresManualReview: true,
      error: error.message,
    };
  }
}
// Usage (top-level await requires an ES module or an async wrapper)
const postResult = await moderateUserPost("User's post content here");
console.log(postResult);

Express.js Middleware
Automatic Content Moderation
// Express.js middleware for automatic moderation
const express = require('express');
const app = express();
// Parse JSON request bodies so req.body is populated for route handlers
app.use(express.json());
// Moderation middleware.
// - Flagged content: responds 400 with the flagged category names.
// - Clean content: stores the moderation id on req.moderationId and continues.
// - API failure: records the message on req.moderationError and continues,
//   so the app can route the post to manual review instead of dropping it.
async function moderationMiddleware(req, res, next) {
  const { content } = req.body;
  if (!content) {
    return next(); // Skip moderation if no content
  }
  try {
    const response = await fetch('https://api.moder8r.app/v1/moderate/text', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${process.env.MODER8R_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({ content })
    });
    // Fix: a non-2xx reply (rate limit, auth error, outage) carries no
    // `result` payload — fail out explicitly instead of crashing later
    // on `moderation.result.flagged` with an opaque TypeError.
    if (!response.ok) {
      throw new Error(`Moderation API error: ${response.status}`);
    }
    const moderation = await response.json();
    if (moderation.result.flagged) {
      return res.status(400).json({
        error: 'Content not allowed',
        reason: moderation.result.recommendation,
        categories: Object.keys(moderation.result.categories)
          .filter(cat => moderation.result.categories[cat].flagged)
      });
    }
    // Store moderation ID for audit trail
    req.moderationId = moderation.id;
    next();
  } catch (error) {
    console.error('Moderation error:', error);
    // In production, decide whether to block or allow with manual review
    req.moderationError = error.message;
    next(); // Continue with manual review flag
  }
}
// Apply middleware to routes that need moderation
app.post('/api/posts', moderationMiddleware, (req, res) => {
  // Create post logic here
  res.json({
    success: true,
    moderationId: req.moderationId,
    message: 'Post created successfully'
  });
});
// Start the HTTP server
app.listen(3000, () => {
  console.log('Server running on port 3000');
});

Python
Python Client Class
import requests
import os
from typing import Dict, Optional
class Moder8rClient:
    """Thin client for the moder8r content-moderation REST API.

    Reads the API key from the MODER8R_API_KEY environment variable when
    one is not passed explicitly.
    """

    def __init__(self, api_key: Optional[str] = None, timeout: float = 30.0):
        """Create a client.

        Args:
            api_key: Bearer token; falls back to the MODER8R_API_KEY env var.
            timeout: Per-request timeout in seconds.

        Raises:
            ValueError: If no API key is available.
        """
        self.api_key = api_key or os.getenv('MODER8R_API_KEY')
        self.base_url = 'https://api.moder8r.app/v1'
        # Fix: requests calls without a timeout can hang forever on a stalled
        # connection; every request below now passes this timeout.
        self.timeout = timeout
        if not self.api_key:
            raise ValueError("API key is required")

    def moderate_text(self, content: str) -> Dict:
        """Moderate text content and return the full API result.

        Raises:
            requests.HTTPError: On a non-2xx response.
        """
        url = f"{self.base_url}/moderate/text"
        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }
        data = {'content': content}
        response = requests.post(url, headers=headers, json=data,
                                 timeout=self.timeout)
        response.raise_for_status()
        return response.json()

    def get_usage(self) -> Dict:
        """Get current usage statistics."""
        url = f"{self.base_url}/usage"
        headers = {'Authorization': f'Bearer {self.api_key}'}
        response = requests.get(url, headers=headers, timeout=self.timeout)
        response.raise_for_status()
        return response.json()

    def is_content_safe(self, content: str) -> Dict:
        """Simple boolean check if content is safe.

        Never raises: on any failure it returns a dict that marks the
        content for manual review instead (fail-safe default).
        """
        try:
            result = self.moderate_text(content)
            return {
                'safe': not result['result']['flagged'],
                'recommendation': result['result']['recommendation'],
                'confidence': result['result']['confidence'],
                'flagged_categories': [
                    cat for cat, details in result['result']['categories'].items()
                    if details['flagged']
                ]
            }
        except Exception as e:
            # Log error and default to manual review
            print(f"Moderation error: {e}")
            return {
                'safe': False,
                'error': str(e),
                'requires_manual_review': True
            }
# Example usage
def main():
    """Demonstrate the Moder8rClient helper methods."""
    client = Moder8rClient()

    sample = "This is a sample text for moderation testing."

    # Full moderation payload
    print("Moderation result:", client.moderate_text(sample))

    # Boolean-style safety check
    print("Safety check:", client.is_content_safe(sample))

    # Current billing-period usage
    usage = client.get_usage()
    print(f"Usage: {usage['current_period']['usage_percentage']:.1f}%")
if __name__ == "__main__":
    main()

PHP
PHP Client Implementation
<?php
class Moder8rClient {
    // moder8r API bearer token
    private $apiKey;
    private $baseUrl = 'https://api.moder8r.app/v1';

    /**
     * @param string|null $apiKey Falls back to the MODER8R_API_KEY env var.
     * @throws Exception When no API key is available.
     */
    public function __construct($apiKey = null) {
        $this->apiKey = $apiKey ?: getenv('MODER8R_API_KEY');
        if (!$this->apiKey) {
            throw new Exception('API key is required');
        }
    }

    /**
     * POST the content to /moderate/text and return the decoded result.
     *
     * @param string $content Text to moderate.
     * @return array Decoded API response.
     * @throws Exception On transport failure, non-200 status, or bad JSON.
     */
    public function moderateText($content) {
        $url = $this->baseUrl . '/moderate/text';
        $data = json_encode(['content' => $content]);
        $headers = [
            'Authorization: Bearer ' . $this->apiKey,
            'Content-Type: application/json',
            'Content-Length: ' . strlen($data)
        ];
        $ch = curl_init();
        curl_setopt($ch, CURLOPT_URL, $url);
        curl_setopt($ch, CURLOPT_POST, true);
        curl_setopt($ch, CURLOPT_POSTFIELDS, $data);
        curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
        curl_setopt($ch, CURLOPT_TIMEOUT, 30);
        $response = curl_exec($ch);
        $httpCode = curl_getinfo($ch, CURLINFO_HTTP_CODE);
        // Fix: capture the transport error before closing the handle.
        // curl_exec() returns false on failure; the original code passed
        // that to json_decode and reported a misleading
        // "API request failed: 0".
        $curlError = curl_error($ch);
        curl_close($ch);
        if ($response === false) {
            throw new Exception('cURL error: ' . $curlError);
        }
        if ($httpCode !== 200) {
            throw new Exception('API request failed: ' . $httpCode);
        }
        $decoded = json_decode($response, true);
        if ($decoded === null) {
            // Fix: surface malformed JSON instead of silently returning null.
            throw new Exception('Invalid JSON in API response');
        }
        return $decoded;
    }

    /**
     * Fail-safe safety check: never throws; on any error the content is
     * marked unsafe and routed to manual review.
     *
     * @param string $content Text to moderate.
     * @return array Summary with 'safe', categories, and moderation id.
     */
    public function isContentSafe($content) {
        try {
            $result = $this->moderateText($content);
            $flaggedCategories = [];
            foreach ($result['result']['categories'] as $category => $details) {
                if ($details['flagged']) {
                    $flaggedCategories[] = $category;
                }
            }
            return [
                'safe' => !$result['result']['flagged'],
                'recommendation' => $result['result']['recommendation'],
                'confidence' => $result['result']['confidence'],
                'flagged_categories' => $flaggedCategories,
                'moderation_id' => $result['id']
            ];
        } catch (Exception $e) {
            error_log('Moderation error: ' . $e->getMessage());
            return [
                'safe' => false,
                'error' => $e->getMessage(),
                'requires_manual_review' => true
            ];
        }
    }
}
// Example usage
try {
    $client = new Moder8rClient();
    $content = "This is some text to moderate.";
    $result = $client->isContentSafe($content);
    if ($result['safe']) {
        echo "Content is safe to publish\n";
        // Process content...
    } else {
        echo "Content flagged: " . implode(', ', $result['flagged_categories']) . "\n";
        // Handle flagged content...
    }
} catch (Exception $e) {
    // Only construction errors (missing API key) reach here;
    // isContentSafe() itself never throws.
    echo "Error: " . $e->getMessage() . "\n";
}
?>

Go
Go Client Implementation
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
)
// Moder8rClient is a small HTTP client for the moder8r moderation API.
type Moder8rClient struct {
	APIKey  string       // bearer token sent on every request
	BaseURL string       // API root, e.g. https://api.moder8r.app/v1
	Client  *http.Client // underlying HTTP client (carries the timeout)
}

// ModerationRequest is the JSON body for POST /moderate/text.
type ModerationRequest struct {
	Content string `json:"content"`
}

// ModerationResponse mirrors the JSON returned by /moderate/text.
type ModerationResponse struct {
	ID     string `json:"id"`
	Status string `json:"status"`
	Result struct {
		Flagged        bool    `json:"flagged"`
		Confidence     float64 `json:"confidence"`
		Recommendation string  `json:"recommendation"`
		Categories     map[string]struct {
			Flagged     bool    `json:"flagged"`
			Probability float64 `json:"probability"`
		} `json:"categories"`
	} `json:"result"`
}
// NewModer8rClient builds a client, falling back to the MODER8R_API_KEY
// environment variable when apiKey is empty.
func NewModer8rClient(apiKey string) *Moder8rClient {
	key := apiKey
	if key == "" {
		key = os.Getenv("MODER8R_API_KEY")
	}
	httpClient := &http.Client{Timeout: 30 * time.Second}
	return &Moder8rClient{
		APIKey:  key,
		BaseURL: "https://api.moder8r.app/v1",
		Client:  httpClient,
	}
}
// ModerateText submits content to POST /moderate/text and decodes the reply.
// It returns an error on marshal failure, network failure, a non-200
// status, or undecodable JSON.
func (c *Moder8rClient) ModerateText(content string) (*ModerationResponse, error) {
	payload, err := json.Marshal(ModerationRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %v", err)
	}

	req, err := http.NewRequest("POST", c.BaseURL+"/moderate/text", bytes.NewBuffer(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+c.APIKey)
	req.Header.Set("Content-Type", "application/json")

	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("API error: %d", resp.StatusCode)
	}

	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %v", err)
	}
	out := &ModerationResponse{}
	if err := json.Unmarshal(raw, out); err != nil {
		return nil, fmt.Errorf("failed to unmarshal response: %v", err)
	}
	return out, nil
}
// IsContentSafe reports whether content passed moderation, together with
// the names of any flagged categories. The error is non-nil only when the
// underlying API call itself failed.
func (c *Moder8rClient) IsContentSafe(content string) (bool, []string, error) {
	res, err := c.ModerateText(content)
	if err != nil {
		return false, nil, err
	}
	var flagged []string
	for name, info := range res.Result.Categories {
		if info.Flagged {
			flagged = append(flagged, name)
		}
	}
	return !res.Result.Flagged, flagged, nil
}
func main() {
	// An empty key makes the constructor fall back to MODER8R_API_KEY.
	client := NewModer8rClient("")
	content := "This is some text to moderate."
	safe, flagged, err := client.IsContentSafe(content)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	if safe {
		fmt.Println("Content is safe")
	} else {
		fmt.Printf("Content flagged for: %v\n", flagged)
	}
}

Framework Integration Examples
Django
Python Web Framework
# Django view with moderation
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
import json
import requests
import os
@csrf_exempt
@require_http_methods(["POST"])
def create_comment(request):
    """Create a comment, rejecting content the moderation API flags.

    Expects a JSON body of the form {"content": "..."}. Returns 400 for a
    malformed body, missing content, or flagged content; 500 for
    unexpected server-side failures.
    """
    # Fix: malformed JSON is a client error, not a server error — report it
    # as 400 instead of letting json.loads fall into the generic 500 handler.
    try:
        data = json.loads(request.body)
    except (json.JSONDecodeError, UnicodeDecodeError):
        return JsonResponse({'error': 'Invalid JSON body'}, status=400)

    try:
        content = data.get('content', '')
        if not content:
            return JsonResponse({'error': 'Content is required'}, status=400)

        # Moderate content
        moderation_result = moderate_content(content)
        if not moderation_result['safe']:
            return JsonResponse({
                'error': 'Content not allowed',
                'reason': moderation_result.get('reason', 'Content flagged'),
                'flagged_categories': moderation_result.get('flagged_categories', [])
            }, status=400)

        # Save comment to database (your model logic here)
        comment = Comment.objects.create(
            content=content,
            moderation_id=moderation_result.get('moderation_id'),
            # other fields...
        )
        return JsonResponse({
            'success': True,
            'comment_id': comment.id,
            'message': 'Comment created successfully'
        })
    except Exception as e:
        # NOTE(review): returning str(e) can leak internals to clients;
        # consider a generic message plus server-side logging in production.
        return JsonResponse({'error': str(e)}, status=500)
def moderate_content(content):
    """Moderate content using moder8r API.

    Returns a dict with a 'safe' flag; any API failure degrades to
    safe=False with 'requires_manual_review' set (fail-safe default).
    """
    headers = {
        'Authorization': f'Bearer {os.getenv("MODER8R_API_KEY")}',
        'Content-Type': 'application/json'
    }
    try:
        response = requests.post(
            'https://api.moder8r.app/v1/moderate/text',
            headers=headers,
            json={'content': content},
            timeout=10
        )
        if response.status_code != 200:
            # Log error and default to manual review
            return {
                'safe': False,
                'error': f'API error: {response.status_code}',
                'requires_manual_review': True
            }
        result = response.json()
        flagged = [
            cat for cat, details in result['result']['categories'].items()
            if details['flagged']
        ]
        return {
            'safe': not result['result']['flagged'],
            'moderation_id': result['id'],
            'reason': result['result']['recommendation'],
            'flagged_categories': flagged
        }
    except Exception as e:
        # Log error and default to manual review
        return {
            'safe': False,
            'error': str(e),
            'requires_manual_review': True
        }
        }

Integration Patterns
✅ Recommended Patterns
- • Middleware approach: Automatic moderation for all content
- • Fail-safe defaults: Allow with manual review on API errors
- • Async processing: Moderate content in background when possible
- • Caching: Store moderation results to avoid re-checking
- • Audit trails: Keep moderation IDs for compliance
- • Rate limit handling: Implement backoff and retry logic
💡 Performance Tips
- • Batch processing: Group content moderation when possible
- • Smart queuing: Prioritize real-time vs batch content
- • Environment variables: Store API keys securely
- • Error monitoring: Track moderation success rates
- • Usage monitoring: Track API usage to optimize costs
- • Client libraries: Use official SDKs when available
Environment Setup
Environment Variables
All examples assume your API key is stored in environment variables:
Environment Setup
# .env file
MODER8R_API_KEY=m8r_sk_your_actual_key_here
# Production
export MODER8R_API_KEY=m8r_sk_your_actual_key_here
# Development
cp .env.example .env
# Edit .env with your API key

Security Note: Never commit API keys to version control. Use environment variables or secure key management services in production.