Understand how requests flow through HITL.sh, from creation to human review and back to your system
def create_review_request(content, ai_analysis):
    """Submit a piece of content to the HITL.sh content-moderation loop.

    Packages the content text, the AI model's analysis, and user metadata
    into a single review request and hands it to the HITL client.

    NOTE(review): assumes `hitl_client`, `determine_priority`, and
    `get_user_flag_count` are provided elsewhere in the integration.

    Returns the request object created by the client.
    """
    return hitl_client.create_request(
        loop_id="content_moderation",
        data={
            "content": content.text,
            "content_type": "text",
            # Priority is derived from the model's risk score so riskier
            # content reaches reviewers sooner.
            "priority": determine_priority(ai_analysis.risk_score),
            # Model output forwarded so reviewers can see what the AI flagged.
            "ai_analysis": {
                "confidence": ai_analysis.confidence,
                "flags": ai_analysis.flags,
                "risk_score": ai_analysis.risk_score,
                "model_version": ai_analysis.model_version,
            },
            # Context about the submitting user, including their flag history.
            "metadata": {
                "user_id": content.user_id,
                "timestamp": content.created_at,
                "source": "social_media_api",
                "previous_flags": get_user_flag_count(content.user_id),
            },
        },
    )
Reviewer Skills
Workload Balance
Priority Rules
Escalation Paths
def handle_completed_request(request_id):
    """React to a finished HITL review: route the human decision, record it.

    Fetches the reviewer's response, dispatches to the matching handler,
    then updates local state and writes an analytics log entry.

    NOTE(review): assumes `hitl_client` and the four decision handlers are
    defined elsewhere in the integration.
    """
    result = hitl_client.get_response(request_id)

    # Route the human decision to the appropriate handler. Unknown
    # decision values fall through without a handler call.
    decision = result.decision
    if decision == "approved":
        process_approval(result)
    elif decision == "rejected":
        process_rejection(result)
    elif decision == "needs_changes":
        request_modifications(result)
    elif decision == "escalate":
        escalate_to_senior_reviewer(result)

    # Keep our own record in sync with the reviewer's verdict.
    update_request_status(request_id, result.decision)

    # Persist the outcome for later analytics/reporting.
    log_decision(request_id, result)
{
  "loop_id": "string",
  "content": "string",
  "content_type": "text|image|video|document",
  "priority": "low|normal|high|urgent"
}
{
  "ai_analysis": {
    "confidence": 0.75,
    "flags": ["potential_harm", "inappropriate_language"],
    "risk_score": 0.8,
    "model_version": "v2.1.0",
    "processing_time": 0.15
  },
  "metadata": {
    "user_id": "user_123",
    "timestamp": "2024-01-15T10:30:00Z",
    "source": "api_endpoint",
    "session_id": "sess_abc",
    "ip_address": "192.168.1.1"
  },
  "context": {
    "previous_decisions": 3,
    "user_reputation": "trusted",
    "content_category": "user_generated",
    "language": "en"
  }
}
# Example payload for reviewing a plain-text submission (e.g. a comment).
text_request = {
    "content": "User submitted comment text",
    "content_type": "text",
    # The model's verdict: moderate confidence, flagged for possible hate speech.
    "ai_analysis": {
        "confidence": 0.65,
        "flags": ["potential_hate_speech"],
        "risk_score": 0.7,
    },
    # Extra signals that help a reviewer judge the text quickly.
    "context": {
        "language": "en",
        "content_length": 150,
        "contains_links": False,
    },
}
# Example payload for reviewing an uploaded image.
image_request = {
    "content": "base64_encoded_image_data",
    "content_type": "image",
    # Low model confidence but high risk score — exactly the case where a
    # human reviewer adds the most value.
    "ai_analysis": {
        "confidence": 0.45,
        "flags": ["potential_nudity", "violence"],
        "risk_score": 0.9,
    },
    # Technical metadata about the image file itself.
    "context": {
        "image_format": "jpeg",
        "dimensions": "1920x1080",
        "file_size": 245760,
        "upload_source": "mobile_app",
    },
}
# Example payload for reviewing a video; the content field carries a URL
# rather than inline data.
video_request = {
    "content": "video_file_url",
    "content_type": "video",
    # Very low confidence from the model — copyright detection is hard.
    "ai_analysis": {
        "confidence": 0.35,
        "flags": ["potential_copyright_violation"],
        "risk_score": 0.6,
    },
    # Playback characteristics a reviewer may need before watching.
    "context": {
        "duration": 180,
        "format": "mp4",
        "resolution": "1080p",
        "audio_track": True,
    },
}
# Example payload for reviewing a business document (here, an expense report).
document_request = {
    "content": "document_file_url",
    "content_type": "document",
    # Low risk: the flag is a completeness issue, not a safety issue.
    "ai_analysis": {
        "confidence": 0.55,
        "flags": ["missing_required_fields"],
        "risk_score": 0.4,
    },
    # Document-level metadata for the reviewer.
    "context": {
        "document_type": "expense_report",
        "file_format": "pdf",
        "page_count": 3,
        "submission_date": "2024-01-15",
    },
}
Clear Content
Relevant Context
Structured Data
Appropriate Priority
import requests


def submit_request(loop_id, request_data, timeout=30):
    """Create a new review request in a HITL.sh loop via the REST API.

    Args:
        loop_id: Identifier of the loop the request belongs to.
        request_data: JSON-serializable dict matching the request schema.
        timeout: Seconds to wait for the HTTP response. Added so a stalled
            network call cannot hang the caller indefinitely (requests has
            no default timeout); default 30, backward-compatible.

    Returns:
        The parsed JSON body of the created request.

    Raises:
        Exception: If the API returns anything other than 201 Created.
        requests.Timeout: If the server does not respond within `timeout`.
    """
    # NOTE(review): assumes API_KEY is defined elsewhere in the integration.
    response = requests.post(
        f"https://api.hitl.sh/v1/loops/{loop_id}/requests",
        headers={
            "Authorization": f"Bearer {API_KEY}",
            "Content-Type": "application/json",
        },
        json=request_data,
        timeout=timeout,
    )
    if response.status_code == 201:
        return response.json()
    raise Exception(f"Failed to create request: {response.text}")
def get_request_response(request_id, timeout=30):
    """Poll HITL.sh for the human response to a request.

    Args:
        request_id: Identifier of the request to check.
        timeout: Seconds to wait for the HTTP response. Added so a stalled
            network call cannot hang a polling loop indefinitely (requests
            has no default timeout); default 30, backward-compatible.

    Returns:
        The parsed JSON response body on 200, or None when the response is
        not ready yet.

    Raises:
        requests.Timeout: If the server does not respond within `timeout`.
    """
    response = requests.get(
        f"https://api.hitl.sh/v1/requests/{request_id}/response",
        headers={"Authorization": f"Bearer {API_KEY}"},
        timeout=timeout,
    )
    if response.status_code == 200:
        return response.json()
    # NOTE(review): any non-200 (including auth errors) is treated as
    # "not ready yet" — callers polling on None cannot distinguish a
    # pending review from a failing request; consider logging here.
    return None