Multi select responses allow reviewers to choose multiple options from a predefined list, making them ideal for identifying multiple issues, categorizing content across several dimensions, or conducting comprehensive audits where multiple aspects need evaluation.
You specify exact values to be returned: ["pii", "external_links", "promo"]
Labels with emojis/descriptions display to reviewers
Full control over response data format
Perfect for matching database flag values
Best for: Custom values, database flags, rich labels with emojis
Both formats work identically - choose based on your preference. The API automatically transforms simple strings to rich objects for mobile app compatibility. Response data is always an array of selected values.
Categorize support tickets across multiple dimensions:
# Multi-category ticket classification
#
# Example request payload: a multi_select review asking the reviewer to tag a
# support ticket with every category that applies (2-6 selections required).
request_data = {
    "processing_type": "time-sensitive",
    "type": "markdown",
    "priority": "medium",
    "request_text": "Categorize this customer support ticket:\n\n'Subject: Billing issue and account access problem\n\nMessage: Hi, I was charged twice for my subscription last month, and now I can't log into my account. I've tried resetting my password but the email isn't coming through. This is really frustrating and I need this resolved ASAP since I have a presentation tomorrow that requires access to my files. Can someone please help? I've been a customer for 3 years and this has never happened before.'",
    "response_type": "multi_select",
    "response_config": {
        # Rich option objects: `value` is returned in the response data,
        # `label` (with emoji) is what the reviewer sees.
        "options": [
            {"value": "billing_issue", "label": "💳 Billing & Payment"},
            {"value": "account_access", "label": "🔐 Account Access"},
            {"value": "technical_issue", "label": "⚙️ Technical Problem"},
            {"value": "urgent_priority", "label": "🚨 Urgent Priority"},
            {"value": "email_delivery", "label": "📧 Email Delivery Issue"},
            {"value": "loyal_customer", "label": "⭐ Loyal Customer"},
            {"value": "escalation_needed", "label": "📞 Needs Escalation"},
        ],
        "min_selections": 2,
        "max_selections": 6,
        "required": True,
    },
    # Fallback used if the review times out before a reviewer responds.
    "default_response": ["billing_issue", "account_access"],
    "timeout_seconds": 3600,
    "platform": "api",
}
Your application should validate received responses:
def validate_multi_select_response(response_data, response_config):
    """Validate a multi select response against its configuration.

    Args:
        response_data: dict expected to contain a "selected_values" list.
        response_config: dict with "options" (list of {"value": ...} dicts)
            and optional "min_selections", "max_selections", "required".

    Returns:
        (bool, str): (True, "Valid") on success, otherwise (False, reason).
    """
    # Check response structure
    if not isinstance(response_data, dict):
        return False, "Response must be an object"
    if "selected_values" not in response_data:
        return False, "Missing selected_values field"
    selected_values = response_data["selected_values"]

    # Validate it's an array
    if not isinstance(selected_values, list):
        return False, "selected_values must be an array"

    # Validate all values exist in options
    valid_values = [opt["value"] for opt in response_config["options"]]
    invalid_values = [val for val in selected_values if val not in valid_values]
    if invalid_values:
        return False, f"Invalid selections: {invalid_values}"

    # Check selection limits (defaults: no minimum, maximum = all options)
    min_selections = response_config.get("min_selections", 0)
    max_selections = response_config.get("max_selections", len(valid_values))
    if len(selected_values) < min_selections:
        return False, f"Must select at least {min_selections} options"
    if len(selected_values) > max_selections:
        return False, f"Cannot select more than {max_selections} options"

    # Check required
    if response_config.get("required", False) and len(selected_values) == 0:
        return False, "At least one selection is required"

    # Check for duplicates
    if len(selected_values) != len(set(selected_values)):
        return False, "Duplicate selections not allowed"

    return True, "Valid"


# Usage
# NOTE: the options list must be concrete — a "[...]" placeholder would make
# the validator crash when it builds valid_values from each option dict.
is_valid, error_message = validate_multi_select_response(
    response_data={
        "selected_values": ["spam", "misleading_claims"],
        "selected_labels": ["🚫 Spam Content", "⚠️ Misleading Claims"],
    },
    response_config={
        "options": [
            {"value": "spam", "label": "🚫 Spam Content"},
            {"value": "misleading_claims", "label": "⚠️ Misleading Claims"},
            {"value": "hate_speech", "label": "🛑 Hate Speech"},
        ],
        "min_selections": 1,
        "max_selections": 5,
        "required": True,
    },
)
# Weight different selections based on business impact
selection_weights = {
    "critical_issue": 10,
    "major_issue": 5,
    "minor_issue": 1,
    "cosmetic_issue": 0.5,
}


def calculate_severity_score(selected_values):
    """Sum the business-impact weights of the given flags (unknown flags weigh 0)."""
    total = 0
    for flag in selected_values:
        total += selection_weights.get(flag, 0)
    return total
Combination Logic
# Handle specific combinations of selections
def process_selection_combinations(selected_values):
    """Trigger follow-up workflows when meaningful flag combinations co-occur."""
    flags = set(selected_values)
    # Urgent + customer-facing problems bypass the normal queue.
    if {"urgent_issue", "customer_facing"} <= flags:
        escalate_immediately()
    # Billing problems for long-standing customers get expedited handling.
    if {"billing_problem", "loyal_customer"} <= flags:
        prioritize_resolution()
    # Any security-related flag routes the item to the security team.
    if {"spam", "scam", "malicious"} & flags:
        trigger_security_review()
Analytics and Patterns
# Track selection patterns for insights
def analyze_selection_patterns(responses):
    """Summarize how often individual flags and flag pairs are selected.

    Returns a dict with per-flag frequencies and the 10 most common
    unordered pairs of co-selected flags.
    """
    from collections import Counter
    import itertools

    individual = Counter()
    pair_counts = Counter()
    for response in responses:
        picks = response["selected_values"]
        individual.update(picks)
        # Pairs only exist when at least two flags were selected;
        # sorting makes (a, b) and (b, a) count as the same pair.
        if len(picks) >= 2:
            pair_counts.update(itertools.combinations(sorted(picks), 2))

    return {
        "individual_frequencies": dict(individual),
        "combination_patterns": dict(pair_counts.most_common(10)),
    }
# Escalate based on selection combinations
escalation_rules = {
    ("security_threat", "customer_data"): "immediate_security_team",
    ("billing_error", "high_value_customer"): "senior_billing_specialist",
    ("technical_bug", "production_system"): "engineering_lead",
    ("content_violation", "repeat_offender"): "policy_enforcement_team",
}


def check_escalation_needed(selected_values):
    """Return the escalation target for the first rule whose flags are all present, else None."""
    chosen = set(selected_values)
    for required_flags, target in escalation_rules.items():
        if chosen.issuperset(required_flags):
            return target
    return None
# Score content quality based on passed/failed criteria
def calculate_quality_score(selected_criteria, all_possible_criteria):
    """Return a quality score in [0.0, 1.0] from passed review criteria.

    Args:
        selected_criteria: criteria the content passed.
        all_possible_criteria: every criterion that was evaluated.

    Returns:
        float in [0.0, 1.0]. When critical criteria are present they
        contribute 70% of the score; otherwise the score is the plain
        completion rate. Returns 0.0 when no criteria were evaluated.
    """
    # Guard: nothing evaluated means nothing passed (avoids ZeroDivisionError).
    if not all_possible_criteria:
        return 0.0

    # Basic completion percentage
    completion_rate = len(selected_criteria) / len(all_possible_criteria)

    # Weight critical criteria more heavily
    critical_criteria = {"security_compliant", "legally_compliant", "factually_accurate"}
    critical_passed = sum(1 for c in selected_criteria if c in critical_criteria)
    critical_total = sum(1 for c in all_possible_criteria if c in critical_criteria)

    if critical_total > 0:
        critical_rate = critical_passed / critical_total
        # Heavily weight critical criteria (70% of score)
        final_score = (0.7 * critical_rate) + (0.3 * completion_rate)
    else:
        final_score = completion_rate

    return min(final_score, 1.0)  # Cap at 1.0