{"name":"Prompt Shield API","version":"1.0.0","description":"Detect prompt injection, jailbreak attempts, and PII leakage in LLM inputs. Protect your AI applications with pattern-based threat scanning.","endpoints":{"scan":"POST /prompt-shield/api/v1/scan","health":"GET  /prompt-shield/health"},"pricing":{"free":"Unlimited (no auth required)"}}