{"data":{"full_name":"im-syn/SafeContentText","name":"SafeContentText","description":"A Python toolkit and web API for detecting “bad” content (profanity, hate speech, gore, etc.) in text using a zero‑shot AI classifier.","stars":3,"forks":1,"language":"Python","license":"Apache-2.0","archived":false,"subcategory":"toxic-comment-detection","last_pushed_at":"2025-07-18T09:41:29+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0,"dependency_count":0,"commits_30d":null,"reverse_dep_count":0,"maintenance_score":2,"adoption_score":3,"maturity_score":9,"community_score":12,"quality_score":26,"quality_tier":"experimental","risk_flags":["stale_6m","no_package","no_dependents"]},"meta":{"timestamp":"2026-04-09T12:12:35.665075+00:00"}}