{"data":{"full_name":"ankitlade12/AgentArmor","name":"AgentArmor","description":"The full-stack safety layer for AI agents. Budget limits, prompt injection shields, PII filtering, output firewalls, and hooks — in 2 lines of code.","stars":2,"forks":0,"language":"Python","license":"MIT","archived":false,"subcategory":"llm-firewall-defense","last_pushed_at":"2026-03-18T23:44:41+00:00","pypi_package":"agentarmor","npm_package":null,"downloads_monthly":494,"dependency_count":0,"commits_30d":null,"reverse_dep_count":0,"maintenance_score":13,"adoption_score":8,"maturity_score":18,"community_score":0,"quality_score":39,"quality_tier":"emerging","risk_flags":["no_dependents"]},"meta":{"timestamp":"2026-04-05T18:42:54.470126+00:00"}}