{"data":{"full_name":"evo-eval/evoeval","name":"evoeval","description":"EvoEval: Evolving Coding Benchmarks via LLM","stars":81.0,"forks":13.0,"language":"Python","license":"Apache-2.0","archived":0.0,"subcategory":"evaluation-frameworks-metrics","last_pushed_at":"2024-04-06T23:26:59+00:00","pypi_package":"evoeval","npm_package":null,"downloads_monthly":0.0,"dependency_count":8.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":0.0,"adoption_score":9.0,"maturity_score":25.0,"community_score":16.0,"quality_score":50.0,"quality_tier":"established","risk_flags":"['stale_6m']"},"meta":{"timestamp":"2026-04-06T17:28:29.933915+00:00"}}