{"data":{"full_name":"modelscope/evalscope","name":"evalscope","description":"A streamlined and customizable framework for efficient large model (LLM, VLM, AIGC) evaluation and performance benchmarking.","stars":2501,"forks":285,"language":"Python","license":"Apache-2.0","archived":false,"subcategory":"rag-evaluation-benchmarking","last_pushed_at":"2026-03-11T12:42:53+00:00","pypi_package":"evalscope","npm_package":null,"downloads_monthly":29097,"dependency_count":38,"commits_30d":36,"reverse_dep_count":1,"maintenance_score":23,"adoption_score":21,"maturity_score":18,"community_score":21,"quality_score":83,"quality_tier":"verified","risk_flags":[]},"meta":{"timestamp":"2026-04-05T18:40:48.079620+00:00"}}