{"data":{"full_name":"Phinchanbora/llm-evaluation","name":"llm-evaluation","description":"🎯 Benchmark LLMs effectively with over 10 tests and 108,000 real questions to assess model performance and enhance AI evaluation.","stars":0.0,"forks":0.0,"language":"Python","license":null,"archived":0.0,"subcategory":"llm-benchmark-leaderboards","last_pushed_at":"2026-03-13T07:28:01+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0.0,"dependency_count":0.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":13.0,"adoption_score":0.0,"maturity_score":1.0,"community_score":0.0,"quality_score":14.0,"quality_tier":"experimental","risk_flags":"['no_license', 'no_package', 'no_dependents']"},"meta":{"timestamp":"2026-04-11T00:03:42.702170+00:00"}}