{"data":{"full_name":"nl4opt/ORQA","name":"ORQA","description":"[AAAI 2025] ORQA is a new QA benchmark designed to assess the reasoning capabilities of LLMs in a specialized technical domain of Operations Research. The benchmark evaluates whether LLMs can emulate the knowledge and reasoning skills of OR experts when presented with complex optimization modeling tasks.","stars":45,"forks":2,"language":"Python","license":null,"archived":false,"subcategory":"evaluation-frameworks-metrics","last_pushed_at":"2025-06-07T19:31:43+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0,"dependency_count":0,"commits_30d":null,"reverse_dep_count":0,"maintenance_score":2,"adoption_score":8,"maturity_score":8,"community_score":5,"quality_score":23,"quality_tier":"experimental","risk_flags":["no_license","stale_6m","no_package","no_dependents"],"meta":{"timestamp":"2026-04-11T02:51:29.884461+00:00"}}