{"data":{"full_name":"intel/auto-round","name":"auto-round","description":"🎯An accuracy-first, highly efficient quantization toolkit for LLMs, designed to minimize quality degradation across Weight-Only Quantization, MXFP4, NVFP4, GGUF, and adaptive schemes.","stars":883,"forks":81,"language":"Python","license":"Apache-2.0","archived":false,"subcategory":"llm-quantization-methods","last_pushed_at":"2026-03-13T08:06:11+00:00","pypi_package":"auto-round","npm_package":null,"downloads_monthly":44854,"dependency_count":8,"commits_30d":84,"reverse_dep_count":0,"maintenance_score":25,"adoption_score":20,"maturity_score":25,"community_score":18,"quality_score":88,"quality_tier":"verified","risk_flags":[]},"meta":{"timestamp":"2026-04-06T08:11:56.510637+00:00"}}