{"data":{"full_name":"intel/neural-compressor","name":"neural-compressor","description":"SOTA low-bit LLM quantization (INT8/FP8/MXFP8/INT4/MXFP4/NVFP4) & sparsity; leading model compression techniques on PyTorch, TensorFlow, and ONNX Runtime","stars":2597,"forks":298,"language":"Python","license":"Apache-2.0","archived":0,"subcategory":"llm-quantization-techniques","last_pushed_at":"2026-03-13T07:30:28+00:00","pypi_package":"neural-compressor","npm_package":null,"downloads_monthly":27671,"dependency_count":14,"commits_30d":24,"reverse_dep_count":1,"maintenance_score":23,"adoption_score":21,"maturity_score":25,"community_score":21,"quality_score":90,"quality_tier":"verified","risk_flags":"[]"},"meta":{"timestamp":"2026-04-05T18:25:17.881703+00:00"}}