{"data":{"full_name":"madroidmaq/mlx-omni-server","name":"mlx-omni-server","description":"MLX Omni Server is a local inference server powered by Apple's MLX framework, specifically designed for Apple Silicon (M-series) chips. It implements OpenAI-compatible API endpoints, enabling seamless integration with existing OpenAI SDK clients while leveraging the power of local ML inference.","stars":678.0,"forks":84.0,"language":"Python","license":"MIT","archived":0.0,"subcategory":"llm-evaluation-platforms","last_pushed_at":"2026-03-10T00:53:43+00:00","pypi_package":"mlx-omni-server","npm_package":null,"downloads_monthly":2273.0,"dependency_count":15.0,"commits_30d":17.0,"reverse_dep_count":0.0,"maintenance_score":20.0,"adoption_score":18.0,"maturity_score":18.0,"community_score":19.0,"quality_score":75.0,"quality_tier":"verified","risk_flags":"[]"},"meta":{"timestamp":"2026-04-05T18:36:11.530653+00:00"}}