{"data":{"full_name":"ManuelSLemos/RabbitLLM","name":"RabbitLLM","description":"Run 70B+ LLMs on a single 4GB GPU — no quantization required.","stars":38,"forks":7,"language":"Python","license":"Apache-2.0","archived":false,"subcategory":"llm-cuda-optimization","last_pushed_at":"2026-02-28T12:07:57+00:00","pypi_package":"rabbitllm","npm_package":null,"downloads_monthly":179,"dependency_count":12,"commits_30d":null,"reverse_dep_count":0,"maintenance_score":10,"adoption_score":12,"maturity_score":20,"community_score":15,"quality_score":57,"quality_tier":"established","risk_flags":[]},"meta":{"timestamp":"2026-04-06T18:33:41.252149+00:00"}}