{"data":{"full_name":"bigscience-workshop/petals","name":"petals","description":"🌸 Run LLMs at home, BitTorrent-style. Fine-tuning and inference up to 10x faster than offloading","stars":9997,"forks":595,"language":"Python","license":"MIT","archived":false,"subcategory":"llm-inference-engines","last_pushed_at":"2024-09-07T11:54:28+00:00","pypi_package":"petals","npm_package":null,"downloads_monthly":13077,"dependency_count":18,"commits_30d":null,"reverse_dep_count":1,"maintenance_score":0,"adoption_score":20,"maturity_score":25,"community_score":18,"quality_score":63,"quality_tier":"established","risk_flags":["stale_6m"],"meta":{"timestamp":"2026-04-11T13:28:45.229528+00:00"}}