{"data":{"full_name":"eqimp/hogwild_llm","name":"hogwild_llm","description":"Official PyTorch implementation for Hogwild! Inference: Parallel LLM Generation with a Concurrent Attention Cache","stars":140.0,"forks":9.0,"language":"Python","license":"Apache-2.0","archived":0.0,"subcategory":"llm-cuda-optimization","last_pushed_at":"2025-08-13T16:36:53+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0.0,"dependency_count":0.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":2.0,"adoption_score":10.0,"maturity_score":15.0,"community_score":10.0,"quality_score":37.0,"quality_tier":"emerging","risk_flags":"['stale_6m', 'no_package', 'no_dependents']"},"meta":{"timestamp":"2026-04-06T17:28:54.813945+00:00"}}