{"data":{"full_name":"fla-org/flash-linear-attention","name":"flash-linear-attention","description":"🚀 Efficient implementations of state-of-the-art linear attention models","stars":4549.0,"forks":431.0,"language":"Python","license":"MIT","archived":0.0,"subcategory":"sparse-attention-optimization","last_pushed_at":"2026-03-12T14:45:14+00:00","pypi_package":"flash-linear-attention","npm_package":null,"downloads_monthly":438484.0,"dependency_count":2.0,"commits_30d":30.0,"reverse_dep_count":1.0,"maintenance_score":23.0,"adoption_score":21.0,"maturity_score":25.0,"community_score":20.0,"quality_score":89.0,"quality_tier":"verified","risk_flags":"[]"},"meta":{"timestamp":"2026-04-05T18:42:27.394808+00:00"}}