{"data":{"full_name":"softmax1/Flash-Attention-Softmax-N","name":"Flash-Attention-Softmax-N","description":"CUDA and Triton implementations of Flash Attention with SoftmaxN.","stars":73.0,"forks":5.0,"language":"Python","license":"GPL-3.0","archived":0.0,"subcategory":"transformer-architecture-tutorials","last_pushed_at":"2024-05-26T22:58:38+00:00","pypi_package":"flash-attention-softmax-n","npm_package":null,"downloads_monthly":0.0,"dependency_count":2.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":0.0,"adoption_score":9.0,"maturity_score":25.0,"community_score":8.0,"quality_score":42.0,"quality_tier":"emerging","risk_flags":"['stale_6m']"},"meta":{"timestamp":"2026-04-05T22:11:30.696057+00:00"}}