{"data":{"full_name":"Agora-Lab-AI/OmniByteGPT","name":"OmniByteGPT","description":"An implementation of an all-new foundation model architecture that trains on byte sequences from multiple modalities to handle omni-modal generation of text, video, images and more.","stars":9.0,"forks":0.0,"language":"Python","license":"MIT","archived":0.0,"subcategory":"gpt2-pretraining-fine-tuning","last_pushed_at":"2026-03-09T21:59:51+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0.0,"dependency_count":0.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":13.0,"adoption_score":5.0,"maturity_score":16.0,"community_score":0.0,"quality_score":34.0,"quality_tier":"emerging","risk_flags":"['no_package', 'no_dependents']"},"meta":{"timestamp":"2026-04-07T10:49:10.056599+00:00"}}