{"data":{"full_name":"berayboztepe/AVESA","name":"AVESA","description":"An approach for recognizing the sound environment class from a video to understand the spoken content with its sentimental context via some sort of analysis that is achieved by the processing of audio-visual content using multimodal deep learning methodology.","stars":0.0,"forks":2.0,"language":null,"license":"MIT","archived":0.0,"subcategory":"audio-event-classification","last_pushed_at":"2022-08-31T08:35:48+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0.0,"dependency_count":0.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":0.0,"adoption_score":0.0,"maturity_score":9.0,"community_score":3.0,"quality_score":12.0,"quality_tier":"experimental","risk_flags":["stale_6m","no_package","no_dependents"],"meta":{"timestamp":"2026-04-12T06:51:04.483287+00:00"}}