{"data":{"full_name":"PathologyFoundation/plip","name":"plip","description":"Pathology Language and Image Pre-Training (PLIP) is the first vision and language foundation model for Pathology AI (Nature Medicine). PLIP is a large-scale pre-trained model that can be used to extract visual and language features from pathology images and text description. The model is a fine-tuned version of the original CLIP model.","stars":373.0,"forks":37.0,"language":"Python","license":null,"archived":0.0,"subcategory":"clip-vision-language","last_pushed_at":"2023-09-20T03:39:03+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0.0,"dependency_count":0.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":0.0,"adoption_score":10.0,"maturity_score":8.0,"community_score":16.0,"quality_score":34.0,"quality_tier":"emerging","risk_flags":["no_license","stale_6m","no_package","no_dependents"]},"meta":{"timestamp":"2026-04-06T14:01:43.936182+00:00"}}