{"data":{"full_name":"messkan/prompt-cache","name":"prompt-cache","description":"Cut LLM costs by up to 80% and unlock sub-millisecond responses with intelligent semantic caching. A drop-in, provider-agnostic LLM proxy written in Go with sub-millisecond response","stars":209.0,"forks":19.0,"language":"Go","license":"MIT","archived":0.0,"subcategory":"redis-vector-caching","last_pushed_at":"2026-01-25T23:34:36+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0.0,"dependency_count":0.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":10.0,"adoption_score":10.0,"maturity_score":13.0,"community_score":14.0,"quality_score":47.0,"quality_tier":"emerging","risk_flags":"['no_package', 'no_dependents']"},"meta":{"timestamp":"2026-04-12T00:22:48.074026+00:00"}}