{"data":{"full_name":"jgoy-labs/server-nexe","name":"server-nexe","description":"Local AI server with persistent memory, RAG, and multi-backend inference (MLX / llama.cpp / Ollama). Runs entirely on your machine — zero data sent to external services.","stars":0.0,"forks":0.0,"language":"Python","license":"NOASSERTION","archived":0.0,"subcategory":"local-llm-orchestration","last_pushed_at":"2026-03-18T21:02:54+00:00","pypi_package":null,"npm_package":null,"downloads_monthly":0.0,"dependency_count":0.0,"commits_30d":null,"reverse_dep_count":0.0,"maintenance_score":13.0,"adoption_score":0.0,"maturity_score":9.0,"community_score":0.0,"quality_score":22.0,"quality_tier":"experimental","risk_flags":"['no_package', 'no_dependents']"},"meta":{"timestamp":"2026-04-12T10:09:27.565437+00:00"}}