{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"ollama-benchmark","owner":"aidatatools","isFork":false,"description":"LLM Benchmark for Throughput via Ollama (Local LLMs)","allTopics":["benchmark","ai","ai-tools","llm","ollama"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":54,"forksCount":12,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,13,0,0,0,1,0,0,0,1,0,33,2,4,0,0,1,0,0,0,0,7],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T17:08:44.960Z"}}],"repositoryCount":1,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}