{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"openvino_backend","owner":"triton-inference-server","isFork":false,"description":"OpenVINO backend for Triton.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":2,"issueCount":3,"starsCount":25,"forksCount":14,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,3,1,0,0,0,1,0,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T09:36:27.140Z"}},{"type":"Public","name":"server","owner":"triton-inference-server","isFork":false,"description":"The Triton Inference Server provides an optimized cloud and edge inferencing solution. ","allTopics":["machine-learning","cloud","deep-learning","gpu","inference","edge","datacenter"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":52,"issueCount":438,"starsCount":7539,"forksCount":1398,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[10,8,6,11,8,5,5,5,3,10,11,4,6,4,6,7,3,8,11,9,6,10,11,10,10,1,6,11,1,2,6,7,6,6,7,3,7,3,4,6,7,3,3,7,14,6,6,2,6,7,7,7],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T08:58:30.132Z"}},{"type":"Public","name":"client","owner":"triton-inference-server","isFork":false,"description":"Triton Python, C++ and Java client libraries, and GRPC-generated client examples for go, java and scala.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":31,"issueCount":10,"starsCount":502,"forksCount":220,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[1,5,2,2,1,3,4,5,0,2,4,0,1,1,0,4,5,4,7,2,4,1,2,2,3,3,4,0,1,0,4,3,2,0,1,0,1,0,9,26,27,14,15,5,11,5,11,11,19,6,7,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-02T20:21:07.948Z"}},{"type":"Public","name":"tutorials","owner":"triton-inference-server","isFork":false,"description":"This repository contains tutorials and examples for Triton Inference Server","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":14,"issueCount":8,"starsCount":440,"forksCount":76,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,0,0,0,0,0,0,0,0,4,0,3,1,3,2,0,3,2,2,1,1,0,0,1,0,1,0,0,2,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,1,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-02T13:15:09.508Z"}},{"type":"Public","name":"core","owner":"triton-inference-server","isFork":false,"description":"The core library and APIs implementing the Triton Inference Server. 
","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":22,"issueCount":0,"starsCount":94,"forksCount":90,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[6,1,2,12,2,2,0,2,2,3,3,0,1,2,2,3,0,2,1,1,1,3,4,4,1,0,4,0,0,0,2,6,3,3,2,0,2,1,0,3,1,0,0,1,1,0,2,1,3,1,3,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-01T23:29:45.220Z"}},{"type":"Public","name":"python_backend","owner":"triton-inference-server","isFork":false,"description":"Triton backend that enables pre-process, post-processing and other logic to be implemented in Python.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":12,"issueCount":0,"starsCount":489,"forksCount":138,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[3,5,1,4,3,5,2,1,2,4,3,1,1,0,1,2,1,3,3,0,4,2,2,0,1,0,2,1,2,0,0,3,2,0,1,1,0,0,0,2,3,0,2,0,2,2,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-01T01:35:17.594Z"}},{"type":"Public","name":"triton_cli","owner":"triton-inference-server","isFork":false,"description":"Triton CLI is an open source command line interface that enables users to create, deploy, and profile models served by the Triton Inference Server.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":1,"starsCount":25,"forksCount":1,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0,10,0,0,1,5,6,1,2,2,0,0,3,4,2,0,1,0,1,0,0,0,6,1,1,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-01T00:55:42.898Z"}},{"type":"Public","name":"developer_tools","owner":"triton-inference-server","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":4,"issueCount":0,"starsCount":15,"forksCount":9,"license":null,"participation":[1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T23:06:20.897Z"}},{"type":"Public","name":"pytorch_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for the PyTorch TorchScript models.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":3,"issueCount":0,"starsCount":107,"forksCount":41,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[2,2,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,2,1,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T21:33:01.237Z"}},{"type":"Public","name":"tensorrt_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for TensorRT.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":1,"issueCount":0,"starsCount":48,"forksCount":28,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T21:08:15.347Z"}},{"type":"Public","name":"vllm_backend","owner":"triton-inference-server","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":0,"starsCount":125,"forksCount":13,"license":"BSD 3-Clause \"New\" or \"Revised\" 
License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,19,9,2,2,0,2,0,1,0,1,0,1,0,0,1,0,0,0,1,1,0,3,0,0,0,0,0,2,1,1,1,0,0,0,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T18:57:42.971Z"}},{"type":"Public","name":"model_analyzer","owner":"triton-inference-server","isFork":false,"description":"Triton Model Analyzer is a CLI tool to help with better understanding of the compute and memory requirements of the Triton Inference Server models.","allTopics":["deep-learning","gpu","inference","performance-analysis"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":12,"starsCount":386,"forksCount":73,"license":"Apache License 2.0","participation":[7,2,4,1,1,3,3,4,0,2,1,4,6,3,1,1,1,0,1,0,1,2,2,0,0,1,1,2,1,1,0,4,1,2,2,0,1,2,1,0,1,2,0,1,4,0,1,1,1,1,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T15:35:23.425Z"}},{"type":"Public","name":"common","owner":"triton-inference-server","isFork":false,"description":"Common source, scripts and utilities shared across all Triton repositories.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":4,"issueCount":0,"starsCount":57,"forksCount":72,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[3,0,2,0,0,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,0,1,0,2,2,0,0,0,0,0,0,2,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T22:44:20.332Z"}},{"type":"Public","name":"tensorrtllm_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton TensorRT-LLM Backend","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":17,"issueCount":199,"starsCount":538,"forksCount":76,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,4,0,4,8,1,1,2,1,1,1,1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T18:42:21.019Z"}},{"type":"Public","name":"onnxruntime_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for the ONNX Runtime.","allTopics":["inference","backend","triton-inference-server","onnx-runtime"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":3,"issueCount":63,"starsCount":113,"forksCount":53,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,2,2,0,0,1,0,0,0,1,0,0,0,2,0,1,0,0,0,0,0,1,0,1,2,1,1,0,1,0,0,1,1,1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T17:56:17.551Z"}},{"type":"Public","name":"fil_backend","owner":"triton-inference-server","isFork":false,"description":"FIL backend for the Triton Inference Server","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":3,"issueCount":48,"starsCount":66,"forksCount":34,"license":"Apache License 2.0","participation":[0,0,0,0,2,0,0,0,1,0,0,0,1,0,0,0,0,2,0,0,0,1,0,0,0,0,0,0,0,0,2,3,1,0,0,1,0,0,0,1,0,0,0,2,0,0,0,2,0,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T20:50:40.097Z"}},{"type":"Public","name":"backend","owner":"triton-inference-server","isFork":false,"description":"Common source, scripts and utilities for creating Triton backends.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":2,"issueCount":0,"starsCount":263,"forksCount":80,"license":"BSD 3-Clause \"New\" or \"Revised\" 
License","participation":[0,0,2,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,2,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-28T20:04:39.719Z"}},{"type":"Public","name":"tensorflow_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend for TensorFlow.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":2,"issueCount":0,"starsCount":40,"forksCount":18,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T22:48:21.256Z"}},{"type":"Public","name":"identity_backend","owner":"triton-inference-server","isFork":false,"description":"Example Triton backend that demonstrates most of the Triton Backend API.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":6,"forksCount":13,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T01:24:29.131Z"}},{"type":"Public","name":"third_party","owner":"triton-inference-server","isFork":false,"description":"Third-party source packages that are modified for use in Triton.","allTopics":[],"primaryLanguage":{"name":"C","color":"#555555"},"pullRequestCount":5,"issueCount":0,"starsCount":7,"forksCount":46,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T01:23:35.812Z"}},{"type":"Public","name":"dali_backend","owner":"triton-inference-server","isFork":false,"description":"The Triton backend that allows running GPU-accelerated data pre-processing pipelines implemented in DALI's python API.","allTopics":["python","deep-learning","gpu","image-processing","dali","data-preprocessing","nvidia-dali","fast-data-pipeline"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":5,"issueCount":20,"starsCount":117,"forksCount":27,"license":"MIT License","participation":[1,0,2,1,0,0,0,0,0,0,0,0,0,0,1,1,3,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,3,1,0,2,0,0,0,0,2,2,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T16:35:57.238Z"}},{"type":"Public","name":"pytriton","owner":"triton-inference-server","isFork":false,"description":"PyTriton is a Flask/FastAPI-like interface that simplifies Triton's deployment in Python environments.","allTopics":["gpu","deep-learning","inference"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":676,"forksCount":45,"license":"Apache License 2.0","participation":[2,0,4,3,4,3,11,3,4,6,2,6,14,8,2,3,6,1,2,2,4,10,8,4,7,4,3,10,2,0,7,8,2,0,1,6,1,1,6,1,4,1,0,5,3,2,2,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T07:34:36.438Z"}},{"type":"Public","name":"square_backend","owner":"triton-inference-server","isFork":false,"description":"Simple Triton backend used for testing.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":4,"license":"BSD 3-Clause \"New\" or \"Revised\" 
License","participation":[0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:31:55.150Z"}},{"type":"Public","name":"repeat_backend","owner":"triton-inference-server","isFork":false,"description":"An example Triton backend that demonstrates sending zero, one, or multiple responses for each request. ","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":7,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:31:43.795Z"}},{"type":"Public","name":"redis_cache","owner":"triton-inference-server","isFork":false,"description":"TRITONCACHE implementation of a Redis cache","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":2,"starsCount":7,"forksCount":4,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[15,4,19,0,7,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:31:36.110Z"}},{"type":"Public","name":"local_cache","owner":"triton-inference-server","isFork":false,"description":"Implementation of a local in-memory cache for Triton Inference Server's TRITONCACHE API","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":1,"starsCount":2,"forksCount":1,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[1,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:30:58.089Z"}},{"type":"Public","name":"checksum_repository_agent","owner":"triton-inference-server","isFork":false,"description":"The Triton repository agent that verifies model checksums.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":6,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-08T16:30:14.209Z"}},{"type":"Public","name":"model_navigator","owner":"triton-inference-server","isFork":false,"description":"Triton Model Navigator is an inference toolkit designed for optimizing and deploying Deep Learning models with a focus on NVIDIA GPUs.","allTopics":["deep-learning","gpu","inference"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":161,"forksCount":24,"license":"Apache License 2.0","participation":[2,0,4,10,8,5,4,8,5,5,0,3,5,3,2,2,6,3,3,1,2,5,2,1,2,4,2,2,5,0,0,3,2,5,2,3,3,2,2,1,5,7,4,4,0,6,4,0,4,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T18:17:29.722Z"}},{"type":"Public","name":"contrib","owner":"triton-inference-server","isFork":false,"description":"Community contributions to Triton that are not officially supported or maintained by the Triton project.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":8,"forksCount":7,"license":"BSD 3-Clause \"New\" or \"Revised\" 
License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-16T23:07:29.062Z"}},{"type":"Public","name":"stateful_backend","owner":"triton-inference-server","isFork":false,"description":"Triton backend for managing the model state tensors automatically in sequence batcher","allTopics":["backend","triton","stateful"],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":11,"forksCount":4,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-12T16:39:27.879Z"}}],"repositoryCount":34,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}