{"payload":{"header_redesign_enabled":false,"results":[{"id":"709251718","archived":false,"color":"#f34b7d","followers":131,"has_funding_file":false,"hl_name":"torchpipe/torchpipe","hl_trunc_description":"An Alternative for Triton Inference Server. Boosting DL Service Throughput 1.5-4x by Ensemble Pipeline Serving with Concurrent CUDA Strea…","language":"C++","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":709251718,"name":"torchpipe","owner_id":140804564,"owner_login":"torchpipe","updated_at":"2024-05-24T07:58:46.489Z","has_issues":true}},"sponsorable":false,"topics":["deployment","inference","pytorch","ray","serve","tensorrt","serving","pipeline-parallelism","torch2trt","triton-inference-server","ray-serve","cvcuda"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":68,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Atorchpipe%252Ftorchpipe%2B%2Blanguage%253AC%252B%252B","metadata":null,"csrf_tokens":{"/torchpipe/torchpipe/star":{"post":"7_j6MqCUeWuE3hikonoT-P9q3yZ1I5r4XFDaDH9wfnnykID-Lk-ppgsHaTgBXER_4btoD5W31m5o7KR7odof1g"},"/torchpipe/torchpipe/unstar":{"post":"FG1hTQhK41ADwh795aNHheiH2QyPYcDtqM9ac8aq8uJ475JQPEQYbIakFIqPRSR7OVzT40nNK--C2bnBn1DJOw"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"cUL2GM1Ksc3KStN7dGjR4GYZ7x4XSJFsZCWe11WHCJfhHvw6F4gnwaLWUJWGiMxrfs_gDpUT0GG7-emTqB4IPQ"}}},"title":"Repository search results"}