{"payload":{"pageCount":4,"repositories":[{"type":"Public","name":"NeMo","owner":"NVIDIA","isFork":false,"description":"A scalable generative AI framework built for researchers and developers working on Large Language Models, Multimodal, and Speech AI (Automatic Speech Recognition and Text-to-Speech)","topicNames":["machine-translation","tts","speech-synthesis","neural-networks","deeplearning","speaker-recognition","asr","multimodal","speech-translation","large-language-models"],"topicsNotShown":2,"allTopics":["machine-translation","tts","speech-synthesis","neural-networks","deeplearning","speaker-recognition","asr","multimodal","speech-translation","large-language-models","speaker-diariazation","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":87,"issueCount":57,"starsCount":10260,"forksCount":2182,"license":"Apache License 2.0","participation":[17,26,14,12,22,10,17,21,11,17,14,22,23,17,21,23,16,21,31,25,13,24,6,12,4,13,4,6,20,14,4,8,15,17,28,13,13,21,22,39,24,20,41,19,13,34,30,27,28,38,28,30],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T09:50:32.146Z"}},{"type":"Public","name":"modulus","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for building, training, and fine-tuning deep learning models using state-of-the-art Physics-ML methods","topicNames":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"topicsNotShown":0,"allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":16,"issueCount":84,"starsCount":699,"forksCount":146,"license":"Apache License 2.0","participation":[2,3,8,1,4,4,2,2,9,9,11,5,1,2,1,3,3,11,7,4,3,4,1,10,5,10,5,10,5,4,0,4,4,5,7,6,5,3,7,7,8,2,3,2,4,10,9,3,5,9,2,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T02:30:41.782Z"}},{"type":"Public","name":"TransformerEngine","owner":"NVIDIA","isFork":false,"description":"A library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and inference.","topicNames":["python","machine-learning","deep-learning","gpu","cuda","pytorch","jax","fp8"],"topicsNotShown":0,"allTopics":["python","machine-learning","deep-learning","gpu","cuda","pytorch","jax","fp8"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":28,"issueCount":91,"starsCount":1483,"forksCount":229,"license":"Apache License 2.0","participation":[4,10,7,8,8,2,2,17,5,7,6,9,5,5,3,2,3,11,6,11,6,10,2,7,6,6,4,12,6,9,0,4,9,6,15,11,6,7,8,5,10,4,5,4,9,8,12,10,11,4,9,10],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T02:19:12.144Z"}},{"type":"Public","name":"NVFlare","owner":"NVIDIA","isFork":false,"description":"NVIDIA Federated Learning Application Runtime Environment","topicNames":["python"],"topicsNotShown":0,"allTopics":["python"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":13,"issueCount":25,"starsCount":545,"forksCount":147,"license":"Apache License 2.0","participation":[12,13,9,2,7,1,16,6,11,12,13,9,14,12,10,17,19,17,12,5,6,11,16,16,9,9,9,10,9,24,5,3,17,7,9,7,11,2,3,4,2,5,9,4,10,8,7,14,9,8,8,5],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T00:41:07.992Z"}},{"type":"Public","name":"NeMo-Aligner","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for efficient model 
alignment","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":21,"issueCount":41,"starsCount":280,"forksCount":31,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,9,2,12,2,3,2,0,2,1,2,5,1,2,0,2,4,3,2,3,2,4,3,1,3,4,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-22T23:36:39.367Z"}},{"type":"Public","name":"cloudai","owner":"NVIDIA","isFork":false,"description":"CloudAI Benchmark Framework","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":0,"starsCount":10,"forksCount":6,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,22,39],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-22T23:57:08.627Z"}},{"type":"Public","name":"NeMo-Curator","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for data curation","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":22,"starsCount":259,"forksCount":25,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,3,2,0,6,1,6,3,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-22T23:30:21.677Z"}},{"type":"Public","name":"NeMo-Framework-Launcher","owner":"NVIDIA","isFork":false,"description":"NeMo Megatron launcher and tools","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":21,"starsCount":399,"forksCount":116,"license":"Apache License 2.0","participation":[17,32,15,8,3,4,4,11,12,12,14,14,1,12,13,27,8,1,8,10,27,13,7,6,11,17,10,24,36,23,11,12,23,22,27,19,16,55,25,33,35,17,34,5,6,23,11,29,13,22,54,33],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-23T09:19:07.740Z"}},{"type":"Public","name":"NeMo-text-processing","owner":"NVIDIA","isFork":false,"description":"NeMo text processing for ASR and TTS","topicNames":["text-normalization","inverse-text-n"],"topicsNotShown":0,"allTopics":["text-normalization","inverse-text-n"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":3,"starsCount":220,"forksCount":72,"license":"Apache License 2.0","participation":[1,2,4,1,1,3,1,1,0,0,2,2,0,2,2,1,0,1,2,1,0,3,2,1,0,1,0,0,1,0,0,0,0,0,1,0,0,1,1,0,1,4,1,0,1,0,1,0,3,3,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-22T19:49:12.233Z"}},{"type":"Public","name":"warp","owner":"NVIDIA","isFork":false,"description":"A Python framework for high performance GPU simulation and graphics","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":45,"starsCount":1726,"forksCount":144,"license":"Other","participation":[50,20,25,26,57,38,10,17,41,9,5,32,24,20,41,7,4,18,29,14,28,32,63,27,32,23,1,26,23,24,0,2,19,39,29,12,6,10,21,48,50,45,40,14,9,14,20,37,35,40,25,18],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-22T13:45:51.042Z"}},{"type":"Public","name":"TensorRT-Model-Optimizer","owner":"NVIDIA","isFork":false,"description":"TensorRT Model Optimizer is a unified library of state-of-the-art model optimization techniques such as quantization and sparsity. 
cuda-python — Public · Python · Other license
  CUDA Python Low-level Bindings. (A short driver-API sketch follows this group of entries.)
  780 stars · 60 forks · 0 pull requests · 10 issues · last pushed 2024-05-21

earth2studio — Public · Python · Apache License 2.0
  Open-source deep-learning framework for exploring, building and deploying AI weather/climate workflows.
  Topics: weather, ai, deep-learning, climate-science
  18 stars · 4 forks · 1 pull request · 4 issues · last pushed 2024-05-23

numbast — Public · Python · Apache License 2.0
  Numbast is a tool to build an automated pipeline that converts CUDA APIs into Numba bindings.
  Topics: cuda, numba
  13 stars · 3 forks · 3 pull requests · 15 issues · last pushed 2024-05-23

GenerativeAIExamples — Public · Python · Apache License 2.0
  Generative AI reference workflows optimized for accelerated infrastructure and microservice architecture.
  Topics: microservice, gpu-acceleration, nemo, tensorrt, rag, triton-inference-server, large-language-models, llm, llm-inference, retrieval-augmented-generation
  1,607 stars · 252 forks · 8 pull requests · 16 issues · last pushed 2024-05-21

swift — Public fork · Python · Apache License 2.0
  OpenStack Storage (Swift). Mirror of code maintained at opendev.org.
  8 stars · 1,059 forks · 0 pull requests · 0 issues · last pushed 2024-05-21
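As a sketch of what "low-level bindings" means for cuda-python (above): the calls mirror the C driver API and return the CUresult status as the first element of a tuple. Error handling is kept minimal here and the 128-byte name buffer size is an arbitrary choice:

    from cuda import cuda

    def check(err):
        # Every driver-API binding returns a CUresult first.
        if err != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError(f"CUDA driver error: {err}")

    err, = cuda.cuInit(0)
    check(err)

    err, count = cuda.cuDeviceGetCount()
    check(err)

    err, device = cuda.cuDeviceGet(0)
    check(err)

    err, name = cuda.cuDeviceGetName(128, device)
    check(err)
    print(count, "device(s); device 0 is", name.split(b"\x00", 1)[0].decode())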
NeMo-Guardrails — Public · Python · Other license
  NeMo Guardrails is an open-source toolkit for easily adding programmable guardrails to LLM-based conversational systems. (A short usage sketch follows this group of entries.)
  3,512 stars · 301 forks · 22 pull requests · 151 issues · last pushed 2024-05-20

workbench-example-hybrid-rag — Public · Python · Apache License 2.0
  An NVIDIA AI Workbench example project for Retrieval Augmented Generation (RAG).
  41 stars · 111 forks · 1 pull request · 3 issues · last pushed 2024-05-20

audio-flamingo — Public · Python · MIT License
  PyTorch implementation of Audio Flamingo: A Novel Audio Language Model with Few-Shot Learning and Dialogue Abilities.
  3 stars · 0 forks · 1 pull request · 0 issues · last pushed 2024-05-20

NeMo-speech-data-processor — Public · Python · Apache License 2.0
  A toolkit for processing speech data and creating speech datasets.
  63 stars · 19 forks · 4 pull requests · 1 issue · last pushed 2024-05-21

Megatron-LM — Public · Python · Other license
  Ongoing research training transformer models at scale.
  Topics: transformers, model-para, large-language-models
  8,851 stars · 1,989 forks · 125 pull requests · 303 issues · last pushed 2024-05-20
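The "programmable guardrails" in NeMo-Guardrails (above) are loaded from a configuration directory (YAML plus Colang rail definitions) and wrapped around chat generation. A minimal sketch, assuming a hypothetical ./config directory that already contains a config.yml pointing at an LLM and the rail files:

    from nemoguardrails import LLMRails, RailsConfig

    # "./config" is a placeholder path holding config.yml and *.co rail files.
    config = RailsConfig.from_path("./config")
    rails = LLMRails(config)

    response = rails.generate(messages=[
        {"role": "user", "content": "What can you help me with?"}
    ])
    print(response["content"])  # assistant reply, filtered by the configured rails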
scale","topicNames":["transformers","model-para","large-language-models"],"topicsNotShown":0,"allTopics":["transformers","model-para","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":125,"issueCount":303,"starsCount":8851,"forksCount":1989,"license":"Other","participation":[41,37,12,4,22,14,59,28,38,27,35,51,61,59,32,70,34,36,58,133,101,131,62,33,41,76,24,41,36,59,10,6,39,31,56,60,42,19,5,18,23,41,59,89,44,49,63,29,17,33,13,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-20T08:21:00.836Z"}},{"type":"Public","name":"hpc-container-maker","owner":"NVIDIA","isFork":false,"description":"HPC Container Maker","topicNames":["docker","containers","hpc","singularity"],"topicsNotShown":0,"allTopics":["docker","containers","hpc","singularity"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":11,"starsCount":439,"forksCount":86,"license":"Apache License 2.0","participation":[0,0,3,0,0,0,0,0,0,0,0,0,0,0,4,0,1,0,0,3,0,0,1,0,0,3,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T19:54:46.029Z"}},{"type":"Public","name":"nv-cloud-function-helpers","owner":"NVIDIA","isFork":false,"description":"Functions that simplify common tasks with NVIDIA Cloud Functions","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":12,"issueCount":0,"starsCount":7,"forksCount":2,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,5,9,5,0,1,0,0,0,0,1,0,0,0,0,6,2,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T16:11:27.744Z"}},{"type":"Public","name":"modulus-sym","owner":"NVIDIA","isFork":false,"description":"Framework providing pythonic APIs, algorithms and utilities to be used with Modulus core to physics inform model training as well as higher level abstraction for domain experts","topicNames":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"topicsNotShown":0,"allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":47,"starsCount":122,"forksCount":52,"license":"Apache License 2.0","participation":[2,0,1,1,1,4,0,0,4,5,1,1,0,1,1,0,1,3,4,0,0,0,2,1,2,4,1,2,1,1,0,0,1,1,2,2,0,0,2,1,3,0,0,0,1,2,1,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-16T16:07:46.025Z"}},{"type":"Public","name":"ChatRTX","owner":"NVIDIA","isFork":false,"description":"A developer reference project for creating Retrieval Augmented Generation (RAG) chatbots on Windows using TensorRT-LLM","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":21,"starsCount":2398,"forksCount":258,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,9,3,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-14T09:19:35.286Z"}},{"type":"Public","name":"Stable-Diffusion-WebUI-TensorRT","owner":"NVIDIA","isFork":false,"description":"TensorRT Extension for Stable Diffusion Web UI","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":15,"issueCount":138,"starsCount":1786,"forksCount":136,"license":"MIT 
License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-13T12:08:51.212Z"}},{"type":"Public","name":"air_agent","owner":"NVIDIA","isFork":false,"description":"A Python agent for receiving instructions from the NVIDIA Air platform","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":3,"license":"Other","participation":[0,0,1,0,0,0,0,1,0,0,4,8,1,2,0,1,2,0,2,0,0,0,1,0,0,0,0,0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-10T12:48:15.884Z"}},{"type":"Public","name":"apex","owner":"NVIDIA","isFork":false,"description":"A PyTorch Extension: Tools for easy mixed precision and distributed training in Pytorch","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":71,"issueCount":633,"starsCount":8094,"forksCount":1339,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,2,2,0,1,1,1,1,1,1,1,1,2,5,2,0,1,0,4,0,2,2,0,5,2,2,3,0,0,2,0,1,2,2,1,1,2,1,0,0,0,1,0,0,0,0,0,4,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-09T01:38:51.082Z"}},{"type":"Public","name":"build-system-archive-import-examples","owner":"NVIDIA","isFork":false,"description":"Examples for importing precompiled binary tarball and zip archives into various build and packaging systems","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":7,"starsCount":8,"forksCount":5,"license":"MIT License","participation":[0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,9,0,1,2,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T20:28:09.369Z"}},{"type":"Public","name":"air_sdk","owner":"NVIDIA","isFork":false,"description":"A Python SDK library for interacting with NVIDIA Air","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":5,"forksCount":4,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-01T20:57:11.791Z"}}],"repositoryCount":107,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}