{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"Video-LLaVA","owner":"PKU-YuanGroup","isFork":false,"description":"Video-LLaVA: Learning United Visual Representation by Alignment Before Projection","topicNames":["multi-modal","instruction-tuning","large-vision-language-model"],"topicsNotShown":0,"allTopics":["multi-modal","instruction-tuning","large-vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":67,"starsCount":2494,"forksCount":185,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,10,34,52,5,2,0,0,1,0,1,28,5,2,0,3,0,1,0,0,0,0,0,0,0,0,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-19T08:34:27.810Z"}},{"type":"Public","name":"Open-Sora-Plan","owner":"PKU-YuanGroup","isFork":false,"description":"This project aim to reproduce Sora (Open AI T2V model), we wish the open source community contribute to this project.","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":110,"starsCount":10304,"forksCount":934,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,212,66,13,25,45,58,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-18T21:44:51.839Z"}},{"type":"Public","name":"Hallucination-Attack","owner":"PKU-YuanGroup","isFork":false,"description":"Attack to induce LLMs within hallucinations","topicNames":["nlp","ai-safety","hallucinations","llm","llm-safety","machine-learning","deep-learning","adversarial-attacks"],"topicsNotShown":0,"allTopics":["nlp","ai-safety","hallucinations","llm","llm-safety","machine-learning","deep-learning","adversarial-attacks"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":82,"forksCount":11,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,8,0,4,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-17T08:48:15.526Z"}},{"type":"Public","name":"Envision3D","owner":"PKU-YuanGroup","isFork":false,"description":"Envision3D: One Image to 3D with Anchor Views Interpolation","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":88,"forksCount":8,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-16T18:46:32.791Z"}},{"type":"Public","name":"MoE-LLaVA","owner":"PKU-YuanGroup","isFork":false,"description":"Mixture-of-Experts for Large Vision-Language Models","topicNames":["moe","multi-modal","mixture-of-experts","large-vision-language-model"],"topicsNotShown":0,"allTopics":["moe","multi-modal","mixture-of-experts","large-vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":45,"starsCount":1725,"forksCount":100,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,10,4,3,25,63,65,7,42,2,0,0,2,0,0,0,0,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T14:50:56.572Z"}},{"type":"Public","name":"MagicTime","owner":"PKU-YuanGroup","isFork":false,"description":"MagicTime: Time-lapse Video Generation Models as Metamorphic 
Simulators","topicNames":["time-lapse","video-generation","diffusion-models","text-to-video","long-video-generation","time-lapse-dataset","open-sora-plan","metamorphic-video-generation"],"topicsNotShown":0,"allTopics":["time-lapse","video-generation","diffusion-models","text-to-video","long-video-generation","time-lapse-dataset","open-sora-plan","metamorphic-video-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":1129,"forksCount":110,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,13,92,7,0,0,2,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-09T02:02:00.948Z"}},{"type":"Public","name":"ProLLaMA","owner":"PKU-YuanGroup","isFork":false,"description":"A Protein Large Language Model for Multi-Task Protein Language Processing","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":79,"forksCount":7,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,17,36,1,0,0,0,1,7,0,8,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-27T09:51:11.215Z"}},{"type":"Public","name":"Open-Sora-Dataset","owner":"PKU-YuanGroup","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":51,"forksCount":3,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,4,0,12,0,2,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-24T07:13:07.657Z"}},{"type":"Public","name":"Chat-UniVi","owner":"PKU-YuanGroup","isFork":false,"description":"[CVPR 2024 Highlight🔥] Chat-UniVi: Unified Visual Representation Empowers Large Language Models with Image and Video Understanding","topicNames":["video-understanding","image-understanding","large-language-models","vision-language-model"],"topicsNotShown":0,"allTopics":["video-understanding","image-understanding","large-language-models","vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":7,"starsCount":649,"forksCount":31,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,13,35,7,7,0,0,1,4,1,0,0,0,0,0,0,1,1,0,0,0,3,1,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-12T06:59:55.066Z"}},{"type":"Public","name":"LanguageBind","owner":"PKU-YuanGroup","isFork":false,"description":"【ICLR 2024🔥】 Extending Video-Language Pretraining to N-modality by Language-based Semantic Alignment","topicNames":["multi-modal","zero-shot","pretraining","language-central"],"topicsNotShown":0,"allTopics":["multi-modal","zero-shot","pretraining","language-central"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":16,"starsCount":553,"forksCount":44,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-25T12:28:38.429Z"}},{"type":"Public","name":"TaxDiff","owner":"PKU-YuanGroup","isFork":false,"description":"The official code for \"TaxDiff: Taxonomic-Guided Diffusion Model for Protein Sequence 
Generation\"","topicNames":["protein-sequences","generate-model","meachine-learning","ai4science"],"topicsNotShown":0,"allTopics":["protein-sequences","generate-model","meachine-learning","ai4science"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":35,"forksCount":3,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-04T01:21:18.395Z"}},{"type":"Public","name":"Peer-review-in-LLMs","owner":"PKU-YuanGroup","isFork":false,"description":"Peer-review-in-LLMs: Automatic Evaluation Method for LLMs in Open-environment,https://arxiv.org/pdf/2402.01830.pdf","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":24,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-07T02:30:30.281Z"}},{"type":"Public","name":"Machine-Mindset","owner":"PKU-YuanGroup","isFork":false,"description":"An MBTI Exploration of Large Language Models","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":399,"forksCount":18,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-02T02:53:05.571Z"}},{"type":"Public","name":"Video-Bench","owner":"PKU-YuanGroup","isFork":false,"description":"A Comprehensive Benchmark and Toolkit for Evaluating Video-based Large Language Models!","topicNames":["benchmark","toolkit","large-language-models"],"topicsNotShown":0,"allTopics":["benchmark","toolkit","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":97,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-31T03:41:41.734Z"}}],"repositoryCount":14,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}