{"payload":{"pageCount":3,"repositories":[{"type":"Public","name":"Q-LLM","owner":"dvlab-research","isFork":false,"description":"This is the official repo of \"QuickLLaMA: Query-aware Inference Acceleration for Large Language Models\"","allTopics":["fast-inference","inference-acceleration","large-language-models","long-context","kv-cache-compression"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":29,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-16T07:00:31.221Z"}},{"type":"Public","name":"Step-DPO","owner":"dvlab-research","isFork":false,"description":"Implementation for \"Step-DPO: Step-wise Preference Optimization for Long-chain Reasoning of LLMs\"","allTopics":["math","reasoning","dpo","llm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":164,"forksCount":2,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,12,6,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-15T14:43:51.911Z"}},{"type":"Public","name":"TagCLIP","owner":"dvlab-research","isFork":false,"description":"","allTopics":["segmentation","clip","zero-shot","image-text"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-15T07:10:38.315Z"}},{"type":"Public","name":"ControlNeXt","owner":"dvlab-research","isFork":false,"description":"Controllable video and image Generation, SVD, Animate Anyone, ControlNet, LoRA","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":397,"forksCount":14,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,4,4,25,4,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-12T12:04:24.272Z"}},{"type":"Public","name":"LLMGA","owner":"dvlab-research","isFork":false,"description":"This project is the official implementation of 'LLMGA: Multimodal Large Language Model based Generation Assistant', ECCV2024","allTopics":["image-editing","image-generation","multi-modal","aigc","llm","large-language-model","mllm","image-design-assistant"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":388,"forksCount":28,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-12T06:26:41.609Z"}},{"type":"Public","name":"VFIformer","owner":"dvlab-research","isFork":false,"description":"Video Frame Interpolation with Transformer (CVPR2022)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":12,"starsCount":109,"forksCount":19,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-10T13:25:08.479Z"}},{"type":"Public","name":"MR-GSM8K","owner":"dvlab-research","isFork":false,"description":"Challenge LLMs to Reason About Reasoning: A Benchmark to Unveil Cognitive Depth in LLMs","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":36,"forksCount":0,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,7,2,2,6,0,0,0,0,0,0,0,0,0,0,2,0,5,0,0,0,0,0,2,3,0,1,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-10T07:21:30.730Z"}},{"type":"Public","name":"Mr-Ben","owner":"dvlab-research","isFork":false,"description":"This is the repo for our paper \"Mr-Ben: A Comprehensive Meta-Reasoning Benchmark for Large Language Models\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":27,"forksCount":0,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,12,2,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-10T07:19:07.074Z"}},{"type":"Public","name":"Parametric-Contrastive-Learning","owner":"dvlab-research","isFork":false,"description":"Parametric Contrastive Learning (ICCV2021) & GPaCo (TPAMI 2023)","allTopics":["pytorch","supervised-learning","imagenet","image-classification","class-imbalance","imbalanced-data","imbalanced-learning","tpami","contrastive-learning","supervised-contrastive-learning","long-tailed-recognition","iccv2021","parametric-contrastive-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":235,"forksCount":31,"license":"MIT License","participation":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-04T14:03:30.495Z"}},{"type":"Public","name":"LISA","owner":"dvlab-research","isFork":false,"description":"Project Page for \"LISA: Reasoning Segmentation via Large Language Model\"","allTopics":["segmentation","multi-modal","llm","large-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":57,"starsCount":1654,"forksCount":113,"license":"Apache License 2.0","participation":[0,43,50,14,9,5,0,1,2,2,0,0,0,1,3,1,1,0,0,0,0,0,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-02T08:27:03.066Z"}},{"type":"Public","name":"MOOD","owner":"dvlab-research","isFork":false,"description":"Official PyTorch implementation of MOOD series: (1) MOODv1: Rethinking Out-of-distribution Detection: Masked Image Modeling Is All You Need. (2) MOODv2: Masked Image Modeling for Out-of-Distribution Detection.","allTopics":["outlier-detection","ood-detection","masked-image-modeling","cvpr2023","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":8,"starsCount":133,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-02T06:41:57.686Z"}},{"type":"Public","name":"PFENet","owner":"dvlab-research","isFork":false,"description":"PFENet: Prior Guided Feature Enrichment Network for Few-shot Segmentation (TPAMI).","allTopics":["segmentation","few-shot","pami-2020"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":35,"starsCount":306,"forksCount":54,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-01T15:40:11.703Z"}},{"type":"Public","name":"LongLoRA","owner":"dvlab-research","isFork":false,"description":"Code and documents of LongLoRA and LongAlpaca (ICLR 2024 Oral)","allTopics":["lora","large-language-models","llm","long-context","fine-tuning-llm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":43,"starsCount":2557,"forksCount":261,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-02T13:10:37.709Z"}},{"type":"Public","name":"MGM","owner":"dvlab-research","isFork":false,"description":"Official repo for \"Mini-Gemini: Mining the Potential of Multi-modality Vision Language Models\"","allTopics":["generation","large-language-models","vision-language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":55,"starsCount":3101,"forksCount":277,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-04T14:36:51.258Z"}},{"type":"Public","name":"GroupContrast","owner":"dvlab-research","isFork":false,"description":"[CVPR 2024] GroupContrast: Semantic-aware Self-supervised Representation Learning for 3D Understanding","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":2,"starsCount":41,"forksCount":1,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-15T06:31:54.717Z"}},{"type":"Public","name":"Video-P2P","owner":"dvlab-research","isFork":false,"description":"Video-P2P: Video Editing with Cross-attention Control","allTopics":["image-editing","generative-model","video-editing","text-driven-editing","stable-diffusion"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":7,"starsCount":358,"forksCount":24,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-12T13:31:27.759Z"}},{"type":"Public","name":"Prompt-Highlighter","owner":"dvlab-research","isFork":false,"description":"[CVPR 2024] Prompt Highlighter: Interactive Control for Multi-Modal LLMs","allTopics":["text-generation","multi-modality","llm-inference"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":110,"forksCount":2,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-25T04:48:15.566Z"}},{"type":"Public","name":"LLaMA-VID","owner":"dvlab-research","isFork":false,"description":"Official Implementation for LLaMA-VID: An Image is Worth 2 Tokens in Large Language Models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":33,"starsCount":640,"forksCount":41,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-10T12:34:34.429Z"}},{"type":"Public","name":"MoTCoder","owner":"dvlab-research","isFork":false,"description":"This is the official code repository of MoTCoder: Elevating Large Language Models with Modular of Thought for Challenging Programming Tasks.","allTopics":["natural-language-processing","programming","code","apps","code-generation","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":57,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-06T08:55:11.326Z"}},{"type":"Public","name":"BAL","owner":"dvlab-research","isFork":false,"description":"BAL: Balancing Diversity and Novelty for Active Learning - Official Pytorch Implementation","allTopics":["computer-vision","active-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":38,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-03T05:27:53.443Z"}},{"type":"Public","name":"RIVAL","owner":"dvlab-research","isFork":false,"description":"[NeurIPS 2023 Spotlight] Real-World Image Variation by Aligning Diffusion Inversion Chain","allTopics":["style-transfer","text-to-image","diffusion-models","image-variations"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":142,"forksCount":10,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-02T08:04:18.111Z"}},{"type":"Public","name":"TriVol","owner":"dvlab-research","isFork":false,"description":"The official code of TriVol in CVPR-2023","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":37,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-30T06:32:42.369Z"}},{"type":"Public","name":"APD","owner":"dvlab-research","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":4,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-09T08:14:23.308Z"}},{"type":"Public","name":"Imbalanced-Learning","owner":"dvlab-research","isFork":false,"description":"Imbalanced learning tool for imbalanced recognition and segmentation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":79,"forksCount":8,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-07T14:07:46.139Z"}},{"type":"Public","name":"SparseTransformer","owner":"dvlab-research","isFork":false,"description":"A fast and memory-efficient libarary for sparse transformer with varying token numbers (e.g., 3D point cloud).","allTopics":["cuda","transformer","3d-point-cloud","sparse-transformer"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":150,"forksCount":11,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-06T07:16:14.329Z"}},{"type":"Public","name":"Mask-Attention-Free-Transformer","owner":"dvlab-research","isFork":false,"description":"Official Implementation for \"Mask-Attention-Free Transformer for 3D Instance Segmentation\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":10,"starsCount":59,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-06T01:59:11.691Z"}},{"type":"Public","name":"Context-Aware-Consistency","owner":"dvlab-research","isFork":false,"description":"Semi-supervised Semantic Segmentation with Directional Context-aware Consistency (CVPR 2021)","allTopics":["semi-supervised-learning","semantic-segmentation","cvpr2021","semi-supervised-segmentation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":155,"forksCount":19,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-24T05:52:07.799Z"}},{"type":"Public","name":"ProposeReduce","owner":"dvlab-research","isFork":false,"description":"Video Instance Segmentation with a Propose-Reduce Paradigm (ICCV 2021)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":41,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-05T05:40:12.247Z"}},{"type":"Public","name":"PointGroup","owner":"dvlab-research","isFork":false,"description":"PointGroup: Dual-Set Point Grouping for 3D Instance Segmentation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":37,"starsCount":377,"forksCount":81,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-27T09:50:35.337Z"}},{"type":"Public","name":"Ref-NPR","owner":"dvlab-research","isFork":false,"description":"[CVPR 2023] Ref-NPR: Reference-Based Non-PhotoRealistic Radiance Fields","allTopics":["pytorch","image-editing","style-transfer","nerf","stylization","radiance-field"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":118,"forksCount":9,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-07T07:09:14.450Z"}}],"repositoryCount":69,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"dvlab-research repositories"}