{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"MAmmoTH2","owner":"TIGER-AI-Lab","isFork":false,"description":"Official code for \"MAmmoTH2: Scaling Instructions from the Web\" [NeurIPS 2024]","allTopics":["language","math","reasoning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":115,"forksCount":9,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-29T00:45:18.877Z"}},{"type":"Public","name":"ImagenHub","owner":"TIGER-AI-Lab","isFork":false,"description":"A one-stop library to standardize the inference and evaluation of all the conditional image generation models. (ICLR 2024)","allTopics":["deep-learning","pytorch","image-editing","generative-art","image-generation","diffusion-models","stable-diffusion","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":145,"forksCount":11,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-27T19:43:51.157Z"}},{"type":"Public","name":"MMLU-Pro","owner":"TIGER-AI-Lab","isFork":false,"description":"The code and data for \"MMLU-Pro: A More Robust and Challenging Multi-Task Language Understanding Benchmark\" [NeurIPS 2024]","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":94,"forksCount":14,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-26T14:10:09.300Z"}},{"type":"Public","name":"LLM-AMT","owner":"TIGER-AI-Lab","isFork":false,"description":"This repository contains the code for our paper \"Augmenting Black-box LLMs with Medical Textbooks for Clinical Question Answering\" [EMNLP 2024]","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-21T16:05:22.794Z"}},{"type":"Public","name":"VideoScore","owner":"TIGER-AI-Lab","isFork":false,"description":"official repo for \"VideoScore: Building Automatic Metrics to Simulate Fine-grained Human Feedback for Video Generation\" [EMNLP2024]","allTopics":["language","machine-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":39,"forksCount":1,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,47,0,0,0,0,0,3,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-19T19:25:53.020Z"}},{"type":"Public","name":"Mantis","owner":"TIGER-AI-Lab","isFork":false,"description":"Official code for Paper \"Mantis: Multi-Image Instruction Tuning\"","allTopics":["language","video","vision","mantis","vlm","multimodal","lmm","fuyu","mllm","llava-llama3","multi-image-understanding"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":163,"forksCount":14,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,2,0,29,5,62,31,2,5,9,0,8,1,1,0,2,0,6,2,5,8,8,5,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-09T02:54:09.783Z"}},{"type":"Public","name":"GenAI-Bench","owner":"TIGER-AI-Lab","isFork":false,"description":"Code and Data for \"GenAI Arena: An Open 
Evaluation Platform for Generative Models\" [NeurIPS 2024]","allTopics":["evaluation","diffusion","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-08T08:38:19.746Z"}},{"type":"Public","name":"AnyV2V","owner":"TIGER-AI-Lab","isFork":false,"description":"Code and data for \"AnyV2V: A Tuning-Free Framework For Any Video-to-Video Editing Tasks\"","allTopics":["deep-learning","pytorch","image-editing","video-editing","generative-ai","video-synthesis","image-to-video-generation"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":2,"starsCount":462,"forksCount":34,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-28T18:09:55.582Z"}},{"type":"Public","name":"LongRAG","owner":"TIGER-AI-Lab","isFork":false,"description":"Official repo for \"LongRAG: Enhancing Retrieval-Augmented Generation with Long-context LLMs\".","allTopics":["rag","llm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":172,"forksCount":15,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,25,0,1,0,0,0,0,0,0,2,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-25T20:33:27.677Z"}},{"type":"Public","name":"MAmmoTH","owner":"TIGER-AI-Lab","isFork":false,"description":"Code and data for \"MAmmoTH: Building Math Generalist Models through Hybrid Instruction Tuning\" (ICLR 2024)","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":6,"starsCount":321,"forksCount":44,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-25T03:57:55.271Z"}},{"type":"Public","name":"VideoGenHub","owner":"TIGER-AI-Lab","isFork":false,"description":"A one-stop library to standardize the inference and evaluation of all the conditional video generation models.","allTopics":["deep-learning","pytorch","video-generation","diffusion-models","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":39,"forksCount":7,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-18T15:36:32.268Z"}},{"type":"Public","name":"TIGERScore","owner":"TIGER-AI-Lab","isFork":false,"description":"\"TIGERScore: Towards Building Explainable Metric for All Text Generation Tasks\" [TMLR 2024]","allTopics":["metrics","evaluation","language-model","llm"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":1,"starsCount":27,"forksCount":1,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-02T05:08:02.803Z"}},{"type":"Public","name":"VIEScore","owner":"TIGER-AI-Lab","isFork":false,"description":"Visual Instruction-guided Explainable Metric. 
Code for \"Towards Explainable Metrics for Conditional Image Synthesis Evaluation\" (ACL 2024 main)","allTopics":["computer-vision","image-editing","image-generation","visual-question-answering","gpt4vision"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":22,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-26T02:01:43.634Z"}},{"type":"Public","name":"UniIR","owner":"TIGER-AI-Lab","isFork":false,"description":"Official code for paper \"UniIR: Training and Benchmarking Universal Multimodal Information Retrievers\" (ECCV 2024)","allTopics":["retrieval","language-model"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":94,"forksCount":12,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-13T16:54:19.929Z"}},{"type":"Public","name":"StructLM","owner":"TIGER-AI-Lab","isFork":false,"description":"Code and data for \"StructLM: Towards Building Generalist Models for Structured Knowledge Grounding\" (COLM 2024)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":67,"forksCount":9,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-10T15:38:17.504Z"}},{"type":"Public","name":"ConsistI2V","owner":"TIGER-AI-Lab","isFork":false,"description":"ConsistI2V: Enhancing Visual Consistency for Image-to-Video Generation (TMLR 2024)","allTopics":["video-generation","diffusion-models","video-synthesis","image-to-video-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":202,"forksCount":14,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-01T10:07:31.235Z"}},{"type":"Public","name":"LongICLBench","owner":"TIGER-AI-Lab","isFork":false,"description":"Code and Data for \"Long-context LLMs Struggle with Long In-context Learning\"","allTopics":["large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":88,"forksCount":4,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-01T02:45:40.897Z"}},{"type":"Public","name":"MAP-NEO","owner":"TIGER-AI-Lab","isFork":true,"description":"MAP-NEO Language Model","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":80,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T21:53:44.754Z"}},{"type":"Public","name":"TheoremQA","owner":"TIGER-AI-Lab","isFork":false,"description":"The official repo for \"TheoremQA: A Theorem-driven Question Answering dataset\" (EMNLP 2023)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":17,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T13:39:12.050Z"}},{"type":"Public","name":"Program-of-Thoughts","owner":"TIGER-AI-Lab","isFork":false,"description":"Data and Code for Program of Thoughts (TMLR 2023)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":232,"forksCount":22,"license":"MIT 
License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T02:23:06.284Z"}},{"type":"Public","name":"Blog","owner":"TIGER-AI-Lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Ruby","color":"#701516"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-12T01:04:11.792Z"}},{"type":"Public","name":".github","owner":"TIGER-AI-Lab","isFork":false,"description":"main page","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-16T15:40:06.066Z"}},{"type":"Public","name":"GenAI-Arena","owner":"TIGER-AI-Lab","isFork":false,"description":"Interface for GenAI-Arena","allTopics":["diffusion-models","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":11,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-27T11:05:16.841Z"}}],"repositoryCount":23,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"TIGER-AI-Lab repositories"}