Merge pull request ChiaXinLiang#27 from weian312/dev
update bib
ChiaXinLiang authored Oct 6, 2024
2 parents cfa2c53 + 59cde07 commit ae9e8a2
Showing 1 changed file with 22 additions and 49 deletions.
71 changes: 22 additions & 49 deletions MLLM_latex/chapter4/chap4_ref.bib
@@ -36,47 +36,47 @@ @article{ComprehensiveSurvey2024

@article{ManipLLM2024,
title={ManipLLM: Embodied Multimodal Large Language Model for Object ...},
-author={Author Name},
+author={},
journal={CVPR},
year={2024},
url={https://openaccess.thecvf.com/content/CVPR2024/papers/Li_ManipLLM_Embodied_Multimodal_Large_Language_Model_for_Object-Centric_Robotic_Manipulation_CVPR_2024_paper.pdf}
}

@article{OverviewLMM2024,
title={An Overview of Large Multi-modal Models (LMMs): Part 1},
-author={Author Name},
+author={},
journal={Medium},
year={2024},
url={https://medium.com/@baicenxiao/introduction-to-the-large-multi-modal-models-llms-part-1-07de7e9caf40}
}

@article{ResearchDevelopment2023,
title={ManipLLM: Embodied Multimodal Large Language Model for Object ...},
-author={Author Name},
+author={},
journal={arXiv},
year={2023},
url={https://arxiv.org/html/2312.16217v1}
}

@article{MaskedVisionLanguage2023,
title={Masked Vision and Language Pre-training with Unimodal and Multimodal Contrastive Losses for Medical Visual Question Answering},
-author={Author Name},
+author={},
journal={arXiv},
year={2023},
url={https://arxiv.org/abs/2307.05314}
}

@article{RAMMBiomedicalVQA2023,
title={RAMM: Retrieval-augmented Biomedical Visual Question Answering},
-author={Author Name},
+author={},
journal={arXiv},
year={2023},
url={https://arxiv.org/abs/2303.00534}
}

@article{MedicalVQA2023,
title={Masked Vision and Language Pre-training with Unimodal and Multimodal Contrastive Losses for Medical Visual Question Answering},
-author={Author Name},
+author={},
journal={MICCAI},
year={2023},
url={https://conferences.miccai.org/2023/papers/401-Paper2138.html}
@@ -91,156 +91,129 @@ @misc{pengfeiliHEU2023

@article{VisoAI2024,
title={Vision Language Models: Exploring Multimodal AI},
-author={Author Name},
+author={},
journal={Viso.ai},
year={2024},
url={https://viso.ai/deep-learning/vision-language-models/}
}

@article{FlexibleVLP2023,
title={Fusion or Defusion? Flexible Vision-and-Language Pre-Training},
-author={Author Name},
+author={},
journal={ACL Anthology},
year={2023},
url={https://aclanthology.org/2023.findings-acl.316}
}

@article{HeterogeneityFederatedVLP2024,
title={Mitigating Heterogeneity in Federated Multimodal Learning with Biomedical VLP},
-author={Author Name},
+author={},
journal={arXiv},
year={2024},
url={https://arxiv.org/html/2404.03854v1}
}

@article{FeedbackModalSearch2024,
title={Feedback-based Modal Mutual Search for Attacking Vision-Language Models},
-author={Author Name},
+author={},
journal={arXiv},
year={2024},
url={https://arxiv.org/html/2409.06726v1}
}

@article{TenyksBlogger2024,
title={Multimodal Large Language Models (MLLMs) transforming computer vision},
-author={Tenyks Blogger},
+author={},
journal={Medium},
year={2024},
url={https://medium.com/@tenyks_blogger/multimodal-large-language-models-mllms-transforming-computer-vision-76d3c5dd267f}
}

@article{EfficientMLLMs2024,
title={Efficient Multimodal Large Language Models: A Survey},
-author={Author Name},
+author={},
journal={arXiv},
year={2024},
url={https://arxiv.org/html/2405.10739v1}
}

@article{MultiwayAdapter2024,
title={Multiway-Adapter: Adapting Multimodal Large Language Models for Specific Tasks},
-author={Author Name},
+author={},
journal={IEEE Xplore},
year={2024},
url={https://ieeexplore.ieee.org/document/10446792}
}

@article{RobustInstructionTuning2024,
title={Towards Robust Instruction Tuning on Multimodal Large Language Models},
-author={Author Name},
+author={},
journal={arXiv},
year={2024},
url={https://arxiv.org/html/2402.14492v2}
}

@article{CrossModalTasks2024,
title={Cross-Modal Tasks in Multimodal Large Language Models},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@article{FewShotZeroShotLearning2024,
title={Few-Shot and Zero-Shot Learning in MLLMs},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@article{FewShotLearning2024,
title={Few-Shot Learning in Multimodal Large Language Models},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@article{ZeroShotLearning2024,
title={Zero-Shot Learning with CLIP},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@article{TransferLearning2024,
title={Transfer Learning in Multimodal Large Language Models},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@article{InstructionTuning2024,
title={Instruction Tuning for Multimodal Large Language Models},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@article{NaturalLanguageInstructions2024,
title={Natural Language Instructions for MLLMs},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@article{MultimodalInstructionTuning2024,
title={Multimodal Instruction Tuning},
-author={Author Name},
+author={},
journal={Journal Name},
year={2024},
url={URL}
}

@misc{LarkSuite,
title = {Instruction Tuning},
howpublished = {\url{https://www.larksuite.com/en_us/topics/ai-glossary/instruction-tuning}},
year = {2023},
note = {Accessed: 2024-09-28}
}

@misc{OpenAICommunity,
title = {Can I fine-tune the model without the prompt and answer for the system role?},
howpublished = {\url{https://community.openai.com/t/can-i-fine-tune-the-model-without-the-prompt-and-answer-for-the-system-role/550580}},
year = {2023},
note = {Accessed: 2024-09-28}
}

@misc{RohitAggarwal,
author = {Rohit Aggarwal},
title = {AI systems with applications spanning software development, recruitment, and content creation},
howpublished = {\url{https://eccles.utah.edu/team/rohit-aggarwal/}},
note = {Accessed: 2024-09-28}
}

@misc{RedditExperience,
title = {My experience on starting with fine tuning LLMs with custom data},
howpublished = {\url{https://www.reddit.com/r/LocalLLaMA/comments/14vnfh2/my_experience_on_starting_with_fine_tuning_llms/}},
year = {2023},
note = {Accessed: 2024-09-28}
}
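
The diff above clears the "Author Name" placeholders, but several entries still carry stub values (journal={Journal Name}, url={URL}) and every touched entry now has an empty author field. A minimal follow-up check, sketched here under assumptions: the file path is taken from this diff, the placeholder list is inferred from the entries shown, and find_stub_fields and PLACEHOLDERS are hypothetical names, not part of the repository.

# Hypothetical helper, not part of this commit: scan a .bib file and
# report any field whose value is empty or a known placeholder, so the
# remaining stubs can be filled in before the bibliography is compiled.
import re
import sys

# Placeholder values inferred from the entries visible in this diff.
PLACEHOLDERS = {"", "Author Name", "Journal Name", "URL"}

# Matches a single-line field such as:  author={...},
FIELD_RE = re.compile(r"^\s*(\w+)\s*=\s*\{(.*)\},?\s*$")
# Matches an entry header such as:  @article{ManipLLM2024,
ENTRY_RE = re.compile(r"^@\w+\{([^,]+),")

def find_stub_fields(path):
    """Yield (entry_key, field_name, value) for placeholder-valued fields."""
    key = None
    with open(path, encoding="utf-8") as bib:
        for line in bib:
            entry = ENTRY_RE.match(line)
            if entry:
                key = entry.group(1)
                continue
            field = FIELD_RE.match(line)
            if field and field.group(2).strip() in PLACEHOLDERS:
                yield key, field.group(1), field.group(2)

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "MLLM_latex/chapter4/chap4_ref.bib"
    for key, name, value in find_stub_fields(path):
        print(f"{key}: {name} = {{{value}}}")

Run it as  python check_bib.py MLLM_latex/chapter4/chap4_ref.bib ; each reported line is an entry key plus a field that still needs a real value.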
