Skip to content

Commit

Permalink
Update references.bib
Browse files Browse the repository at this point in the history
  • Loading branch information
tianpu2014 authored Sep 29, 2024
1 parent 02897a8 commit a15222f
Showing 1 changed file with 243 additions and 1 deletion.
244 changes: 243 additions & 1 deletion MLLM_latex/chapter10/references.bib
Original file line number Diff line number Diff line change
Expand Up @@ -112,4 +112,246 @@ @incollection{rosenstrauch2023artificial
pages={225--239},
year={2023},
publisher={Springer}
}
}


@article{xu2024survey,
  title         = {A Survey on Multilingual Large Language Models: Corpora, Alignment, and Bias},
  author        = {Xu, Yuemei and Hu, Ling and Zhao, Jiayi and Qiu, Zihan and Ye, Yuqi and Gu, Hanwen},
  journal       = {arXiv preprint arXiv:2404.00929},
  year          = {2024},
  eprint        = {2404.00929},
  archiveprefix = {arXiv},
}

% NOTE(review): @article with no journal field -- BibTeX will warn "empty journal"
% for this entry. The publisher suggests a UPC thesis/report; confirm the actual
% venue and either add journal={...} or switch the entry type (e.g. @phdthesis).
@article{basta2022gender,
title={Gender bias in natural language processing},
author={Basta, Christine Raouf Saad},
year={2022},
publisher={Universitat Polit{\`e}cnica de Catalunya}
}


@article{magesh2024hallucination,
  title         = {Hallucination-Free? Assessing the Reliability of Leading {AI} Legal Research Tools},
  author        = {Magesh, Varun and Surani, Faiz and Dahl, Matthew and Suzgun, Mirac and Manning, Christopher D and Ho, Daniel E},
  journal       = {arXiv preprint arXiv:2405.20362},
  year          = {2024},
  eprint        = {2405.20362},
  archiveprefix = {arXiv},
}

@article{cegin2024effects,
  title         = {Effects of diversity incentives on sample diversity and downstream model performance in {LLM}-based text augmentation},
  author        = {Cegin, Jan and Pecher, Branislav and Simko, Jakub and Srba, Ivan and Bielikova, Maria and Brusilovsky, Peter},
  journal       = {arXiv preprint arXiv:2401.06643},
  year          = {2024},
  eprint        = {2401.06643},
  archiveprefix = {arXiv},
}

@article{lin2024investigating,
  title         = {Investigating Bias in {LLM}-Based Bias Detection: Disparities between {LLMs} and Human Perception},
  author        = {Lin, Luyang and Wang, Lingzhi and Guo, Jinsong and Wong, Kam-Fai},
  journal       = {arXiv preprint arXiv:2403.14896},
  year          = {2024},
  eprint        = {2403.14896},
  archiveprefix = {arXiv},
}

@article{owens2024multi,
  title         = {A {Multi-LLM} Debiasing Framework},
  author        = {Owens, Deonna M and Rossi, Ryan A and Kim, Sungchul and Yu, Tong and Dernoncourt, Franck and Chen, Xiang and Zhang, Ruiyi and Gu, Jiuxiang and Deilamsalehy, Hanieh and Lipka, Nedim},
  journal       = {arXiv preprint arXiv:2409.13884},
  year          = {2024},
  eprint        = {2409.13884},
  archiveprefix = {arXiv},
}

@article{patil2024review,
  title     = {A review of current trends, techniques, and challenges in large language models ({LLMs})},
  author    = {Patil, Rajvardhan and Gudivada, Venkat},
  journal   = {Applied Sciences},
  volume    = {14},
  number    = {5},
  pages     = {2074},
  year      = {2024},
  publisher = {MDPI},
}

@article{jiao2024navigating,
  title         = {Navigating {LLM} ethics: Advancements, challenges, and future directions},
  author        = {Jiao, Junfeng and Afroogh, Saleh and Xu, Yiming and Phillips, Connor},
  journal       = {arXiv preprint arXiv:2406.18841},
  year          = {2024},
  eprint        = {2406.18841},
  archiveprefix = {arXiv},
}

@article{poulain2024bias,
  title         = {Bias patterns in the application of {LLMs} for clinical decision support: A comprehensive study},
  author        = {Poulain, Raphael and Fayyaz, Hamed and Beheshti, Rahmatollah},
  journal       = {arXiv preprint arXiv:2404.15149},
  year          = {2024},
  eprint        = {2404.15149},
  archiveprefix = {arXiv},
}

@article{chen2023ai,
  title     = {{AI} fairness in data management and analytics: A review on challenges, methodologies and applications},
  author    = {Chen, Pu and Wu, Linna and Wang, Lei},
  journal   = {Applied Sciences},
  volume    = {13},
  number    = {18},
  pages     = {10258},
  year      = {2023},
  publisher = {MDPI},
}

@article{mehrabi2021survey,
  title     = {A survey on bias and fairness in machine learning},
  author    = {Mehrabi, Ninareh and Morstatter, Fred and Saxena, Nripsuta and Lerman, Kristina and Galstyan, Aram},
  journal   = {{ACM} Computing Surveys ({CSUR})},
  volume    = {54},
  number    = {6},
  pages     = {1--35},
  year      = {2021},
  publisher = {ACM},
  address   = {New York, NY, USA},
}

% NOTE(review): the literal "|" in this title renders incorrectly under OT1 font
% encoding; consider "---" or {\textbar} -- confirm against the arXiv title.
@article{tripathi2024insaaf,
  title         = {{InSaAF}: Incorporating Safety through Accuracy and Fairness| Are {LLMs} ready for the {Indian} Legal Domain?},
  author        = {Tripathi, Yogesh and Donakanti, Raghav and Girhepuje, Sahil and Kavathekar, Ishan and Vedula, Bhaskara Hanuma and Krishnan, Gokul S and Goyal, Shreya and Goel, Anmol and Ravindran, Balaraman and Kumaraguru, Ponnurangam},
  journal       = {arXiv preprint arXiv:2402.10567},
  year          = {2024},
  eprint        = {2402.10567},
  archiveprefix = {arXiv},
}

@article{lee2024life,
  title         = {The Life Cycle of Large Language Models: A Review of Biases in Education},
  author        = {Lee, Jinsook and Hicke, Yann and Yu, Renzhe and Brooks, Christopher and Kizilcec, Ren{\'e} F},
  journal       = {arXiv preprint arXiv:2407.11203},
  year          = {2024},
  eprint        = {2407.11203},
  archiveprefix = {arXiv},
}

@inproceedings{brown2022does,
  title     = {What does it mean for a language model to preserve privacy?},
  author    = {Brown, Hannah and Lee, Katherine and Mireshghallah, Fatemehsadat and Shokri, Reza and Tram{\`e}r, Florian},
  booktitle = {Proceedings of the 2022 {ACM} Conference on Fairness, Accountability, and Transparency},
  pages     = {2280--2292},
  year      = {2022},
}

@article{yao2024survey,
  title     = {A survey on large language model ({LLM}) security and privacy: The good, the bad, and the ugly},
  author    = {Yao, Yifan and Duan, Jinhao and Xu, Kaidi and Cai, Yuanfang and Sun, Zhibo and Zhang, Yue},
  journal   = {High-Confidence Computing},
  pages     = {100211},
  year      = {2024},
  publisher = {Elsevier},
}

@inproceedings{pan2020privacy,
  author       = {Pan, Xudong and Zhang, Mi and Ji, Shouling and Yang, Min},
  title        = {Privacy risks of general-purpose language models},
  booktitle    = {2020 IEEE Symposium on Security and Privacy (SP)},
  year         = {2020},
  pages        = {1314--1331},
  organization = {IEEE},
}

@article{weidinger2021ethical,
  title         = {Ethical and social risks of harm from language models},
  author        = {Weidinger, Laura and Mellor, John and Rauh, Maribeth and Griffin, Conor and Uesato, Jonathan and Huang, Po-Sen and Cheng, Myra and Glaese, Mia and Balle, Borja and Kasirzadeh, Atoosa and others},
  journal       = {arXiv preprint arXiv:2112.04359},
  year          = {2021},
  eprint        = {2112.04359},
  archiveprefix = {arXiv},
}

% NOTE(review): exact duplicate of @inproceedings{brown2022does} defined earlier
% in this file -- a repeated citation key is a BibTeX error. The leading "@" has
% been removed below so BibTeX ignores this copy while the data stays visible.
inproceedings{brown2022does,
  title     = {What does it mean for a language model to preserve privacy?},
  author    = {Brown, Hannah and Lee, Katherine and Mireshghallah, Fatemehsadat and Shokri, Reza and Tram{\`e}r, Florian},
  booktitle = {Proceedings of the 2022 ACM conference on fairness, accountability, and transparency},
  pages     = {2280--2292},
  year      = {2022}
}

@article{zhang2024right,
  author    = {Zhang, Dawen and Finckenberg-Broman, Pamela and Hoang, Thong and Pan, Shidong and Xing, Zhenchang and Staples, Mark and Xu, Xiwei},
  title     = {Right to be forgotten in the era of large language models: Implications, challenges, and solutions},
  journal   = {AI and Ethics},
  year      = {2024},
  pages     = {1--10},
  publisher = {Springer},
}

@inproceedings{weidinger2022taxonomy,
  title     = {Taxonomy of risks posed by language models},
  author    = {Weidinger, Laura and Uesato, Jonathan and Rauh, Maribeth and Griffin, Conor and Huang, Po-Sen and Mellor, John and Glaese, Amelia and Cheng, Myra and Balle, Borja and Kasirzadeh, Atoosa and others},
  booktitle = {Proceedings of the 2022 {ACM} Conference on Fairness, Accountability, and Transparency},
  pages     = {214--229},
  year      = {2022},
}

% NOTE(review): exact duplicate of @article{weidinger2021ethical} defined earlier
% in this file -- a repeated citation key is a BibTeX error. The leading "@" has
% been removed below so BibTeX ignores this copy while the data stays visible.
article{weidinger2021ethical,
  title   = {Ethical and social risks of harm from language models},
  author  = {Weidinger, Laura and Mellor, John and Rauh, Maribeth and Griffin, Conor and Uesato, Jonathan and Huang, Po-Sen and Cheng, Myra and Glaese, Mia and Balle, Borja and Kasirzadeh, Atoosa and others},
  journal = {arXiv preprint arXiv:2112.04359},
  year    = {2021}
}

@article{kibriya2024privacy,
  author    = {Kibriya, Hareem and Khan, Wazir Zada and Siddiqa, Ayesha and Khan, Muhammad Khurrum},
  title     = {Privacy issues in Large Language Models: A survey},
  journal   = {Computers and Electrical Engineering},
  year      = {2024},
  volume    = {120},
  pages     = {109698},
  publisher = {Elsevier},
}

@incollection{phattanaviroj2024data,
  author    = {Phattanaviroj, Thanaporn and Moslehpour, Massoud and Walawalkar, Ankita Manohar},
  title     = {Data Ethics and Privacy},
  booktitle = {Challenges in Large Language Model Development and AI Ethics},
  year      = {2024},
  pages     = {321--353},
  publisher = {IGI Global},
}

@article{sanderson2023ai,
  title     = {{AI} ethics principles in practice: Perspectives of designers and developers},
  author    = {Sanderson, Conrad and Douglas, David and Lu, Qinghua and Schleiger, Emma and Whittle, Jon and Lacey, Justine and Newnham, Glenn and Hajkowicz, Stefan and Robinson, Cathy and Hansen, David},
  journal   = {IEEE Transactions on Technology and Society},
  volume    = {4},
  number    = {2},
  pages     = {171--187},
  year      = {2023},
  publisher = {IEEE},
}

@article{singh2024whispered,
  title     = {Whispered tuning: Data privacy preservation in fine-tuning {LLMs} through differential privacy},
  author    = {Singh, Tanmay and Aditya, Harshvardhan and Madisetti, Vijay K and Bahga, Arshdeep},
  journal   = {Journal of Software Engineering and Applications},
  volume    = {17},
  number    = {1},
  pages     = {1--22},
  year      = {2024},
  publisher = {Scientific Research Publishing},
}

@article{charles2024fine,
  title         = {Fine-Tuning Large Language Models with User-Level Differential Privacy},
  author        = {Charles, Zachary and Ganesh, Arun and McKenna, Ryan and McMahan, H Brendan and Mitchell, Nicole and Pillutla, Krishna and Rush, Keith},
  journal       = {arXiv preprint arXiv:2407.07737},
  year          = {2024},
  eprint        = {2407.07737},
  archiveprefix = {arXiv},
}

@inproceedings{kuang2024federatedscope,
  title     = {{FederatedScope-LLM}: A comprehensive package for fine-tuning large language models in federated learning},
  author    = {Kuang, Weirui and Qian, Bingchen and Li, Zitao and Chen, Daoyuan and Gao, Dawei and Pan, Xuchen and Xie, Yuexiang and Li, Yaliang and Ding, Bolin and Zhou, Jingren},
  booktitle = {Proceedings of the 30th {ACM} {SIGKDD} Conference on Knowledge Discovery and Data Mining},
  pages     = {5260--5271},
  year      = {2024},
}

% NOTE(review): the original auto-export had pages={2024--06}, which is the
% medRxiv posting date (June 2024), not a page range; moved to a note field.
@article{wiest2024anonymizing,
  title     = {Anonymizing medical documents with local, privacy preserving large language models: The {LLM-Anonymizer}},
  author    = {Wiest, Isabella C and Lessmann, Marie-Elisabeth and Wolf, Fabian and Ferber, Dyke and Van Treeck, Marko and Zhu, Jiefu and Ebert, Matthias P and Westphalen, Christoph Benedikt and Wermke, Martin and Kather, Jakob Nikolas},
  journal   = {medRxiv},
  year      = {2024},
  note      = {Preprint, posted June 2024},
  publisher = {Cold Spring Harbor Laboratory Press},
}

@article{li2024llm,
  title         = {{LLM-PBE}: Assessing Data Privacy in Large Language Models},
  author        = {Li, Qinbin and Hong, Junyuan and Xie, Chulin and Tan, Jeffrey and Xin, Rachel and Hou, Junyi and Yin, Xavier and Wang, Zhun and Hendrycks, Dan and Wang, Zhangyang and others},
  journal       = {arXiv preprint arXiv:2408.12787},
  year          = {2024},
  eprint        = {2408.12787},
  archiveprefix = {arXiv},
}

0 comments on commit a15222f

Please sign in to comment.