Merge branch 'dev' into patch-4
ChiaXinLiang authored Sep 28, 2024
2 parents a690149 + 20f8f57 commit 64154bb
Showing 3 changed files with 121 additions and 10 deletions.
115 changes: 115 additions & 0 deletions MLLM_latex/chapter10/chap10_ref.bib
@@ -0,0 +1,115 @@
@article{einstein,
title={On the Electrodynamics of Moving Bodies},
author={Einstein, Albert},
journal={Annalen der Physik},
volume={322},
pages={891--921},
year={1905}
}

@article{konidena2024ethical,
title={Ethical Considerations in the Development and Deployment of AI Systems},
author={Konidena, Bhargav Kumar and Malaiyappan, Jesu Narkarunai Arasu and Tadimarri, Anish},
journal={European Journal of Technology},
volume={8},
number={2},
pages={41--53},
year={2024}
}

@article{peng2024securing,
title={Securing Large Language Models: Addressing Bias, Misinformation, and Prompt Attacks},
author={Peng, Benji and Chen, Keyu and Li, Ming and Feng, Pohsun and Bi, Ziqian and Liu, Junyu and Niu, Qian},
journal={arXiv preprint arXiv:2409.08087},
year={2024}
}

@misc{boix2022machine,
title={Can machine-learning models overcome biased datasets?},
author={Boix, Xavier and Tenenbaum, Joshua B. and Torralba, Antonio},
year={2022},
howpublished={\url{https://news.mit.edu/2022/machine-learning-biased-data-0221}},
note={MIT News}
}

@misc{pymetrics2022audit,
title={audit-AI: Open Sourced Bias Testing for Generalized Machine Learning Applications},
author={pymetrics},
year={2022},
howpublished={\url{https://github.com/pymetrics/audit-ai}},
note={GitHub repository}
}

@inproceedings{kim2024domain,
title={Domain-Aware Fine-Tuning: Enhancing Neural Network Adaptability},
author={Ha, Seokhyeon and Jung, Sunbeom and Lee, Jungwoo},
booktitle={Proceedings of the 38th AAAI Conference on Artificial Intelligence},
year={2024}
}

@article{zhang2023mitigating,
title={Bias-Aware Low-Rank Adaptation: Mitigating Catastrophic Inheritance of Large Language Models},
author={Zhang, Xingchen and Ren, Zhuosheng and Jiang, Yihong and Zhao, Dongyan and Zhang, Rui},
journal={arXiv preprint arXiv:2408.04556},
year={2024}
}

@article{aquino2023practical,
title={Practical, epistemic and normative implications of algorithmic bias in healthcare artificial intelligence: a qualitative study of multidisciplinary expert perspectives},
author={Aquino, Yves Saint James and Carter, Stacy M and Houssami, Nehmat and Braunack-Mayer, Annette and Win, Khin Than and Degeling, Chris and Wang, Lei and Rogers, Wendy A},
journal={Journal of Medical Ethics},
year={2023},
publisher={Institute of Medical Ethics}
}

@article{he2024emerged,
title={The Emerged Security and Privacy of LLM Agent: A Survey with Case Studies},
author={He, Feng and Zhu, Tianqing and Ye, Dayong and Liu, Bo and Zhou, Wanlei and Yu, Philip S},
journal={arXiv preprint arXiv:2407.19354},
year={2024}
}

@article{friha2024llm,
title={LLM-Based Edge Intelligence: A Comprehensive Survey on Architectures, Applications, Security and Trustworthiness},
author={Friha, Othmane and Ferrag, Mohamed Amine and Kantarci, Burak and Cakmak, Burak and Ozgun, Arda and Ghoualmi-Zine, Nassira},
journal={IEEE Open Journal of the Communications Society},
year={2024},
publisher={IEEE}
}

@article{mccoy2023ethical,
title={Ethical responsibilities for companies that process personal data},
author={McCoy, Matthew S and Allen, Anita L and Kopp, Katharina and Mello, Michelle M and Patil, DJ and Ossorio, Pilar and Joffe, Steven and Emanuel, Ezekiel J},
journal={The American Journal of Bioethics},
volume={23},
number={11},
pages={11--23},
year={2023},
publisher={Taylor \& Francis}
}

@article{chen2024trustworthy,
title={Trustworthy, Responsible, and Safe AI: A Comprehensive Architectural Framework for AI Safety with Challenges and Mitigations},
author={Chen, Chen and Liu, Ziyao and Jiang, Weifeng and Goh, Si Qi and Lam, Kwok-Yan},
journal={arXiv preprint arXiv:2408.12935},
year={2024}
}

@article{ray2023chatgpt,
title={ChatGPT: A comprehensive review on background, applications, key challenges, bias, ethics, limitations and future scope},
author={Ray, Partha Pratim},
journal={Internet of Things and Cyber-Physical Systems},
volume={3},
pages={121--154},
year={2023},
publisher={Elsevier}
}

@incollection{rosenstrauch2023artificial,
title={Artificial Intelligence and Ethics},
author={Rosenstrauch, Doreen and Mangla, Utpal and Gupta, Atul and Masau, Costansia Taikwa},
booktitle={Digital Health Entrepreneurship},
pages={225--239},
year={2023},
publisher={Springer}
}
12 changes: 5 additions & 7 deletions MLLM_latex/chapter10/chapter10.tex
@@ -1,12 +1,9 @@



\chapter{Ethical Considerations and Responsible AI}

As Multimodal Large Language Models (MLLMs), capable of processing and generating content across modalities such as text, images, and audio, continue to advance and shape the AI landscape, it is crucial to address the ethical implications and challenges of their development and deployment to ensure responsible AI practices\cite{konidena2024ethical}.


One of the primary concerns in MLLM development is bias mitigation. These models, trained on vast amounts of data from diverse sources, can inadvertently perpetuate or amplify existing societal biases\cite{peng2024securing}. To combat this, researchers and developers must implement comprehensive bias mitigation strategies\cite{zhang2023mitigating}. These include ensuring diverse and representative training datasets, conducting regular bias\cite{boix2022machine} audits across different modalities\cite{pymetrics2022audit}, and developing bias-aware fine-tuning techniques\cite{kim2024domain}. Additionally, interdisciplinary collaboration with experts from fields such as ethics, sociology, and psychology can provide valuable insights into identifying and addressing potential biases\cite{aquino2023practical}.
One of the primary concerns in MLLM development is bias. Bias refers to systematic errors or unfair preferences in a model's outputs that can reinforce or amplify societal prejudices and stereotypes\cite{peng2024securing}. These biases can manifest in various forms, including gender, racial, or cultural bias, and they pose ethical challenges in the deployment and use of MLLMs across different applications. To combat this, researchers and developers must implement comprehensive bias mitigation strategies\cite{zhang2023mitigating}. These include ensuring diverse and representative training datasets, conducting regular bias audits across different modalities\cite{boix2022machine, pymetrics2022audit}, and developing bias-aware fine-tuning techniques\cite{kim2024domain}. Additionally, interdisciplinary collaboration with experts from fields such as ethics, sociology, and psychology can provide valuable insights into identifying and addressing potential biases\cite{aquino2023practical}.
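
To make the audit step concrete, consider the four-fifths (80\%) rule implemented by tools such as audit-AI\cite{pymetrics2022audit}: flag any demographic group whose favorable-outcome rate falls below 80\% of the highest group's rate. The Python sketch below illustrates the idea only; the group labels, outcomes, and threshold are assumptions for this example, not the audit-AI API or data from any cited work.

\begin{verbatim}
from collections import defaultdict

def four_fifths_check(outcomes, groups, threshold=0.8):
    # Rate of favorable outcomes (1 = favorable, 0 = not) per group.
    totals, favorable = defaultdict(int), defaultdict(int)
    for outcome, group in zip(outcomes, groups):
        totals[group] += 1
        favorable[group] += outcome
    rates = {g: favorable[g] / totals[g] for g in totals}
    best = max(rates.values())
    # A group fails the audit if its rate is below threshold * best.
    return {g: (r, r >= threshold * best) for g, r in rates.items()}

# Hypothetical model decisions tagged by demographic group:
print(four_fifths_check([1, 1, 0, 1, 0, 1, 1, 0],
                        ["A", "A", "A", "B", "B", "B", "B", "B"]))
\end{verbatim}

Such a check is deliberately coarse; in a multimodal setting it would be repeated per modality and per output type (generated text, images, audio) rather than run once.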

Privacy and data protection present another significant challenge in the realm of MLLMs. As these models process and generate increasingly complex and potentially sensitive information, robust measures must be put in place to protect individual privacy\cite{he2024emerged, friha2024llm}. This includes implementing advanced data anonymization techniques, exploring decentralized training methods like federated learning, and applying differential privacy approaches. Furthermore, clear protocols for obtaining consent and managing data rights must be established to ensure ethical handling of personal information used in training these models\cite{mccoy2023ethical}.
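
Of the approaches above, differential privacy is the easiest to illustrate: an aggregate statistic is released with noise calibrated to how much any one individual can change it. The sketch below applies the standard Laplace mechanism to a count query; the records, predicate, and epsilon value are illustrative assumptions, not a production mechanism.

\begin{verbatim}
import numpy as np

def dp_count(records, predicate, epsilon=1.0):
    # A counting query has sensitivity 1: adding or removing one
    # record changes the true count by at most 1, so Laplace noise
    # with scale sensitivity/epsilon yields epsilon-DP.
    true_count = sum(1 for r in records if predicate(r))
    return true_count + np.random.laplace(0.0, 1.0 / epsilon)

# Hypothetical query over training records:
records = ["saw a doctor", "bought groceries",
           "hospital visit", "went hiking"]
print(dp_count(records, lambda r: "doctor" in r or "hospital" in r,
               epsilon=0.5))
\end{verbatim}

Smaller epsilon means more noise and stronger privacy; the same principle, applied to gradients rather than single queries, underlies differentially private training of the models themselves.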

@@ -22,7 +19,7 @@ \chapter{Ethical Considerations and Responsible AI}

\section{Bias Mitigation Strategies}

One of the most pressing ethical concerns surrounding MLLMs is indeed the presence of biases in both the training data and the resulting model outputs. This issue is complex and multifaceted, requiring a comprehensive approach to address effectively. Let's explore this topic in more depth, examining the nature of these biases, their potential impacts, and strategies for mitigation.
One of the most pressing ethical concerns surrounding MLLMs is the presence of biases in both the training data and the resulting model outputs. This issue is complex and multifaceted, requiring a comprehensive approach to address effectively. Let's explore this topic in more depth, examining the nature of these biases, their potential impacts, and strategies for mitigation.

Biases in MLLMs can manifest in various ways, often reflecting and amplifying existing societal prejudices. These biases may be related to race, gender, age, socioeconomic status, cultural background, or other demographic factors. For instance, an MLLM might generate images that reinforce gender stereotypes or produce text that uses racially insensitive language. In multimodal systems, these biases can be particularly insidious as they may appear across different modalities, creating a compounded effect.

@@ -165,5 +162,6 @@ \section{Conclusion}

As MLLMs continue to evolve, ongoing collaboration between researchers, developers, policymakers, and the public will be essential to ensure these powerful tools are used for the betterment of society. By proactively addressing ethical concerns, fostering transparency, and upholding principles of fairness and accountability, we can harness the potential of MLLMs to create a future where AI serves as a force for good, empowering individuals, communities, and societies across the globe.

\printbibliography
\bibliographystyle{plain}
\bibliography{chapter10/chap10_ref}

4 changes: 1 addition & 3 deletions MLLM_latex/chapter4/chap4_ref.bib
@@ -243,6 +243,4 @@ @misc{RedditExperience
howpublished = {\url{https://www.reddit.com/r/LocalLLaMA/comments/14vnfh2/my_experience_on_starting_with_fine_tuning_llms/}},
year = {2023},
note = {Accessed: 2024-09-28}
}


}
