From 86d65332f572eb5c8a96e759e4564d38c4978183 Mon Sep 17 00:00:00 2001
From: Brandon Amos
Date: Tue, 23 Apr 2024 07:12:17 -0400
Subject: [PATCH] add AdvPrompter

---
 cv.yaml              |  2 ++
 generate.py          |  4 ++++
 publications/all.bib | 22 ++++++++++++++++++----
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/cv.yaml b/cv.yaml
index ccc465f..a82eb86 100644
--- a/cv.yaml
+++ b/cv.yaml
@@ -249,6 +249,7 @@ all_publications:
   'Christopher Williams': 'https://scholar.google.com/citations?user=AW81mosAAAAJ'
   'Cihan Oguz': 'https://scholar.google.com/citations?user=Z4534DUAAAAJ'
   'Cliff Shaffer': 'https://people.cs.vt.edu/shaffer/'
+  'Chuan Guo': 'https://sites.google.com/view/chuanguo'
   'Colmenarejo': 'https://scholar.google.com/citations?user=0Dkf68EAAAAJ'
   'Daniel DeTone': 'https://danieldetone.com/'
   'Daniel Siewiorek': 'http://www.cs.cmu.edu/~dps/'
@@ -362,6 +363,7 @@ all_publications:
   'Yaron Lipman': 'https://www.wisdom.weizmann.ac.il/~ylipman/'
   'Ying Gao': 'https://www.linkedin.com/in/joelyinggao/'
   'Yoshihisa Abe': 'https://dblp.org/pid/18/1620.html'
+  'Yuandong Tian': 'https://yuandong-tian.com/'
   'Yu Xiao': 'https://scholar.google.com/citations?user=ZeRhyWsAAAAJ'
   'Yuval Tassa': 'https://scholar.google.com/citations?user=CjOTm_4AAAAJ'
   'Zeming Lin': 'https://scholar.google.com/citations?user=ZDjmMuwAAAAJ'
diff --git a/generate.py b/generate.py
index 7d00e8c..cc47237 100755
--- a/generate.py
+++ b/generate.py
@@ -41,6 +41,10 @@ def _get_author_str(immut_author_list):
     authors = authors.replace(r'\"o', 'ö')
     authors = authors.replace(r'\'o', 'ó')
     authors = authors.replace(r"\'\i", 'í')
+    # Strip the advisor/equal-contribution markers used in all.bib.
+    authors = authors.replace("$^{\\dagger}$", '')
+    authors = authors.replace("$^\\dagger$", '')
+    authors = authors.replace("$^*$", '*')
     return authors
diff --git a/publications/all.bib b/publications/all.bib
index 92d5a8d..c3be7dc 100644
--- a/publications/all.bib
+++ b/publications/all.bib
@@ -1,4 +1,18 @@
-@misc{sambharya2023learning,
+@misc{paulus2024advprompter,
+  title={AdvPrompter: Fast Adaptive Adversarial Prompting for LLMs},
+  author={Anselm Paulus* and Arman Zharmagambetov* and Chuan Guo and Brandon Amos$^{\dagger}$ and Yuandong Tian$^{\dagger}$},
+  year={2024},
+  url={https://yuandong-tian.com/papers/co4prompt_llm.pdf},
+  _venue={arXiv},
+  selected={true},
+  abstract={
+While Large Language Models (LLMs) have recently achieved remarkable successes, they are vulnerable to certain jailbreaking attacks that lead to the generation of inappropriate or harmful content. Manual red-teaming requires finding adversarial prompts that cause such jailbreaking, e.g., by appending a suffix to a given instruction, which is inefficient and time-consuming.
+On the other hand, automatic adversarial prompt generation often leads to semantically meaningless attacks that can easily be detected by perplexity-based filters, may require gradient information from the target LLM, or do not scale well due to time-consuming discrete optimization processes over the token space. In this paper, we present a novel method that uses another LLM, called the AdvPrompter, to generate human-readable adversarial prompts in seconds, approximately 800 times faster than existing optimization-based approaches.
+We train the AdvPrompter using a novel algorithm that does not require access to the gradients of the target LLM. This process alternates between two steps: (1) generating high-quality target adversarial suffixes by optimizing the AdvPrompter predictions, and (2) low-rank fine-tuning of the AdvPrompter with the generated adversarial suffixes. The trained AdvPrompter generates suffixes that veil the input instruction without changing its meaning, such that the target LLM is lured to give a harmful response. Experimental results on popular open-source target LLMs show state-of-the-art results on the AdvBench dataset, which also transfer to closed-source black-box LLM APIs. Further, we demonstrate that by fine-tuning on a synthetic dataset generated by AdvPrompter, LLMs can be made more robust against jailbreaking attacks while maintaining performance, i.e., high MMLU scores.
+  }
+}
+
+@misc{sambharya2024learning,
   title={Learning to Warm-Start Fixed-Point Optimization Algorithms},
   author={Rajiv Sambharya and Georgina Hall and Brandon Amos and Bartolomeo Stellato},
   year={2024},
@@ -253,9 +267,9 @@ @misc{zharmagambetov2023landscape
     frequent calls to the optimizer and sparse
     gradients, particularly for combinatorial solvers.
     To address these challenges, we propose
-    using a smooth and learnable **Landscape Surrogate**
-    M instead of f ∘ g . This surrogate can be computed
-    faster than g , provides dense and smooth gradients
+    using a smooth and learnable Landscape Surrogate
+    M instead of composing f with g. This surrogate can be computed
+    faster than g, provides dense and smooth gradients
     during training, can generalize to unseen optimization
     problems, and is efficiently learned via alternating
     optimization. We test our approach
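--

A minimal sketch of what the replacements added to _get_author_str do, applied to this patch's own bib entry. The standalone strip_markers helper is hypothetical (the real logic lives inline in generate.py); only the replace calls mirror the patch:

    # Hypothetical standalone version of the replacements added to
    # _get_author_str in generate.py, for illustration only.
    def strip_markers(authors: str) -> str:
        authors = authors.replace("$^{\\dagger}$", '')  # braced form used in all.bib
        authors = authors.replace("$^\\dagger$", '')    # unbraced form, kept for safety
        authors = authors.replace("$^*$", '*')          # equal-contribution marker -> plain asterisk
        return authors

    authors = "Anselm Paulus* and Brandon Amos$^{\\dagger}$ and Yuandong Tian$^{\\dagger}$"
    print(strip_markers(authors))
    # -> Anselm Paulus* and Brandon Amos and Yuandong Tian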
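A rough sketch of the two-step alternating training loop the AdvPrompter abstract describes, assuming placeholder callables (optimize_suffix, lora_finetune are illustrative names, not the paper's actual API; see https://yuandong-tian.com/papers/co4prompt_llm.pdf for the real algorithm):

    from typing import Callable, List, Tuple

    def train_advprompter(
        advprompter,
        instructions: List[str],
        optimize_suffix: Callable[[object, str], str],
        lora_finetune: Callable[[object, List[Tuple[str, str]]], None],
        num_rounds: int = 10,
    ):
        # Hypothetical sketch of the abstract's alternating scheme.
        for _ in range(num_rounds):
            # Step (1): generate high-quality target adversarial suffixes by
            # optimizing the AdvPrompter's predictions; note that no gradients
            # of the target LLM are required.
            suffixes = [optimize_suffix(advprompter, x) for x in instructions]
            # Step (2): low-rank (LoRA-style) fine-tuning of the AdvPrompter
            # on the generated (instruction, suffix) pairs.
            lora_finetune(advprompter, list(zip(instructions, suffixes)))
        return advprompter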