Generate: add min_p sampling (huggingface#30639)
* min_p

* more relaxed test to avoid numerical issues

* Update src/transformers/generation/logits_process.py

Co-authored-by: menhguin <minh1228@gmail.com>

* Update src/transformers/generation/configuration_utils.py

Co-authored-by: menhguin <minh1228@gmail.com>

* docstring clarifications

* PR comments

* Update tests/generation/test_logits_process.py

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* make fixup

---------

Co-authored-by: menhguin <minh1228@gmail.com>
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
3 people authored and zucchini-nlp committed May 10, 2024
1 parent 824737d commit 31703ab
Showing 8 changed files with 147 additions and 0 deletions.
3 changes: 3 additions & 0 deletions docs/source/en/internal/generation_utils.md
@@ -167,6 +167,9 @@ generation.
[[autodoc]] MinNewTokensLengthLogitsProcessor
- __call__

[[autodoc]] MinPLogitsWarper
- __call__

[[autodoc]] NoBadWordsLogitsProcessor
- __call__

2 changes: 2 additions & 0 deletions src/transformers/__init__.py
@@ -1222,6 +1222,7 @@
"MaxTimeCriteria",
"MinLengthLogitsProcessor",
"MinNewTokensLengthLogitsProcessor",
"MinPLogitsWarper",
"NoBadWordsLogitsProcessor",
"NoRepeatNGramLogitsProcessor",
"PhrasalConstraint",
@@ -5777,6 +5778,7 @@
MaxTimeCriteria,
MinLengthLogitsProcessor,
MinNewTokensLengthLogitsProcessor,
MinPLogitsWarper,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PhrasalConstraint,
2 changes: 2 additions & 0 deletions src/transformers/generation/__init__.py
@@ -64,6 +64,7 @@
"LogitsWarper",
"MinLengthLogitsProcessor",
"MinNewTokensLengthLogitsProcessor",
"MinPLogitsWarper",
"NoBadWordsLogitsProcessor",
"NoRepeatNGramLogitsProcessor",
"PrefixConstrainedLogitsProcessor",
@@ -204,6 +205,7 @@
LogitsWarper,
MinLengthLogitsProcessor,
MinNewTokensLengthLogitsProcessor,
MinPLogitsWarper,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
5 changes: 5 additions & 0 deletions src/transformers/generation/configuration_utils.py
@@ -139,6 +139,10 @@ class GenerationConfig(PushToHubMixin):
top_p (`float`, *optional*, defaults to 1.0):
If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
`top_p` or higher are kept for generation.
min_p (`float`, *optional*):
Minimum token probability, which will be scaled by the probability of the most likely token. It must be a
value between 0 and 1. Typical values are in the 0.01-0.2 range, comparable in selectivity to setting
`top_p` in the 0.99-0.8 range (note that `min_p` works on the opposite scale of `top_p`).
typical_p (`float`, *optional*, defaults to 1.0):
Local typicality measures how similar the conditional probability of predicting a target token next is to
the expected conditional probability of predicting a random token next, given the partial text already
@@ -324,6 +328,7 @@ def __init__(self, **kwargs):
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.min_p = kwargs.pop("min_p", None)
self.typical_p = kwargs.pop("typical_p", 1.0)
self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0)
self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0)
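For orientation, here is a minimal sketch (not part of the diff) of how `min_p` could be set through `GenerationConfig` once this change lands; the checkpoint name simply mirrors the doctest further below, and the exact sampling settings are illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")

# `min_p` can live in a GenerationConfig or be passed directly to `generate()`
generation_config = GenerationConfig(
    do_sample=True, min_p=0.1, max_new_tokens=20, pad_token_id=tokenizer.eos_token_id
)

inputs = tokenizer("A sequence: 1, 2", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=generation_config)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```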
77 changes: 77 additions & 0 deletions src/transformers/generation/logits_process.py
@@ -520,6 +520,83 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
return scores_processed


class MinPLogitsWarper(LogitsWarper):
"""
[`LogitsWarper`] that performs min-p, i.e. keeps all tokens that are above a minimum probability, scaled by the
probability of the most likely token. As a result, the filter becomes more aggressive in the presence of
high-probability tokens, which is a sign of a confident output that we shouldn't deviate from.

Often used together with [`TemperatureLogitsWarper`]. Used as an alternative to [`TopPLogitsWarper`] and
[`TopKLogitsWarper`].

Created by @menhguin and @kalomaze (github handles). Code adapted from [this external PR](https://github.com/oobabooga/text-generation-webui/pull/4449/files).

Args:
min_p (`float`):
Minimum token probability, which will be scaled by the probability of the most likely token. It must be a
value between 0 and 1. Typical values are in the 0.01-0.2 range, comparable in selectivity to setting
`top_p` in the 0.99-0.8 range (note that `min_p` works on the opposite scale of `top_p`).
filter_value (`float`, *optional*, defaults to -inf):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.

Examples:
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
>>> set_seed(1)
>>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
>>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt")
>>> # With sampling, the output is unexpected -- sometimes too unexpected.
>>> outputs = model.generate(**inputs, do_sample=True)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
A sequence: 1, 2, 3 | < 4 (left-hand pointer) ;
<BLANKLINE>
<BLANKLINE>
>>> # With `min_p` sampling, the output gets restricted to high-probability tokens.
>>> # Pro tip: In practice, LLMs use `min_p` in the 0.01-0.2 range.
>>> outputs = model.generate(**inputs, do_sample=True, min_p=0.1)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9
```
"""

def __init__(self, min_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not (0 <= min_p <= 1.0):
raise ValueError(f"`min_p` has to be a float in the [0, 1] interval, but is {min_p}")
if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

self.min_p = min_p
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep

def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# Convert logits to probabilities
probs = torch.softmax(scores, dim=-1)
# Get the probability of the top token for each sequence in the batch
top_probs, _ = probs.max(dim=-1, keepdim=True)
# Calculate the actual min_p threshold by scaling min_p with the top token's probability
scaled_min_p = self.min_p * top_probs
# Create a mask for tokens that have a probability less than the scaled min_p
tokens_to_remove = probs < scaled_min_p

sorted_indices = torch.argsort(scores, descending=True, dim=-1)
sorted_indices_to_remove = torch.gather(tokens_to_remove, dim=-1, index=sorted_indices)
# Keep at least min_tokens_to_keep
sorted_indices_to_remove[..., : self.min_tokens_to_keep] = False

indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores_processed = scores.masked_fill(indices_to_remove, self.filter_value)
return scores_processed


class TypicalLogitsWarper(LogitsWarper):
r"""
[`LogitsWarper`] that performs typical decoding. Inspired by how humans use language, it prioritizes tokens whose
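As a quick sanity check on the arithmetic in `__call__` above, the following standalone sketch (not part of the diff) applies the new warper to a hand-built distribution; the numbers are illustrative and assume `MinPLogitsWarper` is importable from the top-level package, as this commit intends:

```python
import torch
from transformers import MinPLogitsWarper

# Toy next-token distribution with probabilities [0.5, 0.3, 0.15, 0.05].
# With min_p=0.2 the scaled threshold is 0.2 * 0.5 = 0.1, so only the 0.05 token is filtered.
scores = torch.log(torch.tensor([[0.5, 0.3, 0.15, 0.05]]))

warper = MinPLogitsWarper(min_p=0.2)
filtered = warper(None, scores)  # input_ids is not used by this warper

print(torch.softmax(filtered, dim=-1))  # the filtered token's probability collapses to 0
```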
4 changes: 4 additions & 0 deletions src/transformers/generation/utils.py
@@ -61,6 +61,7 @@
LogitsProcessorList,
MinLengthLogitsProcessor,
MinNewTokensLengthLogitsProcessor,
MinPLogitsWarper,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
@@ -739,6 +740,9 @@ def _get_logits_warper(
warpers.append(TopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep))
if generation_config.top_p is not None and generation_config.top_p < 1.0:
warpers.append(TopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep))
if generation_config.min_p is not None:
# Applied after temperature scaling (see https://github.com/ggerganov/llama.cpp/pull/3841#issuecomment-2073826084)
warpers.append(MinPLogitsWarper(min_p=generation_config.min_p, min_tokens_to_keep=min_tokens_to_keep))
if generation_config.typical_p is not None and generation_config.typical_p < 1.0:
warpers.append(
TypicalLogitsWarper(mass=generation_config.typical_p, min_tokens_to_keep=min_tokens_to_keep)
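The comment above pins down the ordering: min-p is applied after temperature scaling. A hedged sketch of the equivalent manual composition (illustrative values, not code from this commit):

```python
import torch
from transformers import LogitsProcessorList, MinPLogitsWarper, TemperatureLogitsWarper

warpers = LogitsProcessorList(
    [
        TemperatureLogitsWarper(0.7),  # temperature first ...
        MinPLogitsWarper(min_p=0.1),   # ... then min-p, matching the ordering above
    ]
)

logits = torch.randn(1, 100)  # fake next-token logits over a 100-token vocabulary
filtered = warpers(None, logits)
print((filtered != -float("inf")).sum().item(), "tokens survive the filter")
```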
7 changes: 7 additions & 0 deletions src/transformers/utils/dummy_pt_objects.py
@@ -317,6 +317,13 @@ def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])


class MinPLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]

def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])


class NoBadWordsLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]

47 changes: 47 additions & 0 deletions tests/generation/test_logits_process.py
@@ -42,6 +42,7 @@
LogitsProcessorList,
MinLengthLogitsProcessor,
MinNewTokensLengthLogitsProcessor,
MinPLogitsWarper,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
@@ -304,6 +305,52 @@ def test_top_p_dist_warper(self):
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2])

def test_min_p_dist_warper(self):
input_ids = None
vocab_size = 10
batch_size = 2

# create distribution and take log (inverse to Softmax as taken in MinPLogitsWarper)
dist = torch.log(
torch.tensor(
[
[0.9, 0.0274, 0.047, 0.0274], # two tokens should be kept (0.047 > 0.9*0.05=0.045)
[0.15, 0.3, 0.3, 0.25], # all should be kept -- no high-probability token
[0.97, 0.01, 0.01, 0.01], # only the first token should be kept
],
device=torch_device,
dtype=torch.float,
)
)

min_p_warp = MinPLogitsWarper(0.05)
filtered_dist = torch.exp(min_p_warp(input_ids, dist))

# exp (-inf) => 0
EXPECTED_FILTERED_DIST = torch.tensor(
[[0.9, 0.0, 0.047, 0.0], [0.15, 0.3, 0.3, 0.25], [0.97, 0.0, 0.0, 0.0]],
device=torch_device,
dtype=torch.float,
)
self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

# processor should not change logits in-place
self.assertFalse(torch.all(min_p_warp(input_ids, dist) == dist))

# check edge cases with negative and extreme logits
ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float) - (vocab_size // 2)
ramp_logits = ramp_logits.unsqueeze(0).repeat(batch_size, 1)

# make ramp_logits more extreme
ramp_logits[1] = ramp_logits[1] * 100.0

# make sure at least 2 tokens are kept
min_p_warp = MinPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
filtered_dist = min_p_warp(input_ids, ramp_logits)

# first batch should keep two tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2])

def test_typical_dist_warper(self):
input_ids = None
vocab_size = 10
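To make the expected tensor in the new test easier to follow, the first row's threshold can be re-derived by hand; this is a standalone illustration, independent of the test code:

```python
import torch

# First test row: min_p = 0.05, probabilities [0.9, 0.0274, 0.047, 0.0274].
probs = torch.tensor([0.9, 0.0274, 0.047, 0.0274])
threshold = 0.05 * probs.max()  # 0.05 * 0.9 = 0.045
kept = probs >= threshold       # tokens strictly below the threshold are filtered

print(kept.tolist())  # [True, False, True, False] -> matches the expected row [0.9, 0.0, 0.047, 0.0]
```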
