
Commit e83a080

server : allow to specify custom prompt for penalty calculation

1 parent d9b33fe

File tree

4 files changed: +54 -3 lines

- common/sampling.cpp
- common/sampling.h
- examples/server/README.md
- examples/server/server.cpp

common/sampling.cpp

Lines changed: 5 additions & 3 deletions

```diff
@@ -148,12 +148,14 @@ llama_token llama_sampling_sample(
     }

     // apply penalties
-    if (!prev.empty()) {
+    const auto& penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev;
+    const int penalty_tokens_used_size = std::min((int)penalty_tokens.size(), penalty_last_n);
+    if (penalty_tokens_used_size) {
         const float nl_logit = logits[llama_token_nl(llama_get_model(ctx_main))];

         llama_sample_repetition_penalties(ctx_main, &cur_p,
-                prev.data() + prev.size() - penalty_last_n,
-                penalty_last_n, penalty_repeat, penalty_freq, penalty_present);
+                penalty_tokens.data() + penalty_tokens.size() - penalty_tokens_used_size,
+                penalty_tokens_used_size, penalty_repeat, penalty_freq, penalty_present);

         if (!penalize_nl) {
             for (size_t idx = 0; idx < cur_p.size; idx++) {
```
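In effect, when `use_penalty_prompt_tokens` is set, the repetition/frequency/presence penalties are computed over the tail of the custom token list instead of the sampling history `prev`. A minimal standalone sketch of the window selection (variable names reused from the diff; the driver and token values are illustrative only):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

using llama_token = int; // stand-in for the llama.cpp typedef

int main() {
    std::vector<llama_token> penalty_tokens = {5, 6, 7, 8, 9}; // custom penalty prompt
    const int penalty_last_n = 3;                              // the repeat_last_n setting

    // same window math as the diff: clamp to however many tokens we actually have
    const int used = std::min((int) penalty_tokens.size(), penalty_last_n);
    const llama_token *window = penalty_tokens.data() + penalty_tokens.size() - used;

    // these are the tokens llama_sample_repetition_penalties would penalize
    for (int i = 0; i < used; i++) {
        std::printf("%d ", window[i]); // prints: 7 8 9
    }
    std::printf("\n");
}
```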

common/sampling.h

Lines changed: 3 additions & 0 deletions

```diff
@@ -35,6 +35,9 @@ typedef struct llama_sampling_params {
     float cfg_scale = 1.f; // how strong is guidance

     std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
+
+    std::vector<llama_token> penalty_prompt_tokens;
+    bool use_penalty_prompt_tokens = false;
 } llama_sampling_params;

 // general sampler context
```

examples/server/README.md

Lines changed: 2 additions & 0 deletions

```diff
@@ -146,6 +146,8 @@ node index.js

     `frequency_penalty`: Repeat alpha frequency penalty (default: 0.0, 0.0 = disabled);

+    `penalty_prompt`: This will replace the `prompt` for the purpose of the penalty evaluation. Can be either `null`, a string or an array of numbers representing tokens (default: `null` = use the original `prompt`).
+
     `mirostat`: Enable Mirostat sampling, controlling perplexity during text generation (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0).

     `mirostat_tau`: Set the Mirostat target entropy, parameter tau (default: 5.0).
```
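For illustration (the prompt text and token ids below are made up), a completion request that keeps the normal `prompt` but computes penalties against an explicit token list might look like:

```json
{
  "prompt": "Building a website can be done in 10 simple steps:",
  "n_predict": 64,
  "repeat_penalty": 1.2,
  "penalty_prompt": [12, 34, 56]
}
```

Passing a string instead makes the server tokenize it first; `null` or omitting the field keeps the documented default of penalizing against the original `prompt`. The effective settings are echoed back as `penalty_prompt_tokens` / `use_penalty_prompt_tokens` in the generation settings (see the server.cpp diff below).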

examples/server/server.cpp

Lines changed: 44 additions & 0 deletions

```diff
@@ -723,6 +723,42 @@ struct llama_server_context
             slot->prompt = "";
         }

+        slot->sparams.penalty_prompt_tokens.clear();
+        slot->sparams.use_penalty_prompt_tokens = false;
+        const auto &penalty_prompt = data.find("penalty_prompt");
+        if (penalty_prompt != data.end())
+        {
+            if (penalty_prompt->is_string())
+            {
+                const auto penalty_prompt_string = penalty_prompt->get<std::string>();
+                auto penalty_tokens = llama_tokenize(model, penalty_prompt_string, false);
+                slot->sparams.penalty_prompt_tokens.swap(penalty_tokens);
+                if (slot->params.n_predict > 0)
+                {
+                    slot->sparams.penalty_prompt_tokens.reserve(slot->sparams.penalty_prompt_tokens.size() + slot->params.n_predict);
+                }
+                slot->sparams.use_penalty_prompt_tokens = true;
+            }
+            else if (penalty_prompt->is_array())
+            {
+                const auto n_tokens = penalty_prompt->size();
+                slot->sparams.penalty_prompt_tokens.reserve(n_tokens + std::max(0, slot->params.n_predict));
+                const int n_vocab = llama_n_vocab(model);
+                for (const auto &penalty_token : *penalty_prompt)
+                {
+                    if (penalty_token.is_number_integer())
+                    {
+                        const auto tok = penalty_token.get<llama_token>();
+                        if (tok >= 0 && tok < n_vocab)
+                        {
+                            slot->sparams.penalty_prompt_tokens.push_back(tok);
+                        }
+                    }
+                }
+                slot->sparams.use_penalty_prompt_tokens = true;
+            }
+        }
+
         slot->sparams.logit_bias.clear();

         if (json_value(data, "ignore_eos", false))
@@ -954,6 +990,12 @@ struct llama_server_context
         slot.generated_text += token_str;
         slot.has_next_token = true;

+        if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1)
+        {
+            // we can change penalty_prompt_tokens because it is always created from scratch each request
+            slot.ctx_sampling->params.penalty_prompt_tokens.push_back(result.tok);
+        }
+
         if (slot.multibyte_pending > 0)
         {
             slot.multibyte_pending -= token_str.size();
@@ -1119,6 +1161,8 @@ struct llama_server_context
             {"repeat_penalty", slot.sparams.penalty_repeat},
             {"presence_penalty", slot.sparams.penalty_present},
             {"frequency_penalty", slot.sparams.penalty_freq},
+            {"penalty_prompt_tokens", slot.sparams.penalty_prompt_tokens},
+            {"use_penalty_prompt_tokens", slot.sparams.use_penalty_prompt_tokens},
             {"mirostat", slot.sparams.mirostat},
             {"mirostat_tau", slot.sparams.mirostat_tau},
             {"mirostat_eta", slot.sparams.mirostat_eta},
```
