Commit 7f5ff55

server: stop generation at n_ctx_train if n_predict is not set (ggerganov#6638)

* server: cap n_predict if not set to n_ctx_train

* server: fix infinite loop

* server: infinite loop, move in process_token
server: infinite loop: set stop limit to true

* minor: spaces

* minor: spaces

* server: include prompt tokens in the EOS limit
phymbert authored Apr 26, 2024
1 parent 9e4e077 commit 7f5ff55
Showing 1 changed file with 22 additions and 1 deletion.
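
The core of the change is a single guard in token processing: when the client did not set n_predict and self-context extend (group attention) is disabled, generation is forcibly stopped once prompt tokens plus generated tokens reach the model's training context, so a model that never emits an EOS token cannot loop forever. Below is a minimal, standalone C++ sketch of that condition; the names GenState and should_force_stop are hypothetical and do not exist in server.cpp, they only restate the logic shown in the diff that follows.

#include <cstdint>

// Hypothetical, simplified view of the per-slot fields involved in the check.
struct GenState {
    int32_t n_predict;        // any value < 1 means the client did not set it
    int32_t ga_n;             // group-attention factor; 1 means self-context extend is disabled
    int32_t n_prompt_tokens;  // tokens consumed by the prompt
    int32_t n_decoded;        // tokens generated so far
};

// True when generation should be stopped to avoid an EOS-less infinite loop:
// no explicit n_predict, no self-context extend, and prompt + generated tokens
// have reached the model's training context size (n_ctx_train).
static bool should_force_stop(const GenState & s, int32_t n_ctx_train) {
    return s.n_predict < 1
        && s.ga_n == 1
        && s.n_prompt_tokens + s.n_decoded >= n_ctx_train;
}

When the real check fires, the server marks the slot as truncated, sets the stop-by-limit flag, and clears has_next_token, as the first hunk below shows.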
examples/server/server.cpp (22 additions, 1 deletion)
@@ -1207,6 +1207,27 @@ struct server_context {
     LOG_VERBOSE("eos token found", {});
 }

+auto n_ctx_train = llama_n_ctx_train(model);
+if (slot.params.n_predict < 1 && slot.ga_n == 1
+        && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
+    LOG_WARNING("n_predict is not set and self-context extend is disabled."
+                " Limiting generated tokens to n_ctx_train to avoid EOS-less generation infinite loop", {
+        { "id_slot",              slot.id },
+        { "params.n_predict",     slot.params.n_predict },
+        { "slot.n_prompt_tokens", slot.n_prompt_tokens },
+        { "slot.n_decoded",       slot.n_decoded },
+        { "slot.n_predict",       slot.n_predict },
+        { "n_slots",              params.n_parallel },
+        { "slot.n_ctx",           slot.n_ctx },
+        { "n_ctx",                n_ctx },
+        { "n_ctx_train",          n_ctx_train },
+        { "ga_n",                 slot.ga_n },
+    });
+    slot.truncated      = true;
+    slot.stopped_limit  = true;
+    slot.has_next_token = false; // stop prediction
+}
+
 LOG_VERBOSE("next token", {
     {"id_slot", slot.id},
     {"id_task", slot.id_task},
@@ -2141,7 +2162,7 @@ struct server_context {
     });

     // process the created batch of tokens
-    for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
+    for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
         const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);

         for (auto & slot : slots) {
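
The second hunk only touches the loop header, but the surrounding pattern is worth spelling out: the batch is consumed in slices of at most n_batch tokens, with the final slice clamped via std::min so it never runs past the end. The following is a minimal standalone sketch of that chunking pattern; total_tokens and process_chunk are illustrative names, not taken from server.cpp.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for submitting one slice of tokens to the decoder.
static void process_chunk(int32_t first, int32_t count) {
    std::printf("decode tokens [%d, %d)\n", first, first + count);
}

int main() {
    const int32_t total_tokens = 10; // plays the role of batch.n_tokens
    const int32_t n_batch      = 4;  // maximum tokens per decode call

    // Walk the batch in chunks of at most n_batch tokens; the last chunk is
    // clamped so the slice never extends past the end of the batch.
    for (int32_t i = 0; i < total_tokens; i += n_batch) {
        const int32_t n_tokens = std::min(n_batch, total_tokens - i);
        process_chunk(i, n_tokens);
    }
    return 0;
}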
