llama : fix tokenizer #2315

Closed · wants to merge 27 commits from fix-2023 into master

Changes from 15 commits

Commits (27)
ac793a2  Fix for #2023 (goerch, Jul 21, 2023)
8c9d1e7  Fix typo (goerch, Jul 21, 2023)
9f055e3  Add missing include (goerch, Jul 22, 2023)
bf665cc  Replace VLA with std::vector (goerch, Jul 22, 2023)
c8ae817  Add possibly missing typename (goerch, Jul 22, 2023)
94a0ee1  More testing of the tokenizer (goerch, Jul 22, 2023)
0e74a72  Added whitespace escaping and unescaping (goerch, Jul 22, 2023)
e6b1a50  Fix for #2310 (goerch, Jul 23, 2023)
dba8369  One more test case... (goerch, Jul 23, 2023)
b97a505  Fix C linkage for llama_token_to_str (goerch, Jul 24, 2023)
81fae1d  Fixing llama_token_to_str for the different sentence_piece token types (goerch, Jul 24, 2023)
281a4b4  Fixing tests (goerch, Jul 24, 2023)
a0d28b2  Remove comment (goerch, Jul 24, 2023)
39c9a3b  Added test cases (goerch, Jul 24, 2023)
fe7508c  Fix review remarks. (goerch, Jul 24, 2023)
8253a53  Fix test (goerch, Jul 24, 2023)
e68580f  Remove llama.cpp.h (goerch, Jul 25, 2023)
3bdf106  Merge branch 'master' into fix-2023 (goerch, Jul 25, 2023)
b4a5461  Resolve merge conflict with grammar stuff. (goerch, Jul 25, 2023)
de41d5e  Fix static declarations (goerch, Jul 26, 2023)
30a0e4c  Fixing function ordering issue (goerch, Aug 6, 2023)
1b54429  Fix tokenizer regression in convert.py and improve CPP interface for … (goerch, Aug 6, 2023)
19e950f  Adding support for Aquila (GPT2?) tokenizer. (goerch, Aug 6, 2023)
bb6a58d  Simplifying an expression. (goerch, Aug 6, 2023)
5d52192  Remove inactive code. (goerch, Aug 6, 2023)
38fbb74  Merge branch 'master' into fix-2023 (goerch, Aug 7, 2023)
f1f85de  Split BPE and SentencePiece vocabularies (goerch, Aug 8, 2023)
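
The final two commits are the most consequential: 19e950f adds a GPT-2-style (Aquila) tokenizer and f1f85de splits the BPE and SentencePiece vocabularies, which implies the loader now has to know which tokenizer family a model's vocabulary comes from. A hypothetical sketch of that distinction (the enum and its names are assumptions for illustration, not code from this PR):

// Hypothetical: the vocabulary split implies a type tag the model loader
// can branch on when choosing a tokenizer implementation.
enum llama_vocab_type {
    LLAMA_VOCAB_TYPE_SPM, // SentencePiece (LLaMA-style)
    LLAMA_VOCAB_TYPE_BPE, // byte-pair encoding (GPT-2-style, e.g. Aquila)
};

The file-by-file diff below reflects the state after the first 15 commits, so later changes such as the removal of llama.cpp.h (e68580f) are not yet visible in it.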
convert.py (2 additions, 13 deletions)

@@ -231,19 +231,8 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) ->
     def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
         tokenizer = self.sentencepiece_tokenizer
         for i in range(tokenizer.vocab_size()):
-            text: bytes
-            if tokenizer.is_unknown(i):
-                text = " \u2047 ".encode("utf-8")
-            elif tokenizer.is_control(i):
-                text = b""
-            elif tokenizer.is_byte(i):
-                piece = tokenizer.id_to_piece(i)
-                if len(piece) != 6:
-                    raise Exception(f"Invalid token: {piece}")
-                byte_value = int(piece[3:-1], 16)
-                text = struct.pack("B", byte_value)
-            else:
-                text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+            piece = tokenizer.id_to_piece(i)
+            text: bytes = piece.encode("utf-8")
             score: float = tokenizer.get_score(i)
             yield text, score
 
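
The deleted branch decoded sentencepiece byte pieces of the form "<0xHH>" into raw bytes at conversion time; with pieces now stored verbatim, that decoding presumably moves to the C++ side when tokens are rendered back to text (compare commit 81fae1d, which fixes llama_token_to_str for the different token types). A minimal loader-side sketch of such a helper — the name and placement are assumptions, not code from this PR:

#include <string>

// Turn a sentencepiece byte piece such as "<0x41>" back into its raw byte.
// Mirrors the shape check the old convert.py performed (len(piece) == 6).
static bool try_byte_piece(const std::string & piece, unsigned char & out) {
    if (piece.size() != 6 || piece.compare(0, 3, "<0x") != 0 || piece[5] != '>') {
        return false;
    }
    out = (unsigned char) std::stoul(piece.substr(3, 2), nullptr, 16);
    return true;
}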
examples/common.cpp (1 addition, 1 deletion)

@@ -564,7 +564,7 @@ std::string gpt_random_prompt(std::mt19937 & rng) {
 // TODO: not great allocating this every time
 std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
     // initialize to prompt numer of chars, since n_tokens <= n_prompt_chars
-    std::vector<llama_token> res(text.size() + (int) add_bos);
+    std::vector<llama_token> res(text.size() + (int) add_bos + 1);
     const int n = llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
     assert(n >= 0);
     res.resize(n);
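
The extra + 1 gives the buffer headroom beyond the old n_tokens <= n_prompt_chars assumption, presumably because the whitespace escaping added in 0e74a72 can emit one more token than there are input characters. A more defensive calling pattern — a sketch, not this PR's code, assuming the negative-return convention llama.h documents for llama_tokenize (a too-small buffer yields minus the required count):

std::vector<llama_token> tokens(text.size() + (int) add_bos + 1);
int n = llama_tokenize(ctx, text.c_str(), tokens.data(), tokens.size(), add_bos);
if (n < 0) {
    // buffer was too small: -n is the number of tokens actually needed
    tokens.resize(-n);
    n = llama_tokenize(ctx, text.c_str(), tokens.data(), tokens.size(), add_bos);
}
tokens.resize(n);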
examples/common.h (1 addition, 0 deletions)

@@ -3,6 +3,7 @@
 #pragma once
 
 #include "llama.h"
+#include "llama.cpp.h"
 
 #include <string>
 #include <vector>
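
The new include suggests the branch temporarily carried a C++-only companion header: commit b97a505 ("Fix C linkage for llama_token_to_str") indicates a std::string-returning function cannot sit behind llama.h's extern "C" boundary, and commit e68580f later removes the header again. A hypothetical reconstruction of its gist — llama.cpp.h itself is not part of this view, so this is an assumption:

// llama.cpp.h (hypothetical reconstruction): C++-only declarations that
// cannot cross the extern "C" boundary because they return std::string.
#pragma once

#include <string>

#include "llama.h"

std::string llama_token_to_str(const struct llama_context * ctx, llama_token token);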
examples/embedding/embedding.cpp (1 addition, 1 deletion)

@@ -67,7 +67,7 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
         fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
         for (int i = 0; i < (int) embd_inp.size(); i++) {
-            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
+            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str());
         }
         fprintf(stderr, "\n");
     }
examples/main/main.cpp (4 additions, 8 deletions)

@@ -196,10 +196,6 @@ int main(int argc, char ** argv) {
 
     // tokenize the prompt
    std::vector<llama_token> embd_inp;
-
-    // Add a space in front of the first character to match OG llama tokenizer behavior
-    params.prompt.insert(0, 1, ' ');
-
    if (params.interactive_first || params.instruct || !params.prompt.empty() || session_tokens.empty()) {
        embd_inp = ::llama_tokenize(ctx, params.prompt, true);
    } else {
@@ -283,22 +279,22 @@ int main(int argc, char ** argv) {
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
-            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
+            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str());
        }
 
        if (ctx_guidance) {
            fprintf(stderr, "\n");
            fprintf(stderr, "%s: negative prompt: '%s'\n", __func__, params.cfg_negative_prompt.c_str());
            fprintf(stderr, "%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
            for (int i = 0; i < (int) guidance_inp.size(); i++) {
-                fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_str(ctx, guidance_inp[i]));
+                fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_str(ctx, guidance_inp[i]).c_str());
            }
        }
 
        if (params.n_keep > 0) {
            fprintf(stderr, "%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
-                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]));
+                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]).c_str());
            }
            fprintf(stderr, "'\n");
        }
@@ -636,7 +632,7 @@ int main(int argc, char ** argv) {
        // display text
        if (input_echo) {
            for (auto id : embd) {
-                printf("%s", llama_token_to_str(ctx, id));
+                printf("%s", llama_token_to_str(ctx, id).c_str());
            }
            fflush(stdout);
        }
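
The first hunk drops the caller-side workaround of prepending a space to imitate the original LLaMA tokenizer; commit 0e74a72 moves that behavior into the tokenizer itself. A minimal sketch of sentencepiece-style whitespace escaping on the input side — an assumed shape of the approach, not this PR's exact code:

// Escape input the way sentencepiece does before matching vocabulary pieces:
// prepend the dummy prefix and replace every space with U+2581, the marker
// character the vocabulary pieces themselves use ("\xe2\x96\x81" in UTF-8).
static std::string escape_whitespace(const std::string & text) {
    std::string result = "\xe2\x96\x81"; // dummy prefix
    for (char c : text) {
        if (c == ' ') {
            result += "\xe2\x96\x81"; // ' ' -> U+2581
        } else {
            result += c;
        }
    }
    return result;
}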
examples/save-load-state/save-load-state.cpp (2 additions, 2 deletions)

@@ -91,7 +91,7 @@ int main(int argc, char ** argv) {
         auto next_token_str = llama_token_to_str(ctx, next_token);
         last_n_tokens_data.push_back(next_token);
 
-        printf("%s", next_token_str);
+        printf("%s", next_token_str.c_str());
         if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
             llama_free(ctx);
@@ -151,7 +151,7 @@ int main(int argc, char ** argv) {
         auto next_token_str = llama_token_to_str(ctx2, next_token);
         last_n_tokens_data.push_back(next_token);
 
-        printf("%s", next_token_str);
+        printf("%s", next_token_str.c_str());
         if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
             llama_free(ctx2);
examples/simple/simple.cpp (2 additions, 2 deletions)

@@ -102,7 +102,7 @@ int main(int argc, char ** argv)
 
    for( auto id : tokens_list )
    {
-        printf( "%s" , llama_token_to_str( ctx , id ) );
+        printf( "%s" , llama_token_to_str( ctx , id ).c_str() );
    }
 
    fflush(stdout);
@@ -162,7 +162,7 @@ int main(int argc, char ** argv)
        }
 
        // Print the new token :
-        printf( "%s" , llama_token_to_str( ctx , new_token_id ) );
+        printf( "%s" , llama_token_to_str( ctx , new_token_id ).c_str() );
        fflush( stdout );
 
        // Push this new token for next evaluation :
examples/train-text-from-scratch/train-text-from-scratch.cpp (6 additions, 5 deletions)

@@ -1,4 +1,5 @@
 #include "ggml.h"
+#include "common.h"
 #include "llama.h"
 #include <unordered_map>
 #include <vector>
@@ -1959,7 +1960,7 @@ void print_matrix(struct ggml_tensor * probs) {
 
 
 void print_token(struct llama_context * ctx, llama_token token) {
-    printf("%s", llama_token_to_str(ctx, token));
+    printf("%s", llama_token_to_str(ctx, token).c_str());
 }
 
 void print_tokens(struct llama_context* ctx, struct ggml_tensor * tokens) {
@@ -2198,17 +2199,17 @@ int tokenize_file(struct llama_context * lctx, const char * filename, std::vector<llama_token> & out) {
        const char * in = buf.data();
        const char * end = buf.data() + buf.size();
        for (int i = 0; i < (int) out.size(); ++i) {
-            const char * s = llama_token_to_str(lctx, out[i]);
-            int len = strlen(s);
+            std::string s = llama_token_to_str(lctx, out[i]);
+            int len = s.length();
            if (in >= end) {
                printf("%s: unexpected end of original text.\n", __func__);
                break;
            }
-            const bool matches = (strncmp(in, s, len) == 0);
+            const bool matches = (strncmp(in, s.c_str(), len) == 0);
            if (matches) {
                in += len;
            } else {
-                printf("%s: mismatch: expected '%s', but got '%s'\n", __func__, std::string(in, len).c_str(), s);
+                printf("%s: mismatch: expected '%s', but got '%s'\n", __func__, std::string(in, len).c_str(), s.c_str());
            }
        }
    }
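
Because tokenize_file re-matches every detokenized piece against the source text, the same idea makes a compact regression test for the tokenizer fix. A sketch under two assumptions — the std::string-returning interface used above, and a tokenizer that is lossless on this input except for a possible dummy-prefix space:

// Round-trip check (sketch, not code from this PR): concatenating the
// detokenized pieces should reproduce the source buffer.
std::string detok;
for (llama_token t : out) {
    detok += llama_token_to_str(lctx, t);
}
if (!detok.empty() && detok[0] == ' ') {
    detok.erase(0, 1); // drop the tokenizer's dummy-prefix space, if any
}
assert(detok == std::string(buf.data(), buf.size()));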