From ee1b497c985f61d6ec519c39fcfed78a3c6f1d06 Mon Sep 17 00:00:00 2001
From: eric8607242 <eric8607242@users.noreply.github.com>
Date: Sat, 29 Jul 2023 02:10:05 +0800
Subject: [PATCH] llama : support more diverse tokenizers? (#2420)

* supporting more diverse tokenizers

* Update llama.cpp

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
---
 llama.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index a448977..a35c690 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1924,7 +1924,9 @@ struct llama_tokenizer {
             if (token == vocab_.token_to_id.end()) {
                 // output any symbols that did not form tokens as bytes.
                 for (int j = 0; j < (int) symbol.n; ++j) {
-                    llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
+                    // NOTE: old version, before #2420 - not sure what are the implications of this
+                    //llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
+                    llama_vocab::id token_id = vocab_.token_to_id.at(std::string(1, symbol.text[j]));
                     output.push_back(token_id);
                 }
             } else {