16ffc013c6
* [WIP, broken] Importer for GPTQ quantized LLaMA models

Based on: https://github.com/qwopqwop200/GPTQ-for-LLaMa

Current status: Something is busted. The output starts out decent, but quickly degrades into gibberish. This doesn't happen with either the original GPTQ-for-LLaMa using the same weights, or llama.cpp when using weights quantized by its own quantizer. Is there a bug in the conversion script that somehow only comes into play with a large context size?

I did notice one potential issue. It's clearly not the main cause of the gibberish, since it doesn't happen when using q4_1 weights quantized by llama.cpp itself, but it seems concerning. When doing a matrix multiplication of f16 * f32 => f32 or q4_1 * f32 => f32, at least when the multiplication is not done with BLAS, the intermediate results are stored in the smaller format rather than f32. This seems like an unnecessary waste of precision, especially in the q4_1 case.

I was originally hoping to validate the results by matching the Python implementation's output exactly, but precision and non-associativity issues make this very difficult, including when performing matrix multiplications and, especially, computing norms.

Anyway, design details:

The models being imported store per-layer weights in essentially q4_1 format, although the addend and scale are shared across an entire row rather than every group of 32 weights. This script duplicates the addend and scale to match ggml's expectations, at the cost of wasting some memory.

However, there are two differences which I accommodated by changing the output format (and adding corresponding support to main.cpp) rather than having the script match the existing one:

- The tok_embeddings and output weights (i.e. the weights that aren't per-layer) are f16 instead of q4_1. They could be converted to q4_1, and the impact of the loss of precision would probably be low, but this would rule out exactly matching the Python implementation's output for validation.

- There is no sharding, since the input doesn't have it, and for a CPU-only implementation it seems more useful to avoid having to deal with multiple files.

The new format is differentiated from the existing q4_1 format by changing the 'f16' header flag to a new value, 4. That said, I think a cleaner approach would be to change main.cpp to support loading each tensor with an arbitrary sharding configuration and type rather than hardcoding specific combinations of types. So far I've wasted too much time debugging to try implementing this...

* Add missing permutation. Now it works.

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
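A minimal sketch of the addend/scale duplication the commit message describes, assuming a row of already 4-bit-quantized weights whose single row-wide (scale, addend) pair has to be repeated for every 32-weight group that ggml's q4_1 layout expects. The struct and function names here are illustrative only, not part of the actual converter:

#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative q4_1-style group: 32 weights packed as 16 nibble bytes,
// plus one scale and one addend per group (field order is an assumption).
struct q4_1_group { float scale; float addend; uint8_t quants[16]; };

// Expand one row: copy the same row-wide scale/addend into every 32-weight group.
std::vector<q4_1_group> expand_row(const uint8_t * packed, int n, float scale, float addend) {
    std::vector<q4_1_group> groups(n / 32);
    for (size_t g = 0; g < groups.size(); ++g) {
        groups[g].scale  = scale;   // duplicated, wasting some memory as noted above
        groups[g].addend = addend;
        std::copy(packed + 16*g, packed + 16*(g + 1), groups[g].quants);
    }
    return groups;
}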
1226 lines
45 KiB
C++
#include "ggml.h"
|
|
|
|
#include "utils.h"
|
|
|
|
#include <cassert>
|
|
#include <cinttypes>
|
|
#include <cmath>
|
|
#include <cstdio>
|
|
#include <cstring>
|
|
#include <fstream>
|
|
#include <iostream>
|
|
#include <map>
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
|
|
#include <signal.h>
|
|
#include <unistd.h>
|
|
#elif defined (_WIN32)
|
|
#include <signal.h>
|
|
#endif
|
|
|
|
#if defined (_WIN32)
|
|
#pragma comment(lib,"kernel32.lib")
|
|
extern "C" __declspec(dllimport) void* __stdcall GetStdHandle(unsigned long nStdHandle);
|
|
extern "C" __declspec(dllimport) int __stdcall GetConsoleMode(void* hConsoleHandle, unsigned long* lpMode);
|
|
extern "C" __declspec(dllimport) int __stdcall SetConsoleMode(void* hConsoleHandle, unsigned long dwMode);
|
|
#endif
|
|
|
|
#define ANSI_COLOR_RED "\x1b[31m"
|
|
#define ANSI_COLOR_GREEN "\x1b[32m"
|
|
#define ANSI_COLOR_YELLOW "\x1b[33m"
|
|
#define ANSI_COLOR_BLUE "\x1b[34m"
|
|
#define ANSI_COLOR_MAGENTA "\x1b[35m"
|
|
#define ANSI_COLOR_CYAN "\x1b[36m"
|
|
#define ANSI_COLOR_RESET "\x1b[0m"
|
|
#define ANSI_BOLD "\x1b[1m"
|
|
|
|
static const int EOS_TOKEN_ID = 2;
|
|
|
|
// determine number of model parts based on the dimension
|
|
static const std::map<int, int> LLAMA_N_PARTS = {
|
|
{ 4096, 1 },
|
|
{ 5120, 2 },
|
|
{ 6656, 4 },
|
|
{ 8192, 8 },
|
|
};
|
|
|
|
// default hparams (LLaMA 7B)
|
|
struct llama_hparams {
|
|
int32_t n_vocab = 32000;
|
|
int32_t n_ctx = 512; // this is provided as user input?
|
|
int32_t n_embd = 4096;
|
|
int32_t n_mult = 256;
|
|
int32_t n_head = 32;
|
|
int32_t n_layer = 32;
|
|
int32_t n_rot = 64;
|
|
int32_t f16 = 1;
|
|
};
|
|
|
|
struct llama_layer {
|
|
// normalization
|
|
struct ggml_tensor * attention_norm;
|
|
|
|
// attention
|
|
struct ggml_tensor * wq;
|
|
struct ggml_tensor * wk;
|
|
struct ggml_tensor * wv;
|
|
struct ggml_tensor * wo;
|
|
|
|
// normalization
|
|
struct ggml_tensor * ffn_norm;
|
|
|
|
// ff
|
|
struct ggml_tensor * w1;
|
|
struct ggml_tensor * w2;
|
|
struct ggml_tensor * w3;
|
|
};
|
|
|
|
struct llama_model {
|
|
llama_hparams hparams;
|
|
|
|
struct ggml_tensor * tok_embeddings;
|
|
|
|
struct ggml_tensor * norm;
|
|
struct ggml_tensor * output;
|
|
|
|
std::vector<llama_layer> layers;
|
|
|
|
// key + value memory
|
|
struct ggml_tensor * memory_k;
|
|
struct ggml_tensor * memory_v;
|
|
|
|
//
|
|
struct ggml_context * ctx;
|
|
std::map<std::string, struct ggml_tensor *> tensors;
|
|
};
|
|
|
|
// load the model's weights from a file
|
|
|
|
bool llama_model_load(const std::string & fname, llama_model & model, llama_vocab & vocab, int n_ctx, int n_parts, ggml_type memory_type = GGML_TYPE_F32) {
|
|
fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
|
|
|
|
std::vector<char> f_buf(1024*1024);
|
|
|
|
auto fin = std::ifstream(fname, std::ios::binary);
|
|
fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
|
|
if (!fin) {
|
|
fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
|
|
return false;
|
|
}
|
|
|
|
// verify magic
|
|
{
|
|
uint32_t magic;
|
|
fin.read((char *) &magic, sizeof(magic));
|
|
if (magic == FILE_MAGIC_UNVERSIONED) {
|
|
fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
|
|
__func__, fname.c_str());
|
|
return false;
|
|
}
|
|
if (magic != FILE_MAGIC) {
|
|
fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
|
|
return false;
|
|
}
|
|
|
|
uint32_t format_version;
|
|
fin.read((char *) &format_version, sizeof(format_version));
|
|
|
|
if (format_version != FILE_VERSION) {
|
|
fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
|
|
__func__, fname.c_str(), format_version, FILE_VERSION);
|
|
return false;
|
|
}
|
|
}
|
|
|
|
int n_ff = 0;
|
|
|
|
// load hparams
|
|
{
|
|
auto & hparams = model.hparams;
|
|
|
|
fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
|
|
//fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
|
|
fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
|
|
fin.read((char *) &hparams.n_mult, sizeof(hparams.n_mult));
|
|
fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
|
|
fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
|
|
fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
|
|
fin.read((char *) &hparams.f16, sizeof(hparams.f16));
|
|
|
|
hparams.n_ctx = n_ctx;
|
|
|
|
n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
|
|
|
|
if (n_parts < 1) {
|
|
n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
|
|
}
|
|
|
|
// temp warning to tell the user to use "--n_parts"
|
|
if (hparams.f16 == 4 && n_parts != 1) {
|
|
fprintf(stderr, "%s: GPTQ model detected - are you sure n_parts should be %d? we normally expect it to be 1\n", __func__, n_parts);
|
|
fprintf(stderr, "%s: use '--n_parts 1' if necessary\n", __func__);
|
|
}
|
|
|
|
fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab);
|
|
fprintf(stderr, "%s: n_ctx = %d\n", __func__, hparams.n_ctx);
|
|
fprintf(stderr, "%s: n_embd = %d\n", __func__, hparams.n_embd);
|
|
fprintf(stderr, "%s: n_mult = %d\n", __func__, hparams.n_mult);
|
|
fprintf(stderr, "%s: n_head = %d\n", __func__, hparams.n_head);
|
|
fprintf(stderr, "%s: n_layer = %d\n", __func__, hparams.n_layer);
|
|
fprintf(stderr, "%s: n_rot = %d\n", __func__, hparams.n_rot);
|
|
fprintf(stderr, "%s: f16 = %d\n", __func__, hparams.f16);
|
|
fprintf(stderr, "%s: n_ff = %d\n", __func__, n_ff);
|
|
fprintf(stderr, "%s: n_parts = %d\n", __func__, n_parts);
|
|
}
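
    // Note on n_ff: LLaMA sizes its feed-forward layer as roughly 2/3 * 4 * n_embd,
    // rounded up to a multiple of n_mult. For the default 7B hparams
    // (n_embd = 4096, n_mult = 256) the formula above gives
    //   2*(4*4096)/3 = 10922  ->  rounded up to the next multiple of 256 = 11008,
    // which is the w1/w3 projection width allocated further below.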

    // load vocab
    {
        std::string word;
        std::vector<char> tmp(64);

        for (int i = 0; i < model.hparams.n_vocab; i++) {
            uint32_t len;
            fin.read((char *) &len, sizeof(len));

            word.resize(len);
            if (len > 0) {
                tmp.resize(len);
                fin.read(tmp.data(), len);
                word.assign(tmp.data(), len);
            } else {
                word.clear();
            }

            float score;
            fin.read((char *) &score, sizeof(score));

            vocab.token_to_id[word] = i;
            vocab.id_to_token[i] = word;
            vocab.score[i] = score;
        }
    }

    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
    // in order to save memory and also to speed up the computation
    // wtype is for per-layer weights, while vtype is for other weights
    ggml_type wtype, vtype;
    switch (model.hparams.f16) {
        case 0: wtype = vtype = GGML_TYPE_F32;  break;
        case 1: wtype = vtype = GGML_TYPE_F16;  break;
        case 2: wtype = vtype = GGML_TYPE_Q4_0; break;
        case 3: wtype = vtype = GGML_TYPE_Q4_1; break;
        case 4: wtype = GGML_TYPE_Q4_1; vtype = GGML_TYPE_F16; break;
        default:
                {
                    fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
                            __func__, fname.c_str(), model.hparams.f16);
                    return false;
                }
    }
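
    // The f16 header flag doubles as the weight-type selector:
    //   0 = f32, 1 = f16, 2 = q4_0, 3 = q4_1,
    //   4 = GPTQ import: per-layer weights are q4_1 while tok_embeddings and the
    //       output matrix stay f16 (see the GPTQ converter notes in the commit message).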

    auto & ctx = model.ctx;

    size_t ctx_size = 0;

    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;
        const int n_vocab = hparams.n_vocab;

        ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings

        ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm

        ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output

        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm

        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo

        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm

        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1
        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
        ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3

        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v

        ctx_size += (5 + 10*n_layer)*256; // object overhead

        fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
    }
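
    // Rough intuition for the estimate above (a sketch, not an exact accounting):
    // each weight tensor contributes n_elements * ggml_type_sizef(type) bytes, the
    // KV cache adds 2 * n_ctx * n_layer * n_embd elements at memory_type, and the
    // (5 + 10*n_layer)*256 term reserves room for ggml's per-tensor bookkeeping.
    // For example, one 4096x4096 attention matrix is 16,777,216 weights; at roughly
    // 0.6-0.75 bytes per weight for the 4-bit formats (an assumption about the
    // q4_0/q4_1 block layouts of this era) that is on the order of 10-13 MB per tensor.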

    // create the ggml context
    {
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx_size,
            /*.mem_buffer =*/ NULL,
        };

        model.ctx = ggml_init(params);
        if (!model.ctx) {
            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
            return false;
        }
    }

    // prepare memory for the weights
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_vocab = hparams.n_vocab;

        model.layers.resize(n_layer);

        model.tok_embeddings = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);

        model.norm   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
        model.output = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);

        // map by name
        model.tensors["tok_embeddings.weight"] = model.tok_embeddings;

        model.tensors["norm.weight"]   = model.norm;
        model.tensors["output.weight"] = model.output;

        for (int i = 0; i < n_layer; ++i) {
            auto & layer = model.layers[i];

            layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

            layer.wq = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wk = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wv = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.wo = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);

            layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

            layer.w1 = ggml_new_tensor_2d(ctx, wtype, n_embd,   n_ff);
            layer.w2 = ggml_new_tensor_2d(ctx, wtype,   n_ff, n_embd);
            layer.w3 = ggml_new_tensor_2d(ctx, wtype, n_embd,   n_ff);

            // map by name
            model.tensors["layers." + std::to_string(i) + ".attention_norm.weight"] = layer.attention_norm;

            model.tensors["layers." + std::to_string(i) + ".attention.wq.weight"] = layer.wq;
            model.tensors["layers." + std::to_string(i) + ".attention.wk.weight"] = layer.wk;
            model.tensors["layers." + std::to_string(i) + ".attention.wv.weight"] = layer.wv;
            model.tensors["layers." + std::to_string(i) + ".attention.wo.weight"] = layer.wo;

            model.tensors["layers." + std::to_string(i) + ".ffn_norm.weight"] = layer.ffn_norm;

            model.tensors["layers." + std::to_string(i) + ".feed_forward.w1.weight"] = layer.w1;
            model.tensors["layers." + std::to_string(i) + ".feed_forward.w2.weight"] = layer.w2;
            model.tensors["layers." + std::to_string(i) + ".feed_forward.w3.weight"] = layer.w3;
        }
    }

    // key + value memory
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;

        const int n_mem      = n_layer*n_ctx;
        const int n_elements = n_embd*n_mem;

        model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
        model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);

        const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);

        fprintf(stderr, "%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
    }
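
    // Sizing sketch for the KV cache above: each of memory_k and memory_v holds
    // n_layer * n_ctx * n_embd elements. With the default 7B hparams and n_ctx = 512:
    //   32 * 512 * 4096 = 67,108,864 elements per tensor,
    // i.e. 256 MB per tensor in f32 (512 MB total), or half that when the caller
    // passes an f16 memory_type (params.memory_f16 in main).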

    const size_t file_offset = fin.tellg();

    fin.close();

    std::vector<uint8_t> tmp;

    for (int i = 0; i < n_parts; ++i) {
        const int part_id = i;
        //const int part_id = n_parts - i - 1;

        std::string fname_part = fname;
        if (i > 0) {
            fname_part += "." + std::to_string(i);
        }

        fprintf(stderr, "%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());

        fin = std::ifstream(fname_part, std::ios::binary);
        fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
        fin.seekg(file_offset);

        // load weights
        {
            int n_tensors = 0;
            size_t total_size = 0;

            fprintf(stderr, "%s: ", __func__);

            while (true) {
                int32_t n_dims;
                int32_t length;
                int32_t ftype;

                fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
                fin.read(reinterpret_cast<char *>(&length), sizeof(length));
                fin.read(reinterpret_cast<char *>(&ftype),  sizeof(ftype));

                if (fin.eof()) {
                    break;
                }

                int32_t nelements = 1;
                int32_t ne[2] = { 1, 1 };
                for (int i = 0; i < n_dims; ++i) {
                    fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
                    nelements *= ne[i];
                }

                std::string name(length, 0);
                fin.read(&name[0], length);

                if (model.tensors.find(name.data()) == model.tensors.end()) {
                    fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
                    return false;
                }

                // split_type = 0: split by columns
                // split_type = 1: split by rows
                int split_type = 0;

                // split_type = 0:
                // regex:
                //   - tok_embeddings.*
                //   - layers.*.attention.wo.weight
                //   - layers.*.feed_forward.w2.weight

                // split_type = 1:
                // regex:
                //   - output.*
                //   - layers.*.attention.wq.weight
                //   - layers.*.attention.wk.weight
                //   - layers.*.attention.wv.weight
                //   - layers.*.feed_forward.w1.weight
                //   - layers.*.feed_forward.w3.weight
                if (name.find("tok_embeddings") != std::string::npos) {
                    split_type = 0;
                } else if (name.find("layers") != std::string::npos) {
                    if (name.find("attention.wo.weight") != std::string::npos) {
                        split_type = 0;
                    } else if (name.find("feed_forward.w2.weight") != std::string::npos) {
                        split_type = 0;
                    } else {
                        split_type = 1;
                    }
                } else if (name.find("output") != std::string::npos) {
                    split_type = 1;
                }
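
                // Sharding sketch: the multi-file checkpoints store each 2D tensor
                // split across n_parts files, either along dim 0 (columns, split_type = 0)
                // or dim 1 (rows, split_type = 1). For example, with the 13B model
                // (n_embd = 5120, n_parts = 2) a 5120x5120 wq tensor arrives as two
                // 5120x2560 row slices, one per part, and is reassembled into the full
                // tensor by the copy loops below.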

                auto tensor = model.tensors[name.data()];

                if (n_dims == 1) {
                    if (ggml_nelements(tensor) != nelements) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                        return false;
                    }
                } else {
                    if (ggml_nelements(tensor)/n_parts != nelements) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                        return false;
                    }
                }

                if (n_dims == 1) {
                    if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
                        fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
                        return false;
                    }
                } else {
                    if (split_type == 0) {
                        if (tensor->ne[0]/n_parts != ne[0] || tensor->ne[1] != ne[1]) {
                            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                    __func__, name.data(), tensor->ne[0]/n_parts, tensor->ne[1], ne[0], ne[1]);
                            return false;
                        }
                    } else {
                        if (tensor->ne[0] != ne[0] || tensor->ne[1]/n_parts != ne[1]) {
                            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                                    __func__, name.data(), tensor->ne[0], tensor->ne[1]/n_parts, ne[0], ne[1]);
                            return false;
                        }
                    }
                }

                if (0) {
                    static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
                    fprintf(stderr, "%24s - [%5d, %5d], type = %6s, split = %d\n", name.data(), ne[0], ne[1], ftype_str[ftype], split_type);
                }

                size_t bpe = 0;

                switch (ftype) {
                    case 0: bpe = ggml_type_size(GGML_TYPE_F32);  break;
                    case 1: bpe = ggml_type_size(GGML_TYPE_F16);  break;
                    case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
                    case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
                    default:
                            {
                                fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
                                return false;
                            }
                };

                if (n_dims == 1 || n_parts == 1) {
                    if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                                __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
                        return false;
                    }

                    if (part_id == 0) {
                        fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
                    } else {
                        fin.seekg(ggml_nbytes(tensor), std::ios::cur);
                    }

                    total_size += ggml_nbytes(tensor);
                } else {
                    if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)/n_parts) {
                        fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                                __func__, name.data(), ggml_nbytes(tensor)/n_parts, nelements*bpe);
                        return false;
                    }

                    if (split_type == 0) {
                        const int np0 = ne[0];

                        const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
                        assert(row_size == tensor->nb[1]);

                        for (int i1 = 0; i1 < ne[1]; ++i1) {
                            const size_t offset_row = i1*row_size;
                            const size_t offset = offset_row + ((part_id*np0)/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
                            fin.read(reinterpret_cast<char *>(tensor->data) + offset, row_size/n_parts);
                        }
                    } else {
                        const int np1 = ne[1];

                        const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);

                        for (int i1 = 0; i1 < ne[1]; ++i1) {
                            const size_t offset_row = (i1 + part_id*np1)*row_size;
                            fin.read(reinterpret_cast<char *>(tensor->data) + offset_row, row_size);
                        }
                    }

                    total_size += ggml_nbytes(tensor)/n_parts;
                }

                //fprintf(stderr, "%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
                if (++n_tensors % 8 == 0) {
                    fprintf(stderr, ".");
                    fflush(stderr);
                }
            }

            fprintf(stderr, " done\n");

            fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
        }

        fin.close();
    }

    return true;
}
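
// Lifetime note: llama_model_load allocates every weight tensor and the KV cache
// inside a single ggml_context (model.ctx). The caller owns that context and releases
// everything at once with ggml_free(model.ctx), as main() does after generation.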

// evaluate the transformer
//
//   - model:     the model
//   - n_threads: number of threads to use
//   - n_past:    the context size so far
//   - embd_inp:  the embeddings of the tokens in the context
//   - embd_w:    the predicted logits for the next token
//
// The per-token memory requirement is measured at runtime and reported back
// through mem_per_token (see the warm-up call in main()).
//
bool llama_eval(
        const llama_model & model,
        const int n_threads,
        const int n_past,
        const std::vector<llama_vocab::id> & embd_inp,
              std::vector<float>           & embd_w,
              size_t                       & mem_per_token,
              bool return_all_logits = false) {
    const int N = embd_inp.size();

    const auto & hparams = model.hparams;

    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_ctx   = hparams.n_ctx;
    const int n_head  = hparams.n_head;
    const int n_vocab = hparams.n_vocab;
    const int n_rot   = hparams.n_embd/hparams.n_head;

    // TODO: check if this size scales with n_ctx linearly and remove constant. somehow I feel it wasn't the case
    // static size_t buf_size = hparams.n_ctx*1024*1024;
    static size_t buf_size = 512u*1024*1024;
    static void * buf = malloc(buf_size);

    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
        const size_t buf_size_new = 1.3*(mem_per_token*N); // add 30% to account for ggml object overhead
        //fprintf(stderr, "\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);

        // reallocate
        buf_size = buf_size_new;
        buf = realloc(buf, buf_size);
        if (buf == nullptr) {
            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
            return false;
        }
    }

    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_size,
        /*.mem_buffer =*/ buf,
    };

    struct ggml_context * ctx0 = ggml_init(params);
    ggml_cgraph gf = {};
    gf.n_threads = n_threads;

    struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
    memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));

    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);

    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        struct ggml_tensor * cur;

        // norm
        {
            cur = ggml_rms_norm(ctx0, inpL);

            // cur = attention_norm*cur
            cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model.layers[il].attention_norm, cur),
                        cur);
        }

        // self-attention
        {
            struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
            struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
            struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);

            // store key and value to memory
            if (N >= 1) {
                struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
                struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));

                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
            }
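
            // KV cache layout: memory_k/memory_v are flat 1D tensors of
            // n_layer * n_ctx * n_embd elements. The views above address the slot for
            // layer il at position n_past, i.e. element offset n_embd*(il*n_ctx + n_past),
            // so each layer owns a contiguous n_ctx-token region and the N new tokens
            // are appended right after the n_past tokens already cached.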

            // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        ggml_rope(ctx0,
                            ggml_cpy(ctx0,
                                Qcur,
                                ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)),
                            n_past, n_rot, 0),
                        0, 2, 1, 3);

            // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_rope(ctx0,
                            ggml_reshape_3d(ctx0,
                                ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
                                n_embd/n_head, n_head, n_past + N),
                            n_past, n_rot, 1),
                        0, 2, 1, 3);

            // K * Q
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);

            // KQ_scaled = KQ / sqrt(n_embd/n_head)
            struct ggml_tensor * KQ_scaled =
                ggml_scale(ctx0,
                        KQ,
                        ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                        );

            // KQ_masked = mask_past(KQ_scaled)
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);

            // KQ = soft_max(KQ_masked)
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);

            // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
            struct ggml_tensor * V_trans =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
                            n_embd/n_head, n_head, n_past + N),
                        1, 2, 0, 3);

            // KQV = transpose(V) * KQ_soft_max
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);

            // cur = KQV_merged.contiguous().view(n_embd, N)
            cur = ggml_cpy(ctx0,
                    KQV_merged,
                    ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));

            // projection (no bias)
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].wo,
                    cur);
        }
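
        // Putting the block above together: per head, this computes
        //   softmax(mask(K^T Q / sqrt(n_embd/n_head))) * V
        // over the n_past cached tokens plus the N new ones, with RoPE applied to the
        // first n_rot dimensions of Q and K before the dot products. The permutes only
        // rearrange the [head_dim, sequence, head] layouts so ggml_mul_mat contracts
        // the intended axes; they do not change any values.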

        struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);

        // feed-forward network
        {
            // norm
            {
                cur = ggml_rms_norm(ctx0, inpFF);

                // cur = ffn_norm*cur
                cur = ggml_mul(ctx0,
                        ggml_repeat(ctx0, model.layers[il].ffn_norm, cur),
                        cur);
            }

            struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
                    model.layers[il].w3,
                    cur);


            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w1,
                    cur);

            // SILU activation
            cur = ggml_silu(ctx0, cur);

            cur = ggml_mul(ctx0, cur, tmp);

            cur = ggml_mul_mat(ctx0,
                    model.layers[il].w2,
                    cur);
        }
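
        // This is LLaMA's SwiGLU feed-forward: out = w2 * (silu(w1*x) .* (w3*x)),
        // where ".*" is an element-wise product. The w3 branch is computed first into
        // `tmp` so that `cur` can be reused for the w1 branch before the gating multiply.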

        cur = ggml_add(ctx0, cur, inpFF);

        // input for next layer
        inpL = cur;
    }

    // norm
    {
        inpL = ggml_rms_norm(ctx0, inpL);

        // inpL = norm*inpL
        inpL = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model.norm, inpL),
                    inpL);
    }

    // lm_head
    {
        inpL = ggml_mul_mat(ctx0, model.output, inpL);
    }

    // logits -> probs
    //inpL = ggml_soft_max(ctx0, inpL);

    // run the computation
    ggml_build_forward_expand(&gf, inpL);
    ggml_graph_compute       (ctx0, &gf);

    //if (n_past%100 == 0) {
    //    ggml_graph_print   (&gf);
    //    ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
    //}

    //embd_w.resize(n_vocab*N);
    //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);

    if (return_all_logits) {
        embd_w.resize(n_vocab * N);
        memcpy(embd_w.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
    } else {
        // return result for just the last token
        embd_w.resize(n_vocab);
        memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
    }

    if (mem_per_token == 0) {
        mem_per_token = ggml_used_mem(ctx0)/N;
    }
    //fprintf(stderr, "used_mem = %zu\n", ggml_used_mem(ctx0));

    ggml_free(ctx0);

    return true;
}

std::vector<double> softmax(const std::vector<float>& logits) {
    std::vector<double> probs(logits.size());
    float max_logit = logits[0];
    for (float v : logits) max_logit = std::max(max_logit, v);
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        // Subtract the maximum logit value from the current logit value for numerical stability
        float logit = logits[i] - max_logit;
        double exp_logit = std::exp(logit);
        sum_exp += exp_logit;
        probs[i] = exp_logit;
    }
    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
    return probs;
}
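
// Numerically this computes softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)),
// which is algebraically identical to exp(x_i) / sum_j exp(x_j) but avoids overflow for
// large logits, since the largest exponent ever evaluated is exp(0) = 1.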

void perplexity(const llama_vocab &vocab, const llama_model &model, const gpt_params &params, size_t mem_per_token) {
    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
    // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
    // Output: `perplexity: 13.5106 [114/114]`
    std::vector<llama_vocab::id> tokens = ::llama_tokenize(vocab, params.prompt, true);

    int count = 0;
    double nll = 0.0;
    int seq_count = tokens.size() / params.n_ctx;
    printf("Calculating perplexity over %d chunks\n", seq_count);
    for (int i = 0; i < seq_count; ++i) {
        int start = i * params.n_ctx;
        int end = start + params.n_ctx - 1;
        std::vector<llama_vocab::id> embd(tokens.begin() + start, tokens.begin() + end);
        std::vector<float> logits;
        auto start_t = std::chrono::high_resolution_clock::now();
        if (!llama_eval(model, params.n_threads, 0, embd, logits, mem_per_token, true)) {
            fprintf(stderr, "Failed to predict\n");
            return;
        }
        auto end_t = std::chrono::high_resolution_clock::now();
        if (i == 0) {
            double seconds = std::chrono::duration<double>(end_t - start_t).count();
            printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
        }
        // We get the logits for all the tokens in the context window (params.n_ctx)
        // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
        // calculate the perplexity over the last half of the window (so the model always has
        // some context to predict the token).
        //
        // We rely on the fact that attention in the forward pass only looks at previous
        // tokens here, so the logits returned for each token are an accurate representation
        // of what the model would have predicted at that point.
        //
        // Example: with a context window of 512, we compute perplexity for each of the
        // last 256 tokens. Then, we split the input up into context-window-sized chunks
        // to process the entire prompt.
        for (int j = params.n_ctx / 2; j < params.n_ctx - 1; ++j) {
            // Calculate probability of next token, given the previous ones.
            int n_vocab = model.hparams.n_vocab;
            std::vector<float> tok_logits(
                logits.begin() + j * n_vocab,
                logits.begin() + (j + 1) * n_vocab);
            double prob = softmax(tok_logits)[tokens[start + j + 1]];
            nll += -std::log(prob);
            ++count;
        }
        // perplexity is e^(average negative log-likelihood)
        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
        fflush(stdout);
    }
    printf("\n");
}
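
// In formula form: for the T scored tokens, PPL = exp( (1/T) * sum_t -log p(token_t | tokens_<t) ),
// where p(token_t | ...) is taken from the softmax over the logits llama_eval returned at
// position t-1. The running value printed after each chunk converges to the final perplexity.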

static bool is_interacting = false;

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
void sigint_handler(int signo) {
    printf(ANSI_COLOR_RESET);
    printf("\n"); // this also forces a flush of stdout
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            _exit(130);
        }
    }
}
#endif

const char * llama_print_system_info(void) {
    static std::string s;

    s  = "";
    s += "AVX = "       + std::to_string(ggml_cpu_has_avx())       + " | ";
    s += "AVX2 = "      + std::to_string(ggml_cpu_has_avx2())      + " | ";
    s += "AVX512 = "    + std::to_string(ggml_cpu_has_avx512())    + " | ";
    s += "FMA = "       + std::to_string(ggml_cpu_has_fma())       + " | ";
    s += "NEON = "      + std::to_string(ggml_cpu_has_neon())      + " | ";
    s += "ARM_FMA = "   + std::to_string(ggml_cpu_has_arm_fma())   + " | ";
    s += "F16C = "      + std::to_string(ggml_cpu_has_f16c())      + " | ";
    s += "FP16_VA = "   + std::to_string(ggml_cpu_has_fp16_va())   + " | ";
    s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
    s += "BLAS = "      + std::to_string(ggml_cpu_has_blas())      + " | ";
    s += "SSE3 = "      + std::to_string(ggml_cpu_has_sse3())      + " | ";
    s += "VSX = "       + std::to_string(ggml_cpu_has_vsx())       + " | ";

    return s.c_str();
}

int main(int argc, char ** argv) {
    ggml_time_init();
    const int64_t t_main_start_us = ggml_time_us();

    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    // params.prompt = R"(// this function checks if the number n is prime
    //bool is_prime(int n) {)";

    int64_t t_load_us = 0;

    llama_vocab vocab;
    llama_model model;

    // load the model
    {
        const ggml_type memory_type = params.memory_f16 ? GGML_TYPE_F16 : GGML_TYPE_F32;
        const int64_t t_start_us = ggml_time_us();
        if (!llama_model_load(params.model, model, vocab, params.n_ctx, params.n_parts, memory_type)) {
            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
            return 1;
        }

        t_load_us = ggml_time_us() - t_start_us;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    std::vector<float> logits;

    // determine the required inference memory per token:
    size_t mem_per_token = 0;
    llama_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
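    // This first call runs a tiny 4-token batch purely to populate mem_per_token,
    // which llama_eval then uses to size (and, if needed, grow) its scratch buffer
    // in proportion to the number of tokens in later, larger batches.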

    if (params.perplexity) {
        perplexity(vocab, model, params, mem_per_token);
        exit(0);
    }

    int n_past = 0;

    int64_t t_sample_us  = 0;
    int64_t t_predict_us = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');
    // tokenize the prompt
    std::vector<llama_vocab::id> embd_inp = ::llama_tokenize(vocab, params.prompt, true);

    params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());

    // prefix & suffix for instruct mode
    const std::vector<llama_vocab::id> inp_pfx = ::llama_tokenize(vocab, "\n\n### Instruction:\n\n", true);
    const std::vector<llama_vocab::id> inp_sfx = ::llama_tokenize(vocab, "\n\n### Response:\n\n", false);

    // in instruct mode, we inject a prefix and a suffix to each input by the user
    if (params.instruct) {
        params.interactive = true;
        params.antiprompt.push_back("### Instruction:\n\n");
    }

    // enable interactive mode if reverse prompt is specified
    if (params.antiprompt.size() != 0) {
        params.interactive = true;
    }

    fprintf(stderr, "\n");
    fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
    fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
    for (int i = 0; i < (int) embd_inp.size(); i++) {
        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], vocab.id_to_token.at(embd_inp[i]).c_str());
    }
    fprintf(stderr, "\n");
    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        signal(SIGINT, sigint_handler);
#endif

        fprintf(stderr, "%s: interactive mode on.\n", __func__);

        if (params.antiprompt.size()) {
            for (auto antiprompt : params.antiprompt) {
                fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str());
            }
        }
    }
    fprintf(stderr, "sampling parameters: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty);
    fprintf(stderr, "\n\n");

    std::vector<llama_vocab::id> embd;

    int last_n_size = params.repeat_last_n;
    std::vector<llama_vocab::id> last_n_tokens(last_n_size);
    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);

    if (params.interactive) {
        fprintf(stderr, "== Running in interactive mode. ==\n"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
               " - Press Ctrl+C to interject at any time.\n"
#endif
               " - Press Return to return control to LLaMa.\n"
               " - If you want to submit another line, end your input in '\\'.\n\n");
        is_interacting = true;
    }

    int input_consumed = 0;
    bool input_noecho = false;

    int remaining_tokens = params.n_predict;

    // set the color for the prompt which will be output initially
    if (params.use_color) {
#if defined (_WIN32)
        // Enable ANSI colors on Windows 10+
        unsigned long dwMode = 0;
        void* hConOut = GetStdHandle((unsigned long)-11); // STD_OUTPUT_HANDLE (-11)
        if (hConOut && hConOut != (void*)-1 && GetConsoleMode(hConOut, &dwMode) && !(dwMode & 0x4)) {
            SetConsoleMode(hConOut, dwMode | 0x4); // ENABLE_VIRTUAL_TERMINAL_PROCESSING (0x4)
        }
#endif
        printf(ANSI_COLOR_YELLOW);
    }

    while (remaining_tokens > 0 || params.interactive) {
        // predict
        if (embd.size() > 0) {
            const int64_t t_start_us = ggml_time_us();

            if (!llama_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
                fprintf(stderr, "Failed to predict\n");
                return 1;
            }

            t_predict_us += ggml_time_us() - t_start_us;
        }

        n_past += embd.size();
        embd.clear();

        if ((int) embd_inp.size() <= input_consumed) {
            // out of user input, sample next token
            const float top_k = params.top_k;
            const float top_p = params.top_p;
            const float temp  = params.temp;
            const float repeat_penalty = params.repeat_penalty;

            const int n_vocab = model.hparams.n_vocab;

            llama_vocab::id id = 0;

            {
                const int64_t t_start_sample_us = ggml_time_us();

                if (params.ignore_eos) {
                    // set the logit of the eos token to zero to avoid sampling it
                    logits[logits.size() - n_vocab + EOS_TOKEN_ID] = 0;
                }

                id = llama_sample_top_p_top_k(vocab, logits.data() + (logits.size() - n_vocab), last_n_tokens, repeat_penalty, top_k, top_p, temp, rng);

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);

                t_sample_us += ggml_time_us() - t_start_sample_us;
            }
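
            // Sampling sketch: only the last n_vocab entries of `logits` (the prediction
            // for the newest token) are handed to llama_sample_top_p_top_k, together with
            // the sliding window of the last repeat_last_n tokens that it uses for the
            // repeat_penalty, plus the top_k/top_p/temp settings. When params.ignore_eos
            // is set, the end-of-text logit is zeroed above to avoid sampling it.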

            // add it to the context
            embd.push_back(id);

            // echo this to console
            input_noecho = false;

            // decrement remaining sampling budget
            --remaining_tokens;
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            while ((int) embd_inp.size() > input_consumed) {
                embd.push_back(embd_inp[input_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[input_consumed]);
                ++input_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (!input_noecho) {
            for (auto id : embd) {
                printf("%s", vocab.id_to_token[id].c_str());
            }
            fflush(stdout);
        }
        // reset color to default if there is no pending user input
        if (!input_noecho && params.use_color && (int) embd_inp.size() == input_consumed) {
            printf(ANSI_COLOR_RESET);
        }

        // in interactive mode, and not currently processing queued inputs;
        // check if we should prompt the user for more
        if (params.interactive && (int) embd_inp.size() <= input_consumed) {
            // check for reverse prompt
            std::string last_output;
            for (auto id : last_n_tokens) {
                last_output += vocab.id_to_token[id];
            }

            // Check if each of the reverse prompts appears at the end of the output.
            for (std::string antiprompt : params.antiprompt) {
                if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) {
                    is_interacting = true;
                    break;
                }
            }
            if (is_interacting) {
                if (params.instruct) {
                    input_consumed = embd_inp.size();
                    embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());

                    printf("\n> ");
                }

                // currently being interactive
                if (params.use_color) printf(ANSI_BOLD ANSI_COLOR_GREEN);
                std::string buffer;
                std::string line;
                bool another_line = true;
                do {
                    std::getline(std::cin, line);
                    if (line.empty() || line.back() != '\\') {
                        another_line = false;
                    } else {
                        line.pop_back(); // Remove the continue character
                    }
                    buffer += line + '\n'; // Append the line to the result
                } while (another_line);
                if (params.use_color) printf(ANSI_COLOR_RESET);

                std::vector<llama_vocab::id> line_inp = ::llama_tokenize(vocab, buffer, false);
                embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                if (params.instruct) {
                    embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
                }

                remaining_tokens -= line_inp.size();

                input_noecho = true; // do not echo this again
            }
            is_interacting = false;
        }

        // end of text token
        if (embd.back() == EOS_TOKEN_ID) {
            if (params.interactive) {
                is_interacting = true;
            } else {
                fprintf(stderr, " [end of text]\n");
                break;
            }
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        if (params.interactive && remaining_tokens <= 0) {
            remaining_tokens = params.n_predict;
            is_interacting = true;
        }
    }

#if defined (_WIN32)
    signal(SIGINT, SIG_DFL);
#endif

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        fprintf(stderr, "\n\n");
        fprintf(stderr, "%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
        fprintf(stderr, "%s:     load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
        fprintf(stderr, "%s:   sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
        fprintf(stderr, "%s:  predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
        fprintf(stderr, "%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    ggml_free(model.ctx);

    if (params.use_color) {
        printf(ANSI_COLOR_RESET);
    }

    return 0;
}