Mirror of https://git.adityakumar.xyz/llama.cpp.git (synced 2024-11-09 15:29:43 +00:00)
add log_callback to llama_context_params for custom logging. (#2234)

* add log_callback to llama_context_params for custom logging.
* Fix macro expansion on gcc
* Add struct llama_state for global variables and move log_callback there
* Turn log level into enum and some minor changes.
* Remove model_for_logging parameter (not needed anymore)
* Convert remaining fprintf(stderr, ...) calls to use new macros.
* Fix enum and initialize g_state
* Fix log calls after merge
* Fix missing static
* Add back all the new lines in the logging strings
* Add comment for llama_log_callback and replace remaining printf calls

---------

Co-authored-by: grahameth <->
Co-authored-by: Helmut <helmut.buhler@inf.h-brs.de>
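For context, a minimal sketch of how an application could hook into the API this commit introduces. llama_log_set, llama_log_callback, and the llama_log_level values are taken from the diff below; app_log_sink and app_log_callback are illustrative names only, and the callback body is just one possible policy.

#include <cstdio>
#include "llama.h"

// Hypothetical log sink owned by the application; user_data lets the callback
// reach it without globals.
struct app_log_sink {
    FILE * out;
};

static void app_log_callback(llama_log_level level, const char * text, void * user_data) {
    app_log_sink * sink = static_cast<app_log_sink *>(user_data);
    const char * tag =
        level == LLAMA_LOG_LEVEL_ERROR ? "E" :
        level == LLAMA_LOG_LEVEL_WARN  ? "W" : "I";
    fprintf(sink->out, "[%s] %s", tag, text); // text usually already ends with '\n'
}

int main() {
    app_log_sink sink = { stderr };
    // Register before loading a model so load-time messages are captured too.
    llama_log_set(app_log_callback, &sink);
    // ... continue with llama_context_default_params(), llama_load_model_from_file(), etc.
    return 0;
}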
This commit is contained in:
parent 25d43e0eb5
commit ea04a4ca19
2 changed files with 177 additions and 105 deletions
llama.cpp: 263 changed lines
@@ -56,6 +56,13 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
+static void llama_log_internal(llama_log_level level, const char* format, ...);
+static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);
+#define LLAMA_LOG_INFO(...)  llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...)  llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
+
+
 #if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL)
 #include "ggml-alloc.h"
 #define LLAMA_USE_ALLOCATOR
@@ -438,6 +445,14 @@ struct llama_context {
     }
 };
 
+struct llama_state {
+    // We save the log callback globally
+    llama_log_callback log_callback = llama_log_callback_default;
+    void * log_callback_user_data = nullptr;
+};
+// global state
+static llama_state g_state;
+
 template <typename T>
 static T checked_mul(T a, T b) {
     T ret = a * b;
@@ -504,7 +519,7 @@ struct llama_file_loader {
 
     llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map)
         : file(fname, "rb") {
-        fprintf(stderr, "llama.cpp: loading model from %s\n", fname);
+        LLAMA_LOG_INFO("llama.cpp: loading model from %s\n", fname);
         read_magic();
         read_hparams();
         read_vocab();
@@ -619,7 +634,7 @@ struct llama_file_saver {
     llama_file_loader * any_file_loader;
     llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
         : file(fname, "wb"), any_file_loader(any_file_loader) {
-        fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
+        LLAMA_LOG_INFO("llama.cpp: saving model to %s\n", fname);
         write_magic();
         write_hparams(new_ftype);
         write_vocab();
@@ -640,7 +655,7 @@ struct llama_file_saver {
     }
     void write_vocab() {
         if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
-            fprintf(stderr, "llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
+            LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
         }
         uint32_t n_vocab = any_file_loader->hparams.n_vocab;
         for (uint32_t i = 0; i < n_vocab; i++) {
@@ -831,7 +846,7 @@ struct llama_model_loader {
             uint8_t byte = lt.data[i];
             sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
         }
-        fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
+        LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
                 llama_format_tensor_shape(lt.ne).c_str(), lt.size);
     }
 
@@ -864,7 +879,7 @@ static bool kv_cache_init(
     cache.ctx = ggml_init(params);
 
     if (!cache.ctx) {
-        fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
+        LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
         return false;
     }
 
@@ -1076,7 +1091,7 @@ static void llama_model_load_internal(
             LLAMA_ASSERT(hparams.n_head % n_gqa == 0);
             hparams.n_head_kv = hparams.n_head / n_gqa;
             if (model.type == e_model::MODEL_65B && n_gqa == 8) {
-                fprintf(stderr, "%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
+                LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
                 model.type = e_model::MODEL_70B;
                 hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model
             }
@@ -1092,22 +1107,22 @@ static void llama_model_load_internal(
     //const uint32_t n_ff = 28672;
 
     {
-        fprintf(stderr, "%s: format = %s\n", __func__, llama_file_version_name(file_version));
-        fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab);
-        fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx);
-        fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd);
-        fprintf(stderr, "%s: n_mult = %u\n", __func__, hparams.n_mult);
-        fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head);
-        fprintf(stderr, "%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
-        fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer);
-        fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
-        fprintf(stderr, "%s: n_gqa = %u\n", __func__, hparams.n_gqa());
-        fprintf(stderr, "%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
-        fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff);
-        fprintf(stderr, "%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
-        fprintf(stderr, "%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
-        fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
-        fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type));
+        LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(file_version));
+        LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
+        LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
+        LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
+        LLAMA_LOG_INFO("%s: n_mult = %u\n", __func__, hparams.n_mult);
+        LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
+        LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
+        LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
+        LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
+        LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
+        LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
+        LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff);
+        LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
+        LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
+        LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
+        LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type));
     }
 
     if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
@@ -1135,7 +1150,7 @@ static void llama_model_load_internal(
     size_t ctx_size;
     size_t mmapped_size;
     ml->calc_sizes(&ctx_size, &mmapped_size);
-    fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
 
     // create the ggml context
     {
@@ -1160,13 +1175,13 @@ static void llama_model_load_internal(
     (void) main_gpu;
     (void) mul_mat_q;
 #if defined(GGML_USE_CUBLAS)
-    fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
+    LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration\n", __func__);
     ggml_cuda_set_main_device(main_gpu);
     ggml_cuda_set_mul_mat_q(mul_mat_q);
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
 #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
 #elif defined(GGML_USE_CLBLAST)
-    fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
+    LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
 #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
 #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
 #else
@@ -1271,14 +1286,14 @@ static void llama_model_load_internal(
         const size_t mem_required_state =
             scale*hparams.kv_size();
 
-        fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
+        LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
                 mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
 
         (void) vram_scratch;
         (void) n_batch;
 #ifdef GGML_USE_CUBLAS
         if (low_vram) {
-            fprintf(stderr, "%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
+            LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
             ggml_cuda_set_scratch_size(0); // disable scratch
         } else {
             const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type);
@@ -1286,7 +1301,7 @@ static void llama_model_load_internal(
             vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
             ggml_cuda_set_scratch_size(vram_scratch);
             if (n_gpu_layers > 0) {
-                fprintf(stderr, "%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
+                LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
                         __func__, vram_scratch_base / kB, vram_scratch_per_context,
                         (vram_scratch + MB - 1) / MB); // round up
             }
@@ -1296,9 +1311,9 @@ static void llama_model_load_internal(
 #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
         const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
 
-        fprintf(stderr, "%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
+        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
         if (n_gpu_layers > (int) hparams.n_layer) {
-            fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__);
+            LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
         }
         size_t vram_kv_cache = 0;
 
@@ -1307,17 +1322,17 @@ static void llama_model_load_internal(
         const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
         if (n_gpu_layers > (int) hparams.n_layer + 1) {
             if (low_vram) {
-                fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
+                LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
             } else {
-                fprintf(stderr, "%s: offloading v cache to GPU\n", __func__);
+                LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
                 vram_kv_cache += hparams.kv_size() / 2;
             }
         }
         if (n_gpu_layers > (int) hparams.n_layer + 2) {
             if (low_vram) {
-                fprintf(stderr, "%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
+                LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
             } else {
-                fprintf(stderr, "%s: offloading k cache to GPU\n", __func__);
+                LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
                 vram_kv_cache += hparams.kv_size() / 2;
             }
         }
@@ -1326,9 +1341,9 @@ static void llama_model_load_internal(
         const int max_offloadable_layers = hparams.n_layer + 1;
 #endif // GGML_USE_CUBLAS
 
-        fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n",
+        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
                 __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
-        fprintf(stderr, "%s: total VRAM used: %zu MB\n",
+        LLAMA_LOG_INFO("%s: total VRAM used: %zu MB\n",
                 __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
 #else
         (void) n_gpu_layers;
@@ -1387,7 +1402,7 @@ static bool llama_model_load(
                 use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
         return true;
     } catch (const std::exception & err) {
-        fprintf(stderr, "error loading model: %s\n", err.what());
+        LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
         return false;
     }
 }
@@ -1751,7 +1766,7 @@ static struct ggml_cgraph * llama_build_graph(
     }
 
 #if 0
-    printf("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
+    LLAMA_LOG_INFO("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
             ggml_used_mem(ctx0)/1024.0/1024.0,
             lctx.get_buf_max_mem(0)/1024.0/1024.0,
             lctx.get_buf_max_mem(1)/1024.0/1024.0,
@@ -1812,7 +1827,7 @@ static bool llama_eval_internal(
     ggml_allocr_alloc_graph(lctx.alloc, gf);
 #endif
 
-    // fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
+    // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
 
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
@@ -1999,7 +2014,7 @@ struct llama_tokenizer {
             left_sym.n += right_sym.n;
             right_sym.n = 0;
 
-            //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
+            //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
 
             // remove the right sym from the chain
             left_sym.next = right_sym.next;
@@ -3007,7 +3022,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         tensor.data = read_data.addr;
         model_loader->load_data_for(tensor);
 
-        printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
+        LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ",
               ++idx, model_loader->tensors_map.tensors.size(),
               tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
               ggml_type_name(tensor.type));
@@ -3029,7 +3044,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             new_type = tensor.type;
             new_data = tensor.data;
             new_size = tensor.size;
-            printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
+            LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
         } else {
             new_type = quantized_type;
 #ifdef GGML_USE_K_QUANTS
@@ -3064,17 +3079,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                 int nx = tensor.ne.at(0);
                 int ny = tensor.ne.at(1);
                 if (nx % QK_K != 0 || ny % QK_K != 0) {
-                    fprintf(stderr, "\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
+                    LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
                     convert_incompatible_tensor = true;
                 }
             }
             if (convert_incompatible_tensor) {
                 if (tensor.name == "output.weight") {
                     new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
-                    fprintf(stderr, "F16 will be used for this tensor instead.\n");
+                    LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
                 } else if (tensor.name == "tok_embeddings.weight") {
                     new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
-                    fprintf(stderr, "Q4_0 will be used for this tensor instead.\n");
+                    LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
                 } else {
                     throw std::runtime_error("Unsupported tensor size encountered\n");
                 }
@@ -3094,7 +3109,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                 f32_data = (float *) f32_conv_buf.addr;
             }
 
-            printf("quantizing to %s .. ", ggml_type_name(new_type));
+            LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
             fflush(stdout);
 
             work.resize(nelements * 4); // upper bound on size
@@ -3144,7 +3159,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                 }
             }
 
-            printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
+            LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
             int64_t tot_count = 0;
             for (size_t i = 0; i < hist_cur.size(); i++) {
                 hist_all[i] += hist_cur[i];
@@ -3153,18 +3168,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
             if (tot_count > 0) {
                 for (size_t i = 0; i < hist_cur.size(); i++) {
-                    printf("%5.3f ", hist_cur[i] / float(nelements));
+                    LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
                 }
             }
-            printf("\n");
+            LLAMA_LOG_INFO("\n");
         }
         total_size_org += tensor.size;
         total_size_new += new_size;
         file_saver.write_tensor(tensor, new_type, new_data, new_size);
     }
 
-    printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-    printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
 
     {
         int64_t sum_all = 0;
@@ -3173,11 +3188,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         }
 
         if (sum_all > 0) {
-            printf("%s: hist: ", __func__);
+            LLAMA_LOG_INFO("%s: hist: ", __func__);
             for (size_t i = 0; i < hist_all.size(); i++) {
-                printf("%5.3f ", hist_all[i] / float(sum_all));
+                LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
             }
-            printf("\n");
+            LLAMA_LOG_INFO("\n");
         }
     }
 }
@@ -3201,8 +3216,8 @@ struct llama_model * llama_load_model_from_file(
             params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
             memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
             params.progress_callback_user_data)) {
+        LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
         delete model;
-        fprintf(stderr, "%s: failed to load model\n", __func__);
         return nullptr;
     }
 
@@ -3235,10 +3250,9 @@ struct llama_context * llama_new_context_with_model(
             unsigned percentage = (unsigned) (100 * progress);
             while (percentage > *cur_percentage_p) {
                 *cur_percentage_p = percentage;
-                fprintf(stderr, ".");
-                fflush(stderr);
+                LLAMA_LOG_INFO(".");
                 if (percentage >= 100) {
-                    fprintf(stderr, "\n");
+                    LLAMA_LOG_INFO("\n");
                 }
             }
         };
@@ -3252,14 +3266,14 @@ struct llama_context * llama_new_context_with_model(
     // reserve memory for context buffers
     if (!params.vocab_only) {
        if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
-            fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
+            LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
             llama_free(ctx);
             return nullptr;
         }
 
         {
             const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
-            fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
+            LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
         }
 
         const auto & hparams = ctx->model.hparams;
@@ -3293,14 +3307,14 @@ struct llama_context * llama_new_context_with_model(
         // measure memory requirements for the graph
         size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
 
-        fprintf(stderr, "%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
+        LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
 
         // debug - for comparison with scratch buffer
         //size_t prev_req =
         //    MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) +
         //    MEM_REQ_SCRATCH1().at(ctx->model.type) +
         //    MEM_REQ_EVAL().at(ctx->model.type);
-        //fprintf(stderr, "%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);
+        //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);
 
         // recreate allocator with exact memory requirements
         ggml_allocr_free(ctx->alloc);
@@ -3336,13 +3350,13 @@ struct llama_context * llama_new_context_with_model(
 
         const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);
 
-        fprintf(stderr, "%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
+        LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
 
 #define LLAMA_METAL_CHECK_BUF(result) \
     if (!(result)) { \
-        fprintf(stderr, "%s: failed to add buffer\n", __func__); \
+        LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
         llama_free(ctx); \
         return NULL; \
     }
 
         LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
@@ -3396,19 +3410,19 @@ int llama_model_quantize(
         llama_model_quantize_internal(fname_inp, fname_out, params);
         return 0;
     } catch (const std::exception & err) {
-        fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.what());
+        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
         return 1;
     }
 }
 
 int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
-    fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
+    LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
 
     const int64_t t_start_lora_us = ggml_time_us();
 
     auto fin = std::ifstream(path_lora, std::ios::binary);
     if (!fin) {
-        fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
+        LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
         return 1;
     }
 
@@ -3417,14 +3431,14 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
         uint32_t magic;
         fin.read((char *) &magic, sizeof(magic));
         if (magic != LLAMA_FILE_MAGIC_GGLA) {
-            fprintf(stderr, "%s: bad file magic\n", __func__);
+            LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
            return 1;
         }
         uint32_t format_version;
         fin.read((char *) &format_version, sizeof(format_version));
 
         if (format_version != 1) {
-            fprintf(stderr, "%s: unsupported file version\n", __func__ );
+            LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
             return 1;
         }
     }
@@ -3435,7 +3449,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
     fin.read((char *) &lora_alpha, sizeof(lora_alpha));
     float scaling = (float)lora_alpha / (float)lora_r;
 
-    fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
+    LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
 
 
     // create a temporary ggml context to store the lora tensors
@@ -3461,7 +3475,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
     ggml_context * base_ctx = NULL;
     llama_buffer base_buf;
     if (path_base_model) {
-        fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model);
+        LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
         model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
 
         size_t ctx_size;
@@ -3518,17 +3532,17 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
         const std::string lora_suffix = ".lora";
         size_t pos = name.rfind(lora_suffix);
         if (pos == std::string::npos) {
-            fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
+            LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
             return 1;
         }
 
         std::string lora_type = name.substr(pos + lora_suffix.length());
         std::string base_name = name;
         base_name.erase(pos);
-        // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
+        // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
 
         if (model_tensors.find(base_name) == model_tensors.end()) {
-            fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
+            LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
             return 1;
         }
 
@@ -3539,7 +3553,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             case 1: wtype = GGML_TYPE_F16; break;
             default:
                     {
-                        fprintf(stderr, "%s: invalid tensor data type '%d'\n",
+                        LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
                                 __func__, ftype);
                         return false;
                     }
@@ -3549,7 +3563,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
         }
         else {
-            fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims);
+            LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
             return 1;
         }
         ggml_set_name(lora_tensor, "lora_tensor");
@@ -3587,7 +3601,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             if (model_loader) {
                 // load from base model
                 if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
-                    fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
+                    LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
                     return 1;
                 }
                 size_t idx = model_loader->tensors_map.name_to_idx[base_name];
@@ -3603,8 +3617,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 
             if (ggml_is_quantized(base_t->type)) {
                 if (!warned) {
-                    fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
+                    LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
                             "use a f16 or f32 base model with --lora-base\n", __func__);
                     warned = true;
                 }
             }
@@ -3618,8 +3632,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             ggml_set_name(loraB, "loraB");
 
             if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
-                fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
+                LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
                                " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
                 return 1;
             }
 
@@ -3664,7 +3678,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
 
             n_tensors++;
             if (n_tensors % 4 == 0) {
-                fprintf(stderr, ".");
+                LLAMA_LOG_INFO(".");
             }
         }
     }
@@ -3676,7 +3690,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
     }
 
     const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
-    fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
+    LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
 
     return 0;
 }
@@ -3685,7 +3699,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
     try {
         return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
     } catch (const std::exception & err) {
-        fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
+        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
         return 1;
     }
 }
@@ -3694,7 +3708,7 @@ int llama_model_apply_lora_from_file(const struct llama_model * model, const cha
     try {
         return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
     } catch (const std::exception & err) {
-        fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
+        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
         return 1;
     }
 }
@@ -3976,7 +3990,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         const uint32_t version = file.read_u32();
 
         if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
-            fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
+            LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
             return false;
         }
 
@@ -3984,7 +3998,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         file.read_raw(&session_hparams, sizeof(llama_hparams));
 
         if (session_hparams != ctx->model.hparams) {
-            fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__);
+            LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
             return false;
         }
     }
@@ -3994,7 +4008,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         const uint32_t n_token_count = file.read_u32();
 
         if (n_token_count > n_token_capacity) {
-            fprintf(stderr, "%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
+            LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
             return false;
         }
 
@@ -4008,7 +4022,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
         const size_t n_state_size_max = llama_get_state_size(ctx);
 
         if (n_state_size_cur > n_state_size_max) {
-            fprintf(stderr, "%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
+            LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
             return false;
         }
 
@@ -4025,7 +4039,7 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
     try {
         return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
     } catch (const std::exception & err) {
-        fprintf(stderr, "error loading session file: %s\n", err.what());
+        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
         return false;
     }
 }
@@ -4056,7 +4070,7 @@ int llama_eval(
         int n_past,
         int n_threads) {
     if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
-        fprintf(stderr, "%s: failed to eval\n", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
 
@@ -4078,7 +4092,7 @@ int llama_eval_embd(
         int n_past,
         int n_threads) {
     if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
-        fprintf(stderr, "%s: failed to eval\n", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
 
@@ -4099,7 +4113,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) {
     const std::vector<llama_token> tmp(n_batch, llama_token_bos());
 
     if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
-        fprintf(stderr, "%s: failed to eval\n", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
 
@@ -4115,7 +4129,7 @@ int llama_tokenize_with_model(
     auto res = llama_tokenize(model->vocab, text, add_bos);
 
     if (n_max_tokens < (int) res.size()) {
-        fprintf(stderr, "%s: too many tokens\n", __func__);
+        LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
         return -((int) res.size());
     }
 
@@ -4232,15 +4246,15 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
 void llama_print_timings(struct llama_context * ctx) {
     const llama_timings timings = llama_get_timings(ctx);
 
-    fprintf(stderr, "\n");
-    fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
-    fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("\n");
+    LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
+    LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
-    fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
-    fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
-    fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
+    LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
 }
 
 void llama_reset_timings(struct llama_context * ctx) {
@@ -4276,3 +4290,44 @@ const char * llama_print_system_info(void) {
 const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
     return ctx->model.tensors_by_name;
 }
+
+
+void llama_log_set(llama_log_callback log_callback, void * user_data) {
+    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
+    g_state.log_callback_user_data = user_data;
+}
+
+#if defined(_MSC_VER) && !defined(vsnprintf)
+#define vsnprintf _vsnprintf
+#endif
+
+static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
+    va_list args_copy;
+    va_copy(args_copy, args);
+    char buffer[128];
+    int len = vsnprintf(buffer, 128, format, args);
+    if (len < 128) {
+        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
+    } else {
+        char* buffer2 = new char[len+1];
+        vsnprintf(buffer2, len+1, format, args_copy);
+        buffer2[len] = 0;
+        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
+        delete[] buffer2;
+    }
+    va_end(args_copy);
+}
+
+static void llama_log_internal(llama_log_level level, const char * format, ...) {
+    va_list args;
+    va_start(args, format);
+    llama_log_internal_v(level, format, args);
+    va_end(args);
+}
+
+static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
+    (void) level;
+    (void) user_data;
+    fputs(text, stderr);
+    fflush(stderr);
+}
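The new llama_log_internal_v above formats into a 128-byte stack buffer and falls back to a heap allocation only when vsnprintf reports a longer message, keeping a va_copy around because the first formatting attempt consumes the va_list. Below is a standalone sketch of that pattern under the same idea; emit_line and log_formatted are hypothetical names standing in for g_state.log_callback and llama_log_internal, and the extra len >= 0 checks are an assumption of this sketch, not part of the diff.

#include <cstdarg>
#include <cstdio>

// Hypothetical stand-in for g_state.log_callback: writes the finished line to stderr.
static void emit_line(const char * text) {
    fputs(text, stderr);
}

// Format-then-fallback: try a small stack buffer first, heap-allocate only
// when the message does not fit. va_copy is needed because the first
// vsnprintf call consumes 'args'.
static void log_formatted(const char * format, ...) {
    va_list args;
    va_start(args, format);

    va_list args_copy;
    va_copy(args_copy, args);

    char buffer[128];
    int len = vsnprintf(buffer, sizeof(buffer), format, args);
    if (len >= 0 && len < (int) sizeof(buffer)) {
        emit_line(buffer);                  // common case: fits on the stack
    } else if (len >= 0) {
        char * buffer2 = new char[len + 1]; // rare case: message longer than 128 bytes
        vsnprintf(buffer2, len + 1, format, args_copy);
        emit_line(buffer2);
        delete[] buffer2;
    }

    va_end(args_copy);
    va_end(args);
}

A call such as log_formatted("loaded %d tensors\n", 291) would take the stack-buffer path, while a very long message would trigger the single heap allocation.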
llama.h: 19 changed lines
@@ -86,7 +86,20 @@ extern "C" {
 
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
+    enum llama_log_level {
+        LLAMA_LOG_LEVEL_ERROR = 2,
+        LLAMA_LOG_LEVEL_WARN = 3,
+        LLAMA_LOG_LEVEL_INFO = 4
+    };
+
+    // Signature for logging events
+    // Note that text includes the new line character at the end for most events.
+    // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
+    // if it exists.
+    // It might not exist for progress report where '.' is output repeatedly.
+    typedef void (*llama_log_callback)(llama_log_level level, const char * text, void * user_data);
+
     struct llama_context_params {
         uint32_t seed; // RNG seed, -1 for random
         int32_t n_ctx; // text context
         int32_t n_batch; // prompt processing batch size
@@ -195,6 +208,10 @@ extern "C" {
         int32_t n_eval;
     };
 
+    // Set callback for all future logging events.
+    // If this is not called, or NULL is supplied, everything is output on stderr.
+    LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+
     LLAMA_API int llama_max_devices();
 
     LLAMA_API struct llama_context_params llama_context_default_params();
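The comment block added to llama.h notes that most log lines arrive with a trailing '\n', while the progress output (the repeated '.') does not. A hedged sketch of a callback that accounts for that before forwarding to a line-oriented logger; line_logger_callback is an illustrative name, and tagging lines with the numeric level is just one possible policy.

#include <string>
#include <cstdio>
#include "llama.h"

// Strip the trailing newline only when it is present, then hand the line to a
// line-oriented sink (here fprintf with a level tag).
static void line_logger_callback(llama_log_level level, const char * text, void * user_data) {
    (void) user_data;
    std::string line(text);
    if (!line.empty() && line.back() == '\n') {
        line.pop_back();
    }
    if (line.empty()) {
        return; // nothing left to report (e.g. a bare newline)
    }
    fprintf(stderr, "[llama/%d] %s\n", (int) level, line.c_str());
}

Registered via llama_log_set(line_logger_callback, nullptr), this would turn the progress dots into individual tagged lines, which may or may not be desirable depending on the application.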