mirror of https://git.adityakumar.xyz/llama.cpp.git, synced 2024-11-15 09:29:44 +00:00

commit a316a425d0:
- main -> examples
- utils -> examples (renamed to "common")
- quantize -> examples
- separate tools for "perplexity" and "embedding"
Hope I didn't break something!

60 lines | 1.5 KiB | C++

#include "ggml.h"
|
|
#include "llama.h"
|
|
|
|
#include <cstdio>
|
|
#include <string>
|
|
|
|
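// quantization block size: weights are quantized in groups of QK values
// that share their scaling constants (the q4_0 / q4_1 block formats in ggml)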
const int QK = 32;

// usage:
//  ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
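//  e.g. "./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin 2" produces a q4_0 model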
//
int main(int argc, char ** argv) {
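    // set up ggml's timer state so the ggml_time_us() calls below are meaningful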
    ggml_time_init();

    if (argc != 4) {
        fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
        fprintf(stderr, "  type = 2 - q4_0\n");
        fprintf(stderr, "  type = 3 - q4_1\n");
        return 1;
    }

    // needed to initialize f16 tables
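    // (ggml_init() builds its f16 <-> f32 conversion tables as a side effect,
    // so an empty context is created and immediately freed)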
    {
        struct ggml_init_params params = { 0, NULL };
        struct ggml_context * ctx = ggml_init(params);
        ggml_free(ctx);
    }

    const std::string fname_inp = argv[1];
    const std::string fname_out = argv[2];

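    // quantization type to apply: 2 = q4_0, 3 = q4_1 (see usage message above)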
    const int itype = atoi(argv[3]);

    const int64_t t_main_start_us = ggml_time_us();

    int64_t t_quantize_us = 0;

    // load the model
    {
        const int64_t t_start_us = ggml_time_us();

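        // a non-zero return value indicates failure; QK is forwarded as the
        // quantization block size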
        if (llama_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype, QK)) {
            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
            return 1;
        }

        t_quantize_us = ggml_time_us() - t_start_us;
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        printf("\n");
        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    return 0;
}