From 2347e45e7bdb09c9a7d74b2c0bc86c2b65f0c343 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Tue, 13 Jun 2023 20:20:07 +0300
Subject: [PATCH] llama : do a warm-up eval at start for better timings (#1824)

---
 examples/main/main.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 66d5631..efa913e 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -331,6 +331,13 @@ int main(int argc, char ** argv) {
 
     std::vector<llama_token> embd;
 
+    // do one empty run to warm up the model
+    {
+        const std::vector<llama_token> tmp = { llama_token_bos(), };
+        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
+        llama_reset_timings(ctx);
+    }
+
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
         if (embd.size() > 0) {