From 294f424554c1599784ac9962462fc39ace92d8a5 Mon Sep 17 00:00:00 2001
From: Rinne
Date: Wed, 19 Jul 2023 15:06:40 +0800
Subject: [PATCH] llama : extend API to get max devices at runtime (#2253)

---
 llama.cpp | 4 ++++
 llama.h   | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index fa3b7c0..3319b70 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -875,6 +875,10 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
     return result;
 }
 
+int llama_max_devices() {
+    return LLAMA_MAX_DEVICES;
+}
+
 bool llama_mmap_supported() {
     return llama_mmap::SUPPORTED;
 }
diff --git a/llama.h b/llama.h
index e744584..b676a38 100644
--- a/llama.h
+++ b/llama.h
@@ -153,6 +153,8 @@ extern "C" {
         int32_t n_eval;
     };
 
+    LLAMA_API int llama_max_devices();
+
     LLAMA_API struct llama_context_params llama_context_default_params();
 
     LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();
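
A minimal usage sketch (not part of the patch): with this change a caller can query the device limit through the new llama_max_devices() entry point at run time instead of depending on the compile-time LLAMA_MAX_DEVICES constant. The program below only assumes the llama.h from this revision is on the include path.

    // sketch: query how many devices this build of llama.cpp supports
    #include <cstdio>
    #include "llama.h"

    int main() {
        int n_devices = llama_max_devices(); // returns LLAMA_MAX_DEVICES of the linked library
        std::printf("llama.cpp supports up to %d devices\n", n_devices);
        return 0;
    }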