Mirror of https://git.adityakumar.xyz/llama.cpp.git (synced 2024-11-14 00:59:43 +00:00)
Commit 5f939498d5

* Unit test for quantization functions

  Use the ggml_internal_get_quantize_fn function to loop through all quantization formats and run a sanity check on the result. Also add a microbenchmark that times these functions directly, without running the rest of the GGML graph.

* test-quantize-fns: CI fixes

  Fix issues uncovered in CI:
  - sizes must be divisible by 32*8 for loop unrolling
  - use an intrinsics header that should work on Mac

* test-quantize: remove

  Per PR comment, subsumed by test-quantize-fns

* test-quantize: fix for q8_0 intermediates
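
The sanity check described above amounts to a quantize/dequantize round trip per format. The following is a minimal sketch of that loop, assuming the quantize_fns_t struct and ggml_internal_get_quantize_fn() exported by ggml.h around this commit; the field names (quantize_row_q, dequantize_row_q) and the scratch-buffer sizing are assumptions, and the real test-quantize-fns.cpp differs in detail (it compares against per-format error tolerances rather than just printing).

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

#include "ggml.h"

int main() {
    // Size divisible by 32*8, per the loop-unrolling fix noted above
    const int n = 32 * 8 * 4;

    std::vector<float>   src(n);
    std::vector<float>   dst(n);
    std::vector<uint8_t> quant(4 * n); // scratch buffer, generously sized

    // Synthetic but reproducible input data
    for (int i = 0; i < n; i++) {
        src[i] = 0.1f + 2.0f * cosf((float) i);
    }

    // Loop over every GGML type and exercise the ones that quantize
    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
        quantize_fns_t fns = ggml_internal_get_quantize_fn(i);

        // Skip types with no quantization routines (e.g. f32/f16)
        if (!fns.quantize_row_q || !fns.dequantize_row_q) {
            continue;
        }

        // Quantize, dequantize, then measure the round-trip error
        fns.quantize_row_q(src.data(), quant.data(), n);
        fns.dequantize_row_q(quant.data(), dst.data(), n);

        double sum = 0.0;
        for (int j = 0; j < n; j++) {
            const double d = src[j] - dst[j];
            sum += d * d;
        }
        printf("type %d: round-trip RMSE = %f\n", i, sqrt(sum / n));
    }
    return 0;
}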
11 lines · 498 B · CMake
function(llama_add_test source)
    # Derive the test target name from the source file name (without extension)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source})
    target_link_libraries(${TEST_TARGET} PRIVATE llama)
    # Any extra arguments after the source file are forwarded to the test binary
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
endfunction()

# llama_add_test(test-double-float.c) # SLOW
llama_add_test(test-quantize-fns.cpp)
llama_add_test(test-quantize-perf.cpp)
llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
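
test-quantize-perf.cpp, registered above, is the microbenchmark from the commit message: it times the quantization routines directly instead of going through a full GGML graph. Below is a minimal sketch of that timing approach using only the standard library; the bench_ms helper is hypothetical and stands in for whatever harness the real test uses.

#include <chrono>
#include <cstdio>

// Hypothetical helper: run `fn` `iters` times and return mean wall time in ms.
template <typename F>
static double bench_ms(F fn, int iters) {
    const auto t0 = std::chrono::steady_clock::now();
    for (int i = 0; i < iters; i++) {
        fn(); // the routine under test, e.g. one call to a quantize function
    }
    const auto t1 = std::chrono::steady_clock::now();
    return std::chrono::duration<double, std::milli>(t1 - t0).count() / iters;
}

int main() {
    // Placeholder workload; the real benchmark would call quantize_row_q here
    printf("mean: %.6f ms\n", bench_ms([] {}, 1000000));
    return 0;
}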