mirror of
https://git.adityakumar.xyz/llama.cpp.git
synced 2025-02-22 15:40:02 +00:00

* Major refactoring — introduce C-style API
* Clean up
* Add &lt;cassert&gt;, &lt;iterator&gt;, &lt;algorithm&gt;, and other missing headers
* Fix timing reporting and accumulation
* Measure eval time only for single-token calls
* Change llama_tokenize return meaning
4 lines
268 B
CMake
# Tokenizer regression test: builds test-tokenizer-0 from the matching .cpp
# file and registers it with CTest, passing the bundled vocab file as input.
set(TEST_TARGET test-tokenizer-0)

add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)

# PRIVATE: the test links against the libraries but exposes nothing to consumers.
target_link_libraries(${TEST_TARGET} PRIVATE llama ggml utils)

# Using the target name in COMMAND lets CMake substitute the built executable's
# location automatically (equivalent to $<TARGET_FILE:...>, but the preferred form).
add_test(NAME ${TEST_TARGET} COMMAND ${TEST_TARGET} ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)