tests : Fix compilation warnings (Linux/GCC) (#2451)
* fix hellaswag print format, cast away warning in test-double-float
* c++11 cannot use designated initializers
* add static to test-grad0.c internal functions
* use memcpy in test-double-float.c
* port c tests to c++
* use initializer list for ggml_init_params
commit 81844fbcfd (parent a312193e18)

7 changed files with 40 additions and 37 deletions
--- a/Makefile
+++ b/Makefile
@@ -411,13 +411,13 @@ benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o
 vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
 
-tests/test-double-float: tests/test-double-float.c build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
 
-tests/test-grad0: tests/test-grad0.c build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-grad0: tests/test-grad0.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
 
-tests/test-opt: tests/test-opt.c build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-opt: tests/test-opt.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
 
 tests/test-quantize-fns: tests/test-quantize-fns.cpp build-info.h ggml.o llama.o common.o $(OBJS)
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -572,7 +572,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
     fprintf(stdout, "  --perplexity          compute perplexity over each ctx window of the prompt\n");
     fprintf(stdout, "  --hellaswag           compute HellaSwag score over random tasks from datafile supplied with -f\n");
-    fprintf(stdout, "  --hellaswag-tasks N   number of tasks to use when computing the HellaSwag score (default: %d)\n", params.hellaswag_tasks);
+    fprintf(stdout, "  --hellaswag-tasks N   number of tasks to use when computing the HellaSwag score (default: %zu)\n", params.hellaswag_tasks);
     fprintf(stdout, "  --keep N              number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
     fprintf(stdout, "  --chunks N            max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
     if (llama_mlock_supported()) {
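The format-string fix above is the "hellaswag print format" item from the commit message: params.hellaswag_tasks is a size_t, so printing it with %d mismatches the argument type and GCC's -Wformat flags it. A minimal sketch of the warning and the fix, assuming only a size_t variable (the value 400 is illustrative):

    #include <cstdio>
    #include <cstddef>

    int main() {
        size_t hellaswag_tasks = 400;

        // %d expects an int; passing a size_t is undefined behavior where the
        // sizes differ (e.g. LP64 Linux) and triggers -Wformat on GCC/Clang:
        // std::printf("tasks: %d\n", hellaswag_tasks);

        // %zu is the standard length modifier for size_t (C99/C++11):
        std::printf("tasks: %zu\n", hellaswag_tasks);
        return 0;
    }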
--- a/scripts/sync-ggml.sh
+++ b/scripts/sync-ggml.sh
@@ -10,5 +10,5 @@ cp -rpv ../ggml/src/ggml-metal.m ./ggml-metal.m
 cp -rpv ../ggml/src/ggml-metal.metal ./ggml-metal.metal
 cp -rpv ../ggml/include/ggml/ggml.h ./ggml.h
 
-cp -rpv ../ggml/tests/test-opt.c ./tests/test-opt.c
-cp -rpv ../ggml/tests/test-grad0.c ./tests/test-grad0.c
+cp -rpv ../ggml/tests/test-opt.cpp ./tests/test-opt.cpp
+cp -rpv ../ggml/tests/test-grad0.cpp ./tests/test-grad0.cpp
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -6,10 +6,10 @@ function(llama_add_test source)
     add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
 endfunction()
 
-# llama_add_test(test-double-float.c) # SLOW
+# llama_add_test(test-double-float.cpp) # SLOW
 llama_add_test(test-quantize-fns.cpp)
 llama_add_test(test-quantize-perf.cpp)
 llama_add_test(test-sampling.cpp)
 llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
-llama_add_test(test-grad0.c) # SLOW
-# llama_add_test(test-opt.c) # SLOW
+llama_add_test(test-grad0.cpp) # SLOW
+# llama_add_test(test-opt.cpp) # SLOW
--- a/tests/test-double-float.c
+++ b/tests/test-double-float.cpp
@@ -3,10 +3,11 @@
 // This is done by checking all finite (non-NaN, non-infinite) floats.
 
 #undef NDEBUG
-#include <assert.h>
+#include <cassert>
 #include <immintrin.h>
-#include <math.h>
-#include <stdint.h>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
 
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wdouble-promotion"
@@ -32,8 +33,9 @@ inline static float silu_float(float x) {
 int main(void) {
     uint32_t x = UINT32_MAX;
     do {
-        float f = *(float *)&x;
-        assert(!isfinite(f) || (round_orig(f) == round_float(f)));
+        float f;
+        memcpy(&f, &x, sizeof(x));
+        assert(!std::isfinite(f) || (round_orig(f) == round_float(f)));
     } while (x--);
 
 #ifdef __F16C__
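The second hunk is the "use memcpy" item: *(float *)&x reads a uint32_t object through a float lvalue, which violates strict aliasing and draws warnings from GCC, while memcpy is well-defined and optimizes down to the same move. A minimal sketch of the idiom in isolation (the bit pattern below is illustrative; it is 3.14159274f):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
        uint32_t bits = 0x40490FDBu;

        // float f = *(float *)&bits;  // type-punning cast: undefined behavior,
        //                             // warned about under GCC's aliasing rules

        float f;
        std::memcpy(&f, &bits, sizeof(f)); // well-defined; GCC/Clang compile it
                                           // to a single register move at -O1+
        assert(std::isfinite(f));
        return 0;
    }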
--- a/tests/test-grad0.c
+++ b/tests/test-grad0.cpp
@@ -1,10 +1,10 @@
 #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows
 #include "ggml.h"
 
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cassert>
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
@@ -47,16 +47,16 @@
 
 #define GGML_PRINT(...) printf(__VA_ARGS__)
 
-float frand(void) {
+static float frand(void) {
     return (float)rand()/(float)RAND_MAX;
 }
 
-int irand(int n) {
+static int irand(int n) {
     if (n == 0) return 0;
     return rand()%n;
 }
 
-void get_random_dims(int64_t * dims, int ndims) {
+static void get_random_dims(int64_t * dims, int ndims) {
     dims[0] = dims[1] = dims[2] = dims[3] = 1;
 
     for (int i = 0; i < ndims; i++) {
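Marking these helpers static gives them internal linkage, which is what silences GCC's warnings about externally visible functions that no header declares (-Wmissing-declarations in C++, -Wmissing-prototypes in C). A reduced sketch, assuming one of those warnings is enabled:

    #include <cstdlib>

    // Without 'static' this function has external linkage, and GCC warns that
    // no previous declaration exists. With 'static' it is private to this
    // translation unit, so no external declaration is expected.
    static float frand(void) {
        return (float)rand()/(float)RAND_MAX;
    }

    int main() {
        return frand() <= 1.0f ? 0 : 1; // always 0; merely forces a use of frand
    }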
@@ -64,7 +64,7 @@ void get_random_dims(int64_t * dims, int ndims) {
     }
 }
 
-struct ggml_tensor * get_random_tensor_f32(
+static struct ggml_tensor * get_random_tensor_f32(
         struct ggml_context * ctx0,
         int ndims,
         int64_t ne[],
@@ -112,7 +112,7 @@ struct ggml_tensor * get_random_tensor_f32(
     return result;
 }
 
-struct ggml_tensor * get_random_tensor_f16(
+static struct ggml_tensor * get_random_tensor_f16(
         struct ggml_context * ctx0,
         int ndims,
         int64_t ne[],
@@ -160,7 +160,7 @@ struct ggml_tensor * get_random_tensor_f16(
     return result;
 }
 
-struct ggml_tensor * get_random_tensor_i32(
+static struct ggml_tensor * get_random_tensor_i32(
         struct ggml_context * ctx0,
         int ndims,
         int64_t ne[],
@@ -208,7 +208,7 @@ struct ggml_tensor * get_random_tensor_i32(
     return result;
 }
 
-void print_elements(const char* label, const struct ggml_tensor * t) {
+static void print_elements(const char* label, const struct ggml_tensor * t) {
     if (!t) {
         printf("%s: %s = null\n", __func__, label);
         return;
@@ -228,7 +228,7 @@ void print_elements(const char* label, const struct ggml_tensor * t) {
 
 }
 
-bool check_gradient(
+static bool check_gradient(
         const char * op_name,
         struct ggml_context * ctx0,
         struct ggml_tensor * x[],
@@ -310,7 +310,7 @@ bool check_gradient(
 }
 
 // TODO: clean-up this ..
-bool check_mat_mul(
+static bool check_mat_mul(
     const struct ggml_tensor * y,
     const struct ggml_tensor * x0,
     const struct ggml_tensor * x1) {
@@ -373,9 +373,9 @@ bool check_mat_mul(
 
 int main(int argc, const char ** argv) {
     struct ggml_init_params params = {
-        .mem_size   = 128*1024*1024,
-        .mem_buffer = NULL,
-        .no_alloc   = false,
+        /* .mem_size   = */ 128*1024*1024,
+        /* .mem_buffer = */ NULL,
+        /* .no_alloc   = */ false,
     };
 
     int64_t ne[4];
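This is the "c++11 cannot use designated initializers" / "use initializer list for ggml_init_params" pair from the commit message: .field = value initializers are C99, and C++ only gained a restricted form of them in C++20, so g++ in C++11 mode rejects them or accepts them only as a warned-about extension. The workaround keeps the fields in declaration order and preserves the names as comments. A self-contained sketch with a hypothetical stand-in struct (the real ggml_init_params lives in ggml.h):

    #include <cstddef>

    struct init_params_like { // hypothetical stand-in for ggml_init_params
        size_t mem_size;
        void * mem_buffer;
        bool   no_alloc;
    };

    int main() {
        // C99 form, not valid C++11:
        // struct init_params_like p = { .mem_size = 1024, .mem_buffer = NULL, .no_alloc = false };

        // C++11-safe positional initialization; the comments keep the field
        // names readable, and the order must match the declaration order:
        struct init_params_like params = {
            /* .mem_size   = */ 128*1024*1024,
            /* .mem_buffer = */ nullptr,
            /* .no_alloc   = */ false,
        };
        return params.no_alloc ? 1 : 0; // always 0
    }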
--- a/tests/test-opt.c
+++ b/tests/test-opt.cpp
@@ -1,9 +1,9 @@
 #include "ggml.h"
 
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cassert>
 
 #define MAX_NARGS 2
 
@@ -119,10 +119,11 @@ void set_element(struct ggml_tensor * t, int idx, float value) {
 
 int main(void) {
     struct ggml_init_params params = {
-        .mem_size   = 1024*1024*1024,
-        .mem_buffer = NULL,
-        .no_alloc   = false,
+        /* .mem_size   = */ 1024*1024*1024,
+        /* .mem_buffer = */ NULL,
+        /* .no_alloc   = */ false,
     };
+
     struct ggml_context * ctx = ggml_init(params);
 
     int64_t ne1[4] = {4, 128, 1, 1};
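The same porting pattern runs through all three renamed tests: each C header moves to its <c...> counterpart, which nominally places declarations in namespace std, and call sites get std-qualified where the tests touch them (std::isfinite above). A small sketch of the convention, assuming a GCC/Clang-style warning set:

    #include <cassert> // was <assert.h>; assert itself stays a macro, no std::
    #include <cmath>   // was <math.h>; provides std::isfinite
    #include <cstdio>  // was <stdio.h>; provides std::printf

    int main() {
        float f = 0.5f;
        assert(std::isfinite(f));
        std::printf("f = %f\n", (double)f); // explicit cast, matching the commit's
                                            // (double)params.temp style, keeps
                                            // -Wdouble-promotion quiet
        return 0;
    }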