Don't crash on ftype (formerly f16) == 4 (#917)
parent f76cb3a34d
commit e7f6997f89
2 changed files with 4 additions and 1 deletion
llama.cpp (+3 −1)

@@ -827,7 +827,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
         case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
         case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
-        default: LLAMA_ASSERT(false);
+        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
+                                      return "mostly Q4_1, some F16";
+        default:                      return "unknown, may not work";
     }
 }
 
llama.h (+1)

@@ -71,6 +71,7 @@ extern "C" {
     LLAMA_FTYPE_MOSTLY_F16 = 1,  // except 1d tensors
     LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
     LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
 };
 
 LLAMA_API struct llama_context_params llama_context_default_params();
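
Taken together, the two hunks mean a model whose header reports ftype 4 (Q4_1 weights with F16 token embeddings and output weights) now gets a readable name instead of tripping LLAMA_ASSERT(false), and any other unrecognized value degrades to "unknown, may not work". A minimal standalone sketch of the patched mapping follows; the enum values and function body are taken from the diff, while LLAMA_FTYPE_ALL_F32 = 0 and the main() driver are assumptions added here for illustration.

#include <stdio.h>

/* llama_ftype values as declared in llama.h after this commit
   (ALL_F32 = 0 is assumed from the surrounding header, not shown in the hunk). */
enum llama_ftype {
    LLAMA_FTYPE_ALL_F32              = 0,
    LLAMA_FTYPE_MOSTLY_F16           = 1, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_0          = 2, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_1          = 3, // except 1d tensors
    LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
};

/* Mirrors the patched llama_ftype_name(): unknown values return a
   placeholder string instead of hitting LLAMA_ASSERT(false). */
static const char *llama_ftype_name(enum llama_ftype ftype) {
    switch (ftype) {
        case LLAMA_FTYPE_ALL_F32:     return "all F32";
        case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
        case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
        case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
                                      return "mostly Q4_1, some F16";
        default:                      return "unknown, may not work";
    }
}

int main(void) {
    printf("%s\n", llama_ftype_name((enum llama_ftype) 4)); // mostly Q4_1, some F16
    printf("%s\n", llama_ftype_name((enum llama_ftype) 9)); // unknown, may not work
    return 0;
}

Compiled on its own, this prints "mostly Q4_1, some F16" for ftype 4 and "unknown, may not work" for any out-of-range value, the same strings the loader now reports instead of aborting.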