mirror of
https://git.adityakumar.xyz/llama.cpp.git
synced 2025-02-22 15:40:02 +00:00
llama : fix comment for "output.weight" tensor
This commit is contained in:
parent
2510c1831f
commit
d40fded93e
1 changed file with 2 additions and 2 deletions
|
@@ -1618,8 +1618,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||||
// quantize only 2D tensors
|
// quantize only 2D tensors
|
||||||
quantize &= (tensor.ne.size() == 2);
|
quantize &= (tensor.ne.size() == 2);
|
||||||
|
|
||||||
// GG: uncomment this to keep the output layer in FP16
|
// uncomment this to keep the output layer in FP16
|
||||||
//if (tensor.name.rfind("output")) {
|
//if (tensor.name == "output.weight") {
|
||||||
// quantize = false;
|
// quantize = false;
|
||||||
//}
|
//}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue