mirror of
https://git.adityakumar.xyz/llama.cpp.git
synced 2025-02-22 07:40:00 +00:00
use weights_only in conversion script (#32)
This restricts malicious weights from executing arbitrary code by restricting the unpickler to only loading tensors, primitive types, and dictionaries.
This commit is contained in:
parent
6a9a67f0be
commit
a93120236f
1 changed files with 1 additions and 1 deletions
|
@@ -86,7 +86,7 @@ for p in range(n_parts):
|
|||
if (p > 0):
|
||||
fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" + "." + str(p)
|
||||
|
||||
model = torch.load(fname_model, map_location="cpu")
|
||||
model = torch.load(fname_model, map_location="cpu", weights_only=True)
|
||||
|
||||
fout = open(fname_out, "wb")
|
||||
|
||||
|
|
Loading…
Reference in a new issue