Commit 99aa304

llama : add support for EXAONE tied word embeddings (#12451)
1 parent: 8551c44

File tree

1 file changed: +6 -1 lines

src/llama-model.cpp (+6 -1)
@@ -3264,7 +3264,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                     // output
                     output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
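
For context, "tied word embeddings" means the model reuses its input token-embedding matrix as the output (LM-head) projection instead of shipping a separate output tensor. The change above loads LLM_TENSOR_OUTPUT as optional (TENSOR_NOT_REQUIRED) and, when it is absent, duplicates LLM_TENSOR_TOKEN_EMBD in its place. Below is a minimal, self-contained C++ sketch of that fallback pattern under simplified assumptions; `Tensor`, `Checkpoint`, and `load_optional` are hypothetical stand-ins for illustration, not llama.cpp API.

```cpp
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for a loaded tensor: just a name and some data.
struct Tensor {
    std::string name;
    std::vector<float> data;
};

// Hypothetical checkpoint: tensors looked up by name. A model exported
// with tied word embeddings has no dedicated "output.weight" entry.
using Checkpoint = std::map<std::string, Tensor>;

// Return a pointer to the named tensor, or nullptr if it is missing
// (the analogue of loading with TENSOR_NOT_REQUIRED).
const Tensor * load_optional(const Checkpoint & ckpt, const std::string & name) {
    auto it = ckpt.find(name);
    return it == ckpt.end() ? nullptr : &it->second;
}

int main() {
    Checkpoint ckpt = {
        {"token_embd.weight", {"token_embd.weight", {0.1f, 0.2f, 0.3f}}},
        // note: no "output.weight" entry -> embeddings are tied
    };

    // Try the dedicated output projection first.
    const Tensor * output = load_optional(ckpt, "output.weight");

    // If it is absent, fall back to the input token embeddings,
    // mirroring the TENSOR_DUPLICATED fallback in the commit above.
    if (output == nullptr) {
        output = load_optional(ckpt, "token_embd.weight");
        std::printf("output tensor missing, tying to %s\n", output->name.c_str());
    }

    return 0;
}
```

Loading the output tensor as optional rather than required keeps a single code path that works for both tied and untied EXAONE checkpoints.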
