Skip to content

Commit

Permalink
Avoid the transposed X branch in the Z = X * Y matrix multiplication (ggerganov#439)
Browse files Browse the repository at this point in the history

Should make results reproducible for different number of threads and batch sizes
  • Loading branch information
ggerganov authored Mar 23, 2023
1 parent 404e1da commit 483bab2
Showing 1 changed file with 7 additions and 5 deletions.
12 changes: 7 additions & 5 deletions llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -727,11 +727,13 @@ static bool llama_eval_internal(

// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
struct ggml_tensor * V_trans =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
n_embd/n_head, n_head, n_past + N),
1, 2, 0, 3);
ggml_cpy(ctx0,
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
n_embd/n_head, n_head, n_past + N),
1, 2, 0, 3),
ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));

// KQV = transpose(V) * KQ_soft_max
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
Expand Down

0 comments on commit 483bab2

Please sign in to comment.