Skip to content

Commit 620b4aa

Browse files
compilade authored and qnixsynapse committed
kv-cache : avoid modifying recurrent cells when setting inputs (ggml-org#13834)
* kv-cache : avoid modifying recurrent cells when setting inputs

* kv-cache : remove inp_s_mask

  It was replaced with equivalent and simpler functionality
  with rs_z (the first zeroed state) and the already-existing inp_s_copy.

* kv-cache : fix non-consecutive token pos warning for recurrent models

  The problem was apparently caused by how the tail cells were swapped.

* graph : simplify logic for recurrent state copies

* kv-cache : use cell without src refs for rs_z in recurrent cache

* llama-graph : fix recurrent state copy

  The `state_copy` shuffle assumes everything is moved at once,
  which is not true when `states_extra` is copied back to the cache
  before copying the range of states between `head` and `head + n_seqs`.

  This is only a problem if any of the cells in [`head`, `head + n_seqs`)
  have an `src` in [`head + n_seqs`, `head + n_kv`),
  which does happen when `n_ubatch > 1` in the `llama-parallel` example.

  Changing the order of the operations avoids the potential overwrite
  before use, although when copies are avoided (like with Mamba2),
  this will require further changes.

* llama-graph : rename n_state to state_size in build_recurrent_state

  This naming should reduce confusion between the state size
  and the number of states.
1 parent 144209a commit 620b4aa

File tree

4 files changed

+810
-2133
lines changed

4 files changed

+810
-2133
lines changed

src/llama-graph.cpp

Lines changed: 30 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -250,22 +250,6 @@ void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) {
250250
}
251251
}
252252

253-
void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) {
254-
GGML_UNUSED(ubatch);
255-
256-
const int64_t n_kv = kv_state->get_n_kv();
257-
258-
if (s_mask) {
259-
GGML_ASSERT(ggml_backend_buffer_is_host(s_mask->buffer));
260-
float * data = (float *) s_mask->data;
261-
262-
// clear unused states
263-
for (int i = 0; i < n_kv; ++i) {
264-
data[i] = kv_state->s_mask(i);
265-
}
266-
}
267-
}
268-
269253
void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
270254
GGML_UNUSED(ubatch);
271255

@@ -987,23 +971,6 @@ ggml_tensor * llm_graph_context::build_inp_s_copy() const {
987971
return cur;
988972
}
989973

990-
ggml_tensor * llm_graph_context::build_inp_s_mask() const {
991-
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
992-
993-
auto inp = std::make_unique<llm_graph_input_s_mask>(kv_state);
994-
995-
const auto n_kv = kv_state->get_n_kv();
996-
997-
auto & cur = inp->s_mask;
998-
999-
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv);
1000-
ggml_set_input(cur);
1001-
1002-
res->add_input(std::move(inp));
1003-
1004-
return cur;
1005-
}
1006-
1007974
ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
1008975
auto inp = std::make_unique<llm_graph_input_cross_embd>(cross);
1009976

@@ -1456,43 +1423,53 @@ ggml_tensor * llm_graph_context::build_attn(
14561423
return cur;
14571424
}
14581425

1459-
ggml_tensor * llm_graph_context::build_copy_mask_state(
1426+
ggml_tensor * llm_graph_context::build_recurrent_state(
14601427
ggml_cgraph * gf,
14611428
ggml_tensor * s,
14621429
ggml_tensor * state_copy,
1463-
ggml_tensor * state_mask,
1464-
int32_t n_state,
1465-
int32_t n_seqs) const {
1430+
int32_t state_size,
1431+
int32_t n_seqs,
1432+
bool avoid_copies) const {
14661433
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
14671434

14681435
const auto n_kv = kv_state->get_n_kv();
14691436
const auto kv_head = kv_state->get_head();
1437+
const auto rs_zero = kv_state->get_rs_z();
14701438

1471-
ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_state->get_size());
1439+
ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_state->get_size());
14721440

1473-
// copy states
1474-
// NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv
1475-
// this shrinks the tensors's ne[1] to n_kv
1476-
states = ggml_get_rows(ctx0, states, state_copy);
1441+
// Clear a single state which will then be copied to the other cleared states.
1442+
// Note that this is a no-op when the view is zero-sized.
1443+
ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0));
1444+
ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0));
14771445

1478-
// clear states of sequences which are starting at the beginning of this batch
1479-
// FIXME: zero-out NANs?
1480-
states = ggml_mul(ctx0, states, state_mask);
1446+
ggml_tensor * output_states;
1447+
1448+
if (!avoid_copies) {
1449+
// copy states
1450+
// NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv
1451+
// {state_size, kv_size} -> {state_size, n_seqs}
1452+
output_states = ggml_get_rows(ctx0, states, ggml_view_1d(ctx0, state_copy, n_seqs, 0));
1453+
ggml_build_forward_expand(gf, output_states);
1454+
} else {
1455+
// FIXME: make the gathering operation happen before the copy below
1456+
// (maybe with an optional lambda function passed as a parameter instead of `avoid_copies`?)
1457+
output_states = states;
1458+
}
14811459

1482-
// copy states which won't be changed further (between n_seqs and n_kv)
1460+
// copy extra states which won't be changed further (between n_seqs and n_kv)
1461+
ggml_tensor * states_extra = ggml_get_rows(ctx0, states, ggml_view_1d(ctx0, state_copy, n_kv - n_seqs, n_seqs*state_copy->nb[0]));
14831462
ggml_build_forward_expand(gf,
14841463
ggml_cpy(ctx0,
1485-
ggml_view_1d(ctx0, states, n_state*(n_kv - n_seqs), (n_seqs )*n_state*ggml_element_size(states)),
1486-
ggml_view_1d(ctx0, s, n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s))));
1464+
states_extra,
1465+
ggml_view_1d(ctx0, s, state_size*(n_kv - n_seqs), (kv_head + n_seqs)*state_size*ggml_element_size(s))));
14871466

1488-
// the part of the states that will be used and modified
1489-
return ggml_view_2d(ctx0, states, n_state, n_seqs, states->nb[1], 0);
1467+
return output_states;
14901468
}
14911469

14921470
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
14931471
ggml_cgraph * gf,
14941472
ggml_tensor * state_copy,
1495-
ggml_tensor * state_mask,
14961473
const llama_ubatch & ubatch,
14971474
int il) const {
14981475
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
@@ -1503,8 +1480,8 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
15031480

15041481
ggml_tensor * token_shift_all = kv_state->get_k_l(il);
15051482

1506-
ggml_tensor * token_shift = build_copy_mask_state(
1507-
gf, token_shift_all, state_copy, state_mask,
1483+
ggml_tensor * token_shift = build_recurrent_state(
1484+
gf, token_shift_all, state_copy,
15081485
hparams.n_embd_k_s(), n_seqs);
15091486

15101487
token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);

src/llama-graph.h

Lines changed: 4 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -200,18 +200,6 @@ class llm_graph_input_s_copy : public llm_graph_input_i {
200200
const llama_kv_cache_recurrent_state * kv_state;
201201
};
202202

203-
class llm_graph_input_s_mask : public llm_graph_input_i {
204-
public:
205-
llm_graph_input_s_mask(const llama_kv_cache_recurrent_state * kv_state) : kv_state(kv_state) {}
206-
virtual ~llm_graph_input_s_mask() = default;
207-
208-
void set_input(const llama_ubatch * ubatch) override;
209-
210-
ggml_tensor * s_mask; // F32 [1, n_kv]
211-
212-
const llama_kv_cache_recurrent_state * kv_state;
213-
};
214-
215203
class llm_graph_input_cross_embd : public llm_graph_input_i {
216204
public:
217205
llm_graph_input_cross_embd(
@@ -521,7 +509,6 @@ struct llm_graph_context {
521509
ggml_tensor * build_inp_mean() const;
522510
ggml_tensor * build_inp_cls() const;
523511
ggml_tensor * build_inp_s_copy() const;
524-
ggml_tensor * build_inp_s_mask() const;
525512

526513
ggml_tensor * build_inp_cross_embd() const;
527514
ggml_tensor * build_inp_pos_bucket_enc() const;
@@ -606,18 +593,17 @@ struct llm_graph_context {
606593
// recurrent
607594
//
608595

609-
ggml_tensor * build_copy_mask_state(
596+
ggml_tensor * build_recurrent_state(
610597
ggml_cgraph * gf,
611598
ggml_tensor * s,
612599
ggml_tensor * state_copy,
613-
ggml_tensor * state_mask,
614-
int32_t n_state,
615-
int32_t n_seqs) const;
600+
int32_t state_size,
601+
int32_t n_seqs,
602+
bool avoid_copies = false) const;
616603

617604
ggml_tensor * build_rwkv_token_shift_load(
618605
ggml_cgraph * gf,
619606
ggml_tensor * state_copy,
620-
ggml_tensor * state_mask,
621607
const llama_ubatch & ubatch,
622608
int il) const;
623609

0 commit comments

Comments (0)