Skip to content

Commit 17193cc

Browse files
Commit 17193cc — kv-cache : do not quantize SWA KV cache (ggml-org#21277)
Parent: d6dac92

1 file changed

Lines changed: 2 additions & 1 deletion

File tree

src/llama-kv-cache-iswa.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,8 +66,9 @@ llama_kv_cache_iswa::llama_kv_cache_iswa(
 
     LLAMA_LOG_INFO("%s: creating SWA KV cache, size = %u cells\n", __func__, size_swa);
 
+    // note: the SWA cache is never quantized because it is relatively small
     kv_swa = std::make_unique<llama_kv_cache>(
-            model, type_k, type_v,
+            model, GGML_TYPE_F16, GGML_TYPE_F16,
             v_trans, offload, unified, size_swa, n_seq_max, n_pad,
             hparams.n_swa, hparams.swa_type, filter_swa, reuse);
 }
}

0 commit comments

Comments (0)