ggml: Always set cache padding to 256
We currently use a cache padding of 32 when not using flash attention and 256 with flash attention, based on the historic alignment requirements of those kernels. The restrictions have since been loosened, but larger padding still has performance benefits, such as better CUDA graph reuse. Since the requirement is no longer kernel-specific, set the padding uniformly to 256, as llama.cpp does.
@@ -687,7 +687,7 @@ func (b *Backend) CacheConfig() ml.CacheConfig {
 	if b.flashAttention {
 		return ml.CacheConfig{CachePadding: 256, MaskDType: ml.DTypeF16, MaskBatchPadding: C.GGML_KQ_MASK_PAD}
 	} else {
-		return ml.CacheConfig{CachePadding: 32, PermutedV: true}
+		return ml.CacheConfig{CachePadding: 256, PermutedV: true}
 	}
 }
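For intuition, CachePadding rounds the effective cache length up to a fixed multiple, so buffer shapes change less often as the context grows; the idea is that a coarser granularity lets captured CUDA graphs be reused across more decode steps. The following is a minimal, self-contained Go sketch of that rounding only; padCacheLength is a hypothetical helper for illustration and is not part of ollama's actual API.

package main

import "fmt"

// padCacheLength rounds n up to the nearest multiple of padding.
// Illustrative only: this mirrors the general idea behind CachePadding,
// not ollama's real implementation.
func padCacheLength(n, padding int) int {
	return ((n + padding - 1) / padding) * padding
}

func main() {
	// Compare how often the padded length changes with padding 32 vs. 256.
	for _, n := range []int{1, 31, 32, 33, 200, 257} {
		fmt.Printf("n=%3d  pad32=%3d  pad256=%3d\n",
			n, padCacheLength(n, 32), padCacheLength(n, 256))
	}
}

With a padding of 256, the padded length (and therefore the buffer shape) changes only once every 256 tokens instead of every 32, at the cost of a somewhat larger allocation.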