From 6344d7b6ed2f8a7d326e2213e7af381530ca261e Mon Sep 17 00:00:00 2001
From: Icey <1790571317@qq.com>
Date: Fri, 14 Nov 2025 07:49:30 +0000
Subject: [PATCH] [Cherry-pick][0.11.0] Adapted to
 torch_npu.npu_fused_infer_attention_score

Signed-off-by: Icey <1790571317@qq.com>
---
 vllm_ascend/attention/attention_v1.py            | 2 +-
 vllm_ascend/patch/platform/patch_mamba_config.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm_ascend/attention/attention_v1.py b/vllm_ascend/attention/attention_v1.py
index e003ca6f65a..7c5e247e870 100644
--- a/vllm_ascend/attention/attention_v1.py
+++ b/vllm_ascend/attention/attention_v1.py
@@ -115,7 +115,7 @@ def copy_blocks(
 
     @staticmethod
     def get_supported_block_size() -> list[int]:
-        return [64]
+        return [128]
 
 
 class AscendAttentionState(Enum):
diff --git a/vllm_ascend/patch/platform/patch_mamba_config.py b/vllm_ascend/patch/platform/patch_mamba_config.py
index 1afb9e16783..1420facc618 100644
--- a/vllm_ascend/patch/platform/patch_mamba_config.py
+++ b/vllm_ascend/patch/platform/patch_mamba_config.py
@@ -51,7 +51,7 @@ def verify_and_update_config(cls, vllm_config) -> None:
             block_size=model_config.max_model_len,
         ).page_size_bytes
 
-        block_alignment_bytes = 64
+        block_alignment_bytes = 128
 
         # some attention backends (e.g. FA) only support setting
         # block size to multiple of 16, so let's suggest a value