vllm.utils.flashinfer

Compatibility wrapper for FlashInfer API changes.

Users of vLLM should always import only these wrappers.

_flashinfer_concat_mla_k

_flashinfer_concat_mla_k(
    k: Tensor, k_nope: Tensor, k_pe: Tensor
) -> None

Custom op wrapper for flashinfer's concat_mla_k.

This is an in-place operation that concatenates k_nope and k_pe into k.

The kernel is optimized for DeepSeek V3 dimensions:

- num_heads=128
- nope_dim=128
- rope_dim=64

Key optimizations:

- Warp-based processing with software pipelining
- Vectorized memory access (int2 for nope, int for rope)
- L2 prefetching for next row while processing current
- Register reuse for rope values across all heads

Parameters:

k (Tensor, required): Output tensor, shape [num_tokens, num_heads, nope_dim + rope_dim]. Modified in-place.

k_nope (Tensor, required): The nope part of k, shape [num_tokens, num_heads, nope_dim].

k_pe (Tensor, required): The rope part of k (shared), shape [num_tokens, 1, rope_dim]. This is broadcast to all heads.
Source code in vllm/utils/flashinfer.py
def _flashinfer_concat_mla_k(
    k: torch.Tensor,
    k_nope: torch.Tensor,
    k_pe: torch.Tensor,
) -> None:
    """Custom op wrapper for flashinfer's concat_mla_k.

    This is an in-place operation that concatenates k_nope and k_pe into k.

    The kernel is optimized for DeepSeek V3 dimensions:
    - num_heads=128
    - nope_dim=128
    - rope_dim=64

    Key optimizations:
    - Warp-based processing with software pipelining
    - Vectorized memory access (int2 for nope, int for rope)
    - L2 prefetching for next row while processing current
    - Register reuse for rope values across all heads

    Args:
        k: Output tensor, shape [num_tokens, num_heads, nope_dim + rope_dim].
            Modified in-place.
        k_nope: The nope part of k, shape [num_tokens, num_heads, nope_dim].
        k_pe: The rope part of k (shared), shape [num_tokens, 1, rope_dim].
              This is broadcast to all heads.
    """
    from flashinfer.concat_ops import concat_mla_k

    concat_mla_k(k, k_nope, k_pe)
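
Example (a minimal sketch, assuming flashinfer is installed and the tensors live on a CUDA device; the shapes and dtype below are illustrative DeepSeek V3-style values, not requirements enforced by this wrapper):

import torch

from vllm.utils.flashinfer import _flashinfer_concat_mla_k

num_tokens, num_heads, nope_dim, rope_dim = 4, 128, 128, 64

k_nope = torch.randn(num_tokens, num_heads, nope_dim, dtype=torch.bfloat16, device="cuda")
k_pe = torch.randn(num_tokens, 1, rope_dim, dtype=torch.bfloat16, device="cuda")
# Pre-allocated output buffer; the op writes into it in-place and returns None.
k = torch.empty(num_tokens, num_heads, nope_dim + rope_dim, dtype=torch.bfloat16, device="cuda")

_flashinfer_concat_mla_k(k, k_nope, k_pe)

# Equivalent reference: broadcast k_pe to all heads, then concatenate on the last dim.
ref = torch.cat([k_nope, k_pe.expand(-1, num_heads, -1)], dim=-1)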

_get_submodule

_get_submodule(module_name: str) -> Any | None

Safely import a submodule and return it, or None if not available.

Source code in vllm/utils/flashinfer.py
def _get_submodule(module_name: str) -> Any | None:
    """Safely import a submodule and return it, or None if not available."""
    try:
        return importlib.import_module(module_name)
    except (ImportError, ModuleNotFoundError):
        return None

_lazy_import_wrapper

_lazy_import_wrapper(
    module_name: str,
    attr_name: str,
    fallback_fn: Callable[..., Any] = _missing,
)

Create a lazy import wrapper for a specific function.

Source code in vllm/utils/flashinfer.py
def _lazy_import_wrapper(
    module_name: str, attr_name: str, fallback_fn: Callable[..., Any] = _missing
):
    """Create a lazy import wrapper for a specific function."""

    @functools.cache
    def _get_impl():
        if not has_flashinfer():
            return None
        mod = _get_submodule(module_name)
        return getattr(mod, attr_name, None) if mod else None

    def wrapper(*args, **kwargs):
        impl = _get_impl()
        if impl is None:
            return fallback_fn(*args, **kwargs)
        return impl(*args, **kwargs)

    return wrapper
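
Typical usage is a module-level binding created at import time. The sketch below is hypothetical (the variable name is illustrative; the module/attribute pair is taken from the availability checks elsewhere on this page):

# Hypothetical lazy binding: flashinfer.fused_moe is only imported on the
# first call, the resolved function is cached, and calls fall back to
# `_missing` (which raises with install instructions) when unavailable.
flashinfer_cutlass_fused_moe = _lazy_import_wrapper(
    "flashinfer.fused_moe", "cutlass_fused_moe"
)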

_missing

_missing(*_: Any, **__: Any) -> NoReturn

Placeholder for unavailable FlashInfer backend.

Source code in vllm/utils/flashinfer.py
def _missing(*_: Any, **__: Any) -> NoReturn:
    """Placeholder for unavailable FlashInfer backend."""
    raise RuntimeError(
        "FlashInfer backend is not available. Please install the package "
        "to enable FlashInfer kernels: "
        "https://github.com/flashinfer-ai/flashinfer"
    )

can_use_trtllm_attention

can_use_trtllm_attention(
    num_qo_heads: int, num_kv_heads: int
) -> bool

Check if the current configuration supports TRTLLM attention.

Source code in vllm/utils/flashinfer.py
def can_use_trtllm_attention(num_qo_heads: int, num_kv_heads: int) -> bool:
    """Check if the current configuration supports TRTLLM attention."""
    if force_use_trtllm_attention() is False:
        return False
    has_trtllm = supports_trtllm_attention()
    return has_trtllm and (num_qo_heads % num_kv_heads == 0)
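
For example, assuming a platform where supports_trtllm_attention() returns True (head counts are illustrative):

can_use_trtllm_attention(num_qo_heads=32, num_kv_heads=8)  # True:  32 % 8 == 0
can_use_trtllm_attention(num_qo_heads=32, num_kv_heads=6)  # False: 32 % 6 != 0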

force_use_trtllm_attention

force_use_trtllm_attention() -> bool | None

This function should only be called during the initialization stage, when the vLLM config is set. It returns None if --attention-config.use_trtllm_attention is not set, True if TRTLLM attention is forced on, and False if it is forced off.

Source code in vllm/utils/flashinfer.py
def force_use_trtllm_attention() -> bool | None:
    """
    This function should only be called during initialization stage when vllm config
    is set.
    Return `None` if --attention-config.use_trtllm_attention is not set,
    return `True` if TRTLLM attention is forced to be used,
    return `False` if TRTLLM attention is forced to be not used.
    """
    from vllm.config import get_current_vllm_config

    vllm_config = get_current_vllm_config()
    return vllm_config.attention_config.use_trtllm_attention

has_flashinfer cached

has_flashinfer() -> bool

Return True if flashinfer-python package is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer() -> bool:
    """Return `True` if flashinfer-python package is available."""
    # Use find_spec to check if the module exists without importing it
    # This avoids potential CUDA initialization side effects
    if importlib.util.find_spec("flashinfer") is None:
        logger.debug_once("FlashInfer unavailable since package was not found")
        return False
    # When not using flashinfer cubin,
    # Also check if nvcc is available since it's required to JIT compile flashinfer
    if not has_flashinfer_cubin() and shutil.which("nvcc") is None:
        logger.debug_once(
            "FlashInfer unavailable since nvcc was not found "
            "and not using pre-downloaded cubins"
        )
        return False
    return True

has_flashinfer_all2all cached

has_flashinfer_all2all() -> bool

Return True if FlashInfer mnnvl all2all is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_all2all() -> bool:
    """Return `True` if FlashInfer mnnvl all2all is available."""
    if not has_flashinfer_comm():
        return False

    # Check if all required functions are available
    required_functions = [
        ("flashinfer.comm", "Mapping"),
        ("flashinfer.comm.mnnvl", "MnnvlMemory"),
        ("flashinfer.comm.trtllm_alltoall", "MnnvlMoe"),
        ("flashinfer.comm.trtllm_alltoall", "MoEAlltoallInfo"),
    ]

    for module_name, attr_name in required_functions:
        mod = _get_submodule(module_name)
        if not mod or not hasattr(mod, attr_name):
            return False
    return True

has_flashinfer_comm cached

has_flashinfer_comm() -> bool

Return True if FlashInfer comm module is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_comm() -> bool:
    """Return `True` if FlashInfer comm module is available."""
    return has_flashinfer() and importlib.util.find_spec("flashinfer.comm") is not None

has_flashinfer_cubin cached

has_flashinfer_cubin() -> bool

Return True if flashinfer-cubin package is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_cubin() -> bool:
    """Return `True` if flashinfer-cubin package is available."""
    if envs.VLLM_HAS_FLASHINFER_CUBIN:
        return True
    if importlib.util.find_spec("flashinfer_cubin") is not None:
        return True
    logger.debug_once("flashinfer-cubin package was not found")
    return False

has_flashinfer_cutedsl cached

has_flashinfer_cutedsl() -> bool

Return True if FlashInfer cutedsl module is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_cutedsl() -> bool:
    """Return ``True`` if FlashInfer cutedsl module is available."""
    return (
        has_flashinfer() and importlib.util.find_spec("flashinfer.cute_dsl") is not None
    )

has_flashinfer_cutedsl_grouped_gemm_nt_masked cached

has_flashinfer_cutedsl_grouped_gemm_nt_masked() -> bool

Return True if FlashInfer CUTLASS fused MoE is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_cutedsl_grouped_gemm_nt_masked() -> bool:
    """Return ``True`` if FlashInfer CUTLASS fused MoE is available."""
    if not has_flashinfer_cutedsl():
        return False

    # Check if all required functions are available
    required_functions = [
        ("flashinfer.cute_dsl.blockscaled_gemm", "grouped_gemm_nt_masked"),
        ("flashinfer", "scaled_fp4_grouped_quantize"),
        ("flashinfer", "silu_and_scaled_nvfp4_experts_quantize"),
    ]

    for module_name, attr_name in required_functions:
        mod = _get_submodule(module_name)
        if not mod or not hasattr(mod, attr_name):
            return False
    return True

has_flashinfer_cutlass_fused_moe cached

has_flashinfer_cutlass_fused_moe() -> bool

Return True if FlashInfer CUTLASS fused MoE is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_cutlass_fused_moe() -> bool:
    """Return `True` if FlashInfer CUTLASS fused MoE is available."""
    if not has_flashinfer_moe():
        return False

    # Check if all required functions are available
    required_functions = [
        ("flashinfer.fused_moe", "cutlass_fused_moe"),
        ("flashinfer", "fp4_quantize"),
        ("flashinfer", "nvfp4_block_scale_interleave"),
        ("flashinfer.fused_moe", "trtllm_fp4_block_scale_moe"),
    ]

    for module_name, attr_name in required_functions:
        mod = _get_submodule(module_name)
        if not mod or not hasattr(mod, attr_name):
            return False
    return True
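
A sketch of the gating pattern these availability helpers enable (the dispatch function and backend names are hypothetical):

from vllm.utils.flashinfer import has_flashinfer_cutlass_fused_moe

def pick_moe_backend() -> str:
    # Hypothetical dispatch: the helper already folds in has_flashinfer(),
    # has_flashinfer_moe(), and the per-kernel attribute checks above.
    if has_flashinfer_cutlass_fused_moe():
        return "flashinfer_cutlass_fused_moe"
    return "default_fallback"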

has_flashinfer_fp8_blockscale_gemm cached

has_flashinfer_fp8_blockscale_gemm() -> bool

Return True if FlashInfer block-scale FP8 GEMM is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_fp8_blockscale_gemm() -> bool:
    """Return `True` if FlashInfer block-scale FP8 GEMM is available."""
    return (
        has_flashinfer()
        and current_platform.is_device_capability(90)
        and hasattr(_get_submodule("flashinfer.gemm"), "fp8_blockscale_gemm_sm90")
    )

has_flashinfer_moe cached

has_flashinfer_moe() -> bool

Return True if FlashInfer MoE module is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_moe() -> bool:
    """Return `True` if FlashInfer MoE module is available."""
    return (
        has_flashinfer()
        and importlib.util.find_spec("flashinfer.fused_moe") is not None
    )

has_flashinfer_trtllm_fused_moe cached

has_flashinfer_trtllm_fused_moe() -> bool

Return True if FlashInfer TRTLLM fused MoE is available.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_flashinfer_trtllm_fused_moe() -> bool:
    """Return `True` if FlashInfer TRTLLM fused MoE is available."""
    if not has_flashinfer_moe():
        return False
    required_functions = [
        ("flashinfer.fused_moe", "trtllm_fp8_block_scale_moe"),
        ("flashinfer.fused_moe", "trtllm_fp8_per_tensor_scale_moe"),
        ("flashinfer.fused_moe", "trtllm_fp4_block_scale_moe"),
        ("flashinfer.fused_moe", "trtllm_mxint4_block_scale_moe"),
    ]
    for module_name, attr_name in required_functions:
        mod = _get_submodule(module_name)
        if not mod or not hasattr(mod, attr_name):
            return False
    return True

has_nvidia_artifactory cached

has_nvidia_artifactory() -> bool

Return True if NVIDIA's artifactory is accessible.

This checks connectivity to the kernel inference library artifactory, which is required for downloading certain cubin kernels such as TRTLLM FHMA.

Source code in vllm/utils/flashinfer.py
@functools.cache
def has_nvidia_artifactory() -> bool:
    """Return `True` if NVIDIA's artifactory is accessible.

    This checks connectivity to the kernel inference library artifactory
    which is required for downloading certain cubin kernels like TRTLLM FHMA.
    """
    # If we have pre-downloaded cubins, we can assume the cubins are available.
    if has_flashinfer_cubin():
        return True

    try:
        # Use a short timeout to avoid blocking for too long
        response = requests.get(FLASHINFER_CUBINS_REPOSITORY, timeout=5)
        accessible = response.status_code == 200
        if accessible:
            logger.debug_once("NVIDIA artifactory is accessible")
        else:
            logger.warning_once(
                "NVIDIA artifactory returned failed status code: %d",
                response.status_code,
            )
        return accessible
    except Exception as e:
        logger.warning_once("Failed to connect to NVIDIA artifactory: %s", e)
        return False

is_flashinfer_fp8_blockscale_gemm_supported cached

is_flashinfer_fp8_blockscale_gemm_supported() -> bool

Return True if FlashInfer block-scale FP8 GEMM is supported.

Source code in vllm/utils/flashinfer.py
@functools.cache
def is_flashinfer_fp8_blockscale_gemm_supported() -> bool:
    """Return `True` if FlashInfer block-scale FP8 GEMM is supported."""
    return (
        envs.VLLM_BLOCKSCALE_FP8_GEMM_FLASHINFER
        and has_flashinfer_fp8_blockscale_gemm()
    )

supports_trtllm_attention cached

supports_trtllm_attention() -> bool

TRTLLM attention is supported if the platform is SM100, NVIDIA artifactory is accessible, and batch-invariant mode is not enabled.

Source code in vllm/utils/flashinfer.py
@functools.cache
def supports_trtllm_attention() -> bool:
    """
    TRTLLM attention is supported if the platform is SM100,
    NVIDIA artifactory is accessible, and batch-invariant mode is not enabled.
    """
    # Batch-invariant mode disables TRTLLM attention
    if vllm_is_batch_invariant():
        return False

    # Requires SM100 and NVIDIA artifactory to be accessible to download cubins
    return (
        current_platform.is_device_capability_family(100) and has_nvidia_artifactory()
    )

use_trtllm_attention

use_trtllm_attention(
    num_qo_heads: int,
    num_kv_heads: int,
    num_tokens: int,
    max_seq_len: int,
    dcp_world_size: int,
    kv_cache_dtype: str,
    q_dtype: dtype,
    is_prefill: bool,
    force_use_trtllm: bool | None = None,
    has_sinks: bool = False,
    has_spec: bool = False,
) -> bool

Return True if TRTLLM attention is used.

Source code in vllm/utils/flashinfer.py
def use_trtllm_attention(
    num_qo_heads: int,
    num_kv_heads: int,
    num_tokens: int,
    max_seq_len: int,
    dcp_world_size: int,
    kv_cache_dtype: str,
    q_dtype: torch.dtype,
    is_prefill: bool,
    # None means auto-detection, True means force on, False means force off
    force_use_trtllm: bool | None = None,
    has_sinks: bool = False,
    has_spec: bool = False,
) -> bool:
    """Return `True` if TRTLLM attention is used."""

    # CLI argument is set to 0 - respect it
    if force_use_trtllm is not None and not force_use_trtllm:
        return False

    # Decode context parallel is not supported
    if dcp_world_size > 1:
        logger.warning_once(
            "Trtllm does not support returning LSE and as a result "
            "does not support DCP, reverting to FlashInfer"
        )
        return False

    # The platform is not supported
    if not supports_trtllm_attention():
        if force_use_trtllm:
            logger.warning_once(
                "TRTLLM attention is not supported on this platform, "
                "but --attention-config.use_trtllm_attention is set to 1"
            )
        return False

    # The combination of query and key heads is not supported
    if num_qo_heads % num_kv_heads != 0:
        if force_use_trtllm:
            logger.warning_once(
                "TRTLLM attention is not supported for this combination of "
                "query and key heads, but --attention-config.use_trtllm_attention is "
                "set to 1"
            )
        return False

    if has_spec and not is_prefill:
        # Speculative decoding requires TRTLLM attention for decodes
        logger.info_once("Using TRTLLM attention (enabled for speculative decoding).")
        return True

    # Must use TRTLLM attention if query is FP8 quantized
    if q_dtype == current_platform.fp8_dtype():
        logger.info_once("Using TRTLLM attention (query is quantized).")
        return True

    # If sinks are being used, we must use TRTLLM attention as it's
    # the only backend that supports them
    if has_sinks:
        logger.info_once("Using TRTLLM attention (required for attention sinks).")
        return True

    if force_use_trtllm is None:
        # CLI argument not set - use auto-detection
        if is_prefill:
            # Prefill auto-detection
            use_trtllm = kv_cache_dtype == "auto"
            if use_trtllm:
                logger.warning_once("Using TRTLLM prefill attention (auto-detected).")
        else:
            # Decode auto-detection
            use_trtllm = num_tokens <= 256 and kv_cache_dtype == "auto"
            if use_trtllm:
                logger.warning_once("Using TRTLLM decode attention (auto-detected).")
        return use_trtllm

    # CLI argument is set to 1 - respect it
    logger.info_once(
        "Using TRTLLM attention (--attention-config.use_trtllm_attention is set to 1)"
    )
    return True
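
A sketch of how a backend might consult this decision function (all parameter values are illustrative):

import torch

from vllm.utils.flashinfer import use_trtllm_attention

use_trtllm = use_trtllm_attention(
    num_qo_heads=32,
    num_kv_heads=8,
    num_tokens=128,
    max_seq_len=4096,
    dcp_world_size=1,
    kv_cache_dtype="auto",
    q_dtype=torch.bfloat16,
    is_prefill=False,
    force_use_trtllm=None,  # None -> auto-detect using the heuristics above
)

On a supported SM100 platform this particular call takes the decode auto-detection path (num_tokens <= 256 and kv_cache_dtype == "auto"); on unsupported platforms it returns False.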