vllm.model_executor.models.qwen2_audio

Inference-only Qwen2-Audio model compatible with HuggingFace weights.

Qwen2AudioEmbeddingInputs

Bases: TensorSchema

Dimensions
  • bn: Batch size
  • naf: Number of audio features
  • hs: Hidden size (must match the hidden size of language model backbone)
Source code in vllm/model_executor/models/qwen2_audio.py
class Qwen2AudioEmbeddingInputs(TensorSchema):
    """
    Dimensions:
        - bn: Batch size
        - naf: Number of audio features
        - hs: Hidden size (must match the hidden size of language model
          backbone)
    """

    type: Literal["audio_embeds"] = "audio_embeds"

    audio_embeds: Annotated[
        list[torch.Tensor],
        TensorShape("bn", "naf", "hs", dynamic_dims={"naf"}),
    ]
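
A minimal sketch of the tensors this schema describes, written in plain PyTorch rather than any vLLM API; the hidden size of 3584 is only an assumed example value and must match the language model backbone in practice:

import torch

# Assumed example hidden size (hs); it must equal the LM backbone's hidden size.
hidden_size = 3584

# One pre-computed embedding tensor per audio clip. naf (number of audio
# features) is a dynamic dim and may differ between clips; hs may not.
audio_embeds = [
    torch.randn(750, hidden_size),   # clip 1: 750 audio features
    torch.randn(1500, hidden_size),  # clip 2: 1500 audio features
]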

Qwen2AudioFeatureInputs

Bases: TensorSchema

Dimensions
  • na: Number of audios
  • nmb: Number of mel bins
Source code in vllm/model_executor/models/qwen2_audio.py
class Qwen2AudioFeatureInputs(TensorSchema):
    """
    Dimensions:
        - na: Number of audios
        - nmb: Number of mel bins
    """

    type: Literal["audio_features"]
    input_features: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("na", "nmb", 3000),
    ]

    feature_attention_mask: Annotated[
        torch.Tensor,
        TensorShape("na", 3000),
    ]
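
For reference, a hedged sketch of inputs matching these shapes, again in plain PyTorch; the 128 mel bins and the two example clip lengths are assumptions, while the 3000-frame window comes from the schema above:

import torch

# Assumed example values: 2 clips, 128 mel bins, 3000 frames per clip.
# nmb depends on the model's feature extractor configuration.
num_audios, num_mel_bins, num_frames = 2, 128, 3000
input_features = torch.randn(num_audios, num_mel_bins, num_frames)

# 1 marks valid frames, 0 marks padding beyond each clip's true length.
feature_attention_mask = torch.zeros(num_audios, num_frames, dtype=torch.long)
feature_attention_mask[0, :1000] = 1   # clip 1 fills only part of the window
feature_attention_mask[1, :] = 1       # clip 2 fills the full window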

Qwen2AudioProcessingInfo

Bases: BaseProcessingInfo

Source code in vllm/model_executor/models/qwen2_audio.py
class Qwen2AudioProcessingInfo(BaseProcessingInfo):
    def get_hf_config(self):
        return self.ctx.get_hf_config(Qwen2AudioConfig)

    def get_hf_processor(self, **kwargs: object) -> Qwen2AudioProcessor:
        return self.ctx.get_hf_processor(Qwen2AudioProcessor, **kwargs)

    def get_feature_extractor(self, **kwargs: object) -> WhisperFeatureExtractor:
        hf_processor = self.get_hf_processor(**kwargs)
        feature_extractor = hf_processor.feature_extractor  # type: ignore
        assert isinstance(feature_extractor, WhisperFeatureExtractor)
        return feature_extractor

    def get_data_parser(self):
        feature_extractor = self.get_feature_extractor()

        return Qwen2AudioMultiModalDataParser(
            target_sr=feature_extractor.sampling_rate,
            target_channels=self.get_target_channels(),
            expected_hidden_size=self._get_expected_hidden_size(),
        )

    def get_target_channels(self) -> int:
        """Return target audio channels for Qwen2 Audio models (mono)."""
        return 1

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        return {"audio": None}

get_target_channels

get_target_channels() -> int

Return target audio channels for Qwen2 Audio models (mono).

Source code in vllm/model_executor/models/qwen2_audio.py
def get_target_channels(self) -> int:
    """Return target audio channels for Qwen2 Audio models (mono)."""
    return 1
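
Because the target is a single channel, multi-channel audio would need to be downmixed before feature extraction; a minimal sketch, assuming a (channels, samples) float array and not code from this module:

import numpy as np

# Assumed stereo input shaped (channels, samples); average channels to mono.
stereo = np.random.randn(2, 16000).astype(np.float32)
mono = stereo.mean(axis=0)  # shape (samples,), suitable for the feature extractor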