vllm.model_executor.models.qwen3_dflash

logger module-attribute

logger = init_logger(__name__)

DFlashQwen3Attention

Bases: Module

Source code in vllm/model_executor/models/qwen3_dflash.py
class DFlashQwen3Attention(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_parameters: dict,
        max_position: int = 4096 * 32,
        head_dim: int | None = None,
        rms_norm_eps: float = 1e-06,
        qkv_bias: bool = False,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
        dual_chunk_attention_config: dict[str, Any] | None = None,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            assert self.total_num_kv_heads % tp_size == 0
        else:
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = head_dim or hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=qkv_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )

        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position,
            rope_parameters=rope_parameters,
            dual_chunk_attention_config=dual_chunk_attention_config,
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
            attn_type=attn_type,
            **{
                "layer_idx": extract_layer_index(prefix),
                "dual_chunk_attention_config": dual_chunk_attention_config,
            }
            if dual_chunk_attention_config
            else {},
        )
        self.q_norm = RMSNorm(self.head_dim, eps=rms_norm_eps)
        self.k_norm = RMSNorm(self.head_dim, eps=rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        context_states: torch.Tensor,
    ) -> torch.Tensor:
        # DFlash attends with query tokens against the context+query KV states.
        num_context_tokens = context_states.shape[0]
        concat_states = torch.cat([context_states, hidden_states], dim=0)

        qkv, _ = self.qkv_proj(concat_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)

        q_by_head = q.view(*q.shape[:-1], q.shape[-1] // self.head_dim, self.head_dim)
        k_by_head = k.view(*k.shape[:-1], k.shape[-1] // self.head_dim, self.head_dim)
        q = self.q_norm(q_by_head).view(q.shape)
        k = self.k_norm(k_by_head).view(k.shape)

        q, k = self.rotary_emb(positions, q, k)

        q = q[num_context_tokens:]
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output

attn instance-attribute

attn = Attention(
    num_heads,
    head_dim,
    scaling,
    num_kv_heads=num_kv_heads,
    cache_config=cache_config,
    quant_config=quant_config,
    prefix=f"{prefix}.attn",
    attn_type=attn_type,
    **(
        {
            "layer_idx": extract_layer_index(prefix),
            "dual_chunk_attention_config": dual_chunk_attention_config,
        }
        if dual_chunk_attention_config
        else {}
    ),
)

head_dim instance-attribute

head_dim = head_dim or hidden_size // total_num_heads

hidden_size instance-attribute

hidden_size = hidden_size

k_norm instance-attribute

k_norm = RMSNorm(head_dim, eps=rms_norm_eps)

kv_size instance-attribute

kv_size = num_kv_heads * head_dim

num_heads instance-attribute

num_heads = total_num_heads // tp_size

num_kv_heads instance-attribute

num_kv_heads = max(1, total_num_kv_heads // tp_size)

o_proj instance-attribute

o_proj = RowParallelLinear(
    total_num_heads * head_dim,
    hidden_size,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.o_proj",
)

q_norm instance-attribute

q_norm = RMSNorm(head_dim, eps=rms_norm_eps)

q_size instance-attribute

q_size = num_heads * head_dim

qkv_proj instance-attribute

qkv_proj = QKVParallelLinear(
    hidden_size,
    head_dim,
    total_num_heads,
    total_num_kv_heads,
    bias=qkv_bias,
    quant_config=quant_config,
    prefix=f"{prefix}.qkv_proj",
)

rotary_emb instance-attribute

rotary_emb = get_rope(
    head_dim,
    max_position=max_position,
    rope_parameters=rope_parameters,
    dual_chunk_attention_config=dual_chunk_attention_config,
)

scaling instance-attribute

scaling = head_dim ** -0.5

total_num_heads instance-attribute

total_num_heads = num_heads

total_num_kv_heads instance-attribute

total_num_kv_heads = num_kv_heads

__init__

__init__(
    hidden_size: int,
    num_heads: int,
    num_kv_heads: int,
    rope_parameters: dict,
    max_position: int = 4096 * 32,
    head_dim: int | None = None,
    rms_norm_eps: float = 1e-06,
    qkv_bias: bool = False,
    cache_config: CacheConfig | None = None,
    quant_config: QuantizationConfig | None = None,
    prefix: str = "",
    attn_type: str = DECODER,
    dual_chunk_attention_config: dict[str, Any]
    | None = None,
) -> None
Source code in vllm/model_executor/models/qwen3_dflash.py
def __init__(
    self,
    hidden_size: int,
    num_heads: int,
    num_kv_heads: int,
    rope_parameters: dict,
    max_position: int = 4096 * 32,
    head_dim: int | None = None,
    rms_norm_eps: float = 1e-06,
    qkv_bias: bool = False,
    cache_config: CacheConfig | None = None,
    quant_config: QuantizationConfig | None = None,
    prefix: str = "",
    attn_type: str = AttentionType.DECODER,
    dual_chunk_attention_config: dict[str, Any] | None = None,
) -> None:
    super().__init__()
    self.hidden_size = hidden_size
    tp_size = get_tensor_model_parallel_world_size()
    self.total_num_heads = num_heads
    assert self.total_num_heads % tp_size == 0
    self.num_heads = self.total_num_heads // tp_size
    self.total_num_kv_heads = num_kv_heads
    if self.total_num_kv_heads >= tp_size:
        assert self.total_num_kv_heads % tp_size == 0
    else:
        assert tp_size % self.total_num_kv_heads == 0
    self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
    self.head_dim = head_dim or hidden_size // self.total_num_heads
    self.q_size = self.num_heads * self.head_dim
    self.kv_size = self.num_kv_heads * self.head_dim
    self.scaling = self.head_dim**-0.5

    self.qkv_proj = QKVParallelLinear(
        hidden_size,
        self.head_dim,
        self.total_num_heads,
        self.total_num_kv_heads,
        bias=qkv_bias,
        quant_config=quant_config,
        prefix=f"{prefix}.qkv_proj",
    )
    self.o_proj = RowParallelLinear(
        self.total_num_heads * self.head_dim,
        hidden_size,
        bias=False,
        quant_config=quant_config,
        prefix=f"{prefix}.o_proj",
    )

    self.rotary_emb = get_rope(
        self.head_dim,
        max_position=max_position,
        rope_parameters=rope_parameters,
        dual_chunk_attention_config=dual_chunk_attention_config,
    )
    self.attn = Attention(
        self.num_heads,
        self.head_dim,
        self.scaling,
        num_kv_heads=self.num_kv_heads,
        cache_config=cache_config,
        quant_config=quant_config,
        prefix=f"{prefix}.attn",
        attn_type=attn_type,
        **{
            "layer_idx": extract_layer_index(prefix),
            "dual_chunk_attention_config": dual_chunk_attention_config,
        }
        if dual_chunk_attention_config
        else {},
    )
    self.q_norm = RMSNorm(self.head_dim, eps=rms_norm_eps)
    self.k_norm = RMSNorm(self.head_dim, eps=rms_norm_eps)

forward

forward(
    positions: Tensor,
    hidden_states: Tensor,
    context_states: Tensor,
) -> Tensor
Source code in vllm/model_executor/models/qwen3_dflash.py
def forward(
    self,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    context_states: torch.Tensor,
) -> torch.Tensor:
    # DFlash attends with query tokens against the context+query KV states.
    num_context_tokens = context_states.shape[0]
    concat_states = torch.cat([context_states, hidden_states], dim=0)

    qkv, _ = self.qkv_proj(concat_states)
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)

    q_by_head = q.view(*q.shape[:-1], q.shape[-1] // self.head_dim, self.head_dim)
    k_by_head = k.view(*k.shape[:-1], k.shape[-1] // self.head_dim, self.head_dim)
    q = self.q_norm(q_by_head).view(q.shape)
    k = self.k_norm(k_by_head).view(k.shape)

    q, k = self.rotary_emb(positions, q, k)

    q = q[num_context_tokens:]
    attn_output = self.attn(q, k, v)
    output, _ = self.o_proj(attn_output)
    return output
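
The forward pass concatenates the target-model context states with the draft query states so that keys and values cover both, while only the query positions keep their rows of Q. A minimal standalone sketch of that slicing, using random tensors and hypothetical sizes rather than the real projection layers:

import torch

num_context_tokens, num_query_tokens, hidden_size = 4, 2, 8
context_states = torch.randn(num_context_tokens, hidden_size)
hidden_states = torch.randn(num_query_tokens, hidden_size)

# K/V rows are computed over context + query tokens ...
concat_states = torch.cat([context_states, hidden_states], dim=0)
kv_rows = concat_states                        # stand-in for the K/V slices of qkv_proj

# ... but only the query tokens keep their Q rows.
queries = concat_states[num_context_tokens:]   # stand-in for the Q slice of qkv_proj

assert kv_rows.shape[0] == num_context_tokens + num_query_tokens
assert queries.shape[0] == num_query_tokens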

DFlashQwen3DecoderLayer

Bases: Module

Source code in vllm/model_executor/models/qwen3_dflash.py
class DFlashQwen3DecoderLayer(nn.Module):
    def __init__(
        self,
        vllm_config: VllmConfig,
        *,
        config: Qwen3Config,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        layer_idx: int = 0,
    ) -> None:
        del layer_idx
        super().__init__()
        self.hidden_size = config.hidden_size
        set_default_rope_theta(config, default_theta=1000000)
        dual_chunk_attention_config = getattr(
            config, "dual_chunk_attention_config", None
        )

        self.self_attn = DFlashQwen3Attention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            max_position=config.max_position_embeddings,
            num_kv_heads=config.num_key_value_heads,
            rms_norm_eps=config.rms_norm_eps,
            qkv_bias=getattr(config, "attention_bias", False),
            head_dim=getattr(config, "head_dim", None),
            cache_config=cache_config,
            quant_config=quant_config,
            rope_parameters=config.rope_parameters,
            prefix=f"{prefix}.self_attn",
            attn_type=AttentionType.DECODER,
            dual_chunk_attention_config=dual_chunk_attention_config,
        )
        self.mlp = Qwen3MLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        context_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)

        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            context_states=context_states,
        )

        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual

hidden_size instance-attribute

hidden_size = hidden_size

input_layernorm instance-attribute

input_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)

mlp instance-attribute

mlp = Qwen3MLP(
    hidden_size=hidden_size,
    intermediate_size=intermediate_size,
    hidden_act=hidden_act,
    quant_config=quant_config,
    prefix=f"{prefix}.mlp",
)

post_attention_layernorm instance-attribute

post_attention_layernorm = RMSNorm(
    hidden_size, eps=rms_norm_eps
)

self_attn instance-attribute

self_attn = DFlashQwen3Attention(
    hidden_size=hidden_size,
    num_heads=num_attention_heads,
    max_position=max_position_embeddings,
    num_kv_heads=num_key_value_heads,
    rms_norm_eps=rms_norm_eps,
    qkv_bias=getattr(config, "attention_bias", False),
    head_dim=getattr(config, "head_dim", None),
    cache_config=cache_config,
    quant_config=quant_config,
    rope_parameters=rope_parameters,
    prefix=f"{prefix}.self_attn",
    attn_type=DECODER,
    dual_chunk_attention_config=dual_chunk_attention_config,
)

__init__

__init__(
    vllm_config: VllmConfig,
    *,
    config: Qwen3Config,
    cache_config: CacheConfig | None = None,
    quant_config: QuantizationConfig | None = None,
    prefix: str = "",
    layer_idx: int = 0,
) -> None
Source code in vllm/model_executor/models/qwen3_dflash.py
def __init__(
    self,
    vllm_config: VllmConfig,
    *,
    config: Qwen3Config,
    cache_config: CacheConfig | None = None,
    quant_config: QuantizationConfig | None = None,
    prefix: str = "",
    layer_idx: int = 0,
) -> None:
    del layer_idx
    super().__init__()
    self.hidden_size = config.hidden_size
    set_default_rope_theta(config, default_theta=1000000)
    dual_chunk_attention_config = getattr(
        config, "dual_chunk_attention_config", None
    )

    self.self_attn = DFlashQwen3Attention(
        hidden_size=self.hidden_size,
        num_heads=config.num_attention_heads,
        max_position=config.max_position_embeddings,
        num_kv_heads=config.num_key_value_heads,
        rms_norm_eps=config.rms_norm_eps,
        qkv_bias=getattr(config, "attention_bias", False),
        head_dim=getattr(config, "head_dim", None),
        cache_config=cache_config,
        quant_config=quant_config,
        rope_parameters=config.rope_parameters,
        prefix=f"{prefix}.self_attn",
        attn_type=AttentionType.DECODER,
        dual_chunk_attention_config=dual_chunk_attention_config,
    )
    self.mlp = Qwen3MLP(
        hidden_size=self.hidden_size,
        intermediate_size=config.intermediate_size,
        hidden_act=config.hidden_act,
        quant_config=quant_config,
        prefix=f"{prefix}.mlp",
    )
    self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    self.post_attention_layernorm = RMSNorm(
        config.hidden_size, eps=config.rms_norm_eps
    )

forward

forward(
    positions: Tensor,
    hidden_states: Tensor,
    context_states: Tensor,
    residual: Tensor | None,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/qwen3_dflash.py
def forward(
    self,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    context_states: torch.Tensor,
    residual: torch.Tensor | None,
) -> tuple[torch.Tensor, torch.Tensor]:
    if residual is None:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
    else:
        hidden_states, residual = self.input_layernorm(hidden_states, residual)

    hidden_states = self.self_attn(
        positions=positions,
        hidden_states=hidden_states,
        context_states=context_states,
    )

    hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
    hidden_states = self.mlp(hidden_states)
    return hidden_states, residual

DFlashQwen3ForCausalLM

Bases: Qwen3ForCausalLM

Source code in vllm/model_executor/models/qwen3_dflash.py
class DFlashQwen3ForCausalLM(Qwen3ForCausalLM):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        nn.Module.__init__(self)
        self.config = vllm_config.speculative_config.draft_model_config.hf_config

        if getattr(self.config, "draft_vocab_size", None) is None:
            self.config.draft_vocab_size = getattr(self.config, "vocab_size", None)

        target_layer_num = vllm_config.model_config.get_num_layers(
            vllm_config.parallel_config
        )
        self.config.target_layer_count = target_layer_num

        self.model = DFlashQwen3Model(
            vllm_config=vllm_config,
            prefix="model",
            start_layer_id=target_layer_num,
        )

        logit_scale = getattr(self.config, "logit_scale", 1.0)
        self.lm_head = ParallelLMHead(
            self.config.draft_vocab_size,
            self.config.hidden_size,
            prefix=maybe_prefix(prefix, "lm_head"),
        )
        self.logits_processor = LogitsProcessor(
            self.config.draft_vocab_size, scale=logit_scale
        )

        self.draft_id_to_target_id = nn.Parameter(
            torch.zeros(self.config.draft_vocab_size, dtype=torch.long),
            requires_grad=False,
        )

    def set_aux_hidden_state_layers(self, layers: tuple[int, ...]) -> None:
        dflash_config = getattr(self.config, "dflash_config", None)
        if not isinstance(dflash_config, dict):
            dflash_config = {}
            self.config.dflash_config = dflash_config
        dflash_config["layer_ids"] = list(layers)

    def get_eagle3_aux_hidden_state_layers(self) -> tuple[int, ...]:
        layer_ids = _resolve_dflash_layer_ids(self.config)
        if layer_ids is not None:
            return layer_ids

        target_layer_count = getattr(self.config, "target_layer_count", None)
        if isinstance(target_layer_count, int):
            return _default_aux_layer_ids(target_layer_count)

        return super().get_eagle3_aux_hidden_state_layers()

    def embed_input_ids(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: NestedTensors | None = None,
        is_multimodal: torch.Tensor | None = None,
    ) -> torch.Tensor:
        del multimodal_embeddings, is_multimodal
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        return self.model(input_ids, positions, hidden_states, inputs_embeds)

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        logits = self.logits_processor(self.lm_head, hidden_states)
        if self.draft_id_to_target_id is None:
            assert logits.shape[1] == self.config.vocab_size, (
                "Expected logits to have shape "
                f"(*, {self.config.vocab_size}), but got {logits.shape}"
            )
            return logits

        base = torch.arange(self.config.draft_vocab_size, device=logits.device)
        targets = base + self.draft_id_to_target_id
        logits_new = logits.new_full(
            (
                logits.shape[0],
                self.config.vocab_size,
            ),
            float("-inf"),
        )
        logits_new[:, targets] = logits
        return logits_new

    def combine_hidden_states(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        if not self.model.use_aux_hidden_state:
            return hidden_states
        return self.model.hidden_norm(self.model.fc(hidden_states))

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        model_weights = {}
        includes_draft_id_mapping = False
        includes_embed_tokens = False

        for name, loaded_weight in weights:
            if "t2d" in name:
                continue
            if "d2t" in name:
                name = name.replace("d2t", "draft_id_to_target_id")
                includes_draft_id_mapping = True
            elif "lm_head" not in name:
                name = "model." + name

            if "embed_tokens" in name:
                includes_embed_tokens = True

            model_weights[name] = loaded_weight
            process_eagle_weight(self, name)

        skip_substrs = []
        if not includes_draft_id_mapping:
            skip_substrs.append("draft_id_to_target_id")
        if not includes_embed_tokens:
            skip_substrs.append("embed_tokens")
        if not self.model.use_aux_hidden_state:
            skip_substrs.append("fc.")

        loader = AutoWeightsLoader(
            self,
            skip_prefixes=None,
            skip_substrs=skip_substrs,
        )
        loader.load_weights(model_weights.items())

config instance-attribute

config = hf_config

draft_id_to_target_id instance-attribute

draft_id_to_target_id = Parameter(
    zeros(draft_vocab_size, dtype=long), requires_grad=False
)

lm_head instance-attribute

lm_head = ParallelLMHead(
    draft_vocab_size,
    hidden_size,
    prefix=maybe_prefix(prefix, "lm_head"),
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(
    draft_vocab_size, scale=logit_scale
)

model instance-attribute

model = DFlashQwen3Model(
    vllm_config=vllm_config,
    prefix="model",
    start_layer_id=target_layer_num,
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen3_dflash.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    nn.Module.__init__(self)
    self.config = vllm_config.speculative_config.draft_model_config.hf_config

    if getattr(self.config, "draft_vocab_size", None) is None:
        self.config.draft_vocab_size = getattr(self.config, "vocab_size", None)

    target_layer_num = vllm_config.model_config.get_num_layers(
        vllm_config.parallel_config
    )
    self.config.target_layer_count = target_layer_num

    self.model = DFlashQwen3Model(
        vllm_config=vllm_config,
        prefix="model",
        start_layer_id=target_layer_num,
    )

    logit_scale = getattr(self.config, "logit_scale", 1.0)
    self.lm_head = ParallelLMHead(
        self.config.draft_vocab_size,
        self.config.hidden_size,
        prefix=maybe_prefix(prefix, "lm_head"),
    )
    self.logits_processor = LogitsProcessor(
        self.config.draft_vocab_size, scale=logit_scale
    )

    self.draft_id_to_target_id = nn.Parameter(
        torch.zeros(self.config.draft_vocab_size, dtype=torch.long),
        requires_grad=False,
    )

combine_hidden_states

combine_hidden_states(hidden_states: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen3_dflash.py
def combine_hidden_states(
    self,
    hidden_states: torch.Tensor,
) -> torch.Tensor:
    if not self.model.use_aux_hidden_state:
        return hidden_states
    return self.model.hidden_norm(self.model.fc(hidden_states))

compute_logits

compute_logits(hidden_states: Tensor) -> Tensor | None
Source code in vllm/model_executor/models/qwen3_dflash.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
) -> torch.Tensor | None:
    logits = self.logits_processor(self.lm_head, hidden_states)
    if self.draft_id_to_target_id is None:
        assert logits.shape[1] == self.config.vocab_size, (
            "Expected logits to have shape "
            f"(*, {self.config.vocab_size}), but got {logits.shape}"
        )
        return logits

    base = torch.arange(self.config.draft_vocab_size, device=logits.device)
    targets = base + self.draft_id_to_target_id
    logits_new = logits.new_full(
        (
            logits.shape[0],
            self.config.vocab_size,
        ),
        float("-inf"),
    )
    logits_new[:, targets] = logits
    return logits_new
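
When a draft-to-target id mapping is present, compute_logits scatters the draft-vocabulary logits into the full target vocabulary, filling every other position with -inf. A minimal sketch of that remapping with made-up sizes and offsets; draft_id_to_target_id stores per-token offsets such that target_id = draft_id + offset:

import torch

draft_vocab_size, target_vocab_size = 4, 8          # hypothetical sizes
logits = torch.randn(2, draft_vocab_size)           # draft-vocab logits
draft_id_to_target_id = torch.tensor([0, 2, 3, 3])  # hypothetical offsets

base = torch.arange(draft_vocab_size)
targets = base + draft_id_to_target_id              # tensor([0, 3, 5, 6])

logits_new = logits.new_full((logits.shape[0], target_vocab_size), float("-inf"))
logits_new[:, targets] = logits                     # draft scores land at their target ids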

embed_input_ids

embed_input_ids(
    input_ids: Tensor,
    multimodal_embeddings: NestedTensors | None = None,
    is_multimodal: Tensor | None = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen3_dflash.py
def embed_input_ids(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: NestedTensors | None = None,
    is_multimodal: torch.Tensor | None = None,
) -> torch.Tensor:
    del multimodal_embeddings, is_multimodal
    return self.model.embed_input_ids(input_ids)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
    inputs_embeds: Tensor | None = None,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/qwen3_dflash.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    inputs_embeds: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    return self.model(input_ids, positions, hidden_states, inputs_embeds)

get_eagle3_aux_hidden_state_layers

get_eagle3_aux_hidden_state_layers() -> tuple[int, ...]
Source code in vllm/model_executor/models/qwen3_dflash.py
def get_eagle3_aux_hidden_state_layers(self) -> tuple[int, ...]:
    layer_ids = _resolve_dflash_layer_ids(self.config)
    if layer_ids is not None:
        return layer_ids

    target_layer_count = getattr(self.config, "target_layer_count", None)
    if isinstance(target_layer_count, int):
        return _default_aux_layer_ids(target_layer_count)

    return super().get_eagle3_aux_hidden_state_layers()

load_weights

load_weights(weights: Iterable[tuple[str, Tensor]])
Source code in vllm/model_executor/models/qwen3_dflash.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
    model_weights = {}
    includes_draft_id_mapping = False
    includes_embed_tokens = False

    for name, loaded_weight in weights:
        if "t2d" in name:
            continue
        if "d2t" in name:
            name = name.replace("d2t", "draft_id_to_target_id")
            includes_draft_id_mapping = True
        elif "lm_head" not in name:
            name = "model." + name

        if "embed_tokens" in name:
            includes_embed_tokens = True

        model_weights[name] = loaded_weight
        process_eagle_weight(self, name)

    skip_substrs = []
    if not includes_draft_id_mapping:
        skip_substrs.append("draft_id_to_target_id")
    if not includes_embed_tokens:
        skip_substrs.append("embed_tokens")
    if not self.model.use_aux_hidden_state:
        skip_substrs.append("fc.")

    loader = AutoWeightsLoader(
        self,
        skip_prefixes=None,
        skip_substrs=skip_substrs,
    )
    loader.load_weights(model_weights.items())
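
load_weights renames checkpoint tensors before handing them to AutoWeightsLoader: t2d tensors are dropped, d2t becomes draft_id_to_target_id, and everything except lm_head is prefixed with "model.". A minimal sketch of that renaming, using hypothetical checkpoint names:

names = ["t2d", "d2t", "lm_head.weight", "layers.0.self_attn.qkv_proj.weight"]
remapped = []
for name in names:
    if "t2d" in name:
        continue                                    # target-to-draft map is not loaded
    if "d2t" in name:
        name = name.replace("d2t", "draft_id_to_target_id")
    elif "lm_head" not in name:
        name = "model." + name
    remapped.append(name)
# remapped == ["draft_id_to_target_id", "lm_head.weight",
#              "model.layers.0.self_attn.qkv_proj.weight"]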

set_aux_hidden_state_layers

set_aux_hidden_state_layers(
    layers: tuple[int, ...],
) -> None
Source code in vllm/model_executor/models/qwen3_dflash.py
def set_aux_hidden_state_layers(self, layers: tuple[int, ...]) -> None:
    dflash_config = getattr(self.config, "dflash_config", None)
    if not isinstance(dflash_config, dict):
        dflash_config = {}
        self.config.dflash_config = dflash_config
    dflash_config["layer_ids"] = list(layers)

DFlashQwen3Model

Bases: Module

Source code in vllm/model_executor/models/qwen3_dflash.py
class DFlashQwen3Model(nn.Module):
    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        start_layer_id: int = 0,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.config = vllm_config.speculative_config.draft_model_config.hf_config
        self.vocab_size = self.config.vocab_size
        self.quant_config = get_draft_quant_config(vllm_config)

        drafter_config: dict[str, Any] = {}
        eagle_config = getattr(self.config, "eagle_config", None)
        dflash_config = getattr(self.config, "dflash_config", None)
        if isinstance(eagle_config, dict):
            drafter_config.update(eagle_config)
        if isinstance(dflash_config, dict):
            drafter_config.update(dflash_config)

        self.use_aux_hidden_state = drafter_config.get("use_aux_hidden_state", True)
        self.layer_ids = _resolve_dflash_layer_ids(self.config)

        current_vllm_config = get_current_vllm_config()

        self.embed_tokens = VocabParallelEmbedding(
            self.config.vocab_size,
            self.config.hidden_size,
            prefix=maybe_prefix(prefix, "embed_tokens"),
        )

        self.layers = nn.ModuleList(
            [
                DFlashQwen3DecoderLayer(
                    current_vllm_config,
                    prefix=maybe_prefix(prefix, f"layers.{layer_idx + start_layer_id}"),
                    config=self.config,
                    layer_idx=layer_idx,
                )
                for layer_idx in range(self.config.num_hidden_layers)
            ]
        )

        if self.use_aux_hidden_state:
            if self.layer_ids is not None:
                num_features_to_use = len(self.layer_ids)
            else:
                target_layer_count = getattr(self.config, "target_layer_count", None)
                if isinstance(target_layer_count, int):
                    num_features_to_use = len(
                        _default_aux_layer_ids(target_layer_count)
                    )
                else:
                    num_features_to_use = len(
                        _default_aux_layer_ids(self.config.num_hidden_layers)
                    )

            if hasattr(self.config, "target_hidden_size"):
                fc_input_size = self.config.target_hidden_size * num_features_to_use
            else:
                fc_input_size = self.config.hidden_size * num_features_to_use

            self.fc = ReplicatedLinear(
                input_size=fc_input_size,
                output_size=self.config.hidden_size,
                bias=False,
                params_dtype=vllm_config.model_config.dtype,
                quant_config=self.quant_config,
                prefix=maybe_prefix(prefix, "fc"),
                return_bias=False,
            )

        self.hidden_norm = RMSNorm(
            self.config.hidden_size,
            eps=self.config.rms_norm_eps,
        )
        self.norm = RMSNorm(
            self.config.hidden_size,
            eps=self.config.rms_norm_eps,
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        input_embeds: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        if input_embeds is None:
            input_embeds = self.embed_input_ids(input_ids)

        assert hidden_states.shape[-1] == input_embeds.shape[-1]

        context_states = hidden_states
        hidden_states = input_embeds

        residual = None
        for layer in self.layers:
            hidden_states, residual = layer(
                positions=positions,
                hidden_states=hidden_states,
                context_states=context_states,
                residual=residual,
            )

        hidden_states, hidden_prenorm = self.norm(hidden_states, residual)
        return hidden_states, hidden_prenorm

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "midlayer." in name:
                name = name.replace("midlayer.", "layers.0.")
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
            if "scale" in name or "zero_point" in name:
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

config instance-attribute

config = hf_config

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size,
    hidden_size,
    prefix=maybe_prefix(prefix, "embed_tokens"),
)

fc instance-attribute

fc = ReplicatedLinear(
    input_size=fc_input_size,
    output_size=hidden_size,
    bias=False,
    params_dtype=dtype,
    quant_config=quant_config,
    prefix=maybe_prefix(prefix, "fc"),
    return_bias=False,
)

hidden_norm instance-attribute

hidden_norm = RMSNorm(hidden_size, eps=rms_norm_eps)

layer_ids instance-attribute

layer_ids = _resolve_dflash_layer_ids(config)

layers instance-attribute

layers = ModuleList(
    [
        (
            DFlashQwen3DecoderLayer(
                current_vllm_config,
                prefix=maybe_prefix(
                    prefix,
                    f"layers.{layer_idx + start_layer_id}",
                ),
                config=config,
                layer_idx=layer_idx,
            )
        )
        for layer_idx in (range(num_hidden_layers))
    ]
)

norm instance-attribute

norm = RMSNorm(hidden_size, eps=rms_norm_eps)

quant_config instance-attribute

quant_config = get_draft_quant_config(vllm_config)

use_aux_hidden_state instance-attribute

use_aux_hidden_state = drafter_config.get("use_aux_hidden_state", True)

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(
    *,
    vllm_config: VllmConfig,
    start_layer_id: int = 0,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/qwen3_dflash.py
def __init__(
    self,
    *,
    vllm_config: VllmConfig,
    start_layer_id: int = 0,
    prefix: str = "",
) -> None:
    super().__init__()
    self.config = vllm_config.speculative_config.draft_model_config.hf_config
    self.vocab_size = self.config.vocab_size
    self.quant_config = get_draft_quant_config(vllm_config)

    drafter_config: dict[str, Any] = {}
    eagle_config = getattr(self.config, "eagle_config", None)
    dflash_config = getattr(self.config, "dflash_config", None)
    if isinstance(eagle_config, dict):
        drafter_config.update(eagle_config)
    if isinstance(dflash_config, dict):
        drafter_config.update(dflash_config)

    self.use_aux_hidden_state = drafter_config.get("use_aux_hidden_state", True)
    self.layer_ids = _resolve_dflash_layer_ids(self.config)

    current_vllm_config = get_current_vllm_config()

    self.embed_tokens = VocabParallelEmbedding(
        self.config.vocab_size,
        self.config.hidden_size,
        prefix=maybe_prefix(prefix, "embed_tokens"),
    )

    self.layers = nn.ModuleList(
        [
            DFlashQwen3DecoderLayer(
                current_vllm_config,
                prefix=maybe_prefix(prefix, f"layers.{layer_idx + start_layer_id}"),
                config=self.config,
                layer_idx=layer_idx,
            )
            for layer_idx in range(self.config.num_hidden_layers)
        ]
    )

    if self.use_aux_hidden_state:
        if self.layer_ids is not None:
            num_features_to_use = len(self.layer_ids)
        else:
            target_layer_count = getattr(self.config, "target_layer_count", None)
            if isinstance(target_layer_count, int):
                num_features_to_use = len(
                    _default_aux_layer_ids(target_layer_count)
                )
            else:
                num_features_to_use = len(
                    _default_aux_layer_ids(self.config.num_hidden_layers)
                )

        if hasattr(self.config, "target_hidden_size"):
            fc_input_size = self.config.target_hidden_size * num_features_to_use
        else:
            fc_input_size = self.config.hidden_size * num_features_to_use

        self.fc = ReplicatedLinear(
            input_size=fc_input_size,
            output_size=self.config.hidden_size,
            bias=False,
            params_dtype=vllm_config.model_config.dtype,
            quant_config=self.quant_config,
            prefix=maybe_prefix(prefix, "fc"),
            return_bias=False,
        )

    self.hidden_norm = RMSNorm(
        self.config.hidden_size,
        eps=self.config.rms_norm_eps,
    )
    self.norm = RMSNorm(
        self.config.hidden_size,
        eps=self.config.rms_norm_eps,
    )
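
When auxiliary hidden states are used, the fc projection fuses the concatenated states from the selected target layers back into a single hidden_size vector. Its input width is just the number of auxiliary layers times the (target) hidden size; a minimal arithmetic sketch with illustrative numbers only:

layer_ids = (2, 18, 33)              # hypothetical auxiliary layer ids
hidden_size = 2048                   # hypothetical config.hidden_size
num_features_to_use = len(layer_ids)

fc_input_size = hidden_size * num_features_to_use   # 3 * 2048 = 6144
# fc then maps (..., 6144) -> (..., 2048) before hidden_norm is applied.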

embed_input_ids

embed_input_ids(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen3_dflash.py
def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
    return self.embed_tokens(input_ids)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
    input_embeds: Tensor | None = None,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/qwen3_dflash.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    input_embeds: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    if input_embeds is None:
        input_embeds = self.embed_input_ids(input_ids)

    assert hidden_states.shape[-1] == input_embeds.shape[-1]

    context_states = hidden_states
    hidden_states = input_embeds

    residual = None
    for layer in self.layers:
        hidden_states, residual = layer(
            positions=positions,
            hidden_states=hidden_states,
            context_states=context_states,
            residual=residual,
        )

    hidden_states, hidden_prenorm = self.norm(hidden_states, residual)
    return hidden_states, hidden_prenorm

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/qwen3_dflash.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        (".qkv_proj", ".q_proj", "q"),
        (".qkv_proj", ".k_proj", "k"),
        (".qkv_proj", ".v_proj", "v"),
        (".gate_up_proj", ".gate_proj", 0),
        (".gate_up_proj", ".up_proj", 1),
    ]
    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()
    for name, loaded_weight in weights:
        if "midlayer." in name:
            name = name.replace("midlayer.", "layers.0.")
        if self.quant_config is not None and (
            scale_name := self.quant_config.get_cache_scale(name)
        ):
            param = params_dict[scale_name]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            loaded_weight = (
                loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
            )
            weight_loader(param, loaded_weight)
            loaded_params.add(scale_name)
            continue
        if "scale" in name or "zero_point" in name:
            name = maybe_remap_kv_scale_name(name, params_dict)
            if name is None:
                continue
        for param_name, weight_name, shard_id in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params

_default_aux_layer_ids

_default_aux_layer_ids(num_layers: int) -> tuple[int, ...]
Source code in vllm/model_executor/models/qwen3_dflash.py
def _default_aux_layer_ids(num_layers: int) -> tuple[int, ...]:
    if num_layers < 3:
        return tuple(range(num_layers))
    return (2, num_layers // 2, num_layers - 3)
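
For example, applying the formula above to a few target depths (the 36-layer case is illustrative; the values follow directly from the code):

_default_aux_layer_ids(36)   # (2, 18, 33)
_default_aux_layer_ids(3)    # (2, 1, 0)
_default_aux_layer_ids(2)    # (0, 1)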

_resolve_dflash_layer_ids

_resolve_dflash_layer_ids(
    config: Qwen3Config,
) -> tuple[int, ...] | None
Source code in vllm/model_executor/models/qwen3_dflash.py
def _resolve_dflash_layer_ids(config: Qwen3Config) -> tuple[int, ...] | None:
    dflash_config = getattr(config, "dflash_config", None)
    if isinstance(dflash_config, dict):
        layer_ids = dflash_config.get("layer_ids")
        if isinstance(layer_ids, (list, tuple)) and len(layer_ids) > 0:
            return tuple(int(layer_id) for layer_id in layer_ids)

    layer_ids = getattr(config, "eagle_aux_hidden_state_layer_ids", None)
    if isinstance(layer_ids, (list, tuple)) and len(layer_ids) > 0:
        return tuple(int(layer_id) for layer_id in layer_ids)

    return None
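
The resolver only needs attribute access, so a minimal sketch can exercise it with a SimpleNamespace stand-in for the real Qwen3Config; dflash_config takes precedence over eagle_aux_hidden_state_layer_ids, and the layer ids shown are hypothetical:

from types import SimpleNamespace

cfg = SimpleNamespace(dflash_config={"layer_ids": [2, 18, 33]})
_resolve_dflash_layer_ids(cfg)                           # (2, 18, 33)

cfg = SimpleNamespace(eagle_aux_hidden_state_layer_ids=[1, 5])
_resolve_dflash_layer_ids(cfg)                           # (1, 5)

cfg = SimpleNamespace()
_resolve_dflash_layer_ids(cfg)                           # None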