Bases: BaseInternVLDummyInputsBuilder[NVLMProcessingInfo]
Source code in vllm/model_executor/models/nvlm_d.py
 get_dummy_mm_data(
    seq_len: int,
    mm_counts: Mapping[str, int],
    mm_options: Mapping[str, BaseDummyOptions] | None = None,
) -> MultiModalDataDict
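get_dummy_mm_data produces placeholder multimodal data that vLLM runs through the model once at startup to profile peak memory usage. A minimal, self-contained sketch of the idea, assuming blank PIL images at an illustrative 448x448 tile size (the real builder derives the target resolution and image count from the model config and mm_counts, and the optional mm_options mapping carries per-modality overrides for this dummy data):

from collections.abc import Mapping

from PIL import Image


def build_dummy_mm_data(
    seq_len: int,
    mm_counts: Mapping[str, int],
    target_size: tuple[int, int] = (448, 448),  # assumed tile resolution
) -> dict[str, list[Image.Image]]:
    # Only the shapes matter for profiling, so blank RGB images are enough.
    num_images = mm_counts.get("image", 0)
    return {"image": [Image.new("RGB", target_size) for _ in range(num_images)]}


print(build_dummy_mm_data(seq_len=4096, mm_counts={"image": 2}))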
  Bases: BaseInternVLMultiModalProcessor[NVLMProcessingInfo]
 _get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargsItems,
) -> Sequence[PromptUpdate]
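_get_prompt_updates returns the PromptUpdate objects that tell vLLM's multimodal processor how to rewrite the prompt: each image placeholder in the text is replaced by the full per-image feature string produced by get_image_repl (see below). A rough stand-alone sketch of the substitution those updates describe; the "<image>" placeholder string is an assumption for illustration:

from collections.abc import Sequence


def expand_image_placeholders(
    prompt: str,
    per_image_repls: Sequence[str],
    placeholder: str = "<image>",  # assumed placeholder text
) -> str:
    # Replace the i-th placeholder occurrence with the i-th image's replacement.
    parts = prompt.split(placeholder)
    if len(parts) != len(per_image_repls) + 1:
        raise ValueError("placeholder count does not match number of images")
    pieces = [parts[0]]
    for repl, tail in zip(per_image_repls, parts[1:]):
        pieces.extend((repl, tail))
    return "".join(pieces)


print(expand_image_placeholders("USER: <image> What is shown here?",
                                ["<Image><tile_1>...</Image>"]))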
  Bases: BaseInternVLProcessingInfo
 get_hf_processor(**kwargs: object) -> NVLMProcessor
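Note that the returned NVLMProcessor is vLLM's own processor (its base, BaseInternVLProcessor, appears below) rather than a Hugging Face AutoProcessor; keyword arguments passed here are presumably forwarded into its constructor. A small illustrative sketch, with made-up config/tokenizer plumbing standing in for the real one:

class ProcessorSketch:
    """Stand-in for NVLMProcessor; attribute names are illustrative."""

    def __init__(self, config: dict, tokenizer: object, **overrides: object) -> None:
        self.config = config
        self.tokenizer = tokenizer
        self.overrides = overrides  # e.g. a different max number of image tiles


def get_hf_processor_sketch(config: dict, tokenizer: object,
                            **kwargs: object) -> ProcessorSketch:
    # Per-call keyword overrides travel straight into the processor constructor.
    return ProcessorSketch(config, tokenizer, **kwargs)


proc = get_hf_processor_sketch({"model_type": "NVLM_D"}, tokenizer=None,
                               max_dynamic_patch=6)
print(proc.overrides)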
 
  Bases: BaseInternVLProcessor
 get_image_repl(
    feature_size: int, num_patches: int | None
) -> PromptUpdateDetails[str]
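get_image_repl builds the text that stands in for one image in the prompt. NVLM-D uses text-based tile tags (per the NVLM paper): each dynamic tile gets a <tile_i> marker, the global thumbnail gets <tile_global_thumbnail>, and each tag is followed by that tile's share of image context tokens, all wrapped in <Image>...</Image>. A hedged sketch of that layout; the exact tag and context-token strings, and whether num_patches counts the thumbnail, are assumptions here:

def nvlm_image_repl(feature_size: int, num_patches: int | None) -> str:
    if not num_patches:
        num_patches = 1
    context_per_tile = feature_size // num_patches
    # Assumed: num_patches includes the global thumbnail tile.
    tags = [f"<tile_{i}>" for i in range(1, num_patches)] + ["<tile_global_thumbnail>"]
    body = "".join(tag + "<image>" * context_per_tile for tag in tags)
    return "<Image>" + body + "</Image>"


print(nvlm_image_repl(feature_size=6, num_patches=3))

The real method returns PromptUpdateDetails[str], presumably so vLLM can also locate the embedding placeholder tokens within the replacement string.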
  Bases: InternVLChatModel
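This is the top-level NVLM-D architecture class; it inherits the InternVL forward path and overrides initialisation hooks such as the two below. For orientation, a hedged sketch of reaching it through vLLM's public offline API; the checkpoint name is the published NVLM-D release, while the prompt template, context length, and parallelism settings are illustrative only:

from PIL import Image

from vllm import LLM, SamplingParams

llm = LLM(
    model="nvidia/NVLM-D-72B",
    trust_remote_code=True,   # NVLM-D ships custom config/model code
    max_model_len=8192,       # illustrative
    tensor_parallel_size=8,   # illustrative; the 72B model needs several GPUs
)

image = Image.open("example.jpg")  # hypothetical local image
prompt = "<image>\nDescribe this image."  # simplified; use the model's chat template in practice

outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"image": image}},
    SamplingParams(max_tokens=128),
)
print(outputs[0].outputs[0].text)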
 _init_mlp1(config: PretrainedConfig) -> Module
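_init_mlp1 constructs the projector that maps (pixel-shuffled) vision features into the language model's embedding space. A minimal torch sketch of the usual InternVL-style two-layer MLP; the layer sizes, the 0.5 downsample ratio, and whatever NVLM-D changes relative to the InternVL base are assumptions, not read off nvlm_d.py:

import torch
from torch import nn


def init_mlp_projector(vit_hidden_size: int, llm_hidden_size: int,
                       downsample_ratio: float = 0.5) -> nn.Sequential:
    # After pixel shuffling, each visual token concatenates (1 / ratio) ** 2
    # neighbouring patches, hence the widened input dimension.
    scale = int(1 / downsample_ratio) ** 2
    return nn.Sequential(
        nn.LayerNorm(vit_hidden_size * scale),
        nn.Linear(vit_hidden_size * scale, llm_hidden_size),
        nn.GELU(),
        nn.Linear(llm_hidden_size, llm_hidden_size),
    )


mlp1 = init_mlp_projector(vit_hidden_size=1024, llm_hidden_size=4096)
print(mlp1(torch.randn(1, 16, 4096)).shape)  # torch.Size([1, 16, 4096])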
 _init_vision_model(
    config: PretrainedConfig,
    quant_config: QuantizationConfig | None,
    *,
    is_mono: bool,
    prefix: str,
)
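_init_vision_model chooses and builds the vision encoder. In the InternVL family this hook returns either the full InternViT encoder or a patch-embedding-only variant when is_mono is set (NVLM-D itself uses the full InternViT encoder per the NVLM paper), and threads quant_config and the weight-name prefix through to it. A stub-level sketch of that selection; both encoder classes here are placeholders, not vLLM's real implementations:

from torch import nn


class FullVisionEncoder(nn.Module):
    """Placeholder for a full ViT-style encoder (InternViT in the real model)."""


class PatchOnlyVisionEncoder(nn.Module):
    """Placeholder for the patch-embedding-only encoder used by mono variants."""


def init_vision_model_sketch(*, is_mono: bool, prefix: str) -> nn.Module:
    model_cls = PatchOnlyVisionEncoder if is_mono else FullVisionEncoder
    print(f"initialising {model_cls.__name__} under prefix {prefix!r}")
    return model_cls()


init_vision_model_sketch(is_mono=False, prefix="vision_model")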