vllm.model_executor.models.nvlm_d

Support for NVLM-D, NVIDIA's decoder-only multimodal model, which builds on vLLM's InternVL implementation.

NVLMDummyInputsBuilder

  Bases: BaseInternVLDummyInputsBuilder[NVLMProcessingInfo]
```python
get_dummy_mm_data(
    seq_len: int, mm_counts: Mapping[str, int]
) -> MultiModalDataDict
```
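`get_dummy_mm_data` supplies placeholder images that vLLM uses to profile worst-case memory usage before serving. A minimal, self-contained sketch of the idea, assuming a 448x448 input resolution and a plain dict-of-lists layout (both are illustrative assumptions; the real builder derives sizes and structure from the model config):

```python
from collections.abc import Mapping

from PIL import Image


def build_dummy_mm_data(seq_len: int, mm_counts: Mapping[str, int]) -> dict:
    """Create blank placeholder images, one per allowed image slot."""
    num_images = mm_counts.get("image", 0)
    # Assumed resolution; the real builder picks the largest supported
    # size so that profiling covers the worst case. seq_len is unused
    # for image-only dummies.
    width = height = 448
    return {
        "image": [
            Image.new("RGB", (width, height), color=0)
            for _ in range(num_images)
        ]
    }
```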
  
    
NVLMMultiModalProcessor

  Bases: BaseInternVLMultiModalProcessor[NVLMProcessingInfo]
  
```python
_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]
```
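`_get_prompt_updates` returns one `PromptUpdate` per image, telling vLLM's multimodal processor how to expand each image placeholder in the prompt into the full per-image token sequence. A self-contained sketch of the substitution those updates describe, with hypothetical names throughout (the real method returns update objects rather than editing the string directly):

```python
def apply_image_updates(prompt: str, image_repls: list[str]) -> str:
    """Replace the i-th "<image>" placeholder with the i-th expansion."""
    parts = prompt.split("<image>")
    assert len(parts) == len(image_repls) + 1, "one expansion per placeholder"
    out = [parts[0]]
    for repl, tail in zip(image_repls, parts[1:]):
        out.append(repl)
        out.append(tail)
    return "".join(out)


# Two images, each expanded to its own (here abbreviated) token run.
print(apply_image_updates("a <image> b <image> c", ["<img>...</img>"] * 2))
```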
  
NVLMProcessingInfo

  Bases: BaseInternVLProcessingInfo
   
```python
get_hf_processor(**kwargs: object) -> NVLMProcessor
```
 
NVLMProcessor

  Bases: BaseInternVLProcessor
  
```python
get_image_repl(
    feature_size: int, num_patches: Optional[int]
) -> PromptUpdateDetails[str]
```
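`get_image_repl` is where NVLM-D departs from the InternVL default: each dynamic-resolution tile is prefixed with a 1-D positional tag (`<tile_1>`, `<tile_2>`, ..., and `<tile_global_thumbnail>` for the thumbnail tile) ahead of its run of image-context tokens. A minimal sketch of that string construction, assuming InternVL-style `<img>`/`</img>`/`<IMG_CONTEXT>` markers; treat the exact constants as assumptions:

```python
from typing import Optional

IMG_START, IMG_END, IMG_CONTEXT = "<img>", "</img>", "<IMG_CONTEXT>"


def image_repl_text(feature_size: int,
                    num_patches: Optional[int],
                    use_thumbnail: bool = True) -> str:
    if num_patches is None:
        raise NotImplementedError("embedding inputs not covered here")
    # Tag every regular tile, then the global thumbnail tile (if any).
    tags = [f"<tile_{i}>" for i in range(1, num_patches)]
    if use_thumbnail and num_patches != 1:
        tags.append("<tile_global_thumbnail>")
    tokens_per_tile = feature_size // num_patches
    body = "".join(tag + IMG_CONTEXT * tokens_per_tile for tag in tags)
    return IMG_START + body + IMG_END
```

The `PromptUpdateDetails[str]` return type in the real signature lets the processor mark which parts of this string correspond to image-embedding positions.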
  
NVLM_D_Model

  Bases: InternVLChatModel
  
```python
_init_mlp1(config: PretrainedConfig) -> Sequential
```
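`_init_mlp1` builds the vision-to-language projector applied to pixel-shuffled ViT features. A minimal sketch, assuming the InternVL-style layout where a 0.5 downsample ratio multiplies the channel dimension by 4; the bias-free linears and the detour through the LLM's intermediate size are assumptions about NVLM-D's variant:

```python
import torch.nn as nn


def init_mlp1(vit_hidden_size: int,
              llm_intermediate_size: int,
              llm_hidden_size: int,
              downsample_ratio: float = 0.5) -> nn.Sequential:
    # Pixel shuffle folds spatial positions into channels, so the input
    # width grows by (1 / downsample_ratio) ** 2 (4x for ratio 0.5).
    scale = int(1 / downsample_ratio) ** 2
    return nn.Sequential(
        nn.LayerNorm(vit_hidden_size * scale),
        nn.Linear(vit_hidden_size * scale, llm_intermediate_size, bias=False),
        nn.GELU(),
        nn.Linear(llm_intermediate_size, llm_hidden_size, bias=False),
    )
```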
  
```python
_init_vision_model(
    config: PretrainedConfig,
    quant_config: Optional[QuantizationConfig],
    *,
    is_mono: bool,
    prefix: str,
)
```
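`_init_vision_model` instantiates the InternViT tower, truncated to the layer that image features are taken from (assuming an InternVL-style `select_layer` config field; `is_mono` selects a monolithic-encoder variant). The layer arithmetic this implies, sketched with a hypothetical helper:

```python
def num_layers_to_build(select_layer: int, total_layers: int) -> int:
    """Hypothetical helper: how many ViT blocks are needed when features
    are taken from `select_layer` (negative = counted from the end)."""
    if select_layer < 0:
        return total_layers + select_layer + 1
    return select_layer + 1


assert num_layers_to_build(-1, 24) == 24  # last block: build the full tower
assert num_layers_to_build(-2, 24) == 23  # penultimate block: drop one layer
```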