MultiModalLLM #

Bases: BaseComponent, DispatcherSpanMixin

Multi-Modal LLM interface.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
class MultiModalLLM(BaseComponent, DispatcherSpanMixin):
    """Multi-Modal LLM interface."""

    model_config = ConfigDict(arbitrary_types_allowed=True)
    callback_manager: CallbackManager = Field(
        default_factory=CallbackManager, exclude=True
    )

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Help static checkers understand this class hierarchy
        super().__init__(*args, **kwargs)

    @property
    @abstractmethod
    def metadata(self) -> MultiModalLLMMetadata:
        """Multi-Modal LLM metadata."""

    @abstractmethod
    def complete(
        self,
        prompt: str,
        image_documents: List[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponse:
        """Completion endpoint for Multi-Modal LLM."""

    @abstractmethod
    def stream_complete(
        self,
        prompt: str,
        image_documents: List[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponseGen:
        """Streaming completion endpoint for Multi-Modal LLM."""

    @abstractmethod
    def chat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponse:
        """Chat endpoint for Multi-Modal LLM."""

    @abstractmethod
    def stream_chat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponseGen:
        """Stream chat endpoint for Multi-Modal LLM."""

    # ===== Async Endpoints =====

    @abstractmethod
    async def acomplete(
        self,
        prompt: str,
        image_documents: List[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponse:
        """Async completion endpoint for Multi-Modal LLM."""

    @abstractmethod
    async def astream_complete(
        self,
        prompt: str,
        image_documents: List[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponseAsyncGen:
        """Async streaming completion endpoint for Multi-Modal LLM."""

    @abstractmethod
    async def achat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponse:
        """Async chat endpoint for Multi-Modal LLM."""

    @abstractmethod
    async def astream_chat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponseAsyncGen:
        """Async streaming chat endpoint for Multi-Modal LLM."""

    def __init_subclass__(cls, **kwargs: Any) -> None:
        """
        The callback decorators install events, so they must be applied before
        the span decorators; otherwise the spans would not contain the events.
        """
        for attr in (
            "complete",
            "acomplete",
            "stream_complete",
            "astream_complete",
            "chat",
            "achat",
            "stream_chat",
            "astream_chat",
        ):
            if callable(method := cls.__dict__.get(attr)):
                if attr.endswith("chat"):
                    setattr(cls, attr, llm_chat_callback()(method))
                else:
                    setattr(cls, attr, llm_completion_callback()(method))
        super().__init_subclass__(**kwargs)
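
Concrete multi-modal LLMs subclass this interface and override each abstract endpoint; __init_subclass__ then wraps those overrides with the callback and span decorators automatically. The sketch below is illustrative only: the EchoMultiModalLLM class and its echoing behaviour are invented for this page, and the import paths and MultiModalLLMMetadata fields are assumptions that may differ between versions.

from typing import Any, List, Sequence, Union

from llama_index.core.base.llms.types import (
    ChatMessage,
    ChatResponse,
    ChatResponseAsyncGen,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseAsyncGen,
    CompletionResponseGen,
    ImageBlock,
    MessageRole,
)
from llama_index.core.multi_modal_llms import MultiModalLLM, MultiModalLLMMetadata
from llama_index.core.schema import ImageNode


class EchoMultiModalLLM(MultiModalLLM):
    """Toy backend that echoes its inputs; a real subclass would call a provider API."""

    @property
    def metadata(self) -> MultiModalLLMMetadata:
        return MultiModalLLMMetadata(model_name="echo-multi-modal")

    def complete(
        self,
        prompt: str,
        image_documents: List[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponse:
        return CompletionResponse(text=f"{prompt} [saw {len(image_documents)} image(s)]")

    def stream_complete(
        self,
        prompt: str,
        image_documents: List[Union[ImageNode, ImageBlock]],
        **kwargs: Any,
    ) -> CompletionResponseGen:
        def gen() -> CompletionResponseGen:
            text = ""
            for word in self.complete(prompt, image_documents).text.split():
                text += word + " "
                yield CompletionResponse(text=text, delta=word + " ")

        return gen()

    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        reply = f"Received {len(messages)} message(s)."
        return ChatResponse(message=ChatMessage(role=MessageRole.ASSISTANT, content=reply))

    def stream_chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponseGen:
        def gen() -> ChatResponseGen:
            full = self.chat(messages)
            yield ChatResponse(message=full.message, delta=full.message.content)

        return gen()

    # A real integration would await the provider's async client here;
    # the toy version simply reuses the sync implementations.
    async def acomplete(self, prompt, image_documents, **kwargs: Any) -> CompletionResponse:
        return self.complete(prompt, image_documents, **kwargs)

    async def astream_complete(self, prompt, image_documents, **kwargs: Any) -> CompletionResponseAsyncGen:
        async def gen() -> CompletionResponseAsyncGen:
            for chunk in self.stream_complete(prompt, image_documents, **kwargs):
                yield chunk

        return gen()

    async def achat(self, messages, **kwargs: Any) -> ChatResponse:
        return self.chat(messages, **kwargs)

    async def astream_chat(self, messages, **kwargs: Any) -> ChatResponseAsyncGen:
        async def gen() -> ChatResponseAsyncGen:
            for chunk in self.stream_chat(messages, **kwargs):
                yield chunk

        return gen()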

metadata abstractmethod property #

metadata: MultiModalLLMMetadata

Multi-Modal LLM metadata.
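
For example, with the toy EchoMultiModalLLM sketched above (assuming MultiModalLLMMetadata exposes a model_name field):

llm = EchoMultiModalLLM()
print(llm.metadata.model_name)  # "echo-multi-modal"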

complete abstractmethod #

complete(
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any
) -> CompletionResponse

Completion endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
def complete(
    self,
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any,
) -> CompletionResponse:
    """Completion endpoint for Multi-Modal LLM."""

stream_complete abstractmethod #

stream_complete(
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any
) -> CompletionResponseGen

Streaming completion endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
def stream_complete(
    self,
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any,
) -> CompletionResponseGen:
    """Streaming completion endpoint for Multi-Modal LLM."""

chat abstractmethod #

chat(
    messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse

Chat endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
def chat(
    self,
    messages: Sequence[ChatMessage],
    **kwargs: Any,
) -> ChatResponse:
    """Chat endpoint for Multi-Modal LLM."""

stream_chat abstractmethod #

stream_chat(
    messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen

Stream chat endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
def stream_chat(
    self,
    messages: Sequence[ChatMessage],
    **kwargs: Any,
) -> ChatResponseGen:
    """Stream chat endpoint for Multi-Modal LLM."""

acomplete abstractmethod async #

acomplete(
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any
) -> CompletionResponse

Async completion endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
async def acomplete(
    self,
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any,
) -> CompletionResponse:
    """Async completion endpoint for Multi-Modal LLM."""

astream_complete abstractmethod async #

astream_complete(
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any
) -> CompletionResponseAsyncGen

Async streaming completion endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
async def astream_complete(
    self,
    prompt: str,
    image_documents: List[Union[ImageNode, ImageBlock]],
    **kwargs: Any,
) -> CompletionResponseAsyncGen:
    """Async streaming completion endpoint for Multi-Modal LLM."""

achat abstractmethod async #

achat(
    messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse

Async chat endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
async def achat(
    self,
    messages: Sequence[ChatMessage],
    **kwargs: Any,
) -> ChatResponse:
    """Async chat endpoint for Multi-Modal LLM."""

astream_chat abstractmethod async #

astream_chat(
    messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen

Async streaming chat endpoint for Multi-Modal LLM.

Source code in .build/python/llama-index-core/llama_index/core/multi_modal_llms/base.py
@abstractmethod
async def astream_chat(
    self,
    messages: Sequence[ChatMessage],
    **kwargs: Any,
) -> ChatResponseAsyncGen:
    """Async streaming chat endpoint for Multi-Modal LLM."""