Skip to content
Snippets Groups Projects
Unverified Commit 51fc6d0e authored by Danipulok's avatar Danipulok Committed by GitHub
Browse files

Improve `Vertex` hints (#9296)

parent 2087401a
No related branches found
No related tags found
No related merge requests found
%% Cell type:markdown id:9dced1fd-2db9-4b4e-88f5-bd65563ec1a6 tags:
# Getting Started
## Installing Vertex AI
To install Vertex AI, follow these steps:
* Install Vertex Cloud SDK (https://googleapis.dev/python/aiplatform/latest/index.html)
* Setup your Default Project, credentials, region
# Basic auth example for service account
%% Cell type:code id:3d42f4996210bdc7 tags:
``` python
from llama_index.llms.vertex import Vertex
from google.oauth2 import service_account

# Service-account key file downloaded from the Google Cloud Console.
filename = "vertex-407108-37495ce6c303.json"

# Load explicit credentials instead of relying on application-default auth.
credentials: service_account.Credentials = (
    service_account.Credentials.from_service_account_file(filename)
)

# The key file carries its own project id, so reuse it for the client.
Vertex(
    model="text-bison", project=credentials.project_id, credentials=credentials
)
```
%% Cell type:markdown id:119bbfb7d84a593d tags:
## Basic Usage
A basic call to the `text-bison` model.
%% Cell type:code id:bf7deb10-28fe-41f2-abda-283162e9f35b tags:
``` python
from llama_index.llms.vertex import Vertex
from llama_index.llms.base import ChatMessage, MessageRole

# Deterministic completion (temperature=0); additional_kwargs left empty
# to show the default call shape.
llm = Vertex(model="text-bison", temperature=0, additional_kwargs={})

# .complete returns a CompletionResponse; .text holds the model output.
llm.complete("Hello this is a sample text").text
```
%% Output
' ```\nHello this is a sample text\n```'
%% Cell type:markdown id:c3afe813-a5cb-4175-bf9c-5484b2da0a9b tags:
## Async Usage
### Async
%% Cell type:code id:7916602b-4f97-43bb-85a9-ac683bc962f3 tags:
``` python
(await llm.acomplete("hello")).text
```
%% Output
' Hello! How can I help you?'
%% Cell type:markdown id:2cfd0b78-b779-4390-96f7-99ff60087786 tags:
# Streaming Usage
### Streaming
%% Cell type:code id:ef4765c5-ec6a-4109-8453-c9f76396d572 tags:
``` python
# Drain the stream; the final chunk carries the fully accumulated text.
chunks = list(llm.stream_complete("hello"))
chunks[-1].text
```
%% Output
' Hello! How can I help you?'
%% Cell type:markdown id:ce5d9453-6465-489e-b205-a25a3da37dde tags:
# Chat Usage
### chat generation
%% Cell type:code id:2b8861d4-a151-4a5d-8a6f-d24e284009c3 tags:
``` python
# Chat-capable model; a system turn sets behavior, a user turn starts the exchange.
chat = Vertex(model="chat-bison")

system_msg = ChatMessage(role=MessageRole.SYSTEM, content="Reply everything in french")
user_msg = ChatMessage(role=MessageRole.USER, content="Hello")
messages = [system_msg, user_msg]
```
%% Cell type:code id:f2286016-9d58-468d-a9a2-571ad90066ad tags:
``` python
# ChatResponse wraps the assistant turn; .message.content is the reply text.
reply = chat.chat(messages=messages)
reply.message.content
```
%% Output
' Bonjour! Comment vas-tu?'
%% Cell type:markdown id:b830d59e-12f6-4ef7-b1ce-e71408ae3cbb tags:
# Async Chat
### Asynchronous chat response
%% Cell type:code id:2920968a-9566-4964-b468-9a5899f42e61 tags:
``` python
(await chat.achat(messages=messages)).message.content
```
%% Output
' Bonjour! Comment vas-tu?'
%% Cell type:markdown id:632585b4-f146-492c-bb16-30e8b64e2d52 tags:
# Streaming Chat
### streaming chat response
%% Cell type:code id:9ee22376-317a-4bda-a20e-5d54f88503f7 tags:
``` python
# Consume the chat stream; the last delta holds the complete reply.
stream = chat.stream_chat(messages=messages)
final = list(stream)[-1]
final.message.content
```
%% Output
' Bonjour! Comment vas-tu?'
......
......@@ -45,14 +45,14 @@ class Vertex(LLM):
default=False, description="Flag to determine if current model is a Code Model"
)
_client: Any = PrivateAttr()
_chatclient: Any = PrivateAttr()
_chat_client: Any = PrivateAttr()
def __init__(
self,
model: str = "text-bison",
project: Optional[str] = None,
location: Optional[str] = None,
credential: Optional[str] = None,
credentials: Optional[Any] = None,
examples: Optional[Sequence[ChatMessage]] = None,
temperature: float = 0.1,
max_tokens: int = 512,
......@@ -61,7 +61,7 @@ class Vertex(LLM):
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
) -> None:
init_vertexai(project=project, location=location, credentials=credential)
init_vertexai(project=project, location=location, credentials=credentials)
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
......@@ -69,11 +69,11 @@ class Vertex(LLM):
if model in CHAT_MODELS:
from vertexai.language_models import ChatModel
self._chatclient = ChatModel.from_pretrained(model)
self._chat_client = ChatModel.from_pretrained(model)
elif model in CODE_CHAT_MODELS:
from vertexai.language_models import CodeChatModel
self._chatclient = CodeChatModel.from_pretrained(model)
self._chat_client = CodeChatModel.from_pretrained(model)
iscode = True
elif model in CODE_MODELS:
from vertexai.language_models import CodeGenerationModel
......@@ -148,7 +148,7 @@ class Vertex(LLM):
)
generation = completion_with_retry(
client=self._chatclient,
client=self._chat_client,
prompt=question,
chat=True,
stream=False,
......@@ -195,7 +195,7 @@ class Vertex(LLM):
)
response = completion_with_retry(
client=self._chatclient,
client=self._chat_client,
prompt=question,
chat=True,
stream=True,
......@@ -267,7 +267,7 @@ class Vertex(LLM):
)
)
generation = await acompletion_with_retry(
client=self._chatclient,
client=self._chat_client,
prompt=question,
chat=True,
params=chat_params,
......
......@@ -97,7 +97,7 @@ async def acompletion_with_retry(
def init_vertexai(
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[str] = None,
credentials: Optional[Any] = None,
) -> None:
"""Init vertexai.
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment