%% Cell type:markdown id: tags:
<a href="https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/examples/embeddings/google_palm.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<a href="https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/examples/embeddings/dashscope_embeddings.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
%% Cell type:markdown id: tags:
# DashScope Embeddings
%% Cell type:markdown id: tags:
If you're opening this notebook on Colab, you will probably need to install LlamaIndex 🦙 and the DashScope SDK.
%% Cell type:code id: tags:
``` python
%pip install llama-index
%pip install -U dashscope
```
%% Cell type:code id: tags:
``` python
# Set the API key via an environment variable
%env DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY
# Alternatively, pass the key directly:
# DashScopeEmbedding(model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2, api_key=api_key)
```
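%% Cell type:markdown id: tags:
A minimal sketch (not part of the original notebook) of the constructor-argument alternative mentioned above; the `api_key` string here is a placeholder you would replace with your own key.
%% Cell type:code id: tags:
``` python
from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
)

# Hypothetical: supply the key as a constructor argument instead of
# reading it from the DASHSCOPE_API_KEY environment variable.
api_key = "YOUR_DASHSCOPE_API_KEY"
embedder = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
    api_key=api_key,
)
```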
%% Cell type:code id: tags:
``` python
# imports
from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)

# Create embeddings.
# Use text_type=`document` when embedding text to build an index.
embedder = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
)

# Compute a text embedding. The sample text is a Chinese product review:
# "The quality of the clothes is excellent and they look great, well worth
# the long wait. I love them and will shop here again."
embedding = embedder.get_text_embedding(
    "衣服的质量杠杠的,很漂亮,不枉我等了这么久啊,喜欢,以后还来这里买"
)
print(f"Dimension of embeddings: {len(embedding)}")
print(embedding[:5])
```
%% Output
Dimension of embeddings: 1536
[-0.00838587212517078, 0.01004877272531103, 0.0015754734226650637, -0.04273583173235969, -0.05209946086276315]
%% Cell type:code id: tags:
``` python
# imports
from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)

# Create embeddings.
# Use text_type=`query` when embedding a query to retrieve relevant context.
embedder = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_QUERY,
)

# Compute a text embedding (same Chinese product review as above).
embedding = embedder.get_text_embedding(
    "衣服的质量杠杠的,很漂亮,不枉我等了这么久啊,喜欢,以后还来这里买"
)
print(f"Dimension of embeddings: {len(embedding)}")
print(embedding[:5])
```
%% Output
Dimension of embeddings: 1536
[-0.00838587212517078, 0.01004877272531103, 0.0015754734226650637, -0.04273583173235969, -0.05209946086276315]
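%% Cell type:markdown id: tags:
To show why the two `text_type` settings go together, here is a minimal sketch (not part of the original notebook) that scores a query against documents with cosine similarity; the names `doc_embedder`, `query_embedder`, and the sample strings are illustrative.
%% Cell type:code id: tags:
``` python
import numpy as np

from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)

# Embed the corpus with text_type=`document` ...
doc_embedder = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
)
# ... and the query with text_type=`query`.
query_embedder = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_QUERY,
)

docs = [
    "DashScope provides text, batch, and multimodal embedding services.",
    "The weather is sunny today.",
]
doc_vecs = np.array([doc_embedder.get_text_embedding(d) for d in docs])
query_vec = np.array(query_embedder.get_text_embedding("What does DashScope offer?"))

# Cosine similarity between the query and each document.
scores = doc_vecs @ query_vec / (
    np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(query_vec)
)
for doc, score in zip(docs, scores):
    print(f"{score:.3f}  {doc}")
```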
%% Cell type:code id: tags:
``` python
# Call the batch text embedding service.
from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeBatchTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)

embedder = DashScopeEmbedding(
    model_name=DashScopeBatchTextEmbeddingModels.TEXT_EMBEDDING_ASYNC_V2,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
)
# The input is a URL to a text file; the result is a URL to a gzipped
# file containing the embeddings.
embedding_result_file_url = embedder.get_batch_text_embedding(
    embedding_file_url="https://dashscope.oss-cn-beijing.aliyuncs.com/samples/text/text-embedding-test.txt"
)
print(embedding_result_file_url)
```
%% Output
https://dashscope-result-bj.oss-cn-beijing.aliyuncs.com/5fc5c860/2024-01-29/644ccedb-0b14-481c-a975-16bb5249282d_output_1706517940902.txt.gz?Expires=1706777144&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=g%2B0qcmOSwxEj8Cb2zXlvBbA6Fas%3D
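%% Cell type:markdown id: tags:
A minimal sketch (not part of the original notebook) of fetching the batch result: the returned URL points at a gzipped text file, and since it is presigned it expires after a while, so download it promptly.
%% Cell type:code id: tags:
``` python
import gzip
import urllib.request

# Download and decompress the batch embedding result file.
with urllib.request.urlopen(embedding_result_file_url) as resp:
    result_text = gzip.decompress(resp.read()).decode("utf-8")

# Print the first few lines of the result.
print("\n".join(result_text.splitlines()[:3]))
```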
%% Cell type:code id: tags:
``` python
# Call the multimodal embedding service on an image.
from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeMultiModalEmbeddingModels,
)

embedder = DashScopeEmbedding(
    model_name=DashScopeMultiModalEmbeddingModels.MULTIMODAL_EMBEDDING_ONE_PEACE_V1,
)
embedding = embedder.get_image_embedding(
    img_file_path="https://dashscope.oss-cn-beijing.aliyuncs.com/images/256_1.png"
)
print(f"Dimension of embeddings: {len(embedding)}")
print(embedding[:5])
```
%% Output
Dimension of embeddings: 1536
[-0.03515625, 0.05035400390625, 0.008087158203125, 0.0163116455078125, 0.01064300537109375]
%% Cell type:code id: tags:
``` python
# Call the multimodal embedding service on mixed text/audio/image input.
from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeMultiModalEmbeddingModels,
)

embedder = DashScopeEmbedding(
    model_name=DashScopeMultiModalEmbeddingModels.MULTIMODAL_EMBEDDING_ONE_PEACE_V1,
)
# Each entry carries one modality plus a `factor` weight that controls its
# contribution to the combined embedding. "你好" is Chinese for "hello".
inputs = [
    {"factor": 1, "text": "你好"},
    {
        "factor": 2,
        "audio": "https://dashscope.oss-cn-beijing.aliyuncs.com/audios/cow.flac",
    },
    {
        "factor": 3,
        "image": "https://dashscope.oss-cn-beijing.aliyuncs.com/images/256_1.png",
    },
]
embedding = embedder.get_multimodal_embedding(input=inputs)
print(f"Dimension of embeddings: {len(embedding)}")
print(embedding[:5])
```
%% Output
Dimension of embeddings: 1536
[-0.0200169887393713, 0.041749317198991776, 0.01004155445843935, 0.03983306884765625, -0.006652673240751028]
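%% Cell type:markdown id: tags:
Finally, a minimal sketch (not part of the original notebook) of plugging the text embedder into a LlamaIndex index. It assumes the legacy `ServiceContext` API from the llama-index 0.9 line this notebook targets, with the LLM disabled since only embeddings are exercised; the sample document and query are illustrative.
%% Cell type:code id: tags:
``` python
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.embeddings import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)

embedder = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
)
# llm=None disables the LLM; this example only needs the embedding model.
service_context = ServiceContext.from_defaults(embed_model=embedder, llm=None)

index = VectorStoreIndex.from_documents(
    [Document(text="DashScope provides embedding services.")],
    service_context=service_context,
)
retriever = index.as_retriever(similarity_top_k=1)
print(retriever.retrieve("What does DashScope offer?"))
```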