diff --git a/semantic_router/encoders/fastembed.py b/semantic_router/encoders/fastembed.py
index 98cfc6cc529ff4c06e0a3e7f0d0af779df7f71dd..33c81f397263375fb58d851656a0a8d5ae43ea8b 100644
--- a/semantic_router/encoders/fastembed.py
+++ b/semantic_router/encoders/fastembed.py
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional
+from typing import Any, Optional
 
 import numpy as np
 from pydantic import PrivateAttr
@@ -44,8 +44,8 @@ class FastEmbedEncoder(BaseEncoder):
 
     def __call__(self, docs: list[str]) -> list[list[float]]:
         try:
-            embeds: List[np.ndarray] = list(self._client.embed(docs))
-            embeddings: List[List[float]] = [e.tolist() for e in embeds]
+            embeds: list[np.ndarray] = list(self._client.embed(docs))
+            embeddings: list[list[float]] = [e.tolist() for e in embeds]
             return embeddings
         except Exception as e:
             raise ValueError(f"FastEmbed embed failed. Error: {e}")
diff --git a/semantic_router/linear.py b/semantic_router/linear.py
index 1c13262fbe55bfe4cd92f75c61fb33899d60337e..09b911fbc62245f17e564d6e368ddf627d1b3864 100644
--- a/semantic_router/linear.py
+++ b/semantic_router/linear.py
@@ -1,5 +1,3 @@
-from typing import Tuple
-
 import numpy as np
 from numpy.linalg import norm
 
@@ -21,7 +19,7 @@ def similarity_matrix(xq: np.ndarray, index: np.ndarray) -> np.ndarray:
     return sim
 
 
-def top_scores(sim: np.ndarray, top_k: int = 5) -> Tuple[np.ndarray, np.ndarray]:
+def top_scores(sim: np.ndarray, top_k: int = 5) -> tuple[np.ndarray, np.ndarray]:
     # get indices of top_k records
     top_k = min(top_k, sim.shape[0])
     idx = np.argpartition(sim, -top_k)[-top_k:]
diff --git a/semantic_router/utils/splitters.py b/semantic_router/utils/splitters.py
index 746015204d702690a0eff289eaa1537c42658f23..f469fbcc4fcba36e0884d87eeac5ff73ff498077 100644
--- a/semantic_router/utils/splitters.py
+++ b/semantic_router/utils/splitters.py
@@ -26,7 +26,7 @@ def semantic_splitter(
         split_method (str): The method to use for splitting.
 
     Returns:
-        Dict[str, list[str]]: Splits with corresponding documents.
+        dict[str, list[str]]: Splits with corresponding documents.
     """
     total_docs = len(docs)
     splits = {}
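Note: all three hunks replace the typing.List/Tuple/Dict aliases with the built-in
generics standardized in PEP 585, which are only valid as annotations on Python 3.9
or newer. Below is a minimal sketch of the updated top_scores in use; the function
tail (scores = sim[idx]; return scores, idx) and the sample sim vector are
assumptions for illustration, since the hunk above cuts off mid-function:

    import numpy as np

    def top_scores(sim: np.ndarray, top_k: int = 5) -> tuple[np.ndarray, np.ndarray]:
        # clamp top_k to the length of the similarity vector
        top_k = min(top_k, sim.shape[0])
        # argpartition places the top_k largest values last; slice out their indices
        # (they come back in no particular order)
        idx = np.argpartition(sim, -top_k)[-top_k:]
        # assumed tail: gather the scores at those indices and return both
        scores = sim[idx]
        return scores, idx

    # hypothetical similarity vector, just for illustration
    sim = np.array([0.1, 0.9, 0.4, 0.7])
    scores, idx = top_scores(sim, top_k=2)
    print(scores, idx)  # the two highest scores and their indices, unordered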