Skip to content
Snippets Groups Projects
Unverified Commit 79834adb authored by James Briggs's avatar James Briggs Committed by GitHub
Browse files

Merge pull request #1 from aurelio-labs/simple_classification

Add simple_classify Method to DecisionLayer Class
parents ff7d041a 005818f3
No related branches found
No related tags found
No related merge requests found
Showing with 1052 additions and 100 deletions
COHERE_API_KEY=
\ No newline at end of file
.env __pycache__
mac.env *.pyc
.venv
.DS_Store
venv/
/.vscode
**/__pycache__ **/__pycache__
**/*.py[cod]
# local env files
.env*.local
.env
mac.env
\ No newline at end of file
LICENSE 0 → 100644
MIT License
Copyright (c) 2023 Aurelio AI
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
\ No newline at end of file
# Decision Layer # Semantic Router
\ No newline at end of file
from decision_layer.decision_layer import DecisionLayer
\ No newline at end of file
from decision_layer.encoders.base import BaseEncoder
from decision_layer.encoders.cohere import CohereEncoder
from decision_layer.encoders.huggingface import HuggingFaceEncoder
from decision_layer.encoders.openai import OpenAIEncoder
\ No newline at end of file
from decision_layer.encoders import BaseEncoder
class CohereEncoder(BaseEncoder):
    """Stub encoder for Cohere embeddings.

    Only the constructor is wired up; ``__call__`` is a placeholder and
    always raises. (This is the pre-implementation version of the class.)
    """

    def __init__(self, name: str):
        # Delegate name storage to BaseEncoder.
        super().__init__(name)

    def __call__(self, texts: list[str]) -> list[float]:
        # NOTE(review): no Cohere API integration exists here yet —
        # callers must not invoke this encoder.
        raise NotImplementedError
This diff is collapsed.
[tool.poetry] [tool.poetry]
name = "decision-layer" name = "semantic-router"
version = "0.0.1" version = "0.0.1"
description = "Super fast decision layer for AI" description = "Super fast semantic router for AI decision making"
authors = ["James Briggs <james@aurelio.ai>"] authors = [
"James Briggs <james@aurelio.ai>",
"Siraj Aizlewood <siraj@aurelio.ai>"
]
readme = "README.md" readme = "README.md"
[tool.poetry.dependencies] [tool.poetry.dependencies]
...@@ -15,6 +18,12 @@ openai = "^0.28.1" ...@@ -15,6 +18,12 @@ openai = "^0.28.1"
transformers = "^4.34.1" transformers = "^4.34.1"
cohere = "^4.32" cohere = "^4.32"
[tool.poetry.group.dev.dependencies]
ipykernel = "^6.26.0"
ruff = "^0.1.5"
black = "^23.11.0"
[build-system] [build-system]
requires = ["poetry-core"] requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api" build-backend = "poetry.core.masonry.api"
from .base import BaseEncoder
from .cohere import CohereEncoder
from .huggingface import HuggingFaceEncoder
from .openai import OpenAIEncoder
__all__ = ["BaseEncoder", "CohereEncoder", "HuggingFaceEncoder", "OpenAIEncoder"]
...@@ -8,4 +8,4 @@ class BaseEncoder(BaseModel): ...@@ -8,4 +8,4 @@ class BaseEncoder(BaseModel):
arbitrary_types_allowed = True arbitrary_types_allowed = True
def __call__(self, texts: list[str]) -> list[float]: def __call__(self, texts: list[str]) -> list[float]:
pass raise NotImplementedError("Subclasses must implement this method")
import os
import cohere
from semantic_router.encoders import BaseEncoder
class CohereEncoder(BaseEncoder):
    """Embedding encoder backed by the Cohere embed API."""

    # Cohere SDK client; declared Optional for pydantic, but set to a real
    # client in __init__ (or __init__ raises).
    client: cohere.Client | None

    def __init__(
        self, name: str = "embed-english-v3.0", cohere_api_key: str | None = None
    ):
        """Create a Cohere encoder for model ``name``.

        :param name: Cohere embedding model identifier.
        :param cohere_api_key: API key; when omitted, falls back to the
            ``COHERE_API_KEY`` environment variable.
        :raises ValueError: if no API key is available from either source.
        """
        super().__init__(name=name)
        # Explicit argument wins; otherwise read the key from the environment.
        cohere_api_key = cohere_api_key or os.getenv("COHERE_API_KEY")
        if cohere_api_key is None:
            raise ValueError("Cohere API key cannot be 'None'.")
        self.client = cohere.Client(cohere_api_key)

    def __call__(self, texts: list[str]) -> list[list[float]]:
        """Embed ``texts``, returning one embedding vector per input string.

        :raises ValueError: if the client was never initialized.
        """
        if self.client is None:
            raise ValueError("Cohere client is not initialized.")
        # A single text is embedded as a query, a batch as documents —
        # presumably because the v3 models embed the two differently;
        # TODO(review): confirm this heuristic against Cohere's docs.
        if len(texts) == 1:
            input_type = "search_query"
        else:
            input_type = "search_document"
        embeds = self.client.embed(texts, input_type=input_type, model=self.name)
        return embeds.embeddings
from decision_layer.encoders import BaseEncoder from semantic_router.encoders import BaseEncoder
class HuggingFaceEncoder(BaseEncoder): class HuggingFaceEncoder(BaseEncoder):
def __init__(self, name: str): def __init__(self, name: str):
super().__init__(name) self.name = name
def __call__(self, texts: list[str]) -> list[float]: def __call__(self, texts: list[str]) -> list[float]:
raise NotImplementedError raise NotImplementedError
\ No newline at end of file
import os import os
from time import sleep
from decision_layer.encoders import BaseEncoder
import openai import openai
from time import time from openai.error import RateLimitError
from semantic_router.encoders import BaseEncoder
class OpenAIEncoder(BaseEncoder): class OpenAIEncoder(BaseEncoder):
...@@ -16,20 +18,18 @@ class OpenAIEncoder(BaseEncoder): ...@@ -16,20 +18,18 @@ class OpenAIEncoder(BaseEncoder):
"""Encode a list of texts using the OpenAI API. Returns a list of """Encode a list of texts using the OpenAI API. Returns a list of
vector embeddings. vector embeddings.
""" """
passed = False res = None
# exponential backoff in case of RateLimitError # exponential backoff in case of RateLimitError
for j in range(5): for j in range(5):
try: try:
# create embeddings res = openai.Embedding.create(input=texts, engine=self.name)
res = openai.Embedding.create( if isinstance(res, dict) and "data" in res:
input=texts, engine=self.name break
) except RateLimitError:
passed = True sleep(2**j)
except openai.error.RateLimitError: if not res or not isinstance(res, dict) or "data" not in res:
time.sleep(2 ** j) raise ValueError("Failed to create embeddings.")
if not passed:
raise openai.error.RateLimitError
# get embeddings # get embeddings
embeds = [r["embedding"] for r in res["data"]] embeds = [r["embedding"] for r in res["data"]]
return embeds return embeds
\ No newline at end of file
from decision_layer.encoders import BaseEncoder
from decision_layer.schema import Decision
import numpy as np import numpy as np
from numpy.linalg import norm from numpy.linalg import norm
from semantic_router.encoders import BaseEncoder, CohereEncoder, OpenAIEncoder
from semantic_router.schema import Decision
class DecisionLayer: class DecisionLayer:
index = None index = None
categories = None categories = None
similarity_threshold = 0.82
def __init__(self, encoder: BaseEncoder, decisions: list[Decision] = []): def __init__(self, encoder: BaseEncoder, decisions: list[Decision] = []):
self.encoder = encoder self.encoder = encoder
# decide on default threshold based on encoder
if isinstance(encoder, OpenAIEncoder):
self.similarity_threshold = 0.82
elif isinstance(encoder, CohereEncoder):
self.similarity_threshold = 0.3
else:
self.similarity_threshold = 0.82
# if decisions list has been passed, we initialize index now # if decisions list has been passed, we initialize index now
if decisions: if decisions:
# initialize index now # initialize index now
for decision in decisions: for decision in decisions:
self._add_decision(decision=decision) self._add_decision(decision=decision)
def __call__(self, text: str): def __call__(self, text: str) -> str | None:
results = self._query(text) results = self._query(text)
raise NotImplementedError("To implement decision logic based on scores") top_class, top_class_scores = self._semantic_classify(results)
passed = self._pass_threshold(top_class_scores, self.similarity_threshold)
if passed:
return top_class
else:
return None
def add(self, decision: Decision): def add(self, decision: Decision):
self._add_decision(devision=decision) self._add_decision(decision=decision)
def _add_decision(self, decision: Decision): def _add_decision(self, decision: Decision):
# create embeddings # create embeddings
embeds = self.encoder(decision.utterances) embeds = self.encoder(decision.utterances)
# create decision array # create decision array
if self.categories is None: if self.categories is None:
self.categories = np.array([decision.name]*len(embeds)) self.categories = np.array([decision.name] * len(embeds))
else: else:
str_arr = np.array([decision.name]*len(embeds)) str_arr = np.array([decision.name] * len(embeds))
self.categories = np.concatenate([self.categories, str_arr]) self.categories = np.concatenate([self.categories, str_arr])
# create utterance array (the index) # create utterance array (the index)
if self.index is None: if self.index is None:
...@@ -40,19 +54,51 @@ class DecisionLayer: ...@@ -40,19 +54,51 @@ class DecisionLayer:
embed_arr = np.array(embeds) embed_arr = np.array(embeds)
self.index = np.concatenate([self.index, embed_arr]) self.index = np.concatenate([self.index, embed_arr])
def _query(self, text: str, top_k: int=5): def _query(self, text: str, top_k: int = 5):
"""Given some text, encodes and searches the index vector space to """Given some text, encodes and searches the index vector space to
retrieve the top_k most similar records. retrieve the top_k most similar records.
""" """
# create query vector # create query vector
xq = np.array(self.encoder([text])) xq = np.array(self.encoder([text]))
# calculate cosine similarities xq = np.squeeze(xq) # Reduce to 1d array.
sim = np.dot(self.index, xq.T) / (norm(self.index)*norm(xq.T))
# get indices of top_k records if self.index is not None:
idx = np.argpartition(sim.T[0], -top_k)[-top_k:] index_norm = norm(self.index, axis=1)
scores = sim[idx] xq_norm = norm(xq.T)
# get the utterance categories (decision names) sim = np.dot(self.index, xq.T) / (index_norm * xq_norm)
decisions = self.categories[idx] # get indices of top_k records
return [ top_k = min(top_k, sim.shape[0])
{"decision": d, "score": s.item()} for d, s in zip(decisions, scores) idx = np.argpartition(sim, -top_k)[-top_k:]
] scores = sim[idx]
# get the utterance categories (decision names)
decisions = self.categories[idx] if self.categories is not None else []
return [
{"decision": d, "score": s.item()} for d, s in zip(decisions, scores)
]
else:
return []
def _semantic_classify(self, query_results: list[dict]) -> tuple[str, list[float]]:
scores_by_class = {}
for result in query_results:
score = result["score"]
decision = result["decision"]
if decision in scores_by_class:
scores_by_class[decision].append(score)
else:
scores_by_class[decision] = [score]
# Calculate total score for each class
total_scores = {
decision: sum(scores) for decision, scores in scores_by_class.items()
}
top_class = max(total_scores, key=lambda x: total_scores[x], default=None)
# Return the top class and its associated scores
return str(top_class), scores_by_class.get(top_class, [])
def _pass_threshold(self, scores: list[float], threshold: float) -> bool:
if scores:
return max(scores) > threshold
else:
return False
from enum import Enum from enum import Enum
from pydantic import BaseModel from pydantic import BaseModel
from pydantic.dataclasses import dataclass from pydantic.dataclasses import dataclass
from decision_layer.encoders import (
from semantic_router.encoders import (
BaseEncoder, BaseEncoder,
CohereEncoder,
HuggingFaceEncoder, HuggingFaceEncoder,
OpenAIEncoder, OpenAIEncoder,
CohereEncoder,
) )
...@@ -14,11 +16,13 @@ class Decision(BaseModel): ...@@ -14,11 +16,13 @@ class Decision(BaseModel):
utterances: list[str] utterances: list[str]
description: str | None = None description: str | None = None
class EncoderType(Enum): class EncoderType(Enum):
HUGGINGFACE = "huggingface" HUGGINGFACE = "huggingface"
OPENAI = "openai" OPENAI = "openai"
COHERE = "cohere" COHERE = "cohere"
@dataclass @dataclass
class Encoder: class Encoder:
type: EncoderType type: EncoderType
...@@ -38,6 +42,7 @@ class Encoder: ...@@ -38,6 +42,7 @@ class Encoder:
def __call__(self, texts: list[str]) -> list[float]: def __call__(self, texts: list[str]) -> list[float]:
return self.model(texts) return self.model(texts)
@dataclass @dataclass
class SemanticSpace: class SemanticSpace:
id: str id: str
...@@ -49,4 +54,4 @@ class SemanticSpace: ...@@ -49,4 +54,4 @@ class SemanticSpace:
self.decisions = decisions self.decisions = decisions
def add(self, decision: Decision): def add(self, decision: Decision):
self.decisions.append(decision) self.decisions.append(decision)
\ No newline at end of file
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
# Decision Layer Walkthrough # Semantic Router Walkthrough
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
The decision layer library can be used as a super fast decision making layer on top of LLMs. That means that rather than waiting on a slow agent to decide what to do, we can use the magic of semantic vector space to make decisions. Cutting decision making time down from seconds to milliseconds. The Semantic Router library can be used as a super fast decision making layer on top of LLMs. That means rather than waiting on a slow agent to decide what to do, we can use the magic of semantic vector space to make decisions. Cutting decision making time down from seconds to milliseconds.
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## Getting Started ## Getting Started
%% Cell type:markdown id: tags:
We start by installing the library:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
!pip install -qU \ !pip install -qU semantic-router==0.0.1
decision-layer
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
We start by defining a dictionary mapping decisions to example phrases that should trigger those decisions. We start by defining a dictionary mapping decisions to example phrases that should trigger those decisions.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from decision_layer.schema import Decision from semantic_router.schema import Decision
politics = Decision( politics = Decision(
name="politics", name="politics",
utterances=[ utterances=[
"isn't politics the best thing ever", "isn't politics the best thing ever",
"why don't you tell me about your political opinions", "why don't you tell me about your political opinions",
"don't you just love the president" "don't you just love the president"
"don't you just hate the president", "don't you just hate the president",
"they're going to destroy this country!", "they're going to destroy this country!",
"they will save the country!" "they will save the country!"
] ]
) )
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
Let's define another for good measure: Let's define another for good measure:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
chitchat = Decision( chitchat = Decision(
name="chitchat", name="chitchat",
utterances=[ utterances=[
"how's the weather today?", "how's the weather today?",
"how are things going?", "how are things going?",
"lovely weather today", "lovely weather today",
"the weather is horrendous", "the weather is horrendous",
"let's go to the chippy" "let's go to the chippy"
] ]
) )
decisions = [politics, chitchat] decisions = [politics, chitchat]
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
Now we initialize our embedding model (we will add support for Hugging Face): Now we initialize our embedding model:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from decision_layer.encoders import OpenAIEncoder from semantic_router.encoders import CohereEncoder
from getpass import getpass
import os import os
os.environ["OPENAI_API_KEY"] = "sk-..." # os.environ["COHERE_API_KEY"] = getpass("Enter Cohere API Key: ")
encoder = OpenAIEncoder(name="text-embedding-ada-002") os.environ["COHERE_API_KEY"]
encoder = CohereEncoder()
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
Now we define the `DecisionLayer`. When called, the decision layer will consume text (a query) and output the category (`Decision`) it belongs to — for now we can only `_query` and get the most similar `Decision` `utterances`. Now we define the `DecisionLayer`. When called, the decision layer will consume text (a query) and output the category (`Decision`) it belongs to — to initialize a `DecisionLayer` we need our `encoder` model and a list of `decisions`.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from decision_layer import DecisionLayer from semantic_router import DecisionLayer
dl = DecisionLayer(encoder=encoder, decisions=decisions) dl = DecisionLayer(encoder=encoder, decisions=decisions)
``` ```
%% Cell type:markdown id: tags:
Now we can test it:
%% Cell type:code id: tags:
``` python
dl("don't you love politics?")
```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
out = dl._query("don't you love politics?") dl("how's the weather today?")
out
``` ```
%% Output %% Cell type:markdown id: tags:
Both are classified accurately, what if we send a query that is unrelated to our existing `Decision` objects?
[{'decision': 'politics', 'score': 0.24968127755063652}, %% Cell type:code id: tags:
{'decision': 'politics', 'score': 0.2536216026530966},
{'decision': 'politics', 'score': 0.27568433588684954}, ``` python
{'decision': 'politics', 'score': 0.27732789989574913}, dl("I'm interested in learning about llama 2")
{'decision': 'politics', 'score': 0.28110307885950714}] ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
--- In this case, we return `None` because no matches were identified.
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment