From 9e5b37370cd7a5e49c367d6337bfdafb4c2efda8 Mon Sep 17 00:00:00 2001
From: James Briggs <james.briggs@hotmail.com>
Date: Sat, 1 Jun 2024 18:54:16 +0800
Subject: [PATCH] fix: clean up modifications from trying to fix HF bug for GH
 actions

---
 poetry.lock                             | 4 ++--
 pyproject.toml                          | 1 -
 tests/unit/encoders/test_clip.py        | 6 ------
 tests/unit/encoders/test_huggingface.py | 6 ------
 tests/unit/encoders/test_vit.py         | 6 ------
 5 files changed, 2 insertions(+), 21 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index eb4ec63c..1b4125e3 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -4220,7 +4220,7 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more
 bedrock = ["boto3"]
 google = ["google-cloud-aiplatform"]
 hybrid = ["pinecone-text"]
-local = ["huggingface-hub", "llama-cpp-python", "tokenizers", "torch", "transformers"]
+local = ["llama-cpp-python", "tokenizers", "torch", "transformers"]
 mistralai = ["mistralai"]
 pinecone = ["pinecone-client"]
 processing = ["matplotlib"]
@@ -4230,4 +4230,4 @@ vision = ["pillow", "torch", "torchvision", "transformers"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.9,<3.13"
-content-hash = "0233c0677c37714b0de5f6e319e055fceac9718e8088c6ed56cf969d443e29b6"
+content-hash = "17f2d76c59c4cb39899f69fe1e2242933a5747cfceb24a77a024d0cae0da1b3f"
diff --git a/pyproject.toml b/pyproject.toml
index 817f9859..88e515e8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,7 +28,6 @@ pinecone-text = {version = ">=0.7.1,<0.8.0", optional = true}
 torch = {version = ">=2.1.0,<2.6.0", optional = true}
 transformers = {version = ">=4.36.2", optional = true}
 tokenizers = {version = ">=0.19", optional = true}
-huggingface-hub = {version = "!=0.23.*", optional = true}
 llama-cpp-python = {version = "^0.2.28", optional = true}
 colorama = "^0.4.6"
 pinecone-client = {version=">=3.0.0,<4.0.0", optional = true}
diff --git a/tests/unit/encoders/test_clip.py b/tests/unit/encoders/test_clip.py
index 4247b279..de997e36 100644
--- a/tests/unit/encoders/test_clip.py
+++ b/tests/unit/encoders/test_clip.py
@@ -4,14 +4,8 @@ import torch
 from PIL import Image
 
 from semantic_router.encoders import CLIPEncoder
-from transformers import AutoTokenizer, AutoModel
 
 test_model_name = "aurelio-ai/sr-test-clip"
-
-# force the model download
-tokenizer = AutoTokenizer.from_pretrained(test_model_name, force_download=True)
-model = AutoModel.from_pretrained(test_model_name, force_download=True)
-
 clip_encoder = CLIPEncoder(name=test_model_name)
 embed_dim = 64
 
diff --git a/tests/unit/encoders/test_huggingface.py b/tests/unit/encoders/test_huggingface.py
index 3d21b1df..f14c7a68 100644
--- a/tests/unit/encoders/test_huggingface.py
+++ b/tests/unit/encoders/test_huggingface.py
@@ -4,14 +4,8 @@ import numpy as np
 import pytest
 
 from semantic_router.encoders.huggingface import HuggingFaceEncoder
-from transformers import AutoTokenizer, AutoModel
 
 test_model_name = "aurelio-ai/sr-test-huggingface"
-
-# force the model download
-tokenizer = AutoTokenizer.from_pretrained(test_model_name, force_download=True)
-model = AutoModel.from_pretrained(test_model_name, force_download=True)
-
 encoder = HuggingFaceEncoder(name=test_model_name)
 
 
diff --git a/tests/unit/encoders/test_vit.py b/tests/unit/encoders/test_vit.py
index d7e3d3bf..64f605e4 100644
--- a/tests/unit/encoders/test_vit.py
+++ b/tests/unit/encoders/test_vit.py
@@ -4,14 +4,8 @@ import torch
 from PIL import Image
 
 from semantic_router.encoders import VitEncoder
-from transformers import AutoTokenizer, AutoModel
 
 test_model_name = "aurelio-ai/sr-test-vit"
-
-# force the model download
-tokenizer = AutoTokenizer.from_pretrained(test_model_name, force_download=True)
-model = AutoModel.from_pretrained(test_model_name, force_download=True)
-
 vit_encoder = VitEncoder(name=test_model_name)
 embed_dim = 32
 
-- 
GitLab