diff --git a/pyproject.toml b/pyproject.toml
index 873a2dfd21b093892bb64fba7fcdef4c68df69ea..1b1c481c7414d963feeeb2e8004621d06dd435d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "semantic-router"
-version = "0.0.36"
+version = "0.0.37"
 description = "Super fast semantic router for AI decision making"
 authors = [
     "James Briggs <james@aurelio.ai>",
diff --git a/semantic_router/__init__.py b/semantic_router/__init__.py
index 39506cc5c924a7c851a62edbaa4225e19866737c..cd0a624b34077d48ed7aa4cf74f4de382b3fa21b 100644
--- a/semantic_router/__init__.py
+++ b/semantic_router/__init__.py
@@ -4,4 +4,4 @@ from semantic_router.route import Route
 
 __all__ = ["RouteLayer", "HybridRouteLayer", "Route", "LayerConfig"]
 
-__version__ = "0.0.36"
+__version__ = "0.0.37"
diff --git a/semantic_router/encoders/openai.py b/semantic_router/encoders/openai.py
index d56a1e71ecd0ff520edd69ff0d709524617db964..3cfa2a704189c00709d406073691f7916720e24c 100644
--- a/semantic_router/encoders/openai.py
+++ b/semantic_router/encoders/openai.py
@@ -114,7 +114,8 @@ class OpenAIEncoder(BaseEncoder):
         return embeddings
 
     def _truncate(self, text: str) -> str:
-        tokens = self._token_encoder.encode(text)
+        # we use encode_ordinary as a faster equivalent of encode(text, disallowed_special=())
+        tokens = self._token_encoder.encode_ordinary(text)
         if len(tokens) > self.token_limit:
             logger.warning(
                 f"Document exceeds token limit: {len(tokens)} > {self.token_limit}"