diff --git a/semantic_router/utils/llm.py b/semantic_router/utils/llm.py
index 0d22b9a66ca50a07295c78fc43815b1d53091f80..6ce28ff8c3f16e7200c15139095715bf6078d1b3 100644
--- a/semantic_router/utils/llm.py
+++ b/semantic_router/utils/llm.py
@@ -33,31 +33,31 @@ def llm(prompt: str) -> str | None:
         logger.error(f"LLM error: {e}")
         raise Exception(f"LLM error: {e}")
 
-
-async def allm(prompt: str) -> str | None:
-    try:
-        client = openai.AsyncOpenAI(
-            base_url="https://openrouter.ai/api/v1",
-            api_key=os.getenv("OPENROUTER_API_KEY"),
-        )
-
-        completion = await client.chat.completions.create(
-            model="mistralai/mistral-7b-instruct",
-            messages=[
-                {
-                    "role": "user",
-                    "content": prompt,
-                },
-            ],
-            temperature=0.01,
-            max_tokens=200,
-        )
-
-        output = completion.choices[0].message.content
-
-        if not output:
-            raise Exception("No output generated")
-        return output
-    except Exception as e:
-        logger.error(f"LLM error: {e}")
-        raise Exception(f"LLM error: {e}")
+# TODO integrate async LLM function
+# async def allm(prompt: str) -> str | None:
+#     try:
+#         client = openai.AsyncOpenAI(
+#             base_url="https://openrouter.ai/api/v1",
+#             api_key=os.getenv("OPENROUTER_API_KEY"),
+#         )
+
+#         completion = await client.chat.completions.create(
+#             model="mistralai/mistral-7b-instruct",
+#             messages=[
+#                 {
+#                     "role": "user",
+#                     "content": prompt,
+#                 },
+#             ],
+#             temperature=0.01,
+#             max_tokens=200,
+#         )
+
+#         output = completion.choices[0].message.content
+
+#         if not output:
+#             raise Exception("No output generated")
+#         return output
+#     except Exception as e:
+#         logger.error(f"LLM error: {e}")
+#         raise Exception(f"LLM error: {e}")
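The synchronous `llm()` helper named in the hunk header stays in place while the async variant above is commented out behind the TODO. A minimal sketch of how a caller could still get a non-blocking call in the meantime, by pushing the blocking helper onto a worker thread; the `allm_shim` name is illustrative and not part of this patch, and it assumes `OPENROUTER_API_KEY` is set just as `llm()` does:

```python
import asyncio

from semantic_router.utils.llm import llm  # synchronous helper kept by this patch


async def allm_shim(prompt: str) -> str | None:
    # Run the blocking OpenRouter call on a worker thread so the event loop
    # stays free; a stopgap sketch, not the TODO integration itself.
    return await asyncio.to_thread(llm, prompt)


if __name__ == "__main__":
    print(asyncio.run(allm_shim("What is semantic routing?")))
```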
diff --git a/test_output.json b/test_output.json
deleted file mode 100644
index 1f93008593dc770f1f001a47b819d652c14af179..0000000000000000000000000000000000000000
--- a/test_output.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"name": "test", "utterances": ["utterance"], "description": null}]
diff --git a/test_output.yaml b/test_output.yaml
deleted file mode 100644
index b71676477f7a48fff6174221c48d3c0595dbf14d..0000000000000000000000000000000000000000
--- a/test_output.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-- description: null
-  name: test
-  utterances:
-  - utterance
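Both deleted files hold the same single serialized route, once as JSON and once as YAML. For reference, that serialized form maps field for field onto a route object; a small sketch, assuming `Route` is importable from the package root (the import path is not shown in this diff):

```python
import json

from semantic_router import Route  # assumed import path

# The exact payload the deleted test_output.json contained.
serialized = '[{"name": "test", "utterances": ["utterance"], "description": null}]'

routes = [Route(**fields) for fields in json.loads(serialized)]
assert routes[0].name == "test" and routes[0].utterances == ["utterance"]
```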
diff --git a/tests/unit/test_layer.py b/tests/unit/test_layer.py
index 386edf6d1a102a53c6c6a577ebba229d4f4051b1..c6898235fa01b15a5786f736ed740fe5189b1859 100644
--- a/tests/unit/test_layer.py
+++ b/tests/unit/test_layer.py
@@ -173,6 +173,32 @@ class TestRouteLayer:
         route_layer = RouteLayer(encoder=base_encoder)
         assert route_layer.score_threshold == 0.82
 
+    def test_json(self, openai_encoder, routes):
+        route_layer = RouteLayer(encoder=openai_encoder, routes=routes)
+        route_layer.to_json("test_output.json")
+        assert os.path.exists("test_output.json")
+        route_layer_from_file = RouteLayer.from_json("test_output.json")
+        assert route_layer_from_file.index is not None and route_layer_from_file.categories is not None
+        os.remove("test_output.json")
+
+    def test_yaml(self, openai_encoder, routes):
+        route_layer = RouteLayer(encoder=openai_encoder, routes=routes)
+        route_layer.to_yaml("test_output.yaml")
+        assert os.path.exists("test_output.yaml")
+        route_layer_from_file = RouteLayer.from_yaml("test_output.yaml")
+        assert route_layer_from_file.index is not None and route_layer_from_file.categories is not None
+        os.remove("test_output.yaml")
+
+    def test_config(self, openai_encoder, routes):
+        route_layer = RouteLayer(encoder=openai_encoder, routes=routes)
+        # confirm route creation functions as expected
+        layer_config = route_layer.to_config()
+        assert layer_config.routes == routes
+        # now load from config and confirm it's the same
+        route_layer_from_config = RouteLayer.from_config(layer_config)
+        assert (route_layer_from_config.index == route_layer.index).all()
+        assert (route_layer_from_config.categories == route_layer.categories).all()
+        assert route_layer_from_config.score_threshold == route_layer.score_threshold
 
 
 # Add more tests for edge cases and error handling as needed.
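Outside the test suite, the persistence round trip that `test_json`, `test_yaml`, and `test_config` exercise looks roughly like the sketch below. The `to_json` / `from_json` / `to_config` / `from_config` names come straight from the hunk above; the top-level `Route` and `OpenAIEncoder` imports are assumptions about the package layout rather than something this diff shows, and the encoder needs `OPENAI_API_KEY` at runtime:

```python
from semantic_router import Route, RouteLayer  # assumed public exports
from semantic_router.encoders import OpenAIEncoder  # assumed import path

routes = [
    Route(name="chitchat", utterances=["how are you?", "lovely weather today"]),
    Route(name="politics", utterances=["what do you think of the election?"]),
]
layer = RouteLayer(encoder=OpenAIEncoder(), routes=routes)

# File round trip, as exercised by test_json / test_yaml above.
layer.to_json("layer.json")
restored = RouteLayer.from_json("layer.json")

# In-memory round trip, as exercised by test_config above.
config = layer.to_config()
rebuilt = RouteLayer.from_config(config)
assert rebuilt.score_threshold == layer.score_threshold
```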