From 24768bfbe3996425943133c731ad935d498f2212 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kamil=20Murat=20Y=C4=B1lmaz?= <kamilmuratyilmaz@gmail.com>
Date: Tue, 27 Feb 2024 21:07:19 +0300
Subject: [PATCH] Fix ImportError for libcuda.so.1 in Vllm.__del__ (#11442)

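The `Vllm.__del__` destructor previously imported
`vllm.model_executor.parallel_utils.parallel_state` unconditionally.
That import pulls in vLLM's CUDA-backed parallel-state machinery,
which ends up trying to load the NVIDIA driver library
`libcuda.so.1`, so on a CPU-only host every garbage collection of a
`Vllm` instance triggered an ImportError complaining about
`libcuda.so.1`. Move the import, the `destroy_model_parallel()`
call, and the client teardown inside the `torch.cuda.is_available()`
guard so that cleanup never touches CUDA libraries on machines
without a GPU.

For reference, the underlying failure is easy to reproduce outside
vLLM; a minimal, self-contained sketch (illustrative only, not part
of this patch):

    import ctypes

    def libcuda_present() -> bool:
        # libcuda.so.1 ships with the NVIDIA driver, not with the
        # CUDA toolkit wheels, so it is absent on CPU-only hosts.
        try:
            ctypes.CDLL("libcuda.so.1")
            return True
        except OSError:
            # Same root cause as the ImportError this patch avoids.
            return False

    print(libcuda_present())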
---
 .gitignore                                            |  1 +
 .../llama_index/llms/vllm/base.py                     | 11 ++++++-----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/.gitignore b/.gitignore
index c40424b7fa..aa14bce105 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 dist/
 migration_scripts/
 venv/
+.idea
 .venv/
 .ipynb_checkpoints
 .__pycache__
diff --git a/llama-index-integrations/llms/llama-index-llms-vllm/llama_index/llms/vllm/base.py b/llama-index-integrations/llms/llama-index-llms-vllm/llama_index/llms/vllm/base.py
index e08543fb62..5b34ca956f 100644
--- a/llama-index-integrations/llms/llama-index-llms-vllm/llama_index/llms/vllm/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-vllm/llama_index/llms/vllm/base.py
@@ -214,13 +214,14 @@ class Vllm(LLM):
 
     def __del__(self) -> None:
         import torch
-        from vllm.model_executor.parallel_utils.parallel_state import (
-            destroy_model_parallel,
-        )
 
-        destroy_model_parallel()
-        del self._client
         if torch.cuda.is_available():
+            from vllm.model_executor.parallel_utils.parallel_state import (
+                destroy_model_parallel,
+            )
+
+            destroy_model_parallel()
+            del self._client
             torch.cuda.synchronize()
 
     def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
-- 
GitLab