diff --git a/.vscode/settings.json b/.vscode/settings.json
index f00d45a9d62ad9cbfb381f123d85531378664577..fe6911f4bde36fa913ca140525218fd4403e9eae 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,7 +1,7 @@
 {
-    "python.formatting.provider": "black",
-    "editor.formatOnSave": true,
-    "editor.codeActionsOnSave": {
-        "source.organizeImports": true,
-    },
+  "python.formatting.provider": "black",
+  "editor.formatOnSave": true,
+  "editor.codeActionsOnSave": {
+      "source.organizeImports": true,
+  },
 }
\ No newline at end of file
diff --git a/docs/how_to/integrations/vector_stores.md b/docs/how_to/integrations/vector_stores.md
index 3cb628bc0bdef912bc8a726892863cf3d2b9b6ff..c1f5a74616438752b43e5a11647b8455ab8dbbc5 100644
--- a/docs/how_to/integrations/vector_stores.md
+++ b/docs/how_to/integrations/vector_stores.md
@@ -17,8 +17,7 @@ LlamaIndex supports loading data from the following sources. See [Data Connector
 - Faiss (`FaissReader`). [Installation](https://github.com/facebookresearch/faiss/blob/main/INSTALL.md).
 - Milvus (`MilvusReader`). [Installation](https://milvus.io/docs)
 - Zilliz (`MilvusReader`). [Quickstart](https://zilliz.com/doc/quick_start)
-- MyScale (`MyScaleReader`). [Quickstart](https://docs.myscale.com/en/quickstart/). [Installation/Python Client](https://docs.myscale.com/en/python-client/). 
-
+- MyScale (`MyScaleReader`). [Quickstart](https://docs.myscale.com/en/quickstart/). [Installation/Python Client](https://docs.myscale.com/en/python-client/).
 
 Chroma stores both documents and vectors. This is an example of how to use Chroma:
 
@@ -97,7 +96,6 @@ NOTE: Both Pinecone and Faiss data loaders assume that the respective data sourc
 
 For instance, this is an example usage of the Pinecone data loader `PineconeReader`:
 
-
 ```python
 
 from llama_index.readers.pinecone import PineconeReader
@@ -117,7 +115,6 @@ documents = reader.load_data(
 
 ```
 
-
 [Example notebooks can be found here](https://github.com/jerryjliu/llama_index/tree/main/examples/data_connectors).
 
 (vector-store-index)=
@@ -130,7 +127,7 @@ as the storage backend for `GPTVectorStoreIndex`.
 A detailed API reference is [found here](/reference/indices/vector_store.rst).
 
 Similar to any other index within LlamaIndex (tree, keyword table, list), `GPTVectorStoreIndex` can be constructed upon any collection
-of documents. 
+of documents.
 We use the vector store within the index to store embeddings for the input text chunks.
 
 Once constructed, the index can be used for querying.
@@ -138,7 +135,8 @@ Once constructed, the index can be used for querying.
 **Default Vector Store Index Construction/Querying**
 
 By default, `GPTVectorStoreIndex` uses a in-memory `SimpleVectorStore`
-that's initialized as part of the default storage context. 
+that's initialized as part of the default storage context.
+
 ```python
 from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
 
@@ -155,6 +153,7 @@ response = query_engine.query("What did the author do growing up?")
 **Custom Vector Store Index Construction/Querying**
 
 We can query over a custom vector store as follows:
+
 ```python
 from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, StorageContext
 from llama_index.vector_stores import DeepLakeVectorStore
@@ -175,7 +174,28 @@ response = query_engine.query("What did the author do growing up?")
 
 Below we show more examples of how to construct various vector stores we support.
 
+**Redis**
+First, start Redis-Stack (or get url from Redis provider)
+
+```bash
+docker run --name redis-vecdb -d -p 6379:6379 -p 8001:8001 redis/redis-stack:latest
+```
+
+Then connect and use Redis as a vector database with LlamaIndex
+
+```python
+from llama_index.vector_stores import RedisVectorStore
+vector_store = RedisVectorStore(
+    index_name="llm-project",
+    redis_url="redis://localhost:6379",
+    overwrite=True
+)
+```
+
+This can be used with the `GPTVectorStoreIndex` to provide a query interface for retrieval, querying, deleting, persisting the index, and more.
+
 **DeepLake**
+
 ```python
 import os
 import getpath
@@ -190,6 +210,7 @@ vector_store = DeepLakeVectorStore(dataset_path=dataset_path, overwrite=True)
 ```
 
 **Faiss**
+
 ```python
 import faiss
 from llama_index.vector_stores import FaissVectorStore
@@ -210,6 +231,7 @@ storage_context.persist()
 ```
 
 **Weaviate**
+
 ```python
 import weaviate
 from llama_index.vector_stores import WeaviateVectorStore
@@ -228,6 +250,7 @@ vector_store = WeaviateVectorStore(weaviate_client=client)
 ```
 
 **Pinecone**
+
 ```python
 import pinecone
 from llama_index.vector_stores import PineconeVectorStore
@@ -236,9 +259,9 @@ from llama_index.vector_stores import PineconeVectorStore
 api_key = "api_key"
 pinecone.init(api_key=api_key, environment="us-west1-gcp")
 pinecone.create_index(
-    "quickstart", 
-    dimension=1536, 
-    metric="euclidean", 
+    "quickstart",
+    dimension=1536,
+    metric="euclidean",
     pod_type="p1"
 )
 index = pinecone.Index("quickstart")
@@ -249,12 +272,13 @@ metadata_filters = {"title": "paul_graham_essay"}
 
 # construct vector store
 vector_store = PineconeVectorStore(
-    pinecone_index=index, 
+    pinecone_index=index,
     metadata_filters=metadata_filters
 )
 ```
 
 **Qdrant**
+
 ```python
 import qdrant_client
 from llama_index.vector_stores import QdrantVectorStore
@@ -270,7 +294,7 @@ collection_name = "paul_graham"
 # construct vector store
 vector_store = QdrantVectorStore(
     client=client,
-    collection_name=collection_name, 
+    collection_name=collection_name,
 )
 ```
 
@@ -292,6 +316,7 @@ vector_store = ChromaVectorStore(
 ```
 
 **Milvus**
+
 - Milvus Index offers the ability to store both Documents and their embeddings. Documents are limited to the predefined Document attributes and does not include extra_info.
 
 ```python
@@ -300,8 +325,8 @@ from llama_index.vector_stores import MilvusVectorStore
 
 # construct vector store
 vector_store = MilvusVectorStore(
-    host='localhost', 
-    port=19530, 
+    host='localhost',
+    port=19530,
     overwrite='True'
 )
 
@@ -313,8 +338,8 @@ If you get stuck at building wheel for `grpcio`, check if you are using python 3
 (there's a known issue: https://github.com/milvus-io/pymilvus/issues/1308)
 and try downgrading.
 
-
 **Zilliz**
+
 - Zilliz Cloud (hosted version of Milvus) uses the Milvus Index with some extra arguments.
 
 ```python
@@ -324,11 +349,11 @@ from llama_index.vector_stores import MilvusVectorStore
 
 # construct vector store
 vector_store = MilvusVectorStore(
-    host='foo.vectordb.zillizcloud.com', 
-    port=403, 
-    user="db_admin", 
-    password="foo", 
-    use_secure=True, 
+    host='foo.vectordb.zillizcloud.com',
+    port=403,
+    user="db_admin",
+    password="foo",
+    use_secure=True,
     overwrite='True'
 )
 ```
@@ -347,9 +372,9 @@ from llama_index.vector_stores import MyScaleVectorStore
 
 # Creating a MyScale client
 client = clickhouse_connect.get_client(
-    host='YOUR_CLUSTER_HOST', 
-    port=8443, 
-    username='YOUR_USERNAME', 
+    host='YOUR_CLUSTER_HOST',
+    port=8443,
+    username='YOUR_USERNAME',
     password='YOUR_CLUSTER_PASSWORD'
 )
 
@@ -360,16 +385,15 @@ vector_store = MyScaleVectorStore(
 )
 ```
 
-
 [Example notebooks can be found here](https://github.com/jerryjliu/llama_index/tree/main/docs/examples/vector_stores).
 
-
 ```{toctree}
 ---
 caption: Examples
 maxdepth: 1
 ---
 ../../examples/vector_stores/SimpleIndexDemo.ipynb
+../../examples/vector_stores/RedisIndexDemo.ipynb
 ../../examples/vector_stores/QdrantIndexDemo.ipynb
 ../../examples/vector_stores/FaissIndexDemo.ipynb
 ../../examples/vector_stores/DeepLakeIndexDemo.ipynb
@@ -385,4 +409,3 @@ maxdepth: 1
 ../../examples/vector_stores/PineconeIndexDemo-Hybrid.ipynb
 ../../examples/vector_stores/AsyncIndexCreationDemo.ipynb
 ```
-
diff --git a/examples/vector_indices/RedisIndexDemo.ipynb b/examples/vector_indices/RedisIndexDemo.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..f2c0efdc39d791b2a4a6acdfccf675fd1e71c317
--- /dev/null
+++ b/examples/vector_indices/RedisIndexDemo.ipynb
@@ -0,0 +1,518 @@
+{
+    "cells": [
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "0b692c73",
+            "metadata": {},
+            "source": [
+                "# Using the GPTVectorStoreIndex with Redis"
+            ]
+        },
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "1e7787c2",
+            "metadata": {},
+            "source": [
+                "In this notebook we are going to show a quick demo of using the RedisVectorStore."
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 1,
+            "id": "47264e32",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:20:23.988789Z",
+                    "start_time": "2023-02-10T12:20:23.967877Z"
+                }
+            },
+            "outputs": [
+                {
+                    "name": "stderr",
+                    "output_type": "stream",
+                    "text": [
+                        "/Users/sam.partee/.virtualenvs/llama/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+                        "  from .autonotebook import tqdm as notebook_tqdm\n"
+                    ]
+                }
+            ],
+            "source": [
+                "import logging\n",
+                "import sys\n",
+                "\n",
+                "# Uncomment to see debug logs\n",
+                "# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n",
+                "# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
+                "\n",
+                "from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, Document\n",
+                "from llama_index.vector_stores import RedisVectorStore\n",
+                "from IPython.display import Markdown, display\n",
+                "import textwrap"
+            ]
+        },
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "3c692310",
+            "metadata": {},
+            "source": [
+                "### Start Redis\n",
+                "\n",
+                "The easiest way to start Redis as a vector database is using the [redis-stack](https://hub.docker.com/r/redis/redis-stack) docker image.\n",
+                "\n",
+                "To follow every step of this tutorial, launch the image as follows:\n",
+                "\n",
+                "```bash\n",
+                "docker run --name redis-vecdb -d -p 6379:6379 -p 8001:8001 redis/redis-stack:latest\n",
+                "```\n",
+                "\n",
+                "This will also launch the RedisInsight UI on port 8001 which you can view at http://localhost:8001.\n"
+            ]
+        },
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "f9b97a89",
+            "metadata": {},
+            "source": [
+                "### Setup OpenAI\n",
+                "Let's first begin by adding the OpenAI API key. This will allow us to access OpenAI for embeddings and to use ChatGPT."
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 21,
+            "id": "0c9f4d21-145a-401e-95ff-ccb259e8ef84",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:20:24.908956Z",
+                    "start_time": "2023-02-10T12:20:24.537064Z"
+                },
+                "pycharm": {
+                    "is_executing": true
+                }
+            },
+            "outputs": [],
+            "source": [
+                "import os\n",
+                "os.environ[\"OPENAI_API_KEY\"] = \"sk-<your-key>\"\n",
+                "os.environ[\"TOKENIZERS_PARALLELISM\"] = \"False\""
+            ]
+        },
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "59ff935d",
+            "metadata": {},
+            "source": [
+                "### Read in a dataset\n",
+                "Here we will use a set of Paul Graham essays to provide the text to turn into embeddings, store in a ``RedisVectorStore`` and query to find context for our LLM QnA loop."
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 2,
+            "id": "68cbd239-880e-41a3-98d8-dbb3fab55431",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:20:30.175678Z",
+                    "start_time": "2023-02-10T12:20:30.172456Z"
+                },
+                "pycharm": {
+                    "is_executing": true
+                }
+            },
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        "Document ID: 061e0348-0ae4-4231-b114-0af254f43a10 Document Hash: 77ae91ab542f3abb308c4d7c77c9bc4c9ad0ccd63144802b7cbe7e1bb3a4094e\n"
+                    ]
+                }
+            ],
+            "source": [
+                "# load documents\n",
+                "documents = SimpleDirectoryReader('../paul_graham_essay/data').load_data()\n",
+                "print('Document ID:', documents[0].doc_id, 'Document Hash:', documents[0].doc_hash)"
+            ]
+        },
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "dd270925",
+            "metadata": {},
+            "source": [
+                "### Initialize the Redis Vector Store\n",
+                "\n",
+                "Now that we have our documents read in, we can initialize the Redis Vector Store. This will allow us to store our vectors in Redis and create an index.\n",
+                "\n",
+                "Here is the docstring for the RedisVectorStore:\n",
+                "\n",
+                "```python\n",
+                "class RedisVectorStore(VectorStore):\n",
+                "    \n",
+                "    def __init__(\n",
+                "        self,\n",
+                "        index_name: Optional[str],\n",
+                "        index_prefix: Optional[str] = \"gpt_index\",\n",
+                "        index_args: Optional[Dict[str, Any]] = None,\n",
+                "        redis_url: Optional[str] = \"redis://localhost:6379\",\n",
+                "        overwrite: bool = False,\n",
+                "        **kwargs: Any,\n",
+                "    ) -> None:\n",
+                "        \"\"\"Initialize RedisVectorStore.\n",
+                "\n",
+                "        Args:\n",
+                "            index_name (str): Name of the index.\n",
+                "            index_prefix (str): Prefix for the index. Defaults to \"gpt_index\".\n",
+                "            index_args (Dict[str, Any]): Arguments for the index. Defaults to None.\n",
+                "            redis_url (str): URL for the redis instance. Defaults to \"redis://localhost:6379\".\n",
+                "            overwrite (bool): Whether to overwrite the index if it already exists. Defaults to False.\n",
+                "            kwargs (Any): Additional arguments to pass to the redis client.\n",
+                "\n",
+                "        Raises:\n",
+                "            ValueError: If redis-py is not installed\n",
+                "            ValueError: If RediSearch is not installed\n",
+                "\n",
+                "        Examples:\n",
+                "            >>> from gpt_index.vector_stores.redis import RedisVectorStore\n",
+                "            >>> # Create a RedisVectorStore\n",
+                "            >>> vector_store = RedisVectorStore(\n",
+                "            >>>     index_name=\"my_index\",\n",
+                "            >>>     index_prefix=\"gpt_index\",\n",
+                "            >>>     index_args={\"algorithm\": \"HNSW\", \"m\": 16, \"efConstruction\": 200, \"distance_metric\": \"cosine\"},\n",
+                "            >>>     redis_url=\"redis://localhost:6379/\",\n",
+                "            >>>     overwrite=True)\n",
+                "\n",
+                "        \"\"\"\n",
+                "```\n"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 6,
+            "id": "ba1558b3",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:20:33.735897Z",
+                    "start_time": "2023-02-10T12:20:30.404245Z"
+                },
+                "pycharm": {
+                    "is_executing": true
+                }
+            },
+            "outputs": [],
+            "source": [
+                "from llama_index.storage.storage_context import StorageContext\n",
+                "\n",
+                "\n",
+                "vector_store = RedisVectorStore(\n",
+                "    index_name=\"pg_essays\",\n",
+                "    index_prefix=\"llama\",\n",
+                "    redis_url=\"redis://localhost:6379\",\n",
+                "    overwrite=True\n",
+                ")\n",
+                "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
+                "index = GPTVectorStoreIndex.from_documents(documents, storage_context=storage_context)"
+            ]
+        },
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "04304299-fc3e-40a0-8600-f50c3292767e",
+            "metadata": {},
+            "source": [
+                "# Query the data\n",
+                "Now that we have our document stored in the index, we can ask questions against the index. The index will use the data stored in itself as the knowledge base for ChatGPT."
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 7,
+            "id": "35369eda",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:20:51.328762Z",
+                    "start_time": "2023-02-10T12:20:33.822688Z"
+                }
+            },
+            "outputs": [
+                {
+                    "name": "stderr",
+                    "output_type": "stream",
+                    "text": [
+                        "Token indices sequence length is longer than the specified maximum sequence length for this model (1812 > 1024). Running this sequence through the model will result in indexing errors\n"
+                    ]
+                }
+            ],
+            "source": [
+                "query_engine = index.as_query_engine()\n",
+                "response = query_engine.query(\"What did the author learn?\")"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 8,
+            "id": "bedbb693-725f-478f-be26-fa7180ea38b2",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:20:51.337062Z",
+                    "start_time": "2023-02-10T12:20:51.330857Z"
+                }
+            },
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        " The author learned that the AI programs of the time were not capable of understanding natural\n",
+                        "language, and that the field of AI was a hoax. He also learned that he could make art, and that he\n",
+                        "could pass the entrance exam for the Accademia di Belli Arti in Florence. He also learned Lisp\n",
+                        "hacking and wrote his dissertation on applications of continuations.\n"
+                    ]
+                }
+            ],
+            "source": [
+                "print(textwrap.fill(str(response), 100))"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 9,
+            "id": "99212d33",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:21:10.337294Z",
+                    "start_time": "2023-02-10T12:20:51.338718Z"
+                }
+            },
+            "outputs": [],
+            "source": [
+                "response = query_engine.query(\"What was a hard moment for the author?\")"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 10,
+            "id": "1a720ad6",
+            "metadata": {
+                "ExecuteTime": {
+                    "end_time": "2023-02-10T12:21:10.355872Z",
+                    "start_time": "2023-02-10T12:21:10.343486Z"
+                }
+            },
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        " A hard moment for the author was when he realized that the AI programs of the time were a hoax and\n",
+                        "that there was an unbridgeable gap between what they could do and actually understanding natural\n",
+                        "language.\n"
+                    ]
+                }
+            ],
+            "source": [
+                "print(textwrap.fill(str(response), 100))"
+            ]
+        },
+        {
+            "attachments": {},
+            "cell_type": "markdown",
+            "id": "4d7bc976",
+            "metadata": {},
+            "source": [
+                "### Saving and Loading\n",
+                "\n",
+                "Redis allows the user to perform backups in the background or synchronously. With LlamaIndex, the ``RedisVectorStore.persist()`` function can be used to trigger such a backup."
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 11,
+            "id": "09836567",
+            "metadata": {},
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        "redis  redisinsight\n"
+                    ]
+                }
+            ],
+            "source": [
+                "!docker exec -it redis-vecdb ls /data"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 13,
+            "id": "93ef500b",
+            "metadata": {},
+            "outputs": [],
+            "source": [
+                "vector_store.persist(persist_path=\"\") # persist_path means nothing for RedisVectorStore"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 14,
+            "id": "ed5ab256",
+            "metadata": {},
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        "dump.rdb  redis  redisinsight\n"
+                    ]
+                }
+            ],
+            "source": [
+                "!docker exec -it redis-vecdb ls /data"
+            ]
+        },
+        {
+            "cell_type": "markdown",
+            "id": "52b975a7",
+            "metadata": {},
+            "source": [
+                "### Deleting documents or index completely\n",
+                "\n",
+                "Sometimes it may be useful to delete documents or the entire index. This can be done using the `delete` and `delete_index` methods."
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 15,
+            "id": "6fe322f7",
+            "metadata": {},
+            "outputs": [
+                {
+                    "data": {
+                        "text/plain": [
+                            "'061e0348-0ae4-4231-b114-0af254f43a10'"
+                        ]
+                    },
+                    "execution_count": 15,
+                    "metadata": {},
+                    "output_type": "execute_result"
+                }
+            ],
+            "source": [
+                "document_id = documents[0].doc_id\n",
+                "document_id"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 16,
+            "id": "ae4fb2b0",
+            "metadata": {},
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        "Number of documents 43\n"
+                    ]
+                }
+            ],
+            "source": [
+                "redis_client = vector_store.client\n",
+                "print(\"Number of documents\", len(redis_client.keys()))"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 17,
+            "id": "0ce45788",
+            "metadata": {},
+            "outputs": [],
+            "source": [
+                "vector_store.delete(document_id)"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 18,
+            "id": "4a1ac683",
+            "metadata": {},
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        "Number of documents 33\n"
+                    ]
+                }
+            ],
+            "source": [
+                "print(\"Number of documents\", len(redis_client.keys()))"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 19,
+            "id": "c380605a",
+            "metadata": {},
+            "outputs": [],
+            "source": [
+                "# now lets delete the index entirely (happens in the background, may take a second)\n",
+                "# this will delete all the documents and the index\n",
+                "vector_store.delete_index()"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": 20,
+            "id": "474ad4ee",
+            "metadata": {},
+            "outputs": [
+                {
+                    "name": "stdout",
+                    "output_type": "stream",
+                    "text": [
+                        "Number of documents 0\n"
+                    ]
+                }
+            ],
+            "source": [
+                "print(\"Number of documents\", len(redis_client.keys()))"
+            ]
+        },
+        {
+            "cell_type": "code",
+            "execution_count": null,
+            "id": "4a028452",
+            "metadata": {},
+            "outputs": [],
+            "source": []
+        }
+    ],
+    "metadata": {
+        "kernelspec": {
+            "display_name": "Python 3 (ipykernel)",
+            "language": "python",
+            "name": "python3"
+        },
+        "language_info": {
+            "codemirror_mode": {
+                "name": "ipython",
+                "version": 3
+            },
+            "file_extension": ".py",
+            "mimetype": "text/x-python",
+            "name": "python",
+            "nbconvert_exporter": "python",
+            "pygments_lexer": "ipython3",
+            "version": "3.8.13"
+        }
+    },
+    "nbformat": 4,
+    "nbformat_minor": 5
+}
diff --git a/llama_index/readers/redis/__init__.py b/llama_index/readers/redis/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llama_index/readers/redis/utils.py b/llama_index/readers/redis/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9deb28a23ae490bcae84b70328bb8ead0eb823a
--- /dev/null
+++ b/llama_index/readers/redis/utils.py
@@ -0,0 +1,102 @@
+import logging
+import re
+from typing import TYPE_CHECKING, Any, List, Optional, Pattern
+
+import numpy as np
+
+_logger = logging.getLogger(__name__)
+
+if TYPE_CHECKING:
+    from redis.client import Redis as RedisType
+    from redis.commands.search.query import Query
+
+class TokenEscaper:
+    """
+    Escape punctuation within an input string. Taken from RedisOM Python.
+    """
+
+    # Characters that RediSearch requires us to escape during queries.
+    # Source: https://redis.io/docs/stack/search/reference/escaping/#the-rules-of-text-field-tokenization
+    DEFAULT_ESCAPED_CHARS = r"[,.<>{}\[\]\\\"\':;!@#$%^&*()\-+=~\/ ]"
+
+    def __init__(self, escape_chars_re: Optional[Pattern] = None):
+        if escape_chars_re:
+            self.escaped_chars_re = escape_chars_re
+        else:
+            self.escaped_chars_re = re.compile(self.DEFAULT_ESCAPED_CHARS)
+
+    def escape(self, value: str) -> str:
+        def escape_symbol(match: re.Match) -> str:
+            value = match.group(0)
+            return f"\\{value}"
+
+        return self.escaped_chars_re.sub(escape_symbol, value)
+
+
+# required modules
+REDIS_REQUIRED_MODULES = [
+    {"name": "search", "ver": 20400},
+    {"name": "searchlight", "ver": 20400},
+]
+
+
+def check_redis_modules_exist(client: 'RedisType') -> None:
+    """Check if the correct Redis modules are installed."""
+    installed_modules = client.module_list()
+    installed_modules = {
+        module[b"name"].decode("utf-8"): module for module in installed_modules
+    }
+    for module in REDIS_REQUIRED_MODULES:
+        if module["name"] in installed_modules and int(
+            installed_modules[module["name"]][b"ver"]
+        ) >= int(module["ver"]): # type: ignore[call-overload]
+            return
+    # otherwise raise error
+    error_message = (
+        "You must add the RediSearch (>= 2.4) module from Redis Stack. "
+        "Please refer to Redis Stack docs: https://redis.io/docs/stack/"
+    )
+    _logger.error(error_message)
+    raise ValueError(error_message)
+
+
+def get_redis_query(
+    return_fields: List[str],
+    top_k: int = 20,
+    vector_field: str = "vector",
+    sort: bool = True,
+    filters: str = "*",
+) -> 'Query':
+    """Create a vector query for use with a SearchIndex.
+
+    Args:
+        return_fields (List[str]): A list of fields to return in the query results
+        top_k (int, optional): The number of results to return. Defaults to 20.
+        vector_field (str, optional): The name of the vector field in the index. Defaults to "vector".
+        sort (bool, optional): Whether to sort the results by score. Defaults to True.
+        filters (str, optional): string to filter the results by. Defaults to "*".
+
+    """
+    from redis.commands.search.query import Query  # noqa: F401
+
+    base_query = f"{filters}=>[KNN {top_k} @{vector_field} $vector AS vector_score]"
+    query = Query(base_query).return_fields(*return_fields).dialect(2)
+    if sort:
+        query.sort_by("vector_score")
+    return query
+
+
+def convert_bytes(data: Any) -> Any:
+    if isinstance(data, bytes):
+        return data.decode("ascii")
+    if isinstance(data, dict):
+        return dict(map(convert_bytes, data.items()))
+    if isinstance(data, list):
+        return list(map(convert_bytes, data))
+    if isinstance(data, tuple):
+        return map(convert_bytes, data)
+    return data
+
+
+def array_to_buffer(array: List[float], dtype: Any=np.float32) -> bytes:
+    return np.array(array).astype(dtype).tobytes()
diff --git a/llama_index/vector_stores/__init__.py b/llama_index/vector_stores/__init__.py
index 0150e66a811c7f0e82dac50ee2a85e9c26beef78..1b791dba633cc113ea948d25f64f51bb9bf6384d 100644
--- a/llama_index/vector_stores/__init__.py
+++ b/llama_index/vector_stores/__init__.py
@@ -4,9 +4,9 @@ from llama_index.vector_stores.chatgpt_plugin import ChatGPTRetrievalPluginClien
 from llama_index.vector_stores.chroma import ChromaVectorStore
 from llama_index.vector_stores.deeplake import DeepLakeVectorStore
 from llama_index.vector_stores.faiss import FaissVectorStore
-from llama_index.vector_stores.milvus import MilvusVectorStore
 from llama_index.vector_stores.lancedb import LanceDBVectorStore
 from llama_index.vector_stores.metal import MetalVectorStore
+from llama_index.vector_stores.milvus import MilvusVectorStore
 from llama_index.vector_stores.myscale import MyScaleVectorStore
 from llama_index.vector_stores.opensearch import (
     OpensearchVectorClient,
@@ -14,11 +14,13 @@ from llama_index.vector_stores.opensearch import (
 )
 from llama_index.vector_stores.pinecone import PineconeVectorStore
 from llama_index.vector_stores.qdrant import QdrantVectorStore
+from llama_index.vector_stores.redis import RedisVectorStore
 from llama_index.vector_stores.simple import SimpleVectorStore
 from llama_index.vector_stores.weaviate import WeaviateVectorStore
 
 __all__ = [
     "SimpleVectorStore",
+    "RedisVectorStore",
     "FaissVectorStore",
     "PineconeVectorStore",
     "WeaviateVectorStore",
diff --git a/llama_index/vector_stores/redis.py b/llama_index/vector_stores/redis.py
new file mode 100644
index 0000000000000000000000000000000000000000..933bc37442f67a9b508b1a09ff87d3b31c0a63cf
--- /dev/null
+++ b/llama_index/vector_stores/redis.py
@@ -0,0 +1,339 @@
+"""Redis Vector store index.
+
+An index that is built on top of an existing vector store.
+"""
+import logging
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from llama_index.data_structs.node import DocumentRelationship, Node
+from llama_index.readers.redis.utils import (TokenEscaper, array_to_buffer,
+                                             check_redis_modules_exist,
+                                             convert_bytes, get_redis_query)
+from llama_index.vector_stores.types import (NodeWithEmbedding, VectorStore,
+                                             VectorStoreQuery,
+                                             VectorStoreQueryResult)
+
+_logger = logging.getLogger(__name__)
+
+
+if TYPE_CHECKING:
+    from redis.client import Redis as RedisType
+    from redis.commands.search.field import VectorField
+
+
+class RedisVectorStore(VectorStore):
+    stores_text = True
+    stores_node = True
+
+    tokenizer = TokenEscaper()
+
+    def __init__(
+        self,
+        index_name: str,
+        index_prefix: str = "llama_index",
+        index_args: Optional[Dict[str, Any]] = None,
+        redis_url: str = "redis://localhost:6379",
+        overwrite: bool = False,
+        **kwargs: Any,
+    ) -> None:
+        """Initialize RedisVectorStore.
+
+        For index arguments that can be passed to RediSearch, see
+        https://redis.io/docs/stack/search/reference/vectors/
+
+        The index arguments will depend on the index type chosen. There
+        are two available index types
+            - FLAT: a flat index that uses brute force search
+            - HNSW: a hierarchical navigable small world graph index
+
+        Args:
+            index_name (str): Name of the index.
+            index_prefix (str): Prefix for the index. Defaults to "llama_index".
+            index_args (Dict[str, Any]): Arguments for the index. Defaults to None.
+            redis_url (str): URL for the redis instance. Defaults to "redis://localhost:6379".
+            overwrite (bool): Whether to overwrite the index if it already exists. Defaults to False.
+            kwargs (Any): Additional arguments to pass to the redis client.
+
+        Raises:
+            ValueError: If redis-py is not installed
+            ValueError: If RediSearch is not installed
+
+        Examples:
+            >>> from llama_index.vector_stores.redis import RedisVectorStore
+            >>> # Create a RedisVectorStore
+            >>> vector_store = RedisVectorStore(
+            >>>     index_name="my_index",
+            >>>     index_prefix="gpt_index",
+            >>>     index_args={"algorithm": "HNSW", "m": 16, "ef_construction": 200, "distance_metric": "cosine"},
+            >>>     redis_url="redis://localhost:6379/",
+            >>>     overwrite=True)
+
+        """
+        try:
+            import redis
+        except ImportError:
+            raise ValueError(
+                "Could not import redis python package. "
+                "Please install it with `pip install redis`."
+            )
+        try:
+            # connect to redis from url
+            self._redis_client = redis.from_url(redis_url, **kwargs)
+            # check if redis has redisearch module installed
+            check_redis_modules_exist(self._redis_client)
+        except ValueError as e:
+            raise ValueError(f"Redis failed to connect: {e}")
+
+        # index identifiers
+        self._prefix = index_prefix
+        self._index_name = index_name
+        self._index_args = index_args if index_args is not None else {}
+        self._overwrite = overwrite
+        self._vector_field = str(self._index_args.get("vector_field", "vector"))
+        self._vector_key = str(self._index_args.get("vector_key", "vector"))
+
+
+
+    @property
+    def client(self) -> 'RedisType':
+        """Return the redis client instance"""
+        return self._redis_client
+
+    def add(self, embedding_results: List[NodeWithEmbedding]) -> List[str]:
+        """Add embedding results to the index.
+
+        Args:
+            embedding_results (List[NodeWithEmbedding]): List of embedding results to add to the index.
+
+        Returns:
+            List[str]: List of ids of the documents added to the index.
+
+        Raises:
+            ValueError: If the index already exists and overwrite is False.
+        """
+
+        # check index exists, call once to avoid calling multiple times
+        index_exists = self._index_exists()
+        if index_exists and self._overwrite:
+            self.delete_index()
+
+        elif index_exists and not self._overwrite:
+            raise ValueError("Index already exists and overwrite is False.")
+
+        else:  # index does not exist, create it
+            # get vector dim from embedding results if index does not exist
+            # as it will be created from the embedding result attributes.
+            self._index_args["dims"] = len(embedding_results[0].embedding)
+            self._create_index()
+
+        ids = []
+        for result in embedding_results:
+            # Add extra info and node info to the index
+            # cast types to satisfy mypy
+            node_info = cast_metadata_types(result.node.node_info)
+            extra_info = cast_metadata_types(result.node.extra_info)
+
+            mapping = {
+                "id": result.id,
+                "doc_id": result.ref_doc_id,
+                "text": result.node.get_text(),
+                self._vector_key: array_to_buffer(result.embedding),
+                **node_info,
+                **extra_info,
+            }
+            ids.append(result.id)
+            key = "_".join([self._prefix, str(result.id)])
+            self._redis_client.hset(key, mapping=mapping)  # type: ignore
+
+        _logger.info(f"Added {len(ids)} documents to index {self._index_name}")
+        return ids
+
+    def delete(self, doc_id: str, **delete_kwargs: Any) -> None:
+        """Delete a specific document from the index by doc_id
+
+        Args:
+            doc_id (str): The doc_id of the document to delete.
+            delete_kwargs (Any): Additional arguments to pass to the delete method.
+
+        """
+        # use tokenizer to escape dashes in query
+        query_str = "@doc_id:{%s}" % self.tokenizer.escape(doc_id)
+        # find all documents that match a doc_id
+        results = self._redis_client.ft(self._index_name).search(query_str)
+
+        for doc in results.docs:
+            self._redis_client.delete(doc.id)
+        _logger.info(
+            f"Deleted {len(results.docs)} documents from index {self._index_name}"
+        )
+
+    def delete_index(self) -> None:
+        """Delete the index and all documents."""
+        _logger.info(f"Deleting index {self._index_name}")
+        self._redis_client.ft(self._index_name).dropindex(delete_documents=True)
+
+    def query(self, query: VectorStoreQuery) -> VectorStoreQueryResult:
+        """Query the index.
+
+        Args:
+            query (VectorStoreQuery): query object
+
+        Returns:
+            VectorStoreQueryResult: query result
+        """
+        from redis.exceptions import ResponseError as RedisResponseError
+
+
+        return_fields = ["id", "doc_id", "text", self._vector_key, "vector_score"]
+
+        redis_query = get_redis_query(
+            return_fields=return_fields, top_k=query.similarity_top_k, vector_field=self._vector_field
+        )
+        if not query.query_embedding:
+            raise ValueError("Query embedding is required for querying.")
+
+        query_params = {
+            "vector": array_to_buffer(query.query_embedding),
+        }
+        _logger.info(f"Querying index {self._index_name}")
+
+        try:
+            results = self._redis_client.ft(self._index_name).search(
+                redis_query, query_params=query_params # type: ignore
+            )
+        except RedisResponseError as e:
+            _logger.error(f"Error querying index {self._index_name}: {e}")
+            raise e
+
+        ids = []
+        nodes = []
+        scores = []
+        for doc in results.docs:
+            node = Node(
+                text=doc.text,
+                doc_id=doc.id,
+                embedding=None,
+                relationships={DocumentRelationship.SOURCE: doc.doc_id},
+            )
+            ids.append(doc.id)
+            nodes.append(node)
+            scores.append(1 - float(doc.vector_score))
+        _logger.info(f"Found {len(nodes)} results for query with id {ids}")
+
+        return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=scores)
+
+    def persist(self, persist_path: str, in_background: bool=True) -> None:
+        """Persist the vector store to disk.
+
+        Args:
+            persist_path (str): Path to persist the vector store to. (doesn't apply)
+            in_background (bool, optional): Persist in background. Defaults to True.
+        """
+        if in_background:
+            _logger.info("Saving index to disk in background")
+            self._redis_client.bgsave()
+        else:
+            _logger.info("Saving index to disk")
+            self._redis_client.save()
+
+    def _create_index(self) -> None:
+        # should never be called outside class and hence should not raise importerror
+        from redis.commands.search.field import TagField, TextField
+        from redis.commands.search.indexDefinition import (IndexDefinition,
+                                                           IndexType)
+
+        # Create Index
+        default_fields = [
+            TextField("text", weight=1.0),
+            TagField("doc_id", sortable=False),
+            TagField("id", sortable=False),
+        ]
+        # add vector field to list of index fields. Create lazily to allow user
+        # to specify index and search attributes in creation.
+        fields = default_fields + [
+            self._create_vector_field(self._vector_field, **self._index_args)
+        ]
+
+        _logger.info(f"Creating index {self._index_name}")
+        self._redis_client.ft(self._index_name).create_index(
+            fields=fields,
+            definition=IndexDefinition(
+                prefix=[self._prefix], index_type=IndexType.HASH
+            ),  # TODO support JSON
+        )
+
+    def _index_exists(self) -> bool:
+        # use FT._LIST to check if index exists
+        indices = convert_bytes(self._redis_client.execute_command("FT._LIST"))
+        return self._index_name in indices
+
+    def _create_vector_field(
+        self,
+        name: str,
+        dims: int = 1536,
+        algorithm: str = "FLAT",
+        datatype: str = "FLOAT32",
+        distance_metric: str = "COSINE",
+        initial_cap: int = 20000,
+        block_size: int = 1000,
+        m: int = 16,
+        ef_construction: int = 200,
+        ef_runtime: int = 10,
+        epsilon: float = 0.8,
+        **kwargs: Any,
+    ) -> 'VectorField':
+        """Create a RediSearch VectorField.
+
+        Args:
+            name (str): The name of the field.
+            algorithm (str): The algorithm used to index the vector.
+            dims (int): The dimensionality of the vector.
+            datatype (str): The type of the vector. default: FLOAT32
+            distance_metric (str): The distance metric used to compare vectors.
+            initial_cap (int): The initial capacity of the index.
+            block_size (int): The block size of the index.
+            m (int): The number of outgoing edges in the HNSW graph.
+            ef_construction (int): Number of maximum allowed potential outgoing edges
+                            candidates for each node in the graph, during the graph building.
+            ef_runtime (int): The umber of maximum top candidates to hold during the KNN search
+
+        returns:
+            A RediSearch VectorField.
+        """
+        from redis.commands.search.field import VectorField
+
+        if algorithm.upper() == "HNSW":
+            return VectorField(
+                name,
+                "HNSW",
+                {
+                    "TYPE": datatype.upper(),
+                    "DIM": dims,
+                    "DISTANCE_METRIC": distance_metric.upper(),
+                    "INITIAL_CAP": initial_cap,
+                    "M": m,
+                    "EF_CONSTRUCTION": ef_construction,
+                    "EF_RUNTIME": ef_runtime,
+                    "EPSILON": epsilon,
+                },
+            )
+        else:
+            return VectorField(
+                name,
+                "FLAT",
+                {
+                    "TYPE": datatype.upper(),
+                    "DIM": dims,
+                    "DISTANCE_METRIC": distance_metric.upper(),
+                    "INITIAL_CAP": initial_cap,
+                    "BLOCK_SIZE": block_size,
+                },
+            )
+
+
+def cast_metadata_types(mapping: Optional[Dict[str, Any]]) -> Dict[str, str]:
+    metadata = {}
+    if mapping:
+        for key, value in mapping.items():
+            metadata[str(key)] = str(value)
+    return metadata
\ No newline at end of file
diff --git a/llama_index/vector_stores/registry.py b/llama_index/vector_stores/registry.py
index c9b32df962542a4b0866a2551376cccb642008c8..3a89b777db5790c3ad71f37b3504287ce82210fd 100644
--- a/llama_index/vector_stores/registry.py
+++ b/llama_index/vector_stores/registry.py
@@ -5,12 +5,13 @@ from llama_index.vector_stores.chatgpt_plugin import ChatGPTRetrievalPluginClien
 from llama_index.vector_stores.chroma import ChromaVectorStore
 from llama_index.vector_stores.deeplake import DeepLakeVectorStore
 from llama_index.vector_stores.faiss import FaissVectorStore
-from llama_index.vector_stores.milvus import MilvusVectorStore
 from llama_index.vector_stores.lancedb import LanceDBVectorStore
+from llama_index.vector_stores.milvus import MilvusVectorStore
 from llama_index.vector_stores.myscale import MyScaleVectorStore
 from llama_index.vector_stores.opensearch import OpensearchVectorStore
 from llama_index.vector_stores.pinecone import PineconeVectorStore
 from llama_index.vector_stores.qdrant import QdrantVectorStore
+from llama_index.vector_stores.redis import RedisVectorStore
 from llama_index.vector_stores.simple import SimpleVectorStore
 from llama_index.vector_stores.types import VectorStore
 from llama_index.vector_stores.weaviate import WeaviateVectorStore
@@ -18,6 +19,7 @@ from llama_index.vector_stores.weaviate import WeaviateVectorStore
 
 class VectorStoreType(str, Enum):
     SIMPLE = "simple"
+    REDIS = "redis"
     WEAVIATE = "weaviate"
     QDRANT = "qdrant"
     PINECONE = "pinecone"
@@ -33,6 +35,7 @@ class VectorStoreType(str, Enum):
 
 VECTOR_STORE_TYPE_TO_VECTOR_STORE_CLASS: Dict[VectorStoreType, Type[VectorStore]] = {
     VectorStoreType.SIMPLE: SimpleVectorStore,
+    VectorStoreType.REDIS: RedisVectorStore,
     VectorStoreType.WEAVIATE: WeaviateVectorStore,
     VectorStoreType.QDRANT: QdrantVectorStore,
     VectorStoreType.LANCEDB: LanceDBVectorStore,