diff --git a/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md b/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
index 0b8bf6c6ae2a03c1e31bd537f00bfe8d14e887ac..5de9c8281ed4c9c5418e67536ea1a3292142a759 100644
--- a/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
+++ b/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
@@ -41,6 +41,9 @@ documents = SimpleDirectoryReader("./data").load_data()
 
 # uses the LLM to extract propositions from every document/node!
 dense_pack = DenseXRetrievalPack(documents)
+
+# for streaming
+dense_pack = DenseXRetrievalPack(documents, streaming=True)
 ```
 
 The `run()` function is a light wrapper around `query_engine.query()`.
@@ -51,4 +54,14 @@ response = dense_pack.run("What can you tell me about LLMs?")
 print(response)
 ```
 
+For streaming:
+
+When the pack is constructed with `streaming=True`, `run()` returns a streaming response:
+
+```python
+stream_response = dense_pack.run("What can you tell me about LLMs?")
+
+stream_response.print_response_stream()
+```
+
 See the [notebook on llama-hub](https://github.com/run-llama/llama-hub/blob/main/llama_hub/llama_packs/dense_x_retrieval/dense_x_retrieval.ipynb) for a full example.
diff --git a/llama-index-packs/llama-index-packs-dense-x-retrieval/llama_index/packs/dense_x_retrieval/base.py b/llama-index-packs/llama-index-packs-dense-x-retrieval/llama_index/packs/dense_x_retrieval/base.py
index 64f9daffb948f53b1cf0950b9fd33e7a34d2f9a3..5811f0bcba70f84f3e9d4f6fca1539adca27efe3 100644
--- a/llama-index-packs/llama-index-packs-dense-x-retrieval/llama_index/packs/dense_x_retrieval/base.py
+++ b/llama-index-packs/llama-index-packs-dense-x-retrieval/llama_index/packs/dense_x_retrieval/base.py
@@ -75,6 +75,7 @@ class DenseXRetrievalPack(BaseLlamaPack):
         embed_model: Optional[BaseEmbedding] = None,
         text_splitter: TextSplitter = SentenceSplitter(),
         similarity_top_k: int = 4,
+        streaming: bool = False,
     ) -> None:
         """Init params."""
         self._proposition_llm = proposition_llm or OpenAI(
@@ -112,7 +113,9 @@ class DenseXRetrievalPack(BaseLlamaPack):
         )
 
         self.query_engine = RetrieverQueryEngine.from_args(
-            self.retriever, service_context=service_context
+            self.retriever,
+            service_context=service_context,
+            streaming=streaming,
         )
 
     async def _aget_proposition(self, node: TextNode) -> List[TextNode]:
diff --git a/llama-index-packs/llama-index-packs-dense-x-retrieval/pyproject.toml b/llama-index-packs/llama-index-packs-dense-x-retrieval/pyproject.toml
index e39f3e588e8efe5b16b7cc77fd675b79da8caaaa..d43a6f60d580e9921a3bddc0e097dd10f0697285 100644
--- a/llama-index-packs/llama-index-packs-dense-x-retrieval/pyproject.toml
+++ b/llama-index-packs/llama-index-packs-dense-x-retrieval/pyproject.toml
@@ -28,7 +28,7 @@ license = "MIT"
 maintainers = ["logan-markewich"]
 name = "llama-index-packs-dense-x-retrieval"
 readme = "README.md"
-version = "0.1.3"
+version = "0.1.4"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"