diff --git a/docs/BUILD b/docs/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..db46e8d6c978c67e301dd6c47bee08c1b3fd141c
--- /dev/null
+++ b/docs/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/docs/examples/discover_llamaindex/document_management/BUILD b/docs/examples/discover_llamaindex/document_management/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..db46e8d6c978c67e301dd6c47bee08c1b3fd141c
--- /dev/null
+++ b/docs/examples/discover_llamaindex/document_management/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/docs/examples/finetuning/embeddings/BUILD b/docs/examples/finetuning/embeddings/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..db46e8d6c978c67e301dd6c47bee08c1b3fd141c
--- /dev/null
+++ b/docs/examples/finetuning/embeddings/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/docs/examples/llm/mymagic.ipynb b/docs/examples/llm/mymagic.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..df35e377d987d10f31fb8fd4d62f0a3e4daf138b
--- /dev/null
+++ b/docs/examples/llm/mymagic.ipynb
@@ -0,0 +1,162 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# MyMagic AI LLM"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Introduction\n",
+    "This notebook demonstrates how to use MyMagicAI for batch inference on large datasets stored in cloud buckets. The only endpoints currently implemented are `complete` and `acomplete`, which support many use cases, including completion, summarization, and extraction.\n",
+    "To use this notebook, you need an API key (Personal Access Token) from MyMagicAI and data stored in a cloud bucket.\n",
+    "Sign up by clicking Get Started at [MyMagicAI's website](https://mymagic.ai/) to get your API key.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup\n",
+    "To set up your bucket and grant the MyMagic API secure access to your cloud storage, please visit the [MyMagic docs](https://docs.mymagic.ai/).\n",
+    "If you're opening this notebook on Colab, you will probably need to install LlamaIndex 🦙.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%pip install llama-index-llms-mymagic"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install llama-index"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.llms.mymagic import MyMagicAI"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm = MyMagicAI(\n",
+    "    api_key=\"your_api_key\",\n",
+    "    storage_provider=\"your_storage_provider\",  # s3, gcs\n",
+    "    bucket_name=\"your_bucket_name\",\n",
+    "    session=\"your_session\",  # the bucket sub-folder containing the files to run batch inference on\n",
+    "    system_prompt=\"Answer the question succinctly\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "resp = llm.complete(\n",
+    "    question=\"Summarize the document!\",\n",
+    "    model=\"mistral7b\",  # currently supported: mistral7b, llama7b, mixtral8x7b, codellama70b, llama70b; more to come\n",
+    "    max_tokens=10,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The response indicates that the final output is stored in your bucket; an exception is raised if the job failed\n",
+    "print(resp)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Asynchronous Requests Using the `acomplete` Endpoint\n",
+    "For asynchronous operations, use the following approach."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import asyncio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "async def main():\n",
+    "    allm = MyMagicAI(\n",
+    "        api_key=\"your_api_key\",\n",
+    "        storage_provider=\"your_storage_provider\",\n",
+    "        bucket_name=\"your_bucket_name\",\n",
+    "        session=\"your_session_name\",\n",
+    "        system_prompt=\"your_system_prompt\",\n",
+    "    )\n",
+    "    response = await allm.acomplete(\n",
+    "        question=\"your_question\", model=\"mistral7b\", max_tokens=10\n",
+    "    )\n",
+    "\n",
+    "    print(\"Async completion response:\", response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "asyncio.run(main())"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "llama-index-dKXgjzWQ-py3.11",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/examples/output_parsing/BUILD b/docs/examples/output_parsing/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..db46e8d6c978c67e301dd6c47bee08c1b3fd141c
--- /dev/null
+++ b/docs/examples/output_parsing/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/.gitignore b/llama-index-integrations/llms/llama-index-llms-mymagic/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..990c18de229088f55c6c514fd0f2d49981d1b0e7
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/.gitignore
@@ -0,0 +1,153 @@
+llama_index/_static
+.DS_Store
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+bin/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+etc/
+include/
+lib/
+lib64/
+parts/
+sdist/
+share/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+.ruff_cache
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+notebooks/
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+pyvenv.cfg
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# Jetbrains
+.idea
+modules/
+*.swp
+
+# VsCode
+.vscode
+
+# pipenv
+Pipfile
+Pipfile.lock
+
+# pyright
+pyrightconfig.json
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/BUILD b/llama-index-integrations/llms/llama-index-llms-mymagic/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..0b67818b466ca124328fee9578153b45932c2375
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/BUILD
@@ -0,0 +1,5 @@
+python_sources()
+
+poetry_requirements(
+    name="poetry",
+)
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/Makefile b/llama-index-integrations/llms/llama-index-llms-mymagic/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b9eab05aa370629a4a3de75df3ff64cd53887b68
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/Makefile
@@ -0,0 +1,17 @@
+GIT_ROOT ?= $(shell git rev-parse --show-toplevel)
+
+help:	## Show all Makefile targets.
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'
+
+format:	## Run code autoformatters (black).
+	pre-commit install
+	git ls-files | xargs pre-commit run black --files
+
+lint:	## Run linters: pre-commit (black, ruff, codespell) and mypy
+	pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files
+
+test:	## Run tests via pytest.
+	pytest tests
+
+watch-docs:	## Build and watch documentation.
+	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/README.md b/llama-index-integrations/llms/llama-index-llms-mymagic/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c016b53ecc3eac5a4d9cf2e0dc5118343e3369d9
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/README.md
@@ -0,0 +1,22 @@
+# LlamaIndex Llms Integration: Mymagic
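+
+## Usage
+
+A minimal sketch of batch inference with this integration; the API key, bucket name, and session below are placeholders you must replace with your own values:
+
+```python
+from llama_index.llms.mymagic import MyMagicAI
+
+llm = MyMagicAI(
+    api_key="your-api-key",  # Personal Access Token from MyMagicAI
+    storage_provider="s3",  # or "gcs"
+    bucket_name="your-bucket-name",
+    session="your-session",  # bucket sub-folder containing the input files
+    system_prompt="Answer the question succinctly.",
+)
+
+resp = llm.complete(
+    question="Summarize the document!", model="mistral7b", max_tokens=10
+)
+print(resp)
+```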
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/BUILD b/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..db46e8d6c978c67e301dd6c47bee08c1b3fd141c
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/__init__.py b/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e36160260b5aa69a40d7ec1ff1f6ae60b0e1b54f
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/__init__.py
@@ -0,0 +1,4 @@
+from llama_index.llms.mymagic.base import MyMagicAI
+
+
+__all__ = ["MyMagicAI"]
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/base.py b/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..f44a29ef4ce4b1aabcc853d35171f75875115bb2
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/base.py
@@ -0,0 +1,186 @@
+import time
+from typing import Any, Dict, Optional
+import httpx
+import asyncio
+import requests
+from llama_index.core.base.llms.types import (
+    CompletionResponse,
+    CompletionResponseGen,
+    ChatResponse,
+    ChatResponseGen,
+    ChatResponseAsyncGen,
+    CompletionResponseAsyncGen,
+    LLMMetadata,
+)
+from llama_index.core.llms.llm import LLM
+from llama_index.core.bridge.pydantic import Field
+
+
+class MyMagicAI(LLM):
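+    """MyMagicAI batch-inference LLM.
+
+    Submits a question to the MyMagicAI API for batch inference over the
+    files in a cloud bucket and polls until the job completes; final
+    outputs are written back to the bucket.
+    """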
+    base_url_template: str = "https://{model}.mymagic.ai"
+    api_key: Optional[str] = Field(
+        default=None, description="The MyMagicAI API key (Personal Access Token)."
+    )
+    model: str = Field(default="mistral7b", description="The MyMagicAI model to use.")
+    max_tokens: int = Field(
+        default=10, description="The maximum number of tokens to generate."
+    )
+    storage_provider: str = Field(
+        default="gcs", description="The storage provider to use."
+    )
+    bucket_name: str = Field(
+        default="your-bucket-name",
+        description="The bucket name where the data is stored.",
+    )
+    session: str = Field(
+        default="test-session",
+        description="The session to use. This is a subfolder in the bucket where your data is located.",
+    )
+    role_arn: Optional[str] = Field(
+        None, description="ARN for role assumption in AWS S3"
+    )
+    system_prompt: str = Field(
+        default="Answer the question based only on the given content. Do not give explanations or examples. Do not continue generating more text after the answer.",
+        description="The system prompt to use.",
+    )
+    question_data: Dict[str, Any] = Field(
+        default_factory=dict, description="The data to send to the MyMagicAI API."
+    )
+
+    def __init__(
+        self,
+        api_key: str,
+        storage_provider: str,
+        bucket_name: str,
+        session: str,
+        system_prompt: Optional[str],
+        role_arn: Optional[str] = None,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+
+        self.question_data = {
+            "storage_provider": storage_provider,
+            "bucket_name": bucket_name,
+            "personal_access_token": api_key,
+            "session": session,
+            "max_tokens": self.max_tokens,
+            "role_arn": role_arn,
+            "system_prompt": system_prompt,
+        }
+
+    @classmethod
+    def class_name(cls) -> str:
+        return "MyMagicAI"
+
+    def _construct_url(self, model: str) -> str:
+        """Constructs the API endpoint URL based on the specified model."""
+        return self.base_url_template.format(model=model)
+
+    async def _submit_question(self, question_data: Dict[str, Any]) -> Dict[str, Any]:
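+        """Submits a question to the model asynchronously."""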
+        async with httpx.AsyncClient() as client:
+            url = f"{self._construct_url(self.model)}/submit_question"
+            resp = await client.post(url, json=question_data)
+            resp.raise_for_status()
+            return resp.json()
+
+    def _submit_question_sync(self, question_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Submits a question to the model synchronously."""
+        url = f"{self._construct_url(self.model)}/submit_question"
+        resp = requests.post(url, json=question_data)
+        resp.raise_for_status()
+        return resp.json()
+
+    def _get_result_sync(self, task_id: str) -> Dict[str, Any]:
+        """Polls for the result of a task synchronously."""
+        url = f"{self._construct_url(self.model)}/get_result/{task_id}"
+        response = requests.get(url)
+        response.raise_for_status()
+        return response.json()
+
+    async def _get_result(self, task_id: str) -> Dict[str, Any]:
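+        """Polls for the result of a task asynchronously."""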
+        async with httpx.AsyncClient() as client:
+            resp = await client.get(
+                f"{self._construct_url(self.model)}/get_result/{task_id}"
+            )
+            resp.raise_for_status()
+            return resp.json()
+
+    async def acomplete(
+        self, question: str, model: str, max_tokens: int, poll_interval: float = 1.0
+    ) -> CompletionResponse:
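+        """Submits the question and polls asynchronously until the batch job completes."""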
+        self.question_data["question"] = question
+        self.question_data["model"] = model
+        self.question_data["max_tokens"] = max_tokens
+        self.model = model  # keep the endpoint URL in sync with the requested model
+
+        task_response = await self._submit_question(self.question_data)
+        task_id = task_response.get("task_id")
+        # Poll until the task leaves the PENDING state.
+        while True:
+            result = await self._get_result(task_id)
+            if result["status"] != "PENDING":
+                return CompletionResponse(
+                    text=result.get("message", ""),
+                    additional_kwargs={"status": result["status"]},
+                )
+            await asyncio.sleep(poll_interval)
+
+    def complete(
+        self, question: str, model: str, max_tokens: int, poll_interval: float = 1.0
+    ) -> CompletionResponse:
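+        """Submits the question and polls synchronously until the batch job completes."""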
+        self.question_data["question"] = question
+        self.question_data["model"] = model
+        self.question_data["max_tokens"] = max_tokens
+        self.model = model  # keep the endpoint URL in sync with the requested model
+
+        task_response = self._submit_question_sync(self.question_data)
+        task_id = task_response.get("task_id")
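+        # Poll until the task leaves the PENDING state.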
+        while True:
+            result = self._get_result_sync(task_id)
+            if result["status"] != "PENDING":
+                return CompletionResponse(
+                    text=result.get("message", ""),
+                    additional_kwargs={"status": result["status"]},
+                )
+            time.sleep(poll_interval)
+
+    def stream_complete(self, question: str) -> CompletionResponseGen:
+        raise NotImplementedError(
+            "MyMagicAI does not currently support streaming completion."
+        )
+
+    async def achat(self, question: str) -> ChatResponse:
+        raise NotImplementedError("MyMagicAI does not currently support chat.")
+
+    def chat(self, question: str) -> ChatResponse:
+        raise NotImplementedError("MyMagicAI does not currently support chat.")
+
+    async def astream_complete(self, question: str) -> CompletionResponseAsyncGen:
+        raise NotImplementedError("MyMagicAI does not currently support streaming.")
+
+    async def astream_chat(self, question: str) -> ChatResponseAsyncGen:
+        raise NotImplementedError("MyMagicAI does not currently support streaming.")
+
+    def stream_chat(self, question: str) -> ChatResponseGen:
+        raise NotImplementedError("MyMagicAI does not currently support streaming chat.")
+
+    @property
+    def metadata(self) -> LLMMetadata:
+        """LLM metadata."""
+        return LLMMetadata(
+            num_output=self.max_tokens,
+            model_name=self.model,
+            is_chat_model=False,
+        )
diff --git a/llama-index-integrations/llms/llama-index-llms-mymagic/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-mymagic/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..ec7838ec7755b3c282cbe80dc2d295799a825f52
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-mymagic/pyproject.toml
@@ -0,0 +1,54 @@
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+[tool.codespell]
+check-filenames = true
+check-hidden = true
+# Feel free to un-skip examples, and experimental, you will just need to
+# work through many typos (--write-changes and --interactive will help)
+skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"
+
+[tool.llamahub]
+classes = ["MyMagicAI"]
+contains_example = false
+import_path = "llama_index.llms.mymagic"
+
+[tool.mypy]
+disallow_untyped_defs = true
+# Remove venv skip when integrated with pre-commit
+exclude = ["_static", "build", "examples", "notebooks", "venv"]
+ignore_missing_imports = true
+python_version = "3.8"
+
+[tool.poetry]
+authors = ["Vitali Avagyan <vitali@mymagic.ai>"]
+description = "llama-index llms mymagic integration"
+license = "MIT"
+name = "llama-index-llms-mymagic"
+packages = [{include = "llama_index/"}]
+readme = "README.md"
+version = "0.1.0"
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<3.12"
+llama-index-core = "^0.10.0"
+
+[tool.poetry.group.dev.dependencies]
+black = {extras = ["jupyter"], version = "<=23.9.1,>=23.7.0"}
+codespell = {extras = ["toml"], version = ">=v2.2.6"}
+ipython = "8.10.0"
+jupyter = "^1.0.0"
+mypy = "0.991"
+pre-commit = "3.2.0"
+pylint = "2.15.10"
+pytest = "7.2.1"
+pytest-mock = "3.11.1"
+ruff = "0.0.292"
+tree-sitter-languages = "^1.8.0"
+types-Deprecated = ">=0.1.0"
+types-PyYAML = "^6.0.12.12"
+types-protobuf = "^4.24.0.4"
+types-redis = "4.5.5.0"
+types-requests = "2.28.11.8"  # TODO: unpin when mypy>0.991
+types-setuptools = "67.1.0.0"