From 6f345c7be0fce1be36c2e154605eeb35b9f4f535 Mon Sep 17 00:00:00 2001
From: Sohan <sohan@pipeshift.com>
Date: Sat, 26 Oct 2024 22:05:35 +0530
Subject: [PATCH] Pipeshift llama index integration (#16610)

---
 docs/docs/api_reference/llms/pipeshift.md     |   4 +
 docs/docs/examples/llm/pipeshift.ipynb        | 384 ++++++++++++++++++
 .../docs/module_guides/models/llms/modules.md |   1 +
 docs/mkdocs.yml                               |   4 +
 .../llama_index/cli/upgrade/mappings.json     |   1 +
 .../core/command_line/mappings.json           |   1 +
 .../llama-index-llms-pipeshift/.gitignore     | 153 +++++++
 .../llms/llama-index-llms-pipeshift/BUILD     |   3 +
 .../llms/llama-index-llms-pipeshift/Makefile  |  18 +
 .../llms/llama-index-llms-pipeshift/README.md | 112 +++++
 .../llama_index/llms/pipeshift/BUILD          |   1 +
 .../llama_index/llms/pipeshift/__init__.py    |   4 +
 .../llama_index/llms/pipeshift/base.py        |  77 ++++
 .../llama-index-llms-pipeshift/pyproject.toml |  57 +++
 .../llama-index-llms-pipeshift/tests/BUILD    |   1 +
 .../tests/__init__.py                         |   0
 .../tests/test_llms_pipeshift.py              |   7 +
 17 files changed, 828 insertions(+)
 create mode 100644 docs/docs/api_reference/llms/pipeshift.md
 create mode 100644 docs/docs/examples/llm/pipeshift.ipynb
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/.gitignore
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/BUILD
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/Makefile
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/README.md
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/BUILD
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/__init__.py
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/base.py
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/pyproject.toml
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/tests/BUILD
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/tests/__init__.py
 create mode 100644 llama-index-integrations/llms/llama-index-llms-pipeshift/tests/test_llms_pipeshift.py

diff --git a/docs/docs/api_reference/llms/pipeshift.md b/docs/docs/api_reference/llms/pipeshift.md
new file mode 100644
index 0000000000..66635380be
--- /dev/null
+++ b/docs/docs/api_reference/llms/pipeshift.md
@@ -0,0 +1,4 @@
+::: llama_index.llms.pipeshift
+    options:
+      members:
+        - Pipeshift
diff --git a/docs/docs/examples/llm/pipeshift.ipynb b/docs/docs/examples/llm/pipeshift.ipynb
new file mode 100644
index 0000000000..fdb7f4e889
--- /dev/null
+++ b/docs/docs/examples/llm/pipeshift.ipynb
@@ -0,0 +1,384 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "9e3a8796-edc8-43f2-94ad-fe4fb20d70ed",
+   "metadata": {},
+   "source": [
+    "# Pipeshift"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "081d07d2",
+   "metadata": {},
+   "source": [
+    "If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3f6f8702",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Defaulting to user installation because normal site-packages is not writeable\n",
+      "\u001b[31mERROR: Could not find a version that satisfies the requirement llama-index-llms-pipeshift (from versions: none)\u001b[0m\u001b[31m\n",
+      "\u001b[0m\u001b[31mERROR: No matching distribution found for llama-index-llms-pipeshift\u001b[0m\u001b[31m\n",
+      "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install llama-index-llms-pipeshift"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "83ea30ee",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Defaulting to user installation because normal site-packages is not writeable\n",
+      "Requirement already satisfied: llama-index in /Users/work/Library/Python/3.9/lib/python/site-packages (0.11.18)\n",
+      "Requirement already satisfied: llama-index-agent-openai<0.4.0,>=0.3.4 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.3.4)\n",
+      "Requirement already satisfied: llama-index-cli<0.4.0,>=0.3.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.3.1)\n",
+      "Requirement already satisfied: llama-index-core<0.12.0,>=0.11.18 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.11.18)\n",
+      "Requirement already satisfied: llama-index-embeddings-openai<0.3.0,>=0.2.4 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.2.5)\n",
+      "Requirement already satisfied: llama-index-indices-managed-llama-cloud>=0.3.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.4.0)\n",
+      "Requirement already satisfied: llama-index-legacy<0.10.0,>=0.9.48 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.9.48.post3)\n",
+      "Requirement already satisfied: llama-index-llms-openai<0.3.0,>=0.2.10 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.2.15)\n",
+      "Requirement already satisfied: llama-index-multi-modal-llms-openai<0.3.0,>=0.2.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.2.2)\n",
+      "Requirement already satisfied: llama-index-program-openai<0.3.0,>=0.2.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.2.0)\n",
+      "Requirement already satisfied: llama-index-question-gen-openai<0.3.0,>=0.2.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.2.0)\n",
+      "Requirement already satisfied: llama-index-readers-file<0.3.0,>=0.2.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.2.2)\n",
+      "Requirement already satisfied: llama-index-readers-llama-parse>=0.3.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (0.3.0)\n",
+      "Requirement already satisfied: nltk>3.8.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index) (3.9.1)\n",
+      "Requirement already satisfied: openai>=1.14.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-agent-openai<0.4.0,>=0.3.4->llama-index) (1.52.0)\n",
+      "Requirement already satisfied: PyYAML>=6.0.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (6.0.2)\n",
+      "Requirement already satisfied: SQLAlchemy>=1.4.49 in /Users/work/Library/Python/3.9/lib/python/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.12.0,>=0.11.18->llama-index) (2.0.34)\n",
+      "Requirement already satisfied: aiohttp<4.0.0,>=3.8.6 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (3.10.5)\n",
+      "Requirement already satisfied: dataclasses-json in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (0.6.7)\n",
+      "Requirement already satisfied: deprecated>=1.2.9.3 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (1.2.14)\n",
+      "Requirement already satisfied: dirtyjson<2.0.0,>=1.0.8 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (1.0.8)\n",
+      "Requirement already satisfied: fsspec>=2023.5.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (2024.9.0)\n",
+      "Requirement already satisfied: httpx in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (0.27.2)\n",
+      "Requirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (1.6.0)\n",
+      "Requirement already satisfied: networkx>=3.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (3.2.1)\n",
+      "Requirement already satisfied: numpy<2.0.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (1.26.4)\n",
+      "Requirement already satisfied: pillow>=9.0.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (11.0.0)\n",
+      "Requirement already satisfied: pydantic<3.0.0,>=2.7.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (2.9.1)\n",
+      "Requirement already satisfied: requests>=2.31.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (2.32.3)\n",
+      "Requirement already satisfied: tenacity!=8.4.0,<9.0.0,>=8.2.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (8.5.0)\n",
+      "Requirement already satisfied: tiktoken>=0.3.3 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (0.8.0)\n",
+      "Requirement already satisfied: tqdm<5.0.0,>=4.66.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (4.66.5)\n",
+      "Requirement already satisfied: typing-extensions>=4.5.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (4.12.2)\n",
+      "Requirement already satisfied: typing-inspect>=0.8.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (0.9.0)\n",
+      "Requirement already satisfied: wrapt in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-core<0.12.0,>=0.11.18->llama-index) (1.16.0)\n",
+      "Requirement already satisfied: llama-cloud>=0.0.11 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-indices-managed-llama-cloud>=0.3.0->llama-index) (0.1.4)\n",
+      "Requirement already satisfied: pandas in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-legacy<0.10.0,>=0.9.48->llama-index) (2.2.3)\n",
+      "Requirement already satisfied: beautifulsoup4<5.0.0,>=4.12.3 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-readers-file<0.3.0,>=0.2.0->llama-index) (4.12.3)\n",
+      "Requirement already satisfied: pypdf<5.0.0,>=4.0.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-readers-file<0.3.0,>=0.2.0->llama-index) (4.3.1)\n",
+      "Requirement already satisfied: striprtf<0.0.27,>=0.0.26 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-readers-file<0.3.0,>=0.2.0->llama-index) (0.0.26)\n",
+      "Requirement already satisfied: llama-parse>=0.5.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from llama-index-readers-llama-parse>=0.3.0->llama-index) (0.5.10)\n",
+      "Requirement already satisfied: click in /Users/work/Library/Python/3.9/lib/python/site-packages (from nltk>3.8.1->llama-index) (8.1.7)\n",
+      "Requirement already satisfied: joblib in /Users/work/Library/Python/3.9/lib/python/site-packages (from nltk>3.8.1->llama-index) (1.4.2)\n",
+      "Requirement already satisfied: regex>=2021.8.3 in /Users/work/Library/Python/3.9/lib/python/site-packages (from nltk>3.8.1->llama-index) (2024.9.11)\n",
+      "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.12.0,>=0.11.18->llama-index) (2.4.0)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/work/Library/Python/3.9/lib/python/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.12.0,>=0.11.18->llama-index) (1.3.1)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.12.0,>=0.11.18->llama-index) (24.2.0)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.12.0,>=0.11.18->llama-index) (1.4.1)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/work/Library/Python/3.9/lib/python/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.12.0,>=0.11.18->llama-index) (6.1.0)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.12.0,>=0.11.18->llama-index) (1.11.1)\n",
+      "Requirement already satisfied: async-timeout<5.0,>=4.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.12.0,>=0.11.18->llama-index) (4.0.3)\n",
+      "Requirement already satisfied: soupsieve>1.2 in /Users/work/Library/Python/3.9/lib/python/site-packages (from beautifulsoup4<5.0.0,>=4.12.3->llama-index-readers-file<0.3.0,>=0.2.0->llama-index) (2.6)\n",
+      "Requirement already satisfied: anyio in /Users/work/Library/Python/3.9/lib/python/site-packages (from httpx->llama-index-core<0.12.0,>=0.11.18->llama-index) (4.4.0)\n",
+      "Requirement already satisfied: certifi in /Users/work/Library/Python/3.9/lib/python/site-packages (from httpx->llama-index-core<0.12.0,>=0.11.18->llama-index) (2024.8.30)\n",
+      "Requirement already satisfied: httpcore==1.* in /Users/work/Library/Python/3.9/lib/python/site-packages (from httpx->llama-index-core<0.12.0,>=0.11.18->llama-index) (1.0.5)\n",
+      "Requirement already satisfied: idna in /Users/work/Library/Python/3.9/lib/python/site-packages (from httpx->llama-index-core<0.12.0,>=0.11.18->llama-index) (3.9)\n",
+      "Requirement already satisfied: sniffio in /Users/work/Library/Python/3.9/lib/python/site-packages (from httpx->llama-index-core<0.12.0,>=0.11.18->llama-index) (1.3.1)\n",
+      "Requirement already satisfied: h11<0.15,>=0.13 in /Users/work/Library/Python/3.9/lib/python/site-packages (from httpcore==1.*->httpx->llama-index-core<0.12.0,>=0.11.18->llama-index) (0.14.0)\n",
+      "Requirement already satisfied: distro<2,>=1.7.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from openai>=1.14.0->llama-index-agent-openai<0.4.0,>=0.3.4->llama-index) (1.9.0)\n",
+      "Requirement already satisfied: jiter<1,>=0.4.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from openai>=1.14.0->llama-index-agent-openai<0.4.0,>=0.3.4->llama-index) (0.6.1)\n",
+      "Requirement already satisfied: annotated-types>=0.6.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from pydantic<3.0.0,>=2.7.0->llama-index-core<0.12.0,>=0.11.18->llama-index) (0.7.0)\n",
+      "Requirement already satisfied: pydantic-core==2.23.3 in /Users/work/Library/Python/3.9/lib/python/site-packages (from pydantic<3.0.0,>=2.7.0->llama-index-core<0.12.0,>=0.11.18->llama-index) (2.23.3)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/work/Library/Python/3.9/lib/python/site-packages (from requests>=2.31.0->llama-index-core<0.12.0,>=0.11.18->llama-index) (3.3.2)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from requests>=2.31.0->llama-index-core<0.12.0,>=0.11.18->llama-index) (2.2.3)\n",
+      "Requirement already satisfied: greenlet!=0.4.17 in /Users/work/Library/Python/3.9/lib/python/site-packages (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.12.0,>=0.11.18->llama-index) (3.1.1)\n",
+      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from typing-inspect>=0.8.0->llama-index-core<0.12.0,>=0.11.18->llama-index) (1.0.0)\n",
+      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from dataclasses-json->llama-index-core<0.12.0,>=0.11.18->llama-index) (3.23.0)\n",
+      "Requirement already satisfied: python-dateutil>=2.8.2 in /Users/work/Library/Python/3.9/lib/python/site-packages (from pandas->llama-index-legacy<0.10.0,>=0.9.48->llama-index) (2.9.0.post0)\n",
+      "Requirement already satisfied: pytz>=2020.1 in /Users/work/Library/Python/3.9/lib/python/site-packages (from pandas->llama-index-legacy<0.10.0,>=0.9.48->llama-index) (2024.2)\n",
+      "Requirement already satisfied: tzdata>=2022.7 in /Users/work/Library/Python/3.9/lib/python/site-packages (from pandas->llama-index-legacy<0.10.0,>=0.9.48->llama-index) (2024.2)\n",
+      "Requirement already satisfied: exceptiongroup>=1.0.2 in /Users/work/Library/Python/3.9/lib/python/site-packages (from anyio->httpx->llama-index-core<0.12.0,>=0.11.18->llama-index) (1.2.2)\n",
+      "Requirement already satisfied: packaging>=17.0 in /Users/work/Library/Python/3.9/lib/python/site-packages (from marshmallow<4.0.0,>=3.18.0->dataclasses-json->llama-index-core<0.12.0,>=0.11.18->llama-index) (24.1)\n",
+      "Requirement already satisfied: six>=1.5 in /Applications/Xcode.app/Contents/Developer/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas->llama-index-legacy<0.10.0,>=0.9.48->llama-index) (1.15.0)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install llama-index"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b007403c-6b7a-420c-92f1-4171d05ed9bb",
+   "metadata": {},
+   "source": [
+    "## Basic Usage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "f7f51114",
+   "metadata": {},
+   "source": [
+    "Head over to the [models](https://dashboard.pipeshift.com/models) section of the Pipeshift dashboard to see the list of available models."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8ead155e-b8bd-46f9-ab9b-28fc009361dd",
+   "metadata": {},
+   "source": [
+    "#### Call `complete` with a prompt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "60be18ae-c957-4ac2-a58a-0652e18ee6d6",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "ModuleNotFoundError",
+     "evalue": "No module named 'llama_index.llms.pipeshift'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mllama_index\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mllms\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpipeshift\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Pipeshift\n\u001b[1;32m      3\u001b[0m llm \u001b[38;5;241m=\u001b[39m Pipeshift(\n\u001b[1;32m      4\u001b[0m             model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmistralai/Mistral-7B-Instruct-v0.3\u001b[39m\u001b[38;5;124m\"\u001b[39m, \n\u001b[1;32m      5\u001b[0m             \u001b[38;5;66;03m# api_key=\"YOUR_API_KEY\" # alternative way to pass api_key if not specified in environment variable\u001b[39;00m\n\u001b[1;32m      6\u001b[0m         )\n\u001b[1;32m      7\u001b[0m res \u001b[38;5;241m=\u001b[39m llm\u001b[38;5;241m.\u001b[39mcomplete(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msupercars are \u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'llama_index.llms.pipeshift'"
+     ]
+    }
+   ],
+   "source": [
+    "from llama_index.llms.pipeshift import Pipeshift\n",
+    "\n",
+    "llm = Pipeshift(\n",
+    "    model=\"mistralai/Mistral-7B-Instruct-v0.3\",\n",
+    "    # api_key=\"YOUR_API_KEY\" # alternative way to pass api_key if not specified in environment variable\n",
+    ")\n",
+    "res = llm.complete(\"supercars are \")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ac2cbebb-a444-4a46-9d85-b265a3483d68",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "NameError",
+     "evalue": "name 'res' is not defined",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mres\u001b[49m)\n",
+      "\u001b[0;31mNameError\u001b[0m: name 'res' is not defined"
+     ]
+    }
+   ],
+   "source": [
+    "print(res)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "14831268-f90f-499d-9d86-925dbc88292b",
+   "metadata": {},
+   "source": [
+    "#### Call `chat` with a list of messages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bbe29574-4af1-48d5-9739-f60652b6ce6c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.core.llms import ChatMessage\n",
+    "from llama_index.llms.pipeshift import Pipeshift\n",
+    "\n",
+    "messages = [\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a salesperson at a supercar showroom\"\n",
+    "    ),\n",
+    "    ChatMessage(role=\"user\", content=\"why should I pick porsche 911 gt3 rs\"),\n",
+    "]\n",
+    "res = Pipeshift(\n",
+    "    model=\"mistralai/Mistral-7B-Instruct-v0.3\", max_tokens=50\n",
+    ").chat(messages)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9cbd550a-0264-4a11-9b2c-a08d8723a5ae",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "NameError",
+     "evalue": "name 'resp' is not defined",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[2], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mresp\u001b[49m)\n",
+      "\u001b[0;31mNameError\u001b[0m: name 'resp' is not defined"
+     ]
+    }
+   ],
+   "source": [
+    "print(res)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2ed5e894-4597-4911-a623-591560f72b82",
+   "metadata": {},
+   "source": [
+    "## Streaming"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4cb7986f-aaed-42e2-abdd-f274f6d4fc59",
+   "metadata": {},
+   "source": [
+    "Using `stream_complete` endpoint"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d43f17a2-0aeb-464b-a7a7-732ba5e8ef24",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.llms.pipeshift import Pipeshift\n",
+    "\n",
+    "llm = Pipeshift(model=\"mistralai/Mistral-7B-Instruct-v0.3\")\n",
+    "resp = llm.stream_complete(\"porsche GT3 RS is \")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0214e911-cf0d-489c-bc48-9bb1d8bf65d8",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "NameError",
+     "evalue": "name 'resp' is not defined",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[10], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m r \u001b[38;5;129;01min\u001b[39;00m \u001b[43mresp\u001b[49m:\n\u001b[1;32m      2\u001b[0m     \u001b[38;5;28mprint\u001b[39m(r\u001b[38;5;241m.\u001b[39mdelta, end\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
+      "\u001b[0;31mNameError\u001b[0m: name 'resp' is not defined"
+     ]
+    }
+   ],
+   "source": [
+    "for r in resp:\n",
+    "    print(r.delta, end=\"\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "40350dd8-3f50-4a2f-8545-5723942039bb",
+   "metadata": {},
+   "source": [
+    "Using `stream_chat` endpoint"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bc636e65-a67b-4dcd-ac60-b25abc9d8dbd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.llms.pipeshift import Pipeshift\n",
+    "from llama_index.core.llms import ChatMessage\n",
+    "\n",
+    "llm = Pipeshift(model=\"mistralai/Mistral-7B-Instruct-v0.3\")\n",
+    "messages = [\n",
+    "    ChatMessage(\n",
+    "        role=\"system\", content=\"You are a salesperson at a supercar showroom\"\n",
+    "    ),\n",
+    "    ChatMessage(role=\"user\", content=\"how fast can the porsche gt3 rs go?\"),\n",
+    "]\n",
+    "resp = llm.stream_chat(messages)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4475a6bc-1051-4287-abce-ba83324aeb9e",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "NameError",
+     "evalue": "name 'resp' is not defined",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[9], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m r \u001b[38;5;129;01min\u001b[39;00m \u001b[43mresp\u001b[49m:\n\u001b[1;32m      2\u001b[0m     \u001b[38;5;28mprint\u001b[39m(r\u001b[38;5;241m.\u001b[39mdelta, end\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
+      "\u001b[0;31mNameError\u001b[0m: name 'resp' is not defined"
+     ]
+    }
+   ],
+   "source": [
+    "for r in resp:\n",
+    "    print(r.delta, end=\"\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "colab": {
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/module_guides/models/llms/modules.md b/docs/docs/module_guides/models/llms/modules.md
index 6a95eac841..0b712beb23 100644
--- a/docs/docs/module_guides/models/llms/modules.md
+++ b/docs/docs/module_guides/models/llms/modules.md
@@ -40,6 +40,7 @@ We support integrations with OpenAI, Anthropic, Hugging Face, PaLM, and more.
 - [OpenRouter](../../../examples/llm/openrouter.ipynb)
 - [PaLM](../../../examples/llm/palm.ipynb)
 - [Perplexity](../../../examples/llm/perplexity.ipynb)
+- [Pipeshift](../../../examples/llm/pipeshift.ipynb)
 - [PremAI](../../../examples/llm/premai.ipynb)
 - [Portkey](../../../examples/llm/portkey.ipynb)
 - [Predibase](../../../examples/llm/predibase.ipynb)
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index fb2d1b4781..760298507b 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -369,6 +369,7 @@ nav:
           - ./examples/llm/paieas.ipynb
           - ./examples/llm/palm.ipynb
           - ./examples/llm/perplexity.ipynb
+          - ./examples/llm/pipeshift.ipynb
           - ./examples/llm/portkey.ipynb
           - ./examples/llm/predibase.ipynb
           - ./examples/llm/premai.ipynb
@@ -1023,6 +1024,7 @@ nav:
           - ./api_reference/llms/paieas.md
           - ./api_reference/llms/palm.md
           - ./api_reference/llms/perplexity.md
+          - ./api_reference/llms/pipeshift.md
           - ./api_reference/llms/portkey.md
           - ./api_reference/llms/predibase.md
           - ./api_reference/llms/premai.md
@@ -2277,6 +2279,7 @@ plugins:
             - ../llama-index-integrations/readers/llama-index-readers-document360
             - ../llama-index-integrations/llms/llama-index-llms-gaudi
             - ../llama-index-integrations/llms/llama-index-llms-zhipuai
+            - ../llama-index-integrations/llms/llama-index-llms-pipeshift
             - ../llama-index-integrations/vector_stores/llama-index-vector-stores-hnswlib
             - ../llama-index-integrations/vector_stores/llama-index-vector-stores-oceanbase
             - ../llama-index-integrations/embeddings/llama-index-embeddings-gaudi
@@ -2574,6 +2577,7 @@ plugins:
         ./examples/llm/openrouter.html: https://docs.llamaindex.ai/en/stable/examples/llm/openrouter/
         ./examples/llm/palm.html: https://docs.llamaindex.ai/en/stable/examples/llm/palm/
         ./examples/llm/perplexity.html: https://docs.llamaindex.ai/en/stable/examples/llm/perplexity/
+        ./examples/llm/pipeshift.html: https://docs.llamaindex.ai/en/stable/examples/llm/pipeshift/
         ./examples/llm/portkey.html: https://docs.llamaindex.ai/en/stable/examples/llm/portkey/
         ./examples/llm/predibase.html: https://docs.llamaindex.ai/en/stable/examples/llm/predibase/
         ./examples/llm/premai.html: https://docs.llamaindex.ai/en/stable/examples/llm/premai/
diff --git a/llama-index-cli/llama_index/cli/upgrade/mappings.json b/llama-index-cli/llama_index/cli/upgrade/mappings.json
index a6606901be..f0a4a477a8 100644
--- a/llama-index-cli/llama_index/cli/upgrade/mappings.json
+++ b/llama-index-cli/llama_index/cli/upgrade/mappings.json
@@ -1013,6 +1013,7 @@
   "SyncOpenAI": "llama_index.llms.openai",
   "AsyncOpenAI": "llama_index.llms.openai",
   "LMStudio": "llama_index.llms.lmstudio",
+  "Pipeshift": "llama_index.llms.pipeshift",
   "GradientBaseModelLLM": "llama_index.llms.gradient",
   "GradientModelAdapterLLM": "llama_index.llms.gradient",
   "EntityExtractor": "llama_index.extractors.entity",
diff --git a/llama-index-core/llama_index/core/command_line/mappings.json b/llama-index-core/llama_index/core/command_line/mappings.json
index 9b5c7be375..af50b9ce2d 100644
--- a/llama-index-core/llama_index/core/command_line/mappings.json
+++ b/llama-index-core/llama_index/core/command_line/mappings.json
@@ -1012,6 +1012,7 @@
   "SyncOpenAI": "llama_index.llms.openai",
   "AsyncOpenAI": "llama_index.llms.openai",
   "LMStudio": "llama_index.llms.lmstudio",
+  "Pipeshift": "llama_index.llms.pipeshift",
   "GradientBaseModelLLM": "llama_index.llms.gradient",
   "GradientModelAdapterLLM": "llama_index.llms.gradient",
   "EntityExtractor": "llama_index.extractors.entity",
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/.gitignore b/llama-index-integrations/llms/llama-index-llms-pipeshift/.gitignore
new file mode 100644
index 0000000000..990c18de22
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/.gitignore
@@ -0,0 +1,153 @@
+llama_index/_static
+.DS_Store
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+bin/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+etc/
+include/
+lib/
+lib64/
+parts/
+sdist/
+share/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+.ruff_cache
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+notebooks/
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+pyvenv.cfg
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# Jetbrains
+.idea
+modules/
+*.swp
+
+# VsCode
+.vscode
+
+# pipenv
+Pipfile
+Pipfile.lock
+
+# pyright
+pyrightconfig.json
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/BUILD b/llama-index-integrations/llms/llama-index-llms-pipeshift/BUILD
new file mode 100644
index 0000000000..0896ca890d
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/BUILD
@@ -0,0 +1,3 @@
+poetry_requirements(
+    name="poetry",
+)
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/Makefile b/llama-index-integrations/llms/llama-index-llms-pipeshift/Makefile
new file mode 100644
index 0000000000..12aaa7764c
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/Makefile
@@ -0,0 +1,18 @@
+GIT_ROOT ?= $(shell git rev-parse --show-toplevel)
+
+help:	## Show all Makefile targets.
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'
+
+format:	## Run code autoformatters (black).
+	pre-commit install
+	git ls-files | xargs pre-commit run black --files
+
+lint:	## Run linters: pre-commit (black, ruff, codespell) and mypy
+	pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files
+
+TEST_FILE ?= tests
+test:	## Run tests via pytest.
+	poetry run pytest ${TEST_FILE}
+
+watch-docs:	## Build and watch documentation.
+	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/README.md b/llama-index-integrations/llms/llama-index-llms-pipeshift/README.md
new file mode 100644
index 0000000000..775425c3a4
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/README.md
@@ -0,0 +1,112 @@
+# LlamaIndex Llms Integration: Pipeshift
+
+## Installation
+
+1. Install the required Python packages:
+
+   ```bash
+   pip install llama-index-llms-pipeshift
+   pip install llama-index
+   ```
+
+2. Set the PIPESHIFT_API_KEY as an environment variable or pass it directly to the class constructor.
+
+## Usage
+
+### Basic Completion
+
+To generate a simple completion, use the `complete` method:
+
+```python
+from llama_index.llms.pipeshift import Pipeshift
+
+llm = Pipeshift(
+    model="mistralai/Mistral-7B-Instruct-v0.3",
+    # api_key="YOUR_API_KEY" # alternative way to pass api_key if not specified in environment variable
+)
+res = llm.complete("supercars are ")
+print(res)
+```
+
+Example output:
+
+```
+Supercars are high-performance sports cars that are designed to deliver exceptional speed, power, and luxury. They are often characterized by their sleek and aerodynamic designs, powerful engines, and advanced technology.
+```
+
+### Basic Chat
+
+To simulate a chat with multiple messages:
+
+```python
+from llama_index.core.llms import ChatMessage
+from llama_index.llms.pipeshift import Pipeshift
+
+messages = [
+    ChatMessage(
+        role="system", content="You are sales person at supercar showroom"
+    ),
+    ChatMessage(role="user", content="why should I pick porsche 911 gt3 rs"),
+]
+res = Pipeshift(
+    model="mistralai/Mistral-7B-Instruct-v0.3", max_tokens=50
+).chat(messages)
+print(res)
+```
+
+Example output:
+
+```
+assistant: 1. Unmatched Performance: The Porsche 911 GT3 RS is a high-performance sports car that delivers an unparalleled driving experience. It boasts a powerful 4.0-liter flat
+```
+
+### Streaming Completion
+
+To stream a response in real-time using `stream_complete`:
+
+```python
+from llama_index.llms.pipeshift import Pipeshift
+
+llm = Pipeshift(model="mistralai/Mistral-7B-Instruct-v0.3")
+resp = llm.stream_complete("porsche GT3 RS is ")
+
+for r in resp:
+    print(r.delta, end="")
+```
+
+Example output (partial):
+
+```
+ The Porsche 911 GT3 RS is a high-performance sports car produced by Porsche AG. It is part of the 911 (991 and 992 generations) series and is%
+```
+
+### Streaming Chat
+
+For a streamed conversation, use `stream_chat`:
+
+```python
+from llama_index.llms.pipeshift import Pipeshift
+from llama_index.core.llms import ChatMessage
+
+llm = Pipeshift(model="mistralai/Mistral-7B-Instruct-v0.3")
+messages = [
+    ChatMessage(
+        role="system", content="You are sales person at supercar showroom"
+    ),
+    ChatMessage(role="user", content="how fast can the porsche gt3 rs go?"),
+]
+resp = llm.stream_chat(messages)
+
+for r in resp:
+    print(r.delta, end="")
+```
+
+Example output (partial):
+
+```
+The Porsche 911 GT3 RS is an incredible piece of engineering. This high-performance sports car can reach a top speed of approximately 193 mph (310 km/h) according to P%
+```
+
+### LLM Implementation example
+
+https://docs.llamaindex.ai/en/stable/examples/llm/pipeshift/
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/BUILD b/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/BUILD
new file mode 100644
index 0000000000..db46e8d6c9
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/BUILD
@@ -0,0 +1 @@
+python_sources()
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/__init__.py b/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/__init__.py
new file mode 100644
index 0000000000..63aee3c5c3
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/__init__.py
@@ -0,0 +1,4 @@
+from llama_index.llms.pipeshift.base import Pipeshift
+
+
+__all__ = ["Pipeshift"]
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/base.py b/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/base.py
new file mode 100644
index 0000000000..121ed9fefa
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/llama_index/llms/pipeshift/base.py
@@ -0,0 +1,88 @@
+import os
+from typing import Any, Optional
+
+from llama_index.llms.openai_like import OpenAILike
+
+DEFAULT_API_BASE = "https://api.pipeshift.com/api/v0"
+
+
+def validate_api_key_and_model(api_key: Optional[str], model: str) -> None:
+    """
+    Validate the API key and model name.
+
+    Args:
+        api_key (Optional[str]): The API key to validate.
+        model (str): The model name to validate.
+
+    Raises:
+        ValueError: If the API key or model name is missing or invalid.
+    """
+    if not api_key:
+        raise ValueError("Pipeshift API Key not found!")
+    if not isinstance(api_key, str) or not api_key.strip():
+        raise ValueError("Invalid API key: API key must be a non-empty string.")
+
+    if not model:
+        raise ValueError("Model not specified. Please enter model name")
+    if not isinstance(model, str) or not model.strip():
+        raise ValueError("Invalid model name: Model name must be a non-empty string.")
+
+
+class Pipeshift(OpenAILike):
+    """Pipeshift LLM.
+
+    Examples:
+        `pip install llama-index-llms-pipeshift`
+
+        ```python
+        from llama_index.llms.pipeshift import Pipeshift
+
+        # set api key in env or in llm
+        # import os
+        # os.environ["PIPESHIFT_API_KEY"] = "your api key"
+
+        llm = Pipeshift(
+            model="mistralai/Mistral-7B-Instruct-v0.3", api_key="your_api_key"
+        )
+
+        resp = llm.complete("How fast is porsche gt3 rs?")
+        print(resp)
+        ```
+    """
+
+    def __init__(
+        self,
+        model: str,
+        api_key: Optional[str] = None,
+        api_base: str = DEFAULT_API_BASE,
+        is_chat_model: bool = True,
+        **kwargs: Any,
+    ) -> None:
+        """Initialize the Pipeshift LLM.
+
+        Args:
+            model (str): Name of the model hosted on Pipeshift.
+            api_key (Optional[str]): Pipeshift API key; falls back to the
+                PIPESHIFT_API_KEY environment variable when omitted.
+            api_base (str): Base URL of the Pipeshift API.
+            is_chat_model (bool): Whether the endpoint serves a chat model.
+
+        Raises:
+            ValueError: If the API key or model name is missing or invalid.
+        """
+        # Fall back to the environment variable when no key is passed in.
+        api_key = api_key or os.environ.get("PIPESHIFT_API_KEY", None)
+        # Fail fast with a clear error; let the original traceback propagate
+        # instead of re-wrapping the ValueError (which would lose context).
+        validate_api_key_and_model(api_key, model)
+        super().__init__(
+            model=model,
+            api_key=api_key,
+            api_base=api_base,
+            is_chat_model=is_chat_model,
+            **kwargs,
+        )
+
+    @classmethod
+    def class_name(cls) -> str:
+        """Get class name."""
+        return "Pipeshift"
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-pipeshift/pyproject.toml
new file mode 100644
index 0000000000..a01f292d23
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/pyproject.toml
@@ -0,0 +1,57 @@
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+[tool.codespell]
+check-filenames = true
+check-hidden = true
+# Feel free to un-skip examples, and experimental, you will just need to
+# work through many typos (--write-changes and --interactive will help)
+skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"
+
+[tool.llamahub]
+contains_example = false
+import_path = "llama_index.llms.pipeshift"
+
+[tool.llamahub.class_authors]
+Pipeshift = "pipeshift"
+
+[tool.mypy]
+disallow_untyped_defs = true
+# Remove venv skip when integrated with pre-commit
+exclude = ["_static", "build", "examples", "notebooks", "venv"]
+ignore_missing_imports = true
+python_version = "3.8"
+
+[tool.poetry]
+authors = ["Pipeshift"]
+description = "llama-index llms pipeshift integration"
+license = "MIT"
+name = "llama-index-llms-pipeshift"
+packages = [{include = "llama_index/"}]
+readme = "README.md"
+version = "0.1.0"
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<4.0"
+llama-index-core = "^0.11.0"
+llama-index-llms-openai-like = "^0.2.0"
+
+[tool.poetry.group.dev.dependencies]
+black = {extras = ["jupyter"], version = "<=23.9.1,>=23.7.0"}
+codespell = {extras = ["toml"], version = ">=v2.2.6"}
+ipython = "8.10.0"
+jupyter = "^1.0.0"
+mypy = "0.991"
+pre-commit = "3.2.0"
+pylint = "2.15.10"
+pytest = "7.2.1"
+pytest-mock = "3.11.1"
+ruff = "0.0.292"
+tree-sitter-languages = "^1.8.0"
+types-Deprecated = ">=0.1.0"
+types-PyYAML = "^6.0.12.12"
+types-protobuf = "^4.24.0.4"
+types-redis = "4.5.5.0"
+types-requests = "2.28.11.8"  # TODO: unpin when mypy>0.991
+types-setuptools = "67.1.0.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/BUILD b/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/BUILD
new file mode 100644
index 0000000000..dabf212d7e
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/BUILD
@@ -0,0 +1 @@
+python_tests()
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/__init__.py b/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/test_llms_pipeshift.py b/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/test_llms_pipeshift.py
new file mode 100644
index 0000000000..1fe0fad392
--- /dev/null
+++ b/llama-index-integrations/llms/llama-index-llms-pipeshift/tests/test_llms_pipeshift.py
@@ -0,0 +1,7 @@
+from llama_index.core.base.llms.base import BaseLLM
+from llama_index.llms.pipeshift import Pipeshift
+
+
+def test_llm_class():
+    # Pipeshift must inherit from the core LLM base class (it is an LLM
+    # integration, not an embedding — the old name was a copy-paste artifact).
+    names_of_base_classes = [b.__name__ for b in Pipeshift.__mro__]
+    assert BaseLLM.__name__ in names_of_base_classes
-- 
GitLab