diff --git a/llama-index-packs/llama-index-packs-raft-dataset/examples/raft_dataset.ipynb b/llama-index-packs/llama-index-packs-raft-dataset/examples/raft_dataset.ipynb
index e4caa75c0d81f9f57dfb0e62c8e13e10fbbce1b6..a78e928f53c500e839c2c7e0f2c9b38ce1fe5769 100644
--- a/llama-index-packs/llama-index-packs-raft-dataset/examples/raft_dataset.ipynb
+++ b/llama-index-packs/llama-index-packs-raft-dataset/examples/raft_dataset.ipynb
@@ -11,7 +11,11 @@
     "\n",
     "This LlamaPack implements RAFT: Adapting Language Model to Domain Specific RAG [paper](https://arxiv.org/abs/2403.10131)\n",
     "\n",
-    "Retrieval Augmented FineTuning (RAFT) is a training recipe introduced in this paper that aims to improve the performance of large language models (LLMs) in open-book, in-domain question-answering tasks. Given a question and a set of retrieved documents, RAFT trains the LLM to identify and cite verbatim the most relevant sequences from the documents that help answer the question, while ignoring irrelevant or distracting information. By explicitly training the model to distinguish between relevant and irrelevant information and to provide evidence from the relevant documents, RAFT encourages the LLM to develop better reasoning and explanation abilities, ultimately improving its ability to answer questions accurately and rationally in scenarios where additional context or knowledge is available."
+    "Retrieval Augmented FineTuning (RAFT) is a training recipe introduced in this paper that aims to improve the performance of large language models (LLMs) in open-book, in-domain question-answering tasks. Given a question and a set of retrieved documents, RAFT trains the LLM to identify and cite verbatim the most relevant sequences from the documents that help answer the question, while ignoring irrelevant or distracting information. By explicitly training the model to distinguish between relevant and irrelevant information and to provide evidence from the relevant documents, RAFT encourages the LLM to develop better reasoning and explanation abilities, ultimately improving its ability to answer questions accurately and rationally in scenarios where additional context or knowledge is available.\n",
+    "\n",
+    "A key component of RAFT is how the dataset is generated for fine-tuning. Each QA pair also includes an \"oracle\" document from which the answer to the question can be deduced, as well as \"distractor\" documents that are irrelevant. During training, this forces the model to learn which information is relevant or irrelevant, and also to memorize domain knowledge.\n",
+    "\n",
+    "In this notebook we will create a RAFT dataset using the `RAFTDatasetPack` LlamaPack."
    ]
   },
   {
@@ -28,7 +32,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!pip install llama-index"
+    "!pip install llama-index\n",
+    "!pip install llama-index-packs-raft-dataset"
    ]
   },
   {
@@ -39,7 +44,7 @@
    "source": [
     "import os\n",
     "\n",
-    "os.environ[\"OPENAI_API_KEY\"] = \"sk-\""
+    "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR OPENAI API KEY>\""
    ]
   },
   {
diff --git a/llama-index-packs/llama-index-packs-raft-dataset/pyproject.toml b/llama-index-packs/llama-index-packs-raft-dataset/pyproject.toml
index 95c7f66f9e8ec3ee53b979f5f3218da1ccd96a6b..6adc63327766f9f187585d27f9e4b69dd143241f 100644
--- a/llama-index-packs/llama-index-packs-raft-dataset/pyproject.toml
+++ b/llama-index-packs/llama-index-packs-raft-dataset/pyproject.toml
@@ -29,7 +29,7 @@ license = "MIT"
 maintainers = ["ravi-theja"]
 name = "llama-index-packs-raft-dataset"
 readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
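For reviewers who want to try the patch end to end, here is a minimal sketch of how the pack introduced above might be driven. It is not part of the patch: it assumes `RAFTDatasetPack` accepts a path to a source document and that `run()` returns a Hugging Face `Dataset`, and the input and output file names are placeholders.

```python
import os

from llama_index.packs.raft_dataset import RAFTDatasetPack

os.environ["OPENAI_API_KEY"] = "<YOUR OPENAI API KEY>"

# Point the pack at a local source document (placeholder path).
raft_dataset = RAFTDatasetPack("./data/source_document.txt")

# Generate QA pairs, each bundled with an oracle context and distractor
# contexts; assumed here to return a Hugging Face Dataset.
dataset = raft_dataset.run()

# Export to JSONL, a common input format for fine-tuning jobs.
dataset.to_pandas().to_json("raft_dataset.jsonl", orient="records", lines=True)
```

Mixing the oracle document with distractors in each training example is what pushes the fine-tuned model to cite from relevant context while ignoring noise, per the RAFT recipe described in the notebook text.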