diff --git a/llama-demo-apps/HelloLlamaLocal.ipynb b/llama-demo-apps/HelloLlamaLocal.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..d7136a2f75ceb142f3b8fd8bd5be465b7e785f16
--- /dev/null
+++ b/llama-demo-apps/HelloLlamaLocal.ipynb
@@ -0,0 +1,879 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "b7cabb96-2715-492e-825a-8f4ff161dc3b",
+   "metadata": {},
+   "source": [
+    "## This demo app shows:\n",
+    "* how to run Llama2 locally on a Mac using llama-cpp-python and the llama-cpp's quantized Llama2 model;\n",
+    "* how to use LangChain to ask Llama general questions;\n",
+    "* how to use LangChain to load a recent PDF doc - the Llama2 paper pdf - and ask questions about it. This is the well known RAG (Retrieval Augmented Generation) method to let LLM such as Llama2 be able to answer questions about the data not publicly available when Llama2 was trained, or about your own data. RAG is one way to prevent LLM's hallucination. "
+   ]
+  },
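+  {
+   "cell_type": "markdown",
+   "id": "f2a41c88-1b4e-4c3a-9a0d-6d2c5f7e9b31",
+   "metadata": {},
+   "source": [
+    "Note: the cells below expect a quantized Llama2 model in GGUF format on your machine. One way to prepare it (a rough sketch; the exact commands are in the llama.cpp README and may differ between versions, and the paths here are placeholders) is to download the Llama2 weights and run llama.cpp's convert and quantize tools:\n",
+    "```\n",
+    "python convert.py <path-to-llama-2-13b-chat>   # convert the downloaded weights to a GGUF file\n",
+    "./quantize <converted-gguf-file> ggml-model-q4_0.gguf q4_0   # 4-bit quantization\n",
+    "```\n",
+    "The path to the resulting .gguf file is what gets passed to LlamaCpp below."
+   ]
+  },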
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "2922732e-29e8-4ea7-8828-53364f5bf6fd",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Requirement already satisfied: llama-cpp-python in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (0.2.11)\n",
+      "Requirement already satisfied: typing-extensions>=4.5.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from llama-cpp-python) (4.8.0)\n",
+      "Requirement already satisfied: numpy>=1.20.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from llama-cpp-python) (1.24.4)\n",
+      "Requirement already satisfied: diskcache>=5.6.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from llama-cpp-python) (5.6.3)\n",
+      "Requirement already satisfied: pypdf in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (3.16.2)\n",
+      "Requirement already satisfied: sentence-transformers in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (2.2.2)\n",
+      "Requirement already satisfied: chromadb in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (0.4.13)\n",
+      "Requirement already satisfied: langchain in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (0.0.308)\n",
+      "Requirement already satisfied: typing_extensions>=3.7.4.3 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from pypdf) (4.8.0)\n",
+      "Requirement already satisfied: transformers<5.0.0,>=4.6.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (4.34.0)\n",
+      "Requirement already satisfied: tqdm in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (4.66.1)\n",
+      "Requirement already satisfied: torch>=1.6.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (2.1.0)\n",
+      "Requirement already satisfied: torchvision in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (0.16.0)\n",
+      "Requirement already satisfied: numpy in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (1.24.4)\n",
+      "Requirement already satisfied: scikit-learn in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (1.3.1)\n",
+      "Requirement already satisfied: scipy in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (1.10.1)\n",
+      "Requirement already satisfied: nltk in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (3.8.1)\n",
+      "Requirement already satisfied: sentencepiece in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (0.1.99)\n",
+      "Requirement already satisfied: huggingface-hub>=0.4.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sentence-transformers) (0.16.4)\n",
+      "Requirement already satisfied: requests>=2.28 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (2.31.0)\n",
+      "Requirement already satisfied: pydantic>=1.9 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (2.4.2)\n",
+      "Requirement already satisfied: chroma-hnswlib==0.7.3 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (0.7.3)\n",
+      "Requirement already satisfied: fastapi>=0.95.2 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (0.103.2)\n",
+      "Requirement already satisfied: uvicorn[standard]>=0.18.3 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (0.23.2)\n",
+      "Requirement already satisfied: posthog>=2.4.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (3.0.2)\n",
+      "Requirement already satisfied: pulsar-client>=3.1.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (3.3.0)\n",
+      "Requirement already satisfied: onnxruntime>=1.14.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (1.16.0)\n",
+      "Requirement already satisfied: tokenizers>=0.13.2 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (0.14.0)\n",
+      "Requirement already satisfied: pypika>=0.48.9 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (0.48.9)\n",
+      "Requirement already satisfied: overrides>=7.3.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (7.4.0)\n",
+      "Requirement already satisfied: importlib-resources in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (6.1.0)\n",
+      "Requirement already satisfied: bcrypt>=4.0.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (4.0.1)\n",
+      "Requirement already satisfied: typer>=0.9.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (0.9.0)\n",
+      "Requirement already satisfied: graphlib-backport>=1.0.3 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from chromadb) (1.0.3)\n",
+      "Requirement already satisfied: PyYAML>=5.3 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (6.0.1)\n",
+      "Requirement already satisfied: SQLAlchemy<3,>=1.4 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (2.0.21)\n",
+      "Requirement already satisfied: aiohttp<4.0.0,>=3.8.3 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (3.8.5)\n",
+      "Requirement already satisfied: anyio<4.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (3.7.1)\n",
+      "Requirement already satisfied: async-timeout<5.0.0,>=4.0.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (4.0.3)\n",
+      "Requirement already satisfied: dataclasses-json<0.7,>=0.5.7 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (0.6.1)\n",
+      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (1.33)\n",
+      "Requirement already satisfied: langsmith<0.1.0,>=0.0.40 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (0.0.41)\n",
+      "Requirement already satisfied: tenacity<9.0.0,>=8.1.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from langchain) (8.2.3)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (23.1.0)\n",
+      "Requirement already satisfied: charset-normalizer<4.0,>=2.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (3.3.0)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (6.0.4)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.9.2)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.4.0)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.3.1)\n",
+      "Requirement already satisfied: idna>=2.8 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from anyio<4.0->langchain) (3.4)\n",
+      "Requirement already satisfied: sniffio>=1.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from anyio<4.0->langchain) (1.3.0)\n",
+      "Requirement already satisfied: exceptiongroup in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from anyio<4.0->langchain) (1.1.3)\n",
+      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain) (3.20.1)\n",
+      "Requirement already satisfied: typing-inspect<1,>=0.4.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain) (0.9.0)\n",
+      "Requirement already satisfied: starlette<0.28.0,>=0.27.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from fastapi>=0.95.2->chromadb) (0.27.0)\n",
+      "Requirement already satisfied: filelock in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from huggingface-hub>=0.4.0->sentence-transformers) (3.12.4)\n",
+      "Requirement already satisfied: fsspec in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from huggingface-hub>=0.4.0->sentence-transformers) (2023.9.2)\n",
+      "Requirement already satisfied: packaging>=20.9 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from huggingface-hub>=0.4.0->sentence-transformers) (23.2)\n",
+      "Requirement already satisfied: jsonpointer>=1.9 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from jsonpatch<2.0,>=1.33->langchain) (2.4)\n",
+      "Requirement already satisfied: coloredlogs in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from onnxruntime>=1.14.1->chromadb) (15.0.1)\n",
+      "Requirement already satisfied: flatbuffers in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from onnxruntime>=1.14.1->chromadb) (23.5.26)\n",
+      "Requirement already satisfied: protobuf in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from onnxruntime>=1.14.1->chromadb) (4.24.4)\n",
+      "Requirement already satisfied: sympy in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from onnxruntime>=1.14.1->chromadb) (1.12)\n",
+      "Requirement already satisfied: six>=1.5 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from posthog>=2.4.0->chromadb) (1.16.0)\n",
+      "Requirement already satisfied: monotonic>=1.5 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from posthog>=2.4.0->chromadb) (1.6)\n",
+      "Requirement already satisfied: backoff>=1.10.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from posthog>=2.4.0->chromadb) (2.2.1)\n",
+      "Requirement already satisfied: python-dateutil>2.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from posthog>=2.4.0->chromadb) (2.8.2)\n",
+      "Requirement already satisfied: certifi in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from pulsar-client>=3.1.0->chromadb) (2023.7.22)\n",
+      "Requirement already satisfied: annotated-types>=0.4.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from pydantic>=1.9->chromadb) (0.5.0)\n",
+      "Requirement already satisfied: pydantic-core==2.10.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from pydantic>=1.9->chromadb) (2.10.1)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from requests>=2.28->chromadb) (2.0.6)\n",
+      "Requirement already satisfied: networkx in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from torch>=1.6.0->sentence-transformers) (3.1)\n",
+      "Requirement already satisfied: jinja2 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from torch>=1.6.0->sentence-transformers) (3.1.2)\n",
+      "Requirement already satisfied: regex!=2019.12.17 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from transformers<5.0.0,>=4.6.0->sentence-transformers) (2023.10.3)\n",
+      "Requirement already satisfied: safetensors>=0.3.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from transformers<5.0.0,>=4.6.0->sentence-transformers) (0.3.3)\n",
+      "Requirement already satisfied: click<9.0.0,>=7.1.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from typer>=0.9.0->chromadb) (8.1.7)\n",
+      "Requirement already satisfied: h11>=0.8 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.14.0)\n",
+      "Requirement already satisfied: httptools>=0.5.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.6.0)\n",
+      "Requirement already satisfied: python-dotenv>=0.13 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from uvicorn[standard]>=0.18.3->chromadb) (1.0.0)\n",
+      "Requirement already satisfied: uvloop!=0.15.0,!=0.15.1,>=0.14.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.17.0)\n",
+      "Requirement already satisfied: watchfiles>=0.13 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from uvicorn[standard]>=0.18.3->chromadb) (0.20.0)\n",
+      "Requirement already satisfied: websockets>=10.4 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from uvicorn[standard]>=0.18.3->chromadb) (11.0.3)\n",
+      "Requirement already satisfied: zipp>=3.1.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from importlib-resources->chromadb) (3.17.0)\n",
+      "Requirement already satisfied: joblib in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from nltk->sentence-transformers) (1.3.2)\n",
+      "Requirement already satisfied: threadpoolctl>=2.0.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from scikit-learn->sentence-transformers) (3.2.0)\n",
+      "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from torchvision->sentence-transformers) (10.0.1)\n",
+      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain) (1.0.0)\n",
+      "Requirement already satisfied: humanfriendly>=9.1 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from coloredlogs->onnxruntime>=1.14.1->chromadb) (10.0)\n",
+      "Requirement already satisfied: MarkupSafe>=2.0 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from jinja2->torch>=1.6.0->sentence-transformers) (2.1.3)\n",
+      "Requirement already satisfied: mpmath>=0.19 in /Users/jeffxtang/anaconda3/envs/llama-demo-apps/lib/python3.8/site-packages (from sympy->onnxruntime>=1.14.1->chromadb) (1.3.0)\n"
+     ]
+    }
+   ],
+   "source": [
+    "# install all the required packages for the demo\n",
+    "!CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install llama-cpp-python\n",
+    "!pip install pypdf sentence-transformers chromadb langchain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "26bc4912",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.llms import LlamaCpp\n",
+    "from langchain.chains import LLMChain\n",
+    "from langchain.callbacks.manager import CallbackManager\n",
+    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+    "from langchain.prompts import PromptTemplate"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "01fe5b9c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# for token-wise streaming so you'll see the answer gets generated token by token when Llama is answering your question\n",
+    "callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "dff6aa6b",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "llama_model_loader: loaded meta data with 16 key-value pairs and 363 tensors from /Users/jeffxtang/repos/llama/llama-2-13b-chat/ggml-model-q4_0.gguf (version GGUF V2 (latest))\n",
+      "llama_model_loader: - tensor    0:                token_embd.weight q4_0     [  5120, 32000,     1,     1 ]\n",
+      "llama_model_loader: - tensor    1:               output_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor    2:                    output.weight q6_K     [  5120, 32000,     1,     1 ]\n",
+      "llama_model_loader: - tensor    3:              blk.0.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor    4:              blk.0.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor    5:              blk.0.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor    6:         blk.0.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor    7:            blk.0.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor    8:            blk.0.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor    9:              blk.0.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   10:           blk.0.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   11:            blk.0.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   12:              blk.1.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   13:              blk.1.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   14:              blk.1.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   15:         blk.1.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   16:            blk.1.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   17:            blk.1.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   18:              blk.1.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   19:           blk.1.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   20:            blk.1.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   21:              blk.2.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   22:              blk.2.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   23:              blk.2.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   24:         blk.2.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   25:            blk.2.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   26:            blk.2.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   27:              blk.2.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   28:           blk.2.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   29:            blk.2.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   30:              blk.3.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   31:              blk.3.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   32:              blk.3.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   33:         blk.3.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   34:            blk.3.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   35:            blk.3.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   36:              blk.3.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   37:           blk.3.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   38:            blk.3.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   39:              blk.4.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   40:              blk.4.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   41:              blk.4.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   42:         blk.4.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   43:            blk.4.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   44:            blk.4.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   45:              blk.4.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   46:           blk.4.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   47:            blk.4.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   48:              blk.5.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   49:              blk.5.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   50:              blk.5.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   51:         blk.5.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   52:            blk.5.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   53:            blk.5.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   54:              blk.5.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   55:           blk.5.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   56:            blk.5.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   57:              blk.6.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   58:              blk.6.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   59:              blk.6.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   60:         blk.6.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   61:            blk.6.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   62:            blk.6.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   63:              blk.6.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   64:           blk.6.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   65:            blk.6.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   66:              blk.7.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   67:              blk.7.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   68:              blk.7.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   69:         blk.7.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   70:            blk.7.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   71:            blk.7.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   72:              blk.7.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   73:           blk.7.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   74:            blk.7.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   75:              blk.8.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   76:              blk.8.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   77:              blk.8.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   78:         blk.8.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   79:            blk.8.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   80:            blk.8.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   81:              blk.8.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   82:           blk.8.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   83:            blk.8.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   84:              blk.9.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   85:              blk.9.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   86:              blk.9.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   87:         blk.9.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   88:            blk.9.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   89:            blk.9.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   90:              blk.9.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   91:           blk.9.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   92:            blk.9.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor   93:             blk.10.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   94:             blk.10.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   95:             blk.10.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   96:        blk.10.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   97:           blk.10.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor   98:           blk.10.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor   99:             blk.10.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  100:          blk.10.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  101:           blk.10.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  102:             blk.11.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  103:             blk.11.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  104:             blk.11.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  105:        blk.11.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  106:           blk.11.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  107:           blk.11.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  108:             blk.11.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  109:          blk.11.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  110:           blk.11.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  111:             blk.12.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  112:             blk.12.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  113:             blk.12.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  114:        blk.12.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  115:           blk.12.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  116:           blk.12.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  117:             blk.12.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  118:          blk.12.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  119:           blk.12.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  120:             blk.13.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  121:             blk.13.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  122:             blk.13.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  123:        blk.13.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  124:           blk.13.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  125:           blk.13.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  126:             blk.13.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  127:          blk.13.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  128:           blk.13.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  129:             blk.14.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  130:             blk.14.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  131:             blk.14.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  132:        blk.14.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  133:           blk.14.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  134:           blk.14.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  135:             blk.14.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  136:          blk.14.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  137:           blk.14.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  138:             blk.15.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  139:             blk.15.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  140:             blk.15.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  141:        blk.15.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  142:           blk.15.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  143:           blk.15.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  144:             blk.15.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  145:          blk.15.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  146:           blk.15.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  147:             blk.16.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  148:             blk.16.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  149:             blk.16.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  150:        blk.16.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  151:           blk.16.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  152:           blk.16.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  153:             blk.16.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  154:          blk.16.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  155:           blk.16.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  156:             blk.17.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  157:             blk.17.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  158:             blk.17.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  159:        blk.17.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  160:           blk.17.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  161:           blk.17.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  162:             blk.17.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  163:          blk.17.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  164:           blk.17.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  165:             blk.18.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  166:             blk.18.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  167:             blk.18.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  168:        blk.18.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  169:           blk.18.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  170:           blk.18.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  171:             blk.18.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  172:          blk.18.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  173:           blk.18.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  174:             blk.19.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  175:             blk.19.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  176:             blk.19.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  177:        blk.19.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  178:           blk.19.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  179:           blk.19.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  180:             blk.19.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  181:          blk.19.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  182:           blk.19.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  183:             blk.20.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  184:             blk.20.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  185:             blk.20.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  186:        blk.20.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  187:           blk.20.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  188:           blk.20.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  189:             blk.20.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  190:          blk.20.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  191:           blk.20.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  192:             blk.21.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  193:             blk.21.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  194:             blk.21.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  195:        blk.21.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  196:           blk.21.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  197:           blk.21.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  198:             blk.21.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  199:          blk.21.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  200:           blk.21.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  201:             blk.22.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  202:             blk.22.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  203:             blk.22.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  204:        blk.22.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  205:           blk.22.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  206:           blk.22.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  207:             blk.22.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  208:          blk.22.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  209:           blk.22.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  210:             blk.23.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  211:             blk.23.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  212:             blk.23.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  213:        blk.23.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  214:           blk.23.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  215:           blk.23.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  216:             blk.23.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  217:          blk.23.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  218:           blk.23.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  219:             blk.24.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  220:             blk.24.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  221:             blk.24.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  222:        blk.24.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  223:           blk.24.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  224:           blk.24.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  225:             blk.24.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  226:          blk.24.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  227:           blk.24.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  228:             blk.25.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  229:             blk.25.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  230:             blk.25.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  231:        blk.25.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  232:           blk.25.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  233:           blk.25.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  234:             blk.25.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  235:          blk.25.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  236:           blk.25.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  237:             blk.26.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  238:             blk.26.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  239:             blk.26.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  240:        blk.26.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  241:           blk.26.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  242:           blk.26.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  243:             blk.26.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  244:          blk.26.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  245:           blk.26.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  246:             blk.27.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  247:             blk.27.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  248:             blk.27.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  249:        blk.27.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  250:           blk.27.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  251:           blk.27.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  252:             blk.27.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  253:          blk.27.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  254:           blk.27.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  255:             blk.28.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  256:             blk.28.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  257:             blk.28.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  258:        blk.28.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  259:           blk.28.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  260:           blk.28.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  261:             blk.28.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  262:          blk.28.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  263:           blk.28.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  264:             blk.29.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  265:             blk.29.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  266:             blk.29.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  267:        blk.29.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  268:           blk.29.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  269:           blk.29.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  270:             blk.29.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  271:          blk.29.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  272:           blk.29.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  273:             blk.30.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  274:             blk.30.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  275:             blk.30.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  276:        blk.30.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  277:           blk.30.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  278:           blk.30.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  279:             blk.30.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  280:          blk.30.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  281:           blk.30.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  282:             blk.31.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  283:             blk.31.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  284:             blk.31.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  285:        blk.31.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  286:           blk.31.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  287:           blk.31.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  288:             blk.31.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  289:          blk.31.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  290:           blk.31.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  291:             blk.32.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  292:             blk.32.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  293:             blk.32.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  294:        blk.32.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  295:           blk.32.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  296:           blk.32.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  297:             blk.32.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  298:          blk.32.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  299:           blk.32.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  300:             blk.33.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  301:             blk.33.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  302:             blk.33.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  303:        blk.33.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  304:           blk.33.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  305:           blk.33.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  306:             blk.33.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  307:          blk.33.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  308:           blk.33.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  309:             blk.34.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  310:             blk.34.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  311:             blk.34.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  312:        blk.34.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  313:           blk.34.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  314:           blk.34.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  315:             blk.34.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  316:          blk.34.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  317:           blk.34.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  318:             blk.35.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  319:             blk.35.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  320:             blk.35.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  321:        blk.35.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  322:           blk.35.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  323:           blk.35.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  324:             blk.35.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  325:          blk.35.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  326:           blk.35.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  327:             blk.36.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  328:             blk.36.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  329:             blk.36.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  330:        blk.36.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  331:           blk.36.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  332:           blk.36.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  333:             blk.36.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  334:          blk.36.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  335:           blk.36.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  336:             blk.37.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  337:             blk.37.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  338:             blk.37.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  339:        blk.37.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  340:           blk.37.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  341:           blk.37.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  342:             blk.37.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  343:          blk.37.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  344:           blk.37.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  345:             blk.38.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  346:             blk.38.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  347:             blk.38.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  348:        blk.38.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  349:           blk.38.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  350:           blk.38.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  351:             blk.38.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  352:          blk.38.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  353:           blk.38.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  354:             blk.39.attn_q.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  355:             blk.39.attn_k.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  356:             blk.39.attn_v.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  357:        blk.39.attn_output.weight q4_0     [  5120,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  358:           blk.39.ffn_gate.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  359:           blk.39.ffn_down.weight q4_0     [ 13824,  5120,     1,     1 ]\n",
+      "llama_model_loader: - tensor  360:             blk.39.ffn_up.weight q4_0     [  5120, 13824,     1,     1 ]\n",
+      "llama_model_loader: - tensor  361:          blk.39.attn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - tensor  362:           blk.39.ffn_norm.weight f32      [  5120,     1,     1,     1 ]\n",
+      "llama_model_loader: - kv   0:                       general.architecture str     \n",
+      "llama_model_loader: - kv   1:                               general.name str     \n",
+      "llama_model_loader: - kv   2:                       llama.context_length u32     \n",
+      "llama_model_loader: - kv   3:                     llama.embedding_length u32     \n",
+      "llama_model_loader: - kv   4:                          llama.block_count u32     \n",
+      "llama_model_loader: - kv   5:                  llama.feed_forward_length u32     \n",
+      "llama_model_loader: - kv   6:                 llama.rope.dimension_count u32     \n",
+      "llama_model_loader: - kv   7:                 llama.attention.head_count u32     \n",
+      "llama_model_loader: - kv   8:              llama.attention.head_count_kv u32     \n",
+      "llama_model_loader: - kv   9:     llama.attention.layer_norm_rms_epsilon f32     \n",
+      "llama_model_loader: - kv  10:                          general.file_type u32     \n",
+      "llama_model_loader: - kv  11:                       tokenizer.ggml.model str     \n",
+      "llama_model_loader: - kv  12:                      tokenizer.ggml.tokens arr     \n",
+      "llama_model_loader: - kv  13:                      tokenizer.ggml.scores arr     \n",
+      "llama_model_loader: - kv  14:                  tokenizer.ggml.token_type arr     \n",
+      "llama_model_loader: - kv  15:               general.quantization_version u32     \n",
+      "llama_model_loader: - type  f32:   81 tensors\n",
+      "llama_model_loader: - type q4_0:  281 tensors\n",
+      "llama_model_loader: - type q6_K:    1 tensors\n",
+      "llm_load_print_meta: format           = GGUF V2 (latest)\n",
+      "llm_load_print_meta: arch             = llama\n",
+      "llm_load_print_meta: vocab type       = SPM\n",
+      "llm_load_print_meta: n_vocab          = 32000\n",
+      "llm_load_print_meta: n_merges         = 0\n",
+      "llm_load_print_meta: n_ctx_train      = 4096\n",
+      "llm_load_print_meta: n_embd           = 5120\n",
+      "llm_load_print_meta: n_head           = 40\n",
+      "llm_load_print_meta: n_head_kv        = 40\n",
+      "llm_load_print_meta: n_layer          = 40\n",
+      "llm_load_print_meta: n_rot            = 128\n",
+      "llm_load_print_meta: n_gqa            = 1\n",
+      "llm_load_print_meta: f_norm_eps       = 0.0e+00\n",
+      "llm_load_print_meta: f_norm_rms_eps   = 1.0e-05\n",
+      "llm_load_print_meta: n_ff             = 13824\n",
+      "llm_load_print_meta: freq_base_train  = 10000.0\n",
+      "llm_load_print_meta: freq_scale_train = 1\n",
+      "llm_load_print_meta: model type       = 13B\n",
+      "llm_load_print_meta: model ftype      = mostly Q4_0\n",
+      "llm_load_print_meta: model params     = 13.02 B\n",
+      "llm_load_print_meta: model size       = 6.86 GiB (4.53 BPW) \n",
+      "llm_load_print_meta: general.name   = LLaMA v2\n",
+      "llm_load_print_meta: BOS token = 1 '<s>'\n",
+      "llm_load_print_meta: EOS token = 2 '</s>'\n",
+      "llm_load_print_meta: UNK token = 0 '<unk>'\n",
+      "llm_load_print_meta: LF token  = 13 '<0x0A>'\n",
+      "llm_load_tensors: ggml ctx size =    0.12 MB\n",
+      "llm_load_tensors: mem required  = 7024.01 MB\n",
+      "...................................................................................................\n",
+      "llama_new_context_with_model: n_ctx      = 6000\n",
+      "llama_new_context_with_model: freq_base  = 10000.0\n",
+      "llama_new_context_with_model: freq_scale = 1\n",
+      "llama_new_context_with_model: kv self size  = 4687.50 MB\n",
+      "llama_new_context_with_model: compute buffer total size = 13.85 MB\n",
+      "AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | \n"
+     ]
+    }
+   ],
+   "source": [
+    "# create the Llama2 model - for more info see https://python.langchain.com/docs/integrations/llms/llamacpp\n",
+    "llm = LlamaCpp(\n",
+    "    model_path=\"<path-to-ggml-model-q4_0.gguf>\"\n",
+    "    temperature=0.0,\n",
+    "    top_p=1,\n",
+    "    n_ctx=6000,\n",
+    "    callback_manager=callback_manager, \n",
+    "    verbose=True,\n",
+    ")"
+   ]
+  },
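+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a0f1b2c3-gpu-offload-sketch",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# optional - a minimal sketch, commented out so the demo isn't forced to load the model a second time:\n",
+    "# if your llama-cpp-python build has Metal (Apple Silicon) support, you can offload model layers to the GPU\n",
+    "# with n_gpu_layers for faster inference - uncomment to try it; the names below mirror the cell above,\n",
+    "# see https://python.langchain.com/docs/integrations/llms/llamacpp for details\n",
+    "# llm_gpu = LlamaCpp(\n",
+    "#     model_path=\"<path-to-ggml-model-q4_0.gguf>\",\n",
+    "#     n_gpu_layers=1,\n",
+    "#     n_batch=512,\n",
+    "#     n_ctx=6000,\n",
+    "#     temperature=0.0,\n",
+    "#     top_p=1,\n",
+    "#     callback_manager=callback_manager,\n",
+    "#     verbose=True,\n",
+    "# )"
+   ]
+  },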
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "0e78549c-9c93-4bc2-b525-38d578a94fae",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\n",
+      "The book \"The Innovator's Dilemma\" was written by Clayton Christensen, a professor at Harvard Business School. It was first published in 1997 and has since become a widely influential book on business strategy and innovation."
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "llama_print_timings:        load time =  1202.24 ms\n",
+      "llama_print_timings:      sample time =    46.44 ms /    58 runs   (    0.80 ms per token,  1249.03 tokens per second)\n",
+      "llama_print_timings: prompt eval time =  1815.15 ms /    15 tokens (  121.01 ms per token,     8.26 tokens per second)\n",
+      "llama_print_timings:        eval time =  5582.64 ms /    57 runs   (   97.94 ms per token,    10.21 tokens per second)\n",
+      "llama_print_timings:       total time =  7545.78 ms\n"
+     ]
+    }
+   ],
+   "source": [
+    "# the simplest way to ask Llama some general questions\n",
+    "question = \"who wrote the book Innovator's dilemma?\"\n",
+    "answer = llm(question)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "f7305c5b-6f55-4664-9206-2d7467653498",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Llama.generate: prefix-match hit\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\n",
+      "Clayton Christensen is the author of \"The Innovator's Dilemma,\" which was first published in 1997. The book explores why successful companies often struggle to adapt to disruptive technologies and business models that ultimately lead to their downfall. Christensen argues that these companies are faced with a dilemma because they have invested so heavily in their existing products and processes that it is difficult for them to pivot and embrace new, disruptive technologies. He also introduces the concept of \"disruptive innovation,\" which he defines as a process by which a small company with limited resources is able to successfully challenge established industry leaders."
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "llama_print_timings:        load time =  1202.24 ms\n",
+      "llama_print_timings:      sample time =   116.69 ms /   147 runs   (    0.79 ms per token,  1259.79 tokens per second)\n",
+      "llama_print_timings: prompt eval time =  1180.31 ms /     8 tokens (  147.54 ms per token,     6.78 tokens per second)\n",
+      "llama_print_timings:        eval time = 13192.98 ms /   147 runs   (   89.75 ms per token,    11.14 tokens per second)\n",
+      "llama_print_timings:       total time = 14746.13 ms\n"
+     ]
+    }
+   ],
+   "source": [
+    "# a more flexible way to ask Llama general questions using LangChain's PromptTemplate and LLMChain\n",
+    "prompt = PromptTemplate.from_template(\n",
+    "    \"who wrote {book}?\"\n",
+    ")\n",
+    "chain = LLMChain(llm=llm, prompt=prompt)\n",
+    "answer = chain.run(\"innovator's dilemma\")"
+   ]
+  },
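+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c1d2e3f4-chain-reuse-example",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# a quick usage example: because the prompt is a template, the same chain can be reused for any book title\n",
+    "# just by changing the input (\"the lean startup\" here is an arbitrary example)\n",
+    "answer = chain.run(\"the lean startup\")"
+   ]
+  },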
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "8ba66a29-77e9-4149-9523-63a09545584e",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Llama.generate: prefix-match hit\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\n",
+      "Llama2 is a free, open-source tool for generating high-quality, randomized test data for software applications. It is designed to be easy to use and to produce realistic, diverse test data that can help you identify and fix bugs in your application before it is released.\n",
+      "\n",
+      "Llama2 is the successor to the popular Llama tool, and it includes many new features and improvements over its predecessor. Some of the key features of Llama2 include:\n",
+      "\n",
+      "* Support for a wide range of data types, including strings, numbers, dates, and more\n",
+      "* The ability to generate random data based on user-defined rules and constraints\n",
+      "* A powerful and flexible API that allows you to customize and extend the tool to meet your specific needs\n",
+      "* Integration with popular testing frameworks and tools, such as JUnit and TestNG\n",
+      "* Support for a variety of programming languages, including Java, Python, C#, and more.\n",
+      "\n",
+      "Overall, Llama2 is a powerful and flexible tool that can help you improve the quality and reliability of your software applications by generating realistic and diverse test data."
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "llama_print_timings:        load time =  1202.24 ms\n",
+      "llama_print_timings:      sample time =   191.25 ms /   240 runs   (    0.80 ms per token,  1254.87 tokens per second)\n",
+      "llama_print_timings: prompt eval time =   480.79 ms /     6 tokens (   80.13 ms per token,    12.48 tokens per second)\n",
+      "llama_print_timings:        eval time = 22013.19 ms /   239 runs   (   92.11 ms per token,    10.86 tokens per second)\n",
+      "llama_print_timings:       total time = 23111.55 ms\n"
+     ]
+    }
+   ],
+   "source": [
+    "# let's see how Llama2 hallucinates, because it doesn't have the knowledge about Llama2 while the model was trained, \n",
+    "# but by default it behaves like a know-it-all expert who can't afford to say I don't know\n",
+    "prompt = PromptTemplate.from_template(\n",
+    "    \"What is {what}?\"\n",
+    ")\n",
+    "chain = LLMChain(llm=llm, prompt=prompt)\n",
+    "answer = chain.run(\"llama2\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "f3ebc261",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# to fix the LLM's hallucination, one way is to use RAG, to augment it with more recent or custom data that holds the info for it to answer correctly\n",
+    "# first load the Llama2 paper via the LangChain's PDF loader\n",
+    "from langchain.document_loaders import PyPDFLoader\n",
+    "loader = PyPDFLoader(\"llama2.pdf\")\n",
+    "documents = loader.load()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "302eaa54",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "77 Llama 2 : Open Foundation and Fine-Tuned Chat Models\n",
+      "Hugo Touvron∗Louis Martin†Kevin Stone†\n",
+      "Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra\n",
+      "Prajjwal Bhargava Shruti Bhosale Dan Bikel Lukas Blecher Cristian Canton Ferrer Moya Chen\n",
+      "Guillem Cucurull David Esiobu Jude Fernande\n"
+     ]
+    }
+   ],
+   "source": [
+    "# quick check on the loaded document for the correct pages etc\n",
+    "print(len(documents), documents[0].page_content[0:300])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "4f94f6f8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# there're more 30 vector stores (DBs) supported by LangChain. Chroma is light-weight and in memory so it's easy to get started with\n",
+    "# other vector stores can be used to store large amount of data - see https://python.langchain.com/docs/integrations/vectorstores\n",
+    "from langchain.vectorstores import Chroma\n",
+    "\n",
+    "# embeddings are numerical representations of the question and answer text\n",
+    "from langchain.embeddings import HuggingFaceEmbeddings\n",
+    "\n",
+    "# use a common text splitter to split text into chunks\n",
+    "from langchain.text_splitter import RecursiveCharacterTextSplitter"
+   ]
+  },
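+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d4e5f6a7-splitter-illustration",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# a tiny illustration (an optional sketch): the splitter breaks long text into chunks of at most chunk_size characters,\n",
+    "# with chunk_overlap characters shared between consecutive chunks to preserve context across boundaries\n",
+    "demo_splitter = RecursiveCharacterTextSplitter(chunk_size=40, chunk_overlap=10)\n",
+    "print(demo_splitter.split_text(\"Llama 2 is a collection of pretrained and fine-tuned large language models.\"))"
+   ]
+  },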
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "2b101485",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# split the loaded documents into chunks \n",
+    "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=20)\n",
+    "all_splits = text_splitter.split_documents(documents)\n",
+    "\n",
+    "# create the vector db to store all the split chunks as embeddings\n",
+    "embeddings = HuggingFaceEmbeddings()\n",
+    "vectordb = Chroma.from_documents(\n",
+    "    documents=all_splits,\n",
+    "    embedding=embeddings,\n",
+    ")"
+   ]
+  },
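+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e5f6a7b8-vectordb-sanity-check",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# an optional sanity check (a sketch): each chunk is stored as an embedding vector, and a similarity search\n",
+    "# returns the chunks closest to a query - these are what will be passed to Llama as context in the next step\n",
+    "print(len(embeddings.embed_query(\"What is llama2?\")))\n",
+    "docs = vectordb.similarity_search(\"What is llama2?\", k=2)\n",
+    "for doc in docs:\n",
+    "    print(doc.page_content[:200])"
+   ]
+  },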
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "1a2472c9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Llama.generate: prefix-match hit\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      " Llama 2 is a new language model developed by Meta AI that has been released openly to encourage responsible AI innovation. It is a fine-tuned version of the original Llama model and is optimized for dialogue use cases. The model has not covered all scenarios and may produce inaccurate or objectionable responses, so developers should perform safety testing and tuning before deploying any applications of Llama 2."
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "llama_print_timings:        load time =  1202.24 ms\n",
+      "llama_print_timings:      sample time =    76.83 ms /    97 runs   (    0.79 ms per token,  1262.48 tokens per second)\n",
+      "llama_print_timings: prompt eval time = 97067.98 ms /  1146 tokens (   84.70 ms per token,    11.81 tokens per second)\n",
+      "llama_print_timings:        eval time = 10431.81 ms /    96 runs   (  108.66 ms per token,     9.20 tokens per second)\n",
+      "llama_print_timings:       total time = 107897.31 ms\n"
+     ]
+    }
+   ],
+   "source": [
+    "# use another LangChain's chain, RetrievalQA, to associate Llama with the loaded documents stored in the vector db\n",
+    "from langchain.chains import RetrievalQA\n",
+    "\n",
+    "qa_chain = RetrievalQA.from_chain_type(\n",
+    "    llm,\n",
+    "    retriever=vectordb.as_retriever()\n",
+    ")\n",
+    "\n",
+    "# for each question, LangChain performs a semantic similarity search of it in the vector db, then passes the search results as the context\n",
+    "# the Llama to answer question about the data stored in the verctor db\n",
+    "question = \"What is llama2?\"\n",
+    "result = qa_chain({\"query\": question})\n",
+    "# it takes close to 2 minutes to return the result (but using other vector store than Chroma such as FAISS can take longer), because \n",
+    "# Llama2 is running on a local Mac. To get much faster results, you can use a cloud service with GPU used for inference - see HelloLlamaCloud \n",
+    "# for a demo."
+   ]
+  },
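+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b9c0d1e2-rag-sources-sketch",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# a sketch of a useful variant (not run above): RetrievalQA can also return the retrieved chunks, so you can verify\n",
+    "# which parts of the Llama2 paper grounded the answer - just set return_source_documents=True\n",
+    "qa_chain_with_sources = RetrievalQA.from_chain_type(\n",
+    "    llm,\n",
+    "    retriever=vectordb.as_retriever(),\n",
+    "    return_source_documents=True\n",
+    ")\n",
+    "result = qa_chain_with_sources({\"query\": \"What is llama2?\"})\n",
+    "print(result[\"result\"])\n",
+    "print(result[\"source_documents\"][0].page_content[:200])"
+   ]
+  },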
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "dd2e62a4-6ea2-4ea7-b7ae-800185177e6c",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.18"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/llama-demo-apps/llama2.pdf b/llama-demo-apps/llama2.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..53a1c03913bd273548ab6606f948710076fffcc7
Binary files /dev/null and b/llama-demo-apps/llama2.pdf differ