diff --git a/apps/docs/docusaurus.config.js b/apps/docs/docusaurus.config.js
index 35cbe5aa02425e535859e7c4812ede9dabf2dc44..50cb5cb0d4157d1a6a27737b0d9d47a1ae5ceb03 100644
--- a/apps/docs/docusaurus.config.js
+++ b/apps/docs/docusaurus.config.js
@@ -29,7 +29,23 @@ const config = {
   // to replace "en" with "zh-Hans".
   i18n: {
     defaultLocale: "en",
-    locales: ["en", "fr", "zh-Hans"],
+    locales: [
+      "en",
+      "zh-Hans",
+      "es",
+      "fr",
+      "de",
+      "ja",
+      "ko",
+      "pt",
+      "ar",
+      "it",
+      "tr",
+      "pl",
+      "nl",
+      "vi",
+      "th",
+    ], // "fa", "ru", "ro", "sv", "hu", "cs", "el", "da", "fi", "he", "no", "hi", "in", "sl", "se", "sk", "uk", "bg", "hr", "lt", "lv", "et", "cat"
   },
 
   presets: [
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..f4efaafb4f64d489ff508dfe5da841b6a812549e
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two-stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to help the LLM respond to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both stages super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index it into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, computes vector embeddings, and stores your data in memory or to disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes it to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that lets you ask questions over your data.
+It takes in a natural language query and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..cf105ecf43028e8b3a184355ba578e48f6a134c7
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,61 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below, or try them out and complete them in minutes with interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top k most relevant nodes. By default, the top k is 2.
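+
+To change how many nodes are fetched, a minimal sketch using the retriever pattern from the low-level docs (the value 5 is illustrative):
+
+```typescript
+const retriever = index.asRetriever();
+retriever.similarityTopK = 5; // fetch the 5 most similar nodes instead of 2
+const nodesWithScore = await retriever.retrieve("query string");
+```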
+
+"
+
+## [مؤشر الملخص](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+إنشاء مؤشر قائمة واستعلامه. يستخدم هذا المثال أيضًا `LLMRetriever` ، الذي سيستخدم LLM لتحديد أفضل العقد لاستخدامها عند إنشاء الإجابة.
+
+"
+
+## [حفظ / تحميل فهرس](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+إنشاء وتحميل فهرس ناقل. يحدث التخزين المؤقت على القرص تلقائيًا في LlamaIndex.TS بمجرد إنشاء كائن سياق التخزين.
+
+"
+
+## [فهرس الناقل المخصص](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+إنشاء فهرس ناقل واستعلامه، مع تكوين `LLM` و `ServiceContext` و `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+أنشئ OpenAI LLM واستخدمه مباشرة للدردشة.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+إنشاء Llama-2 LLM واستخدامه مباشرة للدردشة.
+
+"
+
+## [محرك استعلام الأسئلة الفرعية](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+يستخدم `محرك استعلام الأسئلة الفرعية` الذي يقسم الاستعلامات المعقدة إلى أسئلة فرعية متعددة، ثم يجمع الاستجابة عبر الإجابات على جميع الأسئلة الفرعية.
+
+"
+
+## [وحدات منخفضة المستوى](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+يستخدم هذا المثال العديد من المكونات منخفضة المستوى، مما يزيل الحاجة إلى محرك استعلام فعلي. يمكن استخدام هذه المكونات في أي مكان، في أي تطبيق، أو تخصيصها وتصنيفها الفرعي لتلبية احتياجاتك الخاصة.
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..9b3ea65e0aafccf252c1ffe848074d6fbd3b5da9
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..84aeb912b9ed5c430387e444d9a70ad7eb048c29
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+Make sure you have NodeJS v18 or higher.
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
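+
+If you prefer a file-based setup, one common pattern (an assumption on our part, not something these docs prescribe) is to keep the key in a `.env` file and load it with the `dotenv` package:
+
+```ts
+// index.ts - loads OPENAI_API_KEY from a .env file into process.env
+// (remember to add .env to .gitignore)
+import "dotenv/config";
+```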
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..58ae7ee36e8c677cd4f3be431c4ebd39fe6b6aa1
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for use with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading**: ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly.
+- **Data indexes**: structure your data in intermediate representations that are easy and performant for LLMs to consume.
+- **Engines**: provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indexes, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..caa7c25c5dba90a9bad7b788e0b687b968096944
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const query = "Summarize the document"; // illustrative question
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..fbe34dbf98969668ae8ea26f5233d0f4d31ba566
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7a6ca993259c93eabab21a7df1716facc6dff0d
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..e99e40c7708336e8ff39b98103da19d0ad42db0f
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..89ad1101168e70f062fc8b3367c9ec8a0111e732
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompting technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is running the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on QueryEngineTool, which, as you guessed, is a tool to run queries on a QueryEngine. This allows us to give the model the option to query different documents for different questions, for example. You could also imagine the Sub Question Query Engine using a Tool that searches for something on the web, or gets an answer using Wolfram Alpha.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
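+
+As a rough sketch of wiring this up (modeled on the repository's subquestion.ts example; the tool `name`, `description`, and sample text are illustrative placeholders):
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "An essay covering two unrelated topics." }),
+]);
+
+// Each QueryEngineTool is one executable option for the LLM,
+// described by a name and a natural-language description.
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "essay_tool",
+        description: "Answers questions about the loaded essay",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query(
+  "Compare and contrast the two topics in the essay",
+);
+```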
+
+"
+
+## مرجع واجهة برمجة التطبيق (API)
+
+- [محرك استعلام الاسترجاع (RetrieverQueryEngine)](../../api/classes/RetrieverQueryEngine.md)
+- [محرك استعلام السؤال الفرعي (SubQuestionQueryEngine)](../../api/classes/SubQuestionQueryEngine.md)
+- [أداة محرك الاستعلام (QueryEngineTool)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..6735d127a4e8feb1943059749ad7f562b2809af0
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started, and low-level modules for customizing key components as needed.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and the LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your index and queries. We currently support plain text files and PDFs, with many more to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those Nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what take the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected Nodes from your index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. Currently, while we do have a default, there's no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you'll want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
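+
+As a hedged sketch of composing a few of these low-level modules by hand (mostly using calls shown in the low-level module docs, and assuming `ResponseSynthesizer` accepts a `serviceContext` option; see the repository's lowlevel.ts example for the canonical version):
+
+```typescript
+import {
+  Document,
+  OpenAI,
+  ResponseSynthesizer,
+  SimpleNodeParser,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// Split a Document into Nodes by hand instead of letting an index do it.
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "The quick brown fox jumped over the lazy dog." }),
+]);
+
+// A ServiceContext bundles the LLM (and embedding model) used downstream.
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
+});
+
+// Feed the nodes straight to a ResponseSynthesizer - no query engine needed.
+const responseSynthesizer = new ResponseSynthesizer({ serviceContext });
+const response = await responseSynthesizer.synthesize(
+  "What did the fox do?",
+  nodes.map((node) => ({ node, score: 1 })),
+);
+console.log(response.response);
+```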
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1078207a9601ae6a5a04aadc5045b4b68e32f3f
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..36e1473616aac5564caa91f173c9b56a6bc31778
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7bf92611b04fde20ff5d84c8ac58837c71c51a1
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this automatically for you. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..70afd36c8ea5d5a4a78c03a2c17c4bc3f068d146
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response (a sketch of selecting one explicitly follows the example below):
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to stuff in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `Refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "ما هو عمري؟",
+  nodesWithScore,
+);
+console.log(response.response);
+```
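+
+To select one of the modes above explicitly, a sketch (assuming the response builder classes listed in the API reference below take the `ServiceContext` as a constructor argument):
+
+```typescript
+import {
+  ResponseSynthesizer,
+  TreeSummarize,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+
+// Build a summary tree over the chunks instead of the default CompactAndRefine.
+const responseSynthesizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+});
+```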
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..01106a849124cd3573251461c059d08a77a4f5c4
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes, while a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c02b5d4fb846572e55f73a0892a399a462fa5b8
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you've configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
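+
+To reload a persisted index later without re-computing embeddings, one approach (an assumption based on the storageContext.ts example linked from the end-to-end docs, not an API shown on this page) is:
+
+```typescript
+// Re-create a storage context pointing at the same persistDir,
+// then initialize the index from what's already on disk.
+const loadedIndex = await VectorStoreIndex.init({ storageContext });
+```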
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..cf79fc6310eacd55c8c68b78ff4e46ada3a406eb
--- /dev/null
+++ b/apps/docs/i18n/ar/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+Once you've [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("What did the author do in college?");
+
+  // Output response
+  console.log(response.toString());
+}
+
+main();
+```
+
+Then you can run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. You can find the source at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..9aa3f484e2a8f8e51d3a8792b03f6506f89c19be
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two-stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to help the LLM respond to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both stages super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index it into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, computes vector embeddings, and stores your data in memory or to disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes it to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that lets you ask questions over your data.
+It takes in a natural language query and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..8c9b4061e7d689372c1747b89705523ef139f41c
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,57 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below, or try them out and complete them in minutes with interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all sub-questions.
+
+## [Low-Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..f8d0b00bef5bb3518c2ac8570bec6b450cd3427f
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d8e216b432b237662d54e52b0ddaa97531de6021
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+Make sure you have NodeJS v18 or higher.
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..52693905ea48bc4d30e0a90718a71083945db1d4
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,62 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for use with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading**: ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly.
+- **Data indexes**: structure your data in intermediate representations that are easy and performant for LLMs to consume.
+- **Engines**: provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indexes, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
new file mode 100644
index 0000000000000000000000000000000000000000..ec70241e2b9181c8f15ba0917565047e6471e35f
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const query = "Summarize the document"; // illustrative question
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..92df48a01424d223c4594cf82633fd6fa614c7f5
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..c3dc17cbb671c81bfd72e45bfd47b84b9a5ed854
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..f46809bf29046ff6f3e31852bbda6a159174a56f
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Feel free to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+document = new Document({ text: "текст", metadata: { key: "val" } });
+```
+
+## API Референция
+
+- [Документ](../../api/classes/Document.md)
+- [ТекстовВъзел](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb17ec73c7aaf7024ec50c672a31fe2ec3efc42e
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that uses the query string to fetch nodes and then sends them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as a "think this through step by step" technique, but one that iterates over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our SubQuestionQueryEngine relies on QueryEngineTool, which, as you guessed, is a tool for running queries on a QueryEngine. This allows us to give the model the option to query different documents for different questions, for example. You could also imagine that the SubQuestionQueryEngine could use a Tool that searches the web for something or fetches an answer using Wolfram Alpha.
+
+You can learn more about Tools by looking at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
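+
+As a rough sketch of how this fits together (assuming an existing `vectorQueryEngine`; the tool name and description are illustrative):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Wrap an existing query engine as a tool the model can choose to call.
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: vectorQueryEngine,
+      metadata: {
+        name: "essay_tool",
+        description: "Useful for answering questions about the essay",
+      },
+    },
+  ],
+});
+
+// Each generated sub-question is routed to the tool above.
+const response = await subQuestionEngine.query(
+  "How does the first paragraph compare to the last?",
+);
+```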
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..5717a4388251a918e264f65f044d227b7e5257a7
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing the key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your index and queries. We currently support plain text files and PDFs, with many more to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each node. We provide a number of utilities for working with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of embedding search. Currently, while we do have a default, there is no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The TextSplitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all of the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you will want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..aa265a5a17350cbd183711edcc56ee5d1347bbc8
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..c65d00221c4c56e1b8bcc5cda50084a16fe1467a
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..c072adef0ef142d6519c0514b23c019f7a825f89
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..2564620622b44fc32dc5f422bfc3c3bf31ae60c0
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to fit in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `Refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "What age am I?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..d359dfa5e4bda225ca4a2af2ea1eac1ef4d15a91
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..bebf4bbacfda734b945319c18372a5ee87f1a888
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import {
+  Document,
+  VectorStoreIndex,
+  storageContextFromDefaults,
+} from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
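+
+Loading the index back later is a matter of pointing a fresh storage context at the same directory. A minimal sketch (the `VectorStoreIndex.init` call here is illustrative; it loads the persisted data instead of re-embedding):
+
+```typescript
+// In a later run: reuse the same persistDir to load the stored index.
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+```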
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..f0e33c95e7c1d540eb25081b9631160e490b8224
--- /dev/null
+++ b/apps/docs/i18n/bg/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you are ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load the essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create a Document object with the essay
+  const document = new Document({ text: essay });
+
+  // Split the text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "What did the author do in college?",
+  );
+
+  // Output the response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..f948a50bdc539d1fa9f3c2162927961d6a037940
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It is a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you have ingested your data, LlamaIndex helps you index the data into a format that is easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or to disk.
+
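+As a minimal sketch of the indexing stage (mirroring the starter tutorial; the document text is placeholder content):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Wrap raw text in a Document, then index it. fromDocuments parses
+// the document into Nodes, computes embeddings, and stores them.
+const document = new Document({ text: "..." });
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+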
+"
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+"
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that lets you ask questions over your data.
+It takes in a natural language query and returns a response, together with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
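+
+A minimal sketch of both pipelines over the index built above (construction details are covered in the linked module pages):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Query engine: a single question and answer.
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("What is this document about?");
+
+// Chat engine: a multi-turn conversation over the same data.
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("Tell me more about that.");
+```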
+
+"
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..545830b1bb8cf305bcdc6827cfbd21e7ee8e2e53
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,59 @@
+---
+sidebar_position: 4
+---
+
+# End to End Examples
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+"
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
+"
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all of them.
+
+"
+
+## [Low Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and subclassed to meet your own needs.
+
+"
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..b80869294c700a95d67b8d64ba63bf4be1ec452c
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you are using NextJS App Router route handlers/serverless functions, you will need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you will need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..804eeb13521464fdaafe336bb5442de1e532f22a
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+
+Make sure you have NodeJS v18 or higher.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You will need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..70b7f9558f4360a9a22f24f4db11d7a543c2e48b
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It is behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That is where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading** to ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly.
+- **Data indexes** to structure your data in intermediate representations that are easy and performant for LLMs to consume.
+- **Engines** that provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indexes, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you are up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..5b998dde08f28260510e2abaf28f5b2e31d9fd1c
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..433b8714b242c9aae31dba3a666c919b9e3235b2
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1d9bbe15903349b0b703f0019bace28b68a1979
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. It currently supports `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..6b3fdc63696a3cb72609966efcf43d3437e95a01
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..6678e71aaa38e7cb5c091eb6185db116e92775aa
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as a "think this through step by step" technique, but one that iterates over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Tools
+
+The SubQuestionQueryEngine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our SubQuestionQueryEngine relies on QueryEngineTool, which, as you guessed, is a tool for running queries on a QueryEngine. This allows us to give the model the option to query different documents for different questions, for example. You could also imagine that the SubQuestionQueryEngine could use a Tool that searches the web for something or fetches an answer using Wolfram Alpha.
+
+You can learn more about Tools by looking at the LlamaIndex Python documentation at https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
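+
+As a rough sketch (assuming a previously built `documentQueryEngine`; the tool metadata is illustrative):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Register the underlying engine as a tool the model can call
+// for each generated sub-question.
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: documentQueryEngine,
+      metadata: {
+        name: "document_tool",
+        description: "Useful for answering questions about the document",
+      },
+    },
+  ],
+});
+
+const response = await subQuestionEngine.query(
+  "Summarize the introduction, then compare it with the conclusion",
+);
+```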
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7675ad0d3adc2164443badd0b85eedae32d0753
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing the key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your index and queries. We currently support plain text files and PDFs, with many more to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each node. We provide a number of utilities for working with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of embedding search. Currently, while we do have a default, there is no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The TextSplitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all of the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you will want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..28665784dde1cb00532e94d3439092fd7d25b04d
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..321a00821b538b0c4f7b45732aa131f8d3d9a691
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..1c3ae74305ace72f8e3ff056ab93569814f5bc2c
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+"
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..468288a9356445f3cc3d7957c934cacdba458d1c
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to fit in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `Refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "What age am I?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4fe80912aa370fc06831a9e42aa2af69d0ed260
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been translated automatically and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..cc07a84354d8f6dd009ded5e31d8e2f20bbbe516
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Emmagatzematge
+
+`Aquesta documentació s'ha traduït automàticament i pot contenir errors. No dubteu a obrir una Pull Request per suggerir canvis.`
+
+L'emmagatzematge a LlamaIndex.TS funciona automàticament un cop hagueu configurat un objecte `StorageContext`. Simplement configureu el `persistDir` i adjunteu-lo a un índex.
+
+En aquest moment, només s'admet guardar i carregar des del disc, amb integracions futures planejades!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Text de prova" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## Referència de l'API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..87758dd97a5d7990fcc78ec9171b2acb3c73a48f
--- /dev/null
+++ b/apps/docs/i18n/cat/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Tutorial d'Inici
+
+`Aquesta documentació s'ha traduït automàticament i pot contenir errors. No dubteu a obrir una Pull Request per suggerir canvis.`
+
+Un cop hagueu [instal·lat LlamaIndex.TS utilitzant NPM](installation) i hagueu configurat la vostra clau d'OpenAI, esteu preparats per començar la vostra primera aplicació:
+
+En una nova carpeta:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # si cal
+```
+
+Creeu el fitxer `example.ts`. Aquest codi carregarà algunes dades d'exemple, crearà un document, l'indexarà (que crea incrustacions utilitzant OpenAI) i després crearà un motor de consulta per respondre preguntes sobre les dades.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Carrega l'assaig des de abramov.txt a Node
+  const assaig = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Crea un objecte Document amb l'assaig
+  const document = new Document({ text: assaig });
+
+  // Divideix el text i crea incrustacions. Emmagatzema-les en un VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Consulta l'índex
+  const motorConsulta = index.asQueryEngine();
+  const resposta = await motorConsulta.query(
+    "Què va fer l'autor a la universitat?",
+  );
+
+  // Mostra la resposta
+  console.log(resposta.toString());
+}
+
+main();
+```
+
+A continuació, podeu executar-lo utilitzant
+
+```bash
+npx ts-node example.ts
+```
+
+Preparat per aprendre més? Consulteu el nostre espai de jocs NextJS a https://llama-playground.vercel.app/. El codi font està disponible a https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..c1a875ce64bb650eb3a2093074fed7ac20aec5f3
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Vysokoúrovňové koncepty
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+LlamaIndex.TS vám pomáhá vytvářet aplikace s podporou LLM (např. Q&A, chatbot) nad vlastními daty.
+
+V tomto průvodci vysokoúrovňovými koncepty se dozvíte:
+
+- jak LLM může odpovídat na otázky pomocí vašich vlastních dat.
+- klíčové koncepty a moduly v LlamaIndex.TS pro sestavení vlastního dotazovacího řetězce.
+
+## Odpovídání na otázky v rámci vašich dat
+
+LlamaIndex používá dvoustupňovou metodu při použití LLM s vašimi daty:
+
+1. **indexační fáze**: příprava znalostní báze a
+2. **dotazovací fáze**: získání relevantního kontextu z informací, které pomohou LLM odpovědět na otázku
+
+![](./_static/concepts/rag.jpg)
+
+Tento proces je také známý jako Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS poskytuje základní nástroje, které vám usnadní oba kroky.
+
+Pojďme si každou fázi prozkoumat podrobněji.
+
+### Indexační fáze
+
+LlamaIndex.TS vám pomáhá připravit znalostní bázi pomocí sady konektorů a indexů dat.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Datová načítadla**](./modules/high_level/data_loader.md):
+Datový konektor (tzv. `Reader`) načítá data z různých zdrojů dat a formátů do jednoduché reprezentace `Document` (text a jednoduchá metadata).
+
+[**Dokumenty / Uzly**](./modules/high_level/documents_and_nodes.md): `Document` je obecný kontejner pro jakýkoli zdroj dat - například PDF, výstup z API nebo načtená data z databáze. `Node` je atomická jednotka dat v LlamaIndex a představuje "část" zdrojového `Document`. Jedná se o bohatou reprezentaci, která zahrnuje metadata a vztahy (k ostatním uzlům), aby umožnila přesné a výstižné operace získávání.
+
+[**Indexy dat**](./modules/high_level/data_index.md):
+Jakmile jste načetli svá data, LlamaIndex vám pomáhá indexovat data do formátu, který je snadno získatelný.
+
+Pod pokličkou LlamaIndex analyzuje nezpracované dokumenty do mezireprezentací, vypočítá vektorová vnoření a ukládá vaše data do paměti nebo na disk.
+
+"
+
+### Dotazovací fáze
+
+V dotazovací fázi dotazovací řetězec získává nejrelevantnější kontext na základě uživatelského dotazu
+a předává ho LLM (spolu s dotazem) k syntéze odpovědi.
+
+Tímto způsobem LLM získává aktuální znalosti, které nejsou obsaženy v jeho původních trénovacích datech
+(a zároveň se omezují halucinace).
+
+Klíčovou výzvou v dotazovací fázi je získávání, orchestrace a dedukce z (potenciálně mnoha) znalostních bází.
+
+LlamaIndex poskytuje komponovatelné moduly, které vám pomáhají sestavit a integrovat RAG řetězce pro Q&A (dotazovací engine), chatbot (chatovací engine) nebo jako součást agenta.
+
+Tyto stavební bloky lze přizpůsobit tak, aby odrážely preference ohodnocování a byly sestaveny tak, aby dedukovaly z více znalostních bází strukturovaným způsobem.
+
+![](./_static/concepts/querying.jpg)
+
+#### Stavební bloky
+
+[**Retrievery**](./modules/low_level/retriever.md):
+Retriever definuje, jak efektivně získat relevantní kontext ze znalostní báze (tj. indexu) na základě dotazu.
+Konkrétní logika získávání se u jednotlivých indexů liší; nejpopulárnější je husté (dense) získávání pomocí vektorového indexu.
+
+[**Syntetizátory odpovědí**](./modules/low_level/response_synthesizer.md):
+Syntetizátor odpovědí generuje odpověď z LLM pomocí uživatelského dotazu a daného souboru získaných textových částí.
+
+"
+
+#### Řetězce
+
+[**Dotazovací enginy**](./modules/high_level/query_engine.md):
+Dotazovací engine je koncový řetězec, který vám umožňuje klást otázky nad vašimi daty.
+Přijímá dotaz v přirozeném jazyce a vrací odpověď spolu s referenčním kontextem získaným a předaným LLM.
+
+[**Chatovací enginy**](./modules/high_level/chat_engine.md):
+Chatovací engine je koncový řetězec pro konverzaci s vašimi daty
+(více vzájemných otázek a odpovědí namísto jedné otázky a odpovědi).
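+
+Minimální náčrt obou řetězců (vychází z příkladů dále v této dokumentaci; předpokládáme již vytvořený `index`, např. `VectorStoreIndex`):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Dotazovací engine: jedna otázka, jedna odpověď
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("Jaký je hlavní závěr dokumentu?");
+
+// Chatovací engine: konverzace s vašimi daty
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("Shrň mi dokument.");
+```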
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..d3e4c77718e802dbfb1e612d19aef2ce757c7728
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,59 @@
+---
+sidebar_position: 4
+---
+
+# Příklady od začátku do konce
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+V repozitáři je k dispozici několik příkladů od začátku do konce, které používají LlamaIndex.TS.
+
+Podívejte se na následující příklady nebo je vyzkoušejte a dokončete je během několika minut s interaktivními tutoriály na Github Codespace poskytovanými Dev-Docs [zde](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chatovací engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Načtěte soubor a diskutujte o něm s LLM.
+
+## [Vektorový index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Vytvořte vektorový index a vyhledávejte v něm. Vektorový index používá vnoření (embeddingy) k získání nejrelevantnějších uzlů. Ve výchozím nastavení se získávají 2 nejrelevantnější uzly.
+
+"
+
+## [Index shrnutí](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Vytvořte seznamový index a vyhledejte v něm. Tento příklad také používá `LLMRetriever`, který používá LLM k výběru nejlepších uzlů pro generování odpovědi.
+
+"
+
+## [Uložení / Načtení indexu](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Vytvoření a načtení vektorového indexu. Ukládání na disk v LlamaIndex.TS se provádí automaticky poté, co je vytvořen objekt kontextu úložiště.
+
+"
+
+## [Přizpůsobený vektorový index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Vytvořte vektorový index a dotazujte se na něj, přičemž také konfigurujte `LLM`, `ServiceContext` a `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Vytvořte OpenAI LLM a použijte ho přímo pro chatování.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Vytvořte Llama-2 LLM a použijte jej přímo pro chatování.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Používá `SubQuestionQueryEngine`, který rozděluje složité dotazy na více poddotazů a poté agreguje odpověď na všechny poddotazy.
+
+"
+
+## [Moduly nízké úrovně](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Tento příklad používá několik modulů nízké úrovně, což odstraňuje potřebu skutečného dotazovacího enginu. Tyto moduly lze použít kdekoli, v jakékoli aplikaci, nebo je upravit a odvodit z nich podtřídy podle vašich vlastních potřeb.
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..307673af687bb4c9488f32890fe47c2e001d3299
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Prostředí
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+LlamaIndex aktuálně oficiálně podporuje NodeJS 18 a NodeJS 20.
+
+## NextJS App Router
+
+Pokud používáte NextJS App Router (route handlery / serverless funkce), budete muset použít režim NodeJS:
+
+```js
+export const runtime = "nodejs"; // výchozí hodnota
+```
+
+a budete muset přidat výjimku pro pdf-parse ve vašem next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Přepne pdf-parse do režimu NodeJS s NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..523332ab0356e6bed06fe8ff93098ca8da06536e
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Instalace a nastavení
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+
+Ujistěte se, že máte nainstalovaný NodeJS ve verzi 18 nebo vyšší.
+
+
+## Použití create-llama
+
+Nejjednodušší způsob, jak začít s LlamaIndexem, je použití `create-llama`. Tento nástroj příkazového řádku vám umožní rychle začít s vytvářením nové aplikace LlamaIndex s přednastaveným prostředím.
+
+Jen spusťte
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+abyste začali. Jakmile je vaše aplikace vygenerována, spusťte
+
+```bash npm2yarn
+npm run dev
+```
+
+pro spuštění vývojového serveru. Poté můžete navštívit [http://localhost:3000](http://localhost:3000), abyste viděli vaši aplikaci.
+
+## Instalace pomocí NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Proměnné prostředí
+
+Naše příklady ve výchozím nastavení používají OpenAI. Svůj OpenAI klíč nastavíte následovně:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Nahraďte svým klíčem z https://platform.openai.com/account/api-keys
+```
+
+Pokud chcete, aby se klíč automaticky načítal pokaždé, přidejte ho do souboru .zshrc/.bashrc.
+
+VAROVÁNÍ: Neukládejte svůj OpenAI klíč do verzovacího systému.
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb9debbc70ef72c4394b9be38ebe74a0c373bc4c
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Co je LlamaIndex.TS?
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+LlamaIndex.TS je datový framework pro aplikace LLM, který slouží k příjmu, strukturování a přístupu k soukromým nebo doménově specifickým datům. Zatímco je také k dispozici python balíček (viz [zde](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS nabízí základní funkce v jednoduchém balíčku, optimalizovaném pro použití s TypeScriptem.
+
+## 🚀 Proč LlamaIndex.TS?
+
+V jádru LLMs nabízejí přirozené jazykové rozhraní mezi lidmi a odvozenými daty. Široce dostupné modely jsou předtrénované na obrovském množství veřejně dostupných dat, od Wikipedie a mailingových seznamů po učebnice a zdrojový kód.
+
+Aplikace postavené na LLMs často vyžadují rozšíření těchto modelů o soukromá nebo doménově specifická data. Bohužel, tato data mohou být rozptýlena mezi izolovanými aplikacemi a úložišti dat. Jsou za API, v SQL databázích nebo uvězněna v PDF a prezentacích.
+
+A právě zde přichází **LlamaIndex.TS**.
+
+## 🦙 Jak může LlamaIndex.TS pomoci?
+
+LlamaIndex.TS poskytuje následující nástroje:
+
+- **Načítání dat** - přímý příjem vašich existujících dat ve formátech `.txt`, `.pdf`, `.csv`, `.md` a `.docx`
+- **Indexy dat** - strukturování vašich dat do prostředních reprezentací, které jsou snadné a výkonné pro použití s LLM.
+- **Enginy** - poskytují přístup k vašim datům v přirozeném jazyce. Například:
+  - Dotazovací enginy jsou výkonná rozhraní pro získávání znalostmi rozšířeného výstupu.
+  - Chat enginy jsou konverzační rozhraní pro vícekrokové interakce ("tam a zpět") s vašimi daty.
+
+## 👨‍👩‍👧‍👦 Pro koho je LlamaIndex určen?
+
+LlamaIndex.TS poskytuje základní sadu nástrojů, které jsou nezbytné pro všechny, kteří staví LLM aplikace s použitím JavaScriptu a TypeScriptu.
+
+Naše API na vyšší úrovni umožňuje začátečníkům používat LlamaIndex.TS k příjmu a dotazování dat.
+
+Pro složitější aplikace naše API na nižší úrovni umožňuje pokročilým uživatelům upravit a rozšířit libovolný modul - konektory dat, indexy, získávače a dotazovací enginy, aby vyhovoval jejich potřebám.
+
+## Začínáme
+
+`npm install llamaindex`
+
+Naše dokumentace obsahuje [Návod k instalaci](./installation.md) a [Úvodní tutoriál](./starter.md) pro vytvoření vaší první aplikace.
+
+Jakmile jste připraveni, [Vysokoúrovňové koncepty](./concepts.md) poskytují přehled o modulární architektuře LlamaIndexu. Pro více praktických příkladů se podívejte na naše [Tutoriály od začátku do konce](./end_to_end.md).
+
+## 🗺️ Ekosystém
+
+Pro stažení nebo přispění najdete LlamaIndex na:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Komunita
+
+Potřebujete pomoc? Máte návrh na novou funkci? Připojte se do komunity LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3cbd00f301e59c2e767d30d6bca5cbb279a0c4a
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Chatovací engine)
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+ChatEngine je rychlý a jednoduchý způsob, jak chatovat s daty ve vašem indexu.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// `index` je dříve vytvořený index (např. VectorStoreIndex)
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// začněte chatovat
+const query = "Dotaz v přirozeném jazyce";
+const response = await chatEngine.chat(query);
+```
+
+## API Reference (Odkazy na API)
+
+- [ContextChatEngine (Kontextový chatovací engine)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (Kondenzovaný chatovací engine)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..b899042e70b66863bb74b7e5eedd42219ae6190b
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+Index je základním kontejnerem a organizací vašich dat. LlamaIndex.TS podporuje dva indexy:
+
+- `VectorStoreIndex` - při generování odpovědi odešle do LLM k nejrelevantnějších objektů `Node`. Výchozí hodnota k je 2.
+- `SummaryIndex` - při generování odpovědi odešle každý `Node` v indexu do LLM.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
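+
+Obdobně lze vytvořit `SummaryIndex` (minimální náčrt; třída je uvedena v referenci níže):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+// SummaryIndex při dotazu posílá do LLM všechny uzly
+const summaryIndex = await SummaryIndex.fromDocuments([
+  new Document({ text: "test" }),
+]);
+```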
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..64ee832e5b69227593eda795e2518b11c7e39cef
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Čtečka / Načítání
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+LlamaIndex.TS podporuje snadné načítání souborů z adresářů pomocí třídy `SimpleDirectoryReader`. V současné době jsou podporovány soubory `.txt`, `.pdf`, `.csv`, `.md` a `.docx`, s plánem na podporu dalších typů souborů v budoucnosti!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..b70c0d39b0351cad0dee4d289e3e15c7abd5a122
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumenty a uzly
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+Objekty `Document` a `Node` jsou základní stavební kameny každého indexu. Zatímco API těchto objektů je podobné, objekt `Document` představuje celý soubor, zatímco objekty `Node` jsou menší části původního dokumentu, vhodné pro LLM a Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document (Dokument)](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..8e468e8515714d04f650d9205d46b54a05311c85
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Dotazovací engine)
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+Dotazovací engine obaluje `Retriever` a `ResponseSynthesizer` do řetězce (pipeline), který na základě zadaného dotazu získá uzly a poté je odešle do LLM k vygenerování odpovědi.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("řetězec dotazu");
+```
+
+## Dotazovací engine pro poddotazy
+
+Základní myšlenkou dotazovacího enginu pro poddotazy je rozdělení jednoho dotazu na více dotazů, získání odpovědi na každý z nich a následné spojení těchto odpovědí do jedné souvislé odpovědi pro uživatele. Můžete si to představit jako techniku "promyslete to krok za krokem", ale s iterací přes zdroje dat!
+
+### Začínáme
+
+Nejjednodušší způsob, jak vyzkoušet dotazovací engine pro poddotazy, je spustit soubor subquestion.ts z adresáře [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Nástroje
+
+Dotazovací engine pro poddotazy je implementován pomocí nástrojů (Tools). Základní myšlenkou nástrojů je, že představují spustitelné možnosti pro velký jazykový model. V tomto případě se dotazovací engine pro poddotazy spoléhá na QueryEngineTool, což je, jak název napovídá, nástroj pro spouštění dotazů nad dotazovacím enginem. Model tak může například pro různé otázky dotazovat různé dokumenty. Stejně tak si lze představit nástroj, který vyhledává na webu nebo získává odpovědi pomocí Wolfram Alpha. Minimální náčrt použití je níže.
+
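+Náčrt vychází z příkladu subquestion.ts; předpokládáme existující `index` a to, že `QueryEngineTool` je prostý objekt s poli `queryEngine` a `metadata`, jak naznačuje reference na konci stránky:
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Každý nástroj popisuje jeden dotazovací engine, na který lze směrovat poddotazy
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "esej",
+        description: "Odpovídá na otázky o eseji",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query(
+  "Porovnej hlavní témata v jednotlivých částech eseje.",
+);
+```
+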
+Více informací o nástrojích najdete v dokumentaci k LlamaIndex Python na adrese https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+## API Reference (Odkazy na rozhraní API)
+
+- [RetrieverQueryEngine (Dotazovací engine pro získávání)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Dotazovací engine pro poddotazy)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Nástroj pro dotazovací engine)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e561d4f15e842f013c1a9a4fd935ef55445f933
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Hlavní moduly
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+LlamaIndex.TS nabízí několik hlavních modulů, které jsou rozděleny na vysoce úrovňové moduly pro rychlý start a nízkoúrovňové moduly pro přizpůsobení klíčových komponent podle vašich potřeb.
+
+## Moduly vyšší úrovně
+
+- [**Dokument**](./high_level/documents_and_nodes.md): Dokument představuje textový soubor, PDF soubor nebo jiný souvislý datový blok.
+
+- [**Uzel**](./high_level/documents_and_nodes.md): Základní stavební blok dat. Nejčastěji jde o části dokumentu rozdělené do spravovatelných kusů, které jsou dostatečně malé na to, aby je bylo možné předat modelu vnoření (embedding) a LLM.
+
+- [**Čtečka/Načítání**](./high_level/data_loader.md): Čtečka (reader/loader) načte dokument z reálného světa a převede ho na třídu Document, kterou lze poté použít ve vašem indexu a dotazech. V současné době podporujeme soubory s čistým textem a PDF a mnoho dalších formátů bude následovat.
+
+- [**Indexy**](./high_level/data_index.md): Indexy ukládají uzly a vektory těchto uzlů.
+
+- [**Dotazovací engine**](./high_level/query_engine.md): Dotazovací enginy přijmou zadaný dotaz a vrátí vám výsledek. Obvykle kombinují předem vytvořený prompt s vybranými uzly z vašeho indexu, aby LLM poskytly kontext potřebný k zodpovězení dotazu.
+
+- [**Chatovací engine**](./high_level/chat_engine.md): Chatovací engine vám pomáhá vytvořit chatbota, který bude interagovat s vašimi indexy.
+
+## Nízkoúrovňový modul
+
+- [**LLM**](./low_level/llm.md): Třída LLM je sjednocené rozhraní nad poskytovateli velkých jazykových modelů, jako jsou OpenAI GPT-4, Anthropic Claude nebo Meta LLaMA. Odvozením podtřídy z ní můžete vytvořit konektor pro vlastní velký jazykový model.
+
+- [**Embedding**](./low_level/embedding.md): Embedding je reprezentován jako vektor čísel s plovoucí desetinnou čárkou. Výchozím modelem pro embedding je OpenAI text-embedding-ada-002, jehož každý embedding se skládá z 1 536 hodnot. Dalším populárním modelem je BERT, který k reprezentaci každého uzlu používá 768 hodnot. Poskytujeme několik nástrojů pro práci s embeddingy, včetně 3 možností výpočtu podobnosti a Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Strategie rozdělování textu jsou nesmírně důležité pro celkovou účinnost vyhledávání pomocí embeddingů. V současné době neexistuje univerzální řešení; v závislosti na zdrojových dokumentech můžete chtít použít různé velikosti a strategie rozdělování. Aktuálně podporujeme rozdělování podle pevné velikosti, podle pevné velikosti s překrývajícími se částmi, podle věty a podle odstavce. Textový splitter používá NodeParser při rozdělování objektů `Document` na objekty `Node`.
+
+- [**Retriever**](./low_level/retriever.md): Retriever je ten, který skutečně vybírá uzly, které mají být získány z indexu. Zde můžete zkusit získat více nebo méně uzlů na dotaz, změnit funkci podobnosti nebo vytvořit vlastní retriever pro každý jednotlivý případ použití ve vaší aplikaci. Například můžete mít samostatného retrievera pro obsah kódu a textový obsah.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer je zodpovědný za přijetí řetězce dotazu a vygenerování odpovědi s využitím seznamu objektů `Node`. To může mít různé podoby, například procházení všech kontextů a postupné zpřesňování odpovědi nebo vytvoření stromu shrnutí a vrácení kořenového shrnutí.
+
+- [**Storage**](./low_level/storage.md): V určitém okamžiku budete chtít své indexy, data a vektory uložit, místo abyste pokaždé znovu spouštěli modely pro embedding. IndexStore, DocStore, VectorStore a KVStore jsou abstrakce, které vám to umožňují. Společně tvoří StorageContext. V současné době umožňujeme ukládat embeddingy do souborů na souborovém systému (nebo do virtuálního souborového systému v paměti), aktivně ale přidáváme i integrace s vektorovými databázemi.
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..34bb22d96ead0b92936945141a560fad8c6e546b
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding (Vkládání)
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+Model vkládání v LlamaIndexu je zodpovědný za vytváření číselných reprezentací textu. Ve výchozím nastavení LlamaIndex používá model `text-embedding-ada-002` od OpenAI.
+
+Toto lze explicitně nastavit v objektu `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..03237b0c39034517025b817d823547c800b52a5d
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+LLM je zodpovědný za čtení textu a generování přirozených jazykových odpovědí na dotazy. Výchozím modelem pro LlamaIndex.TS je `gpt-3.5-turbo`.
+
+LLM lze explicitně nastavit v objektu `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
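+
+LLM lze použít i přímo, bez indexu. Minimální náčrt (předpokládáme, že metoda `chat` přijímá pole zpráv ve tvaru `{ content, role }` a vrací odpověď s polem `message`):
+
+```typescript
+// Přímé volání chatu nad výše vytvořeným `openaiLLM`
+const odpoved = await openaiLLM.chat([
+  { content: "Řekni mi vtip.", role: "user" },
+]);
+console.log(odpoved.message.content);
+```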
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a9731c4ef875ffbd99e73f2134a85b991405736
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser (Parsování uzlů)
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+`NodeParser` v LlamaIndexu je zodpovědný za rozdělování objektů `Document` na snadno zpracovatelné objekty `Node`. Když zavoláte `.fromDocuments()`, `NodeParser` z `ServiceContextu` je automaticky použit k tomu, aby to udělal za vás. Alternativně ho můžete použít k rozdělení dokumentů předem.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Je mi 10 let. Johnovi je 20 let." }),
+]);
+```
+
+## TextSplitter
+
+Podkladový textový rozdělovač rozdělí text na věty. Může být také použit jako samostatný modul pro rozdělení čistého textu.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Ahoj světe");
+```
+
+## API Reference (Odkazy na API)
+
+- [SimpleNodeParser (Jednoduchý parsovací uzel)](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter (Rozdělovač vět)](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..115d2c8754b9afebac0e4bc8d3bd3b972c0d5116
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (Syntetizátor odpovědí)
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+ResponseSynthesizer je zodpovědný za odesílání dotazu, uzlů a šablon promptů do LLM (Language Model) pro generování odpovědi. Existuje několik klíčových režimů pro generování odpovědi:
+
+- `Refine` (Vylepšit): "vytvořit a vylepšit" odpověď postupným procházením každého získaného textového úseku. Pro každý uzel se provádí samostatné volání LLM. Vhodné pro podrobnější odpovědi.
+- `CompactAndRefine` (Kompaktní a vylepšit) (výchozí): "zkompaktovat" prompt během každého volání LLM tím, že se do maximální velikosti promptu vloží co nejvíce textových úseků. Pokud je příliš mnoho úseků na vložení do jednoho promptu, "vytvořit a vylepšit" odpověď postupným procházením více kompaktních promptů. Stejné jako `Refine`, ale mělo by to vést k menšímu počtu volání LLM.
+- `TreeSummarize` (Stromové shrnutí): Na základě sady textových úseků a dotazu rekurzivně sestaví strom a vrátí kořenový uzel jako odpověď. Dobré pro účely shrnutí.
+- `SimpleResponseBuilder` (Jednoduchý generátor odpovědí): Na základě sady textových úseků a dotazu aplikuje dotaz na každý textový úsek a odpovědi akumuluje do pole. Vrátí spojený řetězec všech odpovědí. Dobré, když potřebujete spustit stejný dotaz samostatně pro každý textový úsek.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Je mi 10 let." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "Johnovi je 20 let." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Kolik mi je let?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference (Referenční příručka)
+
+- [ResponseSynthesizer (Syntetizátor odpovědí)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Vylepšit)](../../api/classes/Refine.md)
+- [CompactAndRefine (Kompaktní a vylepšit)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (Stromové shrnutí)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (Jednoduchý generátor odpovědí)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..6377c59acdc3a63823e882cc3437a3b22809e3f0
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Získávač)
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+Retriever (získávač) v LlamaIndexu slouží k získávání uzlů (`Node`) z indexu pomocí dotazovacího řetězce. `VectorIndexRetriever` získá top-k nejpodobnějších uzlů, zatímco `SummaryIndexRetriever` vrátí všechny uzly bez ohledu na dotaz.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Získání uzlů!
+const nodesWithScore = await retriever.retrieve("dotazovací řetězec");
+```
+
+## API Reference (Odkazy na rozhraní)
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..ac3ee194ea9a462b9f54c31bf1789dd07b8e4260
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Úložiště
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+Úložiště v LlamaIndex.TS funguje automaticky poté, co jste nakonfigurovali objekt `StorageContext`. Stačí nakonfigurovat `persistDir` a připojit ho k indexu.
+
+V současné době je podporováno pouze ukládání a načítání z disku, s plánovanými budoucími integracemi!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testovací text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
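+
+Dříve uložený index lze ze stejného `persistDir` později znovu načíst. Minimální náčrt (předpokládáme statickou metodu `VectorStoreIndex.init`, která přijímá `storageContext`):
+
+```typescript
+// Později (např. po restartu procesu) načteme index z disku
+const loadedContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedContext,
+});
+```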
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..8190b26b41c0b9afcb53c4c0c6246b55c7e1c80c
--- /dev/null
+++ b/apps/docs/i18n/cs/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Úvodní tutoriál
+
+`Tato dokumentace byla automaticky přeložena a může obsahovat chyby. Neváhejte otevřít Pull Request pro navrhování změn.`
+
+Jakmile jste [nainstalovali LlamaIndex.TS pomocí NPM](installation) a nastavili svůj OpenAI klíč, jste připraveni spustit svou první aplikaci:
+
+V novém adresáři:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # pokud je to potřeba
+```
+
+Vytvořte soubor `example.ts`. Tento kód načte některá ukázková data, vytvoří dokument, vytvoří index (který vytváří vnoření pomocí OpenAI) a poté vytvoří dotazovací engine pro odpovědi na otázky o datech.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Načtěte esej z abramov.txt v Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Vytvořte objekt Document s esejem
+  const document = new Document({ text: essay });
+
+  // Rozdělte text a vytvořte vnoření. Uložte je do VectorStoreIndexu
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Dotaz na index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("Co autor dělal na vysoké škole?");
+
+  // Výstup odpovědi
+  console.log(response.toString());
+}
+
+main();
+```
+
+Poté jej můžete spustit pomocí
+
+```bash
+npx ts-node example.ts
+```
+
+Připraveni se dozvědět více? Podívejte se na naše NextJS hřiště na adrese https://llama-playground.vercel.app/. Zdrojový kód je k dispozici na adrese https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..ae5acc91992017f5a785914e6db2dd81ff6d8e70
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Overordnede koncepter
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+LlamaIndex.TS hjælper dig med at opbygge LLM-drevne applikationer (f.eks. Q&A, chatbot) over brugerdefinerede data.
+
+I denne guide til de overordnede koncepter vil du lære:
+
+- hvordan en LLM kan besvare spørgsmål ved hjælp af dine egne data.
+- centrale begreber og moduler i LlamaIndex.TS til sammensætning af din egen forespørgselspipeline.
+
+## Besvarelse af spørgsmål på tværs af dine data
+
+LlamaIndex bruger en totrinsmetode, når du bruger en LLM med dine data:
+
+1. **indekseringsfase**: forberedelse af en vidensbase, og
+2. **forespørgselsfase**: hentning af relevant kontekst fra viden for at hjælpe LLM med at svare på et spørgsmål
+
+![](./_static/concepts/rag.jpg)
+
+Denne proces er også kendt som Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS giver det essentielle værktøjssæt til at gøre begge trin super nemme.
+
+Lad os udforske hver fase i detaljer.
+
+### Indekseringsfase
+
+LlamaIndex.TS hjælper dig med at forberede vidensbasen med en række dataforbindelser og indekser.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Dataindlæsere**](./modules/high_level/data_loader.md):
+En dataforbindelse (dvs. `Reader`) indlæser data fra forskellige datakilder og dataformater i en simpel `Document`-repræsentation (tekst og simpel metadata).
+
+[**Dokumenter / Noder**](./modules/high_level/documents_and_nodes.md): Et `Document` er en generisk beholder omkring enhver datakilde - for eksempel en PDF, en API-udgang eller hentede data fra en database. En `Node` er den atomare enhed af data i LlamaIndex og repræsenterer en "chunk" af en kilde-`Document`. Det er en rig repræsentation, der inkluderer metadata og relationer (til andre noder) for at muliggøre præcise og udtryksfulde hentningsoperationer.
+
+[**Dataindeks**](./modules/high_level/data_index.md):
+Når du har indlæst dine data, hjælper LlamaIndex dig med at indeksere data i et format, der er nemt at hente.
+
+Under motorhjelmen analyserer LlamaIndex de rå dokumenter til mellemliggende repræsentationer, beregner vektorindlejringer og gemmer dine data i hukommelsen eller på disken.
+
+"
+
+### Forespørgselsfase
+
+I forespørgselsfasen henter forespørgselspipelinen den mest relevante kontekst ud fra en brugerforespørgsel,
+og sender det til LLM'en (sammen med forespørgslen) for at syntetisere et svar.
+
+Dette giver LLM'en opdateret viden, der ikke er i dens oprindelige træningsdata,
+(samtidig med at hallucination reduceres).
+
+Den største udfordring i forespørgselsfasen er hentning, orkestrering og ræsonnement over (potentielt mange) vidensbaser.
+
+LlamaIndex tilbyder sammensættelige moduler, der hjælper dig med at opbygge og integrere RAG-pipeliner til Q&A (forespørgselsmotor), chatbot (chatmotor) eller som en del af en agent.
+
+Disse byggeklodser kan tilpasses til at afspejle rangeringspræferencer samt sammensættes til at ræsonnere over flere vidensbaser på en struktureret måde.
+
+![](./_static/concepts/querying.jpg)
+
+#### Byggeklodser
+
+[**Retrievers**](./modules/low_level/retriever.md):
+En retriever definerer, hvordan man effektivt henter relevant kontekst fra en vidensbase (dvs. indeks), når der gives en forespørgsel.
+Den specifikke hentelogik varierer for forskellige indeks, hvoraf den mest populære er tæt hentning mod en vektorindeks.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+En response synthesizer genererer et svar fra en LLM ved hjælp af en brugerforespørgsel og en given mængde hentede tekststykker.
+
+"
+
+#### Pipelines
+
+[**Forespørgselsmotorer**](./modules/high_level/query_engine.md):
+En forespørgselsmotor er en end-to-end pipeline, der giver dig mulighed for at stille spørgsmål om dine data.
+Den modtager en naturligt sprog forespørgsel og returnerer et svar sammen med den hentede referencekontekst, der sendes til LLM'en.
+
+[**Chatmotorer**](./modules/high_level/chat_engine.md):
+En chatmotor er en end-to-end pipeline til at føre en samtale med dine data
+(flere frem-og-tilbage i stedet for et enkelt spørgsmål og svar).
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..11fc2e27dcc3e314d623f38b8035c2fb84c12c41
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,53 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Eksempler
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+Vi inkluderer flere end-to-end eksempler ved hjælp af LlamaIndex.TS i repository'et.
+
+Tjek eksemplerne nedenfor eller prøv dem og fuldfør dem på få minutter med interaktive Github Codespace tutorials leveret af Dev-Docs [her](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Læs en fil og chat om den med LLM.
+
+## [Vektorindeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Opret et vektorindeks og forespørg på det. Vektorindekset vil bruge embeddings til at hente de mest relevante noder. Som standard hentes de 2 mest relevante noder.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Opret et listeindeks og forespørg på det. Dette eksempel bruger også `LLMRetriever`, som bruger LLM'en til at vælge de bedste noder, når der genereres svar.
+
+"
+
+## [Gem / Indlæs et indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Opret og indlæs et vektorindeks. Persistens til disk i LlamaIndex.TS sker automatisk, når et storage context-objekt er oprettet.
+
+"
+
+## [Tilpasset vektorindeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Opret et vektorindeks og forespørg på det, samtidig med at du konfigurerer `LLM`, `ServiceContext` og `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Opret en OpenAI LLM og brug den direkte til chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Opret en Llama-2 LLM og brug den direkte til chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Bruger `SubQuestionQueryEngine`, som opdeler komplekse forespørgsler i flere spørgsmål og derefter samler et svar på tværs af svarene på alle under-spørgsmål.
+
+## [Lavniveau Moduler](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Dette eksempel bruger flere lavniveau komponenter, som fjerner behovet for en faktisk forespørgselsmotor. Disse komponenter kan bruges hvor som helst, i enhver applikation, eller tilpasses og underklassificeres for at imødekomme dine egne behov.
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f58139eb8640be7cb76d6449db7d38069e99c
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Miljøer
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+LlamaIndex understøtter i øjeblikket officielt NodeJS 18 og NodeJS 20.
+
+## NextJS App Router
+
+Hvis du bruger NextJS App Router route handlers/serverless functions, skal du bruge NodeJS-tilstand:
+
+```js
+export const runtime = "nodejs"; // standard
+```
+
+og du skal tilføje en undtagelse for pdf-parse i din next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Sætter pdf-parse i faktisk NodeJS-tilstand med NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..580bb274a3d0316320da0b46661bf5761c112e54
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation og opsætning
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+
+Sørg for at have NodeJS v18 eller nyere.
+
+
+## Brug af create-llama
+
+Den nemmeste måde at komme i gang med LlamaIndex er ved at bruge `create-llama`. Dette CLI-værktøj gør det muligt for dig at hurtigt starte med at bygge en ny LlamaIndex-applikation, hvor alt er sat op for dig.
+
+Kør bare
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+for at komme i gang. Når din app er genereret, kør
+
+```bash npm2yarn
+npm run dev
+```
+
+for at starte udviklingsserveren. Du kan derefter besøge [http://localhost:3000](http://localhost:3000) for at se din app.
+
+## Installation fra NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Miljøvariabler
+
+Vores eksempler bruger som standard OpenAI. Du skal konfigurere din OpenAI-nøgle som følger:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Erstat med din nøgle fra https://platform.openai.com/account/api-keys
+```
+
+Hvis du vil have den indlæst automatisk hver gang, skal du tilføje den til din .zshrc/.bashrc.
+
+ADVARSEL: Undlad at uploade din OpenAI-nøgle til versionsstyring.
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..640cb161d7390519fc39a248cea1b10299c7c83e
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Hvad er LlamaIndex.TS?
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+LlamaIndex.TS er et dataframework til LLM-applikationer til at indtage, strukturere og få adgang til private eller domænespecifikke data. Mens der også er en python-pakke tilgængelig (se [her](https://docs.llamaindex.ai/en/stable/)), tilbyder LlamaIndex.TS kernefunktioner i en simpel pakke, optimeret til brug med TypeScript.
+
+## 🚀 Hvorfor LlamaIndex.TS?
+
+Grundlæggende tilbyder LLM'er en naturlig sproggrænseflade mellem mennesker og udledte data. Bredt tilgængelige modeller er forudtrænet på enorme mængder offentligt tilgængelige data, fra Wikipedia og mailinglister til lærebøger og kildekode.
+
+Applikationer bygget oven på LLM'er kræver ofte at supplere disse modeller med private eller domænespecifikke data. Desværre kan disse data være fordelt på isolerede applikationer og datalagre. Det er bag API'er, i SQL-databaser eller fanget i PDF'er og præsentationer.
+
+Det er her, **LlamaIndex.TS** kommer ind i billedet.
+
+## 🦙 Hvordan kan LlamaIndex.TS hjælpe?
+
+LlamaIndex.TS tilbyder følgende værktøjer:
+
+- **Indlæsning af data** - indtag dine eksisterende `.txt`, `.pdf`, `.csv`, `.md` og `.docx` data direkte.
+- **Dataindeks** - strukturer dine data i mellemliggende repræsentationer, der er nemme og effektive for LLM'er at forbruge.
+- **Engines** - giver naturlig sprogadgang til dine data. For eksempel:
+  - Query engines er kraftfulde grænseflader til at hente videnforstærket output.
+  - Chat engines er samtalegrænseflader til flerbeskeds-interaktioner ("frem og tilbage") med dine data.
+
+## 👨‍👩‍👧‍👦 Hvem er LlamaIndex til?
+
+LlamaIndex.TS tilbyder et kernesæt af værktøjer, der er essentielle for alle, der bygger LLM-apps med JavaScript og TypeScript.
+
+Vores API på højt niveau giver begynderbrugere mulighed for at bruge LlamaIndex.TS til at indtage og forespørge deres data.
+
+Til mere komplekse applikationer giver vores API'er på lavere niveau avancerede brugere mulighed for at tilpasse og udvide ethvert modul - dataforbindelser, indekser, hentere og forespørgselsmotorer - for at imødekomme deres behov.
+
+## Kom godt i gang
+
+`npm install llamaindex`
+
+Vores dokumentation inkluderer [Installationsinstruktioner](./installation.md) og en [Starter Tutorial](./starter.md) til at bygge din første applikation.
+
+Når du er i gang, giver [Højniveaukoncepter](./concepts.md) et overblik over LlamaIndex' modulære arkitektur. For flere praktiske eksempler kan du kigge igennem vores [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Økosystem
+
+For at downloade eller bidrage, find LlamaIndex på:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Fællesskab
+
+Brug for hjælp? Har du et forslag til en funktion? Bliv en del af LlamaIndex-fællesskabet:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..271a630731cb1a71a12b5f65b0a9a00799934180
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+ChatEngine er en hurtig og enkel måde at chatte med dataene i dit indeks på.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// forudsætter et eksisterende `index` (se Indeks-siden)
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start med at chatte
+const query = "Hvad handler dataene om?";
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d23a80ca12e74d7ad6e3b61c555273a8a807d4bc
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Indeks
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+Et indeks er den grundlæggende beholder og organisering af dine data. LlamaIndex.TS understøtter to indekser:
+
+- `VectorStoreIndex` - sender de øverste k `Node`er til LLM'en, når der genereres et svar. Standard top-k er 2.
+- `SummaryIndex` - sender hver `Node` i indekset til LLM for at generere et svar.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..cf2b742d637d7465e5de1a80d5ef44ed65ed69c5
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Læser / Loader
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+LlamaIndex.TS understøtter nem indlæsning af filer fra mapper ved hjælp af klassen `SimpleDirectoryReader`. I øjeblikket understøttes `.txt`, `.pdf`, `.csv`, `.md` og `.docx` filer, med flere planlagt i fremtiden!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..5cfa52d2f9a142e45b7d509ccdd7368303cabc70
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumenter og Noder
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+`Dokumenter` og `Noder` er de grundlæggende byggeklodser i ethvert indeks. Selvom API'en for disse objekter er ens, repræsenterer `Dokument`-objekter hele filer, mens `Noder` er mindre dele af det oprindelige dokument, der er velegnede til en LLM og Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "tekst", metadata: { nøgle: "værdi" } });
+```
+
+## API Reference
+
+- [Dokument](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..53a18cabc07b5d1c3f421fe5eb043e47ed39873a
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+En query engine pakker en `Retriever` og en `ResponseSynthesizer` ind i en pipeline, der bruger query-strengen til at hente noder og derefter sender dem til LLM'en for at generere et svar.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query streng");
+```
+
+## Underforespørgselsmotor (Sub Question Query Engine)
+
+Det grundlæggende koncept for Underforespørgselsmotoren er, at den opdeler en enkelt forespørgsel i flere forespørgsler, får et svar på hver af disse forespørgsler og kombinerer derefter disse forskellige svar til et enkelt sammenhængende svar til brugeren. Du kan tænke på det som teknikken "tænk dette igennem trin for trin", men hvor du itererer over dine datakilder!
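+
+Som illustration er her en minimal skitse i TypeScript, baseret på subquestion.ts-eksemplet (navne som `dokument_vaerktoej` er kun pladsholdere, og API'et kan afvige mellem versioner):
+
+```typescript
+import {
+  Document,
+  SubQuestionQueryEngine,
+  VectorStoreIndex,
+} from "llamaindex";
+
+// Byg et indeks over et dokument
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "Alex kan lide katte. Benny kan lide hunde." }),
+]);
+
+// Pak indeksets query engine ind som et værktøj med navn og beskrivelse
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "dokument_vaerktoej",
+        description: "Besvarer spørgsmål om dokumentet",
+      },
+    },
+  ],
+});
+
+// En sammensat forespørgsel opdeles i underforespørgsler,
+// og delsvarene kombineres til ét samlet svar
+const response = await queryEngine.query("Hvad kan Alex og Benny hver især lide?");
+console.log(response.toString());
+```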
+
+### Kom godt i gang
+
+Den nemmeste måde at begynde at prøve Underforespørgselsmotoren er at køre filen subquestion.ts i [eksemplerne](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Værktøjer (Tools)
+
+Underforespørgselsmotoren er implementeret med værktøjer (Tools). Den grundlæggende idé med værktøjer er, at de er eksekverbare muligheder for den store sprogmodel. I dette tilfælde er vores underforespørgselsmotor afhængig af QueryEngineTool, der, som du nok gættede, er et værktøj til at køre forespørgsler på en QueryEngine. Det gør det for eksempel muligt for modellen at forespørge forskellige dokumenter til forskellige spørgsmål. Du kan også forestille dig, at underforespørgselsmotoren kan bruge et værktøj, der søger efter noget på nettet eller henter et svar ved hjælp af Wolfram Alpha.
+
+Du kan lære mere om værktøjer ved at kigge på LlamaIndex Python-dokumentationen [her](https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html).
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..29d072ee2911c4bf575f07fcd729416f36dbda59
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Kernemoduler
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+LlamaIndex.TS tilbyder flere kernemoduler, opdelt i højniveau-moduler til hurtig opstart og lavniveau-moduler til tilpasning af nøglekomponenter efter behov.
+
+## Højniveau-moduler
+
+- [**Dokument**](./high_level/documents_and_nodes.md): Et dokument repræsenterer en tekstfil, en PDF-fil eller andre sammenhængende data.
+
+- [**Node**](./high_level/documents_and_nodes.md): Den grundlæggende databyggesten. Typisk er disse dele af dokumentet opdelt i håndterbare stykker, der er små nok til at blive fodret ind i en indlejringsmodel og LLM.
+
+- [**Læser/Indlæser**](./high_level/data_loader.md): En læser eller indlæser er noget, der tager et dokument fra den virkelige verden og omdanner det til en Dokument-klasse, der derefter kan bruges i dit indeks og dine forespørgsler. Vi understøtter i øjeblikket almindelige tekstfiler og PDF'er med mange flere på vej.
+
+- [**Indeks**](./high_level/data_index.md): Indeks gemmer Noderne og indlejringerne af disse noder.
+
+- [**Forespørgselsmotor**](./high_level/query_engine.md): Forespørgselsmotorer behandler den forespørgsel, du indtaster, og giver dig resultatet tilbage. Forespørgselsmotorer kombinerer generelt en forudbygget prompt med udvalgte noder fra dit indeks for at give LLM'en den kontekst, den har brug for til at besvare din forespørgsel (se skitsen efter denne liste).
+
+- [**Chatmotor**](./high_level/chat_engine.md): En chatmotor hjælper dig med at opbygge en chatbot, der interagerer med dine indekser.
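+
+Som en hurtig illustration af, hvordan højniveau-modulerne spiller sammen, er her en minimal skitse baseret på eksemplerne i denne dokumentation:
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Dokument -> Indeks -> Forespørgselsmotor
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "LlamaIndex.TS er et dataframework til LLM-applikationer." }),
+]);
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Hvad er LlamaIndex.TS?");
+console.log(response.toString());
+```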
+
+## Lavniveau-moduler
+
+- [**LLM**](./low_level/llm.md): LLM-klassen er et forenet interface over en stor sprogmodel-udbyder såsom OpenAI GPT-4, Anthropic Claude eller Meta LLaMA. Du kan nedarve fra den for at skrive en forbindelse til din egen store sprogmodel.
+
+- [**Embedding**](./low_level/embedding.md): En embedding repræsenteres som en vektor af flydende kommatal. OpenAI's text-embedding-ada-002 er vores standard embedding-model, og hver embedding, den genererer, består af 1.536 flydende kommatal. En anden populær embedding-model er BERT, som bruger 768 flydende kommatal til at repræsentere hver Node. Vi tilbyder en række hjælpeværktøjer til at arbejde med embeddings, herunder 3 muligheder for beregning af lighed samt Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Tekstopdelingsstrategier er utroligt vigtige for den overordnede effektivitet af embedding søgningen. I øjeblikket har vi en standard, men der er ingen universel løsning. Afhængigt af kildedokumenterne kan du ønske at bruge forskellige opdelingsstørrelser og strategier. I øjeblikket understøtter vi opdeling efter fast størrelse, opdeling efter fast størrelse med overlappende sektioner, opdeling efter sætning og opdeling efter afsnit. Tekstopdeleren bruges af NodeParseren til at opdele `Dokumenter` i `Noder`.
+
+- [**Retriever**](./low_level/retriever.md): Retrieveren er det, der faktisk vælger Noderne, der skal hentes fra indekset. Her kan du ønske at prøve at hente flere eller færre Noder pr. forespørgsel, ændre din lighedsfunktion eller oprette din egen retriever til hver enkelt brugssag i din applikation. For eksempel kan du ønske at have en separat retriever til kodeindhold vs. tekstindhold.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizeren er ansvarlig for at tage en forespørgselsstreng og bruge en liste af `Noder` til at generere et svar. Dette kan tage mange former, som f.eks. at iterere over al kontekst og forfine et svar eller opbygge et træ af sammenfatninger og returnere roden.
+
+- [**Storage**](./low_level/storage.md): På et tidspunkt vil du gerne gemme dine indekser, data og vektorer i stedet for at køre embedding modellerne hver gang. IndexStore, DocStore, VectorStore og KVStore er abstraktioner, der giver dig mulighed for at gøre det. Sammen udgør de StorageContext. I øjeblikket tillader vi dig at gemme dine embeddings i filer på filsystemet (eller et virtuelt hukommelsesbaseret filsystem), men vi tilføjer også aktivt integrationer til Vector Databaser.
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..4eb16b265817c571672736defdb1173ca4c7687d
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Indlejring
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+Indlejringmodellen i LlamaIndex er ansvarlig for at skabe numeriske repræsentationer af tekst. Som standard vil LlamaIndex bruge modellen `text-embedding-ada-002` fra OpenAI.
+
+Dette kan eksplicit sættes i `ServiceContext`-objektet.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..1b0852d362153dc48582611f48d823a4b0d79cfe
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+LLM'en er ansvarlig for at læse tekst og generere svar i naturligt sprog på forespørgsler. Som standard bruger LlamaIndex.TS `gpt-3.5-turbo`.
+
+LLM kan eksplicit sættes i `ServiceContext` objektet.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..72801ee7b170c3a061a1506118c03e31e61ffa2a
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+`NodeParser` i LlamaIndex er ansvarlig for at opdele `Document` objekter i mere håndterbare `Node` objekter. Når du kalder `.fromDocuments()`, bruges `NodeParser` fra `ServiceContext` til automatisk at gøre dette for dig. Alternativt kan du bruge det til at opdele dokumenter på forhånd.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Jeg er 10 år gammel. John er 20 år gammel." }),
+]);
+```
+
+## TextSplitter
+
+Den underliggende tekstsplitter opdeler teksten i sætninger. Den kan også bruges som et selvstændigt modul til at opdele rå tekst.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hej Verden");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ed3ff3fdea496cca2fd6d826811e1a33cd7f7cf
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,48 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+ResponseSynthesizer er ansvarlig for at sende forespørgslen, noderne og promptskabelonerne til LLM for at generere et svar. Der er nogle få nøgletilstande til generering af et svar:
+
+- `Refine`: "opret og forbedre" et svar ved at gå sekventielt gennem hver hentet tekstblok.
+  Dette foretager et separat LLM-opkald pr. node. Godt til mere detaljerede svar.
+- `CompactAndRefine` (standard): "kompakt" prompten under hvert LLM-opkald ved at fylde så mange tekstblokke som muligt inden for den maksimale promptstørrelse. Hvis der er for mange blokke til at fylde i én prompt, "opret og forbedre" et svar ved at gå gennem flere kompakte prompts. Det samme som `refine`, men bør resultere i færre LLM-opkald.
+- `TreeSummarize`: Givet en række tekstblokke og forespørgslen, konstruer rekursivt et træ og returner rodnoden som svaret. Godt til opsummeringsformål.
+- `SimpleResponseBuilder`: Givet en række tekstblokke og forespørgslen, anvend forespørgslen på hver tekstblok, mens svarene akkumuleres i et array. Returnerer en sammensat streng af alle svar. Godt, når du har brug for at køre den samme forespørgsel separat mod hver tekstblok.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Jeg er 10 år gammel." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John er 20 år gammel." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Hvor gammel er jeg?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..add4ef4b68ebea54cda3d632d0588a228c95ee91
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+En retriever i LlamaIndex er det, der bruges til at hente `Node`r fra et indeks ved hjælp af en forespørgselsstreng. En `VectorIndexRetriever` vil hente de top-k mest lignende noder, mens en `SummaryIndexRetriever` vil hente alle noder uanset forespørgslen.
+
+```typescript
+// forudsætter et eksisterende `vectorIndex` (VectorStoreIndex)
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Hent noder!
+const nodesWithScore = await retriever.retrieve("forespørgselsstreng");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..b83aa81f4e03e6231bdf4b8a115ab65ba10c722c
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Opbevaring
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+Opbevaring i LlamaIndex.TS fungerer automatisk, når du har konfigureret et `StorageContext`-objekt. Du skal bare konfigurere `persistDir` og tilknytte det til et indeks.
+
+Lige nu understøttes kun lagring på og indlæsning fra disk, med flere integrationer planlagt i fremtiden!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Tekst" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
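+
+For at genindlæse et tidligere gemt indeks kan man, som en skitse (API'et kan afvige mellem versioner; se storageContext-eksemplet i repository'et), initialisere indekset fra den samme `persistDir` i stedet for at genberegne indlejringerne:
+
+```typescript
+// Genindlæs indekset fra "./storage" uden at genberegne indlejringer
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+```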
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..90ea58b040798d9ffbd96f2184542ab818b68695
--- /dev/null
+++ b/apps/docs/i18n/da/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Startvejledning
+
+`Denne dokumentation er blevet automatisk oversat og kan indeholde fejl. Tøv ikke med at åbne en Pull Request for at foreslå ændringer.`
+
+Når du har [installeret LlamaIndex.TS ved hjælp af NPM](installation) og har konfigureret din OpenAI-nøgle, er du klar til at starte din første app:
+
+I en ny mappe:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # hvis det er nødvendigt
+```
+
+Opret filen `example.ts`. Denne kode vil indlæse nogle eksempeldata, oprette et dokument, indeksere det (som opretter indlejringer ved hjælp af OpenAI) og derefter oprette en forespørgselsmotor til at besvare spørgsmål om dataene.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Indlæs essay fra abramov.txt i Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Opret Document-objekt med essay
+  const document = new Document({ text: essay });
+
+  // Opdel tekst og opret indlejringer. Gem dem i en VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Forespørg på indekset
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Hvad gjorde forfatteren på college?",
+  );
+
+  // Vis svar
+  console.log(response.toString());
+}
+
+main();
+```
+
+Derefter kan du køre det ved hjælp af
+
+```bash
+npx ts-node example.ts
+```
+
+Klar til at lære mere? Tjek vores NextJS-legeplads på https://llama-playground.vercel.app/. Kildekoden er tilgængelig på https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..20920126703a66d077058aed0e68756da0332a60
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Hochrangige Konzepte
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+LlamaIndex.TS hilft Ihnen beim Erstellen von LLM-basierten Anwendungen (z. B. Q&A, Chatbot) über benutzerdefinierte Daten.
+
+In diesem Leitfaden zu den hochrangigen Konzepten erfahren Sie:
+
+- wie ein LLM Fragen mithilfe Ihrer eigenen Daten beantworten kann.
+- wichtige Konzepte und Module in LlamaIndex.TS zum Erstellen Ihrer eigenen Abfrage-Pipeline.
+
+## Beantwortung von Fragen über Ihre Daten
+
+LlamaIndex verwendet eine zweistufige Methode, wenn Sie einen LLM mit Ihren Daten verwenden:
+
+1. **Indexierungsstufe**: Vorbereitung einer Wissensbasis und
+2. **Abfragestufe**: Abrufen relevanter Kontextinformationen aus dem Wissen, um dem LLM bei der Beantwortung einer Frage zu helfen.
+
+![](./_static/concepts/rag.jpg)
+
+Dieser Prozess wird auch als Retrieval Augmented Generation (RAG) bezeichnet.
+
+LlamaIndex.TS bietet das wesentliche Toolkit, um beide Schritte super einfach zu machen.
+
+Lassen Sie uns jede Stufe im Detail erkunden.
+
+### Indexierungsstufe
+
+LlamaIndex.TS hilft Ihnen bei der Vorbereitung der Wissensbasis mit einer Reihe von Datenverbindern und Indizes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Datenlader**](./modules/high_level/data_loader.md):
+Ein Datenverbinder (d. h. `Reader`) nimmt Daten aus verschiedenen Datenquellen und Datenformaten auf und stellt sie in einer einfachen `Document`-Darstellung (Text und einfache Metadaten) bereit.
+
+[**Dokumente / Knoten**](./modules/high_level/documents_and_nodes.md): Ein `Document` ist ein generischer Container für jede Datenquelle - zum Beispiel ein PDF, eine API-Ausgabe oder abgerufene Daten aus einer Datenbank. Ein `Node` ist die atomare Dateneinheit in LlamaIndex und repräsentiert einen "Chunk" eines Quelldokuments. Es handelt sich um eine umfassende Darstellung, die Metadaten und Beziehungen (zu anderen Knoten) enthält, um genaue und ausdrucksstarke Abrufoperationen zu ermöglichen.
+
+[**Datenindizes**](./modules/high_level/data_index.md):
+Nachdem Sie Ihre Daten aufgenommen haben, hilft Ihnen LlamaIndex dabei, die Daten in einem Format zu indizieren, das leicht abgerufen werden kann.
+
+Unter der Haube analysiert LlamaIndex die Rohdokumente in Zwischenrepräsentationen, berechnet Vektor-Einbettungen und speichert Ihre Daten im Speicher oder auf der Festplatte.
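+
+Zur Veranschaulichung eine minimale Skizze der Indexierungsstufe in TypeScript (angelehnt an die Modulseiten dieser Dokumentation):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Rohtext als generisches Document aufnehmen
+const document = new Document({ text: "Text der Wissensbasis" });
+
+// LlamaIndex zerlegt das Dokument in Nodes, berechnet Vektor-Einbettungen
+// und speichert sie in einem Index
+const index = await VectorStoreIndex.fromDocuments([document]);
+```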
+
+### Abfragestufe
+
+In der Abfragestufe ruft die Abfrage-Pipeline den relevantesten Kontext ab, der einer Benutzerabfrage entspricht,
+und gibt diesen zusammen mit der Abfrage an den LLM weiter, um eine Antwort zu synthetisieren.
+
+Dies gibt dem LLM aktuelles Wissen, das nicht in seinen ursprünglichen Trainingsdaten enthalten ist,
+(reduziert auch Halluzinationen).
+
+Die Hauptherausforderung in der Abfragestufe besteht darin, Informationen aus (potenziell vielen) Wissensbasen abzurufen, zu orchestrieren und zu analysieren.
+
+LlamaIndex bietet zusammensetzbare Module, die Ihnen beim Aufbau und Integrieren von RAG-Pipelines für Q&A (Abfrage-Engine), Chatbot (Chat-Engine) oder als Teil eines Agenten helfen.
+
+Diese Bausteine können an individuelle Ranking-Präferenzen angepasst und strukturiert verwendet werden, um über mehrere Wissensbasen hinweg zu analysieren.
+
+![](./_static/concepts/querying.jpg)
+
+#### Bausteine
+
+[**Retrievers**](./modules/low_level/retriever.md):
+Ein Retriever definiert, wie relevanter Kontext effizient aus einer Wissensbasis (d. h. Index) abgerufen werden kann, wenn eine Abfrage vorliegt.
+Die spezifische Abruflogik unterscheidet sich je nach Index, wobei die beliebteste Methode ein dichter Abruf gegen einen Vektorindex ist.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+Ein Response Synthesizer generiert eine Antwort aus einem LLM, unter Verwendung einer Benutzerabfrage und einer gegebenen Menge abgerufener Textfragmente.
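+
+Ein Retriever lässt sich zum Beispiel direkt aus einem bestehenden Index gewinnen und konfigurieren (Skizze, angelehnt an die Retriever-Modulseite):
+
+```typescript
+// Retriever aus einem bestehenden Index gewinnen
+const retriever = index.asRetriever();
+retriever.similarityTopK = 3;
+
+// die relevantesten Nodes zu einer Abfrage abrufen
+const nodesWithScore = await retriever.retrieve("Abfrage-String");
+```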
+
+#### Pipelines
+
+[**Abfrage-Engines**](./modules/high_level/query_engine.md):
+Eine Abfrage-Engine ist eine End-to-End-Pipeline, mit der Sie Fragen zu Ihren Daten stellen können.
+Sie nimmt eine natürliche Sprachabfrage entgegen und liefert eine Antwort sowie den abgerufenen Referenzkontext, der an den LLM weitergegeben wird.
+
+[**Chat-Engines**](./modules/high_level/chat_engine.md):
+Eine Chat-Engine ist eine End-to-End-Pipeline, um eine Konversation mit Ihren Daten zu führen
+(mehrere Hin und Her statt einer einzelnen Frage und Antwort).
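+
+Beide Pipelines lassen sich direkt aus einem bestehenden Index erzeugen; eine minimale Skizze (aufbauend auf dem `index` aus der Indexierungsstufe oben):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Abfrage-Engine: einzelne Frage, einzelne Antwort
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Worum geht es in den Daten?");
+
+// Chat-Engine: Konversation über die Daten (mehrere Nachrichten)
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const chatResponse = await chatEngine.chat("Erzähl mir mehr dazu.");
+```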
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..594d7637c7ae2c6402bdcad086e7e4d6a0f69ca9
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,55 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End-Beispiele
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Im Repository sind mehrere End-to-End-Beispiele mit LlamaIndex.TS enthalten.
+
+Schauen Sie sich die folgenden Beispiele an oder probieren Sie sie aus und vervollständigen Sie sie in wenigen Minuten mit interaktiven Github Codespace-Tutorials, die von Dev-Docs [hier](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json) bereitgestellt werden:
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Lesen Sie eine Datei und unterhalten Sie sich darüber mit dem LLM.
+
+## [Vektor-Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Erstellen Sie einen Vektor-Index und fragen Sie ihn ab. Der Vektor-Index verwendet Einbettungen, um die k relevantesten Knoten abzurufen. Standardmäßig ist k gleich 2.
+
+## [Zusammenfassungsindex](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Erstellen Sie einen Listenindex und fragen Sie ihn ab. Dieses Beispiel verwendet auch den `LLMRetriever`, der den LLM verwendet, um die besten Knoten auszuwählen, die beim Generieren einer Antwort verwendet werden sollen.
+
+## [Index speichern / laden](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Erstellen und laden Sie einen Vektorindex. Die Persistenz auf der Festplatte in LlamaIndex.TS erfolgt automatisch, sobald ein Speicherkontextobjekt erstellt wird.
+
+## [Angepasster Vektorindex](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Erstellen Sie einen Vektorindex und fragen Sie ihn ab, während Sie auch das `LLM`, den `ServiceContext` und das `similarity_top_k` konfigurieren.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Erstellen Sie ein OpenAI LLM und verwenden Sie es direkt für den Chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Erstellen Sie einen Llama-2 LLM und verwenden Sie ihn direkt für den Chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Verwendet den `SubQuestionQueryEngine`, der komplexe Abfragen in mehrere Fragen aufteilt und dann eine Antwort über die Antworten auf alle Teilfragen aggregiert.
+
+## [Niedrigstufige Module](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Dieses Beispiel verwendet mehrere niedrigstufige Komponenten, die den Bedarf an einer eigentlichen Abfrage-Engine beseitigen. Diese Komponenten können überall verwendet werden, in jeder Anwendung, oder angepasst und durch Unterklassen erweitert werden, um Ihre eigenen Anforderungen zu erfüllen.
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..20abc87f96e1c38689f4dba7aa50c208064d700a
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Umgebungen
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+LlamaIndex unterstützt derzeit offiziell NodeJS 18 und NodeJS 20.
+
+## NextJS App Router
+
+Wenn Sie den NextJS App Router für Routen-Handler/Serverless-Funktionen verwenden, müssen Sie den NodeJS-Modus verwenden:
+
+```js
+export const runtime = "nodejs"; // Standardwert
+```
+
+und Sie müssen eine Ausnahme für pdf-parse in Ihrer next.config.js hinzufügen:
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Setzt pdf-parse in den tatsächlichen NodeJS-Modus mit NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a8e7854e35a7eedfa96d322d61ae611d74ccf4ca
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation und Einrichtung
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+
+Stellen Sie sicher, dass Sie NodeJS Version 18 oder höher installiert haben.
+
+
+## Verwendung von create-llama
+
+Der einfachste Weg, um mit LlamaIndex zu beginnen, besteht darin, `create-llama` zu verwenden. Dieses CLI-Tool ermöglicht es Ihnen, schnell eine neue LlamaIndex-Anwendung zu erstellen, bei der alles für Sie eingerichtet ist.
+
+Führen Sie einfach den folgenden Befehl aus:
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+um loszulegen. Sobald Ihre App generiert wurde, führen Sie den folgenden Befehl aus:
+
+```bash npm2yarn
+npm run dev
+```
+
+um den Entwicklungsserver zu starten. Sie können dann [http://localhost:3000](http://localhost:3000) besuchen, um Ihre App zu sehen.
+
+## Installation über NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Umgebungsvariablen
+
+Unsere Beispiele verwenden standardmäßig OpenAI. Sie müssen Ihren OpenAI-Schlüssel wie folgt einrichten:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Ersetzen Sie dies durch Ihren Schlüssel von https://platform.openai.com/account/api-keys
+```
+
+Wenn Sie möchten, dass er jedes Mal automatisch geladen wird, fügen Sie ihn Ihrer .zshrc/.bashrc hinzu.
+
+WARNUNG: Geben Sie Ihren OpenAI-Schlüssel nicht in die Versionskontrolle ein.
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..ff67888b6ef867d82a5426394cdf6ec571300e1b
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Was ist LlamaIndex.TS?
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+LlamaIndex.TS ist ein Datenframework für LLM-Anwendungen zum Aufnehmen, Strukturieren und Zugreifen auf private oder domänenspezifische Daten. Während auch ein Python-Paket verfügbar ist (siehe [hier](https://docs.llamaindex.ai/en/stable/)), bietet LlamaIndex.TS Kernfunktionen in einem einfachen Paket, das für die Verwendung mit TypeScript optimiert ist.
+
+## 🚀 Warum LlamaIndex.TS?
+
+Im Kern bieten LLMs eine natürliche Sprachschnittstelle zwischen Menschen und abgeleiteten Daten. Weit verbreitete Modelle sind vortrainiert auf riesigen Mengen öffentlich verfügbarer Daten, von Wikipedia und Mailinglisten bis hin zu Lehrbüchern und Quellcode.
+
+Anwendungen, die auf LLMs aufbauen, erfordern oft die Ergänzung dieser Modelle um private oder domänenspezifische Daten. Leider können diese Daten über verschiedene Anwendungen und Datenspeicher verteilt sein. Sie befinden sich hinter APIs, in SQL-Datenbanken oder sind in PDFs und Präsentationen gefangen.
+
+Genau hier kommt **LlamaIndex.TS** ins Spiel.
+
+## 🦙 Wie kann LlamaIndex.TS helfen?
+
+LlamaIndex.TS bietet folgende Tools:
+
+- **Datenladen** - Importieren Sie Ihre vorhandenen `.txt`, `.pdf`, `.csv`, `.md` und `.docx` Daten direkt.
+- **Datenindizes** - Strukturieren Sie Ihre Daten in Zwischenrepräsentationen, die für LLMs einfach und leistungsstark zu verarbeiten sind.
+- **Engines** - Bieten Sie einen natürlichsprachlichen Zugriff auf Ihre Daten. Zum Beispiel:
+  - Abfrage-Engines sind leistungsstarke Abfrage-Schnittstellen für wissensgestützte Ausgaben.
+  - Chat-Engines sind konversationelle Schnittstellen für Interaktionen mit Ihren Daten, bei denen mehrere Nachrichten hin und her ausgetauscht werden.
+
+## 👨‍👩‍👧‍👦 Für wen ist LlamaIndex?
+
+LlamaIndex.TS bietet einen Kernsatz von Tools, die für alle geeignet sind, die LLM-Apps mit JavaScript und TypeScript entwickeln.
+
+Unsere API auf hoher Ebene ermöglicht es Anfängern, LlamaIndex.TS zum Aufnehmen und Abfragen ihrer Daten zu verwenden.
+
+Für komplexere Anwendungen ermöglichen unsere APIs auf niedrigerer Ebene fortgeschrittenen Benutzern, jedes Modul - Datenverbindungen, Indizes, Retriever und Abfrage-Engines - anzupassen und zu erweitern, um ihren Anforderungen gerecht zu werden.
+
+## Erste Schritte
+
+`npm install llamaindex`
+
+Unsere Dokumentation enthält [Installationsanweisungen](./installation.md) und ein [Einführungstutorial](./starter.md), um Ihre erste Anwendung zu erstellen.
+
+Sobald Sie bereit sind, bietet [High-Level-Konzepte](./concepts.md) einen Überblick über die modulare Architektur von LlamaIndex. Für praktische Beispiele schauen Sie sich unsere [End-to-End-Tutorials](./end_to_end.md) an.
+
+## 🗺️ Ökosystem
+
+Um LlamaIndex herunterzuladen oder beizutragen, finden Sie es auf:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Community
+
+Brauchen Sie Hilfe? Haben Sie einen Funktionsvorschlag? Treten Sie der LlamaIndex-Community bei:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4b5049c5f372053909ed636233f0c4a0c464c7b
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Die Chat-Engine ist eine schnelle und einfache Möglichkeit, mit den Daten in Ihrem Index zu chatten.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// setzt einen bestehenden `index` voraus (siehe die Index-Seite)
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// mit dem Chatten beginnen
+const query = "Worum geht es in den Daten?";
+const response = await chatEngine.chat(query);
+```
+
+## API-Referenzen
+
+- [ContextChatEngine (Kontext-Chat-Engine)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (Kondensierte-Fragen-Chat-Engine)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..afa6dba9ce586ec3bd46e6cc59ade6c8862f0e16
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Ein Index ist der grundlegende Container und die Organisation für Ihre Daten. LlamaIndex.TS unterstützt zwei Indizes:
+
+- `VectorStoreIndex` - sendet die Top-k-`Node`s an das LLM, wenn eine Antwort generiert wird. Der Standardwert für Top-k ist 2.
+- `SummaryIndex` - sendet jede `Node` im Index an das LLM, um eine Antwort zu generieren.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API-Referenz
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..24ba339c90c7c2424b96900608bad557392b1a5c
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+LlamaIndex.TS unterstützt das einfache Laden von Dateien aus Ordnern mithilfe der Klasse `SimpleDirectoryReader`. Derzeit werden `.txt`-, `.pdf`-, `.csv`-, `.md`- und `.docx`-Dateien unterstützt; weitere Formate sind für die Zukunft geplant!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API-Referenz
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e6d503968d82fc80126819ff7fb57dc9982f6d3
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumente und Knoten
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+`Dokumente` und `Knoten` sind die grundlegenden Bausteine eines jeden Index. Obwohl die API für diese Objekte ähnlich ist, repräsentieren `Dokument`-Objekte ganze Dateien, während `Knoten` kleinere Teile des ursprünglichen Dokuments sind, die für eine LLM und Q&A geeignet sind.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "Text", metadata: { key: "val" } });
+```
+
+## API-Referenz
+
+- [Dokument](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ad02b1798400e16d67bd66d9901bf190c4e80a5
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,38 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Abfrage-Engine)
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Eine Abfrage-Engine umschließt einen `Retriever` und einen `ResponseSynthesizer` in einer Pipeline, die den Abfrage-String verwendet, um Knoten abzurufen und sie dann an den LLM zu senden, um eine Antwort zu generieren.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Abfrage-String");
+```
+
+## Sub Question Query Engine (Unterfrage-Abfrage-Engine)
+
+Das grundlegende Konzept der Unterfrage-Abfrage-Engine besteht darin, eine einzelne Abfrage in mehrere Abfragen aufzuteilen, für jede dieser Abfragen eine Antwort zu erhalten und dann diese verschiedenen Antworten zu einer einzigen kohärenten Antwort für den Benutzer zu kombinieren. Sie können es sich als die Technik vorstellen, etwas "Schritt für Schritt zu durchdenken", nur dass dabei über Ihre Datenquellen iteriert wird!
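+
+Zur Veranschaulichung eine minimale Skizze in TypeScript, angelehnt an das subquestion.ts-Beispiel (Namen wie `dokument_tool` sind nur Platzhalter; die API kann je nach Version abweichen):
+
+```typescript
+import {
+  Document,
+  SubQuestionQueryEngine,
+  VectorStoreIndex,
+} from "llamaindex";
+
+// Index über ein Dokument aufbauen
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "Alex mag Katzen. Benny mag Hunde." }),
+]);
+
+// Die Query Engine des Index als Tool mit Name und Beschreibung verpacken
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "dokument_tool",
+        description: "Beantwortet Fragen zum Dokument",
+      },
+    },
+  ],
+});
+
+// Eine zusammengesetzte Abfrage wird in Unterfragen aufgeteilt,
+// deren Antworten zu einer Gesamtantwort kombiniert werden
+const response = await queryEngine.query("Was mögen Alex und Benny jeweils?");
+console.log(response.toString());
+```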
+
+### Erste Schritte
+
+Der einfachste Weg, um die Unterfrage-Abfrage-Engine auszuprobieren, besteht darin, die Datei subquestion.ts in [Beispielen](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts) auszuführen.
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+Die SubQuestionQueryEngine wird mit Tools implementiert. Die grundlegende Idee von Tools besteht darin, dass sie ausführbare Optionen für das große Sprachmodell sind. In diesem Fall stützt sich unsere SubQuestionQueryEngine auf QueryEngineTool, das, wie Sie vermutet haben, ein Tool zum Ausführen von Abfragen auf einer QueryEngine ist. Dadurch können wir dem Modell die Möglichkeit geben, verschiedene Dokumente für verschiedene Fragen abzufragen. Sie könnten sich auch vorstellen, dass die SubQuestionQueryEngine ein Tool verwenden könnte, das im Web nach etwas sucht oder eine Antwort mit Wolfram Alpha erhält.
+
+Weitere Informationen zu Tools finden Sie in der Python-Dokumentation von LlamaIndex unter https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+## API-Referenz
+
+- [RetrieverQueryEngine (Retriever-Abfrage-Engine)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Unterfrage-Abfrage-Engine)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Abfrage-Engine-Tool)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb59ce2d7b4d9d29d4eea91a16b20ee95db7da62
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Kernmodule
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+LlamaIndex.TS bietet mehrere Kernmodule, die in High-Level-Module für einen schnellen Einstieg und Low-Level-Module für die Anpassung von Schlüsselkomponenten unterteilt sind, wie Sie es benötigen.
+
+## High-Level-Module
+
+- [**Dokument**](./high_level/documents_and_nodes.md): Ein Dokument repräsentiert eine Textdatei, eine PDF-Datei oder ein anderes zusammenhängendes Datenstück.
+
+- [**Knoten**](./high_level/documents_and_nodes.md): Der grundlegende Datenbaustein. Am häufigsten handelt es sich dabei um Teile des Dokuments, die in handhabbare Stücke aufgeteilt sind, die klein genug sind, um in ein Einbettungsmodell und LLM eingespeist zu werden.
+
+- [**Reader/Loader**](./high_level/data_loader.md): Ein Reader oder Loader ist etwas, das ein Dokument aus der realen Welt aufnimmt und in eine Dokumentklasse umwandelt, die dann in Ihrem Index und Ihren Abfragen verwendet werden kann. Derzeit unterstützen wir einfache Textdateien und PDFs mit vielen weiteren Formaten in Arbeit.
+
+- [**Indizes**](./high_level/data_index.md): Indizes speichern die Knoten und die Einbettungen dieser Knoten.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query Engines verarbeiten die Abfrage, die Sie eingeben, und geben Ihnen das Ergebnis zurück. Query Engines kombinieren in der Regel eine vorgefertigte Eingabeaufforderung mit ausgewählten Knoten aus Ihrem Index, um dem LLM den Kontext zu geben, den er zur Beantwortung Ihrer Abfrage benötigt (siehe die Skizze nach dieser Liste).
+
+- [**ChatEngine**](./high_level/chat_engine.md): Eine ChatEngine hilft Ihnen beim Aufbau eines Chatbots, der mit Ihren Indizes interagiert.
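+
+Als kurze Illustration, wie die High-Level-Module zusammenspielen, eine minimale Skizze auf Basis der Beispiele in dieser Dokumentation:
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Dokument -> Index -> Query Engine
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "LlamaIndex.TS ist ein Datenframework für LLM-Anwendungen." }),
+]);
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Was ist LlamaIndex.TS?");
+console.log(response.toString());
+```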
+
+## Low-Level-Module
+
+- [**LLM**](./low_level/llm.md): Die LLM-Klasse ist eine einheitliche Schnittstelle über einen großen Sprachmodellanbieter wie OpenAI GPT-4, Anthropic Claude oder Meta LLaMA. Sie können sie unterklassifizieren, um eine Verbindung zu Ihrem eigenen großen Sprachmodell herzustellen.
+
+- [**Embedding**](./low_level/embedding.md): Ein Embedding wird als Vektor von Gleitkommazahlen dargestellt. Unser Standard-Embedding-Modell ist text-embedding-ada-002 von OpenAI; jede damit erzeugte Einbettung besteht aus 1.536 Gleitkommazahlen. Ein weiteres beliebtes Embedding-Modell ist BERT, das 768 Gleitkommazahlen verwendet, um jeden Knoten darzustellen. Wir bieten eine Reihe von Hilfsprogrammen zum Arbeiten mit Embeddings an, einschließlich 3 Optionen zur Ähnlichkeitsberechnung und Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Textaufteilungsstrategien sind von entscheidender Bedeutung für die Gesamtwirksamkeit der Embedding-Suche. Derzeit haben wir zwar eine Standardlösung, aber es gibt keine universelle Lösung. Je nach Quelldokumenten möchten Sie möglicherweise unterschiedliche Aufteilungsgrößen und -strategien verwenden. Derzeit unterstützen wir die Aufteilung nach fester Größe, die Aufteilung nach fester Größe mit überlappenden Abschnitten, die Aufteilung nach Satz und die Aufteilung nach Absatz. Der TextSplitter wird vom NodeParser verwendet, um `Document`s in `Node`s aufzuteilen.
+
+- [**Retriever**](./low_level/retriever.md): Der Retriever wählt tatsächlich die Nodes aus, die aus dem Index abgerufen werden sollen. Hier können Sie versuchen, mehr oder weniger Nodes pro Abfrage abzurufen, Ihre Ähnlichkeitsfunktion zu ändern oder Ihren eigenen Retriever für jeden einzelnen Anwendungsfall in Ihrer Anwendung zu erstellen. Möglicherweise möchten Sie beispielsweise einen separaten Retriever für Code-Inhalte und Textinhalte haben.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): Der ResponseSynthesizer ist dafür verantwortlich, einen Abfragestring zu nehmen und mithilfe einer Liste von `Node`s eine Antwort zu generieren. Dies kann in verschiedenen Formen erfolgen, z. B. durch Iterieren über den gesamten Kontext und Verfeinern einer Antwort oder durch Erstellen eines Baums von Zusammenfassungen und Rückgabe der Wurzelzusammenfassung.
+
+- [**Storage**](./low_level/storage.md): Irgendwann möchten Sie Ihre Indizes, Daten und Vektoren speichern, anstatt die Embedding-Modelle jedes Mal neu auszuführen. IndexStore, DocStore, VectorStore und KVStore sind Abstraktionen, mit denen Sie dies tun können. Zusammen bilden sie den StorageContext. Derzeit können Sie Ihre Embeddings in Dateien im Dateisystem (oder einem virtuellen Dateisystem im Arbeitsspeicher) speichern, aber wir fügen auch aktiv Integrationen zu Vektordatenbanken hinzu.
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..6f05a6a657bf20aec2850f8565b62a6a65ae3d08
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Einbettung
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Das Einbettungsmodell in LlamaIndex ist dafür verantwortlich, numerische Darstellungen von Text zu erstellen. Standardmäßig verwendet LlamaIndex das Modell `text-embedding-ada-002` von OpenAI.
+
+Dies kann explizit im `ServiceContext`-Objekt festgelegt werden.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API-Referenz
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..a86a076bd4f9fd11d03bb68c24eef566bc464521
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Der LLM ist dafür verantwortlich, Texte zu lesen und natürliche Sprachantworten auf Anfragen zu generieren. Standardmäßig verwendet LlamaIndex.TS `gpt-3.5-turbo`.
+
+Der LLM kann explizit im `ServiceContext`-Objekt festgelegt werden.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API-Referenz
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..be44ba4e3feb0ed85b65cffe2468a1538e3323c5
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Der `NodeParser` in LlamaIndex ist dafür verantwortlich, `Document`-Objekte in handlichere `Node`-Objekte aufzuteilen. Wenn Sie `.fromDocuments()` aufrufen, wird automatisch der `NodeParser` aus dem `ServiceContext` verwendet, um dies für Sie zu erledigen. Alternativ können Sie ihn verwenden, um Dokumente im Voraus aufzuteilen.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Ich bin 10 Jahre alt. John ist 20 Jahre alt." }),
+]);
+```
+
+## TextSplitter
+
+Der zugrunde liegende Textsplitter teilt den Text in Sätze auf. Er kann auch als eigenständiges Modul zum Aufteilen von Rohtext verwendet werden.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hallo Welt");
+```
+
+## API-Referenz
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..5a681337db0f725f26cbb7d338fa51bd4f24fa15
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Der ResponseSynthesizer ist dafür verantwortlich, die Abfrage, Knoten und Vorlagen für die Antwort an den LLM zu senden, um eine Antwort zu generieren. Es gibt einige wichtige Modi zur Generierung einer Antwort:
+
+- `Refine`: "Erstellen und Verfeinern" einer Antwort, indem jeder abgerufene Textabschnitt sequenziell durchlaufen wird.
+  Dies führt zu einem separaten LLM-Aufruf pro Knoten. Gut für detailliertere Antworten.
+- `CompactAndRefine` (Standard): "Kompaktieren" der Eingabeaufforderung während jedes LLM-Aufrufs, indem so viele Textabschnitte wie möglich in die maximale Größe der Eingabeaufforderung gepackt werden. Wenn es zu viele Abschnitte gibt, um in eine Eingabeaufforderung zu passen, wird eine Antwort durch "Erstellen und Verfeinern" durch mehrere kompakte Eingabeaufforderungen erzeugt. Das Gleiche wie `refine`, sollte jedoch zu weniger LLM-Aufrufen führen.
+- `TreeSummarize`: Basierend auf einer Reihe von Textabschnitten und der Abfrage wird rekursiv ein Baum erstellt und der Wurzelknoten als Antwort zurückgegeben. Gut für Zusammenfassungszwecke.
+- `SimpleResponseBuilder`: Basierend auf einer Reihe von Textabschnitten und der Abfrage wird die Abfrage auf jeden Textabschnitt angewendet und die Antworten in einem Array akkumuliert. Gibt eine verkettete Zeichenkette aller Antworten zurück. Gut, wenn Sie dieselbe Abfrage separat gegen jeden Textabschnitt ausführen müssen.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Ich bin 10 Jahre alt." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John ist 20 Jahre alt." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Wie alt bin ich?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
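+
+Der Antwortmodus lässt sich über einen `responseBuilder` auswählen. Eine minimale, ungeprüfte Skizze, unter der Annahme, dass der Konstruktor eine `responseBuilder`-Option akzeptiert und `TreeSummarize` einen `ServiceContext` entgegennimmt:
+
+```typescript
+import {
+  ResponseSynthesizer,
+  TreeSummarize,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+
+// Annahme: TreeSummarize als responseBuilder, gut für Zusammenfassungen.
+const summarizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+});
+```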
+
+## API-Referenz
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..d5cf4391700e12a6fe5a43454ed8d669d6e854f4
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Abrufmodul)
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Ein Retriever in LlamaIndex ist das, was verwendet wird, um `Node`s anhand einer Abfragezeichenfolge aus einem Index abzurufen. Ein `VectorIndexRetriever` ruft die top-k ähnlichsten Knoten ab. Ein `SummaryIndexRetriever` hingegen ruft alle Knoten unabhängig von der Abfrage ab.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Knoten abrufen!
+const nodesWithScore = await retriever.retrieve("Abfragezeichenfolge");
+```
+
+## API-Referenz
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..59d980ecb3411b3027ec4a6f8980b6e2f25400fa
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 7
+---
+
+# Speicherung
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Die Speicherung in LlamaIndex.TS funktioniert automatisch, sobald Sie ein `StorageContext`-Objekt konfiguriert haben. Konfigurieren Sie einfach das `persistDir` und fügen Sie es einem Index hinzu.
+
+Derzeit wird nur das Speichern und Laden von der Festplatte unterstützt; weitere Integrationen sind geplant!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
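+
+Um denselben Index später wieder zu laden, genügt derselbe `persistDir`. Eine minimale, ungeprüfte Skizze, unter der Annahme, dass `VectorStoreIndex.init` einen bestehenden `StorageContext` wieder einliest:
+
+```typescript
+// Annahme: lädt den zuvor unter ./storage persistierten Index.
+const loadedIndex = await VectorStoreIndex.init({ storageContext });
+```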
+
+## API-Referenz
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..72a477a56c7e415b79a32c6e08c0653b7a4291e5
--- /dev/null
+++ b/apps/docs/i18n/de/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter-Tutorial
+
+`Diese Dokumentation wurde automatisch übersetzt und kann Fehler enthalten. Zögern Sie nicht, einen Pull Request zu öffnen, um Änderungen vorzuschlagen.`
+
+Sobald Sie [LlamaIndex.TS mit NPM installiert](installation) und Ihren OpenAI-Schlüssel eingerichtet haben, sind Sie bereit, Ihre erste App zu starten:
+
+In einem neuen Ordner:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # falls erforderlich
+```
+
+Erstellen Sie die Datei `example.ts`. Dieser Code lädt einige Beispieldaten, erstellt ein Dokument, indexiert es (wodurch Embeddings mit OpenAI erstellt werden) und erstellt dann eine Abfrage-Engine, um Fragen zu den Daten zu beantworten.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Lade den Aufsatz aus abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Erstelle ein Document-Objekt mit dem Aufsatz
+  const document = new Document({ text: essay });
+
+  // Teile den Text auf und erstelle Embeddings. Speichere sie in einem VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Abfrage des Index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Was hat der Autor im College gemacht?",
+  );
+
+  // Ausgabe der Antwort
+  console.log(response.toString());
+}
+
+main();
+```
+
+Dann können Sie es ausführen mit
+
+```bash
+npx ts-node example.ts
+```
+
+Bereit, mehr zu lernen? Schauen Sie sich unseren NextJS-Playground unter https://llama-playground.vercel.app/ an. Der Quellcode ist unter https://github.com/run-llama/ts-playground verfügbar.
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..22ddb82453856a6c3675b9e296e0e237f4ff3b46
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,91 @@
+---
+sidebar_position: 3
+---
+
+# Έννοιες Υψηλού Επιπέδου
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Το LlamaIndex.TS σας βοηθά να δημιουργήσετε εφαρμογές με LLM (π.χ. Q&A, chatbot) πάνω σε προσαρμοσμένα δεδομένα.
+
+Σε αυτόν τον οδηγό με υψηλού επιπέδου έννοιες, θα μάθετε:
+
+- πώς ένα LLM μπορεί να απαντήσει σε ερωτήσεις χρησιμοποιώντας τα δικά σας δεδομένα.
+- βασικές έννοιες και ενότητες στο LlamaIndex.TS για τη σύνθεση της δικής σας ροής ερωτημάτων (query pipeline).
+
+## Απάντηση σε Ερωτήσεις Πάνω στα Δεδομένα Σας
+
+Το LlamaIndex χρησιμοποιεί μια διαδικασία δύο σταδίων όταν χρησιμοποιείτε ένα LLM με τα δεδομένα σας:
+
+1. **στάδιο ευρετηρίασης**: προετοιμασία μιας βάσης γνώσης και
+2. **στάδιο ερωτήματος**: ανάκτηση σχετικού περιβάλλοντος από τη γνώση για να βοηθήσει το LLM να απαντήσει σε μια ερώτηση.
+
+![](./_static/concepts/rag.jpg)
+
+Αυτή η διαδικασία είναι επίσης γνωστή ως Δημιουργία Ενισχυμένη από Ανάκτηση (Retrieval Augmented Generation, RAG).
+
+Το LlamaIndex.TS παρέχει τα απαραίτητα εργαλεία για να κάνει και τα δύο βήματα εξαιρετικά εύκολα.
+
+Ας εξερευνήσουμε κάθε στάδιο αναλυτικά.
+
+### Στάδιο Ευρετηρίασης
+
+Το LlamaIndex.TS σας βοηθά να προετοιμάσετε τη βάση γνώσης με ένα σύνολο συνδέσεων δεδομένων και ευρετηρίων.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Φορτωτές Δεδομένων**](./modules/high_level/data_loader.md):
+Ένας σύνδεσμος δεδομένων (δηλαδή `Reader`) εισάγει δεδομένα από διάφορες πηγές και μορφές δεδομένων σε μια απλή αναπαράσταση `Document` (κείμενο και απλά μεταδεδομένα).
+
+[**Έγγραφα / Κόμβοι**](./modules/high_level/documents_and_nodes.md): Ένα `Document` είναι ένα γενικό δοχείο γύρω από οποιαδήποτε πηγή δεδομένων - για παράδειγμα, ένα PDF, μια έξοδος API ή δεδομένα ανακτημένα από μια βάση δεδομένων. Ένας `Node` είναι η ατομική μονάδα δεδομένων στο LlamaIndex και αναπαριστά ένα «κομμάτι» ενός πηγαίου `Document`. Είναι μια πλούσια αναπαράσταση που περιλαμβάνει μεταδεδομένα και σχέσεις (με άλλους κόμβους), ώστε να επιτρέπει ακριβείς και εκφραστικές λειτουργίες ανάκτησης.
+
+[**Ευρετήρια Δεδομένων**](./modules/high_level/data_index.md):
+Αφού εισάγετε τα δεδομένα σας, το LlamaIndex σας βοηθά να ευρετηριάσετε τα δεδομένα σε ένα μορφότυπο που είναι εύκολο να ανακτηθεί.
+
+Στο παρασκήνιο, το LlamaIndex αναλύει τα αρχικά έγγραφα σε ενδιάμεσες αναπαραστάσεις, υπολογίζει διανυσματικές ενσωματώσεις (embeddings) και αποθηκεύει τα δεδομένα σας στη μνήμη ή στον δίσκο.
+
+### Στάδιο Ερωτήματος
+
+Στο στάδιο του ερωτήματος, η ροή ερωτημάτων ανακτά το πιο σχετικό πλαίσιο με βάση μια ερώτηση του χρήστη
+και το περνά στο LLM (μαζί με το ερώτημα) για να συνθέσει μια απάντηση.
+
+Αυτό δίνει στο LLM επίκαιρη γνώση που δεν υπάρχει στα αρχικά δεδομένα εκπαίδευσής του
+(μειώνοντας παράλληλα τις ψευδαισθήσεις).
+
+Η κύρια πρόκληση στο στάδιο του ερωτήματος είναι η ανάκτηση, η ενορχήστρωση και ο συλλογισμός πάνω σε (ενδεχομένως πολλές) βάσεις γνώσης.
+
+Το LlamaIndex παρέχει συνθέσιμες ενότητες που σας βοηθούν να κατασκευάσετε και να ενσωματώσετε ροές RAG για Q&A (μηχανή ερωτημάτων), chatbot (μηχανή συνομιλίας) ή ως μέρος ενός πράκτορα.
+
+Αυτά τα δομικά στοιχεία μπορούν να προσαρμοστούν ώστε να αντανακλούν τις προτιμήσεις κατάταξης, καθώς και να συντεθούν ώστε να συλλογίζονται πάνω σε πολλαπλές βάσεις γνώσης με δομημένο τρόπο.
+
+![](./_static/concepts/querying.jpg)
+
+#### Δομικά Στοιχεία
+
+[**Retrievers**](./modules/low_level/retriever.md):
+Ένας ανακτητής (retriever) καθορίζει πώς να ανακτάται αποτελεσματικά το σχετικό πλαίσιο από μια βάση γνώσης (δηλαδή ένα ευρετήριο) όταν δίνεται ένα ερώτημα.
+Η συγκεκριμένη λογική ανάκτησης διαφέρει για διάφορα ευρετήρια, με το πιο δημοφιλές να είναι η πυκνή ανάκτηση έναντι ενός διανυσματικού ευρετηρίου.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+Ένας συνθέτης απόκρισης δημιουργεί μια απάντηση από ένα LLM, χρησιμοποιώντας ένα ερώτημα χρήστη και ένα σύνολο ανακτημένων τμημάτων κειμένου.
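+
+Ένα μικρό σκίτσο του πώς συνδυάζονται αυτά τα δύο δομικά στοιχεία (με την υπόθεση ότι ο κατασκευαστής του `RetrieverQueryEngine` δέχεται έναν ανακτητή και έναν συνθέτη):
+
+```typescript
+import { ResponseSynthesizer, RetrieverQueryEngine } from "llamaindex";
+
+// Υπόθεση: το index είναι ένα ήδη δημιουργημένο VectorStoreIndex.
+const retriever = index.asRetriever();
+const synthesizer = new ResponseSynthesizer();
+const queryEngine = new RetrieverQueryEngine(retriever, synthesizer);
+```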
+
+#### Ροές (Pipelines)
+
+[**Μηχανές Ερωτήσεων**](./modules/high_level/query_engine.md):
+Μια μηχανή ερωτήσεων είναι μια ολοκληρωμένη ροή που σας επιτρέπει να κάνετε ερωτήσεις στα δεδομένα σας.
+Δέχεται μια ερώτηση σε φυσική γλώσσα και επιστρέφει μια απάντηση, μαζί με το πλαίσιο αναφοράς που ανακτήθηκε και δόθηκε στο LLM.
+
+[**Μηχανές Συνομιλίας**](./modules/high_level/chat_engine.md):
+Μια μηχανή συνομιλίας είναι μια ολοκληρωμένη ροή για να συνομιλείτε με τα δεδομένα σας
+(πολλαπλές ανταλλαγές αντί για μία μόνο ερώτηση και απάντηση).
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..1abc8d8963811aa1799225107ffb388a9bc1ee99
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 4
+---
+
+# Παραδείγματα από άκρη σε άκρη
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Περιλαμβάνουμε αρκετά παραδείγματα από άκρη σε άκρη που χρησιμοποιούν το LlamaIndex.TS στο αποθετήριο.
+
+Ελέγξτε τα παρακάτω παραδείγματα ή δοκιμάστε τα και ολοκληρώστε τα σε λίγα λεπτά με τα διαδραστικά εκπαιδευτικά παραδείγματα του Github Codespace που παρέχονται από το Dev-Docs [εδώ](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Μηχανή Συνομιλίας](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Διαβάστε ένα αρχείο και συζητήστε για αυτό με το LLM.
+
+## [Δείκτης Vector](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Δημιουργήστε έναν δείκτη vector και κάντε ερωτήματα σε αυτόν. Ο δείκτης vector θα χρησιμοποιήσει ενσωματώσεις για να ανακτήσει τους k πιο σχετικούς κόμβους. Από προεπιλογή, το k είναι 2.
+
+## [Σύνοψη Δείκτη](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Δημιουργήστε έναν δείκτη λίστας και κάντε ερωτήσεις σε αυτόν. Αυτό το παράδειγμα χρησιμοποιεί επίσης τον `LLMRetriever`, ο οποίος θα χρησιμοποιήσει το LLM για να επιλέξει τους καλύτερους κόμβους για χρήση κατά τη δημιουργία απάντησης.
+
+## [Αποθήκευση / Φόρτωση ενός Δείκτη](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Δημιουργία και φόρτωση ενός δείκτη διανύσματος. Η αποθήκευση στον δίσκο στο LlamaIndex.TS γίνεται αυτόματα μόλις δημιουργηθεί ένα αντικείμενο περιβάλλοντος αποθήκευσης.
+
+## [Προσαρμοσμένο Διάνυσμα Ευρετηρίου](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Δημιουργήστε ένα διάνυσμα ευρετηρίου και κάντε ερωτήσεις σε αυτό, ενώ παράλληλα ρυθμίζετε το `LLM`, το `ServiceContext` και το `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Δημιουργήστε ένα OpenAI LLM και χρησιμοποιήστε το απευθείας για συνομιλία.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Δημιουργήστε ένα Llama-2 LLM και χρησιμοποιήστε το απευθείας για συνομιλία.
+
+## [Μηχανή Ερωτήσεων Υπο-Ερώτησης (SubQuestionQueryEngine)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Χρησιμοποιεί την `Μηχανή Ερωτήσεων Υπο-Ερώτησης (SubQuestionQueryEngine)`, η οποία διασπά πολύπλοκες ερωτήσεις σε πολλαπλές υπο-ερωτήσεις και στη συνέχεια συγκεντρώνει μια απάντηση από τις απαντήσεις όλων των υπο-ερωτήσεων.
+
+## [Χαμηλού Επιπέδου Ενότητες](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Αυτό το παράδειγμα χρησιμοποιεί αρκετές ενότητες χαμηλού επιπέδου, οι οποίες αφαιρούν την ανάγκη για έναν πραγματικό μηχανισμό ερωτήσεων. Αυτές οι ενότητες μπορούν να χρησιμοποιηθούν οπουδήποτε, σε οποιαδήποτε εφαρμογή, ή να προσαρμοστούν και να υποκλασιοποιηθούν για να ικανοποιήσουν τις δικές σας ανάγκες.
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..1153778ed98089354a0c433dddad7535f78fe35c
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Περιβάλλοντα
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Το LlamaIndex υποστηρίζει επίσημα το NodeJS 18 και το NodeJS 20.
+
+## Δρομολογητής εφαρμογής NextJS
+
+Εάν χρησιμοποιείτε χειριστές διαδρομών (route handlers) του NextJS App Router / serverless functions, θα πρέπει να χρησιμοποιήσετε τη λειτουργία NodeJS:
+
+```js
+export const runtime = "nodejs"; // προεπιλογή
+```
+
+και θα πρέπει να προσθέσετε μια εξαίρεση για το pdf-parse στο next.config.js σας
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Βάζει το pdf-parse σε πραγματική λειτουργία NodeJS με τον δρομολογητή NextJS App
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..5e4a23a5941b13f5ab2a14d59db9db521dd8b06f
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,69 @@
+---
+sidebar_position: 1
+---
+
+
+# Εγκατάσταση και Ρύθμιση
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+
+Βεβαιωθείτε ότι έχετε το NodeJS v18 ή νεότερη έκδοση.
+
+
+## Χρήση του create-llama
+
+Ο ευκολότερος τρόπος για να ξεκινήσετε με το LlamaIndex είναι να χρησιμοποιήσετε το `create-llama`. Αυτό το εργαλείο γραμμής εντολών σας επιτρέπει να ξεκινήσετε γρήγορα τη δημιουργία μιας νέας εφαρμογής LlamaIndex, με όλες τις απαραίτητες προεπιλογές ρυθμισμένες για εσάς.
+
+Απλά εκτελέστε
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+για να ξεκινήσετε. Αφού δημιουργηθεί η εφαρμογή σας, εκτελέστε
+
+```bash npm2yarn
+npm run dev
+```
+
+για να ξεκινήσετε τον διακομιστή ανάπτυξης. Στη συνέχεια, μπορείτε να επισκεφθείτε τη διεύθυνση [http://localhost:3000](http://localhost:3000) για να δείτε την εφαρμογή σας.
+
+## Εγκατάσταση από το NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Μεταβλητές περιβάλλοντος
+
+Τα παραδείγματά μας χρησιμοποιούν το OpenAI από προεπιλογή. Θα χρειαστεί να ρυθμίσετε το κλειδί σας για το OpenAI ως εξής:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Αντικαταστήστε με το κλειδί σας από τη διεύθυνση https://platform.openai.com/account/api-keys
+```
+
+Εάν θέλετε να φορτώνεται αυτόματα κάθε φορά, προσθέστε το στο .zshrc/.bashrc σας.
+
+ΠΡΟΕΙΔΟΠΟΙΗΣΗ: Μην κάνετε commit το κλειδί σας για το OpenAI στον έλεγχο εκδόσεων.
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d02288cc26c3d4fb8698ba10f4dedfdba367488
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Τι είναι το LlamaIndex.TS;
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Το LlamaIndex.TS είναι ένα πλαίσιο δεδομένων για εφαρμογές LLM για την εισαγωγή, δομή και πρόσβαση σε ιδιωτικά ή τομεακά δεδομένα. Ενώ υπάρχει επίσης ένα πακέτο python διαθέσιμο (δείτε [εδώ](https://docs.llamaindex.ai/en/stable/)), το LlamaIndex.TS προσφέρει βασικές λειτουργίες σε ένα απλό πακέτο, βελτιστοποιημένο για χρήση με την TypeScript.
+
+## 🚀 Γιατί το LlamaIndex.TS?
+
+Στην ουσία τους, τα LLM προσφέρουν μια φυσική γλωσσική διεπαφή μεταξύ ανθρώπων και εξαγόμενων δεδομένων. Διαθέσιμα μοντέλα έρχονται προ-εκπαιδευμένα με τεράστιες ποσότητες δημόσιων δεδομένων, από τη Βικιπαίδεια και τις λίστες αλληλογραφίας μέχρι τα εγχειρίδια και τον πηγαίο κώδικα.
+
+Οι εφαρμογές που βασίζονται σε LLM συχνά απαιτούν την επέκταση αυτών των μοντέλων με ιδιωτικά ή τομεακά δεδομένα. Δυστυχώς, αυτά τα δεδομένα μπορεί να είναι κατανεμημένα σε εφαρμογές και αποθηκευτικούς χώρους δεδομένων. Βρίσκονται πίσω από διεπαφές προγραμματισμού εφαρμογών (APIs), σε βάσεις δεδομένων SQL ή παγιδευμένα σε PDF και παρουσιάσεις.
+
+Εδώ εμφανίζεται το **LlamaIndex.TS**.
+
+## 🦙 Πώς μπορεί να βοηθήσει το LlamaIndex.TS;
+
+Το LlamaIndex.TS παρέχει τα εξής εργαλεία:
+
+- **Φόρτωση δεδομένων** εισαγωγή των υπαρχόντων δεδομένων σας απευθείας από αρχεία `.txt`, `.pdf`, `.csv`, `.md` και `.docx`
+- **Δείκτες δεδομένων** δομήστε τα δεδομένα σας σε ενδιάμεσες αναπαραστάσεις που είναι εύκολες και αποδοτικές για την κατανάλωση από τα LLMs.
+- **Μηχανές** παρέχουν πρόσβαση στα δεδομένα σας σε φυσική γλώσσα. Για παράδειγμα:
+  - Οι μηχανές ερωτήσεων είναι ισχυρές διεπαφές ανάκτησης για επιπλέον γνώση.
+  - Οι μηχανές συνομιλίας είναι διαδραστικές διεπαφές για πολυ-μηνυματικές, "πίσω και μπροστά" αλληλεπιδράσεις με τα δεδομένα σας.
+
+## 👨‍👩‍👧‍👦 Για ποιους είναι το LlamaIndex;
+
+Το LlamaIndex.TS παρέχει έναν πυρήνα εργαλείων, απαραίτητο για οποιονδήποτε δημιουργεί εφαρμογές LLM με JavaScript και TypeScript.
+
+Η υψηλού επιπέδου διεπαφή μας επιτρέπει στους αρχάριους χρήστες να χρησιμοποιούν το LlamaIndex.TS για την εισαγωγή και ανάκτηση των δεδομένων τους.
+
+Για πιο πολύπλοκες εφαρμογές, οι χαμηλότερου επιπέδου διεπαφές μας επιτρέπουν στους προχωρημένους χρήστες να προσαρμόσουν και να επεκτείνουν οποιαδήποτε μονάδα - συνδέσμους δεδομένων, ευρετήρια, ανακτητές και μηχανές ερωτημάτων - ώστε να ταιριάζει στις ανάγκες τους.
+
+## Ξεκινώντας
+
+`npm install llamaindex`
+
+Η τεκμηρίωσή μας περιλαμβάνει [Οδηγίες Εγκατάστασης](./installation.md) και ένα [Εισαγωγικό Εκπαιδευτικό Πρόγραμμα](./starter.md) για να δημιουργήσετε την πρώτη σας εφαρμογή.
+
+Αφού ξεκινήσετε, οι [Υψηλού Επιπέδου Έννοιες](./concepts.md) παρέχουν μια επισκόπηση της αρθρωτής αρχιτεκτονικής του LlamaIndex. Για περισσότερα πρακτικά παραδείγματα, ρίξτε μια ματιά στα [Ολοκληρωμένα Εκπαιδευτικά Προγράμματα](./end_to_end.md).
+
+## 🗺️ Οικοσύστημα
+
+Για να κατεβάσετε ή να συνεισφέρετε, βρείτε το LlamaIndex στα παρακάτω:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Κοινότητα
+
+Χρειάζεστε βοήθεια; Έχετε πρόταση για λειτουργία; Εγγραφείτε στην κοινότητα του LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..64a29ee75f46c172dff240be93338aff56e611d1
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# Μηχανή Συνομιλίας (ChatEngine)
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Η μηχανή συνομιλίας είναι ένας γρήγορος και απλός τρόπος για να συνομιλήσετε με τα δεδομένα στον δείκτη σας.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// ξεκινήστε τη συνομιλία
+const response = await chatEngine.chat(query);
+```
+
+## Αναφορές Api
+
+- [Μηχανή Συνομιλίας Περιβάλλοντος (ContextChatEngine)](../../api/classes/ContextChatEngine.md)
+- [Μηχανή Συνομιλίας Συμπύκνωσης Ερωτήσεων (CondenseQuestionChatEngine)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..5811b682b60636b2534dd9cae5fac4cd298a8b14
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,34 @@
+---
+sidebar_position: 2
+---
+
+# Ευρετήριο
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Ένα ευρετήριο είναι το βασικό δοχείο και ο οργανωτής των δεδομένων σας. Το LlamaIndex.TS υποστηρίζει δύο ευρετήρια:
+
+- `VectorStoreIndex` - θα στείλει τα κορυφαία `Node`s στο LLM κατά τη δημιουργία μιας απάντησης. Το προεπιλεγμένο top-k είναι 2.
+- `SummaryIndex` - θα στείλει κάθε `Node` στο ευρετήριο στο LLM για να δημιουργήσει μια απάντηση.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
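+
+Αντίστοιχα, ένα σύντομο σκίτσο για ένα `SummaryIndex`:
+
+```typescript
+import { SummaryIndex } from "llamaindex";
+
+// Χρησιμοποιεί το document από το προηγούμενο παράδειγμα.
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+```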
+
+## Αναφορά API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..11fcf3a7f573dd0e4fe5083ec0bb08f15a24e23c
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Αναγνώστης / Φορτωτής
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Το LlamaIndex.TS υποστηρίζει την εύκολη φόρτωση αρχείων από φακέλους χρησιμοποιώντας την κλάση `SimpleDirectoryReader`. Αυτή τη στιγμή, υποστηρίζονται αρχεία `.txt`, `.pdf`, `.csv`, `.md` και `.docx`, με περισσότερα να προγραμματίζονται για το μέλλον!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Αναφορά API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..0fd0f948fae17da40912454e368d10646befb63c
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,20 @@
+---
+sidebar_position: 0
+---
+
+# Έγγραφα και Κόμβοι
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Τα `Έγγραφα` και οι `Κόμβοι` είναι τα βασικά στοιχεία κατασκευής οποιουδήποτε ευρετηρίου. Αν και η API για αυτά τα αντικείμενα είναι παρόμοια, τα αντικείμενα `Έγγραφο` αναπαριστούν ολόκληρα αρχεία, ενώ οι `Κόμβοι` είναι μικρότερα κομμάτια αυτού του αρχικού εγγράφου, που είναι κατάλληλα για ένα LLM και Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "κείμενο", metadata: { κλειδί: "τιμή" } });
+```
+
+## Αναφορά API
+
+- [Έγγραφο](../../api/classes/Document.md)
+- [ΚείμενοΚόμβος](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1bf88d36fb3a60378434d37d9f650165fe80dfd
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,57 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Μηχανή Ερωτήματος)
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Μια μηχανή ερωτήματος (query engine) τυλίγει έναν `Retriever` και έναν `ResponseSynthesizer` σε μια ενιαία ροή (pipeline), η οποία χρησιμοποιεί τη συμβολοσειρά του ερωτήματος για να ανακτήσει κόμβους και στη συνέχεια τους στέλνει στο LLM για να δημιουργήσει μια απάντηση.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("αλφαριθμητικό ερώτημα");
+```
+
+## Μηχανή Ερωτήματος Υποερώτησης
+
+Η βασική ιδέα της Μηχανής Ερωτήματος Υποερώτησης είναι ότι διαιρεί ένα μόνο ερώτημα σε πολλαπλά ερωτήματα, ανακτά μια απάντηση για καθένα από αυτά και στη συνέχεια συνδυάζει τις επιμέρους απαντήσεις σε μια ενιαία, συνεκτική απάντηση για τον χρήστη. Μπορείτε να το σκεφτείτε ως την τεχνική «σκέψου το βήμα προς βήμα», εφαρμοσμένη επαναληπτικά στις πηγές δεδομένων σας!
+
+### Ξεκινώντας
+
+Ο ευκολότερος τρόπος για να αρχίσετε να δοκιμάζετε τη Μηχανή Ερωτήματος Υποερώτησης (Sub Question Query Engine) είναι να εκτελέσετε το αρχείο subquestion.ts στον φάκελο [παραδείγματα](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Εργαλεία
+
+Η Μηχανή Ερωτήματος Υποερώτησης υλοποιείται με τα Εργαλεία. Η βασική ιδέα των Εργαλείων είναι ότι είναι εκτελέσιμες επιλογές για το μεγάλο μοντέλο γλώσσας. Σε αυτήν την περίπτωση, η Μηχανή Ερωτήματος Υποερώτησης μας βασίζεται στο QueryEngineTool, το οποίο, όπως φαντάζεστε, είναι ένα εργαλείο για την εκτέλεση ερωτημάτων σε ένα QueryEngine. Αυτό μας επιτρέπει να δώσουμε στο μοντέλο μια επιλογή για να ερωτήσει διάφορα έγγραφα για διάφορες ερωτήσεις, για παράδειγμα. Μπορείτε επίσης να φανταστείτε ότι η Μηχανή Ερωτήματος Υποερώτησης μπορεί να χρησιμοποιήσει ένα Εργαλείο που αναζητά κάτι στον ιστό ή παίρνει μια απάντηση χρησιμοποιώντας το Wolfram Alpha.
+
+Μπορείτε να μάθετε περισσότερα για τα Εργαλεία ανατρέχοντας στην τεκμηρίωση της Python για το LlamaIndex https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
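+
+Ένα σύντομο σκίτσο, βασισμένο στο παράδειγμα subquestion.ts (τα ονόματα εδώ είναι ενδεικτικά), που δίνει στη μηχανή ένα `QueryEngineTool`:
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Υπόθεση: το vectorQueryEngine είναι μια ήδη δημιουργημένη μηχανή ερωτημάτων.
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: vectorQueryEngine,
+      metadata: {
+        name: "essay_tool", // υποθετικό όνομα
+        description: "Απαντά σε ερωτήσεις σχετικά με το δοκίμιο",
+      },
+    },
+  ],
+});
+```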
+
+## Αναφορά API
+
+- [RetrieverQueryEngine (Μηχανή Ανάκτησης Ερωτήσεων)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Μηχανή Υποερωτήσεων)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Εργαλείο Μηχανής Ερωτήματος)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..726bdf211e0a051f6a2c9a768fb7970a9471b6b6
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Βασικά Αρθρώματα
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Το LlamaIndex.TS προσφέρει αρκετά βασικά αρθρώματα, χωρισμένα σε υψηλού επιπέδου αρθρώματα για γρήγορη εκκίνηση και χαμηλού επιπέδου αρθρώματα για την προσαρμογή των βασικών στοιχείων όπως χρειάζεστε.
+
+## Ενότητες Υψηλού Επιπέδου
+
+- [**Έγγραφο**](./high_level/documents_and_nodes.md): Ένα έγγραφο αναπαριστά ένα αρχείο κειμένου, αρχείο PDF ή άλλα συνεχή δεδομένα.
+
+- [**Κόμβος**](./high_level/documents_and_nodes.md): Το βασικό δομικό στοιχείο δεδομένων. Συνήθως, αυτά είναι μέρη του εγγράφου που χωρίζονται σε διαχειρίσιμα κομμάτια που είναι αρκετά μικρά για να τροφοδοτηθούν σε ένα μοντέλο ενσωμάτωσης και LLM.
+
+- [**Αναγνώστης/Φορτωτής**](./high_level/data_loader.md): Ένας αναγνώστης ή φορτωτής είναι κάτι που παίρνει ένα έγγραφο στον πραγματικό κόσμο και το μετατρέπει σε μια κλάση έγγραφου που μπορείτε να χρησιμοποιήσετε στον Δείκτη σας και στις ερωτήσεις σας. Αυτή τη στιγμή υποστηρίζουμε αρχεία απλού κειμένου και PDF με πολλές ακόμα επιλογές που θα έρθουν.
+
+- [**Δείκτες**](./high_level/data_index.md): Οι δείκτες αποθηκεύουν τους κόμβους και τις ενσωματώσεις αυτών των κόμβων.
+
+- [**Μηχανή Ερωτήσεων**](./high_level/query_engine.md): Οι μηχανές ερωτήσεων είναι αυτές που δημιουργούν την ερώτηση που εισάγετε και σας δίνουν το αποτέλεσμα. Οι μηχανές ερωτήσεων συνδυάζουν συνήθως ένα προκαθορισμένο προτροπή με επιλεγμένους κόμβους από τον Δείκτη σας για να δώσουν στο LLM το πλαίσιο που χρειάζεται για να απαντήσει στην ερώτησή σας.
+
+- [**Μηχανή Συνομιλίας**](./high_level/chat_engine.md): Μια μηχανή συνομιλίας σας βοηθά να δημιουργήσετε ένα chatbot που θα αλληλεπιδρά με τους Δείκτες σας.
+
+## Αρθρώματα Χαμηλού Επιπέδου
+
+- [**LLM**](./low_level/llm.md): Η κλάση LLM είναι μια ενοποιημένη διεπαφή πάνω από παρόχους μεγάλων γλωσσικών μοντέλων, όπως το OpenAI GPT-4, το Anthropic Claude ή το Meta LLaMA. Μπορείτε να δημιουργήσετε υποκλάση της για να γράψετε έναν σύνδεσμο για το δικό σας μεγάλο γλωσσικό μοντέλο.
+
+- [**Ενσωμάτωση**](./low_level/embedding.md): Μια ενσωμάτωση αναπαρίσταται ως ένα διάνυσμα από αριθμούς κινητής υποδιαστολής. Το προεπιλεγμένο μοντέλο ενσωμάτωσης της OpenAI με το όνομα text-embedding-ada-002 αποτελείται από 1.536 αριθμούς κινητής υποδιαστολής. Ένα άλλο δημοφιλές μοντέλο ενσωμάτωσης είναι το BERT, το οποίο χρησιμοποιεί 768 αριθμούς κινητής υποδιαστολής για να αναπαραστήσει κάθε κόμβο. Παρέχουμε μια σειρά από εργαλεία για την εργασία με ενσωματώσεις, συμπεριλαμβανομένων 3 επιλογών υπολογισμού ομοιότητας και της Μέγιστης Οριακής Συνάφειας (Maximum Marginal Relevance).
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Οι στρατηγικές διαίρεσης κειμένου είναι απίστευτα σημαντικές για τη συνολική αποτελεσματικότητα της αναζήτησης ενσωματώσεων. Αυτή τη στιγμή, παρόλο που έχουμε μια προεπιλεγμένη λύση, δεν υπάρχει μία λύση που να ταιριάζει σε όλες τις περιπτώσεις. Ανάλογα με τα αρχεία πηγής, μπορεί να θέλετε να χρησιμοποιήσετε διαφορετικά μεγέθη και στρατηγικές διαίρεσης. Αυτή τη στιγμή υποστηρίζουμε τη διαίρεση με βάση το σταθερό μέγεθος, τη διαίρεση με βάση το σταθερό μέγεθος με επικαλυπτόμενα τμήματα, τη διαίρεση ανά πρόταση και τη διαίρεση ανά παράγραφο. Ο διαχωριστής κειμένου χρησιμοποιείται από τον NodeParser κατά τη διαίρεση των `Εγγράφων` σε `Κόμβους`.
+
+- [**Retriever**](./low_level/retriever.md): Ο Retriever είναι αυτός που πραγματοποιεί την επιλογή των Κόμβων που θα ανακτηθούν από τον δείκτη. Εδώ, μπορείτε να δοκιμάσετε να ανακτήσετε περισσότερους ή λιγότερους Κόμβους ανά ερώτημα, να αλλάξετε τη συνάρτηση ομοιότητας ή να δημιουργήσετε τον δικό σας retriever για κάθε ξεχωριστή περίπτωση χρήσης στην εφαρμογή σας. Για παράδειγμα, μπορείτε να έχετε έναν ξεχωριστό retriever για περιεχόμενο κώδικα έναντι περιεχομένου κειμένου.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): Ο ResponseSynthesizer είναι υπεύθυνος για τη λήψη μιας συμβολοσειράς ερωτήματος και τη χρήση μιας λίστας `Κόμβων` για τη δημιουργία μιας απάντησης. Αυτό μπορεί να πάρει πολλές μορφές, όπως η διαδοχική επεξεργασία όλου του πλαισίου με σταδιακή βελτίωση μιας απάντησης, ή η δημιουργία ενός δέντρου περιλήψεων και η επιστροφή της ριζικής περίληψης.
+
+- [**Αποθήκευση**](./low_level/storage.md): Σε κάποιο σημείο θα θέλετε να αποθηκεύσετε τους δείκτες, τα δεδομένα και τα διανύσματά σας αντί να εκτελείτε τα μοντέλα ενσωμάτωσης κάθε φορά. Οι IndexStore, DocStore, VectorStore και KVStore είναι αφαιρέσεις που σας επιτρέπουν να το κάνετε αυτό. Συνδυασμένα, σχηματίζουν το StorageContext. Αυτή τη στιγμή, σας επιτρέπουμε να διατηρήσετε τις ενσωματώσεις σας σε αρχεία στο σύστημα αρχείων (ή ένα εικονικό σύστημα αρχείων στη μνήμη), αλλά προσθέτουμε επίσης ενεργά ενσωματώσεις σε Βάσεις Δεδομένων Διανυσμάτων.
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..a2224a0e5aea8426665551fa60d4a356b815364c
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 1
+---
+
+# Ενσωμάτωση
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Το μοντέλο ενσωμάτωσης στο LlamaIndex είναι υπεύθυνο για τη δημιουργία αριθμητικών αναπαραστάσεων του κειμένου. Από προεπιλογή, το LlamaIndex θα χρησιμοποιήσει το μοντέλο `text-embedding-ada-002` από το OpenAI.
+
+Αυτό μπορεί να οριστεί ρητά στο αντικείμενο `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Αναφορά API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..b798bba5c30823990423b1fa336e2b4d16ae3b0b
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Το LLM είναι υπεύθυνο για την ανάγνωση κειμένου και τη δημιουργία φυσικής γλώσσας απαντήσεων σε ερωτήματα. Από προεπιλογή, το LlamaIndex.TS χρησιμοποιεί το `gpt-3.5-turbo`.
+
+Το LLM μπορεί να οριστεί ρητά στο αντικείμενο `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## Αναφορά API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..e8379b758a7da4168c7bc65dfff7c0248b6f5285
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser (Αναλυτής Κόμβων)
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Ο `NodeParser` στο LlamaIndex είναι υπεύθυνος για τον διαχωρισμό των αντικειμένων `Document` σε πιο διαχειρίσιμα αντικείμενα `Node`. Όταν καλείτε την `.fromDocuments()`, ο `NodeParser` από το `ServiceContext` χρησιμοποιείται για να το κάνει αυτό αυτόματα για εσάς. Εναλλακτικά, μπορείτε να το χρησιμοποιήσετε για να διαχωρίσετε τα έγγραφα εκ των προτέρων.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Είμαι 10 χρονών. Ο John είναι 20 χρονών." }),
+]);
+```
+
+## TextSplitter (Διαχωριστής Κειμένου)
+
+Ο υποκείμενος διαχωριστής κειμένου διαχωρίζει το κείμενο σε προτάσεις. Μπορεί επίσης να χρησιμοποιηθεί ως αυτόνομη μονάδα για τον διαχωρισμό ακατέργαστου κειμένου.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Γεια σου Κόσμε");
+```
+
+## Αναφορά API
+
+- [SimpleNodeParser (Απλός Αναλυτής Κόμβων)](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter (Διαχωριστής Προτάσεων)](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..bc2bb75bd0fbfa739a180cd511d6daaf42f6b52f
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,62 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (ΣυνθέτηςΑπόκρισης)
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Ο ResponseSynthesizer είναι υπεύθυνος για την αποστολή του ερωτήματος, των κόμβων και των προτύπων προτροπής στο LLM για τη δημιουργία μιας απόκρισης. Υπάρχουν μερικές βασικές λειτουργίες για τη δημιουργία μιας απόκρισης:
+
+- `Refine` (Βελτίωση): «δημιουργία και βελτίωση» μιας απάντησης περνώντας σειριακά από κάθε ανακτημένο τμήμα κειμένου. Αυτό κάνει μια ξεχωριστή κλήση LLM ανά κόμβο. Καλό για πιο λεπτομερείς απαντήσεις.
+- `CompactAndRefine` (ΣυμπίεσηΚαιΒελτίωση) (προεπιλογή): "συμπίεση" της προτροπής κατά την κάθε κλήση LLM, εισάγοντας όσα τμήματα κειμένου μπορούν να χωρέσουν μέσα στο μέγιστο μέγεθος της προτροπής. Εάν υπάρχουν πολλά τμήματα για να χωρέσουν σε μια προτροπή, "δημιουργία και βελτίωση" μιας απάντησης πηγαίνοντας μέσω πολλαπλών συμπιεσμένων προτροπών. Ίδιο με το `refine`, αλλά θα πρέπει να οδηγεί σε λιγότερες κλήσεις LLM.
+- `TreeSummarize` (ΣύνοψηΔέντρου): Δεδομένου ενός συνόλου τμημάτων κειμένου και του ερωτήματος, αναδρομικά κατασκευάζει ένα δέντρο και επιστρέφει τον ριζικό κόμβο ως απόκριση. Καλό για σκοπούς σύνοψης.
+- `SimpleResponseBuilder` (ΑπλόςΔημιουργόςΑπόκρισης): Δεδομένου ενός συνόλου τμημάτων κειμένου και του ερωτήματος, εφαρμόζει το ερώτημα σε κάθε τμήμα κειμένου, συγκεντρώνοντας τις απαντήσεις σε έναν πίνακα. Επιστρέφει μια συνενωμένη συμβολοσειρά όλων των απαντήσεων. Καλό όταν χρειάζεται να εκτελέσετε το ίδιο ερώτημα ξεχωριστά για κάθε τμήμα κειμένου.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Είμαι 10 ετών." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "Ο John είναι 20 ετών." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Πόσων ετών είμαι;",
+  nodesWithScore,
+);
+console.log(response.response);
+```
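+
+Ο τρόπος δημιουργίας της απάντησης μπορεί να επιλεγεί μέσω ενός `responseBuilder`. Ένα μικρό, μη επαληθευμένο σκίτσο, με την υπόθεση ότι ο κατασκευαστής δέχεται επιλογή `responseBuilder` και ότι το `TreeSummarize` δέχεται ένα `ServiceContext`:
+
+```typescript
+import {
+  ResponseSynthesizer,
+  TreeSummarize,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+
+// Υπόθεση: TreeSummarize ως responseBuilder, κατάλληλο για συνόψεις.
+const summarizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+});
+```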
+
+## Αναφορά API
+
+- [ResponseSynthesizer (ΣυνθέτηςΑπόκρισης)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Βελτίωση)](../../api/classes/Refine.md)
+- [CompactAndRefine (ΣυμπίεσηΚαιΒελτίωση)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (ΣύνοψηΔέντρου)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (ΑπλόςΔημιουργόςΑπόκρισης)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..7ea01cbc4eb4caa6e5bc58e409fea113f8422407
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Ανακτητής (Retriever)
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Ένας ανακτητής (retriever) στο LlamaIndex είναι αυτό που χρησιμοποιείται για την ανάκτηση `Node`s από ένα ευρετήριο με βάση μια συμβολοσειρά ερωτήματος. Ένας `VectorIndexRetriever` θα ανακτήσει τους top-k πιο σχετικούς κόμβους. Από την άλλη, ένας `SummaryIndexRetriever` θα ανακτήσει όλους τους κόμβους ανεξάρτητα από το ερώτημα.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Ανάκτηση κόμβων!
+const nodesWithScore = await retriever.retrieve("συμβολοσειρά ερωτήματος");
+```
+
+## Αναφορά API
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..c3e5629ee0b4ceb40a83db19b0136af8d80104f4
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Αποθήκευση (Storage)
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Η αποθήκευση στο LlamaIndex.TS λειτουργεί αυτόματα μόλις έχετε διαμορφώσει ένα αντικείμενο `StorageContext`. Απλά διαμορφώστε το `persistDir` και συνδέστε το με ένα δείκτη.
+
+Αυτή τη στιγμή υποστηρίζεται μόνο η αποθήκευση και η φόρτωση από τον δίσκο, ενώ περισσότερες ενσωματώσεις έχουν προγραμματιστεί!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Δοκιμαστικό Κείμενο" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
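+
+Για να ξαναφορτώσετε αργότερα τον ίδιο δείκτη, αρκεί ο ίδιος `persistDir`. Ένα μικρό σκίτσο, με την υπόθεση ότι η `VectorStoreIndex.init` διαβάζει έναν υπάρχοντα `StorageContext`:
+
+```typescript
+// Υπόθεση: φορτώνει τον δείκτη που αποθηκεύτηκε στο ./storage
+const loadedIndex = await VectorStoreIndex.init({ storageContext });
+```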
+
+## Αναφορά API (API Reference)
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb45e7088672094b4eaf47885df80a2c5673b09b
--- /dev/null
+++ b/apps/docs/i18n/el/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Οδηγός Έναρξης
+
+`Αυτό το έγγραφο έχει μεταφραστεί αυτόματα και μπορεί να περιέχει λάθη. Μη διστάσετε να ανοίξετε ένα Pull Request για να προτείνετε αλλαγές.`
+
+Αφού [εγκαταστήσετε το LlamaIndex.TS χρησιμοποιώντας το NPM](installation) και ρυθμίσετε το κλειδί σας για το OpenAI, είστε έτοιμοι να ξεκινήσετε την πρώτη σας εφαρμογή:
+
+Σε ένα νέο φάκελο:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # αν χρειαστεί
+```
+
+Δημιουργήστε το αρχείο `example.ts`. Αυτός ο κώδικας θα φορτώσει μερικά παραδείγματα δεδομένων, θα δημιουργήσει ένα έγγραφο, θα το ευρετηριάσει (δημιουργώντας embeddings χρησιμοποιώντας το OpenAI) και στη συνέχεια θα δημιουργήσει έναν μηχανισμό ερωτήσεων για να απαντάει σε ερωτήσεις σχετικά με τα δεδομένα.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Φορτώστε το δοκίμιο από το abramov.txt στο Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Δημιουργήστε ένα αντικείμενο Document με το δοκίμιο
+  const document = new Document({ text: essay });
+
+  // Διαχωρίστε το κείμενο και δημιουργήστε τα embeddings. Αποθηκεύστε τα σε ένα VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Ερωτήστε το ευρετήριο
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Τι έκανε ο συγγραφέας στο κολέγιο;",
+  );
+
+  // Εμφάνιση της απάντησης
+  console.log(response.toString());
+}
+
+main();
+```
+
+Στη συνέχεια, μπορείτε να το εκτελέσετε χρησιμοποιώντας
+
+```bash
+npx ts-node example.ts
+```
+
+Έτοιμοι να μάθετε περισσότερα; Ελέγξτε το περιβάλλον μας για το NextJS στο https://llama-playground.vercel.app/. Ο πηγαίος κώδικας είναι διαθέσιμος στο https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..2c720c4890cca70db6d049714d35d22e556125a0
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,91 @@
+---
+sidebar_position: 3
+---
+
+# Conceptos de Alto Nivel
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+LlamaIndex.TS te ayuda a construir aplicaciones impulsadas por LLM (por ejemplo, Q&A, chatbot) sobre datos personalizados.
+
+En esta guía de conceptos de alto nivel, aprenderás:
+
+- cómo un LLM puede responder preguntas utilizando tus propios datos.
+- conceptos clave y módulos en LlamaIndex.TS para componer tu propia canalización de consultas.
+
+## Responder Preguntas en tus Datos
+
+LlamaIndex utiliza un método de dos etapas al utilizar un LLM con tus datos:
+
+1. **etapa de indexación**: preparar una base de conocimientos, y
+2. **etapa de consulta**: recuperar el contexto relevante de los conocimientos para ayudar al LLM a responder una pregunta.
+
+![](./_static/concepts/rag.jpg)
+
+Este proceso también se conoce como Generación Aumentada por Recuperación (RAG).
+
+LlamaIndex.TS proporciona el conjunto de herramientas esenciales para hacer que ambos pasos sean muy fáciles.
+
+Vamos a explorar cada etapa en detalle.
+
+### Etapa de Indexación
+
+LlamaIndex.TS te ayuda a preparar la base de conocimientos con una serie de conectores de datos e índices.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Cargadores de Datos**](./modules/high_level/data_loader.md):
+Un conector de datos (es decir, `Reader`) ingiere datos de diferentes fuentes y formatos de datos en una representación simple de `Document` (texto y metadatos simples).
+
+[**Documentos / Nodos**](./modules/high_level/documents_and_nodes.md): Un `Document` es un contenedor genérico alrededor de cualquier fuente de datos, por ejemplo, un PDF, una salida de API o datos recuperados de una base de datos. Un `Node` es la unidad atómica de datos en LlamaIndex y representa un "fragmento" de un `Document` de origen. Es una representación completa que incluye metadatos y relaciones (con otros nodos) para permitir operaciones de recuperación precisas y expresivas.
+
+[**Índices de Datos**](./modules/high_level/data_index.md):
+Una vez que hayas ingresado tus datos, LlamaIndex te ayuda a indexar los datos en un formato fácil de recuperar.
+
+Bajo el capó, LlamaIndex analiza los documentos en representaciones intermedias, calcula incrustaciones vectoriales y almacena tus datos en memoria o en disco.
+
+### Etapa de Consulta
+
+En la etapa de consulta, la canalización de consultas recupera el contexto más relevante dada una consulta del usuario,
+y lo pasa al LLM (junto con la consulta) para sintetizar una respuesta.
+
+Esto le brinda al LLM conocimientos actualizados que no están en sus datos de entrenamiento originales,
+(también reduciendo la alucinación).
+
+El desafío clave en la etapa de consulta es la recuperación, orquestación y razonamiento sobre bases de conocimientos (potencialmente muchas).
+
+LlamaIndex proporciona módulos componibles que te ayudan a construir e integrar canalizaciones RAG para Q&A (motor de consulta), chatbot (motor de chat) o como parte de un agente.
+
+Estos bloques de construcción se pueden personalizar para reflejar las preferencias de clasificación, así como componerse para razonar sobre múltiples bases de conocimientos de manera estructurada.
+
+![](./_static/concepts/querying.jpg)
+
+#### Bloques de Construcción
+
+[**Recuperadores**](./modules/low_level/retriever.md):
+Un recuperador define cómo recuperar de manera eficiente el contexto relevante de una base de conocimientos (es decir, índice) cuando se le proporciona una consulta.
+La lógica de recuperación específica difiere para diferentes índices, siendo la más popular la recuperación densa contra un índice vectorial.
+
+[**Sintetizadores de Respuesta**](./modules/low_level/response_synthesizer.md):
+Un sintetizador de respuesta genera una respuesta a partir de un LLM, utilizando una consulta del usuario y un conjunto dado de fragmentos de texto recuperados.
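+
+Como orientación, así se pueden componer estos dos bloques a bajo nivel (un boceto que asume un `index` ya construido, como en la etapa de indexación):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// Recupera los nodos más relevantes para la consulta (se asume `index`)
+const retriever = index.asRetriever();
+const nodos = await retriever.retrieve("¿Qué edad tengo?");
+
+// Sintetiza una respuesta a partir de la consulta y los nodos recuperados
+const sintetizador = new ResponseSynthesizer();
+const respuesta = await sintetizador.synthesize("¿Qué edad tengo?", nodos);
+```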
+
+"
+
+#### Canalizaciones
+
+[**Motores de Consulta**](./modules/high_level/query_engine.md):
+Un motor de consulta es una canalización de extremo a extremo que te permite hacer preguntas sobre tus datos.
+Recibe una consulta en lenguaje natural y devuelve una respuesta, junto con el contexto de referencia recuperado y pasado al LLM.
+
+[**Motores de Chat**](./modules/high_level/chat_engine.md):
+Un motor de chat es una canalización de extremo a extremo para tener una conversación con tus datos
+(varias idas y vueltas en lugar de una sola pregunta y respuesta).
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..deb2cd8d87be9ef5a5bed60aa9b35082a030cd47
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,49 @@
+---
+sidebar_position: 4
+---
+
+# Ejemplos de principio a fin
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+Incluimos varios ejemplos de principio a fin utilizando LlamaIndex.TS en el repositorio.
+
+Echa un vistazo a los ejemplos a continuación o pruébalos y complétalos en minutos con los tutoriales interactivos de Github Codespace proporcionados por Dev-Docs [aquí](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Motor de Chat](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Lee un archivo y chatea sobre él con el LLM.
+
+## [Índice de Vectores](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Crea un índice de vectores y realiza consultas en él. El índice de vectores utilizará embeddings para obtener los nodos más relevantes en función de los k mejores. De forma predeterminada, el valor de k es 2.
+
+## [Índice de resumen](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Crea un índice de lista y realiza consultas en él. Este ejemplo también utiliza el `LLMRetriever`, que utilizará el LLM para seleccionar los mejores nodos a utilizar al generar una respuesta.
+
+## [Guardar / Cargar un Índice](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Crea y carga un índice de vectores. La persistencia en disco en LlamaIndex.TS ocurre automáticamente una vez que se crea un objeto de contexto de almacenamiento.
+
+## [Índice de Vector Personalizado](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Crea un índice de vector y realiza consultas en él, al mismo tiempo que configuras el `LLM`, el `ServiceContext` y el `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Crea un OpenAI LLM y úsalo directamente para chatear.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Crea un Llama-2 LLM y úsalo directamente para chatear.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Utiliza el `SubQuestionQueryEngine`, que divide las consultas complejas en varias preguntas y luego agrega una respuesta a través de las respuestas a todas las subpreguntas.
+
+"
+
+## [Módulos de bajo nivel](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Este ejemplo utiliza varios componentes de bajo nivel, lo que elimina la necesidad de un motor de consulta real. Estos componentes se pueden utilizar en cualquier lugar, en cualquier aplicación, o personalizar y subclasificar para satisfacer tus propias necesidades.
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..448de3a64d9582b0ece1883d84a7478fbdb81984
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Entornos
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+LlamaIndex actualmente admite oficialmente NodeJS 18 y NodeJS 20.
+
+## Enrutador de aplicaciones NextJS
+
+Si estás utilizando los controladores de ruta/funciones sin servidor del enrutador de aplicaciones NextJS, deberás utilizar el modo NodeJS:
+
+```js
+export const runtime = "nodejs"; // por defecto
+```
+
+y deberás agregar una excepción para pdf-parse en tu archivo next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Pone pdf-parse en el modo NodeJS real con el enrutador de aplicaciones NextJS
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..689e77ead6b2d5a0459fcfd7e0e1739cec8f7d8f
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Instalación y Configuración
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+
+Asegúrese de tener NodeJS v18 o superior.
+
+
+## Usando create-llama
+
+La forma más fácil de comenzar con LlamaIndex es usando `create-llama`. Esta herramienta de línea de comandos te permite comenzar rápidamente a construir una nueva aplicación LlamaIndex, con todo configurado para ti.
+
+Simplemente ejecuta
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+para comenzar. Una vez que se genere tu aplicación, ejecuta
+
+```bash npm2yarn
+npm run dev
+```
+
+para iniciar el servidor de desarrollo. Luego puedes visitar [http://localhost:3000](http://localhost:3000) para ver tu aplicación.
+
+## Instalación desde NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Variables de entorno
+
+Nuestros ejemplos utilizan OpenAI de forma predeterminada. Deberá configurar su clave de OpenAI de la siguiente manera:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Reemplace con su clave de https://platform.openai.com/account/api-keys
+```
+
+Si desea que se cargue automáticamente cada vez, agréguelo a su .zshrc/.bashrc.
+
+ADVERTENCIA: no incluya su clave de OpenAI en el control de versiones.
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..da512643df65cac4b77eeae40b8ea09bf6cc7b9b
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# ¿Qué es LlamaIndex.TS?
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+LlamaIndex.TS es un marco de datos para aplicaciones LLM que permite la ingestión, estructuración y acceso a datos privados o específicos de dominio. Si bien también está disponible un paquete de Python (ver [aquí](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS ofrece características principales en un paquete sencillo, optimizado para su uso con TypeScript.
+
+## 🚀 ¿Por qué LlamaIndex.TS?
+
+En su esencia, los LLM ofrecen una interfaz de lenguaje natural entre los humanos y los datos inferidos. Los modelos ampliamente disponibles vienen pre-entrenados con grandes cantidades de datos disponibles públicamente, desde Wikipedia y listas de correo hasta libros de texto y código fuente.
+
+Las aplicaciones construidas sobre los LLM a menudo requieren mejorar estos modelos con datos privados o específicos de dominio. Desafortunadamente, esos datos pueden estar distribuidos en aplicaciones y almacenes de datos aislados. Están detrás de APIs, en bases de datos SQL o atrapados en PDF y presentaciones.
+
+Ahí es donde entra en juego **LlamaIndex.TS**.
+
+## 🦙 ¿Cómo puede ayudar LlamaIndex.TS?
+
+LlamaIndex.TS proporciona las siguientes herramientas:
+
+- **Carga de datos** permite ingresar directamente sus datos existentes en formatos como `.txt`, `.pdf`, `.csv`, `.md` y `.docx`.
+- **Índices de datos** estructura sus datos en representaciones intermedias que son fáciles y eficientes para que los LLM puedan consumirlos.
+- **Motores** proporcionan acceso en lenguaje natural a sus datos. Por ejemplo:
+  - Los motores de consulta son interfaces de recuperación potentes para obtener resultados mejorados con conocimiento.
+  - Los motores de chat son interfaces conversacionales para interacciones de "ida y vuelta" con sus datos.
+
+## 👨‍👩‍👧‍👦 ¿Para quién es LlamaIndex?
+
+LlamaIndex.TS proporciona un conjunto básico de herramientas esenciales para cualquier persona que desarrolle aplicaciones LLM con JavaScript y TypeScript.
+
+Nuestra API de alto nivel permite a los usuarios principiantes utilizar LlamaIndex.TS para ingresar y consultar sus datos.
+
+Para aplicaciones más complejas, nuestras API de nivel inferior permiten a los usuarios avanzados personalizar y ampliar cualquier módulo, como conectores de datos, índices, recuperadores y motores de consulta, para adaptarse a sus necesidades.
+
+## Empezando
+
+`npm install llamaindex`
+
+Nuestra documentación incluye [Instrucciones de instalación](./installation.md) y un [Tutorial de inicio](./starter.md) para construir tu primera aplicación.
+
+Una vez que estés en funcionamiento, [Conceptos de alto nivel](./concepts.md) ofrece una visión general de la arquitectura modular de LlamaIndex. Para obtener ejemplos prácticos más detallados, consulta nuestros [Tutoriales de extremo a extremo](./end_to_end.md).
+
+## 🗺️ Ecosistema
+
+Para descargar o contribuir, encuentra LlamaIndex en:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Comunidad
+
+¿Necesitas ayuda? ¿Tienes alguna sugerencia de función? Únete a la comunidad de LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..3d0c90242c3d74ddc234beef14710d7fe5eb0ca2
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Motor de Chat)
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+El motor de chat es una forma rápida y sencilla de chatear con los datos en tu índice.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// comenzar a chatear
+const response = await chatEngine.chat(query);
+```
+
+## Referencias de la API
+
+- [ContextChatEngine (Motor de Chat de Contexto)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (Motor de Chat de Pregunta Condensada)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..4140c6450abb3341671c4f2c917094751db97ec6
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Índice
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+Un índice es el contenedor básico y la organización de sus datos. LlamaIndex.TS admite dos tipos de índices:
+
+- `VectorStoreIndex` - enviará los k mejores `Node`s al LLM al generar una respuesta. El valor predeterminado de k es 2.
+- `SummaryIndex` - enviará cada `Node` en el índice al LLM para generar una respuesta.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
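+
+Una vez construido, el índice se puede consultar directamente. Continuando el fragmento anterior:
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("consulta de ejemplo");
+```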
+
+## Referencia de la API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..59cd7b3074b388aeb15ab99dc8bd7a26eb20ccaa
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Lector / Cargador
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+LlamaIndex.TS admite la carga fácil de archivos desde carpetas utilizando la clase `SimpleDirectoryReader`. Actualmente, se admiten archivos `.txt`, `.pdf`, `.csv`, `.md` y `.docx`, ¡con más planeados para el futuro!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Referencia de API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..17d856ecf45dcc0527515f7f2512e2f94e95ec18
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documentos y Nodos
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+Los `Documentos` y los `Nodos` son los bloques de construcción básicos de cualquier índice. Si bien la API para estos objetos es similar, los objetos `Documentos` representan archivos completos, mientras que los `Nodos` son fragmentos más pequeños de ese documento original, adecuados para un LLM y Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const documento = new Document({ text: "texto", metadata: { clave: "valor" } });
+```
+
+## Referencia de la API
+
+- [Document (Documento)](../../api/classes/Document.md)
+- [TextNode (NodoTexto)](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..9afd6eb0bd275408df5f8d02cf4425c9bcd797c9
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,38 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Motor de Consulta)
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+Un motor de consulta envuelve un `Retriever` y un `ResponseSynthesizer` en un pipeline, que utilizará la cadena de consulta para obtener nodos y luego enviarlos al LLM para generar una respuesta.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("cadena de consulta");
+```
+
+## Motor de Consulta de Subpreguntas
+
+El concepto básico del Motor de Consulta de Subpreguntas es que divide una sola consulta en múltiples consultas, obtiene una respuesta para cada una de esas consultas y luego combina esas respuestas diferentes en una única respuesta coherente para el usuario. Puedes pensar en ello como la técnica de "pensar paso a paso" pero iterando sobre tus fuentes de datos.
+
+### Empezando
+
+La forma más sencilla de comenzar a probar el Motor de Consulta de Subpreguntas es ejecutar el archivo subquestion.ts en [ejemplos](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Herramientas
+
+El Motor de Consulta de Subpreguntas se implementa con Herramientas. La idea básica de las Herramientas es que son opciones ejecutables para el modelo de lenguaje grande. En este caso, nuestro Motor de Consulta de Subpreguntas se basa en QueryEngineTool, que, como habrás adivinado, es una herramienta para ejecutar consultas en un Motor de Consulta. Esto nos permite darle al modelo la opción de consultar diferentes documentos para diferentes preguntas, por ejemplo. También podrías imaginar que el Motor de Consulta de Subpreguntas podría utilizar una Herramienta que busca algo en la web o que obtiene una respuesta utilizando Wolfram Alpha.
+
+Puedes obtener más información sobre las Herramientas echando un vistazo a la documentación de LlamaIndex Python en https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
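+
+A modo de boceto (se asume un `index` ya construido; el nombre y la descripción de la herramienta son inventados):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      // Motor de consulta sobre un índice existente
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "ensayo", // nombre hipotético de la herramienta
+        description: "Responde preguntas sobre el ensayo de ejemplo",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query("pregunta compleja de ejemplo");
+```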
+
+## Referencia de la API
+
+- [RetrieverQueryEngine (Motor de Consulta de Recuperador)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Motor de Consulta de Subpreguntas)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Herramienta de Motor de Consulta)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..2794be533a80e015e73223e85cd1c1d2f9e4a13d
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Módulos principales
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+LlamaIndex.TS ofrece varios módulos principales, separados en módulos de alto nivel para comenzar rápidamente y módulos de bajo nivel para personalizar los componentes clave según sea necesario.
+
+## Módulos de alto nivel
+
+- [**Documento**](./high_level/documents_and_nodes.md): Un documento representa un archivo de texto, un archivo PDF u otra pieza de datos contiguos.
+
+- [**Nodo**](./high_level/documents_and_nodes.md): El bloque básico de construcción de datos. Comúnmente, estos son partes del documento divididas en piezas manejables que son lo suficientemente pequeñas como para ser alimentadas en un modelo de incrustación y LLM.
+
+- [**Lector/Cargador**](./high_level/data_loader.md): Un lector o cargador es algo que toma un documento del mundo real y lo transforma en una clase Document que luego se puede utilizar en su Índice y consultas. Actualmente admitimos archivos de texto sin formato y PDF con muchos más por venir.
+
+- [**Índices**](./high_level/data_index.md): los índices almacenan los Nodos y las incrustaciones de esos nodos.
+
+- [**Motor de consulta**](./high_level/query_engine.md): Los motores de consulta son los que generan la consulta que ingresas y te devuelven el resultado. Los motores de consulta generalmente combinan una indicación predefinida con nodos seleccionados de su Índice para brindarle al LLM el contexto que necesita para responder su consulta.
+
+- [**Motor de chat**](./high_level/chat_engine.md): Un motor de chat te ayuda a construir un chatbot que interactuará con tus Índices.
+
+## Módulo de bajo nivel
+
+- [**LLM**](./low_level/llm.md): La clase LLM es una interfaz unificada sobre un proveedor de modelos de lenguaje amplio como OpenAI GPT-4, Anthropic Claude o Meta LLaMA. Puede heredar de ella para escribir un conector para su propio modelo de lenguaje amplio.
+
+- [**Embedding**](./low_level/embedding.md): Un embedding se representa como un vector de números de punto flotante. El modelo de embedding predeterminado es text-embedding-ada-002 de OpenAI, y cada embedding que genera consta de 1.536 números de punto flotante. Otro modelo de embedding popular es BERT, que utiliza 768 números de punto flotante para representar cada nodo. Proporcionamos varias utilidades para trabajar con embeddings, incluyendo 3 opciones de cálculo de similitud y Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Las estrategias de división de texto son increíblemente importantes para la eficacia general de la búsqueda de embeddings. Actualmente, aunque tenemos una configuración predeterminada, no hay una solución única para todos los casos. Dependiendo de los documentos fuente, es posible que desee utilizar diferentes tamaños y estrategias de división. Actualmente admitimos la división por tamaño fijo, la división por tamaño fijo con secciones superpuestas, la división por oración y la división por párrafo. El text splitter se utiliza en el NodeParser para dividir los `Documentos` en `Nodos`.
+
+- [**Retriever**](./low_level/retriever.md): El Retriever es el encargado de elegir los Nodos que se van a recuperar del índice. Aquí, es posible que desee intentar recuperar más o menos Nodos por consulta, cambiar la función de similitud o crear su propio retriever para cada caso de uso individual en su aplicación. Por ejemplo, es posible que desee tener un retriever separado para el contenido de código y el contenido de texto.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): El ResponseSynthesizer es el responsable de tomar una cadena de consulta y utilizar una lista de `Nodos` para generar una respuesta. Esto puede tomar muchas formas, como iterar sobre todo el contexto y refinar una respuesta, o construir un árbol de resúmenes y devolver el resumen principal.
+
+- [**Storage**](./low_level/storage.md): En algún momento, querrá almacenar sus índices, datos y vectores en lugar de volver a ejecutar los modelos de embedding cada vez. IndexStore, DocStore, VectorStore y KVStore son abstracciones que le permiten hacer eso. En conjunto, forman el StorageContext. Actualmente, le permitimos persistir sus embeddings en archivos en el sistema de archivos (o en un sistema de archivos virtual en memoria), pero también estamos agregando activamente integraciones con bases de datos de vectores.
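+
+Como referencia rápida, varios de estos módulos de bajo nivel se configuran a través del `ServiceContext`. Un boceto mínimo que fija explícitamente el LLM y el modelo de embedding (ambos documentados en las páginas enlazadas):
+
+```typescript
+import {
+  OpenAI,
+  OpenAIEmbedding,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// LLM y modelo de embedding explícitos en lugar de los valores por defecto
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
+  embedModel: new OpenAIEmbedding(),
+});
+```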
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..5d94e4628f162378daba6a90029617e11d2d3379
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Incrustación
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+El modelo de incrustación en LlamaIndex es responsable de crear representaciones numéricas de texto. Por defecto, LlamaIndex utilizará el modelo `text-embedding-ada-002` de OpenAI.
+
+Esto se puede establecer explícitamente en el objeto `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Referencia de la API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..19760c863a75f67775be87532e7f1990bfd7fa67
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM (Gran Modelo de Lenguaje)
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+El LLM es responsable de leer texto y generar respuestas en lenguaje natural a consultas. Por defecto, LlamaIndex.TS utiliza `gpt-3.5-turbo`.
+
+El LLM se puede establecer explícitamente en el objeto `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## Referencia de la API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..e4607be9b436522dfcf78aa4210b07ece0964ee7
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser (Analizador de Nodos)
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+El `NodeParser` en LlamaIndex es responsable de dividir los objetos `Document` en objetos `Node` más manejables. Cuando llamas a `.fromDocuments()`, el `NodeParser` del `ServiceContext` se utiliza automáticamente para hacer esto por ti. Alternativamente, puedes usarlo para dividir documentos de antemano.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Tengo 10 años. John tiene 20 años." }),
+]);
+```
+
+## TextSplitter (Divisor de Texto)
+
+El divisor de texto subyacente dividirá el texto por oraciones. También se puede utilizar como un módulo independiente para dividir texto sin formato.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hola Mundo");
+```
+
+## Referencia de la API
+
+- [SimpleNodeParser (Analizador de Nodos Simple)](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter (Divisor de Oraciones)](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..9513f1401423ab6c8575d9834793f5e08ac3c88a
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (SintetizadorDeRespuestas)
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+El ResponseSynthesizer es responsable de enviar la consulta, los nodos y las plantillas de indicaciones al LLM para generar una respuesta. Hay algunos modos clave para generar una respuesta:
+
+- `Refine` (Refinar): "crear y refinar" una respuesta pasando secuencialmente por cada fragmento de texto recuperado. Esto realiza una llamada separada al LLM por cada Nodo. Bueno para respuestas más detalladas.
+- `CompactAndRefine` (CompactarYRefinar) (por defecto): "compactar" la indicación durante cada llamada al LLM al llenar tantos fragmentos de texto como sea posible dentro del tamaño máximo de la indicación. Si hay demasiados fragmentos para llenar en una sola indicación, "crear y refinar" una respuesta pasando por múltiples indicaciones compactas. Es lo mismo que `refine`, pero debería resultar en menos llamadas al LLM.
+- `TreeSummarize` (ResumirÁrbol): Dado un conjunto de fragmentos de texto y la consulta, construye recursivamente un árbol y devuelve el nodo raíz como respuesta. Bueno para fines de resumen.
+- `SimpleResponseBuilder` (ConstructorDeRespuestasSimples): Dado un conjunto de fragmentos de texto y la consulta, aplica la consulta a cada fragmento de texto mientras acumula las respuestas en un array. Devuelve una cadena concatenada de todas las respuestas. Bueno cuando necesitas ejecutar la misma consulta por separado en cada fragmento de texto.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Tengo 10 años." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John tiene 20 años." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "¿Qué edad tengo?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## Referencia de la API
+
+- [ResponseSynthesizer (SintetizadorDeRespuestas)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Refinar)](../../api/classes/Refine.md)
+- [CompactAndRefine (CompactarYRefinar)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (ResumirÁrbol)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (ConstructorDeRespuestasSimples)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..afa70f1c50d896258ac2fb9d73419ed4f9459439
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Recuperador)
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+Un recuperador en LlamaIndex es lo que se utiliza para obtener `Node`s de un índice utilizando una cadena de consulta. Un `VectorIndexRetriever` obtendrá los k nodos más similares. Mientras tanto, un `SummaryIndexRetriever` obtendrá todos los nodos sin importar la consulta.
+
+```typescript
+const recuperador = vector_index.asRetriever();
+recuperador.similarityTopK = 3;
+
+// ¡Obtener nodos!
+const nodosConPuntuación = await recuperador.retrieve("cadena de consulta");
+```
+
+## Referencia de la API
+
+- [SummaryIndexRetriever (Recuperador de Índice de Resumen)](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever (Recuperador de Índice de Resumen LLM)](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever (Recuperador de Índice Vectorial)](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..8d730e0180ced9a91884df75e0af8c245814ba00
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Almacenamiento
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+El almacenamiento en LlamaIndex.TS funciona automáticamente una vez que hayas configurado un objeto `StorageContext`. Simplemente configura el `persistDir` y adjúntalo a un índice.
+
+¡En este momento, solo se admite guardar y cargar desde el disco, con integraciones futuras planeadas!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "./src";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Texto de prueba" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## Referencia de la API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..dd7e114fd09b7d8aff432e39086c393e37b84cf9
--- /dev/null
+++ b/apps/docs/i18n/es/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Tutorial de Inicio
+
+`Esta documentación ha sido traducida automáticamente y puede contener errores. No dudes en abrir una Pull Request para sugerir cambios.`
+
+Una vez que hayas [instalado LlamaIndex.TS usando NPM](installation) y configurado tu clave de OpenAI, estás listo para comenzar tu primera aplicación:
+
+En una nueva carpeta:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # si es necesario
+```
+
+Crea el archivo `example.ts`. Este código cargará algunos datos de ejemplo, creará un documento, lo indexará (lo cual crea embeddings utilizando OpenAI) y luego creará un motor de consulta para responder preguntas sobre los datos.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Carga el ensayo desde abramov.txt en Node
+  const ensayo = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Crea un objeto Document con el ensayo
+  const documento = new Document({ text: ensayo });
+
+  // Divide el texto y crea embeddings. Almacénalos en un VectorStoreIndex
+  const indice = await VectorStoreIndex.fromDocuments([documento]);
+
+  // Consulta el índice
+  const motorConsulta = indice.asQueryEngine();
+  const respuesta = await motorConsulta.query(
+    "¿Qué hizo el autor en la universidad?",
+  );
+
+  // Muestra la respuesta
+  console.log(respuesta.toString());
+}
+
+main();
+```
+
+Luego puedes ejecutarlo usando
+
+```bash
+npx ts-node example.ts
+```
+
+¿Listo para aprender más? Echa un vistazo a nuestro playground de NextJS en https://llama-playground.vercel.app/. El código fuente está disponible en https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0e87e20e30c60e52f2aff68f939957d001ed0c0
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Kõrgtasemel kontseptsioonid
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+LlamaIndex.TS aitab teil luua LLM-toega rakendusi (nt küsimuste ja vastuste süsteem, vestlusrobot) kohandatud andmete põhjal.
+
+Selles kõrgtasemel kontseptsioonide juhendis saate teada:
+
+- kuidas LLM saab vastata küsimustele teie enda andmete abil.
+- olulised kontseptsioonid ja moodulid LlamaIndex.TS-s, et koostada oma päringute ahel.
+
+## Küsimustele vastamine teie andmete põhjal
+
+LlamaIndex kasutab LLM-i kasutades kaheastmelist meetodit teie andmetega:
+
+1. **indekseerimisetapp**: teadmiste baasi ettevalmistamine ja
+2. **päringuetapp**: asjakohase konteksti saamine teadmistest, et aidata LLM-il vastata küsimusele
+
+![](./_static/concepts/rag.jpg)
+
+Seda protsessi tuntakse ka kui Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS pakub hädavajalikku tööriistakomplekti, et mõlemad etapid oleksid väga lihtsad.
+
+Uurime nüüd iga etappi üksikasjalikumalt.
+
+### Indekseerimisetapp
+
+LlamaIndex.TS aitab teil teadmiste baasi ette valmistada andmekonnektorite ja indeksite komplektiga.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Andmelaadijad**](./modules/high_level/data_loader.md):
+Andmekonnektor (nt `Reader`) võtab vastu andmeid erinevatest andmeallikatest ja andmevormingutest ning esitab need lihtsa `Dokumendi` esinduse kujul (tekst ja lihtne metaandmed).
+
+[**Dokumendid / Sõlmed**](./modules/high_level/documents_and_nodes.md): `Dokument` on üldine konteiner mis tahes andmeallika ümber - näiteks PDF, API väljund või andmed andmebaasist. `Sõlm` on LlamaIndexis andme aatomüksus ja esindab allika `Dokumendi` "tükki". See on rikas esindus, mis sisaldab metaandmeid ja suhteid (teiste sõlmedega), et võimaldada täpseid ja väljendusrikkaid taastamistoiminguid.
+
+[**Andmeindeksid**](./modules/high_level/data_index.md):
+Kui olete oma andmed vastu võtnud, aitab LlamaIndex teil andmed indekseerida kergesti taastatavasse vormingusse.
+
+LlamaIndex töötleb sisuliselt toorete dokumentide vahepealseid esindusi, arvutab vektorite kinnistusi ja salvestab teie andmed mällu või kettale.
+
+"
+
+### Päringuetapp
+
+Päringuetapis toob päringute ahel kasutaja päringu põhjal kõige asjakohasema konteksti
+ja edastab selle LLM-ile (koos päringuga), et sünteesida vastus.
+
+See annab LLM-ile ajakohaseid teadmisi, mis pole tema algse koolituse andmetes
+(vähendades ka hallutsinatsiooni).
+
+Päringuetapi peamine väljakutse seisneb teadmiste otsimises, orkestreerimises ja järeldamises (võimalik, et mitme) teadmiste baasi üle.
+
+LlamaIndex pakub koostatavaid mooduleid, mis aitavad teil luua ja integreerida RAG-päringute ahelaid küsimuste ja vastuste (päringumootor), vestlusroboti (vestlusmootor) või agendi osana.
+
+Neid ehitusplokke saab kohandada, et kajastada paremusjärjestuse eelistusi, samuti koostada struktureeritud viisil järeldusi mitme teadmiste baasi üle.
+
+![](./_static/concepts/querying.jpg)
+
+#### Ehitusplokid
+
+[**Otsijad**](./modules/low_level/retriever.md):
+Otsija määratleb, kuidas tõhusalt saada asjakohast konteksti teadmistebaasist (st indeksist) päringu põhjal.
+Konkreetne otsinguloogika erineb erinevate indeksite puhul, kõige populaarsem neist on tiheda otsingu kasutamine vektorindeksi vastu.
+
+[**Vastuse sünteesijad**](./modules/low_level/response_synthesizer.md):
+Vastuse sünteesija genereerib vastuse LLM-ist, kasutades kasutaja päringut ja antud hulka saadud tekstilõike.
+
+"
+
+#### Ahelad
+
+[**Päringumootorid**](./modules/high_level/query_engine.md):
+Päringumootor on lõpuni viidud ahel, mis võimaldab teil esitada küsimusi oma andmete kohta.
+See võtab vastu loomuliku keele päringu ja tagastab vastuse koos tagastatud kontekstiga, mis edastatakse LLM-ile.
+
+[**Vestlusmootorid**](./modules/high_level/chat_engine.md):
+Vestlusmootor on lõpuni viidud ahel, mis võimaldab teil oma andmetega vestlust pidada
+(mitte üksik küsimus ja vastus, vaid mitu edasi-tagasi).
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ae49ca93df61e1279c5e7f167a5cbff83b25635
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 4
+---
+
+# Lõpust lõppu näited
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+Meie repositooriumis on mitmeid lõpust lõppu näiteid, kasutades LlamaIndex.TS
+
+Vaadake allpool olevaid näiteid või proovige neid ja lõpetage need minutitega interaktiivsete Github Codespace'i õpetuste abil, mida pakub Dev-Docs [siin](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Vestlusmootor (Chat Engine)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Loe faili ja vestle sellest LLM-iga.
+
+## [Vektoriindeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Loo vektoriindeks ja päri seda. Vektoriindeks kasutab sissevõtteid, et tuua välja kõige olulisemad sõlmed. Vaikimisi on kõige olulisemad 2.
+
+"
+
+## [Kokkuvõtte indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Loo loendi indeks ja päri seda. See näide kasutab ka `LLMRetriever`-it, mis kasutab LLM-i parimate sõlmede valimiseks vastuse genereerimisel.
+
+"
+
+## [Salvesta / Laadi indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Loo ja laadi vektori indeks. LlamaIndex.TS-s toimub automaatne salvestamine kettale, kui salvestuskonteksti objekt on loodud.
+
+"
+
+## [Kohandatud vektoriindeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Loo vektoriindeks ja päri seda, samal ajal konfigureerides `LLM`, `ServiceContext` ja `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Loo OpenAI LLM ja kasuta seda otse vestluseks.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Loo Llama-2 LLM ja kasuta seda otse vestluseks.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Kasutab `SubQuestionQueryEngine`-i, mis jagab keerulised päringud mitmeks alampäringuks ja seejärel kogub vastuse kõikide alampäringute vastuste põhjal kokku.
+
+"
+
+## [Madalama taseme moodulid](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+See näide kasutab mitmeid madalama taseme komponente, mis eemaldavad vajaduse tegeliku päringumootori järele. Neid komponente saab kasutada kõikjal, igas rakenduses või kohandada ja alamklassideks muuta vastavalt teie enda vajadustele.
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c47741bfbb5e4bfebd868cca9032929686e11c5
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Keskkonnad
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+LlamaIndex toetab praegu ametlikult NodeJS 18 ja NodeJS 20.
+
+## NextJS rakenduse marsruuter
+
+Kui kasutate NextJS rakenduse marsruuteri marsruutide käsitlejaid/serverita funktsioone, peate kasutama NodeJS režiimi:
+
+```js
+export const runtime = "nodejs"; // vaikimisi
+```
+
+ja peate lisama erandi pdf-parse jaoks oma next.config.js failis
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Paneb pdf-parse tegelikult NodeJS režiimi koos NextJS rakenduse marsruuteriga
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..bfba9d30ee1adb5c2598c3530048a9ee263406e2
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Paigaldamine ja seadistamine
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+
+Veenduge, et teil oleks NodeJS versioon 18 või uuem.
+
+
+## Kasutades create-llama
+
+Lihtsaim viis LlamaIndexiga alustamiseks on kasutada `create-llama` tööriista. See käsurea tööriist võimaldab teil kiiresti alustada uue LlamaIndex rakenduse loomist, kõik on juba teie jaoks seadistatud.
+
+Lihtsalt käivitage
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+alustamiseks. Kui teie rakendus on genereeritud, käivitage
+
+```bash npm2yarn
+npm run dev
+```
+
+arendusserveri käivitamiseks. Seejärel saate külastada [http://localhost:3000](http://localhost:3000), et näha oma rakendust.
+
+## Paigaldamine NPM-ist
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Keskkonnamuutujad
+
+Meie näidetes kasutatakse vaikimisi OpenAI-d. Peate oma OpenAI võtme seadistama järgmiselt:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Asendage oma võti aadressilt https://platform.openai.com/account/api-keys
+```
+
+Kui soovite, et see laaditakse automaatselt iga kord, lisage see oma .zshrc/.bashrc faili.
+
+HOIATUS: ärge lisage oma OpenAI võtit versioonihaldusesse.
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..76ff5e1f1c1fa97f182b99be13dda9b11b898674
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Mis on LlamaIndex.TS?
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+LlamaIndex.TS on andmefraimvork LLM rakenduste jaoks, mis võimaldab andmete vastuvõtmist, struktureerimist ja juurdepääsu privaatsetele või domeenispetsiifilistele andmetele. Kuigi on saadaval ka Pythoni pakett (vaata [siit](https://docs.llamaindex.ai/en/stable/)), pakub LlamaIndex.TS põhifunktsioone lihtsas pakendis, mis on optimeeritud TypeScripti kasutamiseks.
+
+## 🚀 Miks kasutada LlamaIndex.TS?
+
+LLM-id pakuvad inimeste ja järeldatud andmete vahel loomuliku keele liidest. Laialdaselt kättesaadavad mudelid on eelnevalt treenitud suurel hulgal avalikult kättesaadavatel andmetel, alates Vikipeediast ja meililistidest kuni õpikute ja lähtekoodini.
+
+LLM-ide peal ehitatud rakendused vajavad sageli nende mudelite täiendamist privaatsete või domeenispetsiifiliste andmetega. Kahjuks võivad need andmed olla jaotatud eraldatud rakenduste ja andmebaaside vahel. Need võivad olla API-de taga, SQL-andmebaasides või lõksus PDF-failides ja slaidide komplektides.
+
+Siin tuleb appi **LlamaIndex.TS**.
+
+## 🦙 Kuidas saab LlamaIndex.TS aidata?
+
+LlamaIndex.TS pakub järgmisi tööriistu:
+
+- **Andmete laadimine** - võimaldab olemasolevate `.txt`, `.pdf`, `.csv`, `.md` ja `.docx` andmete otsest sissevõtmist.
+- **Andmeindeksid** - struktureerib andmed vahepealsetesse esitustesse, mis on LLM-idele lihtsad ja tõhusad.
+- **Mootorid** - pakuvad loomuliku keele juurdepääsu andmetele. Näiteks:
+  - Päringumootorid on võimsad otsingu liidesed teadmistega täiendatud väljundite jaoks.
+  - Vestlusmootorid on vestlusliidesed mitme sõnumi "edasi-tagasi" suhtluseks andmetega.
+
+## 👨‍👩‍👧‍👦 Kellele on LlamaIndex mõeldud?
+
+LlamaIndex.TS pakub põhikomplekti tööriistu, mis on olulised kõigile, kes ehitavad LLM rakendusi JavaScripti ja TypeScriptiga.
+
+Meie kõrgtasemel API võimaldab algajatel kasutajatel kasutada LlamaIndex.TS-i andmete sissevõtmiseks ja päringute tegemiseks.
+
+Täpsemate rakenduste jaoks võimaldavad meie madalama taseme API-d edasijõudnud kasutajatel kohandada ja laiendada mis tahes moodulit - andmeühendusi, indekseid, taastajaid ja päringumootoreid vastavalt nende vajadustele.
+
+## Alustamine
+
+`npm install llamaindex`
+
+Meie dokumentatsioonis on [paigaldusjuhised](./installation.md) ja [algõpetus](./starter.md) oma esimese rakenduse loomiseks.
+
+Kui olete valmis ja töötate, siis [kõrgtasemel kontseptsioonid](./concepts.md) annavad ülevaate LlamaIndexi moodularhitektuurist. Praktiliste näidete jaoks vaadake läbi meie [otsast lõpuni õpetused](./end_to_end.md).
+
+## 🗺️ Ökosüsteem
+
+LlamaIndexi saate alla laadida või sellele kaasa aidata järgmistel platvormidel:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Kogukond
+
+Vajate abi? On teil funktsioonisoovitus? Liituge LlamaIndex kogukonnaga:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..ece0dd04a1c02f347611dacfe7e63dba39533b7f
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Vestlusmootor)
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+ChatEngine (Vestlusmootor) on kiire ja lihtne viis suhelda andmetega oma indeksis.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// alusta vestlust
+const response = await chatEngine.chat(query);
+```
+
+## API viited
+
+- [ContextChatEngine (KontekstVestlusMootor)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce71fbb5658579da97505c648bac3a9041c7ae6b
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Indeks
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+Indeks on teie andmete põhiline konteiner ja korraldus. LlamaIndex.TS toetab kahte indeksit:
+
+- `VectorStoreIndex` - saadab LLM-ile vastuse genereerimisel ülem-k `Node`-d. Vaikimisi ülem-k on 2.
+- `SummaryIndex` - saadab iga `Node` indeksis LLM-ile vastuse genereerimiseks
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API viide
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e447c0dfabd0c406fcbc51b41b19e2b1110b489
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Lugeja / Laadija
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+LlamaIndex.TS toetab failide lihtsat laadimist kaustadest, kasutades `SimpleDirectoryReader` klassi. Praegu toetatakse `.txt`, `.pdf`, `.csv`, `.md` ja `.docx` faile, tulevikus on plaanis lisada veel rohkem!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const dokumendid = await new SimpleDirectoryReader().loadData("./andmed");
+```
+
+## API viide
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..d04989cbbd41399217f3209da5d94da5d04ac33e
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumendid ja sõlmed
+
+`See dokumentatsioon on tõlgitud automaatselt ja võib sisaldada vigu. Ärge kartke avada Pull Request, et pakkuda muudatusi.`
+
+`Dokumendid` ja `Sõlmed` on igasuguse indeksi põhilised ehitusplokid. Kuigi nende objektide API on sarnane, esindavad `Dokumendi` objektid terviklikke faile, samas kui `Sõlmed` on väiksemad tükid sellest algsest dokumendist, mis sobivad LLM-ile ja küsimustele-vastustele.
+
+```typescript
+import { Document } from "llamaindex";
+
+const dokument = new Document({ text: "tekst", metadata: { key: "val" } });
+```
+
+## API viide
+
+- [Dokument](../../api/classes/Document.md)
+- [TekstiSõlm](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..b75969ca25a47b15061e0e6f4fbd51ec3e4e87e1
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is running the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
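+
+Under the hood, the example wires up something like the following minimal sketch. The tool `name` and `description` are illustrative values, and an existing `index` (as in the example above) is assumed:
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Each tool wraps a query engine the LLM can choose to call per sub-question
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "essay_tool",
+        description: "Answers questions about the essay",
+      },
+    },
+  ],
+});
+
+const response = await subQuestionEngine.query(
+  "Compare and contrast the topics covered in the essay.",
+);
+```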
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on QueryEngineTool, which, as you may have guessed, is a tool to run queries on a QueryEngine. This allows us to give the model an option to query different documents for different questions. You could also imagine that the Sub Question Query Engine could use a Tool that searches for something on the web or gets an answer using Wolfram Alpha.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation at https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+"
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..f3622095987af759cbf9ac8a70989d161b942f42
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. Currently, while we do have a default, there's no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you're going to want to store your indexes, data and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..a27794300fa788810fe1770802226a6604dd9ed0
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
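+
+The embedding model can also be used directly to embed text. A minimal sketch (assuming the `getTextEmbedding` method):
+
+```typescript
+// Turn a string into a vector of floating point numbers
+const vector = await openaiEmbeds.getTextEmbedding("hello world");
+console.log(vector.length); // 1536 dimensions for text-embedding-ada-002
+```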
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..3e3892159efec6153d854bd902df558e071f8c34
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
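+
+The LLM can also be called directly, outside of an index. This is a minimal sketch; the exact `chat` signature may differ between versions, so treat it as illustrative:
+
+```typescript
+// Send a single user message and print the assistant's reply
+const result = await openaiLLM.chat([
+  { content: "Say hello in one word.", role: "user" },
+]);
+console.log(result.message.content);
+```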
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..de4e7739cd3c93287be402b8dcdeb4ae93d4abc8
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this automatically for you. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+"
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..afb913266493ea2a28f17df017a786810e61c635
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,48 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk.
+  This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as can fit within the maximum prompt size. If there are too many chunks to fit in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Kui vana ma olen?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
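+
+To pick one of the modes above explicitly, a response builder can be passed in. A minimal sketch, assuming a `responseBuilder` constructor option and a default service context:
+
+```typescript
+import {
+  ResponseSynthesizer,
+  TreeSummarize,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+
+// Use tree summarization instead of the default compact-and-refine
+const summarizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+  serviceContext,
+});
+```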
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c22d1435308c0ee1cbffd00d696540ecf816da4
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..892146a6d209492bdefa2b4ed06ec5e8499bba91
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
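+
+On a later run, the same `persistDir` can be used to load the persisted index back instead of re-computing embeddings. A minimal sketch (assuming `VectorStoreIndex.init`):
+
+```typescript
+import { VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+// Reload the index that was persisted to ./storage
+const loadedIndex = await VectorStoreIndex.init({ storageContext });
+```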
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf0243eab0e1405a82e545b2146ae7e01696b5c3
--- /dev/null
+++ b/apps/docs/i18n/et/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("What did the author do in college?");
+
+  // Output the response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..6c6d2d2353e6b1afbb507f8646bebce9a97a00d4
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge to assist the LLM in responding to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in-memory or to disk.
+
+"
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+"
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
+
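+As a minimal sketch of how these pipelines fit together (assuming an `index` built during the indexing stage):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Query engine: one-shot question & answer over the index
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("What is this document about?");
+
+// Chat engine: multi-turn conversation over the same data
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("Tell me more about that.");
+```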
+"
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..ec3f374a2aed46257fa7a9c938ae8a4d585adb2d
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 4
+---
+
+# End to End Examples
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below or try them out and complete them in minutes with interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+"
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
+"
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+"
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all sub-questions.
+
+"
+
+## [Low Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and subclassed to meet your own needs.
+
+"
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..629bd1adcc9ac86506867384a58ad7808b682f13
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using the NextJS App Router, you'll need to use NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..aed0a9a78929cd184000ffa0f7270942d439c709
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+Make sure you have NodeJS v18 or higher.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once you've generated your app, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your Open AI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..94bec4a0325f2fbf8715c8d007740275d42a89a3
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,64 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading**: ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly
+- **Data indexes**: structure your data in intermediate representations that are easy and performant for LLMs to consume.
+- **Engines**: provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+"
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+"
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0a00c8233450a4002c3a4a4f5390681c6f7db2a
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat("query string");
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..5032269fa008845e22f173f693c78e6f591c8c1d
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
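+
+A `SummaryIndex` is constructed the same way; at query time it simply hands every node to the LLM. A short sketch (reusing `document` from above):
+
+```typescript
+import { SummaryIndex } from "llamaindex";
+
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+const queryEngine = summaryIndex.asQueryEngine();
+const response = await queryEngine.query("Give me an overview.");
+```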
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..0626a4da4517eb99fd807bbd3df6340f9747f6a2
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..16c857c081af0964f08fb4a4c94319f076381ecd
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..673c1e7f36cc075486f0e3267414773dcdf16a3a
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is running the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
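+
+As a rough sketch of what that example sets up (the tool name and description are illustrative, and an existing `index` is assumed):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// The engine decomposes the query and routes each sub-question to a tool
+const engine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: { name: "data_tool", description: "Answers questions about the data" },
+    },
+  ],
+});
+
+const response = await engine.query("Summarize each section, then compare them.");
+```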
+
+"
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on QueryEngineTool, which, as you may have guessed, is a tool to run queries on a QueryEngine. This allows us to give the model an option to query different documents for different questions. You could also imagine that the Sub Question Query Engine could use a Tool that searches for something on the web or gets an answer using Wolfram Alpha.
+
+To learn more about Tools, take a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..422e1a70122595bd4b599a484f0ac568dd63afc1
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. Currently, while we do have a default, there's no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you're going to want to store your indexes, data and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad904e2923395d04cd0b695581761836fe00adaf
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6e91ce5156cdd6aed8b1db8d93a915ee66029c4
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..a8ce02fd8ed661e18c62273be98c016c6ee1b9e4
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this automatically for you. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..b01652dfd92736f210a43c4ff65884acbe234a76
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as can fit within the maximum prompt size. If there are too many chunks to fit in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "چند سال دارم؟",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..510c4ab4974e2064d730aa45a4a313bb450ecfa0
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..65e5bdbd842ad5c09cf185066380a51571bede5c
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
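+
+To pick the index back up on a subsequent run, point a storage context at the same `persistDir` and initialize from it. A rough sketch (assuming `VectorStoreIndex.init`):
+
+```typescript
+const reloadedContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+// Rebuild the index object from what was persisted, without re-embedding
+const loadedIndex = await VectorStoreIndex.init({ storageContext: reloadedContext });
+```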
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..bc3acca0bc9b4584088d192ba38277c64e086323
--- /dev/null
+++ b/apps/docs/i18n/fa/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "What did the author do in college?",
+  );
+
+  // Output the response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..620eb70a12f3f682f413db9303474ce45c59da34
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been translated automatically and may contain errors. Don't hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge to assist the LLM in responding to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in-memory or to disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
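+
+As a rough sketch of how these two building blocks fit together (assuming an `index` already built during the indexing stage; `asRetriever` and `ResponseSynthesizer` are covered in the module docs linked above):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// fetch the nodes most relevant to the question...
+const retriever = index.asRetriever();
+const nodes = await retriever.retrieve("What did the author do in college?");
+
+// ...then have the LLM synthesize an answer from them
+const synthesizer = new ResponseSynthesizer();
+const response = await synthesizer.synthesize(
+  "What did the author do in college?",
+  nodes,
+);
+console.log(response.response);
+```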
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query, and returns a response, along with reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth instead of a single question & answer).
+
+"
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..06b1a37fec9e41a7329de91f655489732a76a72f
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,61 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below, or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all sub-questions.
+
+## [Low-Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and subclassed to meet your own needs.
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea4cbb371fe745c21257463fcf024cfb6de8627f
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..757b0af025a596ffba11a56a0db568f687875391
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+```This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.```
+
+
+Make sure you have NodeJS v18 or higher.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your Open AI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..b4201801f9baaa607a4303f224bb933612564d6c
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,62 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading** ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly
+- **Data indexes** structure your data in intermediate representations that are easy and performant for LLMs to consume.
+- **Engines** provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..33f9efdfabfc0fb04ddc2164bff311eb6b6e867c
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat(query);
+```
+
+## Api References
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..b690ebf3548bb12af174204be2dd4a5451ee65ea
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
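+
+A `SummaryIndex` is built the same way; as a minimal sketch (reusing the `document` from above), constructing and querying one looks like:
+
+```typescript
+import { SummaryIndex } from "llamaindex";
+
+// every Node is sent to the LLM, so no top-k retrieval is involved
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+const summary = await summaryIndex.asQueryEngine().query("Summarize the text.");
+```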
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..47174cc0c5971e2a3012e6b88a49a7f8a961eaf9
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+// loadData is async, so await the resulting array of Documents
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..410d1ca4dffc1bda4f919746ff35fb490bb46379
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..d076d3c72c5ef0f6d8681bbf05beebe5f23cabb9
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our SubQuestionQueryEngine relies on QueryEngineTool, which, as you guessed, is a tool to run queries on a QueryEngine. This allows us to, for example, give the model an option to query different documents for different questions. You could also imagine that the SubQuestionQueryEngine could use a Tool that searches the web or gets an answer using Wolfram Alpha.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
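+
+As a rough sketch of that wiring (assuming `index` is an existing `VectorStoreIndex`; the tool `name` and `description` strings are made up for illustration):
+
+```typescript
+import { QueryEngineTool, SubQuestionQueryEngine } from "llamaindex";
+
+// each tool wraps a query engine plus metadata the LLM uses to decide when to call it
+const queryEngineTools: QueryEngineTool[] = [
+  {
+    queryEngine: index.asQueryEngine(),
+    metadata: {
+      name: "essay_tool",
+      description: "Useful for questions about the essay",
+    },
+  },
+];
+
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({ queryEngineTools });
+const response = await subQuestionEngine.query(
+  "What did the author do before and after college?",
+);
+```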
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..bd69a1938c826f3f7d7948ebff3824bc1ad160f5
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what take the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance (see the sketch after this list).
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. Currently, while we do have a default, there's no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you're going to want to store your indexes, data and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory file system), but we are also actively adding integrations to Vector Databases.
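+
+To make the embedding bullet concrete, here is a minimal sketch of comparing two embeddings (assuming the `similarity` helper and `SimilarityType` enum exported by llamaindex behave as their names suggest):
+
+```typescript
+import { OpenAIEmbedding, SimilarityType, similarity } from "llamaindex";
+
+const embedModel = new OpenAIEmbedding();
+
+// two vectors of 1,536 floats each with text-embedding-ada-002
+const a = await embedModel.getTextEmbedding("The cat sat on the mat.");
+const b = await embedModel.getTextEmbedding("A cat is sitting on a rug.");
+
+// DEFAULT is cosine-style similarity; DOT_PRODUCT and EUCLIDEAN are the other two options
+console.log(similarity(a, b, SimilarityType.DEFAULT));
+```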
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..b1d891b5725922dbab33be2258f2915552825304
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..0ae9d4d6ff72ca976956ec9442e8c450361d3805
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
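+
+The LLM can also be called directly; as a small sketch (assuming `chat` accepts an array of `{ content, role }` messages and returns a response carrying a `message`, as in the OpenAI example linked from the end-to-end docs):
+
+```typescript
+const result = await openaiLLM.chat([
+  { content: "Tell me a joke.", role: "user" },
+]);
+console.log(result.message.content);
+```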
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..a1df99fcf64a8230231d49cc7c12bc376a8b7eef
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this automatically for you. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
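+
+To have `.fromDocuments()` pick up a customized parser, it can be wired into a service context; a sketch (assuming `serviceContextFromDefaults` accepts a `nodeParser` field alongside the fields shown in the ServiceContext docs):
+
+```typescript
+import { Document, VectorStoreIndex, serviceContextFromDefaults } from "llamaindex";
+
+// reuse the nodeParser from above when building an index
+const serviceContext = serviceContextFromDefaults({ nodeParser });
+const index = await VectorStoreIndex.fromDocuments(
+  [new Document({ text: "I am 10 years old. John is 20 years old." })],
+  { serviceContext },
+);
+```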
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f66002883abde525fc3ec40e910eff579df3b7f
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,55 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk.
+  This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as
+  many text chunks as can fit within the maximum prompt size. If there are
+  too many chunks to stuff in one prompt, "create and refine" an answer by going through
+  multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree
+  and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text
+  chunk while accumulating the responses into an array. Returns a concatenated string of all
+  responses. Good for when you need to run the same query separately against each text
+  chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "What age am I?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
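+
+To use a non-default mode, a response builder can be passed to the synthesizer; a sketch (the constructor shapes here are assumptions based on the API reference below):
+
+```typescript
+import { ResponseSynthesizer, TreeSummarize, serviceContextFromDefaults } from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults({});
+// build a tree of summaries and answer from the root
+const treeSynthesizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+  serviceContext,
+});
+```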
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..f4dca038dadbf2bf0e47fdcd001a18b2f8ac1fdd
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes, no matter the query.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..d486b6376d0a57914d4698c46f4aecd0650e9b0b
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you've configured a `StorageContext` object. Just set the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with more integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
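+
+In a later run, the same index can be reloaded from disk instead of re-embedding the documents; a minimal sketch (assuming `VectorStoreIndex.init` accepts a storage context):
+
+```typescript
+// reload from ./storage instead of re-running the embedding model
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+const loadedIndex = await VectorStoreIndex.init({ storageContext });
+```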
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..0a9baa8ee3ff82abcbae248c40ddcfb27de30fdb
--- /dev/null
+++ b/apps/docs/i18n/fi/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "What did the author do in college?",
+  );
+
+  // Output response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground.
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa96308b287360b0b967ab4efb572d67eed67e4a
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,84 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this guide to high-level concepts, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge to assist the LLM in responding to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or to disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query, and returns a response, along with reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth instead of a single question & answer).
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..c475449e38cc30f804049514833edf1299b90bc3
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,59 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below, or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all sub-questions.
+
+## [Low-Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and subclassed to meet your own needs.
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..6da429dfff12900dd4d4a618b8c424cff0688042
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..bdfac5730436f30a8e24d03555bcfbfac0295ec4
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+```This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.```
+
+
+Make sure you have NodeJS v18 or higher.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your Open AI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..8d2762575cde13b5b112568677aa5a5b77aee48f
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,64 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading** ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly
+- **Data indexes** structure your data in intermediate representations that are easy and performant for LLMs to consume.
+- **Engines** provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..7247221ce934e762a9cdfc6ce61443e2e79d2713
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat(query);
+```
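+
+Because a chat engine is conversational, earlier turns can be passed along with the next message; a small sketch (assuming `chat` accepts an optional history of `{ content, role }` messages):
+
+```typescript
+const chatHistory = [
+  { content: "What did the author do in college?", role: "user" },
+  { content: response.toString(), role: "assistant" },
+];
+const followUp = await chatEngine.chat("And after that?", chatHistory);
+```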
+
+## Api References
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..f2e3d8ca71fa876895a9a214b89d26434c442c11
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..4bebc72778926cb00e595ad4130298f99a61a6b0
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+// loadData is async, so await the resulting array of Documents
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..6191927190d617fd7826e637cd64a5bf71b8350d
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..d0e36cb9f2759985326fa75f2fa06fc2e5b11f6f
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# מנוע שאילתות (QueryEngine)
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+מנוע שאילתות מעטפת את `Retriever` ו-`ResponseSynthesizer` לתוך צינור, שישתמש במחרוזת השאילתא כדי לאחזר צמתים ולשלוח אותם ל-LLM כדי ליצור תשובה.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("מחרוזת שאילתא");
+```
+
+## מנוע שאלות משנה (Sub Question Query Engine)
+
+הרעיון הבסיסי של מנוע שאלות משנה הוא לחלק שאילתה יחידה למספר שאילות, לקבל תשובה עבור כל אחת מהשאילות הללו, ולאחד את התשובות השונות הללו לתשובה קוהרנטית אחת עבור המשתמש. ניתן לחשוב על זה כעל טכניקת הצעד-אחר-צעד "חשוב זאת בקפיצים" אבל בעזרת מקורות המידע שלך!
+
+### התחלה מהירה
+
+הדרך הקלה ביותר להתחיל לנסות את מנוע שאילתות שאלה משנה היא להריץ את הקובץ subquestion.ts בתיקיית [דוגמאות](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### כלים
+
+מנוע שאלות משנה מיושם בעזרת כלים. הרעיון הבסיסי של כלים הוא שהם אפשרויות ניתנות לביצוע עבור המודל השפה הגדול. במקרה זה, המנוע שאלות משנה שלנו מתבסס על QueryEngineTool, שכפי שניחשת, הוא כלי להרצת שאילות על מנוע שאילתות. זה מאפשר לנו לתת למודל אפשרות לשאול מסמכים שונים לשאלות שונות לדוגמה. ניתן גם לדמיין שמנוע שאלות משנה יכול להשתמש בכלי שמחפש משהו ברשת או מקבל תשובה באמצעות Wolfram Alpha.
+
+ניתן ללמוד עוד על כלים על ידי הצצה לתיעוד ה-Python של LlamaIndex https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
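+
+להלן סקיצה מינימלית, בהתבסס על הדוגמה subquestion.ts שבמאגר (שם הכלי והתיאור כאן הם להמחשה בלבד):
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+// בניית אינדקס פשוט מעל מסמך אחד
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "טקסט לדוגמה על הנושא שלך." }),
+]);
+
+// כל QueryEngineTool עוטף מנוע שאילתות יחד עם שם ותיאור שעוזרים ל-LLM לבחור בו
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "example_docs",
+        description: "מסמכי הדוגמה שלנו",
+      },
+    },
+  ],
+});
+
+// השאילתה תפורק לתתי-שאלות, וכל תת-שאלה תורץ מול הכלי המתאים
+const response = await queryEngine.query("שאלה מורכבת על המסמכים");
+console.log(response.toString());
+```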
+
+## מדריך לממשק API
+
+- [מנוע שאילתות של Retriever (RetrieverQueryEngine)](../../api/classes/RetrieverQueryEngine.md)
+- [מנוע שאלה משנה (SubQuestionQueryEngine)](../../api/classes/SubQuestionQueryEngine.md)
+- [כלי מנוע שאילתות (QueryEngineTool)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..1c714c7e5d938eaf932c5d9af03c83f68b81fe34
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# מודולים יסודיים
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+LlamaIndex.TS מציע מספר מודולים יסודיים, מחולקים למודולים ברמה גבוהה להתחלה מהירה ולמודולים ברמה נמוכה להתאמה אישית של רכיבים מרכזיים כפי שנדרש.
+
+## מודולים ברמה גבוהה
+
+- [**מסמך**](./high_level/documents_and_nodes.md): מסמך מייצג קובץ טקסט, קובץ PDF או חתיכת נתונים רציפה אחרת.
+
+- [**צומת**](./high_level/documents_and_nodes.md): אבן הבניין הבסיסית של הנתונים. ברוב המקרים, אלה חלקים של המסמך המחולקים ליחידות קטנות וניתנות לניהול, קטנות מספיק כדי להזין אותן למודל ההטמעה ול-LLM.
+
+- [**קורא/טוען**](./high_level/data_loader.md): קורא או טוען הוא רכיב שמקבל מסמך מהעולם האמיתי וממיר אותו למחלקת מסמך שניתן להשתמש בה באינדקס ובשאילתות שלך. אנחנו תומכים כרגע בקבצי טקסט פשוטים וב-PDF, ופורמטים רבים נוספים בדרך.
+
+- [**אינדקסים**](./high_level/data_index.md): אינדקסים מאחסנים את הצמתים ואת ההטמעות (embeddings) של הצמתים האלה.
+
+- [**מנוע שאילתות**](./high_level/query_engine.md): מנועי שאילתות מקבלים את השאילתה שאתה מזין ומחזירים לך את התוצאה. מנוע שאילתות משלב בדרך כלל פרומפט מובנה מראש עם צמתים נבחרים מהאינדקס שלך, כדי לתת ל-LLM את ההקשר שהוא צריך כדי לענות על השאילתה.
+
+- [**מנוע צ'אט**](./high_level/chat_engine.md): מנוע צ'אט עוזר לך לבנות צ'אטבוט שיתקשר עם האינדקסים שלך.
+
+## מודולים ברמה נמוכה
+
+- [**LLM**](./low_level/llm.md): מחלקת ה-LLM היא ממשק מאוחד מעל ספק מודל שפה גדול כמו OpenAI GPT-4, Anthropic Claude או Meta LLaMA. ניתן ליצור תת-מחלקה על מנת ליצור חיבור למודל שפה גדול משלך.
+
+- [**Embedding**](./low_level/embedding.md): הטמעה מיוצגת כווקטור של מספרים עשרוניים. מודל ברירת המחדל שלנו הוא text-embedding-ada-002 של OpenAI, וכל הטמעה שהוא מייצר מורכבת מ-1,536 מספרים עשרוניים. מודל הטמעה מוכר נוסף הוא BERT, שמשתמש ב-768 מספרים עשרוניים לייצוג כל צומת. אנחנו מספקים מספר כלי עזר לעבודה עם הטמעות, כולל 3 אפשרויות לחישוב דמיון ו-Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): אסטרטגיות פיצול הטקסט חשובות ביותר ליעילות הכללית של חיפוש ההטמעות. כרגע, למרות שיש לנו ברירת מחדל, אין פתרון אחד שמתאים לכולם. בהתאם למסמכי המקור, ייתכן שתרצה להשתמש בגדלי פיצול ואסטרטגיות שונים. כרגע אנחנו תומכים בפיצול לפי גודל קבוע, פיצול לפי גודל קבוע עם חלקים חופפים, פיצול לפי משפט ופיצול לפי פסקה. המחלקה TextSplitter משמשת את המחלקה NodeParser כאשר היא מפצלת מסמכים לצמתים.
+
+- [**Retriever**](./low_level/retriever.md): ה-Retriever הוא המחלקה שבוחרת בפועל אילו צמתים לאחזר מהאינדקס. כאן תוכל לנסות לאחזר יותר או פחות צמתים לכל שאילתה, לשנות את פונקציית הדמיון, או ליצור Retriever משלך לכל מקרה שימוש ביישום שלך. לדוגמה, ייתכן שתרצה Retriever נפרד לתוכן קוד לעומת תוכן טקסט.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ה-ResponseSynthesizer אחראי לקבל מחרוזת שאילתה ולהשתמש ברשימת צמתים כדי ליצור תשובה. זה יכול ללבוש צורות רבות, כמו מעבר על כל ההקשר ושיפור הדרגתי של התשובה, או בניית עץ של סיכומים והחזרת סיכום השורש.
+
+- [**Storage**](./low_level/storage.md): בשלב כלשהו תרצה לאחסן את האינדקסים, הנתונים והווקטורים שלך במקום להריץ מחדש את מודלי ההטמעה בכל פעם. IndexStore, DocStore, VectorStore ו-KVStore הן הפשטות שמאפשרות לך לעשות זאת. ביחד הן מרכיבות את ה-StorageContext. כרגע אנחנו מאפשרים לשמור את ההטמעות בקבצים במערכת הקבצים (או במערכת קבצים וירטואלית בזיכרון), ואנחנו עובדים גם על אינטגרציות עם מסדי נתונים וקטוריים.
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..7adfc753badfdabde2ac73035a503decfbcb2b35
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# הטמעה
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+מודל ההטמעה (embedding) ב-LlamaIndex אחראי ליצירת ייצוגים מספריים של טקסט. כברירת מחדל, LlamaIndex ישתמש במודל `text-embedding-ada-002` של OpenAI.
+
+ניתן להגדיר זאת באופן מפורש באובייקט `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## מדריך לממשק API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..afb766f74bcb9aa4a527305ea83f890a45c3e03a
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM (מודל שפה גדול)
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+ה-LLM אחראי לקריאת טקסט ויצירת תגובות בשפה טבעית לשאילתות. כברירת מחדל, LlamaIndex.TS משתמש ב-`gpt-3.5-turbo`.
+
+ניתן להגדיר את ה-LLM באופן ישיר באמצעות אובייקט ה-`ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## מדריך לממשק API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..ae226197705cf01b790e9199130f47e94f22e833
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser (מנתח צומת)
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+ה-`NodeParser` ב-LlamaIndex אחראי לפצל אובייקטי `Document` לאובייקטי `Node` קטנים וניתנים לניהול. כאשר אתה קורא ל-`.fromDocuments()`, נעשה שימוש ב-`NodeParser` מתוך ה-`ServiceContext` כדי לעשות זאת עבורך באופן אוטומטי. לחלופין, תוכל להשתמש בו כדי לפצל מסמכים מראש.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "אני בן 10. ג'ון בן 20." }),
+]);
+```
+
+## TextSplitter (מפצל הטקסט)
+
+מפצל הטקסט הבסיסי מפצל את הטקסט לפי משפטים. ניתן גם להשתמש בו כמודול עצמאי לפיצול טקסט גולמי.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("שלום עולם");
+```
+
+## מדריך לממשק API
+
+- [SimpleNodeParser (מנתח צומת פשוט)](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter (מפצל משפטים)](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..e2bc7996f234fcdc05ec6ea7f0f1d28cdca4dc29
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,54 @@
+---
+sidebar_position: 6
+---
+
+# מסנתז התגובות (ResponseSynthesizer)
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+ה-ResponseSynthesizer (מסנתז התגובות) אחראי לשלוח את השאילתה, הצמתים ותבניות הפרומפט ל-LLM כדי ליצור תגובה. ישנם כמה מצבים מרכזיים ליצירת תגובה:
+
+- `Refine`: "יצירה ושיפור" של תשובה על ידי מעבר סדרתי על כל חתיכת טקסט שאוחזרה.
+  מצב זה מבצע קריאת LLM נפרדת לכל צומת. מתאים לתשובות מפורטות יותר.
+- `CompactAndRefine` (ברירת מחדל): "דחיסת" הפרומפט בכל קריאת LLM על ידי מילוי כמה שיותר
+  חתיכות טקסט שנכנסות בגודל הפרומפט המרבי. אם יש יותר מדי חתיכות מכדי להיכנס לפרומפט אחד,
+  "יצירה ושיפור" של תשובה על ידי מעבר על מספר פרומפטים דחוסים. זהה ל-`Refine`, אך אמור להוביל לפחות קריאות LLM.
+- `TreeSummarize`: בהתבסס על קבוצה של חתיכות טקסט והשאילתה, בנה עץ באופן רקורסיבי
+  והחזר את הצומת השורש כתגובה. מתאים לצורך סיכום.
+- `SimpleResponseBuilder`: בהתבסס על קבוצה של חתיכות טקסט והשאילתה, החל את השאילתה על כל חתיכת
+  טקסט תוך צבירת התגובות למערך. מחזיר מחרוזת מחוברת של כל
+  התגובות. מתאים כאשר יש צורך להריץ את אותה שאילתה בנפרד על כל חתיכת
+  טקסט.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "אני בן 10 שנים." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "ג'ון בן 20 שנה." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "בן כמה אני?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## מדריך לממשק תכנות (API Reference)
+
+- [מסנתז התגובות (ResponseSynthesizer)](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..c28f08aade2ec666f363ad24e591eaa99457d637
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# רטריבר
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+רטריבר ב-LlamaIndex הוא הרכיב שמשמש לאחזור צמתים (`Node`) מאינדקס באמצעות מחרוזת שאילתה. רטריבר מסוג `VectorIndexRetriever` יחזיר את k הצמתים הדומים ביותר, בעוד שרטריבר מסוג `SummaryIndexRetriever` יחזיר את כל הצמתים ללא קשר לשאילתה.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// אחזור צמתים!
+const nodesWithScore = await retriever.retrieve("מחרוזת שאילתה");
+```
+
+## מדריך לממשק API
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..401f85ce9d8a6fd2d5e581f4e73f2f4d94b01def
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# אחסון
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+אחסון ב-LlamaIndex.TS עובד באופן אוטומטי לאחר הגדרת אובייקט `StorageContext`. פשוט הגדר את `persistDir` וצרף אותו לאינדקס.
+
+כרגע נתמכות רק שמירה וטעינה מהדיסק, ואינטגרציות נוספות מתוכננות בעתיד!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
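+
+טעינה חוזרת של אינדקס שנשמר נעשית עם אותו `persistDir`. זוהי סקיצה מינימלית, בהנחה שהאינדקס כבר נשמר בריצה קודמת (השימוש ב-`VectorStoreIndex.init` מבוסס על דוגמת storageContext.ts שבמאגר):
+
+```typescript
+import { VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+// StorageContext שמצביע על אותה תיקיית אחסון
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+// אתחול האינדקס מהנתונים שנשמרו, בלי לחשב הטמעות מחדש
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+const loadedQueryEngine = loadedIndex.asQueryEngine();
+```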
+
+## מדריך לממשק API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf2cd1b550bbbe19a848d941b373b188c7c0a37a
--- /dev/null
+++ b/apps/docs/i18n/he/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# מדריך למתחילים
+
+`התיעוד הזה תורגם באופן אוטומטי ועשוי להכיל טעויות. אל תהסס לפתוח בקשת משיכה כדי להציע שינויים.`
+
+לאחר שהתקנת את LlamaIndex.TS באמצעות NPM והגדרת את מפתח ה-OpenAI שלך, אתה מוכן להתחיל את האפליקציה הראשונה שלך:
+
+בתיקייה חדשה:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # אם נדרש
+```
+
+צור את הקובץ `example.ts`. קוד זה יטען נתוני דוגמה, ייצור מסמך, יבנה עליו אינדקס (תוך יצירת הטמעות באמצעות OpenAI) וייצור מנוע שאילתות שיענה על שאלות על הנתונים.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // טען את המאמר מתוך abramov.txt ב-Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // צור אובייקט Document עם המאמר
+  const document = new Document({ text: essay });
+
+  // פצל את הטקסט וצור embeddings. שמור אותם ב-VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // שאילתה לאינדקס
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("מה עשה המחבר בזמן הקולג'?");
+
+  // הצג תשובה
+  console.log(response.toString());
+}
+
+main();
+```
+
+לאחר מכן תוכל להריץ אותו באמצעות
+
+```bash
+npx ts-node example.ts
+```
+
+מוכן ללמוד עוד? בדוק את מגרש המשחקים (Playground) שלנו ל-NextJS בכתובת https://llama-playground.vercel.app/. קוד המקור זמין בכתובת https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..4f20fd68979d0b2ac5d23991b97c457b1a851eef
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,84 @@
+---
+sidebar_position: 3
+---
+
+# उच्च स्तरीय अवधारणाएँ
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex.TS आपको अपने डेटा पर LLM-सशक्त एप्लिकेशन (उदा। Q&A, चैटबॉट) बनाने में मदद करता है।
+
+इस उच्च स्तरीय अवधारणाएँ गाइड में, आप सीखेंगे:
+
+- अपने खुद के डेटा का उपयोग करके LLM कैसे सवालों का जवाब दे सकता है।
+- LlamaIndex.TS में मुख्य अवधारणाएँ और मॉड्यूल्स के बारे में, जिनका उपयोग अपने स्वयं के क्वेरी पाइपलाइन को संयोजित करने के लिए किया जा सकता है।
+
+## अपने डेटा पर सवालों का जवाब देना
+
+LlamaIndex आपके डेटा के साथ LLM का उपयोग करते समय एक दो स्तरीय प्रक्रिया का उपयोग करता है:
+
+1. **इंडेक्सिंग स्टेज**: ज्ञान आधार की तैयारी, और
+2. **क्वेरी स्टेज**: सवाल का जवाब देने के लिए LLM की सहायता करने के लिए ज्ञान से संबंधित संदर्भ प्राप्त करना
+
+![](./_static/concepts/rag.jpg)
+
+इस प्रक्रिया को रिट्रीवल ऑगमेंटेड जनरेशन (RAG) भी कहा जाता है।
+
+LlamaIndex.TS दोनों चरणों को सुपर आसान बनाने के लिए आवश्यक टूलकिट प्रदान करता है।
+
+चलिए विस्तार से प्रत्येक चरण को जानते हैं।
+
+### इंडेक्सिंग स्टेज
+
+LlamaIndex.TS आपको डेटा कनेक्टर्स और इंडेक्स के साथ ज्ञान आधार की तैयारी में मदद करता है।
+
+![](./_static/concepts/indexing.jpg)
+
+[**डेटा लोडर्स**](./modules/high_level/data_loader.md):
+डेटा कनेक्टर (उदा. `Reader`) विभिन्न डेटा स्रोतों और डेटा प्रारूपों से डेटा को एक सरल `Document` प्रतिनिधित्व (पाठ और सरल मेटाडेटा) में इंजेस्ट करते हैं।
+
+[**डॉक्यूमेंट्स / नोड्स**](./modules/high_level/documents_and_nodes.md): एक `Document` किसी भी डेटा स्रोत के चारों ओर एक साधारण कंटेनर है - उदाहरण के लिए, एक पीडीएफ, किसी API का आउटपुट, या डेटाबेस से प्राप्त डेटा। एक `Node` LlamaIndex में डेटा की एटॉमिक इकाई है और किसी स्रोत `Document` के "टुकड़े" का प्रतिनिधित्व करती है। यह एक समृद्ध प्रतिनिधित्व है जिसमें मेटाडेटा और (अन्य नोड्स के साथ) संबंध शामिल हैं, ताकि सटीक और अभिव्यंजक रिट्रीवल ऑपरेशन संभव हो सकें।
+
+[**डेटा इंडेक्स**](./modules/high_level/data_index.md):
+जब आप अपना डेटा इंजेस्ट कर लेते हैं, तो LlamaIndex आपके डेटा को ऐसे प्रारूप में इंडेक्स करने में मदद करता है जिसे आसानी से पुनर्प्राप्त किया जा सके।
+
+अंदरूनी तौर पर, LlamaIndex कच्चे दस्तावेज़ों को इंटरमीडिएट प्रतिनिधित्व में पार्स करता है, वेक्टर एम्बेडिंग्स की गणना करता है, और आपके डेटा को मेमोरी में या डिस्क पर संग्रहीत करता है।
+
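+इंडेक्सिंग स्टेज की एक न्यूनतम स्केच नीचे दी गई है (यह मानते हुए कि आपकी फ़ाइलें "./data" फ़ोल्डर में हैं - यह पथ केवल उदाहरण के लिए है):
+
+```typescript
+import { SimpleDirectoryReader, VectorStoreIndex } from "llamaindex";
+
+// डेटा कनेक्टर: फ़ोल्डर से Document ऑब्जेक्ट लोड करें
+const documents = await new SimpleDirectoryReader().loadData("./data");
+
+// इंडेक्सिंग: नोड्स में विभाजन, वेक्टर एम्बेडिंग्स की गणना और संग्रहण
+const index = await VectorStoreIndex.fromDocuments(documents);
+```
+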
+### क्वेरी स्टेज
+
+क्वेरी स्टेज में, क्वेरी पाइपलाइन उपयोगकर्ता की क्वेरी के लिए सबसे प्रासंगिक संदर्भ प्राप्त करती है,
+और उसे क्वेरी के साथ LLM को भेजती है ताकि एक प्रतिक्रिया संश्लेषित की जा सके।
+
+इससे LLM को ऐसा अद्यतन ज्ञान मिलता है जो उसके मूल प्रशिक्षण डेटा में नहीं है
+(और हैलुसिनेशन भी कम होता है)।
+
+क्वेरी स्टेज में मुख्य चुनौती (संभवतः कई) ज्ञान आधारों पर रिट्रीवल, ऑर्केस्ट्रेशन और तर्क करने की होती है।
+
+LlamaIndex ऐसे कंपोज़ेबल मॉड्यूल प्रदान करता है जिन्हें रैंकिंग प्राथमिकताओं के अनुसार कस्टमाइज़ किया जा सकता है और संरचित तरीके से कई ज्ञान आधारों पर तर्क करने के लिए संयोजित किया जा सकता है।
+
+![](./_static/concepts/querying.jpg)
+
+#### बिल्डिंग ब्लॉक्स
+
+[**रिट्रीवर्स**](./modules/low_level/retriever.md):
+रिट्रीवर यह परिभाषित करता है कि क्वेरी दिए जाने पर ज्ञान आधार (अर्थात इंडेक्स) से प्रासंगिक संदर्भ कुशलता से कैसे प्राप्त किया जाए।
+विशिष्ट रिट्रीवल लॉजिक अलग-अलग इंडेक्स के लिए अलग होता है, जिनमें सबसे लोकप्रिय वेक्टर इंडेक्स के विरुद्ध डेन्स रिट्रीवल है।
+
+[**रिस्पॉन्स सिंथेसाइज़र्स**](./modules/low_level/response_synthesizer.md):
+रिस्पॉन्स सिंथेसाइज़र एक प्रतिक्रिया उत्पन्न करता है जो एक LLM से उपयोगकर्ता क्वेरी और दिए गए संदर्भ पाठ के सेट का उपयोग करके बनाई जाती है।
+
+#### पाइपलाइन
+
+[**क्वेरी इंजन**](./modules/high_level/query_engine.md):
+क्वेरी इंजन एक एंड-टू-एंड पाइपलाइन है जो आपको अपने डेटा पर सवाल पूछने की अनुमति देता है।
+यह एक प्राकृतिक भाषा क्वेरी लेता है और एक प्रतिक्रिया लौटाता है, साथ ही वह संदर्भ भी जो प्राप्त करके LLM को पास किया गया था।
+
+[**चैट इंजन**](./modules/high_level/chat_engine.md):
+चैट इंजन एक एंड-टू-एंड पाइपलाइन है जिसका उपयोग डेटा के साथ बातचीत करने के लिए किया जाता है
+(एकल प्रश्न और उत्तर के बजाय कई बार आगे-पीछे)।
+
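+दोनों पाइपलाइनों की एक न्यूनतम स्केच (यह मानते हुए कि `index` ऊपर की इंडेक्सिंग स्टेज में बनाया गया VectorStoreIndex है):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// क्वेरी इंजन: एकल प्रश्न और उत्तर
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("मेरे डेटा में क्या है?");
+
+// चैट इंजन: कई बार आगे-पीछे बातचीत
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("मुझे इसके बारे में और बताओ");
+```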
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..c18e5b693b056bb9546b7a353585f603c67a3cd2
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,57 @@
+---
+sidebar_position: 4
+---
+
+# एंड टू एंड उदाहरण
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+हम रिपॉजिटरी में LlamaIndex.TS का उपयोग करके कई एंड टू एंड उदाहरण शामिल करते हैं।
+
+नीचे दिए गए उदाहरण देखें, या Dev-Docs द्वारा प्रदान किए गए इंटरैक्टिव GitHub Codespace ट्यूटोरियल्स के साथ उन्हें मिनटों में आज़माएँ और पूरा करें: [यहां](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json)
+
+## [चैट इंजन](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+एक फ़ाइल पढ़ें और LLM के साथ इसके बारे में चैट करें।
+
+## [वेक्टर इंडेक्स](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+एक वेक्टर इंडेक्स बनाएं और उसे क्वेरी करें। वेक्टर इंडेक्स शीर्ष-k सबसे प्रासंगिक नोड्स प्राप्त करने के लिए एम्बेडिंग्स का उपयोग करेगा। डिफ़ॉल्ट रूप से, शीर्ष-k का मान 2 है।
+
+## [सारांश सूचकांक](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+एक सूचकांक सृजित करें और इसे क्वेरी करें। इस उदाहरण में `LLMRetriever` भी उपयोग किया जाएगा, जो उत्तर उत्पन्न करते समय उपयोग करने के लिए सर्वश्रेष्ठ नोड का चयन करेगा।
+
+## [एक इंडेक्स सहेजें / लोड करें](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+एक वेक्टर इंडेक्स बनाएं और उसे लोड करें। एक बार स्टोरेज कॉन्टेक्स्ट ऑब्जेक्ट बन जाने के बाद, LlamaIndex.TS में डिस्क पर सहेजना स्वचालित रूप से होता है।
+
+## [कस्टमाइज़ वेक्टर इंडेक्स](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+एक वेक्टर इंडेक्स बनाएं और इसे क्वेरी करें, साथ ही `LLM`, `ServiceContext`, और `similarity_top_k` को भी कॉन्फ़िगर करें।
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+एक OpenAI LLM बनाएं और सीधे चैट के लिए उसका उपयोग करें।
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+एक Llama-2 LLM बनाएं और सीधे चैट के लिए उसका उपयोग करें।
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+`SubQuestionQueryEngine` का उपयोग करता है, जो जटिल क्वेरी को कई सवालों में विभाजित करता है, और फिर सभी उप-सवालों के जवाबों के साथ एक प्रतिक्रिया को एकत्रित करता है।
+
+## [लो लेवल मॉड्यूल](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+इस उदाहरण में कई लो-लेवल कॉम्पोनेंट का उपयोग किया जाता है, जो एक वास्तविक क्वेरी इंजन की आवश्यकता को हटा देता है। ये कॉम्पोनेंट किसी भी एप्लिकेशन में कहीं भी उपयोग किए जा सकते हैं, या आपकी खुद की आवश्यकताओं को पूरा करने के लिए उन्हें अनुकूलित और सब-क्लास किया जा सकता है।
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..3c8f605eab9f1082cda6a3fa6771dec97aa29c93
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# पर्यावरण (Environments)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex वर्तमान में आधिकारिक रूप से NodeJS 18 और NodeJS 20 का समर्थन करता है।
+
+## NextJS ऐप राउटर (NextJS App Router)
+
+यदि आप NextJS ऐप राउटर रूट हैंडलर / सर्वरलेस फंक्शन का उपयोग कर रहे हैं, तो आपको NodeJS मोड का उपयोग करना होगा:
+
+```js
+export const runtime = "nodejs"; // डिफ़ॉल्ट
+```
+
+और आपको अपने next.config.js में pdf-parse के लिए एक अपवाद जोड़ने की आवश्यकता होगी
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // NextJS ऐप राउटर में pdf-parse को वास्तविक NodeJS मोड में रखता है
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..721f4525e998d590063212b0c7c677f3b2ac9098
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,74 @@
+---
+sidebar_position: 1
+---
+
+
+# स्थापना और सेटअप
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+
+सुनिश्चित करें कि आपके पास NodeJS v18 या उच्चतर संस्करण है।
+
+
+## create-llama का उपयोग करें
+
+LlamaIndex के साथ शुरू होने का सबसे आसान तरीका `create-llama` का उपयोग करके है। यह CLI टूल आपको त्वरित रूप से एक नया LlamaIndex एप्लिकेशन बनाने की सुविधा प्रदान करता है, जिसमें सब कुछ आपके लिए सेटअप किया जाता है।
+
+बस निम्नलिखित को चलाएँ
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+शुरू करने के लिए। जब आपका ऐप जनरेट हो जाए, तो चलाएँ
+
+```bash npm2yarn
+npm run dev
+```
+
+विकास सर्वर चालू करने के लिए। फिर आप [http://localhost:3000](http://localhost:3000) पर अपना ऐप देख सकते हैं।
+
+## NPM से स्थापना
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### पर्यावरण चर
+
+हमारे उदाहरण डिफ़ॉल्ट रूप से OpenAI का उपयोग करते हैं। आपको निम्नलिखित तरीके से अपनी Open AI कुंजी को सेटअप करने की आवश्यकता होगी:
+
+```bash
+export OPENAI_API_KEY="sk-......" # https://platform.openai.com/account/api-keys से प्राप्त अपनी कुंजी से बदलें
+```
+
+यदि आप चाहते हैं कि यह हर बार स्वचालित रूप से लोड हो जाए, तो इसे अपने .zshrc/.bashrc में जोड़ें।
+
+चेतावनी: अपनी OpenAI कुंजी को वर्ज़न कंट्रोल में चेक इन (कमिट) न करें।
+
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..046b9655bd02335441317d018a2617efebafe619
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,64 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# LlamaIndex.TS क्या है?
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex.TS एक डेटा फ्रेमवर्क है जिसका उपयोग LLM एप्लिकेशनों के लिए डेटा को इंजेस्ट और संरचित करने तथा निजी या डोमेन-विशिष्ट डेटा तक पहुँचने के लिए किया जाता है। जबकि एक पायथन पैकेज भी उपलब्ध है (देखें [यहां](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS एक सरल पैकेज में मूल विशेषताएँ प्रदान करता है, जो TypeScript के साथ उपयोग के लिए अनुकूलित है।
+
+## 🚀 LlamaIndex.TS क्यों?
+
+अपने मूल में, LLMs मानवों और डेटा के बीच एक प्राकृतिक भाषा इंटरफ़ेस प्रदान करते हैं। व्यापक रूप से उपलब्ध मॉडल सार्वजनिक रूप से उपलब्ध डेटा पर पूर्व-प्रशिक्षित होते हैं - विकिपीडिया और मेलिंग सूचियों से लेकर पाठ्यपुस्तकों और सोर्स कोड तक।
+
+LLMs पर निर्मित एप्लिकेशन अक्सर इन मॉडल्स को निजी या डोमेन-विशिष्ट डेटा के साथ बढ़ाने की आवश्यकता होती है। दुर्भाग्य से, वह डेटा साइलोड एप्लिकेशन और डेटा स्टोर में वितरित हो सकता है। यह API के पीछे होता है, SQL डेटाबेस में होता है, या पीडीएफ और स्लाइड डेक में फंसा होता है।
+
+यहां **LlamaIndex.TS** की भूमिका आती है।
+
+## 🦙 LlamaIndex.TS कैसे मदद कर सकता है?
+
+LlamaIndex.TS निम्नलिखित उपकरण प्रदान करता है:
+
+- **डेटा लोडिंग** आपके मौजूदा `.txt`, `.pdf`, `.csv`, `.md` और `.docx` डेटा को सीधे इंजेस्ट करें
+- **डेटा इंडेक्स** अपने डेटा को ऐसे इंटरमीडिएट प्रतिनिधित्वों में संरचित करें जो LLMs के लिए सरल और प्रदर्शनकारी (performant) हों।
+- **इंजन** आपके डेटा तक प्राकृतिक भाषा पहुंच प्रदान करते हैं। उदाहरण के लिए:
+  - क्वेरी इंजन ज्ञान-वृद्धि युक्त आउटपुट के लिए शक्तिशाली रिट्रीवल इंटरफेस होते हैं।
+  - चैट इंजन आपके डेटा के साथ "आगे-पीछे" संवाद करने वाले बहु-संदेश, संवादात्मक इंटरफेस होते हैं।
+
+## 👨‍👩‍👧‍👦 LlamaIndex किसके लिए है?
+
+LlamaIndex.TS जावास्क्रिप्ट और TypeScript के साथ LLM ऐप्स बनाने वाले सभी लोगों के लिए उपकरणों का एक मुख्य सेट प्रदान करता है।
+
+हमारा हाई-लेवल API शुरुआती उपयोगकर्ताओं को LlamaIndex.TS का उपयोग डेटा को इंजेस्ट और क्वेरी करने के लिए करने की अनुमति देता है।
+
+अधिक जटिल एप्लिकेशनों के लिए, हमारे लोअर-लेवल API उन्नत उपयोगकर्ताओं को अपनी आवश्यकताओं के अनुसार किसी भी मॉड्यूल - डेटा कनेक्टर, इंडेक्स, रिट्रीवर्स और क्वेरी इंजन्स को कस्टमाइज़ और विस्तारित करने की अनुमति देते हैं।
+
+## शुरू करना
+
+`npm install llamaindex`
+
+हमारी दस्तावेज़ी में [स्थापना निर्देश](./installation.md) और [स्टार्टर ट्यूटोरियल](./starter.md) शामिल हैं, जिनका उपयोग करके आप अपना पहला एप्लिकेशन बना सकते हैं।
+
+एक बार जब आप शुरू हो जाएं, [उच्च स्तरीय अवधारणाएँ](./concepts.md) में LlamaIndex की मॉड्यूलर आर्किटेक्चर का अवलोकन है। अधिक हैंड्स-ऑन प्रैक्टिकल उदाहरणों के लिए, हमारे [एंड-टू-एंड ट्यूटोरियल](./end_to_end.md) को देखें।
+
+## 🗺️ पारिस्थितिकी
+
+डाउनलोड या सहयोग करने के लिए, LlamaIndex को यहां ढूंढें:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## समुदाय
+
+मदद चाहिए? कोई सुविधा सुझाव है? LlamaIndex समुदाय में शामिल हों:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb2048c1400aaca89bfad686b4f72b2ec99b1c03
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# चैट इंजन (ChatEngine)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+चैट इंजन आपके इंडेक्स में डेटा के साथ चैट करने का एक त्वरित और सरल तरीका है।
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// `index` पहले से बनाया हुआ इंडेक्स है
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// चैट शुरू करें (query = उपयोगकर्ता का संदेश)
+const response = await chatEngine.chat(query);
+```
+
+## एपीआई संदर्भ
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..fd18887ff01c3bf3fd3c42e76cc0e9b9cac41c3c
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# सूचकांक
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+एक सूचकांक आपके डेटा के लिए बुनियादी कंटेनर और संगठन है। LlamaIndex.TS दो सूचकांकों का समर्थन करता है:
+
+- `VectorStoreIndex` - प्रतिक्रिया उत्पन्न करते समय LLM को शीर्ष-k `Node` भेजेगा। डिफ़ॉल्ट शीर्ष-k 2 है।
+- `SummaryIndex` - प्रतिक्रिया उत्पन्न करने के लिए सूचकांक के हर `Node` को LLM को भेजेगा।
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "परीक्षण" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## एपीआई संदर्भ
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab9f2afa4aa1d25bc5103071dd71da37f2ed4e69
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# रीडर / लोडर
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex.TS `SimpleDirectoryReader` कक्षा का उपयोग करके फ़ोल्डर से आसानी से फ़ाइलों को लोड करने का समर्थन करता है। वर्तमान में, `.txt`, `.pdf`, `.csv`, `.md` और `.docx` फ़ाइलें समर्थित हैं, और भविष्य में और भी अधिक समर्थित होंगी!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## एपीआई संदर्भ
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..d61cf0202de93770e4a1d7e73bc3498edde4cb22
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# दस्तावेज़ और नोड
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+`दस्तावेज़` और `नोड` किसी भी इंडेक्स के मूल निर्माण खंड हैं। इन ऑब्जेक्ट्स का API समान है, लेकिन `दस्तावेज़` ऑब्जेक्ट पूरी फ़ाइलों का प्रतिनिधित्व करते हैं, जबकि `नोड` मूल दस्तावेज़ के छोटे टुकड़े होते हैं, जो LLM और Q&A के लिए उपयुक्त होते हैं।
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "पाठ", metadata: { कुंजी: "मान" } });
+```
+
+## API संदर्भ
+
+- [दस्तावेज़](../../api/classes/Document.md)
+- [टेक्स्टनोड](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..702993514a07e4a4215543e398f63f447c477749
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,38 @@
+---
+sidebar_position: 3
+---
+
+# क्वेरी इंजन (QueryEngine)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+क्वेरी इंजन एक `Retriever` और एक `ResponseSynthesizer` को एक पाइपलाइन में बांधता है, जो क्वेरी स्ट्रिंग का उपयोग करके नोड्स को प्राप्त करेगा और फिर उन्हें LLM को भेजेगा ताकि एक प्रतिक्रिया उत्पन्न की जा सके।
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## सब प्रश्न क्वेरी इंजन (Sub Question Query Engine)
+
+सब प्रश्न क्वेरी इंजन की मूल अवधारणा यह है कि यह एक अकेली क्वेरी को कई क्वेरियों में विभाजित करता है, प्रत्येक के लिए उत्तर प्राप्त करता है, और फिर उन अलग-अलग उत्तरों को उपयोगकर्ता के लिए एक सुसंगत प्रतिक्रिया में जोड़ता है। आप इसे "चरण-दर-चरण सोचें" प्रॉम्प्ट तकनीक की तरह समझ सकते हैं, लेकिन अपने डेटा स्रोतों पर इटरेशन करते हुए!
+
+### शुरू करना
+
+सब प्रश्न क्वेरी इंजन को आजमाने का सबसे आसान तरीका [उदाहरणों](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts) में subquestion.ts फ़ाइल को चलाना है।
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### उपकरण (Tools)
+
+सब प्रश्न क्वेरी इंजन उपकरणों (Tools) के साथ लागू किया गया है। उपकरणों का मूल विचार यह है कि वे बड़े भाषा मॉडल के लिए क्रियान्वयन योग्य विकल्प होते हैं। इस मामले में, हमारा सब प्रश्न क्वेरी इंजन QueryEngineTool पर निर्भर करता है, जो - जैसा आपने अनुमान लगाया होगा - किसी क्वेरी इंजन पर क्वेरी चलाने का एक उपकरण है। इससे हम मॉडल को यह विकल्प दे सकते हैं कि वह अलग-अलग प्रश्नों के लिए अलग-अलग दस्तावेज़ों से क्वेरी करे। आप यह भी कल्पना कर सकते हैं कि सब प्रश्न क्वेरी इंजन किसी ऐसे उपकरण का उपयोग करे जो वेब पर कुछ खोजता है या Wolfram Alpha से उत्तर प्राप्त करता है।
+
+आप LlamaIndex Python दस्तावेज़ीकरण पर जाकर उपकरणों के बारे में और अधिक जान सकते हैं https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
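+
+नीचे एक न्यूनतम स्केच है, जो रिपॉजिटरी की subquestion.ts उदाहरण फ़ाइल पर आधारित है (यहाँ टूल का नाम और विवरण केवल उदाहरण के लिए हैं):
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+// एक दस्तावेज़ से सरल इंडेक्स बनाएं
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "आपके विषय पर उदाहरण पाठ।" }),
+]);
+
+// हर QueryEngineTool एक क्वेरी इंजन को नाम और विवरण के साथ लपेटता है, जिससे LLM सही टूल चुन सके
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "example_docs",
+        description: "हमारे उदाहरण दस्तावेज़",
+      },
+    },
+  ],
+});
+
+// क्वेरी उप-प्रश्नों में विभाजित होगी और हर उप-प्रश्न उपयुक्त टूल पर चलेगा
+const response = await queryEngine.query("दस्तावेज़ों के बारे में एक जटिल प्रश्न");
+console.log(response.toString());
+```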
+
+## एपीआई संदर्भ (API Reference)
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..41bbf63d214aee07798adb958630288e16b51bb6
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,19 @@
+# मुख्य मॉड्यूल
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+## उच्च स्तरीय मॉड्यूल
+
+- [**दस्तावेज़**](./high_level/documents_and_nodes.md): एक दस्तावेज़ एक टेक्स्ट फ़ाइल, पीडीएफ़ फ़ाइल या अन्य सतत डेटा का प्रतिनिधित्व करता है।
+
+- [**नोड**](./high_level/documents_and_nodes.md): मूल डेटा निर्माण इकाई। आमतौर पर, ये दस्तावेज़ के भाग होते हैं जो संचालन योग्य टुकड़ों में विभाजित होते हैं जो पर्याप्त छोटे होते हैं ताकि इन्हें एक एम्बेडिंग मॉडल और LLM में खिलाया जा सके।
+
+- [**पाठक/लोडर**](./high_level/data_loader.md): एक पाठक या लोडर वह होता है जो वास्तविक दुनिया के किसी दस्तावेज़ को लेकर उसे एक दस्तावेज़ क्लास में बदलता है, जिसे आप फिर अपने इंडेक्स और क्वेरी में उपयोग कर सकते हैं। हम वर्तमान में सादा टेक्स्ट फ़ाइलों और पीडीएफ़ का समर्थन करते हैं, और भविष्य में और भी बहुत कुछ आने वाला है।
+
+- [**इंडेक्स**](./high_level/data_index.md): इंडेक्स नोड्स और उन नोड्स के एम्बेडिंग को संग्रहीत करते हैं।
+
+- [**क्वेरी इंजन**](./high_level/query_engine.md): क्वेरी इंजन आपकी दी गई क्वेरी लेते हैं और आपको परिणाम लौटाते हैं। क्वेरी इंजन आमतौर पर एक पूर्व-निर्मित प्रॉम्प्ट को आपके इंडेक्स से चुने गए नोड्स के साथ मिलाते हैं, ताकि LLM को आपकी क्वेरी का उत्तर देने के लिए आवश्यक संदर्भ मिल सके।
+
+- [**चैट इंजन**](./high_level/chat_engine.md): चैट इंजन आपको आपके इंडेक्स के साथ संवाद करने वाले चैटबॉट का निर्माण करने में मदद करता है।
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..e965894d7c6e385b4f04bbaa36fe02918ab05e4a
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# एम्बेडिंग (Embedding)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex में एम्बेडिंग मॉडल टेक्स्ट के संख्यात्मक प्रतिनिधित्व बनाने के लिए जिम्मेदार है। डिफ़ॉल्ट रूप से, LlamaIndex `text-embedding-ada-002` मॉडल का उपयोग करेगा जो OpenAI से है।
+
+इसे `ServiceContext` ऑब्जेक्ट में स्पष्ट रूप से सेट किया जा सकता है।
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API संदर्भ
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ab3aebd10887ae5a17adb26ca9ae912b8b654fa
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# एलएलएम (LLM)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+एलएलएम टेक्स्ट को पढ़ने और प्रश्नों के लिए प्राकृतिक भाषा के जवाब उत्पन्न करने के लिए जिम्मेदार है। डिफ़ॉल्ट रूप से, LlamaIndex.TS `gpt-3.5-turbo` का उपयोग करता है।
+
+एलएलएम को `ServiceContext` ऑब्जेक्ट में स्पष्ट रूप से सेट किया जा सकता है।
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## एपीआई संदर्भ
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..779fca64d94730c8741a4b703613f1c37d402338
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# नोडपार्सर (NodeParser)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex में `NodeParser` `Document` ऑब्जेक्ट्स को अधिक प्रबंधनीय `Node` ऑब्जेक्ट्स में विभाजित करने के लिए जिम्मेदार है। जब आप `.fromDocuments()` को कॉल करते हैं, तो `ServiceContext` का `NodeParser` यह आपके लिए स्वचालित रूप से करता है। वैकल्पिक रूप से, आप इसका उपयोग दस्तावेज़ों को पहले से विभाजित करने के लिए कर सकते हैं।
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "मैं 10 साल का हूँ। जॉन 20 साल का है।" }),
+]);
+```
+
+## TextSplitter (टेक्स्टस्प्लिटर)
+
+अंतर्निहित टेक्स्ट स्प्लिटर पाठ को वाक्यों में विभाजित करता है। इसे केवल रॉ टेक्स्ट को विभाजित करने के लिए एक स्वतंत्र मॉड्यूल के रूप में भी उपयोग किया जा सकता है।
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("नमस्ते दुनिया");
+```
+
+"
+
+- [सिम्पलनोडपार्सर (SimpleNodeParser)](../../api/classes/SimpleNodeParser.md)
+- [सेंटेंसस्प्लिटर (SentenceSplitter)](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..8c6ad2d7ea83eb763cb1c9072ecd4944cab7e166
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,52 @@
+---
+sidebar_position: 6
+---
+
+# रिस्पॉन्स सिंथेसाइज़र (ResponseSynthesizer)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+रिस्पॉन्स सिंथेसाइज़र जवाब उत्पन्न करने के लिए क्वेरी, नोड और प्रॉम्प्ट टेम्पलेट को LLM को भेजने के लिए जिम्मेदार है। एक जवाब उत्पन्न करने के लिए कुछ मुख्य मोड हैं:
+
+- `Refine`: प्राप्त हर टेक्स्ट चंक से क्रमिक रूप से गुज़रते हुए उत्तर को "बनाएं और परिष्कृत करें"।
+  इसमें प्रत्येक प्राप्त नोड के लिए एक अलग LLM कॉल होती है। विस्तृत उत्तरों के लिए अच्छा है।
+- `CompactAndRefine` (डिफ़ॉल्ट): प्रत्येक LLM कॉल के दौरान प्रॉम्प्ट को "संक्षिप्त" करें, यानी अधिकतम प्रॉम्प्ट साइज़ में जितने टेक्स्ट चंक समा सकें उतने भर दें। यदि एक प्रॉम्प्ट में
+  समाने के लिए बहुत अधिक चंक हों, तो कई संक्षिप्त प्रॉम्प्टों से गुज़रते हुए उत्तर को "बनाएं और परिष्कृत करें"।
+  `Refine` के समान, लेकिन इसमें कम LLM कॉल होनी चाहिए।
+- `TreeSummarize`: टेक्स्ट चंकों के एक सेट और क्वेरी के आधार पर, पुनरावर्ती रूप से एक पेड़ का निर्माण करें
+  और रूट नोड को उत्तर के रूप में लौटाएं। सारांश बनाने के लिए अच्छा है।
+- `SimpleResponseBuilder`: टेक्स्ट चंकों के एक सेट और क्वेरी के आधार पर, क्वेरी को हर टेक्स्ट चंक
+  पर लागू करें और प्रतिक्रियाओं को एक एरे में जमा करें। सभी प्रतिक्रियाओं की एक संयुक्त स्ट्रिंग
+  लौटाता है। तब अच्छा है जब आपको हर टेक्स्ट चंक पर वही क्वेरी अलग-अलग चलानी हो।
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "मैं 10 साल का हूँ।" }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "जॉन 20 साल का है।" }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "मैं कितने साल का हूँ?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## एपीआई संदर्भ
+
+- [रिस्पॉन्स सिंथेसाइज़र (ResponseSynthesizer)](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..16f921481b6b760806574dabd8591bc896844219
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# रिट्रीवर (Retriever)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex में एक रिट्रीवर वह होता है जिसका उपयोग क्वेरी स्ट्रिंग का उपयोग करके इंडेक्स से `Node` को प्राप्त करने के लिए किया जाता है। `VectorIndexRetriever` शीर्ष-k सबसे समान नोड्स को प्राप्त करेगा। वहीं, `SummaryIndexRetriever` क्वेरी के बावजूद सभी नोड्स को प्राप्त करेगा।
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// नोड्स प्राप्त करें!
+const nodesWithScore = await retriever.retrieve("क्वेरी स्ट्रिंग");
+```
+
+## एपीआई संदर्भ (API Reference)
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..78642e24067418d6323a6e99dba4043579e90ece
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# संग्रहण (Storage)
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+LlamaIndex.TS में संग्रहण स्वचालित रूप से काम करता है जब आपने एक `StorageContext` ऑब्जेक्ट कॉन्फ़िगर कर लिया हो। बस `persistDir` को कॉन्फ़िगर करें और इसे एक इंडेक्स से जोड़ें।
+
+वर्तमान में केवल डिस्क पर सहेजना और वहाँ से लोड करना समर्थित है; भविष्य में और एकीकरण की योजना है!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "परीक्षण पाठ" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
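+
+सहेजे गए इंडेक्स को उसी `persistDir` से दोबारा लोड किया जा सकता है। यह एक न्यूनतम स्केच है, यह मानते हुए कि इंडेक्स पिछली रन में सहेजा जा चुका है (`VectorStoreIndex.init` का उपयोग रिपॉजिटरी की storageContext.ts उदाहरण फ़ाइल पर आधारित है):
+
+```typescript
+import { VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+// उसी स्टोरेज फ़ोल्डर की ओर इशारा करता StorageContext
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+// सहेजे गए डेटा से इंडेक्स इनिशियलाइज़ करें (एम्बेडिंग्स दोबारा नहीं बनतीं)
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+const loadedQueryEngine = loadedIndex.asQueryEngine();
+```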
+
+## API संदर्भ
+
+- [संग्रहण संदर्भ (StorageContext)](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef23698ce54470e569c920ad8b820395577bc66f
--- /dev/null
+++ b/apps/docs/i18n/hi/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,56 @@
+---
+sidebar_position: 2
+---
+
+# स्टार्टर ट्यूटोरियल
+
+`इस दस्तावेज़ का अनुवाद स्वचालित रूप से किया गया है और इसमें त्रुटियाँ हो सकती हैं। परिवर्तन सुझाने के लिए पुल रिक्वेस्ट खोलने में संकोच न करें।`
+
+जब आपने [NPM का उपयोग करके LlamaIndex.TS को स्थापित](installation) कर लिया हो और अपनी OpenAI कुंजी सेटअप कर ली हो, तो आप अपना पहला ऐप शुरू करने के लिए तैयार हैं:
+
+एक नए फ़ोल्डर में:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # यदि आवश्यक हो
+```
+
+`example.ts` नामक फ़ाइल बनाएं। यह कोड कुछ उदाहरण डेटा लोड करेगा, एक दस्तावेज़ बनाएगा, इसे इंडेक्स करेगा (जिसमें OpenAI का उपयोग करके embeddings बनाए जाते हैं), और फिर डेटा के बारे में सवालों का उत्तर देने के लिए क्वेरी इंजन बनाएगा।
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // नोड में abramov.txt से निबंध लोड करें
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // निबंध के साथ डॉक्यूमेंट ऑब्जेक्ट बनाएं
+  const document = new Document({ text: essay });
+
+  // पाठ को विभाजित करें और embeddings बनाएं। उन्हें एक VectorStoreIndex में संग्रहीत करें
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // इंडेक्स पर क्वेरी करें
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("कॉलेज में लेखक ने क्या किया था?");
+
+  // उत्तर को आउटपुट करें
+  console.log(response.toString());
+}
+
+main();
+```
+
+फिर आप इसे निम्नलिखित का उपयोग करके चला सकते हैं
+
+```bash
+npx ts-node example.ts
+```
+
+और अधिक सीखने के लिए तैयार हैं? हमारा NextJS प्लेग्राउंड देखें: https://llama-playground.vercel.app/। स्रोत https://github.com/run-llama/ts-playground पर उपलब्ध है।
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/_category_.yml b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..79eb4b8a2939888be2393145205ffe6bc5fea186
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/_category_.yml
@@ -0,0 +1,2 @@
+label: "API"
+position: 6
\ No newline at end of file
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Anthropic.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Anthropic.md
new file mode 100644
index 0000000000000000000000000000000000000000..453d9a0e51ffad4794016b05c9caa8f7aded9b15
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Anthropic.md
@@ -0,0 +1,318 @@
+---
+id: "Anthropic"
+title: "Class: Anthropic"
+sidebar_label: "Anthropic"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Anthropic LLM implementation
+
+## Implements
+
+- [`LLM`](../interfaces/LLM.md)
+
+## Constructors
+
+### constructor
+
+• **new Anthropic**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                    |
+| :------ | :-------------------------------------- |
+| `init?` | `Partial`<[`Anthropic`](Anthropic.md)\> |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:669](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L669)
+
+## Properties
+
+### apiKey
+
+• `Optional` **apiKey**: `string` = `undefined`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:662](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L662)
+
+---
+
+### callbackManager
+
+• `Optional` **callbackManager**: [`CallbackManager`](CallbackManager.md)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:667](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L667)
+
+---
+
+### hasStreaming
+
+• **hasStreaming**: `boolean` = `true`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[hasStreaming](../interfaces/LLM.md#hasstreaming)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:653](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L653)
+
+---
+
+### maxRetries
+
+• **maxRetries**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:663](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L663)
+
+---
+
+### maxTokens
+
+• `Optional` **maxTokens**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:659](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L659)
+
+---
+
+### model
+
+• **model**: `"claude-2"` \| `"claude-instant-1"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:656](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L656)
+
+---
+
+### session
+
+• **session**: `AnthropicSession`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:665](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L665)
+
+---
+
+### temperature
+
+• **temperature**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:657](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L657)
+
+---
+
+### timeout
+
+• `Optional` **timeout**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:664](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L664)
+
+---
+
+### topP
+
+• **topP**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:658](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L658)
+
+## Accessors
+
+### metadata
+
+• `get` **metadata**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name            | Type                                 |
+| :-------------- | :----------------------------------- |
+| `contextWindow` | `number`                             |
+| `maxTokens`     | `undefined` \| `number`              |
+| `model`         | `"claude-2"` \| `"claude-instant-1"` |
+| `temperature`   | `number`                             |
+| `tokenizer`     | `undefined`                          |
+| `topP`          | `number`                             |
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[metadata](../interfaces/LLM.md#metadata)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:693](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L693)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`messages`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a chat response from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                                            | Description                                                                                      |
+| :------------- | :---------------------------------------------- | :----------------------------------------------------------------------------------------------- |
+| `messages`     | [`ChatMessage`](../interfaces/ChatMessage.md)[] | The return type of `chat()` and `complete()` is determined by the `streaming` parameter: set it to `true` to receive an async generator of tokens. |
+| `parentEvent?` | [`Event`](../interfaces/Event.md)               | -                                                                                                |
+| `streaming?`   | `T`                                             | -                                                                                                |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[chat](../interfaces/LLM.md#chat)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:721](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L721)
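+
+A sketch of the `streaming` type parameter in action, assuming the `llm` instance from the constructor example above:
+
+```typescript
+// streaming: true narrows the result to AsyncGenerator<string, void, unknown>
+const stream = await llm.chat(
+  [{ role: "user", content: "Tell a very short story." }],
+  undefined, // parentEvent
+  true, // streaming
+);
+for await (const token of stream) {
+  process.stdout.write(token);
+}
+```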
+
+---
+
+### complete
+
+▸ **complete**<`T`, `R`\>(`prompt`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a prompt completion from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                              | Description            |
+| :------------- | :-------------------------------- | :--------------------- |
+| `prompt`       | `string`                          | the prompt to complete |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) | -                      |
+| `streaming?`   | `T`                               | -                      |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[complete](../interfaces/LLM.md#complete)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:778](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L778)
+
+---
+
+### mapMessagesToPrompt
+
+▸ **mapMessagesToPrompt**(`messages`): `string`
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `messages` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:704](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L704)
+
+---
+
+### streamChat
+
+▸ `Protected` **streamChat**(`messages`, `parentEvent?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                                            |
+| :------------- | :---------------------------------------------- |
+| `messages`     | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+| `parentEvent?` | [`Event`](../interfaces/Event.md)               |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:753](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L753)
+
+---
+
+### streamComplete
+
+▸ `Protected` **streamComplete**(`prompt`, `parentEvent?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `prompt`       | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:796](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L796)
+
+---
+
+### tokens
+
+▸ **tokens**(`messages`): `number`
+
+Calculates the number of tokens needed for the given chat messages
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `messages` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`number`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[tokens](../interfaces/LLM.md#tokens)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:689](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L689)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioSubtitlesReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioSubtitlesReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..12a1681f13e1d20889cbad07b8046ef023cebb6b
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioSubtitlesReader.md
@@ -0,0 +1,128 @@
+---
+id: "AudioSubtitlesReader"
+title: "Class: AudioSubtitlesReader"
+sidebar_label: "AudioSubtitlesReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Transcribe audio or get a transcript, and read subtitles for the transcript in `srt` or `vtt` format.
+
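+A minimal sketch (the audio URL is hypothetical; the reader falls back to the `ASSEMBLYAI_API_KEY` environment variable when no key is passed):
+
+```typescript
+import { AudioSubtitlesReader } from "llamaindex";
+
+const reader = new AudioSubtitlesReader();
+const documents = await reader.loadData(
+  { audio: "https://example.com/episode.mp3" }, // TranscribeParams
+  "vtt", // overrides the default "srt"
+);
+console.log(documents[0].getText());
+```
+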
+## Hierarchy
+
+- `AssemblyAIReader`
+
+  ↳ **`AudioSubtitlesReader`**
+
+## Constructors
+
+### constructor
+
+• **new AudioSubtitlesReader**(`assemblyAIOptions?`)
+
+Creates a new AssemblyAI Reader.
+
+#### Parameters
+
+| Name                 | Type                            | Description                                                                                                                                                                              |
+| :------------------- | :------------------------------ | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `assemblyAIOptions?` | `Partial`<`BaseServiceParams`\> | The options to configure the AssemblyAI Reader. Set `assemblyAIOptions.apiKey` to your AssemblyAI API key, or set the `ASSEMBLYAI_API_KEY` environment variable. |
+
+#### Inherited from
+
+AssemblyAIReader.constructor
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L25)
+
+## Properties
+
+### client
+
+• `Protected` **client**: `AssemblyAI`
+
+#### Inherited from
+
+AssemblyAIReader.client
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L18)
+
+## Methods
+
+### getTranscriptId
+
+▸ `Protected` **getTranscriptId**(`params`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Inherited from
+
+AssemblyAIReader.getTranscriptId
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L52)
+
+---
+
+### loadData
+
+▸ **loadData**(`params`, `subtitleFormat?`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Transcribe audio or get a transcript, and read subtitles for the transcript in `srt` or `vtt` format.
+
+#### Parameters
+
+| Name             | Type                                                   | Default value | Description                                                       |
+| :--------------- | :----------------------------------------------------- | :------------ | :---------------------------------------------------------------- |
+| `params`         | `string` \| [`TranscribeParams`](../#transcribeparams) | `undefined`   | The parameters to transcribe audio or get an existing transcript. |
+| `subtitleFormat` | [`SubtitleFormat`](../#subtitleformat)                 | `"srt"`       | The format of the subtitles, either `srt` or `vtt`.               |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+A promise that resolves to a document containing the subtitles as the page content.
+
+#### Overrides
+
+AssemblyAIReader.loadData
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:124](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L124)
+
+---
+
+### transcribeOrGetTranscript
+
+▸ `Protected` **transcribeOrGetTranscript**(`params`): `Promise`<`Transcript`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`Transcript`\>
+
+#### Inherited from
+
+AssemblyAIReader.transcribeOrGetTranscript
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L44)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptParagraphsReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptParagraphsReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..4661589e77444f71135122073b0944cd5f957656
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptParagraphsReader.md
@@ -0,0 +1,127 @@
+---
+id: "AudioTranscriptParagraphsReader"
+title: "Class: AudioTranscriptParagraphsReader"
+sidebar_label: "AudioTranscriptParagraphsReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Transcribe audio and return a document for each paragraph.
+
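+A sketch of feeding the per-paragraph documents into an index (the URL and index usage are illustrative):
+
+```typescript
+import { AudioTranscriptParagraphsReader, VectorStoreIndex } from "llamaindex";
+
+const reader = new AudioTranscriptParagraphsReader();
+const documents = await reader.loadData({
+  audio: "https://example.com/talk.mp3",
+});
+// One Document per paragraph keeps chunks small for retrieval.
+const index = await VectorStoreIndex.fromDocuments(documents);
+```
+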
+## Hierarchy
+
+- `AssemblyAIReader`
+
+  ↳ **`AudioTranscriptParagraphsReader`**
+
+## Constructors
+
+### constructor
+
+• **new AudioTranscriptParagraphsReader**(`assemblyAIOptions?`)
+
+Creates a new AssemblyAI Reader.
+
+#### Parameters
+
+| Name                 | Type                            | Description                                                                                                                                                                              |
+| :------------------- | :------------------------------ | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `assemblyAIOptions?` | `Partial`<`BaseServiceParams`\> | The options to configure the AssemblyAI Reader. Set `assemblyAIOptions.apiKey` to your AssemblyAI API key, or set the `ASSEMBLYAI_API_KEY` environment variable. |
+
+#### Inherited from
+
+AssemblyAIReader.constructor
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L25)
+
+## Properties
+
+### client
+
+• `Protected` **client**: `AssemblyAI`
+
+#### Inherited from
+
+AssemblyAIReader.client
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L18)
+
+## Methods
+
+### getTranscriptId
+
+▸ `Protected` **getTranscriptId**(`params`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Inherited from
+
+AssemblyAIReader.getTranscriptId
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L52)
+
+---
+
+### loadData
+
+▸ **loadData**(`params`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Transcribe audio or get a transcript, and return a document for each paragraph.
+
+#### Parameters
+
+| Name     | Type                                                   | Description                                                       |
+| :------- | :----------------------------------------------------- | :---------------------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) | The parameters to transcribe audio or get an existing transcript. |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+A promise that resolves to an array of documents, each containing a paragraph of the transcript.
+
+#### Overrides
+
+AssemblyAIReader.loadData
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:85](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L85)
+
+---
+
+### transcribeOrGetTranscript
+
+▸ `Protected` **transcribeOrGetTranscript**(`params`): `Promise`<`Transcript`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`Transcript`\>
+
+#### Inherited from
+
+AssemblyAIReader.transcribeOrGetTranscript
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L44)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..bc6d1f3b27b1f6854fe8145b2833d2b7aaa6f1ca
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptReader.md
@@ -0,0 +1,127 @@
+---
+id: "AudioTranscriptReader"
+title: "Class: AudioTranscriptReader"
+sidebar_label: "AudioTranscriptReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Transcribe audio and read the transcript as a document using AssemblyAI.
+
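+A minimal sketch showing the two accepted parameter shapes (the URL and transcript ID are hypothetical):
+
+```typescript
+import { AudioTranscriptReader } from "llamaindex";
+
+const reader = new AudioTranscriptReader();
+// Transcribe new audio...
+const [doc] = await reader.loadData({
+  audio: "https://example.com/interview.mp3",
+});
+// ...or load an existing transcript by passing its ID as a string:
+// const [doc] = await reader.loadData("existing-transcript-id");
+```
+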
+## Hierarchy
+
+- `AssemblyAIReader`
+
+  ↳ **`AudioTranscriptReader`**
+
+## Constructors
+
+### constructor
+
+• **new AudioTranscriptReader**(`assemblyAIOptions?`)
+
+Creates a new AssemblyAI Reader.
+
+#### Parameters
+
+| Name                 | Type                            | Description                                                                                                                                                                              |
+| :------------------- | :------------------------------ | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `assemblyAIOptions?` | `Partial`<`BaseServiceParams`\> | The options to configure the AssemblyAI Reader. Set `assemblyAIOptions.apiKey` to your AssemblyAI API key, or set the `ASSEMBLYAI_API_KEY` environment variable. |
+
+#### Inherited from
+
+AssemblyAIReader.constructor
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L25)
+
+## Properties
+
+### client
+
+• `Protected` **client**: `AssemblyAI`
+
+#### Inherited from
+
+AssemblyAIReader.client
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L18)
+
+## Methods
+
+### getTranscriptId
+
+▸ `Protected` **getTranscriptId**(`params`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Inherited from
+
+AssemblyAIReader.getTranscriptId
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L52)
+
+---
+
+### loadData
+
+▸ **loadData**(`params`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Transcribe audio or get a transcript, and load the transcript as a document using AssemblyAI.
+
+#### Parameters
+
+| Name     | Type                                                   | Description                                                           |
+| :------- | :----------------------------------------------------- | :-------------------------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) | Parameters to transcribe an audio file or get an existing transcript. |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+A promise that resolves to a single document containing the transcript text.
+
+#### Overrides
+
+AssemblyAIReader.loadData
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L70)
+
+---
+
+### transcribeOrGetTranscript
+
+▸ `Protected` **transcribeOrGetTranscript**(`params`): `Promise`<`Transcript`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`Transcript`\>
+
+#### Inherited from
+
+AssemblyAIReader.transcribeOrGetTranscript
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L44)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptSentencesReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptSentencesReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..8178bef4ab5390a1a1a1adf54821e5d4e890b675
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/AudioTranscriptSentencesReader.md
@@ -0,0 +1,127 @@
+---
+id: "AudioTranscriptSentencesReader"
+title: "Class: AudioTranscriptSentencesReader"
+sidebar_label: "AudioTranscriptSentencesReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Transcribe audio and return a document for each sentence.
+
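+A minimal sketch (the transcript ID is a hypothetical placeholder):
+
+```typescript
+import { AudioTranscriptSentencesReader } from "llamaindex";
+
+const reader = new AudioTranscriptSentencesReader();
+// A string argument is treated as an existing transcript ID.
+const documents = await reader.loadData("existing-transcript-id");
+console.log(`got ${documents.length} sentence documents`);
+```
+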
+## Hierarchy
+
+- `AssemblyAIReader`
+
+  ↳ **`AudioTranscriptSentencesReader`**
+
+## Constructors
+
+### constructor
+
+• **new AudioTranscriptSentencesReader**(`assemblyAIOptions?`)
+
+Creates a new AssemblyAI Reader.
+
+#### Parameters
+
+| Name                 | Type                            | Description                                                                                                                                                                              |
+| :------------------- | :------------------------------ | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `assemblyAIOptions?` | `Partial`<`BaseServiceParams`\> | The options to configure the AssemblyAI Reader. Set `assemblyAIOptions.apiKey` to your AssemblyAI API key, or set the `ASSEMBLYAI_API_KEY` environment variable. |
+
+#### Inherited from
+
+AssemblyAIReader.constructor
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L25)
+
+## Properties
+
+### client
+
+• `Protected` **client**: `AssemblyAI`
+
+#### Inherited from
+
+AssemblyAIReader.client
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L18)
+
+## Methods
+
+### getTranscriptId
+
+▸ `Protected` **getTranscriptId**(`params`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Inherited from
+
+AssemblyAIReader.getTranscriptId
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L52)
+
+---
+
+### loadData
+
+▸ **loadData**(`params`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Transcribe audio or get a transcript, and return a document for each sentence.
+
+#### Parameters
+
+| Name     | Type                                                   | Description                                                       |
+| :------- | :----------------------------------------------------- | :---------------------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) | The parameters to transcribe audio or get an existing transcript. |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+A promise that resolves to an array of documents, each containing a sentence of the transcript.
+
+#### Overrides
+
+AssemblyAIReader.loadData
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:104](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L104)
+
+---
+
+### transcribeOrGetTranscript
+
+▸ `Protected` **transcribeOrGetTranscript**(`params`): `Promise`<`Transcript`\>
+
+#### Parameters
+
+| Name     | Type                                                   |
+| :------- | :----------------------------------------------------- |
+| `params` | `string` \| [`TranscribeParams`](../#transcribeparams) |
+
+#### Returns
+
+`Promise`<`Transcript`\>
+
+#### Inherited from
+
+AssemblyAIReader.transcribeOrGetTranscript
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L44)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseDocumentStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseDocumentStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..f90b88e5f341d7e2577be49ed7e07b41a9fb0278
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseDocumentStore.md
@@ -0,0 +1,289 @@
+---
+id: "BaseDocumentStore"
+title: "Class: BaseDocumentStore"
+sidebar_label: "BaseDocumentStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Constructors
+
+### constructor
+
+• **new BaseDocumentStore**()
+
+## Methods
+
+### addDocuments
+
+▸ `Abstract` **addDocuments**(`docs`, `allowUpdate`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                     |
+| :------------ | :------------------------------------------------------- |
+| `docs`        | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+| `allowUpdate` | `boolean`                                                |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L27)
+
+---
+
+### deleteDocument
+
+▸ `Abstract` **deleteDocument**(`docId`, `raiseError`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name         | Type      |
+| :----------- | :-------- |
+| `docId`      | `string`  |
+| `raiseError` | `boolean` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L34)
+
+---
+
+### deleteRefDoc
+
+▸ `Abstract` **deleteRefDoc**(`refDocId`, `raiseError`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name         | Type      |
+| :----------- | :-------- |
+| `refDocId`   | `string`  |
+| `raiseError` | `boolean` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L48)
+
+---
+
+### docs
+
+▸ `Abstract` **docs**(): `Promise`<`Record`<`string`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Returns
+
+`Promise`<`Record`<`string`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L25)
+
+---
+
+### documentExists
+
+▸ `Abstract` **documentExists**(`docId`): `Promise`<`boolean`\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `docId` | `string` |
+
+#### Returns
+
+`Promise`<`boolean`\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:36](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L36)
+
+---
+
+### getAllRefDocInfo
+
+▸ `Abstract` **getAllRefDocInfo**(): `Promise`<`undefined` \| `Record`<`string`, [`RefDocInfo`](../interfaces/RefDocInfo.md)\>\>
+
+#### Returns
+
+`Promise`<`undefined` \| `Record`<`string`, [`RefDocInfo`](../interfaces/RefDocInfo.md)\>\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L44)
+
+---
+
+### getDocument
+
+▸ `Abstract` **getDocument**(`docId`, `raiseError`): `Promise`<`undefined` \| [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Parameters
+
+| Name         | Type      |
+| :----------- | :-------- |
+| `docId`      | `string`  |
+| `raiseError` | `boolean` |
+
+#### Returns
+
+`Promise`<`undefined` \| [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L29)
+
+---
+
+### getDocumentHash
+
+▸ `Abstract` **getDocumentHash**(`docId`): `Promise`<`undefined` \| `string`\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `docId` | `string` |
+
+#### Returns
+
+`Promise`<`undefined` \| `string`\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:41](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L41)
+
+---
+
+### getNode
+
+▸ **getNode**(`nodeId`, `raiseError?`): `Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Parameters
+
+| Name         | Type      | Default value |
+| :----------- | :-------- | :------------ |
+| `nodeId`     | `string`  | `undefined`   |
+| `raiseError` | `boolean` | `true`        |
+
+#### Returns
+
+`Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L57)
+
+---
+
+### getNodeDict
+
+▸ **getNodeDict**(`nodeIdDict`): `Promise`<`Record`<`number`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Parameters
+
+| Name         | Type     |
+| :----------- | :------- |
+| `nodeIdDict` | `Object` |
+
+#### Returns
+
+`Promise`<`Record`<`number`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L65)
+
+---
+
+### getNodes
+
+▸ **getNodes**(`nodeIds`, `raiseError?`): `Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name         | Type       | Default value |
+| :----------- | :--------- | :------------ |
+| `nodeIds`    | `string`[] | `undefined`   |
+| `raiseError` | `boolean`  | `true`        |
+
+#### Returns
+
+`Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L51)
+
+---
+
+### getRefDocInfo
+
+▸ `Abstract` **getRefDocInfo**(`refDocId`): `Promise`<`undefined` \| [`RefDocInfo`](../interfaces/RefDocInfo.md)\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `refDocId` | `string` |
+
+#### Returns
+
+`Promise`<`undefined` \| [`RefDocInfo`](../interfaces/RefDocInfo.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:46](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L46)
+
+---
+
+### persist
+
+▸ **persist**(`persistPath?`, `fs?`): `void`
+
+#### Parameters
+
+| Name          | Type                                                      | Default value        |
+| :------------ | :-------------------------------------------------------- | :------------------- |
+| `persistPath` | `string`                                                  | `defaultPersistPath` |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `undefined`          |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L17)
+
+---
+
+### setDocumentHash
+
+▸ `Abstract` **setDocumentHash**(`docId`, `docHash`): `void`
+
+#### Parameters
+
+| Name      | Type     |
+| :-------- | :------- |
+| `docId`   | `string` |
+| `docHash` | `string` |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:39](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L39)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseEmbedding.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseEmbedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..b370af25e76a8e518a7cd7c4195e2ba347fc740c
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseEmbedding.md
@@ -0,0 +1,83 @@
+---
+id: "BaseEmbedding"
+title: "Class: BaseEmbedding"
+sidebar_label: "BaseEmbedding"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- **`BaseEmbedding`**
+
+  ↳ [`MultiModalEmbedding`](MultiModalEmbedding.md)
+
+  ↳ [`OpenAIEmbedding`](OpenAIEmbedding.md)
+
+## Constructors
+
+### constructor
+
+• **new BaseEmbedding**()
+
+## Methods
+
+### getQueryEmbedding
+
+▸ `Abstract` **getQueryEmbedding**(`query`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L23)
+
+---
+
+### getTextEmbedding
+
+▸ `Abstract` **getTextEmbedding**(`text`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L22)
+
+---
+
+### similarity
+
+▸ **similarity**(`embedding1`, `embedding2`, `mode?`): `number`
+
+#### Parameters
+
+| Name         | Type                                           | Default value            |
+| :----------- | :--------------------------------------------- | :----------------------- |
+| `embedding1` | `number`[]                                     | `undefined`              |
+| `embedding2` | `number`[]                                     | `undefined`              |
+| `mode`       | [`SimilarityType`](../enums/SimilarityType.md) | `SimilarityType.DEFAULT` |
+
+#### Returns
+
+`number`
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L14)
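+
+A sketch of `similarity` via a concrete subclass (the embedding texts are illustrative):
+
+```typescript
+import { OpenAIEmbedding, SimilarityType } from "llamaindex";
+
+const embedModel = new OpenAIEmbedding(); // a concrete BaseEmbedding
+const a = await embedModel.getTextEmbedding("a quick brown fox");
+const b = await embedModel.getTextEmbedding("a fast auburn fox");
+// DEFAULT mode is the library's standard similarity measure
+// (dot product, equivalent to cosine for normalized embeddings).
+const score = embedModel.similarity(a, b, SimilarityType.DEFAULT);
+```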
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseInMemoryKVStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseInMemoryKVStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..3b2921742e3d9531050c90d090ce774a1b86aa54
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseInMemoryKVStore.md
@@ -0,0 +1,164 @@
+---
+id: "BaseInMemoryKVStore"
+title: "Class: BaseInMemoryKVStore"
+sidebar_label: "BaseInMemoryKVStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- [`BaseKVStore`](BaseKVStore.md)
+
+  ↳ **`BaseInMemoryKVStore`**
+
+## Constructors
+
+### constructor
+
+• **new BaseInMemoryKVStore**()
+
+#### Inherited from
+
+[BaseKVStore](BaseKVStore.md).[constructor](BaseKVStore.md#constructor)
+
+## Methods
+
+### delete
+
+▸ `Abstract` **delete**(`key`, `collection?`): `Promise`<`boolean`\>
+
+#### Parameters
+
+| Name          | Type     |
+| :------------ | :------- |
+| `key`         | `string` |
+| `collection?` | `string` |
+
+#### Returns
+
+`Promise`<`boolean`\>
+
+#### Inherited from
+
+[BaseKVStore](BaseKVStore.md).[delete](BaseKVStore.md#delete)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L14)
+
+---
+
+### get
+
+▸ `Abstract` **get**(`key`, `collection?`): `Promise`<`StoredValue`\>
+
+#### Parameters
+
+| Name          | Type     |
+| :------------ | :------- |
+| `key`         | `string` |
+| `collection?` | `string` |
+
+#### Returns
+
+`Promise`<`StoredValue`\>
+
+#### Inherited from
+
+[BaseKVStore](BaseKVStore.md).[get](BaseKVStore.md#get)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L12)
+
+---
+
+### getAll
+
+▸ `Abstract` **getAll**(`collection?`): `Promise`<`Record`<`string`, `StoredValue`\>\>
+
+#### Parameters
+
+| Name          | Type     |
+| :------------ | :------- |
+| `collection?` | `string` |
+
+#### Returns
+
+`Promise`<`Record`<`string`, `StoredValue`\>\>
+
+#### Inherited from
+
+[BaseKVStore](BaseKVStore.md).[getAll](BaseKVStore.md#getall)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L13)
+
+---
+
+### persist
+
+▸ `Abstract` **persist**(`persistPath`, `fs?`): `void`
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L18)
+
+---
+
+### put
+
+▸ `Abstract` **put**(`key`, `val`, `collection?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                       |
+| :------------ | :------------------------- |
+| `key`         | `string`                   |
+| `val`         | `Record`<`string`, `any`\> |
+| `collection?` | `string`                   |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+[BaseKVStore](BaseKVStore.md).[put](BaseKVStore.md#put)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L7)
+
+---
+
+### fromPersistPath
+
+▸ `Static` **fromPersistPath**(`persistPath`): [`BaseInMemoryKVStore`](BaseInMemoryKVStore.md)
+
+#### Parameters
+
+| Name          | Type     |
+| :------------ | :------- |
+| `persistPath` | `string` |
+
+#### Returns
+
+[`BaseInMemoryKVStore`](BaseInMemoryKVStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L19)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseIndex.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseIndex.md
new file mode 100644
index 0000000000000000000000000000000000000000..f0e2a70307318c955d6b8577287734bcc4d72f8b
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseIndex.md
@@ -0,0 +1,218 @@
+---
+id: "BaseIndex"
+title: "Class: BaseIndex<T>"
+sidebar_label: "BaseIndex"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Indexes are the data structures in which we store our nodes and embeddings so
+they can be retrieved for our queries.
+
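+A minimal sketch using a concrete subclass (the document text and query are illustrative):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// VectorStoreIndex is one of the BaseIndex implementations listed below.
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "LlamaIndex.TS stores nodes and embeddings." }),
+]);
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("What does the index store?");
+console.log(response.toString());
+```
+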
+## Type parameters
+
+| Name |
+| :--- |
+| `T`  |
+
+## Hierarchy
+
+- **`BaseIndex`**
+
+  ↳ [`KeywordTableIndex`](KeywordTableIndex.md)
+
+  ↳ [`SummaryIndex`](SummaryIndex.md)
+
+  ↳ [`VectorStoreIndex`](VectorStoreIndex.md)
+
+## Constructors
+
+### constructor
+
+• **new BaseIndex**<`T`\>(`init`)
+
+#### Type parameters
+
+| Name |
+| :--- |
+| `T`  |
+
+#### Parameters
+
+| Name   | Type                                                    |
+| :----- | :------------------------------------------------------ |
+| `init` | [`BaseIndexInit`](../interfaces/BaseIndexInit.md)<`T`\> |
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:161](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L161)
+
+## Properties
+
+### docStore
+
+• **docStore**: [`BaseDocumentStore`](BaseDocumentStore.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L156)
+
+---
+
+### indexStore
+
+• `Optional` **indexStore**: [`BaseIndexStore`](BaseIndexStore.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:158](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L158)
+
+---
+
+### indexStruct
+
+• **indexStruct**: `T`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:159](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L159)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:154](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L154)
+
+---
+
+### storageContext
+
+• **storageContext**: [`StorageContext`](../interfaces/StorageContext.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L155)
+
+---
+
+### vectorStore
+
+• `Optional` **vectorStore**: [`VectorStore`](../interfaces/VectorStore.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L157)
+
+## Methods
+
+### asQueryEngine
+
+▸ `Abstract` **asQueryEngine**(`options?`): [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+Create a new query engine from the index. It will also create a retriever
+and response synthesizer if they are not provided.
+
+#### Parameters
+
+| Name                           | Type                                              | Description                                                      |
+| :----------------------------- | :------------------------------------------------ | :--------------------------------------------------------------- |
+| `options?`                     | `Object`                                          | you can supply your own custom Retriever and ResponseSynthesizer |
+| `options.responseSynthesizer?` | [`ResponseSynthesizer`](ResponseSynthesizer.md)   | -                                                                |
+| `options.retriever?`           | [`BaseRetriever`](../interfaces/BaseRetriever.md) | -                                                                |
+
+#### Returns
+
+[`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:181](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L181)
+
+---
+
+### asRetriever
+
+▸ `Abstract` **asRetriever**(`options?`): [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+Create a new retriever from the index.
+
+#### Parameters
+
+| Name       | Type  |
+| :--------- | :---- |
+| `options?` | `any` |
+
+#### Returns
+
+[`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:174](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L174)
+
+---
+
+### deleteRefDoc
+
+▸ `Abstract` **deleteRefDoc**(`refDocId`, `deleteFromDocStore?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name                  | Type      |
+| :-------------------- | :-------- |
+| `refDocId`            | `string`  |
+| `deleteFromDocStore?` | `boolean` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:199](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L199)
+
+---
+
+### insert
+
+▸ **insert**(`document`): `Promise`<`void`\>
+
+Insert a document into the index.
+
+#### Parameters
+
+| Name       | Type                                                   |
+| :--------- | :----------------------------------------------------- |
+| `document` | [`Document`](Document.md)<[`Metadata`](../#metadata)\> |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:190](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L190)
+
+---
+
+### insertNodes
+
+▸ `Abstract` **insertNodes**(`nodes`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name    | Type                                                     |
+| :------ | :------------------------------------------------------- |
+| `nodes` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:198](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L198)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseIndexStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseIndexStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..83a350c5915323e335396fc03a65f4c2c55c933d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseIndexStore.md
@@ -0,0 +1,108 @@
+---
+id: "BaseIndexStore"
+title: "Class: BaseIndexStore"
+sidebar_label: "BaseIndexStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Constructors
+
+### constructor
+
+• **new BaseIndexStore**()
+
+## Methods
+
+### addIndexStruct
+
+▸ `Abstract` **addIndexStruct**(`indexStruct`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                            |
+| :------------ | :------------------------------ |
+| `indexStruct` | [`IndexStruct`](IndexStruct.md) |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/types.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/types.ts#L13)
+
+---
+
+### deleteIndexStruct
+
+▸ `Abstract` **deleteIndexStruct**(`key`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name  | Type     |
+| :---- | :------- |
+| `key` | `string` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/types.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/types.ts#L15)
+
+---
+
+### getIndexStruct
+
+▸ `Abstract` **getIndexStruct**(`structId?`): `Promise`<`undefined` \| [`IndexStruct`](IndexStruct.md)\>
+
+#### Parameters
+
+| Name        | Type     |
+| :---------- | :------- |
+| `structId?` | `string` |
+
+#### Returns
+
+`Promise`<`undefined` \| [`IndexStruct`](IndexStruct.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/types.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/types.ts#L17)
+
+---
+
+### getIndexStructs
+
+▸ `Abstract` **getIndexStructs**(): `Promise`<[`IndexStruct`](IndexStruct.md)[]\>
+
+#### Returns
+
+`Promise`<[`IndexStruct`](IndexStruct.md)[]\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/types.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/types.ts#L11)
+
+---
+
+### persist
+
+▸ **persist**(`persistPath?`, `fs?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                      | Default value        |
+| :------------ | :-------------------------------------------------------- | :------------------- |
+| `persistPath` | `string`                                                  | `defaultPersistPath` |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `undefined`          |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/types.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/types.ts#L19)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseKVStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseKVStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..929faba193bfb387f761179fd7434e82cc9bb7c6
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseKVStore.md
@@ -0,0 +1,105 @@
+---
+id: "BaseKVStore"
+title: "Class: BaseKVStore"
+sidebar_label: "BaseKVStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- **`BaseKVStore`**
+
+  ↳ [`SimpleKVStore`](SimpleKVStore.md)
+
+  ↳ [`BaseInMemoryKVStore`](BaseInMemoryKVStore.md)
+
+## Constructors
+
+### constructor
+
+• **new BaseKVStore**()
+
+## Methods
+
+### delete
+
+▸ `Abstract` **delete**(`key`, `collection?`): `Promise`<`boolean`\>
+
+#### Parameters
+
+| Name          | Type     |
+| :------------ | :------- |
+| `key`         | `string` |
+| `collection?` | `string` |
+
+#### Returns
+
+`Promise`<`boolean`\>
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L14)
+
+---
+
+### get
+
+▸ `Abstract` **get**(`key`, `collection?`): `Promise`<`StoredValue`\>
+
+#### Parameters
+
+| Name          | Type     |
+| :------------ | :------- |
+| `key`         | `string` |
+| `collection?` | `string` |
+
+#### Returns
+
+`Promise`<`StoredValue`\>
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L12)
+
+---
+
+### getAll
+
+▸ `Abstract` **getAll**(`collection?`): `Promise`<`Record`<`string`, `StoredValue`\>\>
+
+#### Parameters
+
+| Name          | Type     |
+| :------------ | :------- |
+| `collection?` | `string` |
+
+#### Returns
+
+`Promise`<`Record`<`string`, `StoredValue`\>\>
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L13)
+
+---
+
+### put
+
+▸ `Abstract` **put**(`key`, `val`, `collection?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                       |
+| :------------ | :------------------------- |
+| `key`         | `string`                   |
+| `val`         | `Record`<`string`, `any`\> |
+| `collection?` | `string`                   |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/types.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/types.ts#L7)
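+
+A sketch using the `SimpleKVStore` subclass listed above (key, value, and collection names are illustrative):
+
+```typescript
+import { SimpleKVStore } from "llamaindex";
+
+const store = new SimpleKVStore(); // a concrete BaseKVStore
+await store.put("greeting", { text: "bok" }, "demo_collection");
+const value = await store.get("greeting", "demo_collection"); // { text: "bok" }
+const removed = await store.delete("greeting", "demo_collection"); // true
+```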
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseNode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseNode.md
new file mode 100644
index 0000000000000000000000000000000000000000..b4088fcc0d1d74ab7a2d14c0fd8d9a18dd9a14be
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/BaseNode.md
@@ -0,0 +1,320 @@
+---
+id: "BaseNode"
+title: "Class: BaseNode<T>"
+sidebar_label: "BaseNode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Generic abstract class for retrievable nodes
+
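+A sketch using the `TextNode` subclass listed below (the text and metadata are illustrative):
+
+```typescript
+import { MetadataMode, TextNode } from "llamaindex";
+
+const node = new TextNode({
+  text: "Nodes are the unit of retrieval.",
+  metadata: { source: "demo" },
+});
+console.log(node.id_); // a UUID by default
+console.log(node.getContent(MetadataMode.NONE));
+```
+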
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Hierarchy
+
+- **`BaseNode`**
+
+  ↳ [`TextNode`](TextNode.md)
+
+## Constructors
+
+### constructor
+
+• **new BaseNode**<`T`\>(`init?`)
+
+#### Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+#### Parameters
+
+| Name    | Type                                        |
+| :------ | :------------------------------------------ |
+| `init?` | `Partial`<[`BaseNode`](BaseNode.md)<`T`\>\> |
+
+#### Defined in
+
+[packages/core/src/Node.ts:60](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L60)
+
+## Properties
+
+### embedding
+
+• `Optional` **embedding**: `number`[]
+
+#### Defined in
+
+[packages/core/src/Node.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L51)
+
+---
+
+### excludedEmbedMetadataKeys
+
+• **excludedEmbedMetadataKeys**: `string`[] = `[]`
+
+#### Defined in
+
+[packages/core/src/Node.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L55)
+
+---
+
+### excludedLlmMetadataKeys
+
+• **excludedLlmMetadataKeys**: `string`[] = `[]`
+
+#### Defined in
+
+[packages/core/src/Node.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L56)
+
+---
+
+### hash
+
+• **hash**: `string` = `""`
+
+#### Defined in
+
+[packages/core/src/Node.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L58)
+
+---
+
+### id\_
+
+• **id\_**: `string`
+
+The unique ID of the Node/Document. The trailing underscore is here
+to avoid collisions with the id keyword in Python.
+
+Set to a UUID by default.
+
+#### Defined in
+
+[packages/core/src/Node.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L50)
+
+---
+
+### metadata
+
+• **metadata**: `T`
+
+#### Defined in
+
+[packages/core/src/Node.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L54)
+
+---
+
+### relationships
+
+• **relationships**: `Partial`<`Record`<[`NodeRelationship`](../enums/NodeRelationship.md), [`RelatedNodeType`](../#relatednodetype)<`T`\>\>\> = `{}`
+
+#### Defined in
+
+[packages/core/src/Node.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L57)
+
+## Accessors
+
+### childNodes
+
+• `get` **childNodes**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Defined in
+
+[packages/core/src/Node.ts:112](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L112)
+
+---
+
+### nextNode
+
+• `get` **nextNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:92](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L92)
+
+---
+
+### parentNode
+
+• `get` **parentNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:102](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L102)
+
+---
+
+### prevNode
+
+• `get` **prevNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L80)
+
+---
+
+### sourceNode
+
+• `get` **sourceNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L70)
+
+## Methods
+
+### asRelatedNodeInfo
+
+▸ **asRelatedNodeInfo**(): [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+[`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L134)
+
+---
+
+### generateHash
+
+▸ `Abstract` **generateHash**(): `string`
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Node.ts:124](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L124)
+
+---
+
+### getContent
+
+▸ `Abstract` **getContent**(`metadataMode`): `string`
+
+#### Parameters
+
+| Name           | Type                                       |
+| :------------- | :----------------------------------------- |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Node.ts:66](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L66)
+
+---
+
+### getEmbedding
+
+▸ **getEmbedding**(): `number`[]
+
+#### Returns
+
+`number`[]
+
+#### Defined in
+
+[packages/core/src/Node.ts:126](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L126)
+
+---
+
+### getMetadataStr
+
+▸ `Abstract` **getMetadataStr**(`metadataMode`): `string`
+
+#### Parameters
+
+| Name           | Type                                       |
+| :------------- | :----------------------------------------- |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Node.ts:67](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L67)
+
+---
+
+### getType
+
+▸ `Abstract` **getType**(): [`ObjectType`](../enums/ObjectType.md)
+
+#### Returns
+
+[`ObjectType`](../enums/ObjectType.md)
+
+#### Defined in
+
+[packages/core/src/Node.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L64)
+
+---
+
+### setContent
+
+▸ `Abstract` **setContent**(`value`): `void`
+
+#### Parameters
+
+| Name    | Type  |
+| :------ | :---- |
+| `value` | `any` |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/Node.ts:68](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L68)
+
+---
+
+### toJSON
+
+▸ **toJSON**(): `Record`<`string`, `any`\>
+
+Used with the built-in JSON.stringify.
+
+#### Returns
+
+`Record`<`string`, `any`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L146)
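+
+Because `toJSON` is defined, a node serializes cleanly with the built-in `JSON.stringify`. A one-line sketch, assuming a concrete subclass such as `TextNode` from the `llamaindex` package:
+
+```typescript
+import { TextNode } from "llamaindex";
+
+const node = new TextNode({ text: "hello" });
+// JSON.stringify invokes node.toJSON() under the hood.
+const serialized = JSON.stringify(node);
+```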
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CallbackManager.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CallbackManager.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7d410af4c48a50c715764c34d6455fb070d6041
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CallbackManager.md
@@ -0,0 +1,83 @@
+---
+id: "CallbackManager"
+title: "Class: CallbackManager"
+sidebar_label: "CallbackManager"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implements
+
+- `CallbackManagerMethods`
+
+## Constructors
+
+### constructor
+
+• **new CallbackManager**(`handlers?`)
+
+#### Parameters
+
+| Name        | Type                     |
+| :---------- | :----------------------- |
+| `handlers?` | `CallbackManagerMethods` |
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:86](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L86)
+
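+For illustration, a minimal sketch of wiring up both optional handlers. It assumes the class is imported from the `llamaindex` package; the handler shapes follow the property signatures documented below, and the `console.log` bodies are placeholders.
+
+```typescript
+import { CallbackManager } from "llamaindex";
+
+// Both handlers are optional, and each may be synchronous or return a Promise.
+const callbackManager = new CallbackManager({
+  onLLMStream: (params) => {
+    // Fires as the LLM streams tokens back.
+    console.log("stream event", params);
+  },
+  onRetrieve: async (params) => {
+    // Fires after a retrieval step completes.
+    console.log("retrieve event", params);
+  },
+});
+```
+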
+## Properties
+
+### onLLMStream
+
+• `Optional` **onLLMStream**: (`params`: [`StreamCallbackResponse`](../interfaces/StreamCallbackResponse.md)) => `void` \| `Promise`<`void`\>
+
+#### Type declaration
+
+▸ (`params`): `void` \| `Promise`<`void`\>
+
+##### Parameters
+
+| Name     | Type                                                                |
+| :------- | :------------------------------------------------------------------ |
+| `params` | [`StreamCallbackResponse`](../interfaces/StreamCallbackResponse.md) |
+
+##### Returns
+
+`void` \| `Promise`<`void`\>
+
+#### Implementation of
+
+CallbackManagerMethods.onLLMStream
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:83](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L83)
+
+---
+
+### onRetrieve
+
+• `Optional` **onRetrieve**: (`params`: [`RetrievalCallbackResponse`](../interfaces/RetrievalCallbackResponse.md)) => `void` \| `Promise`<`void`\>
+
+#### Type declaration
+
+▸ (`params`): `void` \| `Promise`<`void`\>
+
+##### Parameters
+
+| Name     | Type                                                                      |
+| :------- | :------------------------------------------------------------------------ |
+| `params` | [`RetrievalCallbackResponse`](../interfaces/RetrievalCallbackResponse.md) |
+
+##### Returns
+
+`void` \| `Promise`<`void`\>
+
+#### Implementation of
+
+CallbackManagerMethods.onRetrieve
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:84](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L84)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ClipEmbedding.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ClipEmbedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..e6612b79712e627e69d8cf8f50cfa736d9e10c62
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ClipEmbedding.md
@@ -0,0 +1,251 @@
+---
+id: "ClipEmbedding"
+title: "Class: ClipEmbedding"
+sidebar_label: "ClipEmbedding"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- [`MultiModalEmbedding`](MultiModalEmbedding.md)
+
+  ↳ **`ClipEmbedding`**
+
+## Constructors
+
+### constructor
+
+• **new ClipEmbedding**()
+
+#### Inherited from
+
+[MultiModalEmbedding](MultiModalEmbedding.md).[constructor](MultiModalEmbedding.md#constructor)
+
+## Properties
+
+### modelType
+
+• **modelType**: [`ClipEmbeddingModelType`](../enums/ClipEmbeddingModelType.md) = `ClipEmbeddingModelType.XENOVA_CLIP_VIT_BASE_PATCH16`
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L11)
+
+---
+
+### processor
+
+• `Private` **processor**: `any`
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L15)
+
+---
+
+### textModel
+
+• `Private` **textModel**: `any`
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L17)
+
+---
+
+### tokenizer
+
+• `Private` **tokenizer**: `any`
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L14)
+
+---
+
+### visionModel
+
+• `Private` **visionModel**: `any`
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L16)
+
+## Methods
+
+### getImageEmbedding
+
+▸ **getImageEmbedding**(`image`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name    | Type                         |
+| :------ | :--------------------------- |
+| `image` | [`ImageType`](../#imagetype) |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Overrides
+
+[MultiModalEmbedding](MultiModalEmbedding.md).[getImageEmbedding](MultiModalEmbedding.md#getimageembedding)
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:61](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L61)
+
+---
+
+### getImageEmbeddings
+
+▸ **getImageEmbeddings**(`images`): `Promise`<`number`[][]\>
+
+#### Parameters
+
+| Name     | Type                           |
+| :------- | :----------------------------- |
+| `images` | [`ImageType`](../#imagetype)[] |
+
+#### Returns
+
+`Promise`<`number`[][]\>
+
+#### Inherited from
+
+[MultiModalEmbedding](MultiModalEmbedding.md).[getImageEmbeddings](MultiModalEmbedding.md#getimageembeddings)
+
+#### Defined in
+
+[packages/core/src/embeddings/MultiModalEmbedding.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/MultiModalEmbedding.ts#L11)
+
+---
+
+### getProcessor
+
+▸ **getProcessor**(): `Promise`<`any`\>
+
+#### Returns
+
+`Promise`<`any`\>
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L27)
+
+---
+
+### getQueryEmbedding
+
+▸ **getQueryEmbedding**(`query`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Overrides
+
+[MultiModalEmbedding](MultiModalEmbedding.md).[getQueryEmbedding](MultiModalEmbedding.md#getqueryembedding)
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:76](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L76)
+
+---
+
+### getTextEmbedding
+
+▸ **getTextEmbedding**(`text`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Overrides
+
+[MultiModalEmbedding](MultiModalEmbedding.md).[getTextEmbedding](MultiModalEmbedding.md#gettextembedding)
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:68](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L68)
+
+---
+
+### getTextModel
+
+▸ **getTextModel**(): `Promise`<`any`\>
+
+#### Returns
+
+`Promise`<`any`\>
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L48)
+
+---
+
+### getTokenizer
+
+▸ **getTokenizer**(): `Promise`<`any`\>
+
+#### Returns
+
+`Promise`<`any`\>
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L19)
+
+---
+
+### getVisionModel
+
+▸ **getVisionModel**(): `Promise`<`any`\>
+
+#### Returns
+
+`Promise`<`any`\>
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:35](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L35)
+
+---
+
+### similarity
+
+▸ **similarity**(`embedding1`, `embedding2`, `mode?`): `number`
+
+#### Parameters
+
+| Name         | Type                                           | Default value            |
+| :----------- | :--------------------------------------------- | :----------------------- |
+| `embedding1` | `number`[]                                     | `undefined`              |
+| `embedding2` | `number`[]                                     | `undefined`              |
+| `mode`       | [`SimilarityType`](../enums/SimilarityType.md) | `SimilarityType.DEFAULT` |
+
+#### Returns
+
+`number`
+
+#### Inherited from
+
+[MultiModalEmbedding](MultiModalEmbedding.md).[similarity](MultiModalEmbedding.md#similarity)
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L14)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CompactAndRefine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CompactAndRefine.md
new file mode 100644
index 0000000000000000000000000000000000000000..b488664e5877be5a421d76534943a8a4f30b6519
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CompactAndRefine.md
@@ -0,0 +1,134 @@
+---
+id: "CompactAndRefine"
+title: "Class: CompactAndRefine"
+sidebar_label: "CompactAndRefine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+CompactAndRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.
+
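+As a rough sketch of driving the synthesizer directly (assuming the `llamaindex` exports used below, including `serviceContextFromDefaults`; in a typical application this class is constructed for you by a response synthesizer):
+
+```typescript
+import { CompactAndRefine, serviceContextFromDefaults } from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+const synthesizer = new CompactAndRefine(serviceContext);
+
+// getResponse packs the chunks into as few prompts as possible,
+// then refines the answer across them.
+const answer = await synthesizer.getResponse(
+  "What did the author do growing up?", // query
+  ["first text chunk...", "second text chunk..."], // textChunks
+);
+console.log(answer);
+```
+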
+## Hierarchy
+
+- [`Refine`](Refine.md)
+
+  ↳ **`CompactAndRefine`**
+
+## Constructors
+
+### constructor
+
+• **new CompactAndRefine**(`serviceContext`, `textQATemplate?`, `refineTemplate?`)
+
+#### Parameters
+
+| Name              | Type                                                |
+| :---------------- | :-------------------------------------------------- |
+| `serviceContext`  | [`ServiceContext`](../interfaces/ServiceContext.md) |
+| `textQATemplate?` | (`__namedParameters`: `Object`) => `string`         |
+| `refineTemplate?` | (`__namedParameters`: `Object`) => `string`         |
+
+#### Inherited from
+
+[Refine](Refine.md).[constructor](Refine.md#constructor)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:82](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L82)
+
+## Properties
+
+### refineTemplate
+
+• **refineTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+[Refine](Refine.md).[refineTemplate](Refine.md#refinetemplate)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L80)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+[Refine](Refine.md).[serviceContext](Refine.md#servicecontext)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:78](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L78)
+
+---
+
+### textQATemplate
+
+• **textQATemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+[Refine](Refine.md).[textQATemplate](Refine.md#textqatemplate)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:79](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L79)
+
+## Methods
+
+### getResponse
+
+▸ **getResponse**(`query`, `textChunks`, `parentEvent?`, `prevResponse?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name            | Type                              |
+| :-------------- | :-------------------------------- |
+| `query`         | `string`                          |
+| `textChunks`    | `string`[]                        |
+| `parentEvent?`  | [`Event`](../interfaces/Event.md) |
+| `prevResponse?` | `string`                          |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Overrides
+
+[Refine](Refine.md).[getResponse](Refine.md#getresponse)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:185](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L185)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CondenseQuestionChatEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CondenseQuestionChatEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..69ebec045b1b82f52bb18e2efd052f8aed7ac678
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/CondenseQuestionChatEngine.md
@@ -0,0 +1,170 @@
+---
+id: "CondenseQuestionChatEngine"
+title: "Class: CondenseQuestionChatEngine"
+sidebar_label: "CondenseQuestionChatEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+CondenseQuestionChatEngine is used in conjunction with an Index (for example, VectorStoreIndex).
+It performs two steps when given a user's chat message: first, it condenses the chat message
+together with the previous chat history into a standalone question with more context.
+Then, it queries the underlying Index using the new question with context and returns
+the response.
+CondenseQuestionChatEngine performs well when the input consists primarily of questions about the
+underlying data. It performs less well when the chat messages are not questions about the
+data, or refer heavily to previous context.
+
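+A minimal usage sketch, assuming the `llamaindex` package exports used below and placeholder document text:
+
+```typescript
+import {
+  CondenseQuestionChatEngine,
+  Document,
+  VectorStoreIndex,
+} from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "..." }),
+]);
+
+const chatEngine = new CondenseQuestionChatEngine({
+  queryEngine: index.asQueryEngine(),
+  chatHistory: [], // starts empty; follow-up turns are condensed against it
+});
+
+// The message and prior history are condensed into a standalone
+// question, which is then run against the underlying index.
+const response = await chatEngine.chat("What is this document about?");
+console.log(response.toString());
+```
+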
+## Implements
+
+- [`ChatEngine`](../interfaces/ChatEngine.md)
+
+## Constructors
+
+### constructor
+
+• **new CondenseQuestionChatEngine**(`init`)
+
+#### Parameters
+
+| Name                          | Type                                                  |
+| :---------------------------- | :---------------------------------------------------- |
+| `init`                        | `Object`                                              |
+| `init.chatHistory`            | [`ChatMessage`](../interfaces/ChatMessage.md)[]       |
+| `init.condenseMessagePrompt?` | (`__namedParameters`: `Object`) => `string`           |
+| `init.queryEngine`            | [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md) |
+| `init.serviceContext?`        | [`ServiceContext`](../interfaces/ServiceContext.md)   |
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:122](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L122)
+
+## Properties
+
+### chatHistory
+
+• **chatHistory**: [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:118](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L118)
+
+---
+
+### condenseMessagePrompt
+
+• **condenseMessagePrompt**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:120](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L120)
+
+---
+
+### queryEngine
+
+• **queryEngine**: [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:117](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L117)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:119](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L119)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`message`, `chatHistory?`, `streaming?`): `Promise`<`R`\>
+
+Send a message along with the class's current chat history to the LLM.
+
+#### Type parameters
+
+| Name | Type                                                                                            |
+| :--- | :---------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                  |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`Response`](Response.md) |
+
+#### Parameters
+
+| Name           | Type                                            | Description                                                        |
+| :------------- | :---------------------------------------------- | :----------------------------------------------------------------- |
+| `message`      | [`MessageContent`](../#messagecontent)          |                                                                    |
+| `chatHistory?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] | optional chat history to use in place of the engine's stored history |
+| `streaming?`   | `T`                                             | optional streaming flag, which auto-sets the return value if `true`  |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[ChatEngine](../interfaces/ChatEngine.md).[chat](../interfaces/ChatEngine.md#chat)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:147](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L147)
+
+---
+
+### condenseQuestion
+
+▸ `Private` **condenseQuestion**(`chatHistory`, `question`): `Promise`<[`ChatResponse`](../interfaces/ChatResponse.md)\>
+
+#### Parameters
+
+| Name          | Type                                            |
+| :------------ | :---------------------------------------------- |
+| `chatHistory` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+| `question`    | `string`                                        |
+
+#### Returns
+
+`Promise`<[`ChatResponse`](../interfaces/ChatResponse.md)\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:136](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L136)
+
+---
+
+### reset
+
+▸ **reset**(): `void`
+
+Resets the chat history so that it's empty.
+
+#### Returns
+
+`void`
+
+#### Implementation of
+
+[ChatEngine](../interfaces/ChatEngine.md).[reset](../interfaces/ChatEngine.md#reset)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:169](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L169)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ContextChatEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ContextChatEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..225acc86dc206185d78c84c676c6555a65bdd05d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ContextChatEngine.md
@@ -0,0 +1,142 @@
+---
+id: "ContextChatEngine"
+title: "Class: ContextChatEngine"
+sidebar_label: "ContextChatEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+ContextChatEngine uses the Index to retrieve the appropriate context for each query.
+The context is placed in the system prompt, and the chat history is preserved,
+so that relevant context can be surfaced for every message in the conversation.
+
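+A minimal sketch, assuming the `llamaindex` exports used below; the retriever is the only required constructor field:
+
+```typescript
+import { ContextChatEngine, Document, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "..." }),
+]);
+
+// The retriever supplies per-message context, which is injected
+// into the system prompt before each chat turn.
+const chatEngine = new ContextChatEngine({
+  retriever: index.asRetriever(),
+});
+
+const response = await chatEngine.chat("Summarize the document.");
+console.log(response.toString());
+```
+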
+## Implements
+
+- [`ChatEngine`](../interfaces/ChatEngine.md)
+
+## Constructors
+
+### constructor
+
+• **new ContextChatEngine**(`init`)
+
+#### Parameters
+
+| Name                        | Type                                                                |
+| :-------------------------- | :------------------------------------------------------------------ |
+| `init`                      | `Object`                                                            |
+| `init.chatHistory?`         | [`ChatMessage`](../interfaces/ChatMessage.md)[]                     |
+| `init.chatModel?`           | [`LLM`](../interfaces/LLM.md)                                       |
+| `init.contextSystemPrompt?` | (`__namedParameters`: `Object`) => `string`                         |
+| `init.nodePostprocessors?`  | [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[] |
+| `init.retriever`            | [`BaseRetriever`](../interfaces/BaseRetriever.md)                   |
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:243](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L243)
+
+## Properties
+
+### chatHistory
+
+• **chatHistory**: [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:240](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L240)
+
+---
+
+### chatModel
+
+• **chatModel**: [`LLM`](../interfaces/LLM.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:239](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L239)
+
+---
+
+### contextGenerator
+
+• **contextGenerator**: [`ContextGenerator`](../interfaces/ContextGenerator.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:241](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L241)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`message`, `chatHistory?`, `streaming?`): `Promise`<`R`\>
+
+Send a message along with the class's current chat history to the LLM.
+
+#### Type parameters
+
+| Name | Type                                                                                            |
+| :--- | :---------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                  |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`Response`](Response.md) |
+
+#### Parameters
+
+| Name           | Type                                            | Description                                                        |
+| :------------- | :---------------------------------------------- | :----------------------------------------------------------------- |
+| `message`      | [`MessageContent`](../#messagecontent)          |                                                                    |
+| `chatHistory?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] | optional chat history to use in place of the engine's stored history |
+| `streaming?`   | `T`                                             | optional streaming flag, which auto-sets the return value if `true`  |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[ChatEngine](../interfaces/ChatEngine.md).[chat](../interfaces/ChatEngine.md#chat)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:259](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L259)
+
+---
+
+### reset
+
+▸ **reset**(): `void`
+
+Resets the chat history so that it's empty.
+
+#### Returns
+
+`void`
+
+#### Implementation of
+
+[ChatEngine](../interfaces/ChatEngine.md).[reset](../interfaces/ChatEngine.md#reset)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:336](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L336)
+
+---
+
+### streamChat
+
+▸ `Protected` **streamChat**(`message`, `chatHistory?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                                            |
+| :------------- | :---------------------------------------------- |
+| `message`      | [`MessageContent`](../#messagecontent)          |
+| `chatHistory?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:300](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L300)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/DefaultContextGenerator.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/DefaultContextGenerator.md
new file mode 100644
index 0000000000000000000000000000000000000000..11e563b404629f0c7b545f7fc3b534b6277d177c
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/DefaultContextGenerator.md
@@ -0,0 +1,119 @@
+---
+id: "DefaultContextGenerator"
+title: "Class: DefaultContextGenerator"
+sidebar_label: "DefaultContextGenerator"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implements
+
+- [`ContextGenerator`](../interfaces/ContextGenerator.md)
+
+## Constructors
+
+### constructor
+
+• **new DefaultContextGenerator**(`init`)
+
+#### Parameters
+
+| Name                        | Type                                                                |
+| :-------------------------- | :------------------------------------------------------------------ |
+| `init`                      | `Object`                                                            |
+| `init.contextSystemPrompt?` | (`__namedParameters`: `Object`) => `string`                         |
+| `init.nodePostprocessors?`  | [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[] |
+| `init.retriever`            | [`BaseRetriever`](../interfaces/BaseRetriever.md)                   |
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:188](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L188)
+
+## Properties
+
+### contextSystemPrompt
+
+• **contextSystemPrompt**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:185](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L185)
+
+---
+
+### nodePostprocessors
+
+• **nodePostprocessors**: [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:186](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L186)
+
+---
+
+### retriever
+
+• **retriever**: [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:184](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L184)
+
+## Methods
+
+### applyNodePostprocessors
+
+▸ `Private` **applyNodePostprocessors**(`nodes`): [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Parameters
+
+| Name    | Type                                                                             |
+| :------ | :------------------------------------------------------------------------------- |
+| `nodes` | [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:199](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L199)
+
+---
+
+### generate
+
+▸ **generate**(`message`, `parentEvent?`): `Promise`<[`Context`](../interfaces/Context.md)\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `message`      | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<[`Context`](../interfaces/Context.md)\>
+
+#### Implementation of
+
+[ContextGenerator](../interfaces/ContextGenerator.md).[generate](../interfaces/ContextGenerator.md#generate)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:206](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L206)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Document.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Document.md
new file mode 100644
index 0000000000000000000000000000000000000000..32b1afaa01088e6809530fa53c7bd565eb1975c6
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Document.md
@@ -0,0 +1,504 @@
+---
+id: "Document"
+title: "Class: Document<T>"
+sidebar_label: "Document"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A document is just a special text node with a docId.
+
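+For example (a sketch assuming the `llamaindex` package export):
+
+```typescript
+import { Document } from "llamaindex";
+
+// init is a Partial<Document<T>>, so any subset of fields may be set;
+// id_ defaults to a UUID when omitted.
+const doc = new Document({
+  text: "LlamaIndex.TS lets you query your own data.",
+  id_: "doc-1",
+  metadata: { source: "example" },
+});
+```
+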
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Hierarchy
+
+- [`TextNode`](TextNode.md)<`T`\>
+
+  ↳ **`Document`**
+
+## Constructors
+
+### constructor
+
+• **new Document**<`T`\>(`init?`)
+
+#### Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+#### Parameters
+
+| Name    | Type                                        |
+| :------ | :------------------------------------------ |
+| `init?` | `Partial`<[`Document`](Document.md)<`T`\>\> |
+
+#### Overrides
+
+[TextNode](TextNode.md).[constructor](TextNode.md#constructor)
+
+#### Defined in
+
+[packages/core/src/Node.ts:254](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L254)
+
+## Properties
+
+### embedding
+
+• `Optional` **embedding**: `number`[]
+
+#### Inherited from
+
+[TextNode](TextNode.md).[embedding](TextNode.md#embedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L51)
+
+---
+
+### endCharIdx
+
+• `Optional` **endCharIdx**: `number`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[endCharIdx](TextNode.md#endcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L157)
+
+---
+
+### excludedEmbedMetadataKeys
+
+• **excludedEmbedMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[excludedEmbedMetadataKeys](TextNode.md#excludedembedmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L55)
+
+---
+
+### excludedLlmMetadataKeys
+
+• **excludedLlmMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[excludedLlmMetadataKeys](TextNode.md#excludedllmmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L56)
+
+---
+
+### hash
+
+• **hash**: `string` = `""`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[hash](TextNode.md#hash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L58)
+
+---
+
+### id\_
+
+• **id\_**: `string`
+
+The unique ID of the Node/Document. The trailing underscore is here
+to avoid collisions with the id keyword in Python.
+
+Set to a UUID by default.
+
+#### Inherited from
+
+[TextNode](TextNode.md).[id\_](TextNode.md#id_)
+
+#### Defined in
+
+[packages/core/src/Node.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L50)
+
+---
+
+### metadata
+
+• **metadata**: `T`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[metadata](TextNode.md#metadata)
+
+#### Defined in
+
+[packages/core/src/Node.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L54)
+
+---
+
+### metadataSeparator
+
+• **metadataSeparator**: `string` = `"\n"`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[metadataSeparator](TextNode.md#metadataseparator)
+
+#### Defined in
+
+[packages/core/src/Node.ts:160](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L160)
+
+---
+
+### relationships
+
+• **relationships**: `Partial`<`Record`<[`NodeRelationship`](../enums/NodeRelationship.md), [`RelatedNodeType`](../#relatednodetype)<`T`\>\>\> = `{}`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[relationships](TextNode.md#relationships)
+
+#### Defined in
+
+[packages/core/src/Node.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L57)
+
+---
+
+### startCharIdx
+
+• `Optional` **startCharIdx**: `number`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[startCharIdx](TextNode.md#startcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L156)
+
+---
+
+### text
+
+• **text**: `string` = `""`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[text](TextNode.md#text)
+
+#### Defined in
+
+[packages/core/src/Node.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L155)
+
+## Accessors
+
+### childNodes
+
+• `get` **childNodes**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Inherited from
+
+TextNode.childNodes
+
+#### Defined in
+
+[packages/core/src/Node.ts:112](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L112)
+
+---
+
+### nextNode
+
+• `get` **nextNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.nextNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:92](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L92)
+
+---
+
+### parentNode
+
+• `get` **parentNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.parentNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:102](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L102)
+
+---
+
+### prevNode
+
+• `get` **prevNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.prevNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L80)
+
+---
+
+### sourceNode
+
+• `get` **sourceNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.sourceNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L70)
+
+## Methods
+
+### asRelatedNodeInfo
+
+▸ **asRelatedNodeInfo**(): [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+[`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+[TextNode](TextNode.md).[asRelatedNodeInfo](TextNode.md#asrelatednodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L134)
+
+---
+
+### generateHash
+
+▸ **generateHash**(): `string`
+
+Generate a hash of the text node.
+The ID is not part of the hash, as it can change independently of the content.
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[generateHash](TextNode.md#generatehash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:178](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L178)
+
+---
+
+### getContent
+
+▸ **getContent**(`metadataMode?`): `string`
+
+#### Parameters
+
+| Name           | Type                                       | Default value       |
+| :------------- | :----------------------------------------- | :------------------ |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) | `MetadataMode.NONE` |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getContent](TextNode.md#getcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:192](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L192)
+
+---
+
+### getEmbedding
+
+▸ **getEmbedding**(): `number`[]
+
+#### Returns
+
+`number`[]
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getEmbedding](TextNode.md#getembedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:126](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L126)
+
+---
+
+### getMetadataStr
+
+▸ **getMetadataStr**(`metadataMode`): `string`
+
+#### Parameters
+
+| Name           | Type                                       |
+| :------------- | :----------------------------------------- |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getMetadataStr](TextNode.md#getmetadatastr)
+
+#### Defined in
+
+[packages/core/src/Node.ts:197](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L197)
+
+---
+
+### getNodeInfo
+
+▸ **getNodeInfo**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name    | Type                    |
+| :------ | :---------------------- |
+| `end`   | `undefined` \| `number` |
+| `start` | `undefined` \| `number` |
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getNodeInfo](TextNode.md#getnodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:224](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L224)
+
+---
+
+### getText
+
+▸ **getText**(): `string`
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getText](TextNode.md#gettext)
+
+#### Defined in
+
+[packages/core/src/Node.ts:228](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L228)
+
+---
+
+### getType
+
+▸ **getType**(): [`ObjectType`](../enums/ObjectType.md)
+
+#### Returns
+
+[`ObjectType`](../enums/ObjectType.md)
+
+#### Overrides
+
+[TextNode](TextNode.md).[getType](TextNode.md#gettype)
+
+#### Defined in
+
+[packages/core/src/Node.ts:263](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L263)
+
+---
+
+### setContent
+
+▸ **setContent**(`value`): `void`
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `value` | `string` |
+
+#### Returns
+
+`void`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[setContent](TextNode.md#setcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:218](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L218)
+
+---
+
+### toJSON
+
+▸ **toJSON**(): `Record`<`string`, `any`\>
+
+Used with the built-in JSON.stringify.
+
+#### Returns
+
+`Record`<`string`, `any`\>
+
+#### Inherited from
+
+[TextNode](TextNode.md).[toJSON](TextNode.md#tojson)
+
+#### Defined in
+
+[packages/core/src/Node.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L146)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/HTMLReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/HTMLReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..e5367098a3e7318537cdbe8f050ac2444774d013
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/HTMLReader.md
@@ -0,0 +1,109 @@
+---
+id: "HTMLReader"
+title: "Class: HTMLReader"
+sidebar_label: "HTMLReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Extract the significant text from an arbitrary HTML document.
+The contents of any head, script, style, and xml tags are removed completely.
+The URLs for a[href] tags are extracted, along with the inner text of the tag.
+All other tags are removed, and the inner text is kept intact.
+HTML entities (e.g., &amp;) are not decoded.
+
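+A minimal sketch of reading a local HTML file (assuming the `llamaindex` export and a hypothetical `data/page.html` path):
+
+```typescript
+import { HTMLReader } from "llamaindex";
+
+const reader = new HTMLReader();
+
+// Yields zero or one Document containing the stripped text of the page.
+const documents = await reader.loadData("data/page.html");
+console.log(documents[0]?.getText());
+```
+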
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new HTMLReader**()
+
+## Methods
+
+### getOptions
+
+▸ **getOptions**(): `Object`
+
+Wrapper for the configuration options passed to the string-strip-html library
+
+#### Returns
+
+`Object`
+
+An object of options for the underlying library
+
+| Name                             | Type       |
+| :------------------------------- | :--------- |
+| `skipHtmlDecoding`               | `boolean`  |
+| `stripTogetherWithTheirContents` | `string`[] |
+
+**`See`**
+
+https://codsen.com/os/string-strip-html/examples
+
+#### Defined in
+
+[packages/core/src/readers/HTMLReader.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/HTMLReader.ts#L48)
+
+---
+
+### loadData
+
+▸ **loadData**(`file`, `fs?`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Public method for this reader.
+Required by the BaseReader interface.
+
+#### Parameters
+
+| Name   | Type                                                      | Default value | Description                                        |
+| :----- | :-------------------------------------------------------- | :------------ | :------------------------------------------------- |
+| `file` | `string`                                                  | `undefined`   | Path/name of the file to be loaded.                |
+| `fs`   | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`  | fs wrapper interface for getting the file content. |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Promise<Document[]> A Promise object, eventually yielding zero or one Document parsed from the HTML content of the specified file.
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/HTMLReader.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/HTMLReader.ts#L21)
+
+---
+
+### parseContent
+
+▸ **parseContent**(`html`, `options?`): `Promise`<`string`\>
+
+Wrapper for string-strip-html usage.
+
+#### Parameters
+
+| Name      | Type     | Description                                     |
+| :-------- | :------- | :---------------------------------------------- |
+| `html`    | `string` | Raw HTML content to be parsed.                  |
+| `options` | `any`    | An object of options for the underlying library |
+
+#### Returns
+
+`Promise`<`string`\>
+
+The HTML content, stripped of unwanted tags and attributes
+
+**`See`**
+
+getOptions
+
+#### Defined in
+
+[packages/core/src/readers/HTMLReader.ts:38](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/HTMLReader.ts#L38)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/HistoryChatEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/HistoryChatEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..8072a22f718d2a8e01f264eacfdf8c896b470286
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/HistoryChatEngine.md
@@ -0,0 +1,120 @@
+---
+id: "HistoryChatEngine"
+title: "Class: HistoryChatEngine"
+sidebar_label: "HistoryChatEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+HistoryChatEngine is a ChatEngine that uses a `ChatHistory` object
+to keep track of the chat's message history.
+A `ChatHistory` object is passed as a parameter for each call to the `chat` method,
+so the state of the chat engine is preserved between calls.
+Optionally, a `ContextGenerator` can be used to generate additional context for each call to `chat`.
+
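+A minimal sketch, assuming the `llamaindex` package also exports a `SimpleChatHistory` implementation of `ChatHistory` and an `OpenAI` LLM:
+
+```typescript
+import { HistoryChatEngine, OpenAI, SimpleChatHistory } from "llamaindex";
+
+const chatEngine = new HistoryChatEngine({ llm: new OpenAI() });
+
+// The ChatHistory object, not the engine, carries conversation state,
+// so the same engine can serve many independent conversations.
+const chatHistory = new SimpleChatHistory();
+
+await chatEngine.chat("Hi, my name is Ada.", chatHistory);
+const response = await chatEngine.chat("What is my name?", chatHistory);
+console.log(response.toString()); // the history supplied the earlier turn
+```
+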
+## Constructors
+
+### constructor
+
+• **new HistoryChatEngine**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                                    |
+| :------ | :------------------------------------------------------ |
+| `init?` | `Partial`<[`HistoryChatEngine`](HistoryChatEngine.md)\> |
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:381](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L381)
+
+## Properties
+
+### contextGenerator
+
+• `Optional` **contextGenerator**: [`ContextGenerator`](../interfaces/ContextGenerator.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:379](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L379)
+
+---
+
+### llm
+
+• **llm**: [`LLM`](../interfaces/LLM.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:378](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L378)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`message`, `chatHistory`, `streaming?`): `Promise`<`R`\>
+
+#### Type parameters
+
+| Name | Type                                                                                            |
+| :--- | :---------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                  |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`Response`](Response.md) |
+
+#### Parameters
+
+| Name          | Type                                          |
+| :------------ | :-------------------------------------------- |
+| `message`     | [`MessageContent`](../#messagecontent)        |
+| `chatHistory` | [`ChatHistory`](../interfaces/ChatHistory.md) |
+| `streaming?`  | `T`                                           |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:386](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L386)
+
+---
+
+### prepareRequestMessages
+
+▸ `Private` **prepareRequestMessages**(`message`, `chatHistory`): `Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)[]\>
+
+#### Parameters
+
+| Name          | Type                                          |
+| :------------ | :-------------------------------------------- |
+| `message`     | [`MessageContent`](../#messagecontent)        |
+| `chatHistory` | [`ChatHistory`](../interfaces/ChatHistory.md) |
+
+#### Returns
+
+`Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)[]\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:433](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L433)
+
+---
+
+### streamChat
+
+▸ `Protected` **streamChat**(`message`, `chatHistory`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name          | Type                                          |
+| :------------ | :-------------------------------------------- |
+| `message`     | [`MessageContent`](../#messagecontent)        |
+| `chatHistory` | [`ChatHistory`](../interfaces/ChatHistory.md) |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:407](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L407)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ImageDocument.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ImageDocument.md
new file mode 100644
index 0000000000000000000000000000000000000000..f0773a8b2450eb1c31cffd20941a1192ed19b66a
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ImageDocument.md
@@ -0,0 +1,518 @@
+---
+id: "ImageDocument"
+title: "Class: ImageDocument<T>"
+sidebar_label: "ImageDocument"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+TextNode is the default node type for text and the most common node type in LlamaIndex.TS.
+
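+A construction sketch (assuming the `llamaindex` export, and treating a file path string as a valid `ImageType` for illustration):
+
+```typescript
+import { ImageDocument } from "llamaindex";
+
+// image is the one required field; text is optional and inherited
+// from TextNode.
+const imageDoc = new ImageDocument({
+  image: "data/diagram.png",
+  text: "Architecture diagram of the ingestion pipeline.",
+});
+```
+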
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Hierarchy
+
+- [`ImageNode`](ImageNode.md)<`T`\>
+
+  ↳ **`ImageDocument`**
+
+## Constructors
+
+### constructor
+
+• **new ImageDocument**<`T`\>(`init`)
+
+#### Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+#### Parameters
+
+| Name   | Type                                                               |
+| :----- | :----------------------------------------------------------------- |
+| `init` | [`ImageNodeConstructorProps`](../#imagenodeconstructorprops)<`T`\> |
+
+#### Overrides
+
+[ImageNode](ImageNode.md).[constructor](ImageNode.md#constructor)
+
+#### Defined in
+
+[packages/core/src/Node.ts:310](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L310)
+
+## Properties
+
+### embedding
+
+• `Optional` **embedding**: `number`[]
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[embedding](ImageNode.md#embedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L51)
+
+---
+
+### endCharIdx
+
+• `Optional` **endCharIdx**: `number`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[endCharIdx](ImageNode.md#endcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L157)
+
+---
+
+### excludedEmbedMetadataKeys
+
+• **excludedEmbedMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[excludedEmbedMetadataKeys](ImageNode.md#excludedembedmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L55)
+
+---
+
+### excludedLlmMetadataKeys
+
+• **excludedLlmMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[excludedLlmMetadataKeys](ImageNode.md#excludedllmmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L56)
+
+---
+
+### hash
+
+• **hash**: `string` = `""`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[hash](ImageNode.md#hash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L58)
+
+---
+
+### id\_
+
+• **id\_**: `string`
+
+The unique ID of the Node/Document. The trailing underscore is here
+to avoid collisions with the id keyword in Python.
+
+Set to a UUID by default.
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[id\_](ImageNode.md#id_)
+
+#### Defined in
+
+[packages/core/src/Node.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L50)
+
+---
+
+### image
+
+• **image**: [`ImageType`](../#imagetype)
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[image](ImageNode.md#image)
+
+#### Defined in
+
+[packages/core/src/Node.ts:297](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L297)
+
+---
+
+### metadata
+
+• **metadata**: `T`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[metadata](ImageNode.md#metadata)
+
+#### Defined in
+
+[packages/core/src/Node.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L54)
+
+---
+
+### metadataSeparator
+
+• **metadataSeparator**: `string` = `"\n"`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[metadataSeparator](ImageNode.md#metadataseparator)
+
+#### Defined in
+
+[packages/core/src/Node.ts:160](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L160)
+
+---
+
+### relationships
+
+• **relationships**: `Partial`<`Record`<[`NodeRelationship`](../enums/NodeRelationship.md), [`RelatedNodeType`](../#relatednodetype)<`T`\>\>\> = `{}`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[relationships](ImageNode.md#relationships)
+
+#### Defined in
+
+[packages/core/src/Node.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L57)
+
+---
+
+### startCharIdx
+
+• `Optional` **startCharIdx**: `number`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[startCharIdx](ImageNode.md#startcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L156)
+
+---
+
+### text
+
+• **text**: `string` = `""`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[text](ImageNode.md#text)
+
+#### Defined in
+
+[packages/core/src/Node.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L155)
+
+## Accessors
+
+### childNodes
+
+• `get` **childNodes**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Inherited from
+
+ImageNode.childNodes
+
+#### Defined in
+
+[packages/core/src/Node.ts:112](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L112)
+
+---
+
+### nextNode
+
+• `get` **nextNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+ImageNode.nextNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:92](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L92)
+
+---
+
+### parentNode
+
+• `get` **parentNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+ImageNode.parentNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:102](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L102)
+
+---
+
+### prevNode
+
+• `get` **prevNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+ImageNode.prevNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L80)
+
+---
+
+### sourceNode
+
+• `get` **sourceNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+ImageNode.sourceNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L70)
+
+## Methods
+
+### asRelatedNodeInfo
+
+▸ **asRelatedNodeInfo**(): [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+[`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[asRelatedNodeInfo](ImageNode.md#asrelatednodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L134)
+
+---
+
+### generateHash
+
+▸ **generateHash**(): `string`
+
+Generate a hash of the text node.
+The ID is not part of the hash as it can change independently of content.
+
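+Because the id is excluded, two nodes with identical text and metadata should produce the same hash. A hedged illustration (the equality below is the assumed behavior, not a documented guarantee), assuming `TextNode` is exported from the `llamaindex` package:
+
+```typescript
+import { TextNode } from "llamaindex";
+
+// Same text, empty metadata: the content-derived hashes match
+// even though each node gets its own UUID.
+const a = new TextNode({ text: "same content" });
+const b = new TextNode({ text: "same content" });
+console.log(a.generateHash() === b.generateHash()); // true (assumed)
+```
+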
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[generateHash](ImageNode.md#generatehash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:178](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L178)
+
+---
+
+### getContent
+
+▸ **getContent**(`metadataMode?`): `string`
+
+#### Parameters
+
+| Name           | Type                                       | Default value       |
+| :------------- | :----------------------------------------- | :------------------ |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) | `MetadataMode.NONE` |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[getContent](ImageNode.md#getcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:192](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L192)
+
+---
+
+### getEmbedding
+
+▸ **getEmbedding**(): `number`[]
+
+#### Returns
+
+`number`[]
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[getEmbedding](ImageNode.md#getembedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:126](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L126)
+
+---
+
+### getMetadataStr
+
+▸ **getMetadataStr**(`metadataMode`): `string`
+
+#### Parameters
+
+| Name           | Type                                       |
+| :------------- | :----------------------------------------- |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[getMetadataStr](ImageNode.md#getmetadatastr)
+
+#### Defined in
+
+[packages/core/src/Node.ts:197](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L197)
+
+---
+
+### getNodeInfo
+
+▸ **getNodeInfo**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name    | Type                    |
+| :------ | :---------------------- |
+| `end`   | `undefined` \| `number` |
+| `start` | `undefined` \| `number` |
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[getNodeInfo](ImageNode.md#getnodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:224](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L224)
+
+---
+
+### getText
+
+▸ **getText**(): `string`
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[getText](ImageNode.md#gettext)
+
+#### Defined in
+
+[packages/core/src/Node.ts:228](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L228)
+
+---
+
+### getType
+
+▸ **getType**(): [`ObjectType`](../enums/ObjectType.md)
+
+#### Returns
+
+[`ObjectType`](../enums/ObjectType.md)
+
+#### Overrides
+
+[ImageNode](ImageNode.md).[getType](ImageNode.md#gettype)
+
+#### Defined in
+
+[packages/core/src/Node.ts:318](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L318)
+
+---
+
+### setContent
+
+▸ **setContent**(`value`): `void`
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `value` | `string` |
+
+#### Returns
+
+`void`
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[setContent](ImageNode.md#setcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:218](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L218)
+
+---
+
+### toJSON
+
+▸ **toJSON**(): `Record`<`string`, `any`\>
+
+Used with the built-in JSON.stringify.
+
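+For example, passing a node to the standard JSON API picks this method up automatically. A minimal sketch, assuming `TextNode` is exported from the `llamaindex` package entry point:
+
+```typescript
+import { TextNode } from "llamaindex";
+
+// JSON.stringify invokes toJSON() under the hood, so nodes
+// serialize without any extra plumbing.
+const node = new TextNode({ text: "hello" });
+const payload = JSON.stringify(node); // string containing id_, text, metadata, ...
+```
+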
+#### Returns
+
+`Record`<`string`, `any`\>
+
+#### Inherited from
+
+[ImageNode](ImageNode.md).[toJSON](ImageNode.md#tojson)
+
+#### Defined in
+
+[packages/core/src/Node.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L146)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ImageNode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ImageNode.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b83e9b7f464c5257d68d0b877edc08ded2f9d58
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ImageNode.md
@@ -0,0 +1,516 @@
+---
+id: "ImageNode"
+title: "Class: ImageNode<T>"
+sidebar_label: "ImageNode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+ImageNode extends [`TextNode`](TextNode.md) (the default and most common node type in LlamaIndex.TS) with an attached image payload.
+
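+A hedged construction sketch, assuming `ImageNode` is exported from the `llamaindex` package and that a plain URL string is an acceptable [`ImageType`](../#imagetype):
+
+```typescript
+import { ImageNode } from "llamaindex";
+
+// ImageNodeConstructorProps extends the usual TextNode fields
+// with a required `image` value.
+const node = new ImageNode({
+  image: "https://example.com/diagram.png", // assumed-valid ImageType
+  text: "Architecture diagram",
+});
+```
+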
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Hierarchy
+
+- [`TextNode`](TextNode.md)<`T`\>
+
+  ↳ **`ImageNode`**
+
+  ↳↳ [`ImageDocument`](ImageDocument.md)
+
+## Constructors
+
+### constructor
+
+• **new ImageNode**<`T`\>(`init`)
+
+#### Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+#### Parameters
+
+| Name   | Type                                                               |
+| :----- | :----------------------------------------------------------------- |
+| `init` | [`ImageNodeConstructorProps`](../#imagenodeconstructorprops)<`T`\> |
+
+#### Overrides
+
+[TextNode](TextNode.md).[constructor](TextNode.md#constructor)
+
+#### Defined in
+
+[packages/core/src/Node.ts:299](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L299)
+
+## Properties
+
+### embedding
+
+• `Optional` **embedding**: `number`[]
+
+#### Inherited from
+
+[TextNode](TextNode.md).[embedding](TextNode.md#embedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L51)
+
+---
+
+### endCharIdx
+
+• `Optional` **endCharIdx**: `number`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[endCharIdx](TextNode.md#endcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L157)
+
+---
+
+### excludedEmbedMetadataKeys
+
+• **excludedEmbedMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[excludedEmbedMetadataKeys](TextNode.md#excludedembedmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L55)
+
+---
+
+### excludedLlmMetadataKeys
+
+• **excludedLlmMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[excludedLlmMetadataKeys](TextNode.md#excludedllmmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L56)
+
+---
+
+### hash
+
+• **hash**: `string` = `""`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[hash](TextNode.md#hash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L58)
+
+---
+
+### id\_
+
+• **id\_**: `string`
+
+The unique ID of the Node/Document. The trailing underscore is here
+to avoid collisions with the id keyword in Python.
+
+Set to a UUID by default.
+
+#### Inherited from
+
+[TextNode](TextNode.md).[id\_](TextNode.md#id_)
+
+#### Defined in
+
+[packages/core/src/Node.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L50)
+
+---
+
+### image
+
+• **image**: [`ImageType`](../#imagetype)
+
+#### Defined in
+
+[packages/core/src/Node.ts:297](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L297)
+
+---
+
+### metadata
+
+• **metadata**: `T`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[metadata](TextNode.md#metadata)
+
+#### Defined in
+
+[packages/core/src/Node.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L54)
+
+---
+
+### metadataSeparator
+
+• **metadataSeparator**: `string` = `"\n"`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[metadataSeparator](TextNode.md#metadataseparator)
+
+#### Defined in
+
+[packages/core/src/Node.ts:160](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L160)
+
+---
+
+### relationships
+
+• **relationships**: `Partial`<`Record`<[`NodeRelationship`](../enums/NodeRelationship.md), [`RelatedNodeType`](../#relatednodetype)<`T`\>\>\> = `{}`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[relationships](TextNode.md#relationships)
+
+#### Defined in
+
+[packages/core/src/Node.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L57)
+
+---
+
+### startCharIdx
+
+• `Optional` **startCharIdx**: `number`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[startCharIdx](TextNode.md#startcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L156)
+
+---
+
+### text
+
+• **text**: `string` = `""`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[text](TextNode.md#text)
+
+#### Defined in
+
+[packages/core/src/Node.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L155)
+
+## Accessors
+
+### childNodes
+
+• `get` **childNodes**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Inherited from
+
+TextNode.childNodes
+
+#### Defined in
+
+[packages/core/src/Node.ts:112](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L112)
+
+---
+
+### nextNode
+
+• `get` **nextNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.nextNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:92](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L92)
+
+---
+
+### parentNode
+
+• `get` **parentNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.parentNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:102](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L102)
+
+---
+
+### prevNode
+
+• `get` **prevNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.prevNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L80)
+
+---
+
+### sourceNode
+
+• `get` **sourceNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.sourceNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L70)
+
+## Methods
+
+### asRelatedNodeInfo
+
+▸ **asRelatedNodeInfo**(): [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+[`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+[TextNode](TextNode.md).[asRelatedNodeInfo](TextNode.md#asrelatednodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L134)
+
+---
+
+### generateHash
+
+▸ **generateHash**(): `string`
+
+Generate a hash of the text node.
+The ID is not part of the hash as it can change independently of content.
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[generateHash](TextNode.md#generatehash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:178](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L178)
+
+---
+
+### getContent
+
+▸ **getContent**(`metadataMode?`): `string`
+
+#### Parameters
+
+| Name           | Type                                       | Default value       |
+| :------------- | :----------------------------------------- | :------------------ |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) | `MetadataMode.NONE` |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getContent](TextNode.md#getcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:192](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L192)
+
+---
+
+### getEmbedding
+
+▸ **getEmbedding**(): `number`[]
+
+#### Returns
+
+`number`[]
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getEmbedding](TextNode.md#getembedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:126](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L126)
+
+---
+
+### getMetadataStr
+
+▸ **getMetadataStr**(`metadataMode`): `string`
+
+#### Parameters
+
+| Name           | Type                                       |
+| :------------- | :----------------------------------------- |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getMetadataStr](TextNode.md#getmetadatastr)
+
+#### Defined in
+
+[packages/core/src/Node.ts:197](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L197)
+
+---
+
+### getNodeInfo
+
+▸ **getNodeInfo**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name    | Type                    |
+| :------ | :---------------------- |
+| `end`   | `undefined` \| `number` |
+| `start` | `undefined` \| `number` |
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getNodeInfo](TextNode.md#getnodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:224](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L224)
+
+---
+
+### getText
+
+▸ **getText**(): `string`
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getText](TextNode.md#gettext)
+
+#### Defined in
+
+[packages/core/src/Node.ts:228](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L228)
+
+---
+
+### getType
+
+▸ **getType**(): [`ObjectType`](../enums/ObjectType.md)
+
+#### Returns
+
+[`ObjectType`](../enums/ObjectType.md)
+
+#### Overrides
+
+[TextNode](TextNode.md).[getType](TextNode.md#gettype)
+
+#### Defined in
+
+[packages/core/src/Node.ts:304](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L304)
+
+---
+
+### setContent
+
+▸ **setContent**(`value`): `void`
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `value` | `string` |
+
+#### Returns
+
+`void`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[setContent](TextNode.md#setcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:218](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L218)
+
+---
+
+### toJSON
+
+▸ **toJSON**(): `Record`<`string`, `any`\>
+
+Used with the built-in JSON.stringify.
+
+#### Returns
+
+`Record`<`string`, `any`\>
+
+#### Inherited from
+
+[TextNode](TextNode.md).[toJSON](TextNode.md#tojson)
+
+#### Defined in
+
+[packages/core/src/Node.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L146)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/InMemoryFileSystem.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/InMemoryFileSystem.md
new file mode 100644
index 0000000000000000000000000000000000000000..8a8830616c4b75025018d4bbfe6933a289a04967
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/InMemoryFileSystem.md
@@ -0,0 +1,129 @@
+---
+id: "InMemoryFileSystem"
+title: "Class: InMemoryFileSystem"
+sidebar_label: "InMemoryFileSystem"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A filesystem implementation that stores files in memory.
+
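+A minimal usage sketch (inside an async context), assuming `InMemoryFileSystem` is exported from the `llamaindex` package:
+
+```typescript
+import { InMemoryFileSystem } from "llamaindex";
+
+// Files live in a plain in-memory record, which makes this
+// implementation handy for tests or disk-less environments.
+const fs = new InMemoryFileSystem();
+await fs.writeFile("/notes.txt", "hello");
+const text = await fs.readFile("/notes.txt"); // "hello"
+await fs.access("/notes.txt"); // resolves; rejects if the path is missing
+```
+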
+## Implements
+
+- [`GenericFileSystem`](../interfaces/GenericFileSystem.md)
+
+## Constructors
+
+### constructor
+
+• **new InMemoryFileSystem**()
+
+## Properties
+
+### files
+
+• `Private` **files**: `Record`<`string`, `any`\> = `{}`
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L25)
+
+## Methods
+
+### access
+
+▸ **access**(`path`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `path` | `string` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Implementation of
+
+[GenericFileSystem](../interfaces/GenericFileSystem.md).[access](../interfaces/GenericFileSystem.md#access)
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:38](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L38)
+
+---
+
+### mkdir
+
+▸ **mkdir**(`path`, `options?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `path`     | `string` |
+| `options?` | `any`    |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Implementation of
+
+[GenericFileSystem](../interfaces/GenericFileSystem.md).[mkdir](../interfaces/GenericFileSystem.md#mkdir)
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L44)
+
+---
+
+### readFile
+
+▸ **readFile**(`path`, `options?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `path`     | `string` |
+| `options?` | `any`    |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Implementation of
+
+[GenericFileSystem](../interfaces/GenericFileSystem.md).[readFile](../interfaces/GenericFileSystem.md#readfile)
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L31)
+
+---
+
+### writeFile
+
+▸ **writeFile**(`path`, `content`, `options?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `path`     | `string` |
+| `content`  | `string` |
+| `options?` | `any`    |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Implementation of
+
+[GenericFileSystem](../interfaces/GenericFileSystem.md).[writeFile](../interfaces/GenericFileSystem.md#writefile)
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L27)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexDict.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexDict.md
new file mode 100644
index 0000000000000000000000000000000000000000..d235bb2afa6e287aa9ed6bef147b6052d4152ebf
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexDict.md
@@ -0,0 +1,161 @@
+---
+id: "IndexDict"
+title: "Class: IndexDict"
+sidebar_label: "IndexDict"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+The underlying structure of each index.
+
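+A short sketch of how the struct accumulates nodes, assuming `IndexDict` and `TextNode` are exported from the `llamaindex` package:
+
+```typescript
+import { IndexDict, TextNode } from "llamaindex";
+
+// Nodes are kept in a flat record keyed by node id (or by an
+// explicit textId when one is supplied).
+const struct = new IndexDict();
+const node = new TextNode({ text: "hello" });
+struct.addNode(node);
+struct.delete(node.id_); // removes the entry again
+```
+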
+## Hierarchy
+
+- [`IndexStruct`](IndexStruct.md)
+
+  ↳ **`IndexDict`**
+
+## Constructors
+
+### constructor
+
+• **new IndexDict**(`indexId?`, `summary?`)
+
+#### Parameters
+
+| Name      | Type        | Default value |
+| :-------- | :---------- | :------------ |
+| `indexId` | `string`    | `undefined`   |
+| `summary` | `undefined` | `undefined`   |
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[constructor](IndexStruct.md#constructor)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L19)
+
+## Properties
+
+### indexId
+
+• **indexId**: `string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[indexId](IndexStruct.md#indexid)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L16)
+
+---
+
+### nodesDict
+
+• **nodesDict**: `Record`<`string`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\> = `{}`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:46](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L46)
+
+---
+
+### summary
+
+• `Optional` **summary**: `string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[summary](IndexStruct.md#summary)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L17)
+
+---
+
+### type
+
+• **type**: [`IndexStructType`](../enums/IndexStructType.md) = `IndexStructType.SIMPLE_DICT`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:47](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L47)
+
+## Methods
+
+### addNode
+
+▸ **addNode**(`node`, `textId?`): `void`
+
+#### Parameters
+
+| Name      | Type                                                   |
+| :-------- | :----------------------------------------------------- |
+| `node`    | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\> |
+| `textId?` | `string`                                               |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L56)
+
+---
+
+### delete
+
+▸ **delete**(`nodeId`): `void`
+
+#### Parameters
+
+| Name     | Type     |
+| :------- | :------- |
+| `nodeId` | `string` |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:69](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L69)
+
+---
+
+### getSummary
+
+▸ **getSummary**(): `string`
+
+#### Returns
+
+`string`
+
+#### Overrides
+
+[IndexStruct](IndexStruct.md).[getSummary](IndexStruct.md#getsummary)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:49](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L49)
+
+---
+
+### toJson
+
+▸ **toJson**(): `Record`<`string`, `unknown`\>
+
+#### Returns
+
+`Record`<`string`, `unknown`\>
+
+#### Overrides
+
+[IndexStruct](IndexStruct.md).[toJson](IndexStruct.md#tojson)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:61](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L61)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexList.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexList.md
new file mode 100644
index 0000000000000000000000000000000000000000..c9d4da63328c912fc6d30a588017fbe4ba78fd78
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexList.md
@@ -0,0 +1,140 @@
+---
+id: "IndexList"
+title: "Class: IndexList"
+sidebar_label: "IndexList"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+The underlying structure of each index.
+
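+Unlike [`IndexDict`](IndexDict.md), this struct records only node ids, in insertion order. A hedged sketch, assuming `IndexList` and `TextNode` are exported from the `llamaindex` package:
+
+```typescript
+import { IndexList, TextNode } from "llamaindex";
+
+// addNode appends the node's id to the `nodes` array.
+const struct = new IndexList();
+struct.addNode(new TextNode({ text: "first" }));
+struct.addNode(new TextNode({ text: "second" }));
+console.log(struct.nodes); // two UUID strings, in insertion order
+```
+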
+## Hierarchy
+
+- [`IndexStruct`](IndexStruct.md)
+
+  ↳ **`IndexList`**
+
+## Constructors
+
+### constructor
+
+• **new IndexList**(`indexId?`, `summary?`)
+
+#### Parameters
+
+| Name      | Type        | Default value |
+| :-------- | :---------- | :------------ |
+| `indexId` | `string`    | `undefined`   |
+| `summary` | `undefined` | `undefined`   |
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[constructor](IndexStruct.md#constructor)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L19)
+
+## Properties
+
+### indexId
+
+• **indexId**: `string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[indexId](IndexStruct.md#indexid)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L16)
+
+---
+
+### nodes
+
+• **nodes**: `string`[] = `[]`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:94](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L94)
+
+---
+
+### summary
+
+• `Optional` **summary**: `string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[summary](IndexStruct.md#summary)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L17)
+
+---
+
+### type
+
+• **type**: [`IndexStructType`](../enums/IndexStructType.md) = `IndexStructType.LIST`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:95](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L95)
+
+## Methods
+
+### addNode
+
+▸ **addNode**(`node`): `void`
+
+#### Parameters
+
+| Name   | Type                                                   |
+| :----- | :----------------------------------------------------- |
+| `node` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\> |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:97](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L97)
+
+---
+
+### getSummary
+
+▸ **getSummary**(): `string`
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[getSummary](IndexStruct.md#getsummary)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L31)
+
+---
+
+### toJson
+
+▸ **toJson**(): `Record`<`string`, `unknown`\>
+
+#### Returns
+
+`Record`<`string`, `unknown`\>
+
+#### Overrides
+
+[IndexStruct](IndexStruct.md).[toJson](IndexStruct.md#tojson)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:101](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L101)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexNode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexNode.md
new file mode 100644
index 0000000000000000000000000000000000000000..429aaa511106e25ae789472fd339f608bcd99b08
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexNode.md
@@ -0,0 +1,514 @@
+---
+id: "IndexNode"
+title: "Class: IndexNode<T>"
+sidebar_label: "IndexNode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+IndexNode extends [`TextNode`](TextNode.md) (the default and most common node type in LlamaIndex.TS) with an `indexId` that references another index.
+
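+A construction sketch, assuming `IndexNode` is exported from the `llamaindex` package; the `indexId` value is a hypothetical identifier for another index:
+
+```typescript
+import { IndexNode } from "llamaindex";
+
+// An IndexNode is a TextNode that additionally carries an
+// indexId referencing another index.
+const node = new IndexNode({
+  text: "Summary of the Q3 report",
+  indexId: "q3-report-index", // hypothetical target index id
+});
+```
+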
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Hierarchy
+
+- [`TextNode`](TextNode.md)<`T`\>
+
+  ↳ **`IndexNode`**
+
+## Constructors
+
+### constructor
+
+• **new IndexNode**<`T`\>(`init?`)
+
+#### Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+#### Parameters
+
+| Name    | Type                                          |
+| :------ | :-------------------------------------------- |
+| `init?` | `Partial`<[`IndexNode`](IndexNode.md)<`T`\>\> |
+
+#### Overrides
+
+[TextNode](TextNode.md).[constructor](TextNode.md#constructor)
+
+#### Defined in
+
+[packages/core/src/Node.ts:236](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L236)
+
+## Properties
+
+### embedding
+
+• `Optional` **embedding**: `number`[]
+
+#### Inherited from
+
+[TextNode](TextNode.md).[embedding](TextNode.md#embedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L51)
+
+---
+
+### endCharIdx
+
+• `Optional` **endCharIdx**: `number`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[endCharIdx](TextNode.md#endcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L157)
+
+---
+
+### excludedEmbedMetadataKeys
+
+• **excludedEmbedMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[excludedEmbedMetadataKeys](TextNode.md#excludedembedmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L55)
+
+---
+
+### excludedLlmMetadataKeys
+
+• **excludedLlmMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[excludedLlmMetadataKeys](TextNode.md#excludedllmmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L56)
+
+---
+
+### hash
+
+• **hash**: `string` = `""`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[hash](TextNode.md#hash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L58)
+
+---
+
+### id\_
+
+• **id\_**: `string`
+
+The unique ID of the Node/Document. The trailing underscore is here
+to avoid collisions with the id keyword in Python.
+
+Set to a UUID by default.
+
+#### Inherited from
+
+[TextNode](TextNode.md).[id\_](TextNode.md#id_)
+
+#### Defined in
+
+[packages/core/src/Node.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L50)
+
+---
+
+### indexId
+
+• **indexId**: `string` = `""`
+
+#### Defined in
+
+[packages/core/src/Node.ts:234](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L234)
+
+---
+
+### metadata
+
+• **metadata**: `T`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[metadata](TextNode.md#metadata)
+
+#### Defined in
+
+[packages/core/src/Node.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L54)
+
+---
+
+### metadataSeparator
+
+• **metadataSeparator**: `string` = `"\n"`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[metadataSeparator](TextNode.md#metadataseparator)
+
+#### Defined in
+
+[packages/core/src/Node.ts:160](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L160)
+
+---
+
+### relationships
+
+• **relationships**: `Partial`<`Record`<[`NodeRelationship`](../enums/NodeRelationship.md), [`RelatedNodeType`](../#relatednodetype)<`T`\>\>\> = `{}`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[relationships](TextNode.md#relationships)
+
+#### Defined in
+
+[packages/core/src/Node.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L57)
+
+---
+
+### startCharIdx
+
+• `Optional` **startCharIdx**: `number`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[startCharIdx](TextNode.md#startcharidx)
+
+#### Defined in
+
+[packages/core/src/Node.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L156)
+
+---
+
+### text
+
+• **text**: `string` = `""`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[text](TextNode.md#text)
+
+#### Defined in
+
+[packages/core/src/Node.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L155)
+
+## Accessors
+
+### childNodes
+
+• `get` **childNodes**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Inherited from
+
+TextNode.childNodes
+
+#### Defined in
+
+[packages/core/src/Node.ts:112](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L112)
+
+---
+
+### nextNode
+
+• `get` **nextNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.nextNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:92](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L92)
+
+---
+
+### parentNode
+
+• `get` **parentNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.parentNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:102](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L102)
+
+---
+
+### prevNode
+
+• `get` **prevNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.prevNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L80)
+
+---
+
+### sourceNode
+
+• `get` **sourceNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+TextNode.sourceNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L70)
+
+## Methods
+
+### asRelatedNodeInfo
+
+▸ **asRelatedNodeInfo**(): [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+[`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+[TextNode](TextNode.md).[asRelatedNodeInfo](TextNode.md#asrelatednodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L134)
+
+---
+
+### generateHash
+
+▸ **generateHash**(): `string`
+
+Generate a hash of the text node.
+The ID is not part of the hash as it can change independently of content.
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[generateHash](TextNode.md#generatehash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:178](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L178)
+
+---
+
+### getContent
+
+▸ **getContent**(`metadataMode?`): `string`
+
+#### Parameters
+
+| Name           | Type                                       | Default value       |
+| :------------- | :----------------------------------------- | :------------------ |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) | `MetadataMode.NONE` |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getContent](TextNode.md#getcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:192](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L192)
+
+---
+
+### getEmbedding
+
+▸ **getEmbedding**(): `number`[]
+
+#### Returns
+
+`number`[]
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getEmbedding](TextNode.md#getembedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:126](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L126)
+
+---
+
+### getMetadataStr
+
+▸ **getMetadataStr**(`metadataMode`): `string`
+
+#### Parameters
+
+| Name           | Type                                       |
+| :------------- | :----------------------------------------- |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) |
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getMetadataStr](TextNode.md#getmetadatastr)
+
+#### Defined in
+
+[packages/core/src/Node.ts:197](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L197)
+
+---
+
+### getNodeInfo
+
+▸ **getNodeInfo**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name    | Type                    |
+| :------ | :---------------------- |
+| `end`   | `undefined` \| `number` |
+| `start` | `undefined` \| `number` |
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getNodeInfo](TextNode.md#getnodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:224](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L224)
+
+---
+
+### getText
+
+▸ **getText**(): `string`
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[getText](TextNode.md#gettext)
+
+#### Defined in
+
+[packages/core/src/Node.ts:228](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L228)
+
+---
+
+### getType
+
+▸ **getType**(): [`ObjectType`](../enums/ObjectType.md)
+
+#### Returns
+
+[`ObjectType`](../enums/ObjectType.md)
+
+#### Overrides
+
+[TextNode](TextNode.md).[getType](TextNode.md#gettype)
+
+#### Defined in
+
+[packages/core/src/Node.ts:245](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L245)
+
+---
+
+### setContent
+
+▸ **setContent**(`value`): `void`
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `value` | `string` |
+
+#### Returns
+
+`void`
+
+#### Inherited from
+
+[TextNode](TextNode.md).[setContent](TextNode.md#setcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:218](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L218)
+
+---
+
+### toJSON
+
+▸ **toJSON**(): `Record`<`string`, `any`\>
+
+Used with the built-in JSON.stringify.
+
+#### Returns
+
+`Record`<`string`, `any`\>
+
+#### Inherited from
+
+[TextNode](TextNode.md).[toJSON](TextNode.md#tojson)
+
+#### Defined in
+
+[packages/core/src/Node.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L146)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexStruct.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexStruct.md
new file mode 100644
index 0000000000000000000000000000000000000000..6b17cede1fc9a49de35851c117a84afab284a632
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/IndexStruct.md
@@ -0,0 +1,84 @@
+---
+id: "IndexStruct"
+title: "Class: IndexStruct"
+sidebar_label: "IndexStruct"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+The underlying structure of each index.
+
+## Hierarchy
+
+- **`IndexStruct`**
+
+  ↳ [`IndexDict`](IndexDict.md)
+
+  ↳ [`IndexList`](IndexList.md)
+
+  ↳ [`KeywordTable`](KeywordTable.md)
+
+## Constructors
+
+### constructor
+
+• **new IndexStruct**(`indexId?`, `summary?`)
+
+#### Parameters
+
+| Name      | Type        | Default value |
+| :-------- | :---------- | :------------ |
+| `indexId` | `string`    | `undefined`   |
+| `summary` | `undefined` | `undefined`   |
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L19)
+
+## Properties
+
+### indexId
+
+• **indexId**: `string`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L16)
+
+---
+
+### summary
+
+• `Optional` **summary**: `string`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L17)
+
+## Methods
+
+### getSummary
+
+▸ **getSummary**(): `string`
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L31)
+
+---
+
+### toJson
+
+▸ **toJson**(): `Record`<`string`, `unknown`\>
+
+#### Returns
+
+`Record`<`string`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L24)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTable.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTable.md
new file mode 100644
index 0000000000000000000000000000000000000000..47ee1d5792e7ecce990bca6d86d96e5c66364596
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTable.md
@@ -0,0 +1,162 @@
+---
+id: "KeywordTable"
+title: "Class: KeywordTable"
+sidebar_label: "KeywordTable"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+The underlying structure of each index.
+
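+A sketch of the keyword-to-node-id bookkeeping, assuming `KeywordTable` is exported from the `llamaindex` package and that `table` is initialized to an empty `Map`:
+
+```typescript
+import { KeywordTable } from "llamaindex";
+
+// Each keyword maps to the set of node ids that contain it.
+const table = new KeywordTable();
+table.addNode(["llama", "index"], "node-1");
+table.addNode(["llama"], "node-2");
+console.log(table.table.get("llama")); // Set { "node-1", "node-2" }
+table.deleteNode(["llama"], "node-1"); // drops node-1 from that set
+```
+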
+## Hierarchy
+
+- [`IndexStruct`](IndexStruct.md)
+
+  ↳ **`KeywordTable`**
+
+## Constructors
+
+### constructor
+
+• **new KeywordTable**(`indexId?`, `summary?`)
+
+#### Parameters
+
+| Name      | Type        | Default value |
+| :-------- | :---------- | :------------ |
+| `indexId` | `string`    | `undefined`   |
+| `summary` | `undefined` | `undefined`   |
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[constructor](IndexStruct.md#constructor)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L19)
+
+## Properties
+
+### indexId
+
+• **indexId**: `string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[indexId](IndexStruct.md#indexid)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L16)
+
+---
+
+### summary
+
+• `Optional` **summary**: `string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[summary](IndexStruct.md#summary)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L17)
+
+---
+
+### table
+
+• **table**: `Map`<`string`, `Set`<`string`\>\>
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:112](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L112)
+
+---
+
+### type
+
+• **type**: [`IndexStructType`](../enums/IndexStructType.md) = `IndexStructType.KEYWORD_TABLE`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:113](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L113)
+
+## Methods
+
+### addNode
+
+▸ **addNode**(`keywords`, `nodeId`): `void`
+
+#### Parameters
+
+| Name       | Type       |
+| :--------- | :--------- |
+| `keywords` | `string`[] |
+| `nodeId`   | `string`   |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:114](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L114)
+
+---
+
+### deleteNode
+
+▸ **deleteNode**(`keywords`, `nodeId`): `void`
+
+#### Parameters
+
+| Name       | Type       |
+| :--------- | :--------- |
+| `keywords` | `string`[] |
+| `nodeId`   | `string`   |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:123](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L123)
+
+---
+
+### getSummary
+
+▸ **getSummary**(): `string`
+
+#### Returns
+
+`string`
+
+#### Inherited from
+
+[IndexStruct](IndexStruct.md).[getSummary](IndexStruct.md#getsummary)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L31)
+
+---
+
+### toJson
+
+▸ **toJson**(): `Record`<`string`, `unknown`\>
+
+#### Returns
+
+`Record`<`string`, `unknown`\>
+
+#### Overrides
+
+[IndexStruct](IndexStruct.md).[toJson](IndexStruct.md#tojson)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:131](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L131)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableIndex.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableIndex.md
new file mode 100644
index 0000000000000000000000000000000000000000..fd3164fb2dc62f4ac1d0cd7cd326aee9f57f58df
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableIndex.md
@@ -0,0 +1,382 @@
+---
+id: "KeywordTableIndex"
+title: "Class: KeywordTableIndex"
+sidebar_label: "KeywordTableIndex"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+The KeywordTableIndex extracts keywords from each Node and builds a mapping from each keyword to the Nodes that contain it.
+
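+An end-to-end sketch (inside an async context), assuming `Document` and `KeywordTableIndex` are exported from the `llamaindex` package and that `query` accepts a plain string in this version:
+
+```typescript
+import { Document, KeywordTableIndex } from "llamaindex";
+
+// Build the index from raw documents; keyword extraction runs
+// through the LLM of the (default) service context.
+const index = await KeywordTableIndex.fromDocuments([
+  new Document({ text: "LlamaIndex.TS brings retrieval to TypeScript." }),
+]);
+
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("What does LlamaIndex.TS do?");
+console.log(response.toString());
+```
+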
+## Hierarchy
+
+- [`BaseIndex`](BaseIndex.md)<[`KeywordTable`](KeywordTable.md)\>
+
+  ↳ **`KeywordTableIndex`**
+
+## Constructors
+
+### constructor
+
+• **new KeywordTableIndex**(`init`)
+
+#### Parameters
+
+| Name   | Type                                                                                  |
+| :----- | :------------------------------------------------------------------------------------ |
+| `init` | [`BaseIndexInit`](../interfaces/BaseIndexInit.md)<[`KeywordTable`](KeywordTable.md)\> |
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[constructor](BaseIndex.md#constructor)
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:49](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L49)
+
+## Properties
+
+### docStore
+
+• **docStore**: [`BaseDocumentStore`](BaseDocumentStore.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[docStore](BaseIndex.md#docstore)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L156)
+
+---
+
+### indexStore
+
+• `Optional` **indexStore**: [`BaseIndexStore`](BaseIndexStore.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[indexStore](BaseIndex.md#indexstore)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:158](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L158)
+
+---
+
+### indexStruct
+
+• **indexStruct**: [`KeywordTable`](KeywordTable.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[indexStruct](BaseIndex.md#indexstruct)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:159](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L159)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[serviceContext](BaseIndex.md#servicecontext)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:154](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L154)
+
+---
+
+### storageContext
+
+• **storageContext**: [`StorageContext`](../interfaces/StorageContext.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[storageContext](BaseIndex.md#storagecontext)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L155)
+
+---
+
+### vectorStore
+
+• `Optional` **vectorStore**: [`VectorStore`](../interfaces/VectorStore.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[vectorStore](BaseIndex.md#vectorstore)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L157)
+
+## Methods
+
+### asQueryEngine
+
+▸ **asQueryEngine**(`options?`): [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+Create a new query engine from the index. It will also create a retriever
+and response synthesizer if they are not provided.
+
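+For example, continuing from an `index` built as in the class-level sketch above, a custom retriever can be supplied instead of the default one:
+
+```typescript
+// All names besides asQueryEngine/asRetriever are assumptions
+// about your own surrounding setup.
+const retriever = index.asRetriever();
+const queryEngine = index.asQueryEngine({ retriever });
+```
+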
+#### Parameters
+
+| Name                           | Type                                                                | Description                                                      |
+| :----------------------------- | :------------------------------------------------------------------ | :--------------------------------------------------------------- |
+| `options?`                     | `Object`                                                            | you can supply your own custom Retriever and ResponseSynthesizer |
+| `options.nodePostprocessors?`  | [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[] | -                                                                |
+| `options.preFilters?`          | `unknown`                                                           | -                                                                |
+| `options.responseSynthesizer?` | [`ResponseSynthesizer`](ResponseSynthesizer.md)                     | -                                                                |
+| `options.retriever?`           | [`BaseRetriever`](../interfaces/BaseRetriever.md)                   | -                                                                |
+
+#### Returns
+
+[`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[asQueryEngine](BaseIndex.md#asqueryengine)
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:130](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L130)
+
+---
+
+### asRetriever
+
+▸ **asRetriever**(`options?`): [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+Create a new retriever from the index.
+
+#### Parameters
+
+| Name       | Type  |
+| :--------- | :---- |
+| `options?` | `any` |
+
+#### Returns
+
+[`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[asRetriever](BaseIndex.md#asretriever)
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:119](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L119)
+
+---
+
+### deleteNode
+
+▸ **deleteNode**(`nodeId`): `void`
+
+#### Parameters
+
+| Name     | Type     |
+| :------- | :------- |
+| `nodeId` | `string` |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:224](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L224)
+
+---
+
+### deleteNodes
+
+▸ **deleteNodes**(`nodeIds`, `deleteFromDocStore`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name                 | Type       |
+| :------------------- | :--------- |
+| `nodeIds`            | `string`[] |
+| `deleteFromDocStore` | `boolean`  |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:242](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L242)
+
+---
+
+### deleteRefDoc
+
+▸ **deleteRefDoc**(`refDocId`, `deleteFromDocStore?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name                  | Type      |
+| :-------------------- | :-------- |
+| `refDocId`            | `string`  |
+| `deleteFromDocStore?` | `boolean` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[deleteRefDoc](BaseIndex.md#deleterefdoc)
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:256](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L256)
+
+---
+
+### insert
+
+▸ **insert**(`document`): `Promise`<`void`\>
+
+Insert a document into the index.
+
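+For example, continuing from an existing `index` (see the class-level sketch above):
+
+```typescript
+import { Document } from "llamaindex";
+
+// Assumed flow: the document is run through the node parser,
+// then its nodes are inserted into the keyword table.
+await index.insert(new Document({ text: "A freshly added document." }));
+```
+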
+#### Parameters
+
+| Name       | Type                                                   |
+| :--------- | :----------------------------------------------------- |
+| `document` | [`Document`](Document.md)<[`Metadata`](../#metadata)\> |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[insert](BaseIndex.md#insert)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:190](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L190)
+
+---
+
+### insertNodes
+
+▸ **insertNodes**(`nodes`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name    | Type                                                     |
+| :------ | :------------------------------------------------------- |
+| `nodes` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[insertNodes](BaseIndex.md#insertnodes)
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:214](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L214)
+
+---
+
+### buildIndexFromNodes
+
+▸ `Static` **buildIndexFromNodes**(`nodes`, `docStore`, `serviceContext`): `Promise`<[`KeywordTable`](KeywordTable.md)\>
+
+Get keywords for nodes and place them into the index.
+
+#### Parameters
+
+| Name             | Type                                                     |
+| :--------------- | :------------------------------------------------------- |
+| `nodes`          | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+| `docStore`       | [`BaseDocumentStore`](BaseDocumentStore.md)              |
+| `serviceContext` | [`ServiceContext`](../interfaces/ServiceContext.md)      |
+
+#### Returns
+
+`Promise`<[`KeywordTable`](KeywordTable.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:197](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L197)
+
+---
+
+### extractKeywords
+
+▸ `Static` **extractKeywords**(`text`, `serviceContext`): `Promise`<`Set`<`string`\>\>
+
+#### Parameters
+
+| Name             | Type                                                |
+| :--------------- | :-------------------------------------------------- |
+| `text`           | `string`                                            |
+| `serviceContext` | [`ServiceContext`](../interfaces/ServiceContext.md) |
+
+#### Returns
+
+`Promise`<`Set`<`string`\>\>
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:145](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L145)
+
+---
+
+### fromDocuments
+
+▸ `Static` **fromDocuments**(`documents`, `args?`): `Promise`<[`KeywordTableIndex`](KeywordTableIndex.md)\>
+
+High-level API: split documents, extract keywords, and build the index.
+
+#### Parameters
+
+| Name                   | Type                                                     |
+| :--------------------- | :------------------------------------------------------- |
+| `documents`            | [`Document`](Document.md)<[`Metadata`](../#metadata)\>[] |
+| `args`                 | `Object`                                                 |
+| `args.serviceContext?` | [`ServiceContext`](../interfaces/ServiceContext.md)      |
+| `args.storageContext?` | [`StorageContext`](../interfaces/StorageContext.md)      |
+
+#### Returns
+
+`Promise`<[`KeywordTableIndex`](KeywordTableIndex.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:164](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L164)
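+
+A minimal sketch of this high-level API (the document text is illustrative, and `serviceContextFromDefaults` is assumed to be exported by the package):
+
+```typescript
+import {
+  Document,
+  KeywordTableIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// Split the documents, extract keywords, and build the index in one call.
+const index = await KeywordTableIndex.fromDocuments(
+  [new Document({ text: "The quick brown fox jumps over the lazy dog." })],
+  { serviceContext: serviceContextFromDefaults() },
+);
+```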
+
+---
+
+### init
+
+▸ `Static` **init**(`options`): `Promise`<[`KeywordTableIndex`](KeywordTableIndex.md)\>
+
+#### Parameters
+
+| Name      | Type                  |
+| :-------- | :-------------------- |
+| `options` | `KeywordIndexOptions` |
+
+#### Returns
+
+`Promise`<[`KeywordTableIndex`](KeywordTableIndex.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:53](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L53)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableLLMRetriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableLLMRetriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..33d36de40c19cea8c0c90aa164608b1458350156
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableLLMRetriever.md
@@ -0,0 +1,244 @@
+---
+id: "KeywordTableLLMRetriever"
+title: "Class: KeywordTableLLMRetriever"
+sidebar_label: "KeywordTableLLMRetriever"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- `BaseKeywordTableRetriever`
+
+  ↳ **`KeywordTableLLMRetriever`**
+
+## Constructors
+
+### constructor
+
+• **new KeywordTableLLMRetriever**(`«destructured»`)
+
+#### Parameters
+
+| Name                             | Type                                        |
+| :------------------------------- | :------------------------------------------ |
+| `«destructured»`                 | `Object`                                    |
+| › `index`                        | [`KeywordTableIndex`](KeywordTableIndex.md) |
+| › `keywordExtractTemplate?`      | (`__namedParameters`: `Object`) => `string` |
+| › `maxKeywordsPerQuery`          | `number`                                    |
+| › `numChunksPerQuery`            | `number`                                    |
+| › `queryKeywordExtractTemplate?` | (`__namedParameters`: `Object`) => `string` |
+
+#### Inherited from
+
+BaseKeywordTableRetriever.constructor
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L31)
+
+## Properties
+
+### docstore
+
+• `Protected` **docstore**: [`BaseDocumentStore`](BaseDocumentStore.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.docstore
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L23)
+
+---
+
+### index
+
+• `Protected` **index**: [`KeywordTableIndex`](KeywordTableIndex.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.index
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L21)
+
+---
+
+### indexStruct
+
+• `Protected` **indexStruct**: [`KeywordTable`](KeywordTable.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.indexStruct
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L22)
+
+---
+
+### keywordExtractTemplate
+
+• `Protected` **keywordExtractTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.keywordExtractTemplate
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:28](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L28)
+
+---
+
+### maxKeywordsPerQuery
+
+• `Protected` **maxKeywordsPerQuery**: `number`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.maxKeywordsPerQuery
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L26)
+
+---
+
+### numChunksPerQuery
+
+• `Protected` **numChunksPerQuery**: `number`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.numChunksPerQuery
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L27)
+
+---
+
+### queryKeywordExtractTemplate
+
+• `Protected` **queryKeywordExtractTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.queryKeywordExtractTemplate
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L29)
+
+---
+
+### serviceContext
+
+• `Protected` **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.serviceContext
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L24)
+
+## Methods
+
+### getKeywords
+
+▸ **getKeywords**(`query`): `Promise`<`string`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+#### Overrides
+
+BaseKeywordTableRetriever.getKeywords
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:88](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L88)
+
+---
+
+### getServiceContext
+
+▸ **getServiceContext**(): [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Returns
+
+[`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.getServiceContext
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:81](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L81)
+
+---
+
+### retrieve
+
+▸ **retrieve**(`query`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Inherited from
+
+BaseKeywordTableRetriever.retrieve
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L59)
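+
+A sketch of wiring up this retriever (assuming `index` is an existing [`KeywordTableIndex`](KeywordTableIndex.md); the parameter values and query are illustrative):
+
+```typescript
+import { KeywordTableLLMRetriever } from "llamaindex";
+
+// Extract keywords from the query with the LLM, then look them up in the table.
+const retriever = new KeywordTableLLMRetriever({
+  index,
+  maxKeywordsPerQuery: 10,
+  numChunksPerQuery: 10,
+});
+const nodesWithScore = await retriever.retrieve("What is a keyword table?");
+console.log(nodesWithScore.length);
+```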
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableRAKERetriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableRAKERetriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..e9d35c1efe3778092540881a5714e5dbf3f1f1af
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableRAKERetriever.md
@@ -0,0 +1,244 @@
+---
+id: "KeywordTableRAKERetriever"
+title: "Class: KeywordTableRAKERetriever"
+sidebar_label: "KeywordTableRAKERetriever"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- `BaseKeywordTableRetriever`
+
+  ↳ **`KeywordTableRAKERetriever`**
+
+## Constructors
+
+### constructor
+
+• **new KeywordTableRAKERetriever**(`«destructured»`)
+
+#### Parameters
+
+| Name                             | Type                                        |
+| :------------------------------- | :------------------------------------------ |
+| `«destructured»`                 | `Object`                                    |
+| › `index`                        | [`KeywordTableIndex`](KeywordTableIndex.md) |
+| › `keywordExtractTemplate?`      | (`__namedParameters`: `Object`) => `string` |
+| › `maxKeywordsPerQuery`          | `number`                                    |
+| › `numChunksPerQuery`            | `number`                                    |
+| › `queryKeywordExtractTemplate?` | (`__namedParameters`: `Object`) => `string` |
+
+#### Inherited from
+
+BaseKeywordTableRetriever.constructor
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L31)
+
+## Properties
+
+### docstore
+
+• `Protected` **docstore**: [`BaseDocumentStore`](BaseDocumentStore.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.docstore
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L23)
+
+---
+
+### index
+
+• `Protected` **index**: [`KeywordTableIndex`](KeywordTableIndex.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.index
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L21)
+
+---
+
+### indexStruct
+
+• `Protected` **indexStruct**: [`KeywordTable`](KeywordTable.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.indexStruct
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L22)
+
+---
+
+### keywordExtractTemplate
+
+• `Protected` **keywordExtractTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.keywordExtractTemplate
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:28](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L28)
+
+---
+
+### maxKeywordsPerQuery
+
+• `Protected` **maxKeywordsPerQuery**: `number`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.maxKeywordsPerQuery
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L26)
+
+---
+
+### numChunksPerQuery
+
+• `Protected` **numChunksPerQuery**: `number`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.numChunksPerQuery
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L27)
+
+---
+
+### queryKeywordExtractTemplate
+
+• `Protected` **queryKeywordExtractTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.queryKeywordExtractTemplate
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L29)
+
+---
+
+### serviceContext
+
+• `Protected` **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.serviceContext
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L24)
+
+## Methods
+
+### getKeywords
+
+▸ **getKeywords**(`query`): `Promise`<`string`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+#### Overrides
+
+BaseKeywordTableRetriever.getKeywords
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:114](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L114)
+
+---
+
+### getServiceContext
+
+▸ **getServiceContext**(): [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Returns
+
+[`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.getServiceContext
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:81](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L81)
+
+---
+
+### retrieve
+
+▸ **retrieve**(`query`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Inherited from
+
+BaseKeywordTableRetriever.retrieve
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L59)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableSimpleRetriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableSimpleRetriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..332b2a33ec7cfb473b4378c29f4964c7b98a18af
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/KeywordTableSimpleRetriever.md
@@ -0,0 +1,244 @@
+---
+id: "KeywordTableSimpleRetriever"
+title: "Class: KeywordTableSimpleRetriever"
+sidebar_label: "KeywordTableSimpleRetriever"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- `BaseKeywordTableRetriever`
+
+  ↳ **`KeywordTableSimpleRetriever`**
+
+## Constructors
+
+### constructor
+
+• **new KeywordTableSimpleRetriever**(`«destructured»`)
+
+#### Parameters
+
+| Name                             | Type                                        |
+| :------------------------------- | :------------------------------------------ |
+| `«destructured»`                 | `Object`                                    |
+| › `index`                        | [`KeywordTableIndex`](KeywordTableIndex.md) |
+| › `keywordExtractTemplate?`      | (`__namedParameters`: `Object`) => `string` |
+| › `maxKeywordsPerQuery`          | `number`                                    |
+| › `numChunksPerQuery`            | `number`                                    |
+| › `queryKeywordExtractTemplate?` | (`__namedParameters`: `Object`) => `string` |
+
+#### Inherited from
+
+BaseKeywordTableRetriever.constructor
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L31)
+
+## Properties
+
+### docstore
+
+• `Protected` **docstore**: [`BaseDocumentStore`](BaseDocumentStore.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.docstore
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L23)
+
+---
+
+### index
+
+• `Protected` **index**: [`KeywordTableIndex`](KeywordTableIndex.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.index
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L21)
+
+---
+
+### indexStruct
+
+• `Protected` **indexStruct**: [`KeywordTable`](KeywordTable.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.indexStruct
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L22)
+
+---
+
+### keywordExtractTemplate
+
+• `Protected` **keywordExtractTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.keywordExtractTemplate
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:28](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L28)
+
+---
+
+### maxKeywordsPerQuery
+
+• `Protected` **maxKeywordsPerQuery**: `number`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.maxKeywordsPerQuery
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L26)
+
+---
+
+### numChunksPerQuery
+
+• `Protected` **numChunksPerQuery**: `number`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.numChunksPerQuery
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L27)
+
+---
+
+### queryKeywordExtractTemplate
+
+• `Protected` **queryKeywordExtractTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Inherited from
+
+BaseKeywordTableRetriever.queryKeywordExtractTemplate
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L29)
+
+---
+
+### serviceContext
+
+• `Protected` **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.serviceContext
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L24)
+
+## Methods
+
+### getKeywords
+
+▸ **getKeywords**(`query`): `Promise`<`string`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+#### Overrides
+
+BaseKeywordTableRetriever.getKeywords
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:105](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L105)
+
+---
+
+### getServiceContext
+
+▸ **getServiceContext**(): [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Returns
+
+[`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+BaseKeywordTableRetriever.getServiceContext
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:81](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L81)
+
+---
+
+### retrieve
+
+▸ **retrieve**(`query`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Inherited from
+
+BaseKeywordTableRetriever.retrieve
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndexRetriever.ts#L59)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/LLMQuestionGenerator.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/LLMQuestionGenerator.md
new file mode 100644
index 0000000000000000000000000000000000000000..5a6fff0e1bbde9d390e46c164e0694b09635182c
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/LLMQuestionGenerator.md
@@ -0,0 +1,98 @@
+---
+id: "LLMQuestionGenerator"
+title: "Class: LLMQuestionGenerator"
+sidebar_label: "LLMQuestionGenerator"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+LLMQuestionGenerator uses the LLM to generate sub-questions for the LLM from a set of tools and a user query.
+
+## Implements
+
+- [`BaseQuestionGenerator`](../interfaces/BaseQuestionGenerator.md)
+
+## Constructors
+
+### constructor
+
+• **new LLMQuestionGenerator**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                                          |
+| :------ | :------------------------------------------------------------ |
+| `init?` | `Partial`<[`LLMQuestionGenerator`](LLMQuestionGenerator.md)\> |
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L34)
+
+## Properties
+
+### llm
+
+• **llm**: [`LLM`](../interfaces/LLM.md)
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:30](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L30)
+
+---
+
+### outputParser
+
+• **outputParser**: [`BaseOutputParser`](../interfaces/BaseOutputParser.md)<[`StructuredOutput`](../interfaces/StructuredOutput.md)<[`SubQuestion`](../interfaces/SubQuestion.md)[]\>\>
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L32)
+
+---
+
+### prompt
+
+• **prompt**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L31)
+
+## Methods
+
+### generate
+
+▸ **generate**(`tools`, `query`): `Promise`<[`SubQuestion`](../interfaces/SubQuestion.md)[]\>
+
+#### Parameters
+
+| Name    | Type                                              |
+| :------ | :------------------------------------------------ |
+| `tools` | [`ToolMetadata`](../interfaces/ToolMetadata.md)[] |
+| `query` | `string`                                          |
+
+#### Returns
+
+`Promise`<[`SubQuestion`](../interfaces/SubQuestion.md)[]\>
+
+#### Implementation of
+
+[BaseQuestionGenerator](../interfaces/BaseQuestionGenerator.md).[generate](../interfaces/BaseQuestionGenerator.md#generate)
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:40](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L40)
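+
+A minimal sketch (the tool metadata and query are illustrative):
+
+```typescript
+import { LLMQuestionGenerator } from "llamaindex";
+
+// Decompose a user query into sub-questions, one per relevant tool.
+const generator = new LLMQuestionGenerator();
+const subQuestions = await generator.generate(
+  [{ name: "uber_10k", description: "Uber 2021 10-K filing" }],
+  "How did Uber's revenue change in 2021?",
+);
+console.log(subQuestions);
+```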
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/LlamaDeuce.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/LlamaDeuce.md
new file mode 100644
index 0000000000000000000000000000000000000000..8b183d96f58caad9752691b13c440110678035b1
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/LlamaDeuce.md
@@ -0,0 +1,325 @@
+---
+id: "LlamaDeuce"
+title: "Class: LlamaDeuce"
+sidebar_label: "LlamaDeuce"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Llama2 LLM implementation
+
+## Implements
+
+- [`LLM`](../interfaces/LLM.md)
+
+## Constructors
+
+### constructor
+
+• **new LlamaDeuce**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                      |
+| :------ | :---------------------------------------- |
+| `init?` | `Partial`<[`LlamaDeuce`](LlamaDeuce.md)\> |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:436](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L436)
+
+## Properties
+
+### chatStrategy
+
+• **chatStrategy**: [`DeuceChatStrategy`](../enums/DeuceChatStrategy.md)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:429](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L429)
+
+---
+
+### hasStreaming
+
+• **hasStreaming**: `boolean`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[hasStreaming](../interfaces/LLM.md#hasstreaming)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:434](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L434)
+
+---
+
+### maxTokens
+
+• `Optional` **maxTokens**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:432](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L432)
+
+---
+
+### model
+
+• **model**: `"Llama-2-70b-chat-old"` \| `"Llama-2-70b-chat-4bit"` \| `"Llama-2-13b-chat-old"` \| `"Llama-2-13b-chat-4bit"` \| `"Llama-2-7b-chat-old"` \| `"Llama-2-7b-chat-4bit"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:428](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L428)
+
+---
+
+### replicateSession
+
+• **replicateSession**: `ReplicateSession`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:433](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L433)
+
+---
+
+### temperature
+
+• **temperature**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:430](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L430)
+
+---
+
+### topP
+
+• **topP**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:431](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L431)
+
+## Accessors
+
+### metadata
+
+• `get` **metadata**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name            | Type                                                                                                                                                                  |
+| :-------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `contextWindow` | `number`                                                                                                                                                              |
+| `maxTokens`     | `undefined` \| `number`                                                                                                                                               |
+| `model`         | `"Llama-2-70b-chat-old"` \| `"Llama-2-70b-chat-4bit"` \| `"Llama-2-13b-chat-old"` \| `"Llama-2-13b-chat-4bit"` \| `"Llama-2-7b-chat-old"` \| `"Llama-2-7b-chat-4bit"` |
+| `temperature`   | `number`                                                                                                                                                              |
+| `tokenizer`     | `undefined`                                                                                                                                                           |
+| `topP`          | `number`                                                                                                                                                              |
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[metadata](../interfaces/LLM.md#metadata)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:456](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L456)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`messages`, `_parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a chat response from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name            | Type                                            | Description                                                                                      |
+| :-------------- | :---------------------------------------------- | :----------------------------------------------------------------------------------------------- |
+| `messages`      | [`ChatMessage`](../interfaces/ChatMessage.md)[] | The return type of chat() and complete() is determined by whether the "streaming" parameter is set to true. |
+| `_parentEvent?` | [`Event`](../interfaces/Event.md)               | -                                                                                                |
+| `streaming?`    | `T`                                             | -                                                                                                |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[chat](../interfaces/LLM.md#chat)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:594](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L594)
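+
+A sketch of a non-streaming chat call (assuming a Replicate API token is configured in the environment; the chat strategy and message are illustrative):
+
+```typescript
+import { DeuceChatStrategy, LlamaDeuce } from "llamaindex";
+
+const llama = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
+const response = await llama.chat([
+  { role: "user", content: "Say hello in one sentence." },
+]);
+console.log(response.message.content);
+```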
+
+---
+
+### complete
+
+▸ **complete**<`T`, `R`\>(`prompt`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a prompt completion from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                              | Description            |
+| :------------- | :-------------------------------- | :--------------------- |
+| `prompt`       | `string`                          | the prompt to complete |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) | -                      |
+| `streaming?`   | `T`                               | -                      |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[complete](../interfaces/LLM.md#complete)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:634](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L634)
+
+---
+
+### mapMessageTypeA16Z
+
+▸ **mapMessageTypeA16Z**(`messageType`): `string`
+
+#### Parameters
+
+| Name          | Type                             |
+| :------------ | :------------------------------- |
+| `messageType` | [`MessageType`](../#messagetype) |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:503](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L503)
+
+---
+
+### mapMessagesToPrompt
+
+▸ **mapMessagesToPrompt**(`messages`): `Object`
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `messages` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`Object`
+
+| Name           | Type     |
+| :------------- | :------- |
+| `prompt`       | `string` |
+| `systemPrompt` | `any`    |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:467](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L467)
+
+---
+
+### mapMessagesToPromptA16Z
+
+▸ **mapMessagesToPromptA16Z**(`messages`): `Object`
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `messages` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`Object`
+
+| Name           | Type        |
+| :------------- | :---------- |
+| `prompt`       | `string`    |
+| `systemPrompt` | `undefined` |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:489](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L489)
+
+---
+
+### mapMessagesToPromptMeta
+
+▸ **mapMessagesToPromptMeta**(`messages`, `opts?`): `Object`
+
+#### Parameters
+
+| Name                  | Type                                            |
+| :-------------------- | :---------------------------------------------- |
+| `messages`            | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+| `opts?`               | `Object`                                        |
+| `opts.replicate4Bit?` | `boolean`                                       |
+| `opts.withBos?`       | `boolean`                                       |
+| `opts.withNewlines?`  | `boolean`                                       |
+
+#### Returns
+
+`Object`
+
+| Name           | Type     |
+| :------------- | :------- |
+| `prompt`       | `string` |
+| `systemPrompt` | `any`    |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:516](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L516)
+
+---
+
+### tokens
+
+▸ **tokens**(`messages`): `number`
+
+Calculates the number of tokens needed for the given chat messages
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `messages` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`number`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[tokens](../interfaces/LLM.md#tokens)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:452](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L452)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MarkdownReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MarkdownReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..31fc4817dc58e6ea9851054e1d6247da469be033
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MarkdownReader.md
@@ -0,0 +1,161 @@
+---
+id: "MarkdownReader"
+title: "Class: MarkdownReader"
+sidebar_label: "MarkdownReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Extract text from markdown files.
+Returns a dictionary whose keys are the headers and whose values are the text between those headers.
+
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new MarkdownReader**(`removeHyperlinks?`, `removeImages?`)
+
+#### Parameters
+
+| Name                | Type      | Default value | Description                                     |
+| :------------------ | :-------- | :------------ | :---------------------------------------------- |
+| `removeHyperlinks?` | `boolean` | `true`        | Indicates whether hyperlinks should be removed. |
+| `removeImages?`     | `boolean` | `true`        | Indicates whether images should be removed.     |
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L19)
+
+## Properties
+
+### \_removeHyperlinks
+
+• `Private` **\_removeHyperlinks**: `boolean`
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L12)
+
+---
+
+### \_removeImages
+
+• `Private` **\_removeImages**: `boolean`
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L13)
+
+## Methods
+
+### loadData
+
+▸ **loadData**(`file`, `fs?`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name   | Type                                                      | Default value |
+| :----- | :-------------------------------------------------------- | :------------ |
+| `file` | `string`                                                  | `undefined`   |
+| `fs`   | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`  |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:90](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L90)
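+
+A sketch of loading a markdown file (the path is illustrative):
+
+```typescript
+import { MarkdownReader } from "llamaindex";
+
+// Hyperlinks and images are removed by default.
+const reader = new MarkdownReader();
+const documents = await reader.loadData("data/README.md");
+console.log(documents.length);
+```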
+
+---
+
+### markdownToTups
+
+▸ **markdownToTups**(`markdownText`): `MarkdownTuple`[]
+
+Convert a markdown file to a dictionary.
+The keys are the headers and the values are the text under each header.
+
+#### Parameters
+
+| Name           | Type     | Description                   |
+| :------------- | :------- | :---------------------------- |
+| `markdownText` | `string` | The markdown text to convert. |
+
+#### Returns
+
+`MarkdownTuple`[]
+
+- An array of tuples, where each tuple contains a header (or null) and its corresponding text.
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:30](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L30)
+
+---
+
+### parseTups
+
+▸ **parseTups**(`content`): `MarkdownTuple`[]
+
+#### Parameters
+
+| Name      | Type     |
+| :-------- | :------- |
+| `content` | `string` |
+
+#### Returns
+
+`MarkdownTuple`[]
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:79](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L79)
+
+---
+
+### removeHyperlinks
+
+▸ **removeHyperlinks**(`content`): `string`
+
+#### Parameters
+
+| Name      | Type     |
+| :-------- | :------- |
+| `content` | `string` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:74](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L74)
+
+---
+
+### removeImages
+
+▸ **removeImages**(`content`): `string`
+
+#### Parameters
+
+| Name      | Type     |
+| :-------- | :------- |
+| `content` | `string` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/readers/MarkdownReader.ts:69](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/MarkdownReader.ts#L69)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MongoDBAtlasVectorSearch.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MongoDBAtlasVectorSearch.md
new file mode 100644
index 0000000000000000000000000000000000000000..06dc037fad039010b9b3d5c91b8935fb6f83c9d0
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MongoDBAtlasVectorSearch.md
@@ -0,0 +1,223 @@
+---
+id: "MongoDBAtlasVectorSearch"
+title: "Class: MongoDBAtlasVectorSearch"
+sidebar_label: "MongoDBAtlasVectorSearch"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implements
+
+- [`VectorStore`](../interfaces/VectorStore.md)
+
+## Constructors
+
+### constructor
+
+• **new MongoDBAtlasVectorSearch**(`init`)
+
+#### Parameters
+
+| Name   | Type                                                                                                                        |
+| :----- | :-------------------------------------------------------------------------------------------------------------------------- |
+| `init` | `Partial`<[`MongoDBAtlasVectorSearch`](MongoDBAtlasVectorSearch.md)\> & { `collectionName`: `string` ; `dbName`: `string` } |
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:36](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L36)
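+
+A sketch of construction (assuming the `mongodb` driver and a `MONGODB_URI` environment variable; the database, collection, and index names are illustrative):
+
+```typescript
+import { MongoClient } from "mongodb";
+import { MongoDBAtlasVectorSearch } from "llamaindex";
+
+const store = new MongoDBAtlasVectorSearch({
+  mongodbClient: new MongoClient(process.env.MONGODB_URI!),
+  dbName: "docs",
+  collectionName: "chunks",
+  indexName: "default",
+});
+```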
+
+## Properties
+
+### collection
+
+• `Private` **collection**: `Collection`<`Document`\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L34)
+
+---
+
+### embeddingKey
+
+• **embeddingKey**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L29)
+
+---
+
+### flatMetadata
+
+• **flatMetadata**: `boolean` = `true`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L25)
+
+---
+
+### idKey
+
+• **idKey**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:30](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L30)
+
+---
+
+### indexName
+
+• **indexName**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:28](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L28)
+
+---
+
+### insertOptions
+
+• `Optional` **insertOptions**: `BulkWriteOptions`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L33)
+
+---
+
+### metadataKey
+
+• **metadataKey**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L32)
+
+---
+
+### mongodbClient
+
+• **mongodbClient**: `MongoClient`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L27)
+
+---
+
+### storesText
+
+• **storesText**: `boolean` = `true`
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[storesText](../interfaces/VectorStore.md#storestext)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L24)
+
+---
+
+### textKey
+
+• **textKey**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L31)
+
+## Accessors
+
+### client
+
+• `get` **client**(): `any`
+
+#### Returns
+
+`any`
+
+#### Implementation of
+
+VectorStore.client
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:103](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L103)
+
+## Methods
+
+### add
+
+▸ **add**(`nodes`): `Promise`<`string`[]\>
+
+#### Parameters
+
+| Name    | Type                                                     |
+| :------ | :------------------------------------------------------- |
+| `nodes` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[add](../interfaces/VectorStore.md#add)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L65)
+
+---
+
+### delete
+
+▸ **delete**(`refDocId`, `deleteOptions?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `refDocId`       | `string` |
+| `deleteOptions?` | `any`    |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[delete](../interfaces/VectorStore.md#delete)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:94](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L94)
+
+---
+
+### query
+
+▸ **query**(`query`, `options?`): `Promise`<[`VectorStoreQueryResult`](../interfaces/VectorStoreQueryResult.md)\>
+
+#### Parameters
+
+| Name       | Type                                                    |
+| :--------- | :------------------------------------------------------ |
+| `query`    | [`VectorStoreQuery`](../interfaces/VectorStoreQuery.md) |
+| `options?` | `any`                                                   |
+
+#### Returns
+
+`Promise`<[`VectorStoreQueryResult`](../interfaces/VectorStoreQueryResult.md)\>
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[query](../interfaces/VectorStore.md#query)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts:107](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/MongoDBAtlasVectorStore.ts#L107)
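+
+A sketch of a direct store query, reusing `store` from the constructor sketch above (in practice the embedding would come from an embedding model; the values here are illustrative):
+
+```typescript
+import { VectorStoreQueryMode } from "llamaindex";
+
+const result = await store.query({
+  queryEmbedding: [0.1, 0.2, 0.3],
+  similarityTopK: 3,
+  mode: VectorStoreQueryMode.DEFAULT,
+});
+console.log(result.ids);
+```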
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MultiModalEmbedding.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MultiModalEmbedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..97252f42f35c6cf98b1c9072fcf45e0084f5093e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/MultiModalEmbedding.md
@@ -0,0 +1,139 @@
+---
+id: "MultiModalEmbedding"
+title: "Class: MultiModalEmbedding"
+sidebar_label: "MultiModalEmbedding"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- [`BaseEmbedding`](BaseEmbedding.md)
+
+  ↳ **`MultiModalEmbedding`**
+
+  ↳↳ [`ClipEmbedding`](ClipEmbedding.md)
+
+## Constructors
+
+### constructor
+
+• **new MultiModalEmbedding**()
+
+#### Inherited from
+
+[BaseEmbedding](BaseEmbedding.md).[constructor](BaseEmbedding.md#constructor)
+
+## Methods
+
+### getImageEmbedding
+
+▸ `Abstract` **getImageEmbedding**(`images`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name     | Type                         |
+| :------- | :--------------------------- |
+| `images` | [`ImageType`](../#imagetype) |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Defined in
+
+[packages/core/src/embeddings/MultiModalEmbedding.ts:9](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/MultiModalEmbedding.ts#L9)
+
+---
+
+### getImageEmbeddings
+
+▸ **getImageEmbeddings**(`images`): `Promise`<`number`[][]\>
+
+#### Parameters
+
+| Name     | Type                           |
+| :------- | :----------------------------- |
+| `images` | [`ImageType`](../#imagetype)[] |
+
+#### Returns
+
+`Promise`<`number`[][]\>
+
+#### Defined in
+
+[packages/core/src/embeddings/MultiModalEmbedding.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/MultiModalEmbedding.ts#L11)
+
+---
+
+### getQueryEmbedding
+
+▸ `Abstract` **getQueryEmbedding**(`query`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Inherited from
+
+[BaseEmbedding](BaseEmbedding.md).[getQueryEmbedding](BaseEmbedding.md#getqueryembedding)
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L23)
+
+---
+
+### getTextEmbedding
+
+▸ `Abstract` **getTextEmbedding**(`text`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Inherited from
+
+[BaseEmbedding](BaseEmbedding.md).[getTextEmbedding](BaseEmbedding.md#gettextembedding)
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L22)
+
+---
+
+### similarity
+
+▸ **similarity**(`embedding1`, `embedding2`, `mode?`): `number`
+
+#### Parameters
+
+| Name         | Type                                           | Default value            |
+| :----------- | :--------------------------------------------- | :----------------------- |
+| `embedding1` | `number`[]                                     | `undefined`              |
+| `embedding2` | `number`[]                                     | `undefined`              |
+| `mode`       | [`SimilarityType`](../enums/SimilarityType.md) | `SimilarityType.DEFAULT` |
+
+#### Returns
+
+`number`
+
+#### Inherited from
+
+[BaseEmbedding](BaseEmbedding.md).[similarity](BaseEmbedding.md#similarity)
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L14)
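+
+A sketch using the concrete [`ClipEmbedding`](ClipEmbedding.md) subclass (the image URL and text are illustrative):
+
+```typescript
+import { ClipEmbedding, SimilarityType } from "llamaindex";
+
+const clip = new ClipEmbedding();
+const imageVec = await clip.getImageEmbedding("https://example.com/cat.png");
+const textVec = await clip.getTextEmbedding("a photo of a cat");
+console.log(clip.similarity(imageVec, textVec, SimilarityType.DEFAULT));
+```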
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/NotionReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/NotionReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..71d0e9a925c0f3ea2ab42873ccf166a26e46634d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/NotionReader.md
@@ -0,0 +1,135 @@
+---
+id: "NotionReader"
+title: "Class: NotionReader"
+sidebar_label: "NotionReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Notion pages are retrieved recursively and converted to Document objects.
+Notion databases can also be loaded, and [the serialization method can be customized](https://github.com/TomPenguin/notion-md-crawler/tree/main).
+
+[Note] To use this reader, a Notion integration must be created in advance.
+Please refer to [this document](https://www.notion.so/help/create-integrations-with-the-notion-api) for details.
+
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new NotionReader**(`options`)
+
+Constructor for the NotionReader class
+
+#### Parameters
+
+| Name      | Type                  | Description                          |
+| :-------- | :-------------------- | :----------------------------------- |
+| `options` | `NotionReaderOptions` | Configuration options for the reader |
+
+#### Defined in
+
+[packages/core/src/readers/NotionReader.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/NotionReader.ts#L33)
+
+## Properties
+
+### crawl
+
+• `Private` **crawl**: (`rootPageId`: `string`) => `Promise`<`Pages`\>
+
+#### Type declaration
+
+▸ (`rootPageId`): `Promise`<`Pages`\>
+
+##### Parameters
+
+| Name         | Type     |
+| :----------- | :------- |
+| `rootPageId` | `string` |
+
+##### Returns
+
+`Promise`<`Pages`\>
+
+#### Defined in
+
+[packages/core/src/readers/NotionReader.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/NotionReader.ts#L27)
+
+## Methods
+
+### loadData
+
+▸ **loadData**(`rootPageId`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Recursively loads Notion pages and converts them to an array of Document objects
+
+#### Parameters
+
+| Name         | Type     | Description             |
+| :----------- | :------- | :---------------------- |
+| `rootPageId` | `string` | The root Notion page ID |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+A Promise that resolves to an array of Document objects
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/NotionReader.ts:63](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/NotionReader.ts#L63)
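+
+A sketch of loading pages (assuming a `@notionhq/client` instance authorized with an integration token; the environment variable and page ID are illustrative):
+
+```typescript
+import { Client } from "@notionhq/client";
+import { NotionReader } from "llamaindex";
+
+const reader = new NotionReader({
+  client: new Client({ auth: process.env.NOTION_TOKEN }),
+});
+const documents = await reader.loadData("<root-page-id>");
+console.log(documents.length);
+```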
+
+---
+
+### loadPages
+
+▸ **loadPages**(`rootPageId`): `Promise`<`Pages`\>
+
+Recursively loads the Notion page with the specified root page ID.
+
+#### Parameters
+
+| Name         | Type     | Description             |
+| :----------- | :------- | :---------------------- |
+| `rootPageId` | `string` | The root Notion page ID |
+
+#### Returns
+
+`Promise`<`Pages`\>
+
+A Promise that resolves to a Pages object (convertible with the `toDocuments` method)
+
+#### Defined in
+
+[packages/core/src/readers/NotionReader.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/NotionReader.ts#L54)
+
+---
+
+### toDocuments
+
+▸ **toDocuments**(`pages`): [`Document`](Document.md)<[`Metadata`](../#metadata)\>[]
+
+Converts Pages to an array of Document objects
+
+#### Parameters
+
+| Name    | Type    | Description                                               |
+| :------ | :------ | :-------------------------------------------------------- |
+| `pages` | `Pages` | The Notion pages to convert (Return value of `loadPages`) |
+
+#### Returns
+
+[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]
+
+An array of Document objects
+
+#### Defined in
+
+[packages/core/src/readers/NotionReader.ts:42](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/NotionReader.ts#L42)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/OpenAI.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/OpenAI.md
new file mode 100644
index 0000000000000000000000000000000000000000..d395d9a167722de1bbf4a094111ed9911ffecbe9
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/OpenAI.md
@@ -0,0 +1,338 @@
+---
+id: "OpenAI"
+title: "Class: OpenAI"
+sidebar_label: "OpenAI"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+OpenAI LLM implementation
+
+## Implements
+
+- [`LLM`](../interfaces/LLM.md)
+
+## Constructors
+
+### constructor
+
+• **new OpenAI**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                                                  |
+| :------ | :-------------------------------------------------------------------- |
+| `init?` | `Partial`<[`OpenAI`](OpenAI.md)\> & { `azure?`: `AzureOpenAIConfig` } |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:152](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L152)
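+
+A minimal sketch (assuming `OPENAI_API_KEY` is set in the environment; the model and temperature are illustrative):
+
+```typescript
+import { OpenAI } from "llamaindex";
+
+const llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+const completion = await llm.complete("Write a haiku about indexing.");
+console.log(completion.message.content);
+```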
+
+## Properties
+
+### additionalChatOptions
+
+• `Optional` **additionalChatOptions**: `Omit`<`Partial`<`ChatCompletionCreateParams`\>, `"model"` \| `"temperature"` \| `"max_tokens"` \| `"messages"` \| `"top_p"` \| `"streaming"`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:135](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L135)
+
+---
+
+### additionalSessionOptions
+
+• `Optional` **additionalSessionOptions**: `Omit`<`Partial`<`ClientOptions`\>, `"apiKey"` \| `"timeout"` \| `"maxRetries"`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:145](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L145)
+
+---
+
+### apiKey
+
+• `Optional` **apiKey**: `string` = `undefined`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:141](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L141)
+
+---
+
+### callbackManager
+
+• `Optional` **callbackManager**: [`CallbackManager`](CallbackManager.md)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:150](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L150)
+
+---
+
+### hasStreaming
+
+• **hasStreaming**: `boolean` = `true`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[hasStreaming](../interfaces/LLM.md#hasstreaming)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:128](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L128)
+
+---
+
+### maxRetries
+
+• **maxRetries**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:142](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L142)
+
+---
+
+### maxTokens
+
+• `Optional` **maxTokens**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L134)
+
+---
+
+### model
+
+• **model**: `"gpt-3.5-turbo"` \| `"gpt-3.5-turbo-1106"` \| `"gpt-3.5-turbo-16k"` \| `"gpt-4"` \| `"gpt-4-32k"` \| `"gpt-4-1106-preview"` \| `"gpt-4-vision-preview"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:131](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L131)
+
+---
+
+### session
+
+• **session**: `OpenAISession`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:144](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L144)
+
+---
+
+### temperature
+
+• **temperature**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:132](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L132)
+
+---
+
+### timeout
+
+• `Optional` **timeout**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:143](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L143)
+
+---
+
+### topP
+
+• **topP**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:133](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L133)
+
+## Accessors
+
+### metadata
+
+• `get` **metadata**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name            | Type                                                                                                                                                     |
+| :-------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `contextWindow` | `number`                                                                                                                                                 |
+| `maxTokens`     | `undefined` \| `number`                                                                                                                                  |
+| `model`         | `"gpt-3.5-turbo"` \| `"gpt-3.5-turbo-1106"` \| `"gpt-3.5-turbo-16k"` \| `"gpt-4"` \| `"gpt-4-32k"` \| `"gpt-4-1106-preview"` \| `"gpt-4-vision-preview"` |
+| `temperature`   | `number`                                                                                                                                                 |
+| `tokenizer`     | [`CL100K_BASE`](../enums/Tokenizers.md#cl100k_base)                                                                                                      |
+| `topP`          | `number`                                                                                                                                                 |
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[metadata](../interfaces/LLM.md#metadata)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:206](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L206)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`messages`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a chat response from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                                            | Description                                                                                      |
+| :------------- | :---------------------------------------------- | :----------------------------------------------------------------------------------------------- |
+| `messages`     | [`ChatMessage`](../interfaces/ChatMessage.md)[] | The return type of chat() and complete() is determined by whether the "streaming" parameter is set to true. |
+| `parentEvent?` | [`Event`](../interfaces/Event.md)               | -                                                                                                |
+| `streaming?`   | `T`                                             | -                                                                                                |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[chat](../interfaces/LLM.md#chat)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:249](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L249)
+
+---
+
+### complete
+
+▸ **complete**<`T`, `R`\>(`prompt`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a prompt completion from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                              | Description            |
+| :------------- | :-------------------------------- | :--------------------- |
+| `prompt`       | `string`                          | the prompt to complete |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) | -                      |
+| `streaming?`   | `T`                               | -                      |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[complete](../interfaces/LLM.md#complete)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:286](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L286)
+
+---
+
+### mapMessageType
+
+▸ **mapMessageType**(`messageType`): `"function"` \| `"user"` \| `"assistant"` \| `"system"`
+
+#### Parameters
+
+| Name          | Type                             |
+| :------------ | :------------------------------- |
+| `messageType` | [`MessageType`](../#messagetype) |
+
+#### Returns
+
+`"function"` \| `"user"` \| `"assistant"` \| `"system"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:232](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L232)
+
+---
+
+### streamChat
+
+▸ `Protected` **streamChat**(`messages`, `parentEvent?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                                            |
+| :------------- | :---------------------------------------------- |
+| `messages`     | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+| `parentEvent?` | [`Event`](../interfaces/Event.md)               |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:300](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L300)
+
+---
+
+### streamComplete
+
+▸ `Protected` **streamComplete**(`query`, `parentEvent?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:364](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L364)
+
+---
+
+### tokens
+
+▸ **tokens**(`messages`): `number`
+
+Calculates the number of tokens needed for the given chat messages
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `messages` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`number`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[tokens](../interfaces/LLM.md#tokens)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:217](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L217)
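+
+## Example
+
+A minimal usage sketch based on the fields and signatures documented above; the model and message values are placeholders:
+
+```typescript
+import { OpenAI } from "llamaindex";
+
+const llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+// Non-streaming: resolves to a ChatResponse.
+const response = await llm.chat([{ content: "Tell me a joke.", role: "user" }]);
+console.log(response.message.content);
+
+// Streaming: with streaming set to true, an AsyncGenerator<string> is returned.
+const stream = await llm.chat(
+  [{ content: "Tell me a joke.", role: "user" }],
+  undefined,
+  true,
+);
+for await (const token of stream) {
+  process.stdout.write(token);
+}
+```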
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/OpenAIEmbedding.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/OpenAIEmbedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..c8abf47610e30c000639f96fc32f202fce6a9e2d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/OpenAIEmbedding.md
@@ -0,0 +1,187 @@
+---
+id: "OpenAIEmbedding"
+title: "Class: OpenAIEmbedding"
+sidebar_label: "OpenAIEmbedding"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- [`BaseEmbedding`](BaseEmbedding.md)
+
+  ↳ **`OpenAIEmbedding`**
+
+## Constructors
+
+### constructor
+
+• **new OpenAIEmbedding**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                                                                    |
+| :------ | :-------------------------------------------------------------------------------------- |
+| `init?` | `Partial`<[`OpenAIEmbedding`](OpenAIEmbedding.md)\> & { `azure?`: `AzureOpenAIConfig` } |
+
+#### Overrides
+
+[BaseEmbedding](BaseEmbedding.md).[constructor](BaseEmbedding.md#constructor)
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:30](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L30)
+
+## Properties
+
+### additionalSessionOptions
+
+• `Optional` **additionalSessionOptions**: `Omit`<`Partial`<`ClientOptions`\>, `"apiKey"` \| `"timeout"` \| `"maxRetries"`\>
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L23)
+
+---
+
+### apiKey
+
+• `Optional` **apiKey**: `string` = `undefined`
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:20](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L20)
+
+---
+
+### maxRetries
+
+• **maxRetries**: `number`
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L21)
+
+---
+
+### model
+
+• **model**: [`TEXT_EMBED_ADA_002`](../enums/OpenAIEmbeddingModelType.md#text_embed_ada_002)
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L17)
+
+---
+
+### session
+
+• **session**: `OpenAISession`
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:28](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L28)
+
+---
+
+### timeout
+
+• `Optional` **timeout**: `number`
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L22)
+
+## Methods
+
+### getOpenAIEmbedding
+
+▸ `Private` **getOpenAIEmbedding**(`input`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `input` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:76](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L76)
+
+---
+
+### getQueryEmbedding
+
+▸ **getQueryEmbedding**(`query`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Overrides
+
+[BaseEmbedding](BaseEmbedding.md).[getQueryEmbedding](BaseEmbedding.md#getqueryembedding)
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:89](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L89)
+
+---
+
+### getTextEmbedding
+
+▸ **getTextEmbedding**(`text`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Overrides
+
+[BaseEmbedding](BaseEmbedding.md).[getTextEmbedding](BaseEmbedding.md#gettextembedding)
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:85](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L85)
+
+---
+
+### similarity
+
+▸ **similarity**(`embedding1`, `embedding2`, `mode?`): `number`
+
+#### Parameters
+
+| Name         | Type                                           | Default value            |
+| :----------- | :--------------------------------------------- | :----------------------- |
+| `embedding1` | `number`[]                                     | `undefined`              |
+| `embedding2` | `number`[]                                     | `undefined`              |
+| `mode`       | [`SimilarityType`](../enums/SimilarityType.md) | `SimilarityType.DEFAULT` |
+
+#### Returns
+
+`number`
+
+#### Inherited from
+
+[BaseEmbedding](BaseEmbedding.md).[similarity](BaseEmbedding.md#similarity)
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L14)
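+
+## Example
+
+A minimal usage sketch of the methods documented above; it assumes `OPENAI_API_KEY` is set in the environment:
+
+```typescript
+import { OpenAIEmbedding } from "llamaindex";
+
+const embedModel = new OpenAIEmbedding();
+
+const textEmbedding = await embedModel.getTextEmbedding(
+  "LlamaIndex is a data framework.",
+);
+const queryEmbedding = await embedModel.getQueryEmbedding("What is LlamaIndex?");
+
+// Compare the two embeddings with the inherited similarity() method.
+const score = embedModel.similarity(textEmbedding, queryEmbedding);
+console.log(`similarity: ${score}`);
+```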
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PDFReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PDFReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..d28006ba33cb5f84f4c460e6ba51b7c5b6fd3a38
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PDFReader.md
@@ -0,0 +1,44 @@
+---
+id: "PDFReader"
+title: "Class: PDFReader"
+sidebar_label: "PDFReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Read the text of a PDF
+
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new PDFReader**()
+
+## Methods
+
+### loadData
+
+▸ **loadData**(`file`, `fs?`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name   | Type                                                      | Default value |
+| :----- | :-------------------------------------------------------- | :------------ |
+| `file` | `string`                                                  | `undefined`   |
+| `fs`   | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`  |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/PDFReader.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/PDFReader.ts#L11)
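+
+## Example
+
+A minimal usage sketch; the file path is a placeholder and the default file system is used:
+
+```typescript
+import { PDFReader } from "llamaindex";
+
+const reader = new PDFReader();
+const documents = await reader.loadData("./data/manual.pdf");
+console.log(`Loaded ${documents.length} document(s) from the PDF.`);
+```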
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PGVectorStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PGVectorStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb1fc5a6743db1d4b1f2ea513be7a77ad7e9e662
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PGVectorStore.md
@@ -0,0 +1,293 @@
+---
+id: "PGVectorStore"
+title: "Class: PGVectorStore"
+sidebar_label: "PGVectorStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Provides support for writing and querying vector data in Postgres.
+
+## Implements
+
+- [`VectorStore`](../interfaces/VectorStore.md)
+
+## Constructors
+
+### constructor
+
+• **new PGVectorStore**()
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:40](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L40)
+
+## Properties
+
+### collection
+
+• `Private` **collection**: `string` = `""`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L18)
+
+---
+
+### db
+
+• `Optional` **db**: `Client`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:38](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L38)
+
+---
+
+### storesText
+
+• **storesText**: `boolean` = `true`
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[storesText](../interfaces/VectorStore.md#storestext)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L16)
+
+## Methods
+
+### add
+
+▸ **add**(`embeddingResults`): `Promise`<`string`[]\>
+
+Adds vector record(s) to the table.
+NOTE: Uses the collection property controlled by setCollection/getCollection.
+
+#### Parameters
+
+| Name               | Type                                                     | Description                                                     |
+| :----------------- | :------------------------------------------------------- | :-------------------------------------------------------------- |
+| `embeddingResults` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] | The Nodes to be inserted, optionally including metadata tuples. |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+A list of zero or more id values for the created records.
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[add](../interfaces/VectorStore.md#add)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:144](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L144)
+
+---
+
+### checkSchema
+
+▸ `Private` **checkSchema**(`db`): `Promise`<`Client`\>
+
+#### Parameters
+
+| Name | Type     |
+| :--- | :------- |
+| `db` | `Client` |
+
+#### Returns
+
+`Promise`<`Client`\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:90](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L90)
+
+---
+
+### clearCollection
+
+▸ **clearCollection**(): `Promise`<`QueryResult`<`any`\>\>
+
+Delete all vector records for the specified collection.
+NOTE: Uses the collection property controlled by setCollection/getCollection.
+
+#### Returns
+
+`Promise`<`QueryResult`<`any`\>\>
+
+The result of the delete query.
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:128](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L128)
+
+---
+
+### client
+
+▸ **client**(): `Promise`<`Client`\>
+
+Connects to the database specified in environment variables.
+This method also creates the vector extension, the destination
+table, and indexes if they are not found.
+
+#### Returns
+
+`Promise`<`Client`\>
+
+A connection to the database, or the error encountered while connecting/setting up.
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[client](../interfaces/VectorStore.md#client)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:119](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L119)
+
+---
+
+### delete
+
+▸ **delete**(`refDocId`, `deleteKwargs?`): `Promise`<`void`\>
+
+Deletes a single record from the database by id.
+NOTE: Uses the collection property controlled by setCollection/getCollection.
+
+#### Parameters
+
+| Name            | Type     | Description                                           |
+| :-------------- | :------- | :---------------------------------------------------- |
+| `refDocId`      | `string` | Unique identifier for the record to delete.           |
+| `deleteKwargs?` | `any`    | Required by VectorStore interface. Currently ignored. |
+
+#### Returns
+
+`Promise`<`void`\>
+
+Promise that resolves if the delete query did not throw an error.
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[delete](../interfaces/VectorStore.md#delete)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:196](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L196)
+
+---
+
+### getCollection
+
+▸ **getCollection**(): `string`
+
+Getter for the collection property.
+Using a collection allows for simple segregation of vector data,
+e.g. by user, source, or access-level.
+Leave it blank to ignore the collection value when querying.
+
+#### Returns
+
+`string`
+
+The currently-set collection value. Default is empty string.
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:60](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L60)
+
+---
+
+### getDb
+
+▸ `Private` **getDb**(): `Promise`<`Client`\>
+
+#### Returns
+
+`Promise`<`Client`\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L64)
+
+---
+
+### persist
+
+▸ **persist**(`persistPath`, `fs?`): `Promise`<`void`\>
+
+Required by VectorStore interface. Currently ignored.
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`Promise`<`void`\>
+
+Resolved Promise.
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:269](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L269)
+
+---
+
+### query
+
+▸ **query**(`query`, `options?`): `Promise`<[`VectorStoreQueryResult`](../interfaces/VectorStoreQueryResult.md)\>
+
+Query the vector store for the closest matching data to the query embeddings
+
+#### Parameters
+
+| Name       | Type                                                    | Description                                           |
+| :--------- | :------------------------------------------------------ | :---------------------------------------------------- |
+| `query`    | [`VectorStoreQuery`](../interfaces/VectorStoreQuery.md) | The VectorStoreQuery to be used                       |
+| `options?` | `any`                                                   | Required by VectorStore interface. Currently ignored. |
+
+#### Returns
+
+`Promise`<[`VectorStoreQueryResult`](../interfaces/VectorStoreQueryResult.md)\>
+
+Zero or more Document instances with data from the vector store.
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[query](../interfaces/VectorStore.md#query)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:217](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L217)
+
+---
+
+### setCollection
+
+▸ **setCollection**(`coll`): `void`
+
+Setter for the collection property.
+Using a collection allows for simple segregation of vector data,
+e.g. by user, source, or access-level.
+Leave it blank to ignore the collection value when querying.
+
+#### Parameters
+
+| Name   | Type     | Description              |
+| :----- | :------- | :----------------------- |
+| `coll` | `string` | Name for the collection. |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/PGVectorStore.ts:49](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/PGVectorStore.ts#L49)
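+
+## Example
+
+A minimal sketch of the write/query flow documented above. It assumes the Postgres connection settings are supplied via environment variables (see `client()`); the node and embedding values are placeholders, and the query object fields follow the `VectorStoreQuery` interface:
+
+```typescript
+import { Document, PGVectorStore, VectorStoreQueryMode } from "llamaindex";
+
+const store = new PGVectorStore();
+store.setCollection("user-123"); // optional: segregate vector data per user
+
+// Placeholder node; real pipelines attach model-generated embeddings.
+const nodes = [new Document({ text: "hello pgvector", embedding: [0.1, 0.2, 0.3] })];
+const ids = await store.add(nodes);
+
+const result = await store.query({
+  queryEmbedding: [0.1, 0.2, 0.3],
+  similarityTopK: 1,
+  mode: VectorStoreQueryMode.DEFAULT,
+});
+console.log(ids, result.ids);
+```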
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PapaCSVReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PapaCSVReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ae917281edbc5dbc288e2c615fe522540a13f24
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PapaCSVReader.md
@@ -0,0 +1,105 @@
+---
+id: "PapaCSVReader"
+title: "Class: PapaCSVReader"
+sidebar_label: "PapaCSVReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A papaparse-based CSV parser.
+
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new PapaCSVReader**(`concatRows?`, `colJoiner?`, `rowJoiner?`, `papaConfig?`)
+
+Constructs a new instance of the class.
+
+#### Parameters
+
+| Name          | Type                               | Default value | Description                                                                                                                 |
+| :------------ | :--------------------------------- | :------------ | :-------------------------------------------------------------------------------------------------------------------------- |
+| `concatRows?` | `boolean`                          | `true`        | Whether to concatenate all rows into one document. If set to false, a Document will be created for each row. Defaults to true. |
+| `colJoiner?`  | `string`                           | `", "`        | -                                                                                                                           |
+| `rowJoiner?`  | `string`                           | `"\n"`        | Separator to use for joining each row. Only used when `concatRows` is true. Defaults to "\n".                               |
+| `papaConfig?` | `ParseConfig`<`any`, `undefined`\> | `undefined`   | -                                                                                                                           |
+
+#### Defined in
+
+[packages/core/src/readers/CSVReader.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/CSVReader.ts#L23)
+
+## Properties
+
+### colJoiner
+
+• `Private` **colJoiner**: `string`
+
+#### Defined in
+
+[packages/core/src/readers/CSVReader.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/CSVReader.ts#L13)
+
+---
+
+### concatRows
+
+• `Private` **concatRows**: `boolean`
+
+#### Defined in
+
+[packages/core/src/readers/CSVReader.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/CSVReader.ts#L12)
+
+---
+
+### papaConfig
+
+• `Private` `Optional` **papaConfig**: `ParseConfig`<`any`, `undefined`\>
+
+#### Defined in
+
+[packages/core/src/readers/CSVReader.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/CSVReader.ts#L15)
+
+---
+
+### rowJoiner
+
+• `Private` **rowJoiner**: `string`
+
+#### Defined in
+
+[packages/core/src/readers/CSVReader.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/CSVReader.ts#L14)
+
+## Methods
+
+### loadData
+
+▸ **loadData**(`file`, `fs?`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Loads data from CSV files.
+
+#### Parameters
+
+| Name   | Type                                                      | Default value | Description                                  |
+| :----- | :-------------------------------------------------------- | :------------ | :------------------------------------------- |
+| `file` | `string`                                                  | `undefined`   | The path to the file to load.                |
+| `fs?`  | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`  | The file system to use for reading the file. |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/CSVReader.ts:41](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/CSVReader.ts#L41)
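+
+## Example
+
+A minimal usage sketch of the constructor options and `loadData` documented above; the file paths are placeholders:
+
+```typescript
+import { PapaCSVReader } from "llamaindex";
+
+// Default: all rows are concatenated into a single Document.
+const reader = new PapaCSVReader();
+const [doc] = await reader.loadData("./data/table.csv");
+
+// One Document per row, joining columns with " | ".
+const rowReader = new PapaCSVReader(false, " | ");
+const rowDocs = await rowReader.loadData("./data/table.csv");
+console.log(rowDocs.length);
+```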
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Portkey.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Portkey.md
new file mode 100644
index 0000000000000000000000000000000000000000..d80c101fd071d80b57ea79bc1452da4a6b0d6526
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Portkey.md
@@ -0,0 +1,261 @@
+---
+id: "Portkey"
+title: "Class: Portkey"
+sidebar_label: "Portkey"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Unified language model interface
+
+## Implements
+
+- [`LLM`](../interfaces/LLM.md)
+
+## Constructors
+
+### constructor
+
+• **new Portkey**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                |
+| :------ | :---------------------------------- |
+| `init?` | `Partial`<[`Portkey`](Portkey.md)\> |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:814](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L814)
+
+## Properties
+
+### apiKey
+
+• `Optional` **apiKey**: `string` = `undefined`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:807](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L807)
+
+---
+
+### baseURL
+
+• `Optional` **baseURL**: `string` = `undefined`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:808](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L808)
+
+---
+
+### callbackManager
+
+• `Optional` **callbackManager**: [`CallbackManager`](CallbackManager.md)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:812](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L812)
+
+---
+
+### hasStreaming
+
+• **hasStreaming**: `boolean` = `true`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[hasStreaming](../interfaces/LLM.md#hasstreaming)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:805](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L805)
+
+---
+
+### llms
+
+• `Optional` **llms**: `null` \| [`LLMOptions`] = `undefined`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:810](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L810)
+
+---
+
+### mode
+
+• `Optional` **mode**: `string` = `undefined`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:809](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L809)
+
+---
+
+### session
+
+• **session**: `PortkeySession`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:811](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L811)
+
+## Accessors
+
+### metadata
+
+• `get` **metadata**(): [`LLMMetadata`](../interfaces/LLMMetadata.md)
+
+#### Returns
+
+[`LLMMetadata`](../interfaces/LLMMetadata.md)
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[metadata](../interfaces/LLM.md#metadata)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:832](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L832)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`messages`, `parentEvent?`, `streaming?`, `params?`): `Promise`<`R`\>
+
+Get a chat response from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                                            | Description                                                                                      |
+| :------------- | :---------------------------------------------- | :----------------------------------------------------------------------------------------------- |
+| `messages`     | [`ChatMessage`](../interfaces/ChatMessage.md)[] | The return type of chat() and complete() is determined by whether the "streaming" parameter is set to true. |
+| `parentEvent?` | [`Event`](../interfaces/Event.md)               | -                                                                                                |
+| `streaming?`   | `T`                                             | -                                                                                                |
+| `params?`      | `Record`<`string`, `any`\>                      | -                                                                                                |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[chat](../interfaces/LLM.md#chat)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:836](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L836)
+
+---
+
+### complete
+
+▸ **complete**<`T`, `R`\>(`prompt`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a prompt completion from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                                  |
+| :--- | :-------------------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                                        |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](../interfaces/ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                              | Description            |
+| :------------- | :-------------------------------- | :--------------------- |
+| `prompt`       | `string`                          | the prompt to complete |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) | -                      |
+| `streaming?`   | `T`                               | -                      |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[complete](../interfaces/LLM.md#complete)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:860](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L860)
+
+---
+
+### streamChat
+
+▸ **streamChat**(`messages`, `parentEvent?`, `params?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                                            |
+| :------------- | :---------------------------------------------- |
+| `messages`     | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+| `parentEvent?` | [`Event`](../interfaces/Event.md)               |
+| `params?`      | `Record`<`string`, `any`\>                      |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:875](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L875)
+
+---
+
+### streamComplete
+
+▸ **streamComplete**(`query`, `parentEvent?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:922](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L922)
+
+---
+
+### tokens
+
+▸ **tokens**(`messages`): `number`
+
+Calculates the number of tokens needed for the given chat messages
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `messages` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`number`
+
+#### Implementation of
+
+[LLM](../interfaces/LLM.md).[tokens](../interfaces/LLM.md#tokens)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:828](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L828)
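+
+## Example
+
+A minimal usage sketch; `apiKey` and `mode` are documented fields, the values are placeholders, and `"single"` is assumed here to be a valid Portkey routing mode:
+
+```typescript
+import { Portkey } from "llamaindex";
+
+const portkey = new Portkey({
+  apiKey: process.env.PORTKEY_API_KEY,
+  mode: "single",
+});
+
+const response = await portkey.chat([{ content: "Hello", role: "user" }]);
+console.log(response.message.content);
+```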
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PromptHelper.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PromptHelper.md
new file mode 100644
index 0000000000000000000000000000000000000000..dd62a7deca8df1b415d0f7320d82bacfd581e2ba
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/PromptHelper.md
@@ -0,0 +1,198 @@
+---
+id: "PromptHelper"
+title: "Class: PromptHelper"
+sidebar_label: "PromptHelper"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A collection of helper functions for working with prompts.
+
+## Constructors
+
+### constructor
+
+• **new PromptHelper**(`contextWindow?`, `numOutput?`, `chunkOverlapRatio?`, `chunkSizeLimit?`, `tokenizer?`, `separator?`)
+
+#### Parameters
+
+| Name                | Type                                | Default value                 |
+| :------------------ | :---------------------------------- | :---------------------------- |
+| `contextWindow`     | `number`                            | `DEFAULT_CONTEXT_WINDOW`      |
+| `numOutput`         | `number`                            | `DEFAULT_NUM_OUTPUTS`         |
+| `chunkOverlapRatio` | `number`                            | `DEFAULT_CHUNK_OVERLAP_RATIO` |
+| `chunkSizeLimit?`   | `number`                            | `undefined`                   |
+| `tokenizer?`        | (`text`: `string`) => `Uint32Array` | `undefined`                   |
+| `separator`         | `string`                            | `" "`                         |
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:40](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L40)
+
+## Properties
+
+### chunkOverlapRatio
+
+• **chunkOverlapRatio**: `number` = `DEFAULT_CHUNK_OVERLAP_RATIO`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:35](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L35)
+
+---
+
+### chunkSizeLimit
+
+• `Optional` **chunkSizeLimit**: `number`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:36](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L36)
+
+---
+
+### contextWindow
+
+• **contextWindow**: `number` = `DEFAULT_CONTEXT_WINDOW`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L33)
+
+---
+
+### numOutput
+
+• **numOutput**: `number` = `DEFAULT_NUM_OUTPUTS`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L34)
+
+---
+
+### separator
+
+• **separator**: `string` = `" "`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:38](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L38)
+
+---
+
+### tokenizer
+
+• **tokenizer**: (`text`: `string`) => `Uint32Array`
+
+#### Type declaration
+
+▸ (`text`): `Uint32Array`
+
+##### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+##### Returns
+
+`Uint32Array`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:37](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L37)
+
+## Methods
+
+### getAvailableChunkSize
+
+▸ `Private` **getAvailableChunkSize**(`prompt`, `numChunks?`, `padding?`): `number`
+
+Find the maximum size of each chunk given a prompt.
+
+#### Parameters
+
+| Name        | Type                               | Default value |
+| :---------- | :--------------------------------- | :------------ |
+| `prompt`    | [`SimplePrompt`](../#simpleprompt) | `undefined`   |
+| `numChunks` | `number`                           | `1`           |
+| `padding`   | `number`                           | `5`           |
+
+#### Returns
+
+`number`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:76](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L76)
+
+---
+
+### getAvailableContextSize
+
+▸ `Private` **getAvailableContextSize**(`prompt`): `number`
+
+Given a prompt, return the maximum size of the inputs to the prompt.
+
+#### Parameters
+
+| Name     | Type                               |
+| :------- | :--------------------------------- |
+| `prompt` | [`SimplePrompt`](../#simpleprompt) |
+
+#### Returns
+
+`number`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:61](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L61)
+
+---
+
+### getTextSplitterGivenPrompt
+
+▸ **getTextSplitterGivenPrompt**(`prompt`, `numChunks?`, `padding?`): [`SentenceSplitter`](SentenceSplitter.md)
+
+Creates a text splitter with the correct chunk sizes and overlaps given a prompt.
+
+#### Parameters
+
+| Name        | Type                               | Default value     |
+| :---------- | :--------------------------------- | :---------------- |
+| `prompt`    | [`SimplePrompt`](../#simpleprompt) | `undefined`       |
+| `numChunks` | `number`                           | `1`               |
+| `padding`   | `number`                           | `DEFAULT_PADDING` |
+
+#### Returns
+
+[`SentenceSplitter`](SentenceSplitter.md)
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:99](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L99)
+
+---
+
+### repack
+
+▸ **repack**(`prompt`, `textChunks`, `padding?`): `string`[]
+
+Resplits the given text chunks using the optimal text splitter for the prompt.
+
+#### Parameters
+
+| Name         | Type                               | Default value     |
+| :----------- | :--------------------------------- | :---------------- |
+| `prompt`     | [`SimplePrompt`](../#simpleprompt) | `undefined`       |
+| `textChunks` | `string`[]                         | `undefined`       |
+| `padding`    | `number`                           | `DEFAULT_PADDING` |
+
+#### Returns
+
+`string`[]
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:120](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L120)
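+
+## Example
+
+A minimal sketch of `repack`, assuming a simple prompt function in the `SimplePrompt` shape; the sizes and chunk text are illustrative:
+
+```typescript
+import { PromptHelper } from "llamaindex";
+
+// contextWindow = 4096, numOutput = 256; remaining arguments keep their defaults.
+const helper = new PromptHelper(4096, 256);
+
+const prompt = ({ context = "" }) => `Context: ${context}\nAnswer the question.`;
+
+// Resplit chunks so each fits within the space the prompt leaves available.
+const repacked = helper.repack(prompt, ["first chunk ...", "second chunk ..."]);
+console.log(repacked.length);
+```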
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Refine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Refine.md
new file mode 100644
index 0000000000000000000000000000000000000000..0d6858a851bdf2425b3df29ef84b15b59aecb5f0
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Refine.md
@@ -0,0 +1,167 @@
+---
+id: "Refine"
+title: "Class: Refine"
+sidebar_label: "Refine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A response builder that uses the query to ask the LLM to generate a better response over multiple text chunks.
+
+## Hierarchy
+
+- **`Refine`**
+
+  ↳ [`CompactAndRefine`](CompactAndRefine.md)
+
+## Implements
+
+- `BaseResponseBuilder`
+
+## Constructors
+
+### constructor
+
+• **new Refine**(`serviceContext`, `textQATemplate?`, `refineTemplate?`)
+
+#### Parameters
+
+| Name              | Type                                                |
+| :---------------- | :-------------------------------------------------- |
+| `serviceContext`  | [`ServiceContext`](../interfaces/ServiceContext.md) |
+| `textQATemplate?` | (`__namedParameters`: `Object`) => `string`         |
+| `refineTemplate?` | (`__namedParameters`: `Object`) => `string`         |
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:82](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L82)
+
+## Properties
+
+### refineTemplate
+
+• **refineTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L80)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:78](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L78)
+
+---
+
+### textQATemplate
+
+• **textQATemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:79](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L79)
+
+## Methods
+
+### getResponse
+
+▸ **getResponse**(`query`, `textChunks`, `parentEvent?`, `prevResponse?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name            | Type                              |
+| :-------------- | :-------------------------------- |
+| `query`         | `string`                          |
+| `textChunks`    | `string`[]                        |
+| `parentEvent?`  | [`Event`](../interfaces/Event.md) |
+| `prevResponse?` | `string`                          |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Implementation of
+
+BaseResponseBuilder.getResponse
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:92](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L92)
+
+---
+
+### giveResponseSingle
+
+▸ `Private` **giveResponseSingle**(`queryStr`, `textChunk`, `parentEvent?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `queryStr`     | `string`                          |
+| `textChunk`    | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:117](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L117)
+
+---
+
+### refineResponseSingle
+
+▸ `Private` **refineResponseSingle**(`response`, `queryStr`, `textChunk`, `parentEvent?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `response`     | `string`                          |
+| `queryStr`     | `string`                          |
+| `textChunk`    | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:153](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L153)
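+
+## Example
+
+A minimal sketch of `getResponse`, assuming `serviceContextFromDefaults` is available from the package (as used elsewhere in these docs) and using placeholder text chunks:
+
+```typescript
+import { Refine, serviceContextFromDefaults } from "llamaindex";
+
+const refine = new Refine(serviceContextFromDefaults());
+
+// Each chunk refines the answer produced from the previous ones.
+const answer = await refine.getResponse("What are llamas?", [
+  "Llamas are domesticated South American camelids.",
+  "They are often kept as pack animals.",
+]);
+console.log(answer);
+```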
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Response.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Response.md
new file mode 100644
index 0000000000000000000000000000000000000000..6901533d34c198e1c1a3bd3518848bd6c5099ce3
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/Response.md
@@ -0,0 +1,74 @@
+---
+id: "Response"
+title: "Class: Response"
+sidebar_label: "Response"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Response is the output of an LLM.
+
+## Constructors
+
+### constructor
+
+• **new Response**(`response`, `sourceNodes?`)
+
+#### Parameters
+
+| Name           | Type                                                     |
+| :------------- | :------------------------------------------------------- |
+| `response`     | `string`                                                 |
+| `sourceNodes?` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Defined in
+
+[packages/core/src/Response.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Response.ts#L10)
+
+## Properties
+
+### response
+
+• **response**: `string`
+
+#### Defined in
+
+[packages/core/src/Response.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Response.ts#L7)
+
+---
+
+### sourceNodes
+
+• `Optional` **sourceNodes**: [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/Response.ts:8](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Response.ts#L8)
+
+## Methods
+
+### getFormattedSources
+
+▸ **getFormattedSources**(): `void`
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/Response.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Response.ts#L15)
+
+---
+
+### toString
+
+▸ **toString**(): `string`
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Response.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Response.ts#L19)
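+
+## Example
+
+A minimal sketch of the class as documented above:
+
+```typescript
+import { Response } from "llamaindex";
+
+const res = new Response("42 is the answer.");
+console.log(res.toString()); // "42 is the answer."
+```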
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ResponseSynthesizer.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ResponseSynthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..48445005d1ec2c04cc3664d325fe92cafe335ffd
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/ResponseSynthesizer.md
@@ -0,0 +1,80 @@
+---
+id: "ResponseSynthesizer"
+title: "Class: ResponseSynthesizer"
+sidebar_label: "ResponseSynthesizer"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A ResponseSynthesizer is used to generate a response from a query and a list of nodes.
+
+## Constructors
+
+### constructor
+
+• **new ResponseSynthesizer**(`«destructured»?`)
+
+#### Parameters
+
+| Name                 | Type                                                |
+| :------------------- | :-------------------------------------------------- |
+| `«destructured»`     | `Object`                                            |
+| › `metadataMode?`    | [`MetadataMode`](../enums/MetadataMode.md)          |
+| › `responseBuilder?` | `BaseResponseBuilder`                               |
+| › `serviceContext?`  | [`ServiceContext`](../interfaces/ServiceContext.md) |
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:295](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L295)
+
+## Properties
+
+### metadataMode
+
+• **metadataMode**: [`MetadataMode`](../enums/MetadataMode.md)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:293](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L293)
+
+---
+
+### responseBuilder
+
+• **responseBuilder**: `BaseResponseBuilder`
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:291](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L291)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:292](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L292)
+
+## Methods
+
+### synthesize
+
+▸ **synthesize**(`query`, `nodesWithScore`, `parentEvent?`): `Promise`<[`Response`](Response.md)\>
+
+#### Parameters
+
+| Name             | Type                                                                             |
+| :--------------- | :------------------------------------------------------------------------------- |
+| `query`          | `string`                                                                         |
+| `nodesWithScore` | [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[] |
+| `parentEvent?`   | [`Event`](../interfaces/Event.md)                                                |
+
+#### Returns
+
+`Promise`<[`Response`](Response.md)\>
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:310](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L310)
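+
+## Example
+
+A minimal sketch of `synthesize`, using default options and a hand-built `NodeWithScore` placeholder (retrievers normally supply these):
+
+```typescript
+import { Document, ResponseSynthesizer } from "llamaindex";
+
+const synthesizer = new ResponseSynthesizer();
+
+const nodesWithScore = [
+  { node: new Document({ text: "Llamas are South American camelids." }), score: 1.0 },
+];
+
+const response = await synthesizer.synthesize("What are llamas?", nodesWithScore);
+console.log(response.toString());
+```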
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/RetrieverQueryEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/RetrieverQueryEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..07959f5932469586810b66796660d208c75f530b
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/RetrieverQueryEngine.md
@@ -0,0 +1,140 @@
+---
+id: "RetrieverQueryEngine"
+title: "Class: RetrieverQueryEngine"
+sidebar_label: "RetrieverQueryEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A query engine that uses a retriever to query an index and then synthesizes the response.
+
+## Implements
+
+- [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+## Constructors
+
+### constructor
+
+• **new RetrieverQueryEngine**(`retriever`, `responseSynthesizer?`, `preFilters?`, `nodePostprocessors?`)
+
+#### Parameters
+
+| Name                   | Type                                                                |
+| :--------------------- | :------------------------------------------------------------------ |
+| `retriever`            | [`BaseRetriever`](../interfaces/BaseRetriever.md)                   |
+| `responseSynthesizer?` | [`ResponseSynthesizer`](ResponseSynthesizer.md)                     |
+| `preFilters?`          | `unknown`                                                           |
+| `nodePostprocessors?`  | [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[] |
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:37](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L37)
+
+## Properties
+
+### nodePostprocessors
+
+• **nodePostprocessors**: [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[]
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L34)
+
+---
+
+### preFilters
+
+• `Optional` **preFilters**: `unknown`
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:35](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L35)
+
+---
+
+### responseSynthesizer
+
+• **responseSynthesizer**: [`ResponseSynthesizer`](ResponseSynthesizer.md)
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L33)
+
+---
+
+### retriever
+
+• **retriever**: [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L32)
+
+## Methods
+
+### applyNodePostprocessors
+
+▸ `Private` **applyNodePostprocessors**(`nodes`): [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Parameters
+
+| Name    | Type                                                                             |
+| :------ | :------------------------------------------------------------------------------- |
+| `nodes` | [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L52)
+
+---
+
+### query
+
+▸ **query**(`query`, `parentEvent?`): `Promise`<[`Response`](Response.md)\>
+
+Query the query engine and get a response.
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<[`Response`](Response.md)\>
+
+#### Implementation of
+
+[BaseQueryEngine](../interfaces/BaseQueryEngine.md).[query](../interfaces/BaseQueryEngine.md#query)
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:69](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L69)
+
+---
+
+### retrieve
+
+▸ `Private` **retrieve**(`query`, `parentEvent`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name          | Type                              |
+| :------------ | :-------------------------------- |
+| `query`       | `string`                          |
+| `parentEvent` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L59)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SentenceSplitter.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SentenceSplitter.md
new file mode 100644
index 0000000000000000000000000000000000000000..e01c27467a31e0eabe3011122eaef621551b1f20
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SentenceSplitter.md
@@ -0,0 +1,271 @@
+---
+id: "SentenceSplitter"
+title: "Class: SentenceSplitter"
+sidebar_label: "SentenceSplitter"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+SentenceSplitter is our default text splitter. It supports splitting into sentences, paragraphs, or fixed-length chunks with overlap.
+
+One advantage of SentenceSplitter is that, even in fixed-length chunks, it tries to keep sentences together.
+
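+A short sketch of standalone use, based on the options and the `splitText` method documented below (the sample text and chunk sizes are illustrative):
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+// Chunk text into at most ~64 tokens each, keeping whole sentences together
+// where possible. Long sentences are not split unless splitLongSentences is set.
+const splitter = new SentenceSplitter({ chunkSize: 64, chunkOverlap: 8 });
+const chunks = splitter.splitText(
+  "LlamaIndex helps you build LLM apps. It chunks your text. Sentences stay whole where possible.",
+);
+console.log(chunks);
+```
+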
+## Constructors
+
+### constructor
+
+• **new SentenceSplitter**(`options?`)
+
+#### Parameters
+
+| Name                           | Type                                               |
+| :----------------------------- | :------------------------------------------------- |
+| `options?`                     | `Object`                                           |
+| `options.chunkOverlap?`        | `number`                                           |
+| `options.chunkSize?`           | `number`                                           |
+| `options.chunkingTokenizerFn?` | (`text`: `string`) => `null` \| `RegExpMatchArray` |
+| `options.paragraphSeparator?`  | `string`                                           |
+| `options.splitLongSentences?`  | `boolean`                                          |
+| `options.tokenizer?`           | `any`                                              |
+| `options.tokenizerDecoder?`    | `any`                                              |
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:67](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L67)
+
+## Properties
+
+### chunkOverlap
+
+• `Private` **chunkOverlap**: `number`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:60](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L60)
+
+---
+
+### chunkSize
+
+• `Private` **chunkSize**: `number`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L59)
+
+---
+
+### chunkingTokenizerFn
+
+• `Private` **chunkingTokenizerFn**: (`text`: `string`) => `null` \| `RegExpMatchArray`
+
+#### Type declaration
+
+▸ (`text`): `null` \| `RegExpMatchArray`
+
+##### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+##### Returns
+
+`null` \| `RegExpMatchArray`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L64)
+
+---
+
+### paragraphSeparator
+
+• `Private` **paragraphSeparator**: `string`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:63](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L63)
+
+---
+
+### splitLongSentences
+
+• `Private` **splitLongSentences**: `boolean`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L65)
+
+---
+
+### tokenizer
+
+• `Private` **tokenizer**: `any`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:61](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L61)
+
+---
+
+### tokenizerDecoder
+
+• `Private` **tokenizerDecoder**: `any`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:62](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L62)
+
+## Methods
+
+### combineTextSplits
+
+▸ **combineTextSplits**(`newSentenceSplits`, `effectiveChunkSize`): `TextSplit`[]
+
+#### Parameters
+
+| Name                 | Type         |
+| :------------------- | :----------- |
+| `newSentenceSplits`  | `SplitRep`[] |
+| `effectiveChunkSize` | `number`     |
+
+#### Returns
+
+`TextSplit`[]
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:205](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L205)
+
+---
+
+### getEffectiveChunkSize
+
+▸ `Private` **getEffectiveChunkSize**(`extraInfoStr?`): `number`
+
+#### Parameters
+
+| Name            | Type     |
+| :-------------- | :------- |
+| `extraInfoStr?` | `string` |
+
+#### Returns
+
+`number`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:104](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L104)
+
+---
+
+### getParagraphSplits
+
+▸ **getParagraphSplits**(`text`, `effectiveChunkSize?`): `string`[]
+
+#### Parameters
+
+| Name                  | Type     |
+| :-------------------- | :------- |
+| `text`                | `string` |
+| `effectiveChunkSize?` | `number` |
+
+#### Returns
+
+`string`[]
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:121](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L121)
+
+---
+
+### getSentenceSplits
+
+▸ **getSentenceSplits**(`text`, `effectiveChunkSize?`): `string`[]
+
+#### Parameters
+
+| Name                  | Type     |
+| :-------------------- | :------- |
+| `text`                | `string` |
+| `effectiveChunkSize?` | `number` |
+
+#### Returns
+
+`string`[]
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:147](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L147)
+
+---
+
+### processSentenceSplits
+
+▸ `Private` **processSentenceSplits**(`sentenceSplits`, `effectiveChunkSize`): `SplitRep`[]
+
+Splits sentences into chunks if necessary.
+
+This is not ideal behavior: it can split down the middle of a word, or, in
+non-English text, down the middle of a Unicode codepoint, so this splitting
+is turned off by default. If you need it, set the splitLongSentences option
+to true.
+
+#### Parameters
+
+| Name                 | Type       |
+| :------------------- | :--------- |
+| `sentenceSplits`     | `string`[] |
+| `effectiveChunkSize` | `number`   |
+
+#### Returns
+
+`SplitRep`[]
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:176](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L176)
+
+---
+
+### splitText
+
+▸ **splitText**(`text`, `extraInfoStr?`): `string`[]
+
+#### Parameters
+
+| Name            | Type     |
+| :-------------- | :------- |
+| `text`          | `string` |
+| `extraInfoStr?` | `string` |
+
+#### Returns
+
+`string`[]
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:297](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L297)
+
+---
+
+### splitTextWithOverlaps
+
+▸ **splitTextWithOverlaps**(`text`, `extraInfoStr?`): `TextSplit`[]
+
+#### Parameters
+
+| Name            | Type     |
+| :-------------- | :------- |
+| `text`          | `string` |
+| `extraInfoStr?` | `string` |
+
+#### Returns
+
+`TextSplit`[]
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:269](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L269)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimilarityPostprocessor.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimilarityPostprocessor.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f3e135a7c04f854c10a7f241c8b44b1dd08960b
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimilarityPostprocessor.md
@@ -0,0 +1,62 @@
+---
+id: "SimilarityPostprocessor"
+title: "Class: SimilarityPostprocessor"
+sidebar_label: "SimilarityPostprocessor"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implements
+
+- [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)
+
+## Constructors
+
+### constructor
+
+• **new SimilarityPostprocessor**(`options?`)
+
+#### Parameters
+
+| Name                        | Type     |
+| :-------------------------- | :------- |
+| `options?`                  | `Object` |
+| `options.similarityCutoff?` | `number` |
+
+#### Defined in
+
+[packages/core/src/indices/BaseNodePostprocessor.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseNodePostprocessor.ts#L10)
+
+## Properties
+
+### similarityCutoff
+
+• `Optional` **similarityCutoff**: `number`
+
+#### Defined in
+
+[packages/core/src/indices/BaseNodePostprocessor.ts:8](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseNodePostprocessor.ts#L8)
+
+## Methods
+
+### postprocessNodes
+
+▸ **postprocessNodes**(`nodes`): [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Parameters
+
+| Name    | Type                                                                             |
+| :------ | :------------------------------------------------------------------------------- |
+| `nodes` | [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Implementation of
+
+[BaseNodePostprocessor](../interfaces/BaseNodePostprocessor.md).[postprocessNodes](../interfaces/BaseNodePostprocessor.md#postprocessnodes)
+
+#### Defined in
+
+[packages/core/src/indices/BaseNodePostprocessor.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseNodePostprocessor.ts#L14)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleChatEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleChatEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..a37f491c2ed9cbdaddb6a034da4df6499ef7e7dc
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleChatEngine.md
@@ -0,0 +1,125 @@
+---
+id: "SimpleChatEngine"
+title: "Class: SimpleChatEngine"
+sidebar_label: "SimpleChatEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+SimpleChatEngine is the simplest possible chat engine. Useful when you want to use your own custom prompts.
+
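+A sketch of a two-turn conversation (it assumes an `OPENAI_API_KEY` in the environment; the messages are illustrative):
+
+```typescript
+import { OpenAI, SimpleChatEngine } from "llamaindex";
+
+// The engine keeps its own history, so the second turn sees the first.
+const chatEngine = new SimpleChatEngine({ llm: new OpenAI() });
+const first = await chatEngine.chat("My name is Ada.");
+console.log(first.toString());
+
+// Pass the streaming flag to receive string deltas instead of a Response.
+const stream = await chatEngine.chat("What is my name?", undefined, true);
+for await (const delta of stream) {
+  process.stdout.write(delta);
+}
+```
+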
+## Implements
+
+- [`ChatEngine`](../interfaces/ChatEngine.md)
+
+## Constructors
+
+### constructor
+
+• **new SimpleChatEngine**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                                  |
+| :------ | :---------------------------------------------------- |
+| `init?` | `Partial`<[`SimpleChatEngine`](SimpleChatEngine.md)\> |
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L51)
+
+## Properties
+
+### chatHistory
+
+• **chatHistory**: [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L48)
+
+---
+
+### llm
+
+• **llm**: [`LLM`](../interfaces/LLM.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:49](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L49)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`message`, `chatHistory?`, `streaming?`): `Promise`<`R`\>
+
+Sends a message, along with the class's current chat history, to the LLM.
+
+#### Type parameters
+
+| Name | Type                                                                                            |
+| :--- | :---------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                  |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`Response`](Response.md) |
+
+#### Parameters
+
+| Name           | Type                                            | Description                                                        |
+| :------------- | :---------------------------------------------- | :----------------------------------------------------------------- |
+| `message`      | [`MessageContent`](../#messagecontent)          |                                                                    |
+| `chatHistory?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] | optional chat history to use in place of the engine's current history                      |
+| `streaming?`   | `T`                                             | optional streaming flag; when true, the return type becomes an async generator of strings |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Implementation of
+
+[ChatEngine](../interfaces/ChatEngine.md).[chat](../interfaces/ChatEngine.md#chat)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L56)
+
+---
+
+### reset
+
+▸ **reset**(): `void`
+
+Resets the chat history so that it's empty.
+
+#### Returns
+
+`void`
+
+#### Implementation of
+
+[ChatEngine](../interfaces/ChatEngine.md).[reset](../interfaces/ChatEngine.md#reset)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:101](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L101)
+
+---
+
+### streamChat
+
+▸ `Protected` **streamChat**(`message`, `chatHistory?`): `AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Parameters
+
+| Name           | Type                                            |
+| :------------- | :---------------------------------------------- |
+| `message`      | [`MessageContent`](../#messagecontent)          |
+| `chatHistory?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`AsyncGenerator`<`string`, `void`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:78](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L78)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleChatHistory.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleChatHistory.md
new file mode 100644
index 0000000000000000000000000000000000000000..76c4e9f4732bfbde9f770e3eb957933e273b6151
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleChatHistory.md
@@ -0,0 +1,145 @@
+---
+id: "SimpleChatHistory"
+title: "Class: SimpleChatHistory"
+sidebar_label: "SimpleChatHistory"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A ChatHistory is used to keep the state of back-and-forth chat messages.
+
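+A small sketch of manual bookkeeping with the methods documented below (the message contents are illustrative):
+
+```typescript
+import { SimpleChatHistory } from "llamaindex";
+
+// Record a short exchange, then read back what would be sent to the LLM.
+const history = new SimpleChatHistory();
+history.addMessage({ role: "user", content: "What is RAG?" });
+history.addMessage({ role: "assistant", content: "Retrieval augmented generation." });
+
+console.log(await history.requestMessages()); // both messages
+console.log(history.newMessages()); // messages added since the last call
+```
+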
+## Implements
+
+- [`ChatHistory`](../interfaces/ChatHistory.md)
+
+## Constructors
+
+### constructor
+
+• **new SimpleChatHistory**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                                    |
+| :------ | :------------------------------------------------------ |
+| `init?` | `Partial`<[`SimpleChatHistory`](SimpleChatHistory.md)\> |
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:39](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L39)
+
+## Properties
+
+### messages
+
+• **messages**: [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[messages](../interfaces/ChatHistory.md#messages)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:36](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L36)
+
+---
+
+### messagesBefore
+
+• `Private` **messagesBefore**: `number`
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:37](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L37)
+
+## Methods
+
+### addMessage
+
+▸ **addMessage**(`message`): `void`
+
+Adds a message to the chat history.
+
+#### Parameters
+
+| Name      | Type                                          |
+| :-------- | :-------------------------------------------- |
+| `message` | [`ChatMessage`](../interfaces/ChatMessage.md) |
+
+#### Returns
+
+`void`
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[addMessage](../interfaces/ChatHistory.md#addmessage)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L44)
+
+---
+
+### newMessages
+
+▸ **newMessages**(): [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+Returns the new messages since the last call to this function (or since calling the constructor).
+
+#### Returns
+
+[`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[newMessages](../interfaces/ChatHistory.md#newmessages)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L56)
+
+---
+
+### requestMessages
+
+▸ **requestMessages**(`transientMessages?`): `Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)[]\>
+
+Returns the messages that should be used as input to the LLM.
+
+#### Parameters
+
+| Name                 | Type                                            |
+| :------------------- | :---------------------------------------------- |
+| `transientMessages?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)[]\>
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[requestMessages](../interfaces/ChatHistory.md#requestmessages)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L48)
+
+---
+
+### reset
+
+▸ **reset**(): `void`
+
+Resets the chat history so that it's empty.
+
+#### Returns
+
+`void`
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[reset](../interfaces/ChatHistory.md#reset)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L52)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleDirectoryReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleDirectoryReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..1320d9684ab063bfb215c87dc885fdf4dd455c16
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleDirectoryReader.md
@@ -0,0 +1,88 @@
+---
+id: "SimpleDirectoryReader"
+title: "Class: SimpleDirectoryReader"
+sidebar_label: "SimpleDirectoryReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Reads all of the documents in a directory.
+By default, it supports the file types listed
+in the FILE_EXT_TO_READER map.
+
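+A minimal sketch (the `./data` path is illustrative; point it at your own directory):
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+// Load every supported file under ./data into Document objects.
+const reader = new SimpleDirectoryReader();
+const documents = await reader.loadData({ directoryPath: "./data" });
+console.log(`Loaded ${documents.length} document(s)`);
+```
+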
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new SimpleDirectoryReader**(`observer?`)
+
+#### Parameters
+
+| Name        | Type             |
+| :---------- | :--------------- |
+| `observer?` | `ReaderCallback` |
+
+#### Defined in
+
+[packages/core/src/readers/SimpleDirectoryReader.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleDirectoryReader.ts#L65)
+
+## Properties
+
+### observer
+
+• `Private` `Optional` **observer**: `ReaderCallback`
+
+#### Defined in
+
+[packages/core/src/readers/SimpleDirectoryReader.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleDirectoryReader.ts#L65)
+
+## Methods
+
+### doObserverCheck
+
+▸ `Private` **doObserverCheck**(`category`, `name`, `status`, `message?`): `boolean`
+
+#### Parameters
+
+| Name       | Type                      |
+| :--------- | :------------------------ |
+| `category` | `"file"` \| `"directory"` |
+| `name`     | `string`                  |
+| `status`   | `ReaderStatus`            |
+| `message?` | `string`                  |
+
+#### Returns
+
+`boolean`
+
+#### Defined in
+
+[packages/core/src/readers/SimpleDirectoryReader.ts:135](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleDirectoryReader.ts#L135)
+
+---
+
+### loadData
+
+▸ **loadData**(`«destructured»`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name             | Type                                                                           |
+| :--------------- | :----------------------------------------------------------------------------- |
+| `«destructured»` | [`SimpleDirectoryReaderLoadDataProps`](../#simpledirectoryreaderloaddataprops) |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/SimpleDirectoryReader.ts:67](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleDirectoryReader.ts#L67)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleDocumentStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleDocumentStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..8ed4f22749038ac8c4ccf1b6b23c460c4666d702
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleDocumentStore.md
@@ -0,0 +1,480 @@
+---
+id: "SimpleDocumentStore"
+title: "Class: SimpleDocumentStore"
+sidebar_label: "SimpleDocumentStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- `KVDocumentStore`
+
+  ↳ **`SimpleDocumentStore`**
+
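+A round-trip sketch using the inherited `addDocuments` and the persistence helpers documented below (the storage path is illustrative):
+
+```typescript
+import { Document, SimpleDocumentStore } from "llamaindex";
+
+// Store a document, write the store to disk, then load it back.
+const docStore = new SimpleDocumentStore();
+await docStore.addDocuments([new Document({ text: "hello" })], true);
+await docStore.persist("./storage/doc_store.json");
+
+const restored = await SimpleDocumentStore.fromPersistPath("./storage/doc_store.json");
+console.log(Object.keys(await restored.docs()).length); // 1
+```
+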
+## Constructors
+
+### constructor
+
+• **new SimpleDocumentStore**(`kvStore?`, `namespace?`)
+
+#### Parameters
+
+| Name         | Type                                |
+| :----------- | :---------------------------------- |
+| `kvStore?`   | [`SimpleKVStore`](SimpleKVStore.md) |
+| `namespace?` | `string`                            |
+
+#### Overrides
+
+KVDocumentStore.constructor
+
+#### Defined in
+
+[packages/core/src/storage/docStore/SimpleDocumentStore.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/SimpleDocumentStore.ts#L19)
+
+## Properties
+
+### kvStore
+
+• `Private` **kvStore**: [`SimpleKVStore`](SimpleKVStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/docStore/SimpleDocumentStore.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/SimpleDocumentStore.ts#L17)
+
+## Methods
+
+### addDocuments
+
+▸ **addDocuments**(`docs`, `allowUpdate?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                     | Default value |
+| :------------ | :------------------------------------------------------- | :------------ |
+| `docs`        | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] | `undefined`   |
+| `allowUpdate` | `boolean`                                                | `true`        |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+KVDocumentStore.addDocuments
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L33)
+
+---
+
+### deleteDocument
+
+▸ **deleteDocument**(`docId`, `raiseError?`, `removeRefDocNode?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name               | Type      | Default value |
+| :----------------- | :-------- | :------------ |
+| `docId`            | `string`  | `undefined`   |
+| `raiseError`       | `boolean` | `true`        |
+| `removeRefDocNode` | `boolean` | `true`        |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+KVDocumentStore.deleteDocument
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:131](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L131)
+
+---
+
+### deleteRefDoc
+
+▸ **deleteRefDoc**(`refDocId`, `raiseError?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name         | Type      | Default value |
+| :----------- | :-------- | :------------ |
+| `refDocId`   | `string`  | `undefined`   |
+| `raiseError` | `boolean` | `true`        |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+KVDocumentStore.deleteRefDoc
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:148](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L148)
+
+---
+
+### docs
+
+▸ **docs**(): `Promise`<`Record`<`string`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Returns
+
+`Promise`<`Record`<`string`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Inherited from
+
+KVDocumentStore.docs
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L24)
+
+---
+
+### documentExists
+
+▸ **documentExists**(`docId`): `Promise`<`boolean`\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `docId` | `string` |
+
+#### Returns
+
+`Promise`<`boolean`\>
+
+#### Inherited from
+
+KVDocumentStore.documentExists
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:105](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L105)
+
+---
+
+### getAllRefDocInfo
+
+▸ **getAllRefDocInfo**(): `Promise`<`undefined` \| `Record`<`string`, [`RefDocInfo`](../interfaces/RefDocInfo.md)\>\>
+
+#### Returns
+
+`Promise`<`undefined` \| `Record`<`string`, [`RefDocInfo`](../interfaces/RefDocInfo.md)\>\>
+
+#### Inherited from
+
+KVDocumentStore.getAllRefDocInfo
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:93](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L93)
+
+---
+
+### getDocument
+
+▸ **getDocument**(`docId`, `raiseError?`): `Promise`<`undefined` \| [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Parameters
+
+| Name         | Type      | Default value |
+| :----------- | :-------- | :------------ |
+| `docId`      | `string`  | `undefined`   |
+| `raiseError` | `boolean` | `true`        |
+
+#### Returns
+
+`Promise`<`undefined` \| [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Inherited from
+
+KVDocumentStore.getDocument
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:73](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L73)
+
+---
+
+### getDocumentHash
+
+▸ **getDocumentHash**(`docId`): `Promise`<`undefined` \| `string`\>
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `docId` | `string` |
+
+#### Returns
+
+`Promise`<`undefined` \| `string`\>
+
+#### Inherited from
+
+KVDocumentStore.getDocumentHash
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:174](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L174)
+
+---
+
+### getNode
+
+▸ **getNode**(`nodeId`, `raiseError?`): `Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Parameters
+
+| Name         | Type      | Default value |
+| :----------- | :-------- | :------------ |
+| `nodeId`     | `string`  | `undefined`   |
+| `raiseError` | `boolean` | `true`        |
+
+#### Returns
+
+`Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>
+
+#### Inherited from
+
+KVDocumentStore.getNode
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L57)
+
+---
+
+### getNodeDict
+
+▸ **getNodeDict**(`nodeIdDict`): `Promise`<`Record`<`number`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Parameters
+
+| Name         | Type     |
+| :----------- | :------- |
+| `nodeIdDict` | `Object` |
+
+#### Returns
+
+`Promise`<`Record`<`number`, [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>\>\>
+
+#### Inherited from
+
+KVDocumentStore.getNodeDict
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L65)
+
+---
+
+### getNodes
+
+▸ **getNodes**(`nodeIds`, `raiseError?`): `Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name         | Type       | Default value |
+| :----------- | :--------- | :------------ |
+| `nodeIds`    | `string`[] | `undefined`   |
+| `raiseError` | `boolean`  | `true`        |
+
+#### Returns
+
+`Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Inherited from
+
+KVDocumentStore.getNodes
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L51)
+
+---
+
+### getRefDocInfo
+
+▸ **getRefDocInfo**(`refDocId`): `Promise`<`undefined` \| [`RefDocInfo`](../interfaces/RefDocInfo.md)\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `refDocId` | `string` |
+
+#### Returns
+
+`Promise`<`undefined` \| [`RefDocInfo`](../interfaces/RefDocInfo.md)\>
+
+#### Inherited from
+
+KVDocumentStore.getRefDocInfo
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:88](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L88)
+
+---
+
+### persist
+
+▸ **persist**(`persistPath?`, `fs?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+KVDocumentStore.persist
+
+#### Defined in
+
+[packages/core/src/storage/docStore/SimpleDocumentStore.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/SimpleDocumentStore.ts#L52)
+
+---
+
+### refDocExists
+
+▸ **refDocExists**(`refDocId`): `Promise`<`boolean`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `refDocId` | `string` |
+
+#### Returns
+
+`Promise`<`boolean`\>
+
+#### Inherited from
+
+KVDocumentStore.refDocExists
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:101](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L101)
+
+---
+
+### setDocumentHash
+
+▸ **setDocumentHash**(`docId`, `docHash`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name      | Type     |
+| :-------- | :------- |
+| `docId`   | `string` |
+| `docHash` | `string` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+KVDocumentStore.setDocumentHash
+
+#### Defined in
+
+[packages/core/src/storage/docStore/KVDocumentStore.ts:169](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/KVDocumentStore.ts#L169)
+
+---
+
+### toDict
+
+▸ **toDict**(): `SaveDict`
+
+#### Returns
+
+`SaveDict`
+
+#### Defined in
+
+[packages/core/src/storage/docStore/SimpleDocumentStore.ts:73](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/SimpleDocumentStore.ts#L73)
+
+---
+
+### fromDict
+
+▸ `Static` **fromDict**(`saveDict`, `namespace?`): [`SimpleDocumentStore`](SimpleDocumentStore.md)
+
+#### Parameters
+
+| Name         | Type       |
+| :----------- | :--------- |
+| `saveDict`   | `SaveDict` |
+| `namespace?` | `string`   |
+
+#### Returns
+
+[`SimpleDocumentStore`](SimpleDocumentStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/docStore/SimpleDocumentStore.ts:68](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/SimpleDocumentStore.ts#L68)
+
+---
+
+### fromPersistDir
+
+▸ `Static` **fromPersistDir**(`persistDir?`, `namespace?`, `fsModule?`): `Promise`<[`SimpleDocumentStore`](SimpleDocumentStore.md)\>
+
+#### Parameters
+
+| Name         | Type                                                      | Default value         |
+| :----------- | :-------------------------------------------------------- | :-------------------- |
+| `persistDir` | `string`                                                  | `DEFAULT_PERSIST_DIR` |
+| `namespace?` | `string`                                                  | `undefined`           |
+| `fsModule?`  | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `undefined`           |
+
+#### Returns
+
+`Promise`<[`SimpleDocumentStore`](SimpleDocumentStore.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/SimpleDocumentStore.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/SimpleDocumentStore.ts#L26)
+
+---
+
+### fromPersistPath
+
+▸ `Static` **fromPersistPath**(`persistPath`, `namespace?`, `fs?`): `Promise`<[`SimpleDocumentStore`](SimpleDocumentStore.md)\>
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `namespace?`  | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`Promise`<[`SimpleDocumentStore`](SimpleDocumentStore.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/SimpleDocumentStore.ts:42](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/SimpleDocumentStore.ts#L42)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleIndexStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleIndexStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..d6b64518f1ee568c4325ede2c54e74bad0022964
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleIndexStore.md
@@ -0,0 +1,234 @@
+---
+id: "SimpleIndexStore"
+title: "Class: SimpleIndexStore"
+sidebar_label: "SimpleIndexStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- `KVIndexStore`
+
+  ↳ **`SimpleIndexStore`**
+
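+A persistence sketch, under the assumption that the no-argument defaults of `persist` and `fromPersistDir` both point at `DEFAULT_PERSIST_DIR`:
+
+```typescript
+import { SimpleIndexStore } from "llamaindex";
+
+// Write index structs to the default persist directory and reload them.
+const indexStore = new SimpleIndexStore();
+await indexStore.persist(); // defaults to DEFAULT_PERSIST_DIR
+const restored = await SimpleIndexStore.fromPersistDir();
+console.log(await restored.getIndexStructs());
+```
+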
+## Constructors
+
+### constructor
+
+• **new SimpleIndexStore**(`kvStore?`)
+
+#### Parameters
+
+| Name       | Type                                            |
+| :--------- | :---------------------------------------------- |
+| `kvStore?` | [`BaseInMemoryKVStore`](BaseInMemoryKVStore.md) |
+
+#### Overrides
+
+KVIndexStore.constructor
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/SimpleIndexStore.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/SimpleIndexStore.ts#L15)
+
+## Properties
+
+### kvStore
+
+• `Private` **kvStore**: [`BaseInMemoryKVStore`](BaseInMemoryKVStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/SimpleIndexStore.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/SimpleIndexStore.ts#L13)
+
+## Methods
+
+### addIndexStruct
+
+▸ **addIndexStruct**(`indexStruct`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                            |
+| :------------ | :------------------------------ |
+| `indexStruct` | [`IndexStruct`](IndexStruct.md) |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+KVIndexStore.addIndexStruct
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/KVIndexStore.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/KVIndexStore.ts#L17)
+
+---
+
+### deleteIndexStruct
+
+▸ **deleteIndexStruct**(`key`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name  | Type     |
+| :---- | :------- |
+| `key` | `string` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+KVIndexStore.deleteIndexStruct
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/KVIndexStore.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/KVIndexStore.ts#L23)
+
+---
+
+### getIndexStruct
+
+▸ **getIndexStruct**(`structId?`): `Promise`<`undefined` \| [`IndexStruct`](IndexStruct.md)\>
+
+#### Parameters
+
+| Name        | Type     |
+| :---------- | :------- |
+| `structId?` | `string` |
+
+#### Returns
+
+`Promise`<`undefined` \| [`IndexStruct`](IndexStruct.md)\>
+
+#### Inherited from
+
+KVIndexStore.getIndexStruct
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/KVIndexStore.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/KVIndexStore.ts#L27)
+
+---
+
+### getIndexStructs
+
+▸ **getIndexStructs**(): `Promise`<[`IndexStruct`](IndexStruct.md)[]\>
+
+#### Returns
+
+`Promise`<[`IndexStruct`](IndexStruct.md)[]\>
+
+#### Inherited from
+
+KVIndexStore.getIndexStructs
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/KVIndexStore.ts:43](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/KVIndexStore.ts#L43)
+
+---
+
+### persist
+
+▸ **persist**(`persistPath?`, `fs?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                      | Default value         |
+| :------------ | :-------------------------------------------------------- | :-------------------- |
+| `persistPath` | `string`                                                  | `DEFAULT_PERSIST_DIR` |
+| `fs`          | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`          |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+KVIndexStore.persist
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/SimpleIndexStore.ts:40](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/SimpleIndexStore.ts#L40)
+
+---
+
+### toDict
+
+▸ **toDict**(): `Record`<`string`, `unknown`\>
+
+#### Returns
+
+`Record`<`string`, `unknown`\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/SimpleIndexStore.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/SimpleIndexStore.ts#L52)
+
+---
+
+### fromDict
+
+▸ `Static` **fromDict**(`saveDict`): [`SimpleIndexStore`](SimpleIndexStore.md)
+
+#### Parameters
+
+| Name       | Type       |
+| :--------- | :--------- |
+| `saveDict` | `DataType` |
+
+#### Returns
+
+[`SimpleIndexStore`](SimpleIndexStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/SimpleIndexStore.ts:47](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/SimpleIndexStore.ts#L47)
+
+---
+
+### fromPersistDir
+
+▸ `Static` **fromPersistDir**(`persistDir?`, `fs?`): `Promise`<[`SimpleIndexStore`](SimpleIndexStore.md)\>
+
+#### Parameters
+
+| Name         | Type                                                      | Default value         |
+| :----------- | :-------------------------------------------------------- | :-------------------- |
+| `persistDir` | `string`                                                  | `DEFAULT_PERSIST_DIR` |
+| `fs`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`          |
+
+#### Returns
+
+`Promise`<[`SimpleIndexStore`](SimpleIndexStore.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/SimpleIndexStore.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/SimpleIndexStore.ts#L21)
+
+---
+
+### fromPersistPath
+
+▸ `Static` **fromPersistPath**(`persistPath`, `fs?`): `Promise`<[`SimpleIndexStore`](SimpleIndexStore.md)\>
+
+#### Parameters
+
+| Name          | Type                                                      | Default value |
+| :------------ | :-------------------------------------------------------- | :------------ |
+| `persistPath` | `string`                                                  | `undefined`   |
+| `fs`          | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`  |
+
+#### Returns
+
+`Promise`<[`SimpleIndexStore`](SimpleIndexStore.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/indexStore/SimpleIndexStore.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/indexStore/SimpleIndexStore.ts#L32)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleKVStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleKVStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..55ea07c969eefc694bcbbc16a1129f11997aae82
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleKVStore.md
@@ -0,0 +1,239 @@
+---
+id: "SimpleKVStore"
+title: "Class: SimpleKVStore"
+sidebar_label: "SimpleKVStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- [`BaseKVStore`](BaseKVStore.md)
+
+  ↳ **`SimpleKVStore`**
+
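+A minimal sketch of the key-value operations documented below (keys and values are illustrative):
+
+```typescript
+import { SimpleKVStore } from "llamaindex";
+
+// An in-memory store; put/get/delete operate on a default collection.
+const kv = new SimpleKVStore();
+await kv.put("greeting", { text: "hello" });
+console.log(await kv.get("greeting")); // { text: "hello" }
+console.log(await kv.getAll()); // the whole default collection
+await kv.delete("greeting");
+```
+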
+## Constructors
+
+### constructor
+
+• **new SimpleKVStore**(`data?`)
+
+#### Parameters
+
+| Name    | Type       |
+| :------ | :--------- |
+| `data?` | `DataType` |
+
+#### Overrides
+
+[BaseKVStore](BaseKVStore.md).[constructor](BaseKVStore.md#constructor)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L14)
+
+## Properties
+
+### data
+
+• `Private` **data**: `DataType`
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L10)
+
+---
+
+### fs
+
+• `Private` **fs**: `undefined` \| [`GenericFileSystem`](../interfaces/GenericFileSystem.md)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L12)
+
+---
+
+### persistPath
+
+• `Private` **persistPath**: `undefined` \| `string`
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L11)
+
+## Methods
+
+### delete
+
+▸ **delete**(`key`, `collection?`): `Promise`<`boolean`\>
+
+#### Parameters
+
+| Name         | Type     | Default value        |
+| :----------- | :------- | :------------------- |
+| `key`        | `string` | `undefined`          |
+| `collection` | `string` | `DEFAULT_COLLECTION` |
+
+#### Returns
+
+`Promise`<`boolean`\>
+
+#### Overrides
+
+[BaseKVStore](BaseKVStore.md).[delete](BaseKVStore.md#delete)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L52)
+
+---
+
+### get
+
+▸ **get**(`key`, `collection?`): `Promise`<`any`\>
+
+#### Parameters
+
+| Name         | Type     | Default value        |
+| :----------- | :------- | :------------------- |
+| `key`        | `string` | `undefined`          |
+| `collection` | `string` | `DEFAULT_COLLECTION` |
+
+#### Returns
+
+`Promise`<`any`\>
+
+#### Overrides
+
+[BaseKVStore](BaseKVStore.md).[get](BaseKVStore.md#get)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L34)
+
+---
+
+### getAll
+
+▸ **getAll**(`collection?`): `Promise`<`DataType`\>
+
+#### Parameters
+
+| Name         | Type     | Default value        |
+| :----------- | :------- | :------------------- |
+| `collection` | `string` | `DEFAULT_COLLECTION` |
+
+#### Returns
+
+`Promise`<`DataType`\>
+
+#### Overrides
+
+[BaseKVStore](BaseKVStore.md).[getAll](BaseKVStore.md#getall)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L48)
+
+---
+
+### persist
+
+▸ **persist**(`persistPath`, `fs?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:63](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L63)
+
+---
+
+### put
+
+▸ **put**(`key`, `val`, `collection?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name         | Type     | Default value        |
+| :----------- | :------- | :------------------- |
+| `key`        | `string` | `undefined`          |
+| `val`        | `any`    | `undefined`          |
+| `collection` | `string` | `DEFAULT_COLLECTION` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+[BaseKVStore](BaseKVStore.md).[put](BaseKVStore.md#put)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L19)
+
+---
+
+### toDict
+
+▸ **toDict**(): `DataType`
+
+#### Returns
+
+`DataType`
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:99](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L99)
+
+---
+
+### fromDict
+
+▸ `Static` **fromDict**(`saveDict`): [`SimpleKVStore`](SimpleKVStore.md)
+
+#### Parameters
+
+| Name       | Type       |
+| :--------- | :--------- |
+| `saveDict` | `DataType` |
+
+#### Returns
+
+[`SimpleKVStore`](SimpleKVStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:103](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L103)
+
+---
+
+### fromPersistPath
+
+▸ `Static` **fromPersistPath**(`persistPath`, `fs?`): `Promise`<[`SimpleKVStore`](SimpleKVStore.md)\>
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`Promise`<[`SimpleKVStore`](SimpleKVStore.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/kvStore/SimpleKVStore.ts:73](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/kvStore/SimpleKVStore.ts#L73)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleMongoReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleMongoReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..f5d158e44890bdceb37b9dfa733e973bb4f4476e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleMongoReader.md
@@ -0,0 +1,99 @@
+---
+id: "SimpleMongoReader"
+title: "Class: SimpleMongoReader"
+sidebar_label: "SimpleMongoReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Reads in documents from MongoDB.
+
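+A short sketch (the connection string, database, collection, and field names are all illustrative):
+
+```typescript
+import { MongoClient } from "mongodb";
+import { SimpleMongoReader } from "llamaindex";
+
+// Pull the "text" field from every document in mydb.articles.
+const client = new MongoClient("mongodb://localhost:27017");
+const reader = new SimpleMongoReader(client);
+const documents = await reader.loadData("mydb", "articles", ["text"]);
+console.log(`Loaded ${documents.length} document(s)`);
+```
+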
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new SimpleMongoReader**(`client`)
+
+#### Parameters
+
+| Name     | Type          |
+| :------- | :------------ |
+| `client` | `MongoClient` |
+
+#### Defined in
+
+[packages/core/src/readers/SimpleMongoReader.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleMongoReader.ts#L11)
+
+## Properties
+
+### client
+
+• `Private` **client**: `MongoClient`
+
+#### Defined in
+
+[packages/core/src/readers/SimpleMongoReader.ts:9](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleMongoReader.ts#L9)
+
+## Methods
+
+### flatten
+
+▸ `Private` **flatten**(`texts`): `string`[]
+
+Flattens an array of strings or string arrays into a single-dimensional array of strings.
+
+#### Parameters
+
+| Name    | Type                       | Description                                       |
+| :------ | :------------------------- | :------------------------------------------------ |
+| `texts` | (`string` \| `string`[])[] | The array of strings or string arrays to flatten. |
+
+#### Returns
+
+`string`[]
+
+The flattened array of strings.
+
+#### Defined in
+
+[packages/core/src/readers/SimpleMongoReader.ts:20](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleMongoReader.ts#L20)
+
+---
+
+### loadData
+
+▸ **loadData**(`dbName`, `collectionName`, `fieldNames?`, `separator?`, `filterQuery?`, `maxDocs?`, `metadataNames?`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+Loads data from a MongoDB collection.
+
+#### Parameters
+
+| Name             | Type                       | Default value | Description                                                                                    |
+| :--------------- | :------------------------- | :------------ | :--------------------------------------------------------------------------------------------- |
+| `dbName`         | `string`                   | `undefined`   | The name of the database to load.                                                              |
+| `collectionName` | `string`                   | `undefined`   | The name of the collection to load.                                                            |
+| `fieldNames`     | `string`[]                 | `undefined`   | An array of field names to retrieve from each document. Defaults to ["text"].                  |
+| `separator`      | `string`                   | `""`          | The separator to join multiple field values. Defaults to an empty string.                      |
+| `filterQuery`    | `Record`<`string`, `any`\> | `{}`          | Specific query, as specified by MongoDB NodeJS documentation.                                  |
+| `maxDocs`        | `number`                   | `0`           | The maximum number of documents to retrieve. Defaults to 0 (retrieve all documents).           |
+| `metadataNames?` | `string`[]                 | `undefined`   | An optional array of metadata field names. If specified, this information is extracted as metadata. |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+**`Throws`**
+
+If a field specified in fieldNames or metadataNames is not found in a MongoDB document.
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/SimpleMongoReader.ts:39](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleMongoReader.ts#L39)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleNodeParser.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleNodeParser.md
new file mode 100644
index 0000000000000000000000000000000000000000..f284dcd9e43c94fa7055f8fe0ba281d370515d43
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleNodeParser.md
@@ -0,0 +1,120 @@
+---
+id: "SimpleNodeParser"
+title: "Class: SimpleNodeParser"
+sidebar_label: "SimpleNodeParser"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+SimpleNodeParser is the default NodeParser. It splits documents into TextNodes using a splitter (by default, SentenceSplitter).
+
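+A short sketch using `fromDefaults` and `getNodesFromDocuments` as documented below (chunk sizes and sample text are illustrative):
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+// Split documents into TextNodes with the default SentenceSplitter.
+const nodeParser = SimpleNodeParser.fromDefaults({
+  chunkSize: 512,
+  chunkOverlap: 20,
+});
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "First sentence. Second sentence. Third sentence." }),
+]);
+console.log(nodes.length);
+```
+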
+## Implements
+
+- [`NodeParser`](../interfaces/NodeParser.md)
+
+## Constructors
+
+### constructor
+
+• **new SimpleNodeParser**(`init?`)
+
+#### Parameters
+
+| Name                       | Type                                      |
+| :------------------------- | :---------------------------------------- |
+| `init?`                    | `Object`                                  |
+| `init.chunkOverlap?`       | `number`                                  |
+| `init.chunkSize?`          | `number`                                  |
+| `init.includeMetadata?`    | `boolean`                                 |
+| `init.includePrevNextRel?` | `boolean`                                 |
+| `init.textSplitter?`       | [`SentenceSplitter`](SentenceSplitter.md) |
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:106](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L106)
+
+## Properties
+
+### includeMetadata
+
+• **includeMetadata**: `boolean`
+
+Whether to include metadata in the nodes.
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:100](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L100)
+
+---
+
+### includePrevNextRel
+
+• **includePrevNextRel**: `boolean`
+
+Whether to include previous and next relationships in the nodes.
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:104](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L104)
+
+---
+
+### textSplitter
+
+• **textSplitter**: [`SentenceSplitter`](SentenceSplitter.md)
+
+The text splitter to use.
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:96](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L96)
+
+## Methods
+
+### getNodesFromDocuments
+
+▸ **getNodesFromDocuments**(`documents`): ([`TextNode`](TextNode.md)<[`Metadata`](../#metadata)\> \| [`ImageDocument`](ImageDocument.md)<`any`\>)[]
+
+Generate Node objects from documents
+
+#### Parameters
+
+| Name        | Type                                                     |
+| :---------- | :------------------------------------------------------- |
+| `documents` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+([`TextNode`](TextNode.md)<[`Metadata`](../#metadata)\> \| [`ImageDocument`](ImageDocument.md)<`any`\>)[]
+
+#### Implementation of
+
+[NodeParser](../interfaces/NodeParser.md).[getNodesFromDocuments](../interfaces/NodeParser.md#getnodesfromdocuments)
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:137](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L137)
+
+---
+
+### fromDefaults
+
+▸ `Static` **fromDefaults**(`init?`): [`SimpleNodeParser`](SimpleNodeParser.md)
+
+#### Parameters
+
+| Name                       | Type      |
+| :------------------------- | :-------- |
+| `init?`                    | `Object`  |
+| `init.chunkOverlap?`       | `number`  |
+| `init.chunkSize?`          | `number`  |
+| `init.includeMetadata?`    | `boolean` |
+| `init.includePrevNextRel?` | `boolean` |
+
+#### Returns
+
+[`SimpleNodeParser`](SimpleNodeParser.md)
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:124](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L124)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleResponseBuilder.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleResponseBuilder.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e79026ac45eee6319360a514c0c5effda9fd42d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleResponseBuilder.md
@@ -0,0 +1,75 @@
+---
+id: "SimpleResponseBuilder"
+title: "Class: SimpleResponseBuilder"
+sidebar_label: "SimpleResponseBuilder"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A response builder that just concatenates responses.
+
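+A minimal sketch, assuming a default `ServiceContext` (the query and chunks are illustrative):
+
+```typescript
+import { serviceContextFromDefaults, SimpleResponseBuilder } from "llamaindex";
+
+const builder = new SimpleResponseBuilder(serviceContextFromDefaults());
+
+// Answer a query from raw text chunks, without building an index first.
+const answer = await builder.getResponse("What is a llama?", [
+  "Llamas are domesticated camelids from South America.",
+]);
+```
+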
+## Implements
+
+- `BaseResponseBuilder`
+
+## Constructors
+
+### constructor
+
+• **new SimpleResponseBuilder**(`serviceContext`)
+
+#### Parameters
+
+| Name             | Type                                                |
+| :--------------- | :-------------------------------------------------- |
+| `serviceContext` | [`ServiceContext`](../interfaces/ServiceContext.md) |
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:53](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L53)
+
+## Properties
+
+### llm
+
+• **llm**: [`LLM`](../interfaces/LLM.md)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L50)
+
+---
+
+### textQATemplate
+
+• **textQATemplate**: [`SimplePrompt`](../#simpleprompt)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L51)
+
+## Methods
+
+### getResponse
+
+▸ **getResponse**(`query`, `textChunks`, `parentEvent?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `textChunks`   | `string`[]                        |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Implementation of
+
+BaseResponseBuilder.getResponse
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L58)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleVectorStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleVectorStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..9562ce8d8cfc775cab73209cd28f13580073536e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SimpleVectorStore.md
@@ -0,0 +1,279 @@
+---
+id: "SimpleVectorStore"
+title: "Class: SimpleVectorStore"
+sidebar_label: "SimpleVectorStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implements
+
+- [`VectorStore`](../interfaces/VectorStore.md)
+
+## Constructors
+
+### constructor
+
+• **new SimpleVectorStore**(`data?`, `fs?`)
+
+#### Parameters
+
+| Name    | Type                                                      |
+| :------ | :-------------------------------------------------------- |
+| `data?` | `SimpleVectorStoreData`                                   |
+| `fs?`   | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:37](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L37)
+
+## Properties
+
+### data
+
+• `Private` **data**: `SimpleVectorStoreData`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L33)
+
+---
+
+### fs
+
+• `Private` **fs**: [`GenericFileSystem`](../interfaces/GenericFileSystem.md) = `DEFAULT_FS`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L34)
+
+---
+
+### persistPath
+
+• `Private` **persistPath**: `undefined` \| `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:35](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L35)
+
+---
+
+### storesText
+
+• **storesText**: `boolean` = `false`
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[storesText](../interfaces/VectorStore.md#storestext)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L32)
+
+## Accessors
+
+### client
+
+• `get` **client**(): `any`
+
+#### Returns
+
+`any`
+
+#### Implementation of
+
+VectorStore.client
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L50)
+
+## Methods
+
+### add
+
+▸ **add**(`embeddingResults`): `Promise`<`string`[]\>
+
+#### Parameters
+
+| Name               | Type                                                     |
+| :----------------- | :------------------------------------------------------- |
+| `embeddingResults` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[add](../interfaces/VectorStore.md#add)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L58)
+
+---
+
+### delete
+
+▸ **delete**(`refDocId`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `refDocId` | `string` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[delete](../interfaces/VectorStore.md#delete)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:77](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L77)
+
+---
+
+### get
+
+▸ **get**(`textId`): `Promise`<`number`[]\>
+
+#### Parameters
+
+| Name     | Type     |
+| :------- | :------- |
+| `textId` | `string` |
+
+#### Returns
+
+`Promise`<`number`[]\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L54)
+
+---
+
+### persist
+
+▸ **persist**(`persistPath?`, `fs?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L146)
+
+---
+
+### query
+
+▸ **query**(`query`): `Promise`<[`VectorStoreQueryResult`](../interfaces/VectorStoreQueryResult.md)\>
+
+#### Parameters
+
+| Name    | Type                                                    |
+| :------ | :------------------------------------------------------ |
+| `query` | [`VectorStoreQuery`](../interfaces/VectorStoreQuery.md) |
+
+#### Returns
+
+`Promise`<[`VectorStoreQueryResult`](../interfaces/VectorStoreQueryResult.md)\>
+
+#### Implementation of
+
+[VectorStore](../interfaces/VectorStore.md).[query](../interfaces/VectorStore.md#query)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:88](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L88)
+
+---
+
+### toDict
+
+▸ **toDict**(): `SimpleVectorStoreData`
+
+#### Returns
+
+`SimpleVectorStoreData`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:196](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L196)
+
+---
+
+### fromDict
+
+▸ `Static` **fromDict**(`saveDict`): [`SimpleVectorStore`](SimpleVectorStore.md)
+
+#### Parameters
+
+| Name       | Type                    |
+| :--------- | :---------------------- |
+| `saveDict` | `SimpleVectorStoreData` |
+
+#### Returns
+
+[`SimpleVectorStore`](SimpleVectorStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:189](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L189)
+
+---
+
+### fromPersistDir
+
+▸ `Static` **fromPersistDir**(`persistDir?`, `fs?`): `Promise`<[`SimpleVectorStore`](SimpleVectorStore.md)\>
+
+#### Parameters
+
+| Name         | Type                                                      | Default value         |
+| :----------- | :-------------------------------------------------------- | :-------------------- |
+| `persistDir` | `string`                                                  | `DEFAULT_PERSIST_DIR` |
+| `fs`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) | `DEFAULT_FS`          |
+
+#### Returns
+
+`Promise`<[`SimpleVectorStore`](SimpleVectorStore.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:42](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L42)
+
+---
+
+### fromPersistPath
+
+▸ `Static` **fromPersistPath**(`persistPath`, `fs?`): `Promise`<[`SimpleVectorStore`](SimpleVectorStore.md)\>
+
+#### Parameters
+
+| Name          | Type                                                      |
+| :------------ | :-------------------------------------------------------- |
+| `persistPath` | `string`                                                  |
+| `fs?`         | [`GenericFileSystem`](../interfaces/GenericFileSystem.md) |
+
+#### Returns
+
+`Promise`<[`SimpleVectorStore`](SimpleVectorStore.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/SimpleVectorStore.ts:159](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/SimpleVectorStore.ts#L159)
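+
+A persistence round-trip sketch; it assumes `fromPersistDir` starts empty when nothing has been persisted yet, and the paths and embedding values are illustrative:
+
+```typescript
+import { SimpleVectorStore, TextNode } from "llamaindex";
+
+const store = await SimpleVectorStore.fromPersistDir("./storage");
+
+// Nodes must already carry embeddings before they are added.
+const node = new TextNode({ text: "hello", embedding: [0.1, 0.2, 0.3] });
+await store.add([node]);
+await store.persist("./storage/vector_store.json");
+```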
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SubQuestionOutputParser.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SubQuestionOutputParser.md
new file mode 100644
index 0000000000000000000000000000000000000000..b4930f5ee5224eac5ee94edb2bdfd93b69f9c380
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SubQuestionOutputParser.md
@@ -0,0 +1,67 @@
+---
+id: "SubQuestionOutputParser"
+title: "Class: SubQuestionOutputParser"
+sidebar_label: "SubQuestionOutputParser"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+SubQuestionOutputParser is used to parse the output of the SubQuestionGenerator.
+
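+A parsing sketch; the JSON payload is a hypothetical example of the expected shape, wrapped in the fenced block the parser strips:
+
+```typescript
+import { SubQuestionOutputParser } from "llamaindex";
+
+const parser = new SubQuestionOutputParser();
+const raw = [
+  "```json",
+  '[{ "subQuestion": "What is X?", "toolName": "docs" }]',
+  "```",
+].join("\n");
+
+// parsedOutput is a SubQuestion[]; the raw completion is kept alongside it.
+const { parsedOutput } = parser.parse(raw);
+```
+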
+## Implements
+
+- [`BaseOutputParser`](../interfaces/BaseOutputParser.md)<[`StructuredOutput`](../interfaces/StructuredOutput.md)<[`SubQuestion`](../interfaces/SubQuestion.md)[]\>\>
+
+## Constructors
+
+### constructor
+
+• **new SubQuestionOutputParser**()
+
+## Methods
+
+### format
+
+▸ **format**(`output`): `string`
+
+#### Parameters
+
+| Name     | Type     |
+| :------- | :------- |
+| `output` | `string` |
+
+#### Returns
+
+`string`
+
+#### Implementation of
+
+[BaseOutputParser](../interfaces/BaseOutputParser.md).[format](../interfaces/BaseOutputParser.md#format)
+
+#### Defined in
+
+[packages/core/src/OutputParser.ts:98](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/OutputParser.ts#L98)
+
+---
+
+### parse
+
+▸ **parse**(`output`): [`StructuredOutput`](../interfaces/StructuredOutput.md)<[`SubQuestion`](../interfaces/SubQuestion.md)[]\>
+
+#### Parameters
+
+| Name     | Type     |
+| :------- | :------- |
+| `output` | `string` |
+
+#### Returns
+
+[`StructuredOutput`](../interfaces/StructuredOutput.md)<[`SubQuestion`](../interfaces/SubQuestion.md)[]\>
+
+#### Implementation of
+
+[BaseOutputParser](../interfaces/BaseOutputParser.md).[parse](../interfaces/BaseOutputParser.md#parse)
+
+#### Defined in
+
+[packages/core/src/OutputParser.ts:90](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/OutputParser.ts#L90)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SubQuestionQueryEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SubQuestionQueryEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..aaad04c50c07bdc309ac9eb41683b4acae17ddd5
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SubQuestionQueryEngine.md
@@ -0,0 +1,143 @@
+---
+id: "SubQuestionQueryEngine"
+title: "Class: SubQuestionQueryEngine"
+sidebar_label: "SubQuestionQueryEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+SubQuestionQueryEngine decomposes a question into subquestions, answers each subquestion with the relevant query engine tool, and then synthesizes the individual answers into a final response.
+
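+A hedged end-to-end sketch (the document text, tool name, and description are illustrative):
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "Llamas live in the Andes. Alpacas are their smaller cousins." }),
+]);
+
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: { name: "camelid_facts", description: "Facts about llamas and alpacas." },
+    },
+  ],
+});
+
+const response = await queryEngine.query("How do llamas and alpacas differ?");
+```
+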
+## Implements
+
+- [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+## Constructors
+
+### constructor
+
+• **new SubQuestionQueryEngine**(`init`)
+
+#### Parameters
+
+| Name                       | Type                                                              |
+| :------------------------- | :---------------------------------------------------------------- |
+| `init`                     | `Object`                                                          |
+| `init.queryEngineTools`    | [`QueryEngineTool`](../interfaces/QueryEngineTool.md)[]           |
+| `init.questionGen`         | [`BaseQuestionGenerator`](../interfaces/BaseQuestionGenerator.md) |
+| `init.responseSynthesizer` | [`ResponseSynthesizer`](ResponseSynthesizer.md)                   |
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:89](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L89)
+
+## Properties
+
+### metadatas
+
+• **metadatas**: [`ToolMetadata`](../interfaces/ToolMetadata.md)[]
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:87](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L87)
+
+---
+
+### queryEngines
+
+• **queryEngines**: `Record`<`string`, [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)\>
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:86](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L86)
+
+---
+
+### questionGen
+
+• **questionGen**: [`BaseQuestionGenerator`](../interfaces/BaseQuestionGenerator.md)
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:85](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L85)
+
+---
+
+### responseSynthesizer
+
+• **responseSynthesizer**: [`ResponseSynthesizer`](ResponseSynthesizer.md)
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:84](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L84)
+
+## Methods
+
+### query
+
+▸ **query**(`query`): `Promise`<[`Response`](Response.md)\>
+
+Query the query engine and get a response.
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `query` | `string` |
+
+#### Returns
+
+`Promise`<[`Response`](Response.md)\>
+
+#### Implementation of
+
+[BaseQueryEngine](../interfaces/BaseQueryEngine.md).[query](../interfaces/BaseQueryEngine.md#query)
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:130](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L130)
+
+---
+
+### querySubQ
+
+▸ `Private` **querySubQ**(`subQ`, `parentEvent?`): `Promise`<`null` \| [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>\>
+
+#### Parameters
+
+| Name           | Type                                          |
+| :------------- | :-------------------------------------------- |
+| `subQ`         | [`SubQuestion`](../interfaces/SubQuestion.md) |
+| `parentEvent?` | [`Event`](../interfaces/Event.md)             |
+
+#### Returns
+
+`Promise`<`null` \| [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>\>
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:158](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L158)
+
+---
+
+### fromDefaults
+
+▸ `Static` **fromDefaults**(`init`): [`SubQuestionQueryEngine`](SubQuestionQueryEngine.md)
+
+#### Parameters
+
+| Name                        | Type                                                              |
+| :-------------------------- | :---------------------------------------------------------------- |
+| `init`                      | `Object`                                                          |
+| `init.queryEngineTools`     | [`QueryEngineTool`](../interfaces/QueryEngineTool.md)[]           |
+| `init.questionGen?`         | [`BaseQuestionGenerator`](../interfaces/BaseQuestionGenerator.md) |
+| `init.responseSynthesizer?` | [`ResponseSynthesizer`](ResponseSynthesizer.md)                   |
+| `init.serviceContext?`      | [`ServiceContext`](../interfaces/ServiceContext.md)               |
+
+#### Returns
+
+[`SubQuestionQueryEngine`](SubQuestionQueryEngine.md)
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:106](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L106)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryChatHistory.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryChatHistory.md
new file mode 100644
index 0000000000000000000000000000000000000000..e676373f6125c1cf17a831c5d35c22c57c2a5a4e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryChatHistory.md
@@ -0,0 +1,289 @@
+---
+id: "SummaryChatHistory"
+title: "Class: SummaryChatHistory"
+sidebar_label: "SummaryChatHistory"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A ChatHistory that keeps the state of back-and-forth chat messages and summarizes older messages once the conversation grows past the token budget (`tokensToSummarize`).
+
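+A minimal sketch (the LLM choice is illustrative; older turns are folded into a summary once `tokensToSummarize` is exceeded):
+
+```typescript
+import { OpenAI, SummaryChatHistory } from "llamaindex";
+
+const chatHistory = new SummaryChatHistory({ llm: new OpenAI() });
+chatHistory.addMessage({ role: "user", content: "Hello!" });
+
+// The messages to send to the LLM, with a summary standing in for older turns.
+const messages = await chatHistory.requestMessages();
+```
+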
+## Implements
+
+- [`ChatHistory`](../interfaces/ChatHistory.md)
+
+## Constructors
+
+### constructor
+
+• **new SummaryChatHistory**(`init?`)
+
+#### Parameters
+
+| Name    | Type                                                      |
+| :------ | :-------------------------------------------------------- |
+| `init?` | `Partial`<[`SummaryChatHistory`](SummaryChatHistory.md)\> |
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L70)
+
+## Properties
+
+### llm
+
+• **llm**: [`LLM`](../interfaces/LLM.md)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:67](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L67)
+
+---
+
+### messages
+
+• **messages**: [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[messages](../interfaces/ChatHistory.md#messages)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L65)
+
+---
+
+### messagesBefore
+
+• `Private` **messagesBefore**: `number`
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:68](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L68)
+
+---
+
+### summaryPrompt
+
+• **summaryPrompt**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:66](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L66)
+
+---
+
+### tokensToSummarize
+
+• **tokensToSummarize**: `number`
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L64)
+
+## Accessors
+
+### nonSystemMessages
+
+• `Private` `get` **nonSystemMessages**(): [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Returns
+
+[`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:127](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L127)
+
+---
+
+### systemMessages
+
+• `Private` `get` **systemMessages**(): [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Returns
+
+[`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:122](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L122)
+
+## Methods
+
+### addMessage
+
+▸ **addMessage**(`message`): `void`
+
+Adds a message to the chat history.
+
+#### Parameters
+
+| Name      | Type                                          |
+| :-------- | :-------------------------------------------- |
+| `message` | [`ChatMessage`](../interfaces/ChatMessage.md) |
+
+#### Returns
+
+`void`
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[addMessage](../interfaces/ChatHistory.md#addmessage)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:106](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L106)
+
+---
+
+### calcConversationMessages
+
+▸ `Private` **calcConversationMessages**(`transformSummary?`): [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+Calculates the messages that describe the conversation so far.
+If there is no summary yet, all non-system messages are used.
+If there is a summary, all messages after the last summary message are used.
+
+#### Parameters
+
+| Name                | Type      |
+| :------------------ | :-------- |
+| `transformSummary?` | `boolean` |
+
+#### Returns
+
+[`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:137](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L137)
+
+---
+
+### calcCurrentRequestMessages
+
+▸ `Private` **calcCurrentRequestMessages**(`transientMessages?`): [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Parameters
+
+| Name                 | Type                                            |
+| :------------------- | :---------------------------------------------- |
+| `transientMessages?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+[`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L155)
+
+---
+
+### getLastSummaryIndex
+
+▸ `Private` **getLastSummaryIndex**(): `null` \| `number`
+
+#### Returns
+
+`null` \| `number`
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:111](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L111)
+
+---
+
+### newMessages
+
+▸ **newMessages**(): [`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+Returns the new messages since the last call to this function (or since calling the constructor).
+
+#### Returns
+
+[`ChatMessage`](../interfaces/ChatMessage.md)[]
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[newMessages](../interfaces/ChatHistory.md#newmessages)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:195](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L195)
+
+---
+
+### requestMessages
+
+▸ **requestMessages**(`transientMessages?`): `Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)[]\>
+
+Returns the messages that should be used as input to the LLM.
+
+#### Parameters
+
+| Name                 | Type                                            |
+| :------------------- | :---------------------------------------------- |
+| `transientMessages?` | [`ChatMessage`](../interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)[]\>
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[requestMessages](../interfaces/ChatHistory.md#requestmessages)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:165](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L165)
+
+---
+
+### reset
+
+▸ **reset**(): `void`
+
+Resets the chat history so that it's empty.
+
+#### Returns
+
+`void`
+
+#### Implementation of
+
+[ChatHistory](../interfaces/ChatHistory.md).[reset](../interfaces/ChatHistory.md#reset)
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:191](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L191)
+
+---
+
+### summarize
+
+▸ `Private` **summarize**(): `Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)\>
+
+#### Returns
+
+`Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)\>
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:84](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L84)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndex.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndex.md
new file mode 100644
index 0000000000000000000000000000000000000000..4a44a43d251ceae04d069617ef8380c17e162c22
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndex.md
@@ -0,0 +1,352 @@
+---
+id: "SummaryIndex"
+title: "Class: SummaryIndex"
+sidebar_label: "SummaryIndex"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A SummaryIndex keeps nodes in a sequential order for use with summarization.
+
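+The canonical end-to-end flow (the sample text and query are illustrative):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+const index = await SummaryIndex.fromDocuments([
+  new Document({ text: "Llamas were domesticated in the Andes around 4,500 years ago." }),
+]);
+
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Summarize the text.");
+console.log(response.toString());
+```
+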
+## Hierarchy
+
+- [`BaseIndex`](BaseIndex.md)<[`IndexList`](IndexList.md)\>
+
+  ↳ **`SummaryIndex`**
+
+## Constructors
+
+### constructor
+
+• **new SummaryIndex**(`init`)
+
+#### Parameters
+
+| Name   | Type                                                                            |
+| :----- | :------------------------------------------------------------------------------ |
+| `init` | [`BaseIndexInit`](../interfaces/BaseIndexInit.md)<[`IndexList`](IndexList.md)\> |
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[constructor](BaseIndex.md#constructor)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L48)
+
+## Properties
+
+### docStore
+
+• **docStore**: [`BaseDocumentStore`](BaseDocumentStore.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[docStore](BaseIndex.md#docstore)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L156)
+
+---
+
+### indexStore
+
+• `Optional` **indexStore**: [`BaseIndexStore`](BaseIndexStore.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[indexStore](BaseIndex.md#indexstore)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:158](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L158)
+
+---
+
+### indexStruct
+
+• **indexStruct**: [`IndexList`](IndexList.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[indexStruct](BaseIndex.md#indexstruct)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:159](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L159)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[serviceContext](BaseIndex.md#servicecontext)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:154](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L154)
+
+---
+
+### storageContext
+
+• **storageContext**: [`StorageContext`](../interfaces/StorageContext.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[storageContext](BaseIndex.md#storagecontext)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L155)
+
+---
+
+### vectorStore
+
+• `Optional` **vectorStore**: [`VectorStore`](../interfaces/VectorStore.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[vectorStore](BaseIndex.md#vectorstore)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L157)
+
+## Methods
+
+### asQueryEngine
+
+▸ **asQueryEngine**(`options?`): [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+Create a new query engine from the index. It will also create a retriever
+and response synthesizer if they are not provided.
+
+#### Parameters
+
+| Name                           | Type                                                                | Description                                                      |
+| :----------------------------- | :------------------------------------------------------------------ | :--------------------------------------------------------------- |
+| `options?`                     | `Object`                                                            | you can supply your own custom Retriever and ResponseSynthesizer |
+| `options.nodePostprocessors?`  | [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[] | -                                                                |
+| `options.preFilters?`          | `unknown`                                                           | -                                                                |
+| `options.responseSynthesizer?` | [`ResponseSynthesizer`](ResponseSynthesizer.md)                     | -                                                                |
+| `options.retriever?`           | [`BaseRetriever`](../interfaces/BaseRetriever.md)                   | -                                                                |
+
+#### Returns
+
+[`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[asQueryEngine](BaseIndex.md#asqueryengine)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L156)
+
+---
+
+### asRetriever
+
+▸ **asRetriever**(`options?`): [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+Create a new retriever from the index.
+
+#### Parameters
+
+| Name           | Type                                                       |
+| :------------- | :--------------------------------------------------------- |
+| `options?`     | `Object`                                                   |
+| `options.mode` | [`SummaryRetrieverMode`](../enums/SummaryRetrieverMode.md) |
+
+#### Returns
+
+[`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[asRetriever](BaseIndex.md#asretriever)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:143](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L143)
+
+---
+
+### deleteNodes
+
+▸ **deleteNodes**(`nodeIds`, `deleteFromDocStore`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name                 | Type       |
+| :------------------- | :--------- |
+| `nodeIds`            | `string`[] |
+| `deleteFromDocStore` | `boolean`  |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:224](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L224)
+
+---
+
+### deleteRefDoc
+
+▸ **deleteRefDoc**(`refDocId`, `deleteFromDocStore?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name                  | Type      |
+| :-------------------- | :-------- |
+| `refDocId`            | `string`  |
+| `deleteFromDocStore?` | `boolean` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[deleteRefDoc](BaseIndex.md#deleterefdoc)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:205](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L205)
+
+---
+
+### getRefDocInfo
+
+▸ **getRefDocInfo**(): `Promise`<`Record`<`string`, [`RefDocInfo`](../interfaces/RefDocInfo.md)\>\>
+
+#### Returns
+
+`Promise`<`Record`<`string`, [`RefDocInfo`](../interfaces/RefDocInfo.md)\>\>
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:238](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L238)
+
+---
+
+### insert
+
+▸ **insert**(`document`): `Promise`<`void`\>
+
+Insert a document into the index.
+
+#### Parameters
+
+| Name       | Type                                                   |
+| :--------- | :----------------------------------------------------- |
+| `document` | [`Document`](Document.md)<[`Metadata`](../#metadata)\> |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[insert](BaseIndex.md#insert)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:190](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L190)
+
+---
+
+### insertNodes
+
+▸ **insertNodes**(`nodes`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name    | Type                                                     |
+| :------ | :------------------------------------------------------- |
+| `nodes` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[insertNodes](BaseIndex.md#insertnodes)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:199](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L199)
+
+---
+
+### buildIndexFromNodes
+
+▸ `Static` **buildIndexFromNodes**(`nodes`, `docStore`, `indexStruct?`): `Promise`<[`IndexList`](IndexList.md)\>
+
+#### Parameters
+
+| Name           | Type                                                     |
+| :------------- | :------------------------------------------------------- |
+| `nodes`        | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+| `docStore`     | [`BaseDocumentStore`](BaseDocumentStore.md)              |
+| `indexStruct?` | [`IndexList`](IndexList.md)                              |
+
+#### Returns
+
+`Promise`<[`IndexList`](IndexList.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:184](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L184)
+
+---
+
+### fromDocuments
+
+▸ `Static` **fromDocuments**(`documents`, `args?`): `Promise`<[`SummaryIndex`](SummaryIndex.md)\>
+
+#### Parameters
+
+| Name                   | Type                                                     |
+| :--------------------- | :------------------------------------------------------- |
+| `documents`            | [`Document`](Document.md)<[`Metadata`](../#metadata)\>[] |
+| `args`                 | `Object`                                                 |
+| `args.serviceContext?` | [`ServiceContext`](../interfaces/ServiceContext.md)      |
+| `args.storageContext?` | [`StorageContext`](../interfaces/StorageContext.md)      |
+
+#### Returns
+
+`Promise`<[`SummaryIndex`](SummaryIndex.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:117](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L117)
+
+---
+
+### init
+
+▸ `Static` **init**(`options`): `Promise`<[`SummaryIndex`](SummaryIndex.md)\>
+
+#### Parameters
+
+| Name      | Type                  |
+| :-------- | :-------------------- |
+| `options` | `SummaryIndexOptions` |
+
+#### Returns
+
+`Promise`<[`SummaryIndex`](SummaryIndex.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L52)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndexLLMRetriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndexLLMRetriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..ae360efa12696636e3fc5a8492045e8350ca339d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndexLLMRetriever.md
@@ -0,0 +1,151 @@
+---
+id: "SummaryIndexLLMRetriever"
+title: "Class: SummaryIndexLLMRetriever"
+sidebar_label: "SummaryIndexLLMRetriever"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+LLM-based retriever for SummaryIndex that uses the LLM to select the most relevant chunks.
+
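+A usage sketch (the document text and query are illustrative; batching is controlled by `choiceBatchSize`):
+
+```typescript
+import { Document, SummaryIndex, SummaryIndexLLMRetriever } from "llamaindex";
+
+const index = await SummaryIndex.fromDocuments([
+  new Document({ text: "Llamas are camelids native to South America." }),
+]);
+
+// The LLM scores each batch of nodes; only the relevant ones come back.
+const retriever = new SummaryIndexLLMRetriever(index);
+const results = await retriever.retrieve("What are llamas?");
+```
+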
+## Implements
+
+- [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+## Constructors
+
+### constructor
+
+• **new SummaryIndexLLMRetriever**(`index`, `choiceSelectPrompt?`, `choiceBatchSize?`, `formatNodeBatchFn?`, `parseChoiceSelectAnswerFn?`, `serviceContext?`)
+
+#### Parameters
+
+| Name                         | Type                                                | Default value |
+| :--------------------------- | :-------------------------------------------------- | :------------ |
+| `index`                      | [`SummaryIndex`](SummaryIndex.md)                   | `undefined`   |
+| `choiceSelectPrompt?`        | (`__namedParameters`: `Object`) => `string`         | `undefined`   |
+| `choiceBatchSize`            | `number`                                            | `10`          |
+| `formatNodeBatchFn?`         | `NodeFormatterFunction`                             | `undefined`   |
+| `parseChoiceSelectAnswerFn?` | `ChoiceSelectParserFunction`                        | `undefined`   |
+| `serviceContext?`            | [`ServiceContext`](../interfaces/ServiceContext.md) | `undefined`   |
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L64)
+
+## Properties
+
+### choiceBatchSize
+
+• **choiceBatchSize**: `number`
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L59)
+
+---
+
+### choiceSelectPrompt
+
+• **choiceSelectPrompt**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L58)
+
+---
+
+### formatNodeBatchFn
+
+• **formatNodeBatchFn**: `NodeFormatterFunction`
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:60](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L60)
+
+---
+
+### index
+
+• **index**: [`SummaryIndex`](SummaryIndex.md)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L57)
+
+---
+
+### parseChoiceSelectAnswerFn
+
+• **parseChoiceSelectAnswerFn**: `ChoiceSelectParserFunction`
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:61](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L61)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:62](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L62)
+
+## Methods
+
+### getServiceContext
+
+▸ **getServiceContext**(): [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Returns
+
+[`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Implementation of
+
+[BaseRetriever](../interfaces/BaseRetriever.md).[getServiceContext](../interfaces/BaseRetriever.md#getservicecontext)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:127](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L127)
+
+---
+
+### retrieve
+
+▸ **retrieve**(`query`, `parentEvent?`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseRetriever](../interfaces/BaseRetriever.md).[retrieve](../interfaces/BaseRetriever.md#retrieve)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:81](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L81)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndexRetriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndexRetriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..3772a09f1f5132605a570c0b4a2d23d327dd4cd5
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/SummaryIndexRetriever.md
@@ -0,0 +1,82 @@
+---
+id: "SummaryIndexRetriever"
+title: "Class: SummaryIndexRetriever"
+sidebar_label: "SummaryIndexRetriever"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A simple retriever for SummaryIndex that returns all nodes.
+
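+Usage mirrors the LLM-based retriever above, except no LLM call is made and every node in the index is returned (the sample document is illustrative):
+
+```typescript
+import { Document, SummaryIndex, SummaryIndexRetriever } from "llamaindex";
+
+const index = await SummaryIndex.fromDocuments([
+  new Document({ text: "Llamas are camelids native to South America." }),
+]);
+
+const retriever = new SummaryIndexRetriever(index);
+const allNodes = await retriever.retrieve("any query"); // returns every node
+```
+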
+## Implements
+
+- [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+## Constructors
+
+### constructor
+
+• **new SummaryIndexRetriever**(`index`)
+
+#### Parameters
+
+| Name    | Type                              |
+| :------ | :-------------------------------- |
+| `index` | [`SummaryIndex`](SummaryIndex.md) |
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L22)
+
+## Properties
+
+### index
+
+• **index**: [`SummaryIndex`](SummaryIndex.md)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:20](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L20)
+
+## Methods
+
+### getServiceContext
+
+▸ **getServiceContext**(): [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Returns
+
+[`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Implementation of
+
+[BaseRetriever](../interfaces/BaseRetriever.md).[getServiceContext](../interfaces/BaseRetriever.md#getservicecontext)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L48)
+
+---
+
+### retrieve
+
+▸ **retrieve**(`query`, `parentEvent?`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseRetriever](../interfaces/BaseRetriever.md).[retrieve](../interfaces/BaseRetriever.md#retrieve)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L26)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TextFileReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TextFileReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..c6f2138c2bbcd6473a89d84032dc0ccbae87468e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TextFileReader.md
@@ -0,0 +1,44 @@
+---
+id: "TextFileReader"
+title: "Class: TextFileReader"
+sidebar_label: "TextFileReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Read a .txt file
+
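+A minimal sketch (the file path is illustrative):
+
+```typescript
+import { TextFileReader } from "llamaindex";
+
+const reader = new TextFileReader();
+const [document] = await reader.loadData("data/notes.txt");
+```
+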
+## Implements
+
+- [`BaseReader`](../interfaces/BaseReader.md)
+
+## Constructors
+
+### constructor
+
+• **new TextFileReader**()
+
+## Methods
+
+### loadData
+
+▸ **loadData**(`file`, `fs?`): `Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name   | Type                                           |
+| :----- | :--------------------------------------------- |
+| `file` | `string`                                       |
+| `fs`   | [`CompleteFileSystem`](../#completefilesystem) |
+
+#### Returns
+
+`Promise`<[`Document`](Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseReader](../interfaces/BaseReader.md).[loadData](../interfaces/BaseReader.md#loaddata)
+
+#### Defined in
+
+[packages/core/src/readers/SimpleDirectoryReader.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleDirectoryReader.ts#L29)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TextNode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TextNode.md
new file mode 100644
index 0000000000000000000000000000000000000000..fbc67f241779369910d9ee5a53465e8a294afb48
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TextNode.md
@@ -0,0 +1,486 @@
+---
+id: "TextNode"
+title: "Class: TextNode<T>"
+sidebar_label: "TextNode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+TextNode is the default node type for text and the most common node type in LlamaIndex.TS.
+
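+A construction sketch (the text and metadata are illustrative):
+
+```typescript
+import { MetadataMode, TextNode } from "llamaindex";
+
+const node = new TextNode({ text: "Hello, world.", metadata: { source: "example" } });
+
+// Include the metadata header along with the text.
+console.log(node.getContent(MetadataMode.ALL));
+```
+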
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Hierarchy
+
+- [`BaseNode`](BaseNode.md)<`T`\>
+
+  ↳ **`TextNode`**
+
+  ↳↳ [`IndexNode`](IndexNode.md)
+
+  ↳↳ [`Document`](Document.md)
+
+  ↳↳ [`ImageNode`](ImageNode.md)
+
+## Constructors
+
+### constructor
+
+• **new TextNode**<`T`\>(`init?`)
+
+#### Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+#### Parameters
+
+| Name    | Type                                        |
+| :------ | :------------------------------------------ |
+| `init?` | `Partial`<[`TextNode`](TextNode.md)<`T`\>\> |
+
+#### Overrides
+
+[BaseNode](BaseNode.md).[constructor](BaseNode.md#constructor)
+
+#### Defined in
+
+[packages/core/src/Node.ts:162](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L162)
+
+## Properties
+
+### embedding
+
+• `Optional` **embedding**: `number`[]
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[embedding](BaseNode.md#embedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L51)
+
+---
+
+### endCharIdx
+
+• `Optional` **endCharIdx**: `number`
+
+#### Defined in
+
+[packages/core/src/Node.ts:157](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L157)
+
+---
+
+### excludedEmbedMetadataKeys
+
+• **excludedEmbedMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[excludedEmbedMetadataKeys](BaseNode.md#excludedembedmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L55)
+
+---
+
+### excludedLlmMetadataKeys
+
+• **excludedLlmMetadataKeys**: `string`[] = `[]`
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[excludedLlmMetadataKeys](BaseNode.md#excludedllmmetadatakeys)
+
+#### Defined in
+
+[packages/core/src/Node.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L56)
+
+---
+
+### hash
+
+• **hash**: `string` = `""`
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[hash](BaseNode.md#hash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L58)
+
+---
+
+### id\_
+
+• **id\_**: `string`
+
+The unique ID of the Node/Document. The trailing underscore is here
+to avoid collisions with the id keyword in Python.
+
+Set to a UUID by default.
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[id\_](BaseNode.md#id_)
+
+#### Defined in
+
+[packages/core/src/Node.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L50)
+
+---
+
+### metadata
+
+• **metadata**: `T`
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[metadata](BaseNode.md#metadata)
+
+#### Defined in
+
+[packages/core/src/Node.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L54)
+
+---
+
+### metadataSeparator
+
+• **metadataSeparator**: `string` = `"\n"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:160](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L160)
+
+---
+
+### relationships
+
+• **relationships**: `Partial`<`Record`<[`NodeRelationship`](../enums/NodeRelationship.md), [`RelatedNodeType`](../#relatednodetype)<`T`\>\>\> = `{}`
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[relationships](BaseNode.md#relationships)
+
+#### Defined in
+
+[packages/core/src/Node.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L57)
+
+---
+
+### startCharIdx
+
+• `Optional` **startCharIdx**: `number`
+
+#### Defined in
+
+[packages/core/src/Node.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L156)
+
+---
+
+### text
+
+• **text**: `string` = `""`
+
+#### Defined in
+
+[packages/core/src/Node.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L155)
+
+## Accessors
+
+### childNodes
+
+• `get` **childNodes**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Inherited from
+
+BaseNode.childNodes
+
+#### Defined in
+
+[packages/core/src/Node.ts:112](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L112)
+
+---
+
+### nextNode
+
+• `get` **nextNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+BaseNode.nextNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:92](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L92)
+
+---
+
+### parentNode
+
+• `get` **parentNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+BaseNode.parentNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:102](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L102)
+
+---
+
+### prevNode
+
+• `get` **prevNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+BaseNode.prevNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L80)
+
+---
+
+### sourceNode
+
+• `get` **sourceNode**(): `undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+`undefined` \| [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+BaseNode.sourceNode
+
+#### Defined in
+
+[packages/core/src/Node.ts:70](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L70)
+
+## Methods
+
+### asRelatedNodeInfo
+
+▸ **asRelatedNodeInfo**(): [`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Returns
+
+[`RelatedNodeInfo`](../interfaces/RelatedNodeInfo.md)<`T`\>
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[asRelatedNodeInfo](BaseNode.md#asrelatednodeinfo)
+
+#### Defined in
+
+[packages/core/src/Node.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L134)
+
+---
+
+### generateHash
+
+▸ **generateHash**(): `string`
+
+Generate a hash of the text node.
+The ID is not part of the hash, as it can change independently of the content.
+
+#### Returns
+
+`string`
+
+#### Overrides
+
+[BaseNode](BaseNode.md).[generateHash](BaseNode.md#generatehash)
+
+#### Defined in
+
+[packages/core/src/Node.ts:178](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L178)
+
+---
+
+### getContent
+
+▸ **getContent**(`metadataMode?`): `string`
+
+#### Parameters
+
+| Name           | Type                                       | Default value       |
+| :------------- | :----------------------------------------- | :------------------ |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) | `MetadataMode.NONE` |
+
+#### Returns
+
+`string`
+
+#### Overrides
+
+[BaseNode](BaseNode.md).[getContent](BaseNode.md#getcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:192](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L192)
+
+---
+
+### getEmbedding
+
+▸ **getEmbedding**(): `number`[]
+
+#### Returns
+
+`number`[]
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[getEmbedding](BaseNode.md#getembedding)
+
+#### Defined in
+
+[packages/core/src/Node.ts:126](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L126)
+
+---
+
+### getMetadataStr
+
+▸ **getMetadataStr**(`metadataMode`): `string`
+
+#### Parameters
+
+| Name           | Type                                       |
+| :------------- | :----------------------------------------- |
+| `metadataMode` | [`MetadataMode`](../enums/MetadataMode.md) |
+
+#### Returns
+
+`string`
+
+#### Overrides
+
+[BaseNode](BaseNode.md).[getMetadataStr](BaseNode.md#getmetadatastr)
+
+#### Defined in
+
+[packages/core/src/Node.ts:197](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L197)
+
+---
+
+### getNodeInfo
+
+▸ **getNodeInfo**(): `Object`
+
+#### Returns
+
+`Object`
+
+| Name    | Type                    |
+| :------ | :---------------------- |
+| `end`   | `undefined` \| `number` |
+| `start` | `undefined` \| `number` |
+
+#### Defined in
+
+[packages/core/src/Node.ts:224](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L224)
+
+---
+
+### getText
+
+▸ **getText**(): `string`
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Node.ts:228](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L228)
+
+---
+
+### getType
+
+▸ **getType**(): [`ObjectType`](../enums/ObjectType.md)
+
+#### Returns
+
+[`ObjectType`](../enums/ObjectType.md)
+
+#### Overrides
+
+[BaseNode](BaseNode.md).[getType](BaseNode.md#gettype)
+
+#### Defined in
+
+[packages/core/src/Node.ts:188](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L188)
+
+---
+
+### setContent
+
+▸ **setContent**(`value`): `void`
+
+#### Parameters
+
+| Name    | Type     |
+| :------ | :------- |
+| `value` | `string` |
+
+#### Returns
+
+`void`
+
+#### Overrides
+
+[BaseNode](BaseNode.md).[setContent](BaseNode.md#setcontent)
+
+#### Defined in
+
+[packages/core/src/Node.ts:218](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L218)
+
+---
+
+### toJSON
+
+▸ **toJSON**(): `Record`<`string`, `any`\>
+
+Used with the built-in JSON.stringify.
+
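+For example (a sketch; the exact keys in the serialized record depend on the node's fields):
+
+```typescript
+import { TextNode } from "llamaindex";
+
+// JSON.stringify picks up toJSON() automatically, so the node
+// serializes as a plain record rather than a class instance.
+const node = new TextNode({ text: "hello" });
+const serialized = JSON.stringify(node);
+```
+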
+#### Returns
+
+`Record`<`string`, `any`\>
+
+#### Inherited from
+
+[BaseNode](BaseNode.md).[toJSON](BaseNode.md#tojson)
+
+#### Defined in
+
+[packages/core/src/Node.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L146)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TreeSummarize.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TreeSummarize.md
new file mode 100644
index 0000000000000000000000000000000000000000..61a788bd2a7d737ba6d0f7065cd4c8a1cc72bad7
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/TreeSummarize.md
@@ -0,0 +1,108 @@
+---
+id: "TreeSummarize"
+title: "Class: TreeSummarize"
+sidebar_label: "TreeSummarize"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+TreeSummarize repacks the text chunks into the smallest possible number of chunks, summarizes them, and repeats recursively until a single chunk remains.
+
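+A minimal usage sketch (assuming `serviceContextFromDefaults` from this package and a top-level-await context):
+
+```typescript
+import { TreeSummarize, serviceContextFromDefaults } from "llamaindex";
+
+// Build the response builder directly and collapse three chunks
+// into a single summarized answer.
+const serviceContext = serviceContextFromDefaults();
+const treeSummarize = new TreeSummarize(serviceContext);
+
+const answer = await treeSummarize.getResponse("What is the document about?", [
+  "chunk one...",
+  "chunk two...",
+  "chunk three...",
+]);
+console.log(answer);
+```
+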
+## Implements
+
+- `BaseResponseBuilder`
+
+## Constructors
+
+### constructor
+
+• **new TreeSummarize**(`serviceContext`, `summaryTemplate?`)
+
+#### Parameters
+
+| Name               | Type                                                |
+| :----------------- | :-------------------------------------------------- |
+| `serviceContext`   | [`ServiceContext`](../interfaces/ServiceContext.md) |
+| `summaryTemplate?` | (`__namedParameters`: `Object`) => `string`         |
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:217](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L217)
+
+## Properties
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:214](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L214)
+
+---
+
+### summaryTemplate
+
+• **summaryTemplate**: (`__namedParameters`: `Object`) => `string`
+
+#### Type declaration
+
+▸ (`«destructured»`): `string`
+
+##### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:215](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L215)
+
+## Methods
+
+### getResponse
+
+▸ **getResponse**(`query`, `textChunks`, `parentEvent?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `textChunks`   | `string`[]                        |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Implementation of
+
+BaseResponseBuilder.getResponse
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:225](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L225)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/VectorIndexRetriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/VectorIndexRetriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..960dda48819221433fd107c055d2a2d8a2ab1fa2
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/VectorIndexRetriever.md
@@ -0,0 +1,226 @@
+---
+id: "VectorIndexRetriever"
+title: "Class: VectorIndexRetriever"
+sidebar_label: "VectorIndexRetriever"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+VectorIndexRetriever retrieves nodes from a VectorIndex.
+
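+A minimal sketch of the usual path to a retriever (via `VectorStoreIndex.asRetriever`):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Build an index, then pull the most similar nodes for a query.
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "LlamaIndex.TS is a data framework for LLM apps." }),
+]);
+
+const retriever = index.asRetriever();
+retriever.similarityTopK = 3; // return the 3 most similar nodes
+
+const nodesWithScores = await retriever.retrieve("What is LlamaIndex.TS?");
+```
+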
+## Implements
+
+- [`BaseRetriever`](../interfaces/BaseRetriever.md)
+
+## Constructors
+
+### constructor
+
+• **new VectorIndexRetriever**(`«destructured»`)
+
+#### Parameters
+
+| Name                | Type                                      |
+| :------------------ | :---------------------------------------- |
+| `«destructured»`    | `Object`                                  |
+| › `index`           | [`VectorStoreIndex`](VectorStoreIndex.md) |
+| › `similarityTopK?` | `number`                                  |
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L24)
+
+## Properties
+
+### index
+
+• **index**: [`VectorStoreIndex`](VectorStoreIndex.md)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:20](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L20)
+
+---
+
+### serviceContext
+
+• `Private` **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L22)
+
+---
+
+### similarityTopK
+
+• **similarityTopK**: `number`
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L21)
+
+## Methods
+
+### buildNodeListFromQueryResult
+
+▸ `Protected` **buildNodeListFromQueryResult**(`result`): [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Parameters
+
+| Name     | Type                                                                |
+| :------- | :------------------------------------------------------------------ |
+| `result` | [`VectorStoreQueryResult`](../interfaces/VectorStoreQueryResult.md) |
+
+#### Returns
+
+[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:102](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L102)
+
+---
+
+### buildVectorStoreQuery
+
+▸ `Protected` **buildVectorStoreQuery**(`embedModel`, `query`): `Promise`<[`VectorStoreQuery`](../interfaces/VectorStoreQuery.md)\>
+
+#### Parameters
+
+| Name         | Type                                |
+| :----------- | :---------------------------------- |
+| `embedModel` | [`BaseEmbedding`](BaseEmbedding.md) |
+| `query`      | `string`                            |
+
+#### Returns
+
+`Promise`<[`VectorStoreQuery`](../interfaces/VectorStoreQuery.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:89](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L89)
+
+---
+
+### getServiceContext
+
+▸ **getServiceContext**(): [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Returns
+
+[`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Implementation of
+
+[BaseRetriever](../interfaces/BaseRetriever.md).[getServiceContext](../interfaces/BaseRetriever.md#getservicecontext)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:120](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L120)
+
+---
+
+### retrieve
+
+▸ **retrieve**(`query`, `parentEvent?`, `preFilters?`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name           | Type                              |
+| :------------- | :-------------------------------- |
+| `query`        | `string`                          |
+| `parentEvent?` | [`Event`](../interfaces/Event.md) |
+| `preFilters?`  | `unknown`                         |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Implementation of
+
+[BaseRetriever](../interfaces/BaseRetriever.md).[retrieve](../interfaces/BaseRetriever.md#retrieve)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:37](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L37)
+
+---
+
+### sendEvent
+
+▸ `Protected` **sendEvent**(`query`, `nodesWithScores`, `parentEvent`): `void`
+
+#### Parameters
+
+| Name              | Type                                                                             |
+| :---------------- | :------------------------------------------------------------------------------- |
+| `query`           | `string`                                                                         |
+| `nodesWithScores` | [`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[] |
+| `parentEvent`     | `undefined` \| [`Event`](../interfaces/Event.md)                                 |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:72](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L72)
+
+---
+
+### textRetrieve
+
+▸ `Protected` **textRetrieve**(`query`, `preFilters?`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name          | Type      |
+| :------------ | :-------- |
+| `query`       | `string`  |
+| `preFilters?` | `unknown` |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L50)
+
+---
+
+### textToImageRetrieve
+
+▸ `Private` **textToImageRetrieve**(`query`, `preFilters?`): `Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name          | Type      |
+| :------------ | :-------- |
+| `query`       | `string`  |
+| `preFilters?` | `unknown` |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](../interfaces/NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorIndexRetriever.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorIndexRetriever.ts#L59)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/VectorStoreIndex.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/VectorStoreIndex.md
new file mode 100644
index 0000000000000000000000000000000000000000..325ce502f6eb2596552485a53534a6b68d302d58
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/VectorStoreIndex.md
@@ -0,0 +1,553 @@
+---
+id: "VectorStoreIndex"
+title: "Class: VectorStoreIndex"
+sidebar_label: "VectorStoreIndex"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+The VectorStoreIndex, an index that stores nodes solely according to their vector embeddings.
+
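+A minimal end-to-end sketch (documents in, natural-language answers out):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// fromDocuments splits the documents, embeds the chunks, and builds the index.
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "LlamaIndex.TS is a data framework for LLM apps." }),
+]);
+
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("What is LlamaIndex.TS?");
+console.log(response.toString());
+```
+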
+## Hierarchy
+
+- [`BaseIndex`](BaseIndex.md)<[`IndexDict`](IndexDict.md)\>
+
+  ↳ **`VectorStoreIndex`**
+
+## Constructors
+
+### constructor
+
+• `Private` **new VectorStoreIndex**(`init`)
+
+#### Parameters
+
+| Name   | Type                          |
+| :----- | :---------------------------- |
+| `init` | `VectorIndexConstructorProps` |
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[constructor](BaseIndex.md#constructor)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L64)
+
+## Properties
+
+### docStore
+
+• **docStore**: [`BaseDocumentStore`](BaseDocumentStore.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[docStore](BaseIndex.md#docstore)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:156](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L156)
+
+---
+
+### embedModel
+
+• **embedModel**: [`BaseEmbedding`](BaseEmbedding.md)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:60](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L60)
+
+---
+
+### imageEmbedModel
+
+• `Optional` **imageEmbedModel**: [`MultiModalEmbedding`](MultiModalEmbedding.md)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:62](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L62)
+
+---
+
+### imageVectorStore
+
+• `Optional` **imageVectorStore**: [`VectorStore`](../interfaces/VectorStore.md)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:61](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L61)
+
+---
+
+### indexStore
+
+• **indexStore**: [`BaseIndexStore`](BaseIndexStore.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[indexStore](BaseIndex.md#indexstore)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L59)
+
+---
+
+### indexStruct
+
+• **indexStruct**: [`IndexDict`](IndexDict.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[indexStruct](BaseIndex.md#indexstruct)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:159](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L159)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](../interfaces/ServiceContext.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[serviceContext](BaseIndex.md#servicecontext)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:154](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L154)
+
+---
+
+### storageContext
+
+• **storageContext**: [`StorageContext`](../interfaces/StorageContext.md)
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[storageContext](BaseIndex.md#storagecontext)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L155)
+
+---
+
+### vectorStore
+
+• **vectorStore**: [`VectorStore`](../interfaces/VectorStore.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[vectorStore](BaseIndex.md#vectorstore)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L58)
+
+## Methods
+
+### asQueryEngine
+
+▸ **asQueryEngine**(`options?`): [`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+Create a new query engine from the index. It will also create a retriever
+and response synthesizer if they are not provided.
+
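+For example, to supply your own retriever while keeping the default response synthesizer (a sketch):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "..." }),
+]);
+
+// Customize retrieval; the response synthesizer falls back to the default.
+const retriever = index.asRetriever();
+retriever.similarityTopK = 5;
+
+const queryEngine = index.asQueryEngine({ retriever });
+```
+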
+#### Parameters
+
+| Name                           | Type                                                                | Description                                                      |
+| :----------------------------- | :------------------------------------------------------------------ | :--------------------------------------------------------------- |
+| `options?`                     | `Object`                                                            | you can supply your own custom Retriever and ResponseSynthesizer |
+| `options.nodePostprocessors?`  | [`BaseNodePostprocessor`](../interfaces/BaseNodePostprocessor.md)[] | -                                                                |
+| `options.preFilters?`          | `unknown`                                                           | -                                                                |
+| `options.responseSynthesizer?` | [`ResponseSynthesizer`](ResponseSynthesizer.md)                     | -                                                                |
+| `options.retriever?`           | [`BaseRetriever`](../interfaces/BaseRetriever.md)                   | -                                                                |
+
+#### Returns
+
+[`BaseQueryEngine`](../interfaces/BaseQueryEngine.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[asQueryEngine](BaseIndex.md#asqueryengine)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:244](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L244)
+
+---
+
+### asRetriever
+
+▸ **asRetriever**(`options?`): [`VectorIndexRetriever`](VectorIndexRetriever.md)
+
+Create a new retriever from the index.
+
+#### Parameters
+
+| Name       | Type  |
+| :--------- | :---- |
+| `options?` | `any` |
+
+#### Returns
+
+[`VectorIndexRetriever`](VectorIndexRetriever.md)
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[asRetriever](BaseIndex.md#asretriever)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:240](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L240)
+
+---
+
+### buildIndexFromNodes
+
+▸ **buildIndexFromNodes**(`nodes`): `Promise`<`void`\>
+
+Get embeddings for nodes and place them into the index.
+
+#### Parameters
+
+| Name    | Type                                                     |
+| :------ | :------------------------------------------------------- |
+| `nodes` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:178](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L178)
+
+---
+
+### deleteRefDoc
+
+▸ **deleteRefDoc**(`refDocId`, `deleteFromDocStore?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name                 | Type      | Default value |
+| :------------------- | :-------- | :------------ |
+| `refDocId`           | `string`  | `undefined`   |
+| `deleteFromDocStore` | `boolean` | `true`        |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[deleteRefDoc](BaseIndex.md#deleterefdoc)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:305](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L305)
+
+---
+
+### deleteRefDocFromStore
+
+▸ `Protected` **deleteRefDocFromStore**(`vectorStore`, `refDocId`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                          |
+| :------------ | :-------------------------------------------- |
+| `vectorStore` | [`VectorStore`](../interfaces/VectorStore.md) |
+| `refDocId`    | `string`                                      |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:319](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L319)
+
+---
+
+### getImageNodeEmbeddingResults
+
+▸ **getImageNodeEmbeddingResults**(`nodes`, `logProgress?`): `Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+Get the embeddings for image nodes.
+
+#### Parameters
+
+| Name          | Type                                                       | Default value | Description                                    |
+| :------------ | :--------------------------------------------------------- | :------------ | :--------------------------------------------- |
+| `nodes`       | [`ImageNode`](ImageNode.md)<[`Metadata`](../#metadata)\>[] | `undefined`   |                                                |
+| `logProgress` | `boolean`                                                  | `false`       | log progress to console (useful for debugging) |
+
+#### Returns
+
+`Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:345](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L345)
+
+---
+
+### getNodeEmbeddingResults
+
+▸ **getNodeEmbeddingResults**(`nodes`, `logProgress?`): `Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+Get the embeddings for nodes.
+
+#### Parameters
+
+| Name          | Type                                                     | Default value | Description                                    |
+| :------------ | :------------------------------------------------------- | :------------ | :--------------------------------------------- |
+| `nodes`       | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] | `undefined`   |                                                |
+| `logProgress` | `boolean`                                                | `false`       | log progress to console (useful for debugging) |
+
+#### Returns
+
+`Promise`<[`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:155](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L155)
+
+---
+
+### insert
+
+▸ **insert**(`document`): `Promise`<`void`\>
+
+Insert a document into the index.
+
+#### Parameters
+
+| Name       | Type                                                   |
+| :--------- | :----------------------------------------------------- |
+| `document` | [`Document`](Document.md)<[`Metadata`](../#metadata)\> |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Inherited from
+
+[BaseIndex](BaseIndex.md).[insert](BaseIndex.md#insert)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:190](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L190)
+
+---
+
+### insertNodes
+
+▸ **insertNodes**(`nodes`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name    | Type                                                     |
+| :------ | :------------------------------------------------------- |
+| `nodes` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Overrides
+
+[BaseIndex](BaseIndex.md).[insertNodes](BaseIndex.md#insertnodes)
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:284](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L284)
+
+---
+
+### insertNodesToStore
+
+▸ `Protected` **insertNodesToStore**(`vectorStore`, `nodes`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name          | Type                                                     |
+| :------------ | :------------------------------------------------------- |
+| `vectorStore` | [`VectorStore`](../interfaces/VectorStore.md)            |
+| `nodes`       | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:259](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L259)
+
+---
+
+### splitNodes
+
+▸ `Private` **splitNodes**(`nodes`): `Object`
+
+#### Parameters
+
+| Name    | Type                                                     |
+| :------ | :------------------------------------------------------- |
+| `nodes` | [`BaseNode`](BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Object`
+
+| Name         | Type                                                       |
+| :----------- | :--------------------------------------------------------- |
+| `imageNodes` | [`ImageNode`](ImageNode.md)<[`Metadata`](../#metadata)\>[] |
+| `textNodes`  | [`TextNode`](TextNode.md)<[`Metadata`](../#metadata)\>[]   |
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:367](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L367)
+
+---
+
+### fromDocuments
+
+▸ `Static` **fromDocuments**(`documents`, `args?`): `Promise`<[`VectorStoreIndex`](VectorStoreIndex.md)\>
+
+High-level API: split documents, get embeddings, and build the index.
+
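+A sketch with persistence (assuming a `storageContextFromDefaults` helper and the `storageContext` option):
+
+```typescript
+import {
+  Document,
+  VectorStoreIndex,
+  storageContextFromDefaults,
+} from "llamaindex";
+
+// Persist the built index to disk via a storage context.
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+const index = await VectorStoreIndex.fromDocuments(
+  [new Document({ text: "..." })],
+  { storageContext },
+);
+```
+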
+#### Parameters
+
+| Name        | Type                                                     |
+| :---------- | :------------------------------------------------------- |
+| `documents` | [`Document`](Document.md)<[`Metadata`](../#metadata)\>[] |
+| `args`      | `VectorIndexOptions`                                     |
+
+#### Returns
+
+`Promise`<[`VectorStoreIndex`](VectorStoreIndex.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:201](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L201)
+
+---
+
+### fromVectorStore
+
+▸ `Static` **fromVectorStore**(`vectorStore`, `serviceContext`): `Promise`<[`VectorStoreIndex`](VectorStoreIndex.md)\>
+
+#### Parameters
+
+| Name             | Type                                                |
+| :--------------- | :-------------------------------------------------- |
+| `vectorStore`    | [`VectorStore`](../interfaces/VectorStore.md)       |
+| `serviceContext` | [`ServiceContext`](../interfaces/ServiceContext.md) |
+
+#### Returns
+
+`Promise`<[`VectorStoreIndex`](VectorStoreIndex.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:219](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L219)
+
+---
+
+### init
+
+▸ `Static` **init**(`options`): `Promise`<[`VectorStoreIndex`](VectorStoreIndex.md)\>
+
+The async init function creates a new VectorStoreIndex.
+
+#### Parameters
+
+| Name      | Type                 |
+| :-------- | :------------------- |
+| `options` | `VectorIndexOptions` |
+
+#### Returns
+
+`Promise`<[`VectorStoreIndex`](VectorStoreIndex.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:80](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L80)
+
+---
+
+### setupIndexStructFromStorage
+
+▸ `Static` `Private` **setupIndexStructFromStorage**(`indexStore`, `options`): `Promise`<`undefined` \| [`IndexDict`](IndexDict.md)\>
+
+#### Parameters
+
+| Name         | Type                                  |
+| :----------- | :------------------------------------ |
+| `indexStore` | [`BaseIndexStore`](BaseIndexStore.md) |
+| `options`    | `IndexStructOptions`                  |
+
+#### Returns
+
+`Promise`<`undefined` \| [`IndexDict`](IndexDict.md)\>
+
+#### Defined in
+
+[packages/core/src/indices/vectorStore/VectorStoreIndex.ts:118](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/vectorStore/VectorStoreIndex.ts#L118)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/_category_.yml b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..55c7980a46440f2ff50537c3392125668d3bb43e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/classes/_category_.yml
@@ -0,0 +1,2 @@
+label: "Classes"
+position: 3
\ No newline at end of file
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/ClipEmbeddingModelType.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/ClipEmbeddingModelType.md
new file mode 100644
index 0000000000000000000000000000000000000000..bd789ce25ac34797f1301821484143eaba214d16
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/ClipEmbeddingModelType.md
@@ -0,0 +1,27 @@
+---
+id: "ClipEmbeddingModelType"
+title: "Enumeration: ClipEmbeddingModelType"
+sidebar_label: "ClipEmbeddingModelType"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### XENOVA_CLIP_VIT_BASE_PATCH16
+
+• **XENOVA_CLIP_VIT_BASE_PATCH16** = `"Xenova/clip-vit-base-patch16"`
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L7)
+
+---
+
+### XENOVA_CLIP_VIT_BASE_PATCH32
+
+• **XENOVA_CLIP_VIT_BASE_PATCH32** = `"Xenova/clip-vit-base-patch32"`
+
+#### Defined in
+
+[packages/core/src/embeddings/ClipEmbedding.ts:6](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/ClipEmbedding.ts#L6)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/DeuceChatStrategy.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/DeuceChatStrategy.md
new file mode 100644
index 0000000000000000000000000000000000000000..72cdd63f81f2d2d3058636cf3ac8d0b34cb1f064
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/DeuceChatStrategy.md
@@ -0,0 +1,57 @@
+---
+id: "DeuceChatStrategy"
+title: "Enumeration: DeuceChatStrategy"
+sidebar_label: "DeuceChatStrategy"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### A16Z
+
+• **A16Z** = `"a16z"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:413](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L413)
+
+---
+
+### META
+
+• **META** = `"meta"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:414](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L414)
+
+---
+
+### METAWBOS
+
+• **METAWBOS** = `"metawbos"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:415](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L415)
+
+---
+
+### REPLICATE4BIT
+
+• **REPLICATE4BIT** = `"replicate4bit"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:418](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L418)
+
+---
+
+### REPLICATE4BITWNEWLINES
+
+• **REPLICATE4BITWNEWLINES** = `"replicate4bitwnewlines"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:420](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L420)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/IndexStructType.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/IndexStructType.md
new file mode 100644
index 0000000000000000000000000000000000000000..c548cc3d5de100b77f25fee68b8bd8e384f138fa
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/IndexStructType.md
@@ -0,0 +1,37 @@
+---
+id: "IndexStructType"
+title: "Enumeration: IndexStructType"
+sidebar_label: "IndexStructType"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### KEYWORD_TABLE
+
+• **KEYWORD_TABLE** = `"keyword_table"`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:42](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L42)
+
+---
+
+### LIST
+
+• **LIST** = `"list"`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:41](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L41)
+
+---
+
+### SIMPLE_DICT
+
+• **SIMPLE_DICT** = `"simple_dict"`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:40](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L40)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/KeywordTableRetrieverMode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/KeywordTableRetrieverMode.md
new file mode 100644
index 0000000000000000000000000000000000000000..9f3a664c3a7600e587f137560b8a1d566c7ea808
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/KeywordTableRetrieverMode.md
@@ -0,0 +1,37 @@
+---
+id: "KeywordTableRetrieverMode"
+title: "Enumeration: KeywordTableRetrieverMode"
+sidebar_label: "KeywordTableRetrieverMode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### DEFAULT
+
+• **DEFAULT** = `"DEFAULT"`
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L34)
+
+---
+
+### RAKE
+
+• **RAKE** = `"RAKE"`
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:36](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L36)
+
+---
+
+### SIMPLE
+
+• **SIMPLE** = `"SIMPLE"`
+
+#### Defined in
+
+[packages/core/src/indices/keyword/KeywordTableIndex.ts:35](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/keyword/KeywordTableIndex.ts#L35)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/MetadataMode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/MetadataMode.md
new file mode 100644
index 0000000000000000000000000000000000000000..2aff5be985225934dfd4f2a9137febd5b6377122
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/MetadataMode.md
@@ -0,0 +1,47 @@
+---
+id: "MetadataMode"
+title: "Enumeration: MetadataMode"
+sidebar_label: "MetadataMode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### ALL
+
+• **ALL** = `"ALL"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L21)
+
+---
+
+### EMBED
+
+• **EMBED** = `"EMBED"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L22)
+
+---
+
+### LLM
+
+• **LLM** = `"LLM"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L23)
+
+---
+
+### NONE
+
+• **NONE** = `"NONE"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L24)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/NodeRelationship.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/NodeRelationship.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4e8cd0ac9fa08b11824a1d97e4d1c1be9d668a5
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/NodeRelationship.md
@@ -0,0 +1,57 @@
+---
+id: "NodeRelationship"
+title: "Enumeration: NodeRelationship"
+sidebar_label: "NodeRelationship"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### CHILD
+
+• **CHILD** = `"CHILD"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:9](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L9)
+
+---
+
+### NEXT
+
+• **NEXT** = `"NEXT"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L7)
+
+---
+
+### PARENT
+
+• **PARENT** = `"PARENT"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:8](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L8)
+
+---
+
+### PREVIOUS
+
+• **PREVIOUS** = `"PREVIOUS"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:6](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L6)
+
+---
+
+### SOURCE
+
+• **SOURCE** = `"SOURCE"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:5](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L5)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/ObjectType.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/ObjectType.md
new file mode 100644
index 0000000000000000000000000000000000000000..3fc9a85a335e8987943909d6d07fa4bdb2c0ca2e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/ObjectType.md
@@ -0,0 +1,57 @@
+---
+id: "ObjectType"
+title: "Enumeration: ObjectType"
+sidebar_label: "ObjectType"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### DOCUMENT
+
+• **DOCUMENT** = `"DOCUMENT"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L16)
+
+---
+
+### IMAGE
+
+• **IMAGE** = `"IMAGE"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L14)
+
+---
+
+### IMAGE_DOCUMENT
+
+• **IMAGE_DOCUMENT** = `"IMAGE_DOCUMENT"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L17)
+
+---
+
+### INDEX
+
+• **INDEX** = `"INDEX"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L15)
+
+---
+
+### TEXT
+
+• **TEXT** = `"TEXT"`
+
+#### Defined in
+
+[packages/core/src/Node.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L13)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/OpenAIEmbeddingModelType.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/OpenAIEmbeddingModelType.md
new file mode 100644
index 0000000000000000000000000000000000000000..3d6c2689e448c8908181e07fe330ebb230474ba8
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/OpenAIEmbeddingModelType.md
@@ -0,0 +1,17 @@
+---
+id: "OpenAIEmbeddingModelType"
+title: "Enumeration: OpenAIEmbeddingModelType"
+sidebar_label: "OpenAIEmbeddingModelType"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### TEXT_EMBED_ADA_002
+
+• **TEXT_EMBED_ADA_002** = `"text-embedding-ada-002"`
+
+#### Defined in
+
+[packages/core/src/embeddings/OpenAIEmbedding.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/OpenAIEmbedding.ts#L13)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/SimilarityType.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/SimilarityType.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a87230b5c8b04a9805865ad68aae50580df6ebe
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/SimilarityType.md
@@ -0,0 +1,53 @@
+---
+id: "SimilarityType"
+title: "Enumeration: SimilarityType"
+sidebar_label: "SimilarityType"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Similarity type
+Default is cosine similarity. Dot product and negative Euclidean distance are also supported.
+
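+A sketch of how these modes are typically consumed. The `similarity(a, b, mode)` helper below is an assumption; check the package exports for the exact name:
+
+```typescript
+import { SimilarityType, similarity } from "llamaindex";
+
+// Assumed helper: scores two embeddings under a chosen mode.
+const a = [0.1, 0.2, 0.3];
+const b = [0.3, 0.2, 0.1];
+
+const cosine = similarity(a, b, SimilarityType.DEFAULT);
+const dot = similarity(a, b, SimilarityType.DOT_PRODUCT);
+```
+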
+## Enumeration Members
+
+### DEFAULT
+
+• **DEFAULT** = `"cosine"`
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:8](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L8)
+
+---
+
+### DOT_PRODUCT
+
+• **DOT_PRODUCT** = `"dot_product"`
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:9](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L9)
+
+---
+
+### EUCLIDEAN
+
+• **EUCLIDEAN** = `"euclidean"`
+
+#### Defined in
+
+[packages/core/src/embeddings/types.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/types.ts#L10)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/SummaryRetrieverMode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/SummaryRetrieverMode.md
new file mode 100644
index 0000000000000000000000000000000000000000..1e82f46980b3c8956692af585bc4da81bd473c75
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/SummaryRetrieverMode.md
@@ -0,0 +1,27 @@
+---
+id: "SummaryRetrieverMode"
+title: "Enumeration: SummaryRetrieverMode"
+sidebar_label: "SummaryRetrieverMode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### DEFAULT
+
+• **DEFAULT** = `"default"`
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L31)
+
+---
+
+### LLM
+
+• **LLM** = `"llm"`
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L33)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/Tokenizers.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/Tokenizers.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a17c33fe4ba0feb64c4e6b055b5aa8fe6d66bfb
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/Tokenizers.md
@@ -0,0 +1,17 @@
+---
+id: "Tokenizers"
+title: "Enumeration: Tokenizers"
+sidebar_label: "Tokenizers"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### CL100K_BASE
+
+• **CL100K_BASE** = `"cl100k_base"`
+
+#### Defined in
+
+[packages/core/src/GlobalsHelper.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/GlobalsHelper.ts#L7)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/VectorStoreQueryMode.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/VectorStoreQueryMode.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e108654192f8ae943df1d2eab35b95bb4f83211
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/VectorStoreQueryMode.md
@@ -0,0 +1,77 @@
+---
+id: "VectorStoreQueryMode"
+title: "Enumeration: VectorStoreQueryMode"
+sidebar_label: "VectorStoreQueryMode"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Enumeration Members
+
+### DEFAULT
+
+• **DEFAULT** = `"default"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L10)
+
+---
+
+### HYBRID
+
+• **HYBRID** = `"hybrid"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L12)
+
+---
+
+### LINEAR_REGRESSION
+
+• **LINEAR_REGRESSION** = `"linear_regression"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L16)
+
+---
+
+### LOGISTIC_REGRESSION
+
+• **LOGISTIC_REGRESSION** = `"logistic_regression"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L15)
+
+---
+
+### MMR
+
+• **MMR** = `"mmr"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L18)
+
+---
+
+### SPARSE
+
+• **SPARSE** = `"sparse"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L11)
+
+---
+
+### SVM
+
+• **SVM** = `"svm"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L14)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/_category_.yml b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1687a9e03fd705b092a975c2bd86f8e76af69ea1
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/enums/_category_.yml
@@ -0,0 +1,2 @@
+label: "Enumerations"
+position: 2
\ No newline at end of file
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/index.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..b70f7760e4e54b198fd68223b315dfbe69dc73e7
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/index.md
@@ -0,0 +1,1609 @@
+---
+id: "index"
+title: "llamaindex"
+sidebar_label: "Exports"
+sidebar_position: 0.5
+custom_edit_url: null
+---
+
+## Enumerations
+
+- [ClipEmbeddingModelType](enums/ClipEmbeddingModelType.md)
+- [DeuceChatStrategy](enums/DeuceChatStrategy.md)
+- [IndexStructType](enums/IndexStructType.md)
+- [KeywordTableRetrieverMode](enums/KeywordTableRetrieverMode.md)
+- [MetadataMode](enums/MetadataMode.md)
+- [NodeRelationship](enums/NodeRelationship.md)
+- [ObjectType](enums/ObjectType.md)
+- [OpenAIEmbeddingModelType](enums/OpenAIEmbeddingModelType.md)
+- [SimilarityType](enums/SimilarityType.md)
+- [SummaryRetrieverMode](enums/SummaryRetrieverMode.md)
+- [Tokenizers](enums/Tokenizers.md)
+- [VectorStoreQueryMode](enums/VectorStoreQueryMode.md)
+
+## Classes
+
+- [Anthropic](classes/Anthropic.md)
+- [AudioSubtitlesReader](classes/AudioSubtitlesReader.md)
+- [AudioTranscriptParagraphsReader](classes/AudioTranscriptParagraphsReader.md)
+- [AudioTranscriptReader](classes/AudioTranscriptReader.md)
+- [AudioTranscriptSentencesReader](classes/AudioTranscriptSentencesReader.md)
+- [BaseDocumentStore](classes/BaseDocumentStore.md)
+- [BaseEmbedding](classes/BaseEmbedding.md)
+- [BaseInMemoryKVStore](classes/BaseInMemoryKVStore.md)
+- [BaseIndex](classes/BaseIndex.md)
+- [BaseIndexStore](classes/BaseIndexStore.md)
+- [BaseKVStore](classes/BaseKVStore.md)
+- [BaseNode](classes/BaseNode.md)
+- [CallbackManager](classes/CallbackManager.md)
+- [ClipEmbedding](classes/ClipEmbedding.md)
+- [CompactAndRefine](classes/CompactAndRefine.md)
+- [CondenseQuestionChatEngine](classes/CondenseQuestionChatEngine.md)
+- [ContextChatEngine](classes/ContextChatEngine.md)
+- [DefaultContextGenerator](classes/DefaultContextGenerator.md)
+- [Document](classes/Document.md)
+- [HTMLReader](classes/HTMLReader.md)
+- [HistoryChatEngine](classes/HistoryChatEngine.md)
+- [ImageDocument](classes/ImageDocument.md)
+- [ImageNode](classes/ImageNode.md)
+- [InMemoryFileSystem](classes/InMemoryFileSystem.md)
+- [IndexDict](classes/IndexDict.md)
+- [IndexList](classes/IndexList.md)
+- [IndexNode](classes/IndexNode.md)
+- [IndexStruct](classes/IndexStruct.md)
+- [KeywordTable](classes/KeywordTable.md)
+- [KeywordTableIndex](classes/KeywordTableIndex.md)
+- [KeywordTableLLMRetriever](classes/KeywordTableLLMRetriever.md)
+- [KeywordTableRAKERetriever](classes/KeywordTableRAKERetriever.md)
+- [KeywordTableSimpleRetriever](classes/KeywordTableSimpleRetriever.md)
+- [LLMQuestionGenerator](classes/LLMQuestionGenerator.md)
+- [LlamaDeuce](classes/LlamaDeuce.md)
+- [MarkdownReader](classes/MarkdownReader.md)
+- [MongoDBAtlasVectorSearch](classes/MongoDBAtlasVectorSearch.md)
+- [MultiModalEmbedding](classes/MultiModalEmbedding.md)
+- [NotionReader](classes/NotionReader.md)
+- [OpenAI](classes/OpenAI.md)
+- [OpenAIEmbedding](classes/OpenAIEmbedding.md)
+- [PDFReader](classes/PDFReader.md)
+- [PGVectorStore](classes/PGVectorStore.md)
+- [PapaCSVReader](classes/PapaCSVReader.md)
+- [Portkey](classes/Portkey.md)
+- [PromptHelper](classes/PromptHelper.md)
+- [Refine](classes/Refine.md)
+- [Response](classes/Response.md)
+- [ResponseSynthesizer](classes/ResponseSynthesizer.md)
+- [RetrieverQueryEngine](classes/RetrieverQueryEngine.md)
+- [SentenceSplitter](classes/SentenceSplitter.md)
+- [SimilarityPostprocessor](classes/SimilarityPostprocessor.md)
+- [SimpleChatEngine](classes/SimpleChatEngine.md)
+- [SimpleChatHistory](classes/SimpleChatHistory.md)
+- [SimpleDirectoryReader](classes/SimpleDirectoryReader.md)
+- [SimpleDocumentStore](classes/SimpleDocumentStore.md)
+- [SimpleIndexStore](classes/SimpleIndexStore.md)
+- [SimpleKVStore](classes/SimpleKVStore.md)
+- [SimpleMongoReader](classes/SimpleMongoReader.md)
+- [SimpleNodeParser](classes/SimpleNodeParser.md)
+- [SimpleResponseBuilder](classes/SimpleResponseBuilder.md)
+- [SimpleVectorStore](classes/SimpleVectorStore.md)
+- [SubQuestionOutputParser](classes/SubQuestionOutputParser.md)
+- [SubQuestionQueryEngine](classes/SubQuestionQueryEngine.md)
+- [SummaryChatHistory](classes/SummaryChatHistory.md)
+- [SummaryIndex](classes/SummaryIndex.md)
+- [SummaryIndexLLMRetriever](classes/SummaryIndexLLMRetriever.md)
+- [SummaryIndexRetriever](classes/SummaryIndexRetriever.md)
+- [TextFileReader](classes/TextFileReader.md)
+- [TextNode](classes/TextNode.md)
+- [TreeSummarize](classes/TreeSummarize.md)
+- [VectorIndexRetriever](classes/VectorIndexRetriever.md)
+- [VectorStoreIndex](classes/VectorStoreIndex.md)
+
+## Interfaces
+
+- [BaseIndexInit](interfaces/BaseIndexInit.md)
+- [BaseNodePostprocessor](interfaces/BaseNodePostprocessor.md)
+- [BaseOutputParser](interfaces/BaseOutputParser.md)
+- [BaseQueryEngine](interfaces/BaseQueryEngine.md)
+- [BaseQuestionGenerator](interfaces/BaseQuestionGenerator.md)
+- [BaseReader](interfaces/BaseReader.md)
+- [BaseRetriever](interfaces/BaseRetriever.md)
+- [BaseTool](interfaces/BaseTool.md)
+- [ChatEngine](interfaces/ChatEngine.md)
+- [ChatHistory](interfaces/ChatHistory.md)
+- [ChatMessage](interfaces/ChatMessage.md)
+- [ChatResponse](interfaces/ChatResponse.md)
+- [Context](interfaces/Context.md)
+- [ContextGenerator](interfaces/ContextGenerator.md)
+- [DefaultStreamToken](interfaces/DefaultStreamToken.md)
+- [Event](interfaces/Event.md)
+- [ExactMatchFilter](interfaces/ExactMatchFilter.md)
+- [GenericFileSystem](interfaces/GenericFileSystem.md)
+- [LLM](interfaces/LLM.md)
+- [LLMMetadata](interfaces/LLMMetadata.md)
+- [MessageContentDetail](interfaces/MessageContentDetail.md)
+- [MetadataFilters](interfaces/MetadataFilters.md)
+- [MetadataInfo](interfaces/MetadataInfo.md)
+- [NodeParser](interfaces/NodeParser.md)
+- [NodeWithScore](interfaces/NodeWithScore.md)
+- [QueryEngineTool](interfaces/QueryEngineTool.md)
+- [RefDocInfo](interfaces/RefDocInfo.md)
+- [RelatedNodeInfo](interfaces/RelatedNodeInfo.md)
+- [RetrievalCallbackResponse](interfaces/RetrievalCallbackResponse.md)
+- [ServiceContext](interfaces/ServiceContext.md)
+- [ServiceContextOptions](interfaces/ServiceContextOptions.md)
+- [StorageContext](interfaces/StorageContext.md)
+- [StreamCallbackResponse](interfaces/StreamCallbackResponse.md)
+- [StructuredOutput](interfaces/StructuredOutput.md)
+- [SubQuestion](interfaces/SubQuestion.md)
+- [ToolMetadata](interfaces/ToolMetadata.md)
+- [VectorStore](interfaces/VectorStore.md)
+- [VectorStoreInfo](interfaces/VectorStoreInfo.md)
+- [VectorStoreQuery](interfaces/VectorStoreQuery.md)
+- [VectorStoreQueryResult](interfaces/VectorStoreQueryResult.md)
+- [VectorStoreQuerySpec](interfaces/VectorStoreQuerySpec.md)
+- [WalkableFileSystem](interfaces/WalkableFileSystem.md)
+
+## Type Aliases
+
+### AnthropicStreamToken
+
+Ƭ **AnthropicStreamToken**: `Object`
+
+#### Type declaration
+
+| Name          | Type                    |
+| :------------ | :---------------------- |
+| `completion`  | `string`                |
+| `log_id?`     | `string`                |
+| `model`       | `string`                |
+| `stop?`       | `boolean`               |
+| `stop_reason` | `string` \| `undefined` |
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:42](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L42)
+
+---
+
+### AssemblyAIOptions
+
+Ƭ **AssemblyAIOptions**: `Partial`<`BaseServiceParams`\>
+
+#### Defined in
+
+[packages/core/src/readers/AssemblyAI.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/AssemblyAI.ts#L12)
+
+---
+
+### ChoiceSelectPrompt
+
+Ƭ **ChoiceSelectPrompt**: typeof [`defaultChoiceSelectPrompt`](#defaultchoiceselectprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:165](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L165)
+
+---
+
+### CompleteFileSystem
+
+Ƭ **CompleteFileSystem**: [`GenericFileSystem`](interfaces/GenericFileSystem.md) & [`WalkableFileSystem`](interfaces/WalkableFileSystem.md)
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:49](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L49)
+
+---
+
+### CompletionResponse
+
+Ƭ **CompletionResponse**: [`ChatResponse`](interfaces/ChatResponse.md)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L51)
+
+---
+
+### CondenseQuestionPrompt
+
+Ƭ **CondenseQuestionPrompt**: typeof [`defaultCondenseQuestionPrompt`](#defaultcondensequestionprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:346](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L346)
+
+---
+
+### ContextSystemPrompt
+
+Ƭ **ContextSystemPrompt**: typeof [`defaultContextSystemPrompt`](#defaultcontextsystemprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:367](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L367)
+
+---
+
+### EventTag
+
+Ƭ **EventTag**: `"intermediate"` \| `"final"`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L10)
+
+---
+
+### EventType
+
+Ƭ **EventType**: `"retrieve"` \| `"llmPredict"` \| `"wrapper"`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L11)
+
+---
+
+### ImageNodeConstructorProps
+
+Ƭ **ImageNodeConstructorProps**<`T`\>: `Pick`<[`ImageNode`](classes/ImageNode.md)<`T`\>, `"image"` \| `"id_"`\> & `Partial`<[`ImageNode`](classes/ImageNode.md)<`T`\>\>
+
+#### Type parameters
+
+| Name | Type                            |
+| :--- | :------------------------------ |
+| `T`  | extends [`Metadata`](#metadata) |
+
+#### Defined in
+
+[packages/core/src/Node.ts:290](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L290)
+
+---
+
+### ImageType
+
+Ƭ **ImageType**: `string` \| `Blob` \| `URL`
+
+#### Defined in
+
+[packages/core/src/Node.ts:288](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L288)
+
+---
+
+### KeywordExtractPrompt
+
+Ƭ **KeywordExtractPrompt**: typeof [`defaultKeywordExtractPrompt`](#defaultkeywordextractprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:382](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L382)
+
+---
+
+### ListIndex
+
+Ƭ **ListIndex**: [`SummaryIndex`](classes/SummaryIndex.md)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:264](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L264)
+
+---
+
+### ListIndexLLMRetriever
+
+Ƭ **ListIndexLLMRetriever**: [`SummaryIndexLLMRetriever`](classes/SummaryIndexLLMRetriever.md)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:134](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L134)
+
+---
+
+### ListIndexRetriever
+
+Ƭ **ListIndexRetriever**: [`SummaryIndexRetriever`](classes/SummaryIndexRetriever.md)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndexRetriever.ts:133](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndexRetriever.ts#L133)
+
+---
+
+### ListRetrieverMode
+
+Ƭ **ListRetrieverMode**: [`SummaryRetrieverMode`](enums/SummaryRetrieverMode.md)
+
+#### Defined in
+
+[packages/core/src/indices/summary/SummaryIndex.ts:265](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/summary/SummaryIndex.ts#L265)
+
+---
+
+### MessageContent
+
+Ƭ **MessageContent**: `string` \| [`MessageContentDetail`](interfaces/MessageContentDetail.md)[]
+
+Extended type for the content of a message that allows for multi-modal messages.
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:350](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L350)
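+
+For illustration, a minimal sketch of the two content shapes (no `MessageContentDetail` fields are assumed beyond what the linked interface defines):
+
+```typescript
+// Plain-text content:
+const text: MessageContent = "Describe the attached image.";
+
+// Multi-modal content is an array of MessageContentDetail entries;
+// see the MessageContentDetail interface for the exact fields.
+```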
+
+---
+
+### MessageType
+
+Ƭ **MessageType**: `"user"` \| `"assistant"` \| `"system"` \| `"generic"` \| `"function"` \| `"memory"`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L31)
+
+---
+
+### Metadata
+
+Ƭ **Metadata**: `Record`<`string`, `any`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L27)
+
+---
+
+### OpenAIStreamToken
+
+Ƭ **OpenAIStreamToken**: [`DefaultStreamToken`](interfaces/DefaultStreamToken.md)
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:41](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L41)
+
+---
+
+### QueryKeywordExtractPrompt
+
+Ƭ **QueryKeywordExtractPrompt**: typeof [`defaultQueryKeywordExtractPrompt`](#defaultquerykeywordextractprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:398](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L398)
+
+---
+
+### RefinePrompt
+
+Ƭ **RefinePrompt**: typeof [`defaultRefinePrompt`](#defaultrefineprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:106](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L106)
+
+---
+
+### RelatedNodeType
+
+Ƭ **RelatedNodeType**<`T`\>: [`RelatedNodeInfo`](interfaces/RelatedNodeInfo.md)<`T`\> \| [`RelatedNodeInfo`](interfaces/RelatedNodeInfo.md)<`T`\>[]
+
+#### Type parameters
+
+| Name | Type                                                      |
+| :--- | :-------------------------------------------------------- |
+| `T`  | extends [`Metadata`](#metadata) = [`Metadata`](#metadata) |
+
+#### Defined in
+
+[packages/core/src/Node.ts:36](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L36)
+
+---
+
+### SimpleDirectoryReaderLoadDataProps
+
+Ƭ **SimpleDirectoryReaderLoadDataProps**: `Object`
+
+#### Type declaration
+
+| Name               | Type                                                          |
+| :----------------- | :------------------------------------------------------------ |
+| `defaultReader?`   | [`BaseReader`](interfaces/BaseReader.md) \| `null`            |
+| `directoryPath`    | `string`                                                      |
+| `fileExtToReader?` | `Record`<`string`, [`BaseReader`](interfaces/BaseReader.md)\> |
+| `fs?`              | [`CompleteFileSystem`](#completefilesystem)                   |
+
+#### Defined in
+
+[packages/core/src/readers/SimpleDirectoryReader.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleDirectoryReader.ts#L52)
+
+---
+
+### SimplePrompt
+
+Ƭ **SimplePrompt**: (`input`: `Record`<`string`, `string` \| `undefined`\>) => `string`
+
+#### Type declaration
+
+▸ (`input`): `string`
+
+A SimplePrompt is a function that takes a dictionary of inputs and returns a string.
+NOTE: this interface differs from the one in LlamaIndex Python.
+NOTE 2: missing inputs default to the empty string, which makes it easy to calculate prompt sizes.
+
+##### Parameters
+
+| Name    | Type                                         |
+| :------ | :------------------------------------------- |
+| `input` | `Record`<`string`, `string` \| `undefined`\> |
+
+##### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L10)
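+
+A minimal sketch of writing and calling one (the `context` and `query` keys are hypothetical template inputs, and the import path assumes the `llamaindex` package):
+
+```typescript
+import type { SimplePrompt } from "llamaindex";
+
+const qaPrompt: SimplePrompt = (input) =>
+  `Context: ${input.context ?? ""}\nQuestion: ${input.query ?? ""}\nAnswer:`;
+
+qaPrompt({ query: "What is RAG?" });
+// "Context: \nQuestion: What is RAG?\nAnswer:"
+```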
+
+---
+
+### SubQuestionPrompt
+
+Ƭ **SubQuestionPrompt**: typeof [`defaultSubQuestionPrompt`](#defaultsubquestionprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:314](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L314)
+
+---
+
+### SubtitleFormat
+
+Ƭ **SubtitleFormat**: `"srt"` \| `"vtt"`
+
+**`Description`**
+
+Format of the subtitles
+
+#### Defined in
+
+node_modules/.pnpm/assemblyai@3.1.3/node_modules/assemblyai/dist/types/openapi.generated.d.ts:309
+
+---
+
+### SummaryPrompt
+
+Ƭ **SummaryPrompt**: typeof [`defaultSummaryPrompt`](#defaultsummaryprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:73](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L73)
+
+---
+
+### TextQaPrompt
+
+Ƭ **TextQaPrompt**: typeof [`defaultTextQaPrompt`](#defaulttextqaprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:37](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L37)
+
+---
+
+### TranscribeParams
+
+Ƭ **TranscribeParams**: { `audio`: `AudioToTranscribe` } & `Omit`<`TranscriptParams`, `"audio_url"`\>
+
+The parameters to transcribe an audio file.
+
+#### Defined in
+
+node_modules/.pnpm/assemblyai@3.1.3/node_modules/assemblyai/dist/types/transcripts/index.d.ts:29
+
+---
+
+### TreeSummarizePrompt
+
+Ƭ **TreeSummarizePrompt**: typeof [`defaultTreeSummarizePrompt`](#defaulttreesummarizeprompt)
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:131](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L131)
+
+## Variables
+
+### ALL_AVAILABLE_ANTHROPIC_MODELS
+
+• `Const` **ALL_AVAILABLE_ANTHROPIC_MODELS**: `Object`
+
+#### Type declaration
+
+| Name                             | Type                                   |
+| :------------------------------- | :------------------------------------- |
+| `claude-2`                       | { `contextWindow`: `number` = 200000 } |
+| `claude-2.contextWindow`         | `number`                               |
+| `claude-instant-1`               | { `contextWindow`: `number` = 100000 } |
+| `claude-instant-1.contextWindow` | `number`                               |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:642](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L642)
+
+---
+
+### ALL_AVAILABLE_LLAMADEUCE_MODELS
+
+• `Const` **ALL_AVAILABLE_LLAMADEUCE_MODELS**: `Object`
+
+#### Type declaration
+
+| Name                                  | Type                                                                                                                                                            |
+| :------------------------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `Llama-2-13b-chat-4bit`               | { `contextWindow`: `number` = 4096; `replicateApi`: `string` = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d" }       |
+| `Llama-2-13b-chat-4bit.contextWindow` | `number`                                                                                                                                                        |
+| `Llama-2-13b-chat-4bit.replicateApi`  | `string`                                                                                                                                                        |
+| `Llama-2-13b-chat-old`                | { `contextWindow`: `number` = 4096; `replicateApi`: `string` = "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5" } |
+| `Llama-2-13b-chat-old.contextWindow`  | `number`                                                                                                                                                        |
+| `Llama-2-13b-chat-old.replicateApi`   | `string`                                                                                                                                                        |
+| `Llama-2-70b-chat-4bit`               | { `contextWindow`: `number` = 4096; `replicateApi`: `string` = "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3" }       |
+| `Llama-2-70b-chat-4bit.contextWindow` | `number`                                                                                                                                                        |
+| `Llama-2-70b-chat-4bit.replicateApi`  | `string`                                                                                                                                                        |
+| `Llama-2-70b-chat-old`                | { `contextWindow`: `number` = 4096; `replicateApi`: `string` = "replicate/llama70b-v2-chat:e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48" }  |
+| `Llama-2-70b-chat-old.contextWindow`  | `number`                                                                                                                                                        |
+| `Llama-2-70b-chat-old.replicateApi`   | `string`                                                                                                                                                        |
+| `Llama-2-7b-chat-4bit`                | { `contextWindow`: `number` = 4096; `replicateApi`: `string` = "meta/llama-2-7b-chat:13c3cdee13ee059ab779f0291d29054dab00a47dad8261375654de5540165fb0" }        |
+| `Llama-2-7b-chat-4bit.contextWindow`  | `number`                                                                                                                                                        |
+| `Llama-2-7b-chat-4bit.replicateApi`   | `string`                                                                                                                                                        |
+| `Llama-2-7b-chat-old`                 | { `contextWindow`: `number` = 4096; `replicateApi`: `string` = "a16z-infra/llama7b-v2-chat:4f0a4744c7295c024a1de15e1a63c880d3da035fa1f49bfd344fe076074c8eea" }  |
+| `Llama-2-7b-chat-old.contextWindow`   | `number`                                                                                                                                                        |
+| `Llama-2-7b-chat-old.replicateApi`    | `string`                                                                                                                                                        |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:372](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L372)
+
+---
+
+### ALL_AVAILABLE_OPENAI_MODELS
+
+• `Const` **ALL_AVAILABLE_OPENAI_MODELS**: `Object`
+
+We currently support GPT-3.5 and GPT-4 models.
+
+#### Type declaration
+
+| Name                                 | Type                                   |
+| :----------------------------------- | :------------------------------------- |
+| `gpt-3.5-turbo`                      | { `contextWindow`: `number` = 4096 }   |
+| `gpt-3.5-turbo.contextWindow`        | `number`                               |
+| `gpt-3.5-turbo-1106`                 | { `contextWindow`: `number` = 16384 }  |
+| `gpt-3.5-turbo-1106.contextWindow`   | `number`                               |
+| `gpt-3.5-turbo-16k`                  | { `contextWindow`: `number` = 16384 }  |
+| `gpt-3.5-turbo-16k.contextWindow`    | `number`                               |
+| `gpt-4`                              | { `contextWindow`: `number` = 8192 }   |
+| `gpt-4.contextWindow`                | `number`                               |
+| `gpt-4-1106-preview`                 | { `contextWindow`: `number` = 128000 } |
+| `gpt-4-1106-preview.contextWindow`   | `number`                               |
+| `gpt-4-32k`                          | { `contextWindow`: `number` = 32768 }  |
+| `gpt-4-32k.contextWindow`            | `number`                               |
+| `gpt-4-vision-preview`               | { `contextWindow`: `number` = 8192 }   |
+| `gpt-4-vision-preview.contextWindow` | `number`                               |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:119](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L119)
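+
+A minimal lookup sketch based on the table above:
+
+```typescript
+// Read the context window for a given model name:
+const contextWindow = ALL_AVAILABLE_OPENAI_MODELS["gpt-4"].contextWindow; // 8192
+```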
+
+---
+
+### DEFAULT_CHUNK_OVERLAP
+
+• `Const` **DEFAULT_CHUNK_OVERLAP**: `20`
+
+#### Defined in
+
+[packages/core/src/constants.ts:5](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L5)
+
+---
+
+### DEFAULT_CHUNK_OVERLAP_RATIO
+
+• `Const` **DEFAULT_CHUNK_OVERLAP_RATIO**: `0.1`
+
+#### Defined in
+
+[packages/core/src/constants.ts:6](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L6)
+
+---
+
+### DEFAULT_CHUNK_SIZE
+
+• `Const` **DEFAULT_CHUNK_SIZE**: `1024`
+
+#### Defined in
+
+[packages/core/src/constants.ts:4](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L4)
+
+---
+
+### DEFAULT_COLLECTION
+
+• `Const` **DEFAULT_COLLECTION**: `"data"`
+
+#### Defined in
+
+[packages/core/src/storage/constants.ts:1](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/constants.ts#L1)
+
+---
+
+### DEFAULT_CONTEXT_WINDOW
+
+• `Const` **DEFAULT_CONTEXT_WINDOW**: `3900`
+
+#### Defined in
+
+[packages/core/src/constants.ts:1](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L1)
+
+---
+
+### DEFAULT_DOC_STORE_PERSIST_FILENAME
+
+• `Const` **DEFAULT_DOC_STORE_PERSIST_FILENAME**: `"doc_store.json"`
+
+#### Defined in
+
+[packages/core/src/storage/constants.ts:4](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/constants.ts#L4)
+
+---
+
+### DEFAULT_EMBEDDING_DIM
+
+• `Const` **DEFAULT_EMBEDDING_DIM**: `1536`
+
+#### Defined in
+
+[packages/core/src/constants.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L10)
+
+---
+
+### DEFAULT_FS
+
+• `Const` **DEFAULT_FS**: [`GenericFileSystem`](interfaces/GenericFileSystem.md) \| [`CompleteFileSystem`](#completefilesystem)
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:62](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L62)
+
+---
+
+### DEFAULT_GRAPH_STORE_PERSIST_FILENAME
+
+• `Const` **DEFAULT_GRAPH_STORE_PERSIST_FILENAME**: `"graph_store.json"`
+
+#### Defined in
+
+[packages/core/src/storage/constants.ts:6](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/constants.ts#L6)
+
+---
+
+### DEFAULT_INDEX_STORE_PERSIST_FILENAME
+
+• `Const` **DEFAULT_INDEX_STORE_PERSIST_FILENAME**: `"index_store.json"`
+
+#### Defined in
+
+[packages/core/src/storage/constants.ts:3](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/constants.ts#L3)
+
+---
+
+### DEFAULT_NAMESPACE
+
+• `Const` **DEFAULT_NAMESPACE**: `"docstore"`
+
+#### Defined in
+
+[packages/core/src/storage/constants.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/constants.ts#L7)
+
+---
+
+### DEFAULT_NUM_OUTPUTS
+
+• `Const` **DEFAULT_NUM_OUTPUTS**: `256`
+
+#### Defined in
+
+[packages/core/src/constants.ts:2](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L2)
+
+---
+
+### DEFAULT_PADDING
+
+• `Const` **DEFAULT_PADDING**: `5`
+
+#### Defined in
+
+[packages/core/src/constants.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L11)
+
+---
+
+### DEFAULT_PERSIST_DIR
+
+• `Const` **DEFAULT_PERSIST_DIR**: `"./storage"`
+
+#### Defined in
+
+[packages/core/src/storage/constants.ts:2](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/constants.ts#L2)
+
+---
+
+### DEFAULT_SIMILARITY_TOP_K
+
+• `Const` **DEFAULT_SIMILARITY_TOP_K**: `2`
+
+#### Defined in
+
+[packages/core/src/constants.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/constants.ts#L7)
+
+---
+
+### DEFAULT_VECTOR_STORE_PERSIST_FILENAME
+
+• `Const` **DEFAULT_VECTOR_STORE_PERSIST_FILENAME**: `"vector_store.json"`
+
+#### Defined in
+
+[packages/core/src/storage/constants.ts:5](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/constants.ts#L5)
+
+---
+
+### FILE_EXT_TO_READER
+
+• `Const` **FILE_EXT_TO_READER**: `Record`<`string`, [`BaseReader`](interfaces/BaseReader.md)\>
+
+#### Defined in
+
+[packages/core/src/readers/SimpleDirectoryReader.ts:38](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/SimpleDirectoryReader.ts#L38)
+
+---
+
+### GPT35_MODELS
+
+• `Const` **GPT35_MODELS**: `Object`
+
+#### Type declaration
+
+| Name                               | Type                                  |
+| :--------------------------------- | :------------------------------------ |
+| `gpt-3.5-turbo`                    | { `contextWindow`: `number` = 4096 }  |
+| `gpt-3.5-turbo.contextWindow`      | `number`                              |
+| `gpt-3.5-turbo-1106`               | { `contextWindow`: `number` = 16384 } |
+| `gpt-3.5-turbo-1106.contextWindow` | `number`                              |
+| `gpt-3.5-turbo-16k`                | { `contextWindow`: `number` = 16384 } |
+| `gpt-3.5-turbo-16k.contextWindow`  | `number`                              |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:110](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L110)
+
+---
+
+### GPT4_MODELS
+
+• `Const` **GPT4_MODELS**: `Object`
+
+#### Type declaration
+
+| Name                                 | Type                                   |
+| :----------------------------------- | :------------------------------------- |
+| `gpt-4`                              | { `contextWindow`: `number` = 8192 }   |
+| `gpt-4.contextWindow`                | `number`                               |
+| `gpt-4-1106-preview`                 | { `contextWindow`: `number` = 128000 } |
+| `gpt-4-1106-preview.contextWindow`   | `number`                               |
+| `gpt-4-32k`                          | { `contextWindow`: `number` = 32768 }  |
+| `gpt-4-32k.contextWindow`            | `number`                               |
+| `gpt-4-vision-preview`               | { `contextWindow`: `number` = 8192 }   |
+| `gpt-4-vision-preview.contextWindow` | `number`                               |
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:103](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L103)
+
+---
+
+### globalsHelper
+
+• `Const` **globalsHelper**: `GlobalsHelper`
+
+#### Defined in
+
+[packages/core/src/GlobalsHelper.ts:76](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/GlobalsHelper.ts#L76)
+
+---
+
+### unixLineSeparator
+
+• `Const` **unixLineSeparator**: `"\n"`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L44)
+
+---
+
+### unixParagraphSeparator
+
+• `Const` **unixParagraphSeparator**: `string`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:46](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L46)
+
+---
+
+### windowsLineSeparator
+
+• `Const` **windowsLineSeparator**: `"\r\n"`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:45](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L45)
+
+---
+
+### windowsParagraphSeparator
+
+• `Const` **windowsParagraphSeparator**: `string`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:47](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L47)
+
+## Functions
+
+### anthropicTextQaPrompt
+
+▸ **anthropicTextQaPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:39](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L39)
+
+---
+
+### buildToolsText
+
+▸ **buildToolsText**(`tools`): `string`
+
+#### Parameters
+
+| Name    | Type                                           |
+| :------ | :--------------------------------------------- |
+| `tools` | [`ToolMetadata`](interfaces/ToolMetadata.md)[] |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:243](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L243)
+
+---
+
+### cjkSentenceTokenizer
+
+▸ **cjkSentenceTokenizer**(`text`): `null` \| `RegExpMatchArray`
+
+Tokenizes sentences. Suitable for Chinese, Japanese, and Korean.
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+#### Returns
+
+`null` \| `RegExpMatchArray`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:36](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L36)
+
+---
+
+### defaultChoiceSelectPrompt
+
+▸ **defaultChoiceSelectPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:133](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L133)
+
+---
+
+### defaultCondenseQuestionPrompt
+
+▸ **defaultCondenseQuestionPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:330](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L330)
+
+---
+
+### defaultContextSystemPrompt
+
+▸ **defaultContextSystemPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:360](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L360)
+
+---
+
+### defaultKeywordExtractPrompt
+
+▸ **defaultKeywordExtractPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:369](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L369)
+
+---
+
+### defaultQueryKeywordExtractPrompt
+
+▸ **defaultQueryKeywordExtractPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:384](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L384)
+
+---
+
+### defaultRefinePrompt
+
+▸ **defaultRefinePrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:91](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L91)
+
+---
+
+### defaultSubQuestionPrompt
+
+▸ **defaultSubQuestionPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:284](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L284)
+
+---
+
+### defaultSummaryPrompt
+
+▸ **defaultSummaryPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:62](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L62)
+
+---
+
+### defaultTextQaPrompt
+
+▸ **defaultTextQaPrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L27)
+
+---
+
+### defaultTreeSummarizePrompt
+
+▸ **defaultTreeSummarizePrompt**(`«destructured»`): `string`
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `«destructured»` | `Object` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:121](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L121)
+
+---
+
+### englishSentenceTokenizer
+
+▸ **englishSentenceTokenizer**(`text`): `null` \| `RegExpMatchArray`
+
+Tokenizes sentences. Suitable for English and most European languages.
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `text` | `string` |
+
+#### Returns
+
+`null` \| `RegExpMatchArray`
+
+#### Defined in
+
+[packages/core/src/TextSplitter.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/TextSplitter.ts#L26)
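+
+Both sentence tokenizers share this shape; a minimal sketch (assuming the export comes from the `llamaindex` package):
+
+```typescript
+import { englishSentenceTokenizer } from "llamaindex";
+
+const sentences = englishSentenceTokenizer("First sentence. Second one!");
+// RegExpMatchArray of sentences, or null if nothing matched
+```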
+
+---
+
+### exists
+
+▸ **exists**(`fs`, `path`): `Promise`<`boolean`\>
+
+Checks if a file exists.
+Analogous to the os.path.exists function from Python.
+
+#### Parameters
+
+| Name   | Type                                                   | Description                    |
+| :----- | :----------------------------------------------------- | :----------------------------- |
+| `fs`   | [`GenericFileSystem`](interfaces/GenericFileSystem.md) | The filesystem to use.         |
+| `path` | `string`                                               | The path to the file to check. |
+
+#### Returns
+
+`Promise`<`boolean`\>
+
+A promise that resolves to true if the file exists, false otherwise.
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:74](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L74)
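+
+A minimal sketch pairing `exists` with `getNodeFS` (documented below); the path is illustrative:
+
+```typescript
+import { exists, getNodeFS } from "llamaindex";
+
+const fs = getNodeFS();
+const hasDocStore = await exists(fs, "./storage/doc_store.json"); // boolean
+```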
+
+---
+
+### getBiggestPrompt
+
+▸ **getBiggestPrompt**(`prompts`): [`SimplePrompt`](#simpleprompt)
+
+Get the prompt with the biggest empty-prompt size from a list of prompts.
+Used to calculate the maximum size of inputs to the LLM.
+
+#### Parameters
+
+| Name      | Type                              |
+| :-------- | :-------------------------------- |
+| `prompts` | [`SimplePrompt`](#simpleprompt)[] |
+
+#### Returns
+
+[`SimplePrompt`](#simpleprompt)
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L21)
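+
+A minimal sketch, using two of the default prompts documented below as inputs:
+
+```typescript
+// Returns whichever prompt consumes the most space when its inputs are
+// empty, i.e. the worst case for sizing LLM inputs:
+const biggest = getBiggestPrompt([defaultTextQaPrompt, defaultRefinePrompt]);
+```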
+
+---
+
+### getEmptyPromptTxt
+
+▸ **getEmptyPromptTxt**(`prompt`): `string`
+
+#### Parameters
+
+| Name     | Type                            |
+| :------- | :------------------------------ |
+| `prompt` | [`SimplePrompt`](#simpleprompt) |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/PromptHelper.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/PromptHelper.ts#L11)
+
+---
+
+### getNodeFS
+
+▸ **getNodeFS**(): [`CompleteFileSystem`](#completefilesystem)
+
+#### Returns
+
+[`CompleteFileSystem`](#completefilesystem)
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L51)
+
+---
+
+### getNodesFromDocument
+
+▸ **getNodesFromDocument**(`doc`, `textSplitter`, `includeMetadata?`, `includePrevNextRel?`): [`ImageDocument`](classes/ImageDocument.md)<`any`\>[] \| [`TextNode`](classes/TextNode.md)<[`Metadata`](#metadata)\>[]
+
+Generates an array of nodes from a document.
+
+#### Parameters
+
+| Name                 | Type                                                        | Default value | Description                                                      |
+| :------------------- | :---------------------------------------------------------- | :------------ | :--------------------------------------------------------------- |
+| `doc`                | [`BaseNode`](classes/BaseNode.md)<[`Metadata`](#metadata)\> | `undefined`   | -                                                                |
+| `textSplitter`       | [`SentenceSplitter`](classes/SentenceSplitter.md)           | `undefined`   | The text splitter to use.                                        |
+| `includeMetadata`    | `boolean`                                                   | `true`        | Whether to include metadata in the nodes.                        |
+| `includePrevNextRel` | `boolean`                                                   | `true`        | Whether to include previous and next relationships in the nodes. |
+
+#### Returns
+
+[`ImageDocument`](classes/ImageDocument.md)<`any`\>[] \| [`TextNode`](classes/TextNode.md)<[`Metadata`](#metadata)\>[]
+
+An array of nodes.
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:35](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L35)
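+
+A minimal sketch (assuming `Document` accepts a `text` field and `SentenceSplitter` can be constructed with defaults):
+
+```typescript
+import { Document, SentenceSplitter, getNodesFromDocument } from "llamaindex";
+
+const doc = new Document({ text: "LlamaIndex.TS builds LLM apps over your data." });
+const nodes = getNodesFromDocument(doc, new SentenceSplitter());
+```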
+
+---
+
+### getResponseBuilder
+
+▸ **getResponseBuilder**(`serviceContext`, `responseMode?`): `BaseResponseBuilder`
+
+#### Parameters
+
+| Name             | Type                                             |
+| :--------------- | :----------------------------------------------- |
+| `serviceContext` | [`ServiceContext`](interfaces/ServiceContext.md) |
+| `responseMode?`  | `ResponseMode`                                   |
+
+#### Returns
+
+`BaseResponseBuilder`
+
+#### Defined in
+
+[packages/core/src/ResponseSynthesizer.ts:271](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ResponseSynthesizer.ts#L271)
+
+---
+
+### getTextSplitsFromDocument
+
+▸ **getTextSplitsFromDocument**(`document`, `textSplitter`): `string`[]
+
+Splits the text of a document into smaller parts.
+
+#### Parameters
+
+| Name           | Type                                                        | Description               |
+| :------------- | :---------------------------------------------------------- | :------------------------ |
+| `document`     | [`Document`](classes/Document.md)<[`Metadata`](#metadata)\> | The document to split.    |
+| `textSplitter` | [`SentenceSplitter`](classes/SentenceSplitter.md)           | The text splitter to use. |
+
+#### Returns
+
+`string`[]
+
+An array of text splits.
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L17)
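+
+A minimal sketch, under the same assumptions as the `getNodesFromDocument` example above:
+
+```typescript
+const splits = getTextSplitsFromDocument(doc, new SentenceSplitter()); // string[]
+```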
+
+---
+
+### getTopKEmbeddings
+
+▸ **getTopKEmbeddings**(`queryEmbedding`, `embeddings`, `similarityTopK?`, `embeddingIds?`, `similarityCutoff?`): [`number`[], `any`[]]
+
+Get the top-K embeddings from a list of embeddings, ordered by similarity to the query.
+
+#### Parameters
+
+| Name               | Type               | Default value              | Description                                   |
+| :----------------- | :----------------- | :------------------------- | :-------------------------------------------- |
+| `queryEmbedding`   | `number`[]         | `undefined`                |                                               |
+| `embeddings`       | `number`[][]       | `undefined`                | list of embeddings to consider                |
+| `similarityTopK`   | `number`           | `DEFAULT_SIMILARITY_TOP_K` | max number of embeddings to return, default 2 |
+| `embeddingIds`     | `null` \| `any`[]  | `null`                     | ids of embeddings in the embeddings list      |
+| `similarityCutoff` | `null` \| `number` | `null`                     | minimum similarity score                      |
+
+#### Returns
+
+[`number`[], `any`[]]
+
+#### Defined in
+
+[packages/core/src/embeddings/utils.ts:69](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/utils.ts#L69)
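+
+A minimal sketch with toy two-dimensional embeddings:
+
+```typescript
+const [scores, ids] = getTopKEmbeddings(
+  [1, 0], // query embedding
+  [
+    [1, 0],
+    [0, 1],
+    [0.9, 0.1],
+  ], // candidate embeddings
+  2, // similarityTopK
+  ["a", "b", "c"], // embeddingIds
+);
+// ids should be the two closest candidates: ["a", "c"]
+```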
+
+---
+
+### getTopKEmbeddingsLearner
+
+▸ **getTopKEmbeddingsLearner**(`queryEmbedding`, `embeddings`, `similarityTopK?`, `embeddingsIds?`, `queryMode?`): [`number`[], `any`[]]
+
+#### Parameters
+
+| Name              | Type                                                    | Default value              |
+| :---------------- | :------------------------------------------------------ | :------------------------- |
+| `queryEmbedding`  | `number`[]                                              | `undefined`                |
+| `embeddings`      | `number`[][]                                            | `undefined`                |
+| `similarityTopK?` | `number`                                                | `undefined`                |
+| `embeddingsIds?`  | `any`[]                                                 | `undefined`                |
+| `queryMode`       | [`VectorStoreQueryMode`](enums/VectorStoreQueryMode.md) | `VectorStoreQueryMode.SVM` |
+
+#### Returns
+
+[`number`[], `any`[]]
+
+#### Defined in
+
+[packages/core/src/embeddings/utils.ts:111](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/utils.ts#L111)
+
+---
+
+### getTopKMMREmbeddings
+
+▸ **getTopKMMREmbeddings**(`queryEmbedding`, `embeddings`, `similarityFn?`, `similarityTopK?`, `embeddingIds?`, `_similarityCutoff?`, `mmrThreshold?`): [`number`[], `any`[]]
+
+#### Parameters
+
+| Name                | Type                                       | Default value |
+| :------------------ | :----------------------------------------- | :------------ |
+| `queryEmbedding`    | `number`[]                                 | `undefined`   |
+| `embeddings`        | `number`[][]                               | `undefined`   |
+| `similarityFn`      | `null` \| (...`args`: `any`[]) => `number` | `null`        |
+| `similarityTopK`    | `null` \| `number`                         | `null`        |
+| `embeddingIds`      | `null` \| `any`[]                          | `null`        |
+| `_similarityCutoff` | `null` \| `number`                         | `null`        |
+| `mmrThreshold`      | `null` \| `number`                         | `null`        |
+
+#### Returns
+
+[`number`[], `any`[]]
+
+#### Defined in
+
+[packages/core/src/embeddings/utils.ts:123](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/utils.ts#L123)
+
+---
+
+### jsonToIndexStruct
+
+▸ **jsonToIndexStruct**(`json`): [`IndexStruct`](classes/IndexStruct.md)
+
+#### Parameters
+
+| Name   | Type  |
+| :----- | :---- |
+| `json` | `any` |
+
+#### Returns
+
+[`IndexStruct`](classes/IndexStruct.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:74](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L74)
+
+---
+
+### jsonToNode
+
+▸ **jsonToNode**(`json`, `type?`): [`TextNode`](classes/TextNode.md)<[`Metadata`](#metadata)\>
+
+#### Parameters
+
+| Name    | Type                                |
+| :------ | :---------------------------------- |
+| `json`  | `any`                               |
+| `type?` | [`ObjectType`](enums/ObjectType.md) |
+
+#### Returns
+
+[`TextNode`](classes/TextNode.md)<[`Metadata`](#metadata)\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:268](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L268)
+
+---
+
+### messagesToHistoryStr
+
+▸ **messagesToHistoryStr**(`messages`): `string`
+
+#### Parameters
+
+| Name       | Type                                         |
+| :--------- | :------------------------------------------- |
+| `messages` | [`ChatMessage`](interfaces/ChatMessage.md)[] |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/Prompt.ts:348](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Prompt.ts#L348)
+
+---
+
+### parseJsonMarkdown
+
+▸ **parseJsonMarkdown**(`text`): `any`
+
+#### Parameters
+
+| Name   | Type     | Description                |
+| :----- | :------- | :------------------------- |
+| `text` | `string` | A markdown block with JSON |
+
+#### Returns
+
+`any`
+
+parsed JSON object
+
+#### Defined in
+
+[packages/core/src/OutputParser.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/OutputParser.ts#L56)
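+
+A minimal sketch:
+
+```typescript
+const obj = parseJsonMarkdown('```json\n{ "answer": 42 }\n```');
+// obj.answer === 42
+```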
+
+---
+
+### readImage
+
+▸ **readImage**(`input`): `Promise`<`RawImage`\>
+
+#### Parameters
+
+| Name    | Type                      |
+| :------ | :------------------------ |
+| `input` | [`ImageType`](#imagetype) |
+
+#### Returns
+
+`Promise`<`RawImage`\>
+
+#### Defined in
+
+[packages/core/src/embeddings/utils.ts:188](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/utils.ts#L188)
+
+---
+
+### serviceContextFromDefaults
+
+▸ **serviceContextFromDefaults**(`options?`): [`ServiceContext`](interfaces/ServiceContext.md)
+
+#### Parameters
+
+| Name       | Type                                                           |
+| :--------- | :------------------------------------------------------------- |
+| `options?` | [`ServiceContextOptions`](interfaces/ServiceContextOptions.md) |
+
+#### Returns
+
+[`ServiceContext`](interfaces/ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:30](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L30)
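+
+A minimal sketch (the `chunkSize` option name is an assumption about `ServiceContextOptions`):
+
+```typescript
+const serviceContext = serviceContextFromDefaults({ chunkSize: 512 });
+```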
+
+---
+
+### serviceContextFromServiceContext
+
+▸ **serviceContextFromServiceContext**(`serviceContext`, `options`): `Object`
+
+#### Parameters
+
+| Name             | Type                                                           |
+| :--------------- | :------------------------------------------------------------- |
+| `serviceContext` | [`ServiceContext`](interfaces/ServiceContext.md)               |
+| `options`        | [`ServiceContextOptions`](interfaces/ServiceContextOptions.md) |
+
+#### Returns
+
+`Object`
+
+| Name              | Type                                            |
+| :---------------- | :---------------------------------------------- |
+| `callbackManager` | [`CallbackManager`](classes/CallbackManager.md) |
+| `embedModel`      | [`BaseEmbedding`](classes/BaseEmbedding.md)     |
+| `llm`             | [`LLM`](interfaces/LLM.md)                      |
+| `nodeParser`      | [`NodeParser`](interfaces/NodeParser.md)        |
+| `promptHelper`    | [`PromptHelper`](classes/PromptHelper.md)       |
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:48](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L48)
+
+---
+
+### similarity
+
+▸ **similarity**(`embedding1`, `embedding2`, `mode?`): `number`
+
+Computes the similarity between two embeddings.
+
+#### Parameters
+
+| Name         | Type                                        | Default value            |
+| :----------- | :------------------------------------------ | :----------------------- |
+| `embedding1` | `number`[]                                  | `undefined`              |
+| `embedding2` | `number`[]                                  | `undefined`              |
+| `mode`       | [`SimilarityType`](enums/SimilarityType.md) | `SimilarityType.DEFAULT` |
+
+#### Returns
+
+`number`
+
+similarity score with higher numbers meaning the two embeddings are more similar
+
+#### Defined in
+
+[packages/core/src/embeddings/utils.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/embeddings/utils.ts#L15)
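+
+A minimal sketch with toy vectors:
+
+```typescript
+// Uses SimilarityType.DEFAULT; a higher score means more similar.
+const score = similarity([1, 0], [0.9, 0.1]);
+```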
+
+---
+
+### storageContextFromDefaults
+
+▸ **storageContextFromDefaults**(`«destructured»`): `Promise`<[`StorageContext`](interfaces/StorageContext.md)\>
+
+#### Parameters
+
+| Name             | Type            |
+| :--------------- | :-------------- |
+| `«destructured»` | `BuilderParams` |
+
+#### Returns
+
+`Promise`<[`StorageContext`](interfaces/StorageContext.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/StorageContext.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/StorageContext.ts#L24)
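+
+A minimal sketch (the `persistDir` field of `BuilderParams` is an assumption; `"./storage"` matches `DEFAULT_PERSIST_DIR` above):
+
+```typescript
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+```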
+
+---
+
+### walk
+
+▸ **walk**(`fs`, `dirPath`): `AsyncIterable`<`string`\>
+
+Recursively traverses a directory and yields all the paths to the files in it.
+
+#### Parameters
+
+| Name      | Type                                                     | Description                            |
+| :-------- | :------------------------------------------------------- | :------------------------------------- |
+| `fs`      | [`WalkableFileSystem`](interfaces/WalkableFileSystem.md) | The filesystem to use.                 |
+| `dirPath` | `string`                                                 | The path to the directory to traverse. |
+
+#### Returns
+
+`AsyncIterable`<`string`\>
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:91](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L91)
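+
+A minimal sketch, reusing the filesystem from the `exists` example above:
+
+```typescript
+for await (const filePath of walk(fs, "./data")) {
+  console.log(filePath);
+}
+```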
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseIndexInit.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseIndexInit.md
new file mode 100644
index 0000000000000000000000000000000000000000..d242592d8ca546a6a1ae3bea8a443da795228e4b
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseIndexInit.md
@@ -0,0 +1,73 @@
+---
+id: "BaseIndexInit"
+title: "Interface: BaseIndexInit<T>"
+sidebar_label: "BaseIndexInit"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Type parameters
+
+| Name |
+| :--- |
+| `T`  |
+
+## Properties
+
+### docStore
+
+• **docStore**: [`BaseDocumentStore`](../classes/BaseDocumentStore.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:143](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L143)
+
+---
+
+### indexStore
+
+• `Optional` **indexStore**: [`BaseIndexStore`](../classes/BaseIndexStore.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:145](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L145)
+
+---
+
+### indexStruct
+
+• **indexStruct**: `T`
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:146](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L146)
+
+---
+
+### serviceContext
+
+• **serviceContext**: [`ServiceContext`](ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:141](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L141)
+
+---
+
+### storageContext
+
+• **storageContext**: [`StorageContext`](StorageContext.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:142](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L142)
+
+---
+
+### vectorStore
+
+• `Optional` **vectorStore**: [`VectorStore`](VectorStore.md)
+
+#### Defined in
+
+[packages/core/src/indices/BaseIndex.ts:144](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseIndex.ts#L144)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseNodePostprocessor.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseNodePostprocessor.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ac85dc2b796f403321fd445c5f49e97eaa63dac
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseNodePostprocessor.md
@@ -0,0 +1,35 @@
+---
+id: "BaseNodePostprocessor"
+title: "Interface: BaseNodePostprocessor"
+sidebar_label: "BaseNodePostprocessor"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implemented by
+
+- [`SimilarityPostprocessor`](../classes/SimilarityPostprocessor.md)
+
+## Properties
+
+### postprocessNodes
+
+• **postprocessNodes**: (`nodes`: [`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]) => [`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Type declaration
+
+▸ (`nodes`): [`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+##### Parameters
+
+| Name    | Type                                                               |
+| :------ | :----------------------------------------------------------------- |
+| `nodes` | [`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[] |
+
+##### Returns
+
+[`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/indices/BaseNodePostprocessor.ts:4](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/indices/BaseNodePostprocessor.ts#L4)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseOutputParser.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseOutputParser.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c89d06944d6c52c68a4736646d6109cf48b1a24
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseOutputParser.md
@@ -0,0 +1,59 @@
+---
+id: "BaseOutputParser"
+title: "Interface: BaseOutputParser<T>"
+sidebar_label: "BaseOutputParser"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+An OutputParser is used to extract structured data from the raw output of the LLM.
+
+## Type parameters
+
+| Name |
+| :--- |
+| `T`  |
+
+## Implemented by
+
+- [`SubQuestionOutputParser`](../classes/SubQuestionOutputParser.md)
+
+## Methods
+
+### format
+
+▸ **format**(`output`): `string`
+
+#### Parameters
+
+| Name     | Type     |
+| :------- | :------- |
+| `output` | `string` |
+
+#### Returns
+
+`string`
+
+#### Defined in
+
+[packages/core/src/OutputParser.ts:8](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/OutputParser.ts#L8)
+
+---
+
+### parse
+
+▸ **parse**(`output`): `T`
+
+#### Parameters
+
+| Name     | Type     |
+| :------- | :------- |
+| `output` | `string` |
+
+#### Returns
+
+`T`
+
+#### Defined in
+
+[packages/core/src/OutputParser.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/OutputParser.ts#L7)
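+
+A minimal sketch of an implementation (the class name is hypothetical):
+
+```typescript
+class NumberOutputParser implements BaseOutputParser<number> {
+  // Clean up the raw LLM output before parsing.
+  format(output: string): string {
+    return output.trim();
+  }
+  // Extract the structured value.
+  parse(output: string): number {
+    return Number(output);
+  }
+}
+```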
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseQueryEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseQueryEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..067d58f100d4c490a453e102ea7b72561eb26b94
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseQueryEngine.md
@@ -0,0 +1,37 @@
+---
+id: "BaseQueryEngine"
+title: "Interface: BaseQueryEngine"
+sidebar_label: "BaseQueryEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A query engine answers a question, potentially using one or more steps.
+
+## Implemented by
+
+- [`RetrieverQueryEngine`](../classes/RetrieverQueryEngine.md)
+- [`SubQuestionQueryEngine`](../classes/SubQuestionQueryEngine.md)
+
+## Methods
+
+### query
+
+▸ **query**(`query`, `parentEvent?`): `Promise`<[`Response`](../classes/Response.md)\>
+
+Query the query engine and get a response.
+
+#### Parameters
+
+| Name           | Type                |
+| :------------- | :------------------ |
+| `query`        | `string`            |
+| `parentEvent?` | [`Event`](Event.md) |
+
+#### Returns
+
+`Promise`<[`Response`](../classes/Response.md)\>
+
+#### Defined in
+
+[packages/core/src/QueryEngine.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QueryEngine.ts#L25)
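+
+A minimal usage sketch, assuming `queryEngine` is one of the implementations listed above:
+
+```typescript
+const response = await queryEngine.query(
+  "What does the document say about indexing?",
+); // Promise<Response>
+```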
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseQuestionGenerator.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseQuestionGenerator.md
new file mode 100644
index 0000000000000000000000000000000000000000..33445bcc11fe8ab02d7f182980aa451950555c9d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseQuestionGenerator.md
@@ -0,0 +1,34 @@
+---
+id: "BaseQuestionGenerator"
+title: "Interface: BaseQuestionGenerator"
+sidebar_label: "BaseQuestionGenerator"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+QuestionGenerators generate sub-questions for the LLM from a set of tools and a user query.
+
+## Implemented by
+
+- [`LLMQuestionGenerator`](../classes/LLMQuestionGenerator.md)
+
+## Methods
+
+### generate
+
+▸ **generate**(`tools`, `query`): `Promise`<[`SubQuestion`](SubQuestion.md)[]\>
+
+#### Parameters
+
+| Name    | Type                                |
+| :------ | :---------------------------------- |
+| `tools` | [`ToolMetadata`](ToolMetadata.md)[] |
+| `query` | `string`                            |
+
+#### Returns
+
+`Promise`<[`SubQuestion`](SubQuestion.md)[]\>
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L23)
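+
+A minimal usage sketch (the `ToolMetadata` fields shown are assumptions):
+
+```typescript
+const subQuestions = await questionGenerator.generate(
+  [{ name: "docs_tool", description: "Answers questions about the docs" }],
+  "Compare indexing and querying",
+);
+```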
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseReader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseReader.md
new file mode 100644
index 0000000000000000000000000000000000000000..02e45dbfcaddb19163e16681914f04d47a70a695
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseReader.md
@@ -0,0 +1,40 @@
+---
+id: "BaseReader"
+title: "Interface: BaseReader"
+sidebar_label: "BaseReader"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A reader imports data into Document objects.
+
+## Implemented by
+
+- [`HTMLReader`](../classes/HTMLReader.md)
+- [`MarkdownReader`](../classes/MarkdownReader.md)
+- [`NotionReader`](../classes/NotionReader.md)
+- [`PDFReader`](../classes/PDFReader.md)
+- [`PapaCSVReader`](../classes/PapaCSVReader.md)
+- [`SimpleDirectoryReader`](../classes/SimpleDirectoryReader.md)
+- [`SimpleMongoReader`](../classes/SimpleMongoReader.md)
+- [`TextFileReader`](../classes/TextFileReader.md)
+
+## Methods
+
+### loadData
+
+▸ **loadData**(`...args`): `Promise`<[`Document`](../classes/Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name      | Type    |
+| :-------- | :------ |
+| `...args` | `any`[] |
+
+#### Returns
+
+`Promise`<[`Document`](../classes/Document.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/readers/base.ts:7](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/readers/base.ts#L7)
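+
+A minimal sketch of an implementation (assuming `Document` accepts a `text` field):
+
+```typescript
+class StringReader implements BaseReader {
+  async loadData(text: string): Promise<Document[]> {
+    return [new Document({ text })];
+  }
+}
+```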
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseRetriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseRetriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ed0658519cd65fb68a8bd5c3fba2f977ffe7696
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseRetriever.md
@@ -0,0 +1,51 @@
+---
+id: "BaseRetriever"
+title: "Interface: BaseRetriever"
+sidebar_label: "BaseRetriever"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Retrievers fetch the nodes that most closely match the query by similarity.
+
+## Implemented by
+
+- [`SummaryIndexLLMRetriever`](../classes/SummaryIndexLLMRetriever.md)
+- [`SummaryIndexRetriever`](../classes/SummaryIndexRetriever.md)
+- [`VectorIndexRetriever`](../classes/VectorIndexRetriever.md)
+
+## Methods
+
+### getServiceContext
+
+▸ **getServiceContext**(): [`ServiceContext`](ServiceContext.md)
+
+#### Returns
+
+[`ServiceContext`](ServiceContext.md)
+
+#### Defined in
+
+[packages/core/src/Retriever.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Retriever.ts#L14)
+
+---
+
+### retrieve
+
+▸ **retrieve**(`query`, `parentEvent?`, `preFilters?`): `Promise`<[`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Parameters
+
+| Name           | Type                |
+| :------------- | :------------------ |
+| `query`        | `string`            |
+| `parentEvent?` | [`Event`](Event.md) |
+| `preFilters?`  | `unknown`           |
+
+#### Returns
+
+`Promise`<[`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]\>
+
+#### Defined in
+
+[packages/core/src/Retriever.ts:9](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Retriever.ts#L9)
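+
+A minimal usage sketch (assuming `NodeWithScore` exposes `node` and `score` fields):
+
+```typescript
+const results = await retriever.retrieve("What is a vector store?");
+for (const { node, score } of results) {
+  console.log(score, node);
+}
+```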
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseTool.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseTool.md
new file mode 100644
index 0000000000000000000000000000000000000000..ec38f4ef1a6f41208c2e6f61b5f2ae1297bd72d4
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/BaseTool.md
@@ -0,0 +1,25 @@
+---
+id: "BaseTool"
+title: "Interface: BaseTool"
+sidebar_label: "BaseTool"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Simple Tool interface. Likely to change.
+
+## Hierarchy
+
+- **`BaseTool`**
+
+  ↳ [`QueryEngineTool`](QueryEngineTool.md)
+
+## Properties
+
+### metadata
+
+• **metadata**: [`ToolMetadata`](ToolMetadata.md)
+
+#### Defined in
+
+[packages/core/src/Tool.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Tool.ts#L12)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatEngine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatEngine.md
new file mode 100644
index 0000000000000000000000000000000000000000..2220ba4cc859e076e913a85ad57185d5e585af4a
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatEngine.md
@@ -0,0 +1,62 @@
+---
+id: "ChatEngine"
+title: "Interface: ChatEngine"
+sidebar_label: "ChatEngine"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A ChatEngine is used to handle back-and-forth chats between the application and the LLM.
+
+## Implemented by
+
+- [`CondenseQuestionChatEngine`](../classes/CondenseQuestionChatEngine.md)
+- [`ContextChatEngine`](../classes/ContextChatEngine.md)
+- [`SimpleChatEngine`](../classes/SimpleChatEngine.md)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`message`, `chatHistory?`, `streaming?`): `Promise`<`R`\>
+
+Sends a message, along with the engine's current chat history, to the LLM.
+
+#### Type parameters
+
+| Name | Type                                                                                                       |
+| :--- | :--------------------------------------------------------------------------------------------------------- |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                             |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`Response`](../classes/Response.md) |
+
+#### Parameters
+
+| Name           | Type                                   | Description                                                        |
+| :------------- | :------------------------------------- | :----------------------------------------------------------------- |
+| `message`      | [`MessageContent`](../#messagecontent) |                                                                    |
+| `chatHistory?` | [`ChatMessage`](ChatMessage.md)[]      | optional chat history to use in place of the engine's stored history |
+| `streaming?`   | `T`                                    | optional streaming flag; when `true`, the return value is an async generator of strings instead of a `Response`. |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L29)
+
+---
+
+### reset
+
+▸ **reset**(): `void`
+
+Resets the chat history so that it's empty.
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:41](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L41)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatHistory.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatHistory.md
new file mode 100644
index 0000000000000000000000000000000000000000..c36b8cb76725435bee2818509e867eeb658acc29
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatHistory.md
@@ -0,0 +1,100 @@
+---
+id: "ChatHistory"
+title: "Interface: ChatHistory"
+sidebar_label: "ChatHistory"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A ChatHistory is used to keep the state of back and forth chat messages
+
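+For illustration, a sketch using the shipped `SimpleChatHistory` (constructor defaults are an assumption):
+
+```typescript
+import { SimpleChatHistory } from "llamaindex";
+
+const history = new SimpleChatHistory();
+history.addMessage({ role: "user", content: "What is a vector index?" });
+
+// Messages that should be sent to the LLM, optionally with transient extras.
+const messagesForLlm = await history.requestMessages();
+
+// Only the messages added since the previous newMessages() call.
+const unseen = history.newMessages();
+console.log(messagesForLlm.length, unseen.length);
+```
+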
+## Implemented by
+
+- [`SimpleChatHistory`](../classes/SimpleChatHistory.md)
+- [`SummaryChatHistory`](../classes/SummaryChatHistory.md)
+
+## Properties
+
+### messages
+
+• **messages**: [`ChatMessage`](ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L12)
+
+## Methods
+
+### addMessage
+
+▸ **addMessage**(`message`): `void`
+
+Adds a message to the chat history.
+
+#### Parameters
+
+| Name      | Type                            |
+| :-------- | :------------------------------ |
+| `message` | [`ChatMessage`](ChatMessage.md) |
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L17)
+
+---
+
+### newMessages
+
+▸ **newMessages**(): [`ChatMessage`](ChatMessage.md)[]
+
+Returns the new messages since the last call to this function (or since calling the constructor)
+
+#### Returns
+
+[`ChatMessage`](ChatMessage.md)[]
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L32)
+
+---
+
+### requestMessages
+
+▸ **requestMessages**(`transientMessages?`): `Promise`<[`ChatMessage`](ChatMessage.md)[]\>
+
+Returns the messages that should be used as input to the LLM.
+
+#### Parameters
+
+| Name                 | Type                              |
+| :------------------- | :-------------------------------- |
+| `transientMessages?` | [`ChatMessage`](ChatMessage.md)[] |
+
+#### Returns
+
+`Promise`<[`ChatMessage`](ChatMessage.md)[]\>
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L22)
+
+---
+
+### reset
+
+▸ **reset**(): `void`
+
+Resets the chat history so that it's empty.
+
+#### Returns
+
+`void`
+
+#### Defined in
+
+[packages/core/src/ChatHistory.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatHistory.ts#L27)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatMessage.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatMessage.md
new file mode 100644
index 0000000000000000000000000000000000000000..bba46d80597e3bd338ca62210f2152dc42572abc
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatMessage.md
@@ -0,0 +1,27 @@
+---
+id: "ChatMessage"
+title: "Interface: ChatMessage"
+sidebar_label: "ChatMessage"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### content
+
+• **content**: `any`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:40](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L40)
+
+---
+
+### role
+
+• **role**: [`MessageType`](../#messagetype)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:41](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L41)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatResponse.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatResponse.md
new file mode 100644
index 0000000000000000000000000000000000000000..ddd3ae8db1c9d581d0fb41b21f1dadbb94c58258
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ChatResponse.md
@@ -0,0 +1,37 @@
+---
+id: "ChatResponse"
+title: "Interface: ChatResponse"
+sidebar_label: "ChatResponse"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### delta
+
+• `Optional` **delta**: `string`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:47](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L47)
+
+---
+
+### message
+
+• **message**: [`ChatMessage`](ChatMessage.md)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:45](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L45)
+
+---
+
+### raw
+
+• `Optional` **raw**: `Record`<`string`, `any`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:46](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L46)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/Context.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/Context.md
new file mode 100644
index 0000000000000000000000000000000000000000..467628938dd65d17942d11aabf436ae73e02fb40
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/Context.md
@@ -0,0 +1,27 @@
+---
+id: "Context"
+title: "Interface: Context"
+sidebar_label: "Context"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### message
+
+• **message**: [`ChatMessage`](ChatMessage.md)
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:175](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L175)
+
+---
+
+### nodes
+
+• **nodes**: [`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:176](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L176)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ContextGenerator.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ContextGenerator.md
new file mode 100644
index 0000000000000000000000000000000000000000..5770d62ed898b1c2d798ffac54beb81fd3e607de
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ContextGenerator.md
@@ -0,0 +1,32 @@
+---
+id: "ContextGenerator"
+title: "Interface: ContextGenerator"
+sidebar_label: "ContextGenerator"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implemented by
+
+- [`DefaultContextGenerator`](../classes/DefaultContextGenerator.md)
+
+## Methods
+
+### generate
+
+▸ **generate**(`message`, `parentEvent?`): `Promise`<[`Context`](Context.md)\>
+
+#### Parameters
+
+| Name           | Type                |
+| :------------- | :------------------ |
+| `message`      | `string`            |
+| `parentEvent?` | [`Event`](Event.md) |
+
+#### Returns
+
+`Promise`<[`Context`](Context.md)\>
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:180](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L180)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/DefaultStreamToken.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/DefaultStreamToken.md
new file mode 100644
index 0000000000000000000000000000000000000000..1fac64e87965e1a3af6fb9140d33abc53c5fe923
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/DefaultStreamToken.md
@@ -0,0 +1,57 @@
+---
+id: "DefaultStreamToken"
+title: "Interface: DefaultStreamToken"
+sidebar_label: "DefaultStreamToken"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### choices
+
+• **choices**: { `delta`: { `content?`: `null` \| `string` ; `role?`: `"function"` \| `"user"` \| `"assistant"` \| `"system"` \| `"tool"` } ; `finish_reason`: `null` \| `string` ; `index`: `number` }[]
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:29](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L29)
+
+---
+
+### created
+
+• **created**: `number`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L27)
+
+---
+
+### id
+
+• **id**: `string`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:25](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L25)
+
+---
+
+### model
+
+• **model**: `string`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:28](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L28)
+
+---
+
+### object
+
+• **object**: `string`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L26)
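+
+---
+
+Taken together, these fields mirror the chunk format of an OpenAI-style streaming API. As a sketch (assuming the type is exported from the package root, and with a hypothetical token source), the text deltas can be collected like this:
+
+```typescript
+import type { DefaultStreamToken } from "llamaindex";
+
+// Concatenate the content deltas of a finished stream (the tokens array is a stand-in).
+function collectText(tokens: DefaultStreamToken[]): string {
+  return tokens.map((t) => t.choices[0]?.delta.content ?? "").join("");
+}
+```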
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/Event.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/Event.md
new file mode 100644
index 0000000000000000000000000000000000000000..5404fcd932c94ed8afe28ed5cca6a5e5012cf79f
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/Event.md
@@ -0,0 +1,47 @@
+---
+id: "Event"
+title: "Interface: Event"
+sidebar_label: "Event"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### id
+
+• **id**: `string`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L13)
+
+---
+
+### parentId
+
+• `Optional` **parentId**: `string`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L16)
+
+---
+
+### tags
+
+• `Optional` **tags**: [`EventTag`](../#eventtag)[]
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L15)
+
+---
+
+### type
+
+• **type**: [`EventType`](../#eventtype)
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L14)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ExactMatchFilter.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ExactMatchFilter.md
new file mode 100644
index 0000000000000000000000000000000000000000..479b715139ee58a9a55f6631630444366c6ed8a6
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ExactMatchFilter.md
@@ -0,0 +1,37 @@
+---
+id: "ExactMatchFilter"
+title: "Interface: ExactMatchFilter"
+sidebar_label: "ExactMatchFilter"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### filterType
+
+• **filterType**: `"ExactMatch"`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L22)
+
+---
+
+### key
+
+• **key**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L23)
+
+---
+
+### value
+
+• **value**: `string` \| `number`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L24)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/GenericFileSystem.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/GenericFileSystem.md
new file mode 100644
index 0000000000000000000000000000000000000000..273a3afd9ea37d09f3b8c5eaf4b46754f78d2077
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/GenericFileSystem.md
@@ -0,0 +1,100 @@
+---
+id: "GenericFileSystem"
+title: "Interface: GenericFileSystem"
+sidebar_label: "GenericFileSystem"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A filesystem interface that is meant to be compatible with
+the 'fs' module from Node.js.
+It allows a similar interface implementation to be used in
+browsers.
+
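+For illustration, a minimal in-memory implementation sketch (the library's own browser-friendly implementation is `InMemoryFileSystem`):
+
+```typescript
+import type { GenericFileSystem } from "llamaindex";
+
+class MapFileSystem implements GenericFileSystem {
+  private files = new Map<string, string>();
+
+  async writeFile(path: string, content: string): Promise<void> {
+    this.files.set(path, content);
+  }
+  async readFile(path: string): Promise<string> {
+    const content = this.files.get(path);
+    if (content === undefined) throw new Error(`No such file: ${path}`);
+    return content;
+  }
+  async access(path: string): Promise<void> {
+    if (!this.files.has(path)) throw new Error(`No such file: ${path}`);
+  }
+  async mkdir(_path: string): Promise<void> {
+    // Directories are implicit in this flat in-memory store.
+  }
+}
+```
+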
+## Implemented by
+
+- [`InMemoryFileSystem`](../classes/InMemoryFileSystem.md)
+
+## Methods
+
+### access
+
+▸ **access**(`path`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `path` | `string` |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L12)
+
+---
+
+### mkdir
+
+▸ **mkdir**(`path`, `options?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `path`     | `string` |
+| `options?` | `any`    |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L13)
+
+---
+
+### readFile
+
+▸ **readFile**(`path`, `options?`): `Promise`<`string`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `path`     | `string` |
+| `options?` | `any`    |
+
+#### Returns
+
+`Promise`<`string`\>
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L11)
+
+---
+
+### writeFile
+
+▸ **writeFile**(`path`, `content`, `options?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name       | Type     |
+| :--------- | :------- |
+| `path`     | `string` |
+| `content`  | `string` |
+| `options?` | `any`    |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:10](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L10)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/LLM.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/LLM.md
new file mode 100644
index 0000000000000000000000000000000000000000..83724729e7118f00ae0d6a5a283c2972793b43ef
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/LLM.md
@@ -0,0 +1,120 @@
+---
+id: "LLM"
+title: "Interface: LLM"
+sidebar_label: "LLM"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+Unified language model interface
+
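+A minimal sketch of both entry points, using the `OpenAI` implementation (the constructor options shown are illustrative):
+
+```typescript
+import { OpenAI } from "llamaindex";
+
+const llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+// complete(): a single prompt in, a ChatResponse out (non-streaming).
+const completion = await llm.complete("Translate 'hello' to French.");
+console.log(completion.message.content);
+
+// chat(): an explicit message list in, a ChatResponse out.
+const reply = await llm.chat([{ role: "user", content: "Say hi." }]);
+console.log(reply.message.content);
+```
+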
+## Implemented by
+
+- [`Anthropic`](../classes/Anthropic.md)
+- [`LlamaDeuce`](../classes/LlamaDeuce.md)
+- [`OpenAI`](../classes/OpenAI.md)
+- [`Portkey`](../classes/Portkey.md)
+
+## Properties
+
+### hasStreaming
+
+• **hasStreaming**: `boolean`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:68](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L68)
+
+---
+
+### metadata
+
+• **metadata**: [`LLMMetadata`](LLMMetadata.md)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:66](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L66)
+
+## Methods
+
+### chat
+
+▸ **chat**<`T`, `R`\>(`messages`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a chat response from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                    |
+| :--- | :------------------------------------------------------------------------------------------------------ |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                          |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                              | Description                                                                                      |
+| :------------- | :-------------------------------- | :----------------------------------------------------------------------------------------------- |
+| `messages`     | [`ChatMessage`](ChatMessage.md)[] | The return type of chat() and complete() is set by the "streaming" parameter being set to true. |
+| `parentEvent?` | [`Event`](Event.md)               | -                                                                                                |
+| `streaming?`   | `T`                               | -                                                                                                |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:75](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L75)
+
+---
+
+### complete
+
+▸ **complete**<`T`, `R`\>(`prompt`, `parentEvent?`, `streaming?`): `Promise`<`R`\>
+
+Get a prompt completion from the LLM
+
+#### Type parameters
+
+| Name | Type                                                                                                    |
+| :--- | :------------------------------------------------------------------------------------------------------ |
+| `T`  | extends `undefined` \| `boolean` = `undefined`                                                          |
+| `R`  | `T` extends `true` ? `AsyncGenerator`<`string`, `void`, `unknown`\> : [`ChatResponse`](ChatResponse.md) |
+
+#### Parameters
+
+| Name           | Type                | Description            |
+| :------------- | :------------------ | :--------------------- |
+| `prompt`       | `string`            | the prompt to complete |
+| `parentEvent?` | [`Event`](Event.md) | -                      |
+| `streaming?`   | `T`                 | -                      |
+
+#### Returns
+
+`Promise`<`R`\>
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:88](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L88)
+
+---
+
+### tokens
+
+▸ **tokens**(`messages`): `number`
+
+Calculates the number of tokens needed for the given chat messages
+
+#### Parameters
+
+| Name       | Type                              |
+| :--------- | :-------------------------------- |
+| `messages` | [`ChatMessage`](ChatMessage.md)[] |
+
+#### Returns
+
+`number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:100](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L100)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/LLMMetadata.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/LLMMetadata.md
new file mode 100644
index 0000000000000000000000000000000000000000..0fe4fbe9a14eaa900fe71a2542424a16e8bb0cb7
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/LLMMetadata.md
@@ -0,0 +1,67 @@
+---
+id: "LLMMetadata"
+title: "Interface: LLMMetadata"
+sidebar_label: "LLMMetadata"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### contextWindow
+
+• **contextWindow**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L58)
+
+---
+
+### maxTokens
+
+• `Optional` **maxTokens**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:57](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L57)
+
+---
+
+### model
+
+• **model**: `string`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L54)
+
+---
+
+### temperature
+
+• **temperature**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L55)
+
+---
+
+### tokenizer
+
+• **tokenizer**: `undefined` \| [`CL100K_BASE`](../enums/Tokenizers.md#cl100k_base)
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L59)
+
+---
+
+### topP
+
+• **topP**: `number`
+
+#### Defined in
+
+[packages/core/src/llm/LLM.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/llm/LLM.ts#L56)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MessageContentDetail.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MessageContentDetail.md
new file mode 100644
index 0000000000000000000000000000000000000000..08f7a6d66d06e214cbf1023143e7a1765cb4f736
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MessageContentDetail.md
@@ -0,0 +1,43 @@
+---
+id: "MessageContentDetail"
+title: "Interface: MessageContentDetail"
+sidebar_label: "MessageContentDetail"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### image_url
+
+• `Optional` **image_url**: `Object`
+
+#### Type declaration
+
+| Name  | Type     |
+| :---- | :------- |
+| `url` | `string` |
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:344](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L344)
+
+---
+
+### text
+
+• `Optional` **text**: `string`
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:343](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L343)
+
+---
+
+### type
+
+• **type**: `"text"` \| `"image_url"`
+
+#### Defined in
+
+[packages/core/src/ChatEngine.ts:342](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ChatEngine.ts#L342)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MetadataFilters.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MetadataFilters.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b9e8657843e692d562b90bc678ad523f452aa6e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MetadataFilters.md
@@ -0,0 +1,17 @@
+---
+id: "MetadataFilters"
+title: "Interface: MetadataFilters"
+sidebar_label: "MetadataFilters"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### filters
+
+• **filters**: [`ExactMatchFilter`](ExactMatchFilter.md)[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:28](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L28)
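+
+---
+
+A small sketch of building a filter set (assuming both types are exported from the package root; the key and value are examples):
+
+```typescript
+import type { ExactMatchFilter, MetadataFilters } from "llamaindex";
+
+// Keep only nodes whose `author` metadata equals the given value.
+const byAuthor: ExactMatchFilter = {
+  filterType: "ExactMatch",
+  key: "author",
+  value: "Ada Lovelace",
+};
+
+const filters: MetadataFilters = { filters: [byAuthor] };
+// `filters` can then be set on a VectorStoreQuery before querying a vector store.
+```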
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MetadataInfo.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MetadataInfo.md
new file mode 100644
index 0000000000000000000000000000000000000000..68fec167bee789667cd6d243e7224d1834fcd5ef
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/MetadataInfo.md
@@ -0,0 +1,37 @@
+---
+id: "MetadataInfo"
+title: "Interface: MetadataInfo"
+sidebar_label: "MetadataInfo"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### description
+
+• **description**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:40](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L40)
+
+---
+
+### name
+
+• **name**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:38](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L38)
+
+---
+
+### type
+
+• **type**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:39](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L39)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/NodeParser.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/NodeParser.md
new file mode 100644
index 0000000000000000000000000000000000000000..8ce6ee2be7e976fec181d5b27ce92cbf32b1a93a
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/NodeParser.md
@@ -0,0 +1,37 @@
+---
+id: "NodeParser"
+title: "Interface: NodeParser"
+sidebar_label: "NodeParser"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A NodeParser generates Nodes from Documents
+
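+A short sketch using the shipped `SimpleNodeParser` (constructor defaults are an assumption):
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const parser = new SimpleNodeParser();
+
+// Split one document into chunked nodes carrying metadata and relationships.
+const nodes = parser.getNodesFromDocuments([
+  new Document({ text: "A long passage of text to be split into chunks..." }),
+]);
+console.log(nodes.length);
+```
+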
+## Implemented by
+
+- [`SimpleNodeParser`](../classes/SimpleNodeParser.md)
+
+## Methods
+
+### getNodesFromDocuments
+
+▸ **getNodesFromDocuments**(`documents`): [`BaseNode`](../classes/BaseNode.md)<[`Metadata`](../#metadata)\>[]
+
+Generates an array of nodes from an array of documents.
+
+#### Parameters
+
+| Name        | Type                                                                | Description                           |
+| :---------- | :------------------------------------------------------------------ | :------------------------------------ |
+| `documents` | [`BaseNode`](../classes/BaseNode.md)<[`Metadata`](../#metadata)\>[] | The documents to generate nodes from. |
+
+#### Returns
+
+[`BaseNode`](../classes/BaseNode.md)<[`Metadata`](../#metadata)\>[]
+
+An array of nodes.
+
+#### Defined in
+
+[packages/core/src/NodeParser.ts:86](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/NodeParser.ts#L86)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/NodeWithScore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/NodeWithScore.md
new file mode 100644
index 0000000000000000000000000000000000000000..31bf1ab4500322c73062559214eb4b753587964d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/NodeWithScore.md
@@ -0,0 +1,35 @@
+---
+id: "NodeWithScore"
+title: "Interface: NodeWithScore<T>"
+sidebar_label: "NodeWithScore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A node with a similarity score
+
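+A tiny sketch of a common post-processing step over retrieval results, treating a missing score as zero:
+
+```typescript
+import type { NodeWithScore } from "llamaindex";
+
+// Sort retrieved nodes by descending similarity; `score` is optional.
+function rankByScore(results: NodeWithScore[]): NodeWithScore[] {
+  return [...results].sort((a, b) => (b.score ?? 0) - (a.score ?? 0));
+}
+```
+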
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Properties
+
+### node
+
+• **node**: [`BaseNode`](../classes/BaseNode.md)<`T`\>
+
+#### Defined in
+
+[packages/core/src/Node.ts:327](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L327)
+
+---
+
+### score
+
+• `Optional` **score**: `number`
+
+#### Defined in
+
+[packages/core/src/Node.ts:328](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L328)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/QueryEngineTool.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/QueryEngineTool.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b8806cfd57181bfc308492a61bbdbb96a4b081b
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/QueryEngineTool.md
@@ -0,0 +1,39 @@
+---
+id: "QueryEngineTool"
+title: "Interface: QueryEngineTool"
+sidebar_label: "QueryEngineTool"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+A Tool that uses a QueryEngine.
+
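+A sketch of wrapping an existing query engine as a tool; the engine itself is assumed to come from elsewhere (e.g. `index.asQueryEngine()`), and the name and description are examples:
+
+```typescript
+import type { BaseQueryEngine, QueryEngineTool } from "llamaindex";
+
+declare const queryEngine: BaseQueryEngine; // e.g. index.asQueryEngine()
+
+const budgetTool: QueryEngineTool = {
+  queryEngine,
+  metadata: {
+    name: "budget_tool",
+    description: "Answers questions about the 2023 budget.",
+  },
+};
+```
+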
+## Hierarchy
+
+- [`BaseTool`](BaseTool.md)
+
+  ↳ **`QueryEngineTool`**
+
+## Properties
+
+### metadata
+
+• **metadata**: [`ToolMetadata`](ToolMetadata.md)
+
+#### Inherited from
+
+[BaseTool](BaseTool.md).[metadata](BaseTool.md#metadata)
+
+#### Defined in
+
+[packages/core/src/Tool.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Tool.ts#L12)
+
+---
+
+### queryEngine
+
+• **queryEngine**: [`BaseQueryEngine`](BaseQueryEngine.md)
+
+#### Defined in
+
+[packages/core/src/Tool.ts:19](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Tool.ts#L19)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RefDocInfo.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RefDocInfo.md
new file mode 100644
index 0000000000000000000000000000000000000000..db4bc04edf358124ad92510dcb5969581f38aca3
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RefDocInfo.md
@@ -0,0 +1,27 @@
+---
+id: "RefDocInfo"
+title: "Interface: RefDocInfo"
+sidebar_label: "RefDocInfo"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### extraInfo
+
+• **extraInfo**: `Record`<`string`, `any`\>
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L12)
+
+---
+
+### nodeIds
+
+• **nodeIds**: `string`[]
+
+#### Defined in
+
+[packages/core/src/storage/docStore/types.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/docStore/types.ts#L11)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RelatedNodeInfo.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RelatedNodeInfo.md
new file mode 100644
index 0000000000000000000000000000000000000000..ffce2e78514ca40789c4771ce53f46378793471b
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RelatedNodeInfo.md
@@ -0,0 +1,53 @@
+---
+id: "RelatedNodeInfo"
+title: "Interface: RelatedNodeInfo<T>"
+sidebar_label: "RelatedNodeInfo"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Type parameters
+
+| Name | Type                                                            |
+| :--- | :-------------------------------------------------------------- |
+| `T`  | extends [`Metadata`](../#metadata) = [`Metadata`](../#metadata) |
+
+## Properties
+
+### hash
+
+• `Optional` **hash**: `string`
+
+#### Defined in
+
+[packages/core/src/Node.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L33)
+
+---
+
+### metadata
+
+• **metadata**: `T`
+
+#### Defined in
+
+[packages/core/src/Node.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L32)
+
+---
+
+### nodeId
+
+• **nodeId**: `string`
+
+#### Defined in
+
+[packages/core/src/Node.ts:30](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L30)
+
+---
+
+### nodeType
+
+• `Optional` **nodeType**: [`ObjectType`](../enums/ObjectType.md)
+
+#### Defined in
+
+[packages/core/src/Node.ts:31](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Node.ts#L31)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RetrievalCallbackResponse.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RetrievalCallbackResponse.md
new file mode 100644
index 0000000000000000000000000000000000000000..e275d1cae48c802d2c40ca0c8281c1d258449a9f
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/RetrievalCallbackResponse.md
@@ -0,0 +1,47 @@
+---
+id: "RetrievalCallbackResponse"
+title: "Interface: RetrievalCallbackResponse"
+sidebar_label: "RetrievalCallbackResponse"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- `BaseCallbackResponse`
+
+  ↳ **`RetrievalCallbackResponse`**
+
+## Properties
+
+### event
+
+• **event**: [`Event`](Event.md)
+
+#### Inherited from
+
+BaseCallbackResponse.event
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:20](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L20)
+
+---
+
+### nodes
+
+• **nodes**: [`NodeWithScore`](NodeWithScore.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L65)
+
+---
+
+### query
+
+• **query**: `string`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L64)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ServiceContext.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ServiceContext.md
new file mode 100644
index 0000000000000000000000000000000000000000..87c207de2fee13aef3fd33d88e4a0f8b6c30c05d
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ServiceContext.md
@@ -0,0 +1,59 @@
+---
+id: "ServiceContext"
+title: "Interface: ServiceContext"
+sidebar_label: "ServiceContext"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+The ServiceContext is a collection of components that are used in different parts of the application.
+
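+Rather than assembling these fields by hand, a context is typically built with `serviceContextFromDefaults`, overriding only what you need (the values shown are examples):
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+// Override the LLM and chunking; embeddings, prompt helper and callbacks
+// fall back to defaults.
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo" }),
+  chunkSize: 512,
+  chunkOverlap: 20,
+});
+```
+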
+## Properties
+
+### callbackManager
+
+• **callbackManager**: [`CallbackManager`](../classes/CallbackManager.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L15)
+
+---
+
+### embedModel
+
+• **embedModel**: [`BaseEmbedding`](../classes/BaseEmbedding.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L13)
+
+---
+
+### llm
+
+• **llm**: [`LLM`](LLM.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L11)
+
+---
+
+### nodeParser
+
+• **nodeParser**: [`NodeParser`](NodeParser.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:14](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L14)
+
+---
+
+### promptHelper
+
+• **promptHelper**: [`PromptHelper`](../classes/PromptHelper.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L12)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ServiceContextOptions.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ServiceContextOptions.md
new file mode 100644
index 0000000000000000000000000000000000000000..f9333a68a38e14459499a2ce0005565725c4d3d3
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ServiceContextOptions.md
@@ -0,0 +1,77 @@
+---
+id: "ServiceContextOptions"
+title: "Interface: ServiceContextOptions"
+sidebar_label: "ServiceContextOptions"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### callbackManager
+
+• `Optional` **callbackManager**: [`CallbackManager`](../classes/CallbackManager.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:24](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L24)
+
+---
+
+### chunkOverlap
+
+• `Optional` **chunkOverlap**: `number`
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:27](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L27)
+
+---
+
+### chunkSize
+
+• `Optional` **chunkSize**: `number`
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:26](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L26)
+
+---
+
+### embedModel
+
+• `Optional` **embedModel**: [`BaseEmbedding`](../classes/BaseEmbedding.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:22](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L22)
+
+---
+
+### llm
+
+• `Optional` **llm**: [`LLM`](LLM.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:20](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L20)
+
+---
+
+### nodeParser
+
+• `Optional` **nodeParser**: [`NodeParser`](NodeParser.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:23](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L23)
+
+---
+
+### promptHelper
+
+• `Optional` **promptHelper**: [`PromptHelper`](../classes/PromptHelper.md)
+
+#### Defined in
+
+[packages/core/src/ServiceContext.ts:21](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/ServiceContext.ts#L21)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StorageContext.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StorageContext.md
new file mode 100644
index 0000000000000000000000000000000000000000..035eeac9d87cfb972ff5a2ccae7bf1468e40771e
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StorageContext.md
@@ -0,0 +1,37 @@
+---
+id: "StorageContext"
+title: "Interface: StorageContext"
+sidebar_label: "StorageContext"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### docStore
+
+• **docStore**: [`BaseDocumentStore`](../classes/BaseDocumentStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/StorageContext.ts:11](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/StorageContext.ts#L11)
+
+---
+
+### indexStore
+
+• **indexStore**: [`BaseIndexStore`](../classes/BaseIndexStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/StorageContext.ts:12](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/StorageContext.ts#L12)
+
+---
+
+### vectorStore
+
+• **vectorStore**: [`VectorStore`](VectorStore.md)
+
+#### Defined in
+
+[packages/core/src/storage/StorageContext.ts:13](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/StorageContext.ts#L13)
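+
+---
+
+As a sketch, a storage context is usually created via `storageContextFromDefaults`; the `persistDir` option (the directory name is an example) persists all three stores to disk:
+
+```typescript
+import { storageContextFromDefaults } from "llamaindex";
+
+// docStore, indexStore and vectorStore all persist under ./storage.
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+```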
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StreamCallbackResponse.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StreamCallbackResponse.md
new file mode 100644
index 0000000000000000000000000000000000000000..df4d38fdf74d18f3268feada0491538b4227f5c6
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StreamCallbackResponse.md
@@ -0,0 +1,57 @@
+---
+id: "StreamCallbackResponse"
+title: "Interface: StreamCallbackResponse"
+sidebar_label: "StreamCallbackResponse"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Hierarchy
+
+- `BaseCallbackResponse`
+
+  ↳ **`StreamCallbackResponse`**
+
+## Properties
+
+### event
+
+• **event**: [`Event`](Event.md)
+
+#### Inherited from
+
+BaseCallbackResponse.event
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:20](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L20)
+
+---
+
+### index
+
+• **index**: `number`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:58](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L58)
+
+---
+
+### isDone
+
+• `Optional` **isDone**: `boolean`
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:59](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L59)
+
+---
+
+### token
+
+• `Optional` **token**: [`DefaultStreamToken`](DefaultStreamToken.md)
+
+#### Defined in
+
+[packages/core/src/callbacks/CallbackManager.ts:60](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/callbacks/CallbackManager.ts#L60)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StructuredOutput.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StructuredOutput.md
new file mode 100644
index 0000000000000000000000000000000000000000..8f819e9ef607ef619e060626b2a6a328dae6eef6
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/StructuredOutput.md
@@ -0,0 +1,35 @@
+---
+id: "StructuredOutput"
+title: "Interface: StructuredOutput<T>"
+sidebar_label: "StructuredOutput"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+StructuredOutput is just a combo of the raw output and the parsed output.
+
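+A minimal sketch pairing a raw completion with its parsed form (the JSON payload is illustrative):
+
+```typescript
+import type { StructuredOutput } from "llamaindex";
+
+const raw = '{"answer": 42}';
+
+// Keep the raw text around for debugging alongside the typed result.
+const output: StructuredOutput<{ answer: number }> = {
+  rawOutput: raw,
+  parsedOutput: JSON.parse(raw),
+};
+```
+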
+## Type parameters
+
+| Name |
+| :--- |
+| `T`  |
+
+## Properties
+
+### parsedOutput
+
+• **parsedOutput**: `T`
+
+#### Defined in
+
+[packages/core/src/OutputParser.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/OutputParser.ts#L16)
+
+---
+
+### rawOutput
+
+• **rawOutput**: `string`
+
+#### Defined in
+
+[packages/core/src/OutputParser.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/OutputParser.ts#L15)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/SubQuestion.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/SubQuestion.md
new file mode 100644
index 0000000000000000000000000000000000000000..bcc15b0b995f570a14a3c34a53d90ce7c263ad09
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/SubQuestion.md
@@ -0,0 +1,27 @@
+---
+id: "SubQuestion"
+title: "Interface: SubQuestion"
+sidebar_label: "SubQuestion"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### subQuestion
+
+• **subQuestion**: `string`
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:15](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L15)
+
+---
+
+### toolName
+
+• **toolName**: `string`
+
+#### Defined in
+
+[packages/core/src/QuestionGenerator.ts:16](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/QuestionGenerator.ts#L16)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ToolMetadata.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ToolMetadata.md
new file mode 100644
index 0000000000000000000000000000000000000000..92532a18067d1f625605e629235629a04e17e77f
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/ToolMetadata.md
@@ -0,0 +1,27 @@
+---
+id: "ToolMetadata"
+title: "Interface: ToolMetadata"
+sidebar_label: "ToolMetadata"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### description
+
+• **description**: `string`
+
+#### Defined in
+
+[packages/core/src/Tool.ts:4](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Tool.ts#L4)
+
+---
+
+### name
+
+• **name**: `string`
+
+#### Defined in
+
+[packages/core/src/Tool.ts:5](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/Tool.ts#L5)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStore.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStore.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c42cbfe1c23a95a5200051ff13122e70a672ac5
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStore.md
@@ -0,0 +1,109 @@
+---
+id: "VectorStore"
+title: "Interface: VectorStore"
+sidebar_label: "VectorStore"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Implemented by
+
+- [`MongoDBAtlasVectorSearch`](../classes/MongoDBAtlasVectorSearch.md)
+- [`PGVectorStore`](../classes/PGVectorStore.md)
+- [`SimpleVectorStore`](../classes/SimpleVectorStore.md)
+
+## Properties
+
+### isEmbeddingQuery
+
+• `Optional` **isEmbeddingQuery**: `boolean`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:61](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L61)
+
+---
+
+### storesText
+
+• **storesText**: `boolean`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:60](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L60)
+
+## Methods
+
+### add
+
+▸ **add**(`embeddingResults`): `Promise`<`string`[]\>
+
+#### Parameters
+
+| Name               | Type                                                                |
+| :----------------- | :------------------------------------------------------------------ |
+| `embeddingResults` | [`BaseNode`](../classes/BaseNode.md)<[`Metadata`](../#metadata)\>[] |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:63](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L63)
+
+---
+
+### client
+
+▸ **client**(): `any`
+
+#### Returns
+
+`any`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:62](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L62)
+
+---
+
+### delete
+
+▸ **delete**(`refDocId`, `deleteOptions?`): `Promise`<`void`\>
+
+#### Parameters
+
+| Name             | Type     |
+| :--------------- | :------- |
+| `refDocId`       | `string` |
+| `deleteOptions?` | `any`    |
+
+#### Returns
+
+`Promise`<`void`\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:64](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L64)
+
+---
+
+### query
+
+▸ **query**(`query`, `options?`): `Promise`<[`VectorStoreQueryResult`](VectorStoreQueryResult.md)\>
+
+#### Parameters
+
+| Name       | Type                                      |
+| :--------- | :---------------------------------------- |
+| `query`    | [`VectorStoreQuery`](VectorStoreQuery.md) |
+| `options?` | `any`                                     |
+
+#### Returns
+
+`Promise`<[`VectorStoreQueryResult`](VectorStoreQueryResult.md)\>
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:65](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L65)
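+
+---
+
+A usage sketch against the bundled `SimpleVectorStore`; the no-argument constructor call and the three-dimensional embedding are illustrative assumptions:
+
+```typescript
+import {
+  SimpleVectorStore,
+  VectorStoreQueryMode,
+  type VectorStoreQuery,
+} from "llamaindex";
+
+const store = new SimpleVectorStore();
+
+// Nearest-neighbour query by embedding; normally the embedding comes from
+// the embedding model in the ServiceContext.
+const query: VectorStoreQuery = {
+  queryEmbedding: [0.1, 0.2, 0.3],
+  similarityTopK: 2,
+  mode: VectorStoreQueryMode.DEFAULT,
+};
+
+const result = await store.query(query);
+console.log(result.ids, result.similarities);
+```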
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreInfo.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreInfo.md
new file mode 100644
index 0000000000000000000000000000000000000000..07bb50d0322c35082cccadd13dc7643d048ef8a9
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreInfo.md
@@ -0,0 +1,27 @@
+---
+id: "VectorStoreInfo"
+title: "Interface: VectorStoreInfo"
+sidebar_label: "VectorStoreInfo"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### contentInfo
+
+• **contentInfo**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:45](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L45)
+
+---
+
+### metadataInfo
+
+• **metadataInfo**: [`MetadataInfo`](MetadataInfo.md)[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:44](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L44)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQuery.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQuery.md
new file mode 100644
index 0000000000000000000000000000000000000000..24a68e8af09b72094ec84a163f5ad5de66376eeb
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQuery.md
@@ -0,0 +1,87 @@
+---
+id: "VectorStoreQuery"
+title: "Interface: VectorStoreQuery"
+sidebar_label: "VectorStoreQuery"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### alpha
+
+• `Optional` **alpha**: `number`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:54](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L54)
+
+---
+
+### docIds
+
+• `Optional` **docIds**: `string`[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:51](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L51)
+
+---
+
+### filters
+
+• `Optional` **filters**: [`MetadataFilters`](MetadataFilters.md)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:55](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L55)
+
+---
+
+### mmrThreshold
+
+• `Optional` **mmrThreshold**: `number`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:56](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L56)
+
+---
+
+### mode
+
+• **mode**: [`VectorStoreQueryMode`](../enums/VectorStoreQueryMode.md)
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:53](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L53)
+
+---
+
+### queryEmbedding
+
+• `Optional` **queryEmbedding**: `number`[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:49](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L49)
+
+---
+
+### queryStr
+
+• `Optional` **queryStr**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:52](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L52)
+
+---
+
+### similarityTopK
+
+• **similarityTopK**: `number`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:50](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L50)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQueryResult.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQueryResult.md
new file mode 100644
index 0000000000000000000000000000000000000000..8bd27187a9eb2570930b588c2c01111882d0615f
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQueryResult.md
@@ -0,0 +1,37 @@
+---
+id: "VectorStoreQueryResult"
+title: "Interface: VectorStoreQueryResult"
+sidebar_label: "VectorStoreQueryResult"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### ids
+
+• **ids**: `string`[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:6](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L6)
+
+---
+
+### nodes
+
+• `Optional` **nodes**: [`BaseNode`](../classes/BaseNode.md)<[`Metadata`](../#metadata)\>[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:4](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L4)
+
+---
+
+### similarities
+
+• **similarities**: `number`[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:5](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L5)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQuerySpec.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQuerySpec.md
new file mode 100644
index 0000000000000000000000000000000000000000..078dd1a3949f3289757bfafefcaa27902d8a54b8
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/VectorStoreQuerySpec.md
@@ -0,0 +1,37 @@
+---
+id: "VectorStoreQuerySpec"
+title: "Interface: VectorStoreQuerySpec"
+sidebar_label: "VectorStoreQuerySpec"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Properties
+
+### filters
+
+• **filters**: [`ExactMatchFilter`](ExactMatchFilter.md)[]
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:33](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L33)
+
+---
+
+### query
+
+• **query**: `string`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:32](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L32)
+
+---
+
+### topK
+
+• `Optional` **topK**: `number`
+
+#### Defined in
+
+[packages/core/src/storage/vectorStore/types.ts:34](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/vectorStore/types.ts#L34)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/WalkableFileSystem.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/WalkableFileSystem.md
new file mode 100644
index 0000000000000000000000000000000000000000..ebe0aee529a302bceba1af82e81320fe75a691de
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/WalkableFileSystem.md
@@ -0,0 +1,47 @@
+---
+id: "WalkableFileSystem"
+title: "Interface: WalkableFileSystem"
+sidebar_label: "WalkableFileSystem"
+sidebar_position: 0
+custom_edit_url: null
+---
+
+## Methods
+
+### readdir
+
+▸ **readdir**(`path`): `Promise`<`string`[]\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `path` | `string` |
+
+#### Returns
+
+`Promise`<`string`[]\>
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:17](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L17)
+
+---
+
+### stat
+
+▸ **stat**(`path`): `Promise`<`any`\>
+
+#### Parameters
+
+| Name   | Type     |
+| :----- | :------- |
+| `path` | `string` |
+
+#### Returns
+
+`Promise`<`any`\>
+
+#### Defined in
+
+[packages/core/src/storage/FileSystem.ts:18](https://github.com/run-llama/LlamaIndexTS/blob/f0be933/packages/core/src/storage/FileSystem.ts#L18)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/_category_.yml b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..43bec88cfa0aadb0e46f28701aad493dec3f2097
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/api/interfaces/_category_.yml
@@ -0,0 +1,2 @@
+label: "Interfaces"
+position: 4
\ No newline at end of file
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..34773c6722d98e9949dad03de32272a0178751f1
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Koncepti na visokoj razini
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+LlamaIndex.TS vam pomaže izgraditi aplikacije s podrškom za LLM (npr. pitanja i odgovori, chatbot) nad prilagođenim podacima.
+
+U ovom vodiču o konceptima na visokoj razini, naučit ćete:
+
+- kako LLM može odgovarati na pitanja koristeći vaše vlastite podatke.
+- ključne koncepte i module u LlamaIndex.TS za sastavljanje vlastitog upitačkog sustava.
+
+## Odgovaranje na pitanja na temelju vaših podataka
+
+LlamaIndex koristi dvostupanjsku metodu prilikom korištenja LLM-a s vašim podacima:
+
+1. **indeksiranje**: priprema baze znanja, i
+2. **upit**: dohvaćanje relevantnog konteksta iz znanja kako bi se pomoglo LLM-u u odgovoru na pitanje
+
+![](./_static/concepts/rag.jpg)
+
+Ovaj proces također je poznat kao Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS pruža osnovni skup alata za jednostavno izvođenje oba koraka.
+
+Pogledajmo svaku fazu detaljnije.
+
+### Faza indeksiranja
+
+LlamaIndex.TS vam pomaže pripremiti bazu znanja uz pomoć skupa konektora podataka i indeksa.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Učitavači podataka**](./modules/high_level/data_loader.md):
+Konektor podataka (tj. `Reader`) učitava podatke iz različitih izvora podataka i formata podataka u jednostavno predstavljanje `Dokumenta` (tekst i jednostavne metapodatke).
+
+[**Dokumenti / Čvorovi**](./modules/high_level/documents_and_nodes.md): `Dokument` je generički spremnik za bilo koji izvor podataka - na primjer, PDF, izlaz iz API-ja ili dohvaćeni podaci iz baze podataka. `Čvor` je atomična jedinica podataka u LlamaIndex i predstavlja "komadić" izvornog `Dokumenta`. To je bogato predstavljanje koje uključuje metapodatke i odnose (prema drugim čvorovima) kako bi omogućilo točne i izražajne operacije dohvaćanja.
+
+[**Indeksi podataka**](./modules/high_level/data_index.md):
+Nakon što ste učitali podatke, LlamaIndex vam pomaže indeksirati podatke u format koji je jednostavan za dohvaćanje.
+
+Iza kulisa, LlamaIndex parsira sirove dokumente u međureprezentacije, izračunava vektorske ugrađaje i pohranjuje vaše podatke u memoriju ili na disk.
+
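+Minimalna skica faze indeksiranja u TypeScriptu (uz pretpostavku instaliranog paketa `llamaindex` i postavljenog OpenAI ključa):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Omotajte sirovi sadržaj u `Document`...
+const document = new Document({ text: "Sirovi tekst iz vašeg izvora podataka." });
+
+// ...pa ga indeksirajte: razdvajanje u čvorove i izračun ugrađivanja događaju se automatski.
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+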
+"
+
+### Faza upita
+
+U fazi upita, upitni sustav dohvaća najrelevantniji kontekst na temelju korisničkog upita,
+i prosljeđuje ga LLM-u (uz upit) kako bi sintetizirao odgovor.
+
+To pruža LLM-u ažurirano znanje koje nije uključeno u njegove izvorne podatke za obuku
+(također smanjujući halucinacije).
+
+Ključni izazov u fazi upita je dohvaćanje, orkestracija i zaključivanje nad (potencijalno mnogim) bazama znanja.
+
+LlamaIndex pruža sastavljive module koji vam pomažu izgraditi i integrirati RAG sustave za pitanja i odgovore (upitnički sustav), chatbotove (chat sustav) ili kao dio agenta.
+
+Ovi građevni blokovi mogu se prilagoditi kako bi odražavali preferencije rangiranja, kao i sastavljati kako bi zaključivali nad više baza znanja na strukturiran način.
+
+![](./_static/concepts/querying.jpg)
+
+#### Građevni blokovi
+
+[**Dohvatioci**](./modules/low_level/retriever.md):
+Dohvatilac definira kako učinkovito dohvatiti relevantni kontekst iz baze znanja (tj. indeksa) kada je zadan upit.
+Specifična logika dohvaćanja razlikuje se za različite indekse; najpopularniji je gusti dohvat nad vektorskim indeksom.
+
+[**Sintetizatori odgovora**](./modules/low_level/response_synthesizer.md):
+Sintetizator odgovora generira odgovor iz LLM-a koristeći korisnički upit i zadani skup dohvaćenih tekstualnih fragmenata.
+
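+Skica kako se ta dva građevna bloka mogu koristiti zajedno (API-ji `asRetriever` i `ResponseSynthesizer` detaljnije su opisani u modulima niže razine):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// `index` je VectorStoreIndex stvoren u fazi indeksiranja
+const retriever = index.asRetriever();
+retriever.similarityTopK = 2;
+
+// Dohvati najrelevantnije čvorove za upit...
+const nodes = await retriever.retrieve("Vaše pitanje");
+
+// ...pa iz njih sintetiziraj odgovor pomoću LLM-a.
+const synthesizer = new ResponseSynthesizer();
+const response = await synthesizer.synthesize("Vaše pitanje", nodes);
+console.log(response.response);
+```
+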
+"
+
+#### Sustavi
+
+[**Upitnički sustavi**](./modules/high_level/query_engine.md):
+Upitnički sustav je cjeloviti sustav koji vam omogućuje postavljanje pitanja nad vašim podacima.
+Prihvaća prirodni jezik upita i vraća odgovor, zajedno s dohvaćenim referentnim kontekstom koji se prosljeđuje LLM-u.
+
+[**Chat sustavi**](./modules/high_level/chat_engine.md):
+Chat sustav je cjeloviti sustav za vođenje razgovora s vašim podacima
+(više puta uzajamno umjesto jednog pitanja i odgovora).
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..49d7c962f55312845463df3d55d2d71985199a48
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,61 @@
+---
+sidebar_position: 4
+---
+
+# Primjeri od početka do kraja
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+Uključujemo nekoliko primjera od početka do kraja koji koriste LlamaIndex.TS u repozitoriju.
+
+Pogledajte primjere u nastavku ili ih isprobajte i dovršite u nekoliko minuta uz interaktivne tutorijale na Github Codespace koje pruža Dev-Docs [ovdje](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Pročitajte datoteku i razgovarajte o njoj s LLM-om.
+
+## [Vektor Indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Stvorite vektor indeks i pretražite ga. Vektor indeks će koristiti ugrađivanja za dohvaćanje najrelevantnijih čvorova. Prema zadanim postavkama, najboljih k je 2.
+
+"
+
+## [Indeks sažetka](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Stvorite indeks liste i pretražite ga. Ovaj primjer također koristi `LLMRetriever`, koji će koristiti LLM za odabir najboljih čvorova za korištenje prilikom generiranja odgovora.
+
+"
+
+## [Spremi / Učitaj indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Stvorite i učitajte vektorski indeks. Pohrana na disk u LlamaIndex.TS se automatski događa jednom kada je stvoren objekt konteksta pohrane.
+
+"
+
+## [Prilagođeni vektorski indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Stvorite vektorski indeks i pretražujte ga, istovremeno konfigurirajući `LLM`, `ServiceContext` i `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Stvorite OpenAI LLM i izravno ga koristite za razgovor.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Stvorite Llama-2 LLM i izravno ga koristite za chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Koristi `SubQuestionQueryEngine`, koji razbija složene upite na više pitanja, a zatim agregira odgovor na sva podpitanja.
+
+"
+
+## [Moduli niskog nivoa](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Ovaj primjer koristi nekoliko komponenti niskog nivoa, što uklanja potrebu za stvarnim upitnim motorom. Ove komponente se mogu koristiti bilo gdje, u bilo kojoj aplikaciji, ili prilagođene i podklase kako bi zadovoljile vaše vlastite potrebe.
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..17f56902d965fd82d8eede032cdc435876a8ab9f
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Okruženja
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+LlamaIndex trenutno službeno podržava NodeJS 18 i NodeJS 20.
+
+## NextJS App Router
+
+Ako koristite NextJS App Router s rukovateljima rutama/serverless funkcijama, trebat ćete koristiti NodeJS način rada:
+
+```js
+export const runtime = "nodejs"; // zadano
+```
+
+i trebat ćete dodati iznimku za pdf-parse u vašem next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Stavlja pdf-parse u stvarni NodeJS način rada s NextJS App Routerom
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..be1189a3908424616106a237aaaf6fa48aedcacc
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Instalacija i postavljanje
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+
+Provjerite imate li NodeJS verziju 18 ili noviju.
+
+
+## Korištenje create-llama
+
+Najlakši način za početak rada s LlamaIndexom je korištenje `create-llama`. Ovaj CLI alat omogućuje vam brzo pokretanje izrade nove aplikacije LlamaIndex, s već postavljenim svime što vam je potrebno.
+
+Samo pokrenite
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+da biste započeli. Nakon što se vaša aplikacija generira, pokrenite
+
+```bash npm2yarn
+npm run dev
+```
+
+za pokretanje razvojnog poslužitelja. Zatim možete posjetiti [http://localhost:3000](http://localhost:3000) da biste vidjeli svoju aplikaciju.
+
+## Instalacija putem NPM-a
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Varijable okruženja
+
+Naši primjeri koriste OpenAI prema zadanim postavkama. Morat ćete postaviti svoj OpenAI ključ na sljedeći način:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Zamijenite s vašim ključem s https://platform.openai.com/account/api-keys
+```
+
+Ako želite da se automatski učita svaki put, dodajte ga u svoj .zshrc/.bashrc.
+
+UPOZORENJE: ne stavljajte svoj OpenAI ključ u sustav za kontrolu verzija.
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..95849d3d3f087467401fd75320fdb3a938d59b6f
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Što je LlamaIndex.TS?
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+LlamaIndex.TS je okvir podataka za LLM aplikacije za unos, strukturiranje i pristup privatnim ili domenski specifičnim podacima. Iako je dostupan i Python paket (vidi [ovdje](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS nudi osnovne značajke u jednostavnom paketu, optimiziranom za upotrebu s TypeScriptom.
+
+## 🚀 Zašto LlamaIndex.TS?
+
+U svojoj srži, LLM-ovi nude prirodnojezično sučelje između ljudi i zaključenih podataka. Široko dostupni modeli su prethodno obučeni na ogromnim količinama javno dostupnih podataka, od Wikipedije i mailing lista do udžbenika i izvornog koda.
+
+Aplikacije izgrađene na temelju LLM-ova često zahtijevaju proširenje tih modela s privatnim ili domenski specifičnim podacima. Nažalost, ti podaci mogu biti raspodijeljeni između izoliranih aplikacija i skladišta podataka. Oni se nalaze iza API-ja, u SQL bazama podataka ili su zarobljeni u PDF-ovima i prezentacijama.
+
+Tu dolazi **LlamaIndex.TS**.
+
+## 🦙 Kako LlamaIndex.TS može pomoći?
+
+LlamaIndex.TS pruža sljedeće alate:
+
+- **Učitavanje podataka** - unesite izravno svoje postojeće podatke u formatima `.txt`, `.pdf`, `.csv`, `.md` i `.docx`
+- **Indeksi podataka** - strukturirajte svoje podatke u međureprezentacije koje su jednostavne i performantne za LLM-ove za konzumiranje.
+- **Engine-i** - pružaju prirodan jezični pristup vašim podacima. Na primjer:
+  - Upitni engine-i su moćna sučelja za dohvaćanje izlaza obogaćenog znanjem.
+  - Chat engine-i su konverzacijska sučelja za višeporučne interakcije "naprijed-natrag" s vašim podacima.
+
+## 👨‍👩‍👧‍👦 Za koga je LlamaIndex?
+
+LlamaIndex.TS pruža osnovni skup alata koji su bitni za sve koji grade LLM aplikacije s JavaScriptom i TypeScriptom.
+
+Naš API visoke razine omogućuje početnicima da koriste LlamaIndex.TS za unos i pretraživanje svojih podataka.
+
+Za složenije aplikacije, naši API-ji niže razine omogućuju naprednim korisnicima prilagodbu i proširenje bilo kojeg modula - konektora podataka, indeksa, dohvatnika i upitnih motora - kako bi odgovarali njihovim potrebama.
+
+## Početak rada
+
+`npm install llamaindex`
+
+Naša dokumentacija uključuje [Upute za instalaciju](./installation.md) i [Uvodni vodič](./starter.md) za izgradnju vaše prve aplikacije.
+
+Kada ste spremni za rad, [Visokorazinski koncepti](./concepts.md) pružaju pregled modularne arhitekture LlamaIndex-a. Za praktične primjere, pogledajte naše [Vodiče od početka do kraja](./end_to_end.md).
+
+## 🗺️ Ekosustav
+
+Za preuzimanje ili doprinos, pronađite LlamaIndex na:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Zajednica
+
+Trebate pomoć? Imate prijedlog za značajku? Pridružite se LlamaIndex zajednici:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..5edf08bd5a6a928bcdace5ddca023fe51985bcff
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+ChatEngine je brz i jednostavan način za razgovor s podacima u vašem indeksu.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// započnite razgovor
+const response = await chatEngine.chat(query);
+```
+
+## API Referenca
+
+- [ContextChatEngine (KontekstChatEngine)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..c40cf0c1858f4b83cbcaed61f0f03f200bfd1860
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Indeks
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+Indeks je osnovni spremnik i organizacija za vaše podatke. LlamaIndex.TS podržava dva indeksa:
+
+- `VectorStoreIndex` - šalje najboljih k `Node`-ova LLM-u prilikom generiranja odgovora. Zadana vrijednost k je 2.
+- `SummaryIndex` - šalje svaki `Node` u indeksu LLM-u kako bi generirao odgovor.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Referenca
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..2e12e0390da1f7c8cd6d84c0bb739e10bcfb95a8
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Čitač / Učitač
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+LlamaIndex.TS podržava jednostavno učitavanje datoteka iz mapa koristeći klasu `SimpleDirectoryReader`. Trenutno se podržavaju datoteke `.txt`, `.pdf`, `.csv`, `.md` i `.docx`, a u budućnosti se planira podrška za još više formata!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Referenca
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..a5b80027e3554fd0105ad572088624228fa6e19c
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumenti i Čvorovi
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+`Dokumenti` i `Čvorovi` su osnovni građevni blokovi svakog indeksa. Iako je API za ove objekte sličan, objekti `Dokumenta` predstavljaju cijele datoteke, dok su `Čvorovi` manji dijelovi tog originalnog dokumenta, koji su prikladni za LLM i Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "tekst", metadata: { ključ: "vrijednost" } });
+```
+
+## API Referenca
+
+- [Dokument](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..34bd396867f2ca70dda6cade2da91713cfd6b4f3
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Upitni motor)
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+Upitni motor obuhvaća `Retriever` i `ResponseSynthesizer` u cjevovodu koji će koristiti upitni niz za dohvaćanje čvorova, a zatim ih poslati LLM-u kako bi generirao odgovor.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("upitni niz");
+```
+
+## Upitni motor za podupit (Sub Question Query Engine)
+
+Osnovna ideja Upitnog motora za podupit je da razdvoji jedan upit na više upita, dobije odgovor za svaki od tih upita, a zatim kombinira te različite odgovore u jedan koherentan odgovor za korisnika. Možete ga zamisliti kao tehniku "razmišljanja korak po korak", ali uz iteriranje kroz izvore podataka!
+
+### Početak rada
+
+Najjednostavniji način za početak isprobavanja Upitnog motora za podupitna pitanja je pokretanje datoteke subquestion.ts u [primjerima](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Alati
+
+Upitni motor za podupit je implementiran pomoću Alata. Osnovna ideja Alata je da su izvršne opcije za veliki jezični model. U ovom slučaju, naš Upitni motor za podupit oslanja se na QueryEngineTool, koji je, kako ste pretpostavili, alat za izvođenje upita na Upitnom motoru. To nam omogućuje da modelu pružimo mogućnost upita različitih dokumenata za različita pitanja, na primjer. Također možete zamisliti da Upitni motor za podupit može koristiti Alat koji traži nešto na webu ili dobiva odgovor pomoću Wolfram Alpha.
+
+Više o Alatima možete saznati pogledavajući LlamaIndex Python dokumentaciju na https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
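+Za ilustraciju, skica po uzoru na primjer subquestion.ts (točan oblik API-ja provjerite u samom primjeru):
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "..." }),
+]);
+
+// QueryEngineTool daje modelu mogućnost izvođenja upita nad ovim indeksom
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "dokumenti",
+        description: "Odgovara na pitanja o učitanim dokumentima",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query("Složeno pitanje koje zahtijeva više izvora");
+```
+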
+## API Referenca
+
+- [RetrieverQueryEngine (Retriever upitni motor)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Upitni motor za podpitanja)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Alat za upitni motor)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1fe80913491ccc676377687defe1e89cccf51fd
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Osnovni moduli
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+LlamaIndex.TS nudi nekoliko osnovnih modula, podijeljenih na visokorazinske module za brzi početak i niskorazinske module za prilagodbu ključnih komponenti prema potrebi.
+
+## Visokorazinski moduli
+
+- [**Dokument**](./high_level/documents_and_nodes.md): Dokument predstavlja tekstualnu datoteku, PDF datoteku ili drugi kontinuirani dio podataka.
+
+- [**Čvor**](./high_level/documents_and_nodes.md): Osnovna građevna jedinica podataka. Najčešće su to dijelovi dokumenta podijeljeni na upravljive dijelove koji su dovoljno mali da se mogu koristiti ugrađenom modelu i LLM.
+
+- [**Čitač/Učitavač**](./high_level/data_loader.md): Čitač ili učitavač je nešto što uzima dokument u stvarnom svijetu i pretvara ga u klasu Dokument koja se može koristiti u vašem Indeksu i upitima. Trenutno podržavamo obične tekstualne datoteke i PDF-ove, a uskoro će biti podržano još mnogo više formata.
+
+- [**Indeksi**](./high_level/data_index.md): Indeksi pohranjuju Čvorove i ugrađivanja tih čvorova.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engine generira upit koji ste unijeli i vraća vam rezultat. Query engine obično kombinira unaprijed izgrađenu uputu s odabranim čvorovima iz vašeg Indeksa kako bi LLM pružio kontekst koji mu je potreban za odgovor na vaš upit.
+
+- [**ChatEngine**](./high_level/chat_engine.md): ChatEngine vam pomaže izgraditi chatbota koji će komunicirati s vašim Indeksima.
+
+## Niskorazinski modul
+
+- [**LLM**](./low_level/llm.md): Klasa LLM je ujedinjeno sučelje prema dobavljačima velikih jezičnih modela kao što su OpenAI GPT-4, Anthropic Claude ili Meta LLaMA. Možete je naslijediti kako biste napisali konektor za vlastiti veliki jezični model.
+
+- [**Ugrađivanje**](./low_level/embedding.md): Ugrađivanje je predstavljeno kao vektor decimalnih brojeva s pomičnim zarezom. OpenAI-jev model ugrađivanja teksta ada-002 je naš zadani model ugrađivanja, a svako ugrađivanje koje generira sastoji se od 1.536 decimalnih brojeva s pomičnim zarezom. Još jedan popularan model ugrađivanja je BERT koji koristi 768 decimalnih brojeva s pomičnim zarezom za prikaz svakog čvora. Pružamo nekoliko alata za rad s ugrađivanjima, uključujući 3 opcije za izračunavanje sličnosti i maksimalnu marginalnu relevantnost (skica izračuna sličnosti nalazi se ispod ovog popisa).
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Strategije razdvajanja teksta izuzetno su važne za ukupnu učinkovitost pretraživanja ugrađivanja. Trenutno, iako imamo zadano rješenje, ne postoji univerzalno rješenje koje odgovara svima. Ovisno o izvornim dokumentima, možda ćete htjeti koristiti različite veličine i strategije razdvajanja. Trenutno podržavamo razdvajanje po fiksnoj veličini, razdvajanje po fiksnoj veličini s preklapajućim sekcijama, razdvajanje po rečenici i razdvajanje po odlomku. TextSplitter se koristi od strane NodeParsera prilikom razdvajanja `Dokumenata` u `Čvorove`.
+
+- [**Retriever**](./low_level/retriever.md): Retriever je ono što zapravo odabire Čvorove za dohvat iz indeksa. Ovdje možete isprobati dohvaćanje više ili manje Čvorova po upitu, promijeniti funkciju sličnosti ili stvoriti vlastiti retriever za svaki pojedinačni slučaj upotrebe u vašoj aplikaciji. Na primjer, možda ćete željeti imati zaseban retriever za sadržaj koda i tekstualni sadržaj.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer je odgovoran za uzimanje niza upita i korištenje liste `Čvorova` za generiranje odgovora. To može imati različite oblike, poput iteriranja kroz sav kontekst i poboljšavanja odgovora ili izgradnje stabla sažetaka i vraćanja korijena sažetka.
+
+- [**Storage**](./low_level/storage.md): U nekom trenutku ćete htjeti pohraniti svoje indekse, podatke i vektore umjesto ponovnog pokretanja modela ugrađivanja svaki put. IndexStore, DocStore, VectorStore i KVStore su apstrakcije koje vam to omogućuju. Kombinirano, oni čine StorageContext. Trenutno vam omogućujemo da trajno pohranite svoja ugrađivanja u datotekama na datotečnom sustavu (ili virtualnom memorijskom datotečnom sustavu), ali također aktivno dodajemo integracije s vektorskim bazama podataka.
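+
+Za ilustraciju spomenutog izračuna sličnosti, kosinusna sličnost dvaju ugrađivanja može se skicirati ovako (samostalna skica, a ne API iz paketa `llamaindex`):
+
+```typescript
+// Kosinusna sličnost: 1 znači isti smjer vektora, 0 znači ortogonalne vektore.
+function cosineSimilarity(a: number[], b: number[]): number {
+  let dot = 0;
+  let normA = 0;
+  let normB = 0;
+  for (let i = 0; i < a.length; i++) {
+    dot += a[i] * b[i];
+    normA += a[i] * a[i];
+    normB += b[i] * b[i];
+  }
+  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
+}
+```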
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..72332106f6c6f3ce017bd06b76fe55bebd66aefc
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Ugradnja
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+Model ugradnje u LlamaIndexu odgovoran je za stvaranje numeričkih reprezentacija teksta. Prema zadanim postavkama, LlamaIndex će koristiti model `text-embedding-ada-002` iz OpenAI-a.
+
+Ovo se može eksplicitno postaviti u objektu `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Referenca
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..cc468f3828e2736e783ed5782c9cf268c521e7c2
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+LLM je odgovoran za čitanje teksta i generiranje prirodnih jezičnih odgovora na upite. Prema zadanim postavkama, LlamaIndex.TS koristi `gpt-3.5-turbo`.
+
+LLM se može eksplicitno postaviti u objektu `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Referenca
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..288510dc725368b115bbc266c50aee910676ef18
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+`NodeParser` u LlamaIndexu je odgovoran za razdvajanje objekata `Document` u lakše upravljive objekte `Node`. Kada pozovete `.fromDocuments()`, `NodeParser` iz `ServiceContext`-a se automatski koristi da to učini za vas. Alternativno, možete ga koristiti da unaprijed razdvojite dokumente.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Imam 10 godina. John ima 20 godina." }),
+]);
+```
+
+## TextSplitter
+
+Osnovni tekstualni razdjelnik će razdvojiti tekst po rečenicama. Može se također koristiti kao samostalni modul za razdvajanje sirovog teksta.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Pozdrav svijete");
+```
+
+"
+
+## API Referenca
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..0f23ca9da14bde7c93e5ef3fa45dbad7884073b1
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (SintetizatorOdgovora)
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+ResponseSynthesizer je odgovoran za slanje upita, čvorova i predložaka poruka LLM-u radi generiranja odgovora. Postoji nekoliko ključnih načina generiranja odgovora:
+
+- `Refine` (Usavršavanje): "stvaranje i usavršavanje" odgovora tako da se sekvenčno prolazi kroz svaki dobiveni tekstualni fragment. Ovo izvršava zaseban poziv LLM-u po čvoru. Dobro za detaljnije odgovore.
+- `CompactAndRefine` (Kompaktiranje i usavršavanje) (zadano): "kompaktiranje" predloška tijekom svakog poziva LLM-u tako da se stavi što više tekstualnih fragmenata koji mogu stati unutar maksimalne veličine predloška. Ako ima previše fragmenata za stavljanje u jedan predložak, "stvaranje i usavršavanje" odgovora prolaskom kroz više kompaktnih predložaka. Isto kao `Refine`, ali bi trebalo rezultirati manjim brojem poziva LLM-u.
+- `TreeSummarize` (Sažimanje stabla): Na temelju skupa tekstualnih fragmenata i upita, rekurzivno konstruiraj stablo i vrati korijenski čvor kao odgovor. Dobro za svrhe sažimanja.
+- `SimpleResponseBuilder` (Jednostavni graditelj odgovora): Na temelju skupa tekstualnih fragmenata i upita, primijeni upit na svaki tekstualni fragment dok se odgovori akumuliraju u niz. Vraća spojeni niz svih odgovora. Dobro kada trebate pokrenuti isti upit zasebno za svaki tekstualni fragment.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Imam 10 godina." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John ima 20 godina." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Koliko godina imam?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference (API referenca)
+
+- [ResponseSynthesizer (SintetizatorOdgovora)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Usavršavanje)](../../api/classes/Refine.md)
+- [CompactAndRefine (Kompaktiranje i usavršavanje)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (Sažimanje stabla)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (Jednostavni graditelj odgovora)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..f2a67e689ff10779d428ac2c95a1bb77fc67ec51
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Dohvatnik
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+Dohvatnik u LlamaIndexu se koristi za dohvaćanje `Node`-ova iz indeksa koristeći upitni niz. `VectorIndexRetriever` će dohvatiti prvih k najsličnijih čvorova. S druge strane, `SummaryIndexRetriever` će dohvatiti sve čvorove bez obzira na upit.
+
+```typescript
+// `vectorIndex` je postojeći VectorStoreIndex
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Dohvati čvorove!
+const nodesWithScore = await retriever.retrieve("upitni niz");
+```
+
+## API Referenca
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..4c8638789f13ad42e029875552a2361b033dd9f8
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Pohrana (Storage)
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+Pohrana u LlamaIndex.TS radi automatski nakon što ste konfigurirali objekt `StorageContext`. Samo konfigurirajte `persistDir` i dodijelite ga indeksu.
+
+Trenutno je podržano samo spremanje i učitavanje s diska, s planiranim budućim integracijama!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testni tekst" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
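+
+Za kasnije ponovno učitavanje istog indeksa s diska jedna je mogućnost sljedeća skica (pretpostavka: `VectorStoreIndex.init` učitava postojeće podatke iz zadanog konteksta pohrane; provjerite API referencu):
+
+```typescript
+// Kasnije: ponovno otvorite isti direktorij pohrane umjesto ponovnog indeksiranja
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+```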
+
+## API Referenca
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..70c1e9b4b6897cee1f3fc788c8ca0066fed8fece
--- /dev/null
+++ b/apps/docs/i18n/hr/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Uvodni vodič
+
+`Ova dokumentacija je automatski prevedena i može sadržavati greške. Ne ustručavajte se otvoriti Pull Request za predlaganje promjena.`
+
+Nakon što ste [instalirali LlamaIndex.TS pomoću NPM-a](installation) i postavili svoj OpenAI ključ, spremni ste za pokretanje svoje prve aplikacije:
+
+U novom direktoriju:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # ako je potrebno
+```
+
+Kreirajte datoteku `primjer.ts`. Ovaj kod će učitati primjer podataka, stvoriti dokument, indeksirati ga (što stvara ugrađivanja pomoću OpenAI-a) i zatim stvoriti upitni motor za odgovaranje na pitanja o podacima.
+
+```ts
+// primjer.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Učitaj eseje iz abramov.txt u Node-u
+  const eseji = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Kreiraj objekt Dokument s esejima
+  const dokument = new Document({ text: eseji });
+
+  // Podijeli tekst i stvori ugrađivanja. Spremi ih u VectorStoreIndex
+  const indeks = await VectorStoreIndex.fromDocuments([dokument]);
+
+  // Upitaj indeks
+  const upitniMotor = indeks.asQueryEngine();
+  const odgovor = await upitniMotor.query("Što je autor radio na fakultetu?");
+
+  // Ispiši odgovor
+  console.log(odgovor.toString());
+}
+
+main();
+```
+
+Zatim ga možete pokrenuti koristeći
+
+```bash
+npx ts-node primjer.ts
+```
+
+Spremni za više informacija? Provjerite naše NextJS igralište na https://llama-playground.vercel.app/. Izvorni kod je dostupan na https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..ccdfce19bafb9b37f381336631bb4ce0b5fe797e
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Magas szintű fogalmak
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A LlamaIndex.TS segít az LLM-alapú alkalmazások (pl. kérdés-válasz, chatbot) készítésében saját adatok felett.
+
+Ebben a magas szintű fogalmak útmutatóban megtudhatja:
+
+- hogyan válaszol egy LLM a saját adatai alapján feltett kérdésekre.
+- a LlamaIndex.TS kulcsfontosságú fogalmait és moduljait, amelyek segítenek a saját lekérdezési csővezeték összeállításában.
+
+## Kérdések megválaszolása az Ön adatai alapján
+
+A LlamaIndex kétlépcsős módszert használ az LLM használatakor az adatokkal:
+
+1. **indexelési szakasz**: egy tudásbázis előkészítése, és
+2. **lekérdezési szakasz**: releváns kontextus visszanyerése a tudásból, hogy segítse az LLM-et a kérdésre adott válaszban
+
+![](./_static/concepts/rag.jpg)
+
+Ezt a folyamatot Retrieval Augmented Generation (RAG) néven is ismerik.
+
+A LlamaIndex.TS biztosítja az alapvető eszközkészletet mindkét lépés egyszerűvé tételéhez.
+
+Vizsgáljuk meg részletesen mindkét szakaszt.
+
+### Indexelési szakasz
+
+A LlamaIndex.TS segít a tudásbázis előkészítésében adatkonnektorok és indexek segítségével.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Adatbetöltők**](./modules/high_level/data_loader.md):
+Egy adatkonnektor (pl. `Reader`) adatokat vesz fel különböző adatforrásokból és adatformátumokból egy egyszerű `Document` reprezentációba (szöveg és egyszerű metaadat).
+
+[**Dokumentumok / Csomópontok**](./modules/high_level/documents_and_nodes.md): Egy `Document` egy általános tartály bármilyen adatforrás körül - például egy PDF, egy API kimenet vagy adatok lekérdezése az adatbázisból. Egy `Node` az adat atomi egysége a LlamaIndex-ben, és egy forrás `Document` "darabja". Ez egy gazdag reprezentáció, amely tartalmaz metaadatot és kapcsolatokat (más csomópontokhoz), hogy pontos és kifejező lekérdezési műveleteket lehessen végezni.
+
+[**Adatindexek**](./modules/high_level/data_index.md):
+Miután felvette az adatait, a LlamaIndex segít az adatok indexelésében egy olyan formátumba, amely könnyen visszanyerhető.
+
+A LlamaIndex a háttérben feldolgozza a nyers dokumentumokat köztes reprezentációkká, kiszámítja a vektorbeágyazásokat, és az adatokat memóriában vagy lemezre tárolja.
+
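+Az indexelési szakasz minimális vázlata TypeScriptben (feltételezve a telepített `llamaindex` csomagot és a beállított OpenAI kulcsot):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Csomagolja a nyers tartalmat egy `Document`-be...
+const document = new Document({ text: "Nyers szöveg az adatforrásból." });
+
+// ...majd indexelje: a csomópontokra bontás és a beágyazások kiszámítása automatikus.
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+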
+"
+
+### Lekérdezési szakasz
+
+A lekérdezési szakaszban a lekérdezési csővezeték a legrelevánsabb kontextust nyeri ki egy felhasználói lekérdezés alapján,
+és átadja azt az LLM-nek (a lekérdezéssel együtt) egy válasz szintetizálásához.
+
+Ez az LLM-nek naprakész tudást biztosít, amely nincs benne az eredeti tanítóadataiban
+(és a hallucinációkat is csökkenti).
+
+A lekérdezési szakasz legnagyobb kihívása a visszanyerés, az összehangolás és a következtetés a (potenciálisan sok) tudásbázis felett.
+
+A LlamaIndex komponálható modulokat biztosít, amelyek segítenek RAG csővezetékek összeállításában kérdés-válasz alkalmazásokhoz (lekérdezési motor), chatbotokhoz (chat motor), vagy egy ügynök részeként.
+
+Ezeket az építőköveket testreszabhatja a rangsorolási preferenciák tükrözésére, valamint strukturált módon történő gondolkodáshoz több tudásbázis felett.
+
+![](./_static/concepts/querying.jpg)
+
+#### Építőkövek
+
+[**Visszakeresők**](./modules/low_level/retriever.md):
+Egy visszakereső meghatározza, hogyan lehet hatékonyan visszakeresni a releváns kontextust egy tudásbázisból (azaz indexből) egy lekérdezés alapján.
+A konkrét visszakeresési logika indexenként eltérő; a legnépszerűbb a sűrű visszakeresés egy vektorindexen.
+
+[**Válasz szintetizálók**](./modules/low_level/response_synthesizer.md):
+Egy válasz szintetizáló választ generál egy LLM-ből, egy felhasználói lekérdezés és egy adott halmaz visszakerült szövegrészlet segítségével.
+
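+Vázlat arról, hogyan használható együtt ez a két építőelem (az `asRetriever` és a `ResponseSynthesizer` API-t az alacsony szintű modulok mutatják be részletesen):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// az `index` az indexelési szakaszban létrehozott VectorStoreIndex
+const retriever = index.asRetriever();
+retriever.similarityTopK = 2;
+
+// Kérje le a lekérdezéshez legrelevánsabb csomópontokat...
+const nodes = await retriever.retrieve("A kérdése");
+
+// ...majd szintetizáljon belőlük választ az LLM segítségével.
+const synthesizer = new ResponseSynthesizer();
+const response = await synthesizer.synthesize("A kérdése", nodes);
+console.log(response.response);
+```
+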
+"
+
+#### Csővezetékek
+
+[**Lekérdezési motorok**](./modules/high_level/query_engine.md):
+Egy lekérdezési motor egy végponti csővezeték, amely lehetővé teszi a kérdések feltevését az adatai alapján.
+Egy természetes nyelvű lekérdezést vesz fel, és választ ad, valamint a hivatkozott kontextust átadja az LLM-nek.
+
+[**Chat motorok**](./modules/high_level/chat_engine.md):
+Egy chat motor egy végponti csővezeték adataival való párbeszéd folytatásához
+(több oda-vissza helyett egyetlen kérdés és válasz).
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad3a80e1ccfe5b1b0778b3f25b4900480b541434
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,61 @@
+---
+sidebar_position: 4
+---
+
+# Végponttól végpontig példák
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+Több végponttól végpontig példát tartalmazunk a LlamaIndex.TS használatával a repository-ban.
+
+Tekintse meg az alábbi példákat, vagy próbálja ki őket, és fejezze be őket percek alatt interaktív Github Codespace oktatókkal, amelyeket a Dev-Docs nyújt [itt](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Olvasson be egy fájlt és beszéljen róla a LLM-mel.
+
+## [Vektor Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Hozzon létre egy vektor indexet és kérdezze le. A vektor index beágyazásokat fog használni a legfontosabb k legrelevánsabb csomópont lekérdezéséhez. Alapértelmezés szerint a legfontosabb k értéke 2.
+
+"
+
+## [Összefoglaló Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Hozzon létre egy lista indexet és kérdezze le. Ez a példa a `LLMRetriever`-t is használja, amely az LLM-et használja a legjobb csomópontok kiválasztásához a válasz generálásakor.
+
+"
+
+## [Index Mentése / Betöltése](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Hozzon létre és töltse be egy vektor indexet. A LlamaIndex.TS-ben a perzisztencia automatikusan megtörténik, amint létrejön egy tárolási kontextus objektum.
+
+"
+
+## [Egyéni Vektor Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Hozzon létre egy vektor indexet és kérdezze le, miközben konfigurálja a `LLM`-et, a `ServiceContext`-et és a `similarity_top_k`-t.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Hozzon létre egy OpenAI LLM-et és használja közvetlenül a csevegéshez.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Hozzon létre egy Llama-2 LLM-et, és használja közvetlenül a csevegéshez.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Használja a `SubQuestionQueryEngine`-t, amely bonyolult lekérdezéseket több részre bont, majd összeállít egy választ az összes részlekérdezésre adott válasz alapján.
+
+"
+
+## [Alacsony szintű modulok](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Ez a példa több alacsony szintű komponenst használ, amelyekkel nincs szükség tényleges lekérdezési motorra. Ezek a komponensek bárhol, bármilyen alkalmazásban használhatók, illetve testreszabhatók és származtatott osztályokkal bővíthetők a saját igényei szerint.
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b14b15337fdce88b2e922623c20757f4a2c8338
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Környezetek
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A LlamaIndex jelenleg hivatalosan támogatja a NodeJS 18 és a NodeJS 20 verziókat.
+
+## NextJS App Router
+
+Ha a NextJS App Router útvonalkezelőit/serverless funkcióit használja, a NodeJS módot kell használnia:
+
+```js
+export const runtime = "nodejs"; // alapértelmezett
+```
+
+és hozzá kell adnia egy kivételt a pdf-parse-hez a next.config.js fájlban
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // A pdf-parse-t valódi NodeJS módban futtatja a NextJS App Routerrel
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3587006b710592fa5a78ce50392b4d50d9d82ccb
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Telepítés és Beállítás
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+
+Győződjön meg róla, hogy rendelkezik a NodeJS v18 vagy annál újabb verziójával.
+
+
+## A create-llama használata
+
+A LlamaIndex használatát a legegyszerűbben a `create-llama` segítségével kezdheti el. Ez a CLI eszköz lehetővé teszi, hogy gyorsan elkezdjen egy új LlamaIndex alkalmazást építeni, minden szükséges beállítással.
+
+Csak futtassa a következő parancsot:
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+a kezdéshez. Miután létrehozta az alkalmazást, futtassa a következő parancsot:
+
+```bash npm2yarn
+npm run dev
+```
+
+a fejlesztői szerver indításához. Ezután meglátogathatja a [http://localhost:3000](http://localhost:3000) címet az alkalmazás megtekintéséhez.
+
+## Telepítés az NPM-ről
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Környezeti változók
+
+Példáink alapértelmezetten az OpenAI-t használják. Az OpenAI kulcsát a következőképpen kell beállítania:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Cserélje ki a saját kulcsára a https://platform.openai.com/account/api-keys oldalról
+```
+
+Ha azt szeretné, hogy minden alkalommal automatikusan betöltődjön, adja hozzá a .zshrc/.bashrc fájljához.
+
+FIGYELEM: Ne tegye közzé az OpenAI kulcsát a verziókezelő rendszerben.
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..30c937ba30eb25ef421d078264a52fbbfcfb6d61
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Mi az LlamaIndex.TS?
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+Az LlamaIndex.TS egy adatkeretrendszer az LLM alkalmazások számára, amely lehetővé teszi a privát vagy domain-specifikus adatok beolvasását, strukturálását és hozzáférését. Bár elérhető egy Python csomag is (lásd [itt](https://docs.llamaindex.ai/en/stable/)), az LlamaIndex.TS egyszerű csomagban kínálja a fő funkciókat, amelyeket a TypeScript használatára optimalizáltak.
+
+## 🚀 Miért érdemes használni a LlamaIndex.TS-t?
+
+Az LLM-ek lényegében természetes nyelvű felületet kínálnak az emberek és a következtetett adatok között. Széles körben elérhető modellek előre betanítottak hatalmas mennyiségű nyilvánosan elérhető adatra, a Wikipédiától és a levelezési listáktól a tankönyvekig és a forráskódig.
+
+Az LLM-ekre épülő alkalmazások gyakran igénylik ezeknek a modelleknek a privát vagy domain-specifikus adatokkal való kiegészítését. Sajnos ezek az adatok szét vannak szórva az alkalmazások és adattárolók között. Az API-k mögött vannak, SQL adatbázisokban találhatók, vagy PDF-ekben és diavetítésekben rejtőznek.
+
+Ebben segít a **LlamaIndex.TS**.
+
+## 🦙 Hogyan segíthet a LlamaIndex.TS?
+
+A LlamaIndex.TS az alábbi eszközöket biztosítja:
+
+- **Adatbetöltés** - közvetlenül beolvashatja meglévő `.txt`, `.pdf`, `.csv`, `.md` és `.docx` adatait
+- **Adatindexek** - strukturálja az adatait köztes reprezentációkba, amelyek könnyen és hatékonyan fogyaszthatók LLM-ekkel.
+- **Motorok** - természetes nyelvű hozzáférést biztosítanak az adataihoz. Például:
+  - A lekérdezési motorok erőteljes visszakeresési felületek a tudásbővített kimenet számára.
+  - A csevegőmotorok konverzációs felületek a több üzenetes, "oda-vissza" interakciókhoz az adataival.
+
+## 👨‍👩‍👧‍👦 Kinek való az LlamaIndex?
+
+Az LlamaIndex.TS egy alapvető eszközkészletet nyújt, amely nélkülözhetetlen azoknak, akik JavaScript és TypeScript segítségével LLM alkalmazásokat építenek.
+
+A magas szintű API lehetővé teszi a kezdő felhasználók számára, hogy az LlamaIndex.TS-t használják az adatok beolvasására és lekérdezésére.
+
+A komplexebb alkalmazásokhoz a mélyebb szintű API-k lehetővé teszik a haladó felhasználók számára, hogy testre szabják és kibővítsék bármely modult - adatkonnektorokat, indexeket, visszakeresőket és lekérdezési motorokat - az igényeiknek megfelelően.
+
+## Első lépések
+
+`npm install llamaindex`
+
+A dokumentációnk tartalmazza a [Telepítési utasításokat](./installation.md) és egy [Kezdő útmutatót](./starter.md) az első alkalmazás létrehozásához.
+
+Miután elindultál, a [Magas szintű fogalmak](./concepts.md) áttekintést ad a LlamaIndex moduláris architektúrájáról. További gyakorlati példákért tekintsd meg az [End-to-End útmutatóinkat](./end_to_end.md).
+
+## 🗺️ Ökoszisztéma
+
+Az LlamaIndex letöltéséhez vagy hozzájárulásához keresd meg az alábbi helyeken:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Közösség
+
+Segítségre van szüksége? Van egy funkció javaslata? Csatlakozzon az LlamaIndex közösséghez:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..4efb1f42e7e0735f1a16579ca8578a0216db6833
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A chat engine egy gyors és egyszerű módja annak, hogy beszélgethessen az indexben található adatokkal.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// beszélgetés indítása
+const response = await chatEngine.chat(query);
+```
+
+## API referencia
+
+- [ContextChatEngine (KontextusChatEngine)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..03c828ea83e399b111006b723e6fd858461202b4
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+Az index az adatok alapvető tárolója és szervezője. A LlamaIndex.TS két indexet támogat:
+
+- `VectorStoreIndex` - a legjobb k `Node`-ot küldi az LLM-nek a válasz generálásakor. Az alapértelmezett k érték 2.
+- `SummaryIndex` - az index minden `Node`-ját elküldi az LLM-nek a válasz generálásához.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "teszt" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API referencia
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..120e7831db5dd4e2c4163a6986a0d7938d8495f0
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Olvasó / Betöltő
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A LlamaIndex.TS egyszerűen lehetővé teszi a fájlok könnyű betöltését mappákból a `SimpleDirectoryReader` osztály segítségével. Jelenleg a `.txt`, `.pdf`, `.csv`, `.md` és `.docx` fájlok támogatottak, továbbiak tervezés alatt!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Referencia
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..b7e1bc712749bd2ea8e000d64dd04e84c50209e9
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,20 @@
+---
+sidebar_position: 0
+---
+
+# Dokumentumok és Csomópontok
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A `Dokumentumok` és a `Csomópontok` az index alapvető építőelemei. Bár ezeknek az objektumoknak az API-ja hasonló, a `Dokumentum` objektumok teljes fájlokat képviselnek, míg a `Csomópontok` kisebb darabok az eredeti dokumentumból, amelyek alkalmasak egy LLM és Q&A számára.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "szöveg", metadata: { kulcs: "érték" } });
+```
+
+## API Referencia
+
+- [Dokumentum](../../api/classes/Document.md)
+- [SzövegesCsomópont](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..261b9a84c1f2ee269e109a811cb1b80fd4e8569c
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,59 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Lekérdezési motor)
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A lekérdezési motor egy `Retriever` és egy `ResponseSynthesizer` objektumot csomagol be egy csővezetékbe, amely a lekérdezési karakterláncot használja a csomópontok lekérdezésére, majd elküldi azokat az LLM-nek a válasz generálásához.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("lekérdezési karakterlánc");
+```
+
+## Alkérdés lekérdezési motor (Sub Question Query Engine)
+
+Az Alkérdés lekérdezési motor alapvető koncepciója az, hogy egyetlen lekérdezést több lekérdezésre bont, minden egyes lekérdezésre választ kap, majd ezeket a különböző válaszokat egyetlen koherens válaszként kombinálja a felhasználó számára. Gondolhat rá, mint a "gondolja végig lépésről lépésre" módszerre, amikor az adatforrásokon iterál!
+
+### Első lépések
+
+A legegyszerűbb módja annak, hogy elkezdje kipróbálni az Alkérdés lekérdezési motort, az [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts) mappában található subquestion.ts fájl futtatása.
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Eszközök
+
+Az Alkérdés lekérdezési motor eszközökkel (Tools) van implementálva. Az eszközök alapötlete, hogy végrehajtható lehetőségeket kínálnak a nagy nyelvi modell számára. Ebben az esetben az Alkérdés lekérdezési motorunk a QueryEngineTool-ra támaszkodik, amely, ahogy a neve is sugallja, egy eszköz lekérdezések futtatására egy QueryEngine-en. Így például lehetőséget adhatunk a modellnek arra, hogy különböző kérdésekhez különböző dokumentumokat kérdezzen le. Az Alkérdés lekérdezési motor akár olyan eszközt is használhatna, amely a weben keres, vagy a Wolfram Alpha segítségével szerez válaszokat.
+
+További információkat az eszközökről a LlamaIndex Python dokumentációjában találhatsz: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
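+Az alábbi vázlat azt szemlélteti, hogyan kapja meg a `SubQuestionQueryEngine` a `QueryEngineTool`-okat (feltételezve, hogy a `queryEngine` egy korábban létrehozott lekérdezési motor; az eszköz neve és leírása itt csak illusztráció):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine,
+      metadata: {
+        name: "dokumentum_eszkoz",
+        description: "Az indexelt dokumentumokkal kapcsolatos kérdések megválaszolása",
+      },
+    },
+  ],
+});
+
+// Az összetett kérdést alkérdésekre bontja, majd a válaszokat egyesíti
+const response = await subQuestionEngine.query("Összetett, többrészes kérdés");
+```
+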
+## API referencia
+
+- [RetrieverQueryEngine (Lekérdező lekérdezési motor)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Alkérdés lekérdezési motor)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Lekérdezési motor eszköz)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb7cbecd9d031090d9d3f0ab4072e13bb63d0c4
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,51 @@
+# Alapmodulok
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A LlamaIndex.TS több alapmodult kínál, amelyek magas szintű modulokra (a gyors kezdéshez) és alacsony szintű modulokra (a kulcskomponensek igény szerinti testreszabásához) oszlanak; az alacsony szintű modulok listája után egy rövid vázlat mutatja, hogyan állnak össze ezek a komponensek.
+
+## Magas szintű modulok
+
+- [**Dokumentum**](./high_level/documents_and_nodes.md): Egy dokumentum egy szöveges fájlt, PDF fájlt vagy más összefüggő adatot képvisel.
+
+- [**Csomópont**](./high_level/documents_and_nodes.md): Az alapvető adatépítő blokk. Általában ezek a dokumentum részei, amelyek kezelhető darabokra vannak felosztva, és elég kicsiek ahhoz, hogy be lehessen táplálni egy beágyazási modellbe és az LLM-be.
+
+- [**Olvasó/Betöltő**](./high_level/data_loader.md): Az olvasó vagy betöltő olyan elem, amely valós dokumentumot vesz át, és átalakítja egy dokumentum osztállyá, amelyet aztán használhatunk az Indexben és a lekérdezésekben. Jelenleg támogatjuk a sima szövegfájlokat és a PDF-eket, és sok más formátumot is tervezünk támogatni.
+
+- [**Indexek**](./high_level/data_index.md): Az indexek tárolják a csomópontokat és azok beágyazásait.
+
+- [**Lekérdezési motor**](./high_level/query_engine.md): A lekérdezési motorok generálják a lekérdezést, amit megadunk, és visszaadják az eredményt. A lekérdezési motorok általában egy előre elkészített promptot kombinálnak a kiválasztott csomópontokkal az Indexből, hogy az LLM-nek megfelelő kontextust adjanak a lekérdezés megválaszolásához.
+
+- [**Csevegőmotor**](./high_level/chat_engine.md): A csevegőmotor segít abban, hogy egy csevegőrobotot hozz létre, amely az Indexekkel interakcióba lép.
+
+## Alacsony szintű modulok
+
+- [**LLM**](./low_level/llm.md): Az LLM osztály egy egységes felületet nyújt egy nagy nyelvi modell szolgáltatóhoz, mint például az OpenAI GPT-4, az Anthropic Claude vagy a Meta LLaMA. Az osztályt leszármaztathatja, hogy saját nagy nyelvi modelljéhez csatlakoztatót írjon.
+
+- [**Embedding**](./low_level/embedding.md): Az embedding egy lebegőpontos számok vektoraként van reprezentálva. Az OpenAI text-embedding-ada-002 az alapértelmezett embedding modellünk, és minden általa generált embedding 1536 lebegőpontos számból áll. Egy másik népszerű embedding modell a BERT, amely 768 lebegőpontos számot használ minden csomópont reprezentálásához. Több olyan segédprogramot biztosítunk, amelyek az embeddingekkel való munkához szükségesek, beleértve 3 hasonlósági számítási lehetőséget és Maximum Marginal Relevance-t.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): A szöveg felosztási stratégiák rendkívül fontosak az embedding keresés általános hatékonyságához. Jelenleg, bár van egy alapértelmezett stratégia, nincs egyetlen megoldás, amely minden esetben megfelelő lenne. A forrásdokumentumoktól függően különböző felosztási méreteket és stratégiákat szeretnél használni. Jelenleg támogatjuk a fix méretű felosztást, a fix méretű felosztást átfedő szakaszokkal, a mondatokra való felosztást és a bekezdésekre való felosztást. A szövegfelosztót a NodeParser használja, amikor a `Document`-eket `Node`-okra osztja.
+
+- [**Retriever**](./low_level/retriever.md): A Retriever az, ami ténylegesen kiválasztja a Node-okat az indexből való visszakereséshez. Itt több vagy kevesebb Node-ot is visszakereshetsz lekérdezésenként, megváltoztathatod a hasonlósági függvényt, vagy létrehozhatsz saját retrievert az alkalmazásodban minden egyes egyedi felhasználási esethez. Például külön retrievert hozhatsz létre a kód tartalomhoz és a szöveges tartalomhoz.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): A ResponseSynthesizer felelős egy lekérdezési karakterlánc feldolgozásáért, és egy `Node` lista felhasználásával választ generál. Ez sokféle formában történhet, például az összes kontextuson való iterálással és egy válasz finomításával, vagy összefoglalók faépítésével és a gyökér összefoglaló visszaadásával.
+
+- [**Storage**](./low_level/storage.md): Eljön az a pont, amikor az indexeket, az adatokat és a vektorokat tárolni szeretnéd, ahelyett, hogy minden alkalommal újra futtatnád az embedding modelleket. Az IndexStore, DocStore, VectorStore és KVStore absztrakciók lehetővé teszik ezt. Együttesen alkotják a StorageContext-ot. Jelenleg lehetővé tesszük az embeddingek fájlokban történő tárolását a fájlrendszerben (vagy egy virtuális memóriafájlrendszerben), de aktívan hozzáadunk integrációkat a Vektor Adatbázisokhoz is.
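+
+Egy lehetséges vázlat arra, hogyan állnak össze ezek az alacsony szintű modulok egy `ServiceContext`-ben (a mezők az adott verziótól függően eltérhetnek):
+
+```typescript
+import {
+  OpenAI,
+  OpenAIEmbedding,
+  SimpleNodeParser,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// Az alapértelmezett komponensek felülírása sajátokkal
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
+  embedModel: new OpenAIEmbedding(),
+  nodeParser: new SimpleNodeParser(),
+});
+```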
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..d18710b3ecaf78295608b67b8209d6fca8b139b7
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 1
+---
+
+# Beágyazás
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A beágyazási modell a LlamaIndexben felelős a szöveg numerikus reprezentációinak létrehozásáért. Alapértelmezetten a LlamaIndex a `text-embedding-ada-002` modellt használja az OpenAI-tól.
+
+Ezt explicit módon beállíthatjuk a `ServiceContext` objektumban.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Referencia
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..ffc74ad4f14035fa0682683554de4b3de532ed69
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+Az LLM felelős a szöveg olvasásáért és természetes nyelvű válaszok generálásáért a lekérdezésekre. Alapértelmezetten a LlamaIndex.TS a `gpt-3.5-turbo`-t használja.
+
+Az LLM explicit módon beállítható a `ServiceContext` objektumban.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Referencia
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..8b22e30b4cf367bff980a28c0b4d4fbe287079ae
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A `NodeParser` a LlamaIndex-ben felelős a `Document` objektumok felosztásáért kezelhetőbb `Node` objektumokra. Amikor a `.fromDocuments()` metódust hívod, a `ServiceContext`-ben található `NodeParser` automatikusan elvégzi ezt neked. Ezenkívül használhatod dokumentumok előzetes felosztására is.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "10 éves vagyok. John 20 éves." }),
+]);
+```
+
+## TextSplitter
+
+Az alapvető szöveg felosztó mondatokra bontja a szöveget. Ezt önálló modulként is használhatod nyers szöveg felosztására.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Helló Világ");
+```
+
+"
+
+## API Referencia
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..17550763bf43cf7b4c71506f537a2e796d7ad232
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (Válaszszintetizátor)
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A ResponseSynthesizer felelős a lekérdezés, a csomópontok és a sablonok elküldéséért az LLM-nek a válasz generálásához. Néhány kulcsfontosságú módja van a válasz generálásának:
+
+- `Finomítás` (Refine): a válasz "létrehozása és finomítása" az egyes visszakeresett szövegrészleteken sorban végighaladva. Ez csomópontonként külön LLM-hívást jelent. Részletesebb válaszokhoz jó.
+- `Kompakt és finomít` (CompactAndRefine, alapértelmezett): minden LLM-hívás előtt "tömöríti" a promptot úgy, hogy a maximális promptméretbe a lehető legtöbb szövegrészletet helyezi el. Ha túl sok részlet van ahhoz, hogy egy promptba beleférjen, több kompakt prompton végighaladva "hozza létre és finomítja" a választ. Ugyanaz, mint a `finomítás`, de kevesebb LLM-hívással jár.
+- `Fa összefoglalás` (TreeSummarize): adott szövegrészletek és lekérdezés alapján rekurzívan felépít egy fát, és a gyökércsomópontot adja vissza válaszként. Összefoglaláshoz jó.
+- `Egyszerű válaszépítő` (SimpleResponseBuilder): adott szövegrészletek és lekérdezés esetén a lekérdezést minden szövegrészletre alkalmazza, a válaszokat egy tömbbe gyűjtve. Az összes válasz összefűzött sztringjét adja vissza. Akkor jó, ha a lekérdezést minden szövegrészletre külön-külön szeretnéd futtatni.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "10 éves vagyok." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John 20 éves." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Hány éves vagyok?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Referencia
+
+- [ResponseSynthesizer (Válaszszintetizátor)](../../api/classes/ResponseSynthesizer.md)
+- [Finomítás](../../api/classes/Refine.md)
+- [Kompakt és finomít](../../api/classes/CompactAndRefine.md)
+- [Fa összefoglalás](../../api/classes/TreeSummarize.md)
+- [Egyszerű válaszépítő](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..69b623aa7135760d23b213181b3344d62f14296d
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Visszakereső)
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A visszakereső (retriever) a LlamaIndexben arra szolgál, hogy egy lekérdezési karakterlánc segítségével `Node`-okat kérjünk le egy indexből. Egy `VectorIndexRetriever` a legjobb-k leghasonlóbb node-ot kéri le, míg egy `SummaryIndexRetriever` az összes node-ot visszaadja, függetlenül a lekérdezéstől.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Node-ok lekérése!
+const nodesWithScore = await retriever.retrieve("lekérdezési karakterlánc");
+```
+
+## API Referencia
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..b4a913dae875d9b4fa5ef3ab45e7b438f951ca86
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,44 @@
+---
+sidebar_position: 7
+---
+
+# Tárolás
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+A tárolás a LlamaIndex.TS-ben automatikusan működik, miután konfiguráltál egy `StorageContext` objektumot. Csak állítsd be a `persistDir`-t és csatold azt egy indexhez.
+
+Jelenleg csak a lemezre történő mentés és betöltés támogatott, a jövőbeni integrációk tervezés alatt állnak!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Teszt szöveg" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
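+Egy lehetséges vázlat a korábban elmentett index visszatöltésére (feltételezve, hogy ugyanazt a `persistDir`-t használjuk, és hogy a `VectorStoreIndex.init` elérhető az adott verzióban):
+
+```typescript
+import { VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+// Ugyanarra a könyvtárra mutató tárolási kontextus
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+// Az index betöltése a lemezről, újraindexelés nélkül
+const loadedIndex = await VectorStoreIndex.init({ storageContext });
+
+const queryEngine = loadedIndex.asQueryEngine();
+```
+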
+## API Referencia
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..926874d04cdf708dfc810fea9c2f438fdeea0fa9
--- /dev/null
+++ b/apps/docs/i18n/hu/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,56 @@
+---
+sidebar_position: 2
+---
+
+# Kezdő útmutató
+
+`Ezt a dokumentációt automatikusan fordították le, és tartalmazhat hibákat. Ne habozzon nyitni egy Pull Requestet a változtatások javasolására.`
+
+Miután [telepítette a LlamaIndex.TS-t az NPM segítségével](installation) és beállította az OpenAI kulcsát, már készen áll az első alkalmazásának elindítására:
+
+Egy új mappában:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # ha szükséges
+```
+
+Hozzon létre egy `example.ts` nevű fájlt. Ez a kód betölt néhány példaadatot, létrehoz egy dokumentumot, indexeli (amely az OpenAI-t használva beágyazásokat hoz létre), majd létrehoz egy lekérdezési motort adatainkkal kapcsolatos kérdések megválaszolásához.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Esszé betöltése az abramov.txt fájlból Node-ban
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Dokumentum objektum létrehozása az esszével
+  const document = new Document({ text: essay });
+
+  // Szöveg felosztása és beágyazások létrehozása. Tárolás egy VectorStoreIndex-ben
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Lekérdezés az indexben
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("Mit csinált az író az egyetemen?");
+
+  // Válasz kimenete
+  console.log(response.toString());
+}
+
+main();
+```
+
+Ezután futtathatja a következő paranccsal:
+
+```bash
+npx ts-node example.ts
+```
+
+Készen áll a további tanulásra? Nézze meg a NextJS játszótérünket a https://llama-playground.vercel.app/ oldalon. A forráskód elérhető a https://github.com/run-llama/ts-playground címen.
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..feeca75b5987bf85034aa1f1cecad23daddb70e9
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,104 @@
+---
+sidebar_position: 3
+---
+
+# Konsep Tingkat Tinggi
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+LlamaIndex.TS membantu Anda membangun aplikasi yang didukung oleh LLM (misalnya Q&A, chatbot) dengan menggunakan data kustom.
+
+Dalam panduan konsep tingkat tinggi ini, Anda akan belajar:
+
+- bagaimana LLM dapat menjawab pertanyaan menggunakan data Anda sendiri.
+- konsep-konsep kunci dan modul dalam LlamaIndex.TS untuk menyusun pipeline query Anda sendiri.
+
+## Menjawab Pertanyaan di Seluruh Data Anda
+
+LlamaIndex menggunakan metode dua tahap saat menggunakan LLM dengan data Anda:
+
+1. **tahap indexing**: mempersiapkan basis pengetahuan, dan
+2. **tahap querying**: mengambil konteks relevan dari pengetahuan untuk membantu LLM dalam merespons pertanyaan
+
+![](./_static/concepts/rag.jpg)
+
+Proses ini juga dikenal sebagai Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS menyediakan toolkit penting untuk membuat kedua tahap ini menjadi sangat mudah.
+
+Mari kita jelajahi setiap tahap secara detail.
+
+### Tahap Pengindeksan
+
+LlamaIndex.TS membantu Anda mempersiapkan basis pengetahuan dengan rangkaian konektor data dan indeks.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loader**](./modules/high_level/data_loader.md):
+Sebuah konektor data (yaitu `Reader`) mengambil data dari berbagai sumber data dan format data ke dalam representasi `Document` yang sederhana (teks dan metadata sederhana).
+
+[**Dokumen / Node**](./modules/high_level/documents_and_nodes.md): Sebuah `Document` adalah wadah generik untuk setiap sumber data - misalnya, PDF, keluaran API, atau data yang diambil dari database. Sebuah `Node` adalah unit atomik data dalam LlamaIndex dan mewakili "chunk" dari `Document` sumber. Ini adalah representasi kaya yang mencakup metadata dan hubungan (ke node lain) untuk memungkinkan operasi pengambilan yang akurat dan ekspresif.
+
+[**Indeks Data**](./modules/high_level/data_index.md):
+Setelah Anda mengambil data Anda, LlamaIndex membantu Anda mengindeks data ke dalam format yang mudah diambil.
+
+Di balik layar, LlamaIndex memparsing dokumen mentah menjadi representasi intermediate, menghitung vektor embedding, dan menyimpan data Anda di memori atau ke disk.
+
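+Sebagai ilustrasi singkat tahap indexing (teks di sini hanya contoh):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Membungkus data mentah ke dalam Document, lalu mengindeksnya
+const document = new Document({ text: "teks contoh" });
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+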
+"
+
+### Tahap Querying
+
+Pada tahap querying, pipeline query mengambil konteks yang paling relevan berdasarkan pertanyaan pengguna,
+dan meneruskannya ke LLM (bersama dengan pertanyaan) untuk mensintesis respons.
+
+Ini memberikan LLM pengetahuan terkini yang tidak ada dalam data pelatihan aslinya
+(sekaligus mengurangi halusinasi).
+
+Tantangan utama pada tahap querying adalah pengambilan, orkestrasi, dan penalaran atas basis pengetahuan (mungkin banyak).
+
+LlamaIndex menyediakan modul-modul yang dapat disusun yang membantu Anda membangun dan mengintegrasikan pipeline RAG untuk Q&A (query engine), chatbot (chat engine), atau sebagai bagian dari agen.
+
+Blok-blok bangunan ini dapat disesuaikan untuk mencerminkan preferensi peringkat, serta disusun untuk melakukan penalaran atas beberapa basis pengetahuan secara terstruktur.
+
+![](./_static/concepts/querying.jpg)
+
+#### Blok Bangunan
+
+[**Retrievers**](./modules/low_level/retriever.md):
+Sebuah retriever mendefinisikan bagaimana mengambil konteks yang relevan secara efisien dari basis pengetahuan (yaitu indeks) ketika diberikan sebuah query.
+Logika pengambilan spesifik berbeda untuk setiap indeks, yang paling populer adalah pengambilan padat terhadap indeks vektor.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+Sebuah response synthesizer menghasilkan respons dari LLM, menggunakan query pengguna dan kumpulan teks yang diambil.
+
+"
+
+#### Pipeline
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+Query engine adalah pipeline end-to-end yang memungkinkan Anda untuk mengajukan pertanyaan tentang data Anda.
+Ia menerima pertanyaan dalam bahasa alami, dan mengembalikan respons, bersama dengan konteks referensi yang diambil dan diteruskan ke LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+Chat engine adalah pipeline end-to-end untuk melakukan percakapan dengan data Anda
+(bukan hanya satu pertanyaan dan jawaban, tetapi berulang kali).
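+
+Sebagai gambaran, sketsa berikut (dengan asumsi `index` sudah dibuat seperti pada tahap indexing di atas) menunjukkan kedua pipeline tersebut:
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Query engine: satu pertanyaan, satu respons
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("Apa isi dokumen ini?");
+
+// Chat engine: percakapan bolak-balik dengan data Anda
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("Halo, jelaskan dokumen ini");
+```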
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..511e8dd76ccfcf86654d479648b826a0423695ea
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 4
+---
+
+# Contoh End-to-End
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+Kami menyertakan beberapa contoh end-to-end menggunakan LlamaIndex.TS di repositori ini.
+
+Lihat contoh-contoh di bawah ini atau coba dan lengkapi dalam beberapa menit dengan tutorial interaktif Github Codespace yang disediakan oleh Dev-Docs [di sini](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Baca file dan berdiskusi tentangnya dengan LLM.
+
+## [Indeks Vektor](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Buat indeks vektor dan lakukan kueri. Indeks vektor akan menggunakan embedding untuk mengambil node-node yang paling relevan sebanyak k teratas. Secara default, k teratas adalah 2.
+
+## [Indeks Ringkasan](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Buat indeks daftar dan cari di dalamnya. Contoh ini juga menggunakan `LLMRetriever`, yang akan menggunakan LLM untuk memilih node terbaik yang akan digunakan saat menghasilkan jawaban.
+
+## [Simpan / Muat Indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Buat dan muat indeks vektor. Penyimpanan ke disk dalam LlamaIndex.TS terjadi secara otomatis setelah objek konteks penyimpanan dibuat.
+
+"
+
+## [Indeks Vektor yang Dikustomisasi](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Buat indeks vektor dan lakukan kueri, sambil mengonfigurasi `LLM`, `ServiceContext`, dan `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Buat OpenAI LLM dan langsung gunakan untuk chat.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Membuat Llama-2 LLM dan langsung menggunakannya untuk chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Menggunakan `SubQuestionQueryEngine`, yang memecah kueri kompleks menjadi beberapa pertanyaan, dan kemudian menggabungkan respons dari semua sub-pertanyaan.
+
+"
+
+## [Modul Tingkat Rendah](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Contoh ini menggunakan beberapa komponen tingkat rendah, yang menghilangkan kebutuhan akan mesin kueri yang sebenarnya. Komponen-komponen ini dapat digunakan di mana saja, dalam aplikasi apa pun, atau disesuaikan dan disubkelasikan untuk memenuhi kebutuhan Anda sendiri.
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fc24a07fb124dd2c32e854c21b03e2da1bc5f8f
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Lingkungan
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+LlamaIndex saat ini secara resmi mendukung NodeJS 18 dan NodeJS 20.
+
+## Router Aplikasi NextJS
+
+Jika Anda menggunakan penangan rute/handler serverless NextJS App Router, Anda perlu menggunakan mode NodeJS:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+dan Anda perlu menambahkan pengecualian untuk pdf-parse di next.config.js Anda
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Menempatkan pdf-parse dalam mode NodeJS sebenarnya dengan NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..730b6bc6cdf48d9cbe9ed7801127041d791a6fc5
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+# Instalasi dan Pengaturan
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+
+Pastikan Anda memiliki NodeJS v18 atau yang lebih tinggi.
+
+
+## Menggunakan create-llama
+
+Cara termudah untuk memulai dengan LlamaIndex adalah dengan menggunakan `create-llama`. Alat CLI ini memungkinkan Anda untuk dengan cepat memulai membangun aplikasi LlamaIndex baru, dengan semua pengaturan yang sudah siap untuk Anda.
+
+Cukup jalankan
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+untuk memulai. Setelah aplikasi Anda dibuat, jalankan
+
+```bash npm2yarn
+npm run dev
+```
+
+untuk memulai server pengembangan. Anda dapat mengunjungi [http://localhost:3000](http://localhost:3000) untuk melihat aplikasi Anda.
+
+## Instalasi dari NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Variabel Lingkungan
+
+Contoh-contoh kami menggunakan OpenAI secara default. Anda perlu mengatur kunci Open AI Anda seperti ini:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Ganti dengan kunci Anda dari https://platform.openai.com/account/api-keys
+```
+
+Jika Anda ingin memuatnya secara otomatis setiap kali, tambahkan ke .zshrc/.bashrc Anda.
+
+PERINGATAN: jangan memasukkan kunci OpenAI Anda ke dalam kontrol versi.
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..ff3737764b13dd88a6a14ff463f98412dc42de52
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Apa itu LlamaIndex.TS?
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+LlamaIndex.TS adalah kerangka data untuk aplikasi LLM untuk mengambil, membangun struktur, dan mengakses data pribadi atau khusus domain. Meskipun paket python juga tersedia (lihat [di sini](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS menawarkan fitur inti dalam paket yang sederhana, dioptimalkan untuk penggunaan dengan TypeScript.
+
+## 🚀 Mengapa LlamaIndex.TS?
+
+Pada intinya, LLM menawarkan antarmuka bahasa alami antara manusia dan data yang disimpulkan. Model yang tersedia secara luas telah dilatih sebelumnya dengan jumlah data yang sangat besar yang tersedia secara publik, mulai dari Wikipedia dan milis hingga buku teks dan kode sumber.
+
+Aplikasi yang dibangun di atas LLM sering membutuhkan penambahan data pribadi atau khusus domain ke dalam model-model ini. Sayangnya, data tersebut dapat tersebar di berbagai aplikasi dan penyimpanan data yang terisolasi. Data tersebut mungkin berada di balik API, dalam database SQL, atau terperangkap dalam file PDF dan slide presentasi.
+
+Di sinilah peran **LlamaIndex.TS** menjadi penting.
+
+## 🦙 Bagaimana LlamaIndex.TS dapat membantu?
+
+LlamaIndex.TS menyediakan alat-alat berikut:
+
+- **Pemuatan data** mengimpor data `.txt`, `.pdf`, `.csv`, `.md`, dan `.docx` yang sudah ada secara langsung.
+- **Indeks data** membangun struktur data dalam representasi perantara yang mudah dan performa untuk dikonsumsi oleh LLM.
+- **Mesin** menyediakan akses bahasa alami ke data Anda. Misalnya:
+  - Mesin kueri adalah antarmuka pengambilan yang kuat untuk output yang diperkaya pengetahuan.
+  - Mesin obrolan adalah antarmuka percakapan untuk interaksi "bolak-balik" dengan data Anda.
+
+## 👨‍👩‍👧‍👦 Untuk siapa LlamaIndex ditujukan?
+
+LlamaIndex.TS menyediakan seperangkat alat inti yang penting bagi siapa pun yang membangun aplikasi LLM dengan JavaScript dan TypeScript.
+
+API tingkat tinggi kami memungkinkan pengguna pemula menggunakan LlamaIndex.TS untuk mengimpor dan mengambil data mereka.
+
+Untuk aplikasi yang lebih kompleks, API tingkat lebih rendah kami memungkinkan pengguna yang lebih mahir untuk menyesuaikan dan memperluas modul apa pun - penghubung data, indeks, pengambil, dan mesin kueri, sesuai dengan kebutuhan mereka.
+
+## Memulai
+
+`npm install llamaindex`
+
+Dokumentasi kami mencakup [Instruksi Instalasi](./installation.md) dan [Tutorial Awal](./starter.md) untuk membangun aplikasi pertama Anda.
+
+Setelah Anda mulai, [Konsep Tingkat Tinggi](./concepts.md) memberikan gambaran tentang arsitektur modular LlamaIndex. Untuk contoh praktis yang lebih mendalam, lihat [Tutorial End-to-End](./end_to_end.md).
+
+## 🗺️ Ekosistem
+
+Untuk mengunduh atau berkontribusi, temukan LlamaIndex di:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Komunitas
+
+Butuh bantuan? Punya saran fitur? Bergabunglah dengan komunitas LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..6607a9866e8ee7ec759192d1a2ac71d289ecd414
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Mesin Obrolan)
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+ChatEngine adalah cara cepat dan sederhana untuk mengobrol dengan data di indeks Anda.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// mulai melakukan obrolan
+const query = "Tentang apa data ini?";
+const response = await chatEngine.chat(query);
+```
+
+## Referensi API
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..a7a1eb5b7c651c87905970637b5f5de17ab9614a
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Indeks
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+Indeks adalah wadah dasar dan organisasi untuk data Anda. LlamaIndex.TS mendukung dua jenis indeks:
+
+- `VectorStoreIndex` - akan mengirimkan `Node` teratas ke LLM saat menghasilkan respons. Top-k default adalah 2.
+- `SummaryIndex` - akan mengirimkan setiap `Node` dalam indeks ke LLM untuk menghasilkan respons.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## Referensi API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7ccffdad86bd496c224bf909f3b99d7e744eee5
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Pembaca / Pemuat
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+LlamaIndex.TS mendukung pengambilan file dari folder dengan mudah menggunakan kelas `SimpleDirectoryReader`. Saat ini, file `.txt`, `.pdf`, `.csv`, `.md`, dan `.docx` didukung, dengan rencana untuk menambahkan lebih banyak di masa depan!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Referensi API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ea4da083bac37f1c0bc3b33665942b3bbc55131
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,20 @@
+---
+sidebar_position: 0
+---
+
+# Dokumen dan Node
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+`Dokumen` dan `Node` adalah blok dasar dari setiap indeks. Meskipun API untuk objek-objek ini mirip, objek `Dokumen` mewakili seluruh file, sedangkan `Node` adalah bagian-bagian kecil dari dokumen asli tersebut, yang cocok untuk LLM dan Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "teks", metadata: { key: "val" } });
+```
+
+## Referensi API
+
+- [Dokumen](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..f752625ed6fd7c7ae04700587b649072daa33456
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,59 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+Query engine membungkus sebuah `Retriever` dan sebuah `ResponseSynthesizer` ke dalam sebuah pipeline: string query digunakan untuk mengambil node-node yang relevan, yang kemudian dikirim ke LLM untuk menghasilkan respons.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("string query");
+```
+
+## Sub Question Query Engine
+
+Konsep dasar dari Sub Question Query Engine adalah membagi sebuah query tunggal menjadi beberapa query, mendapatkan jawaban untuk setiap query tersebut, dan kemudian menggabungkan jawaban-jawaban yang berbeda tersebut menjadi sebuah respons tunggal yang koheren untuk pengguna. Anda dapat menganggapnya sebagai teknik "pikirkan ini langkah demi langkah" namun dengan mengiterasi sumber data Anda!
+
+### Memulai
+
+Cara termudah untuk mencoba Sub Question Query Engine adalah dengan menjalankan file subquestion.ts di [contoh](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+SubQuestionQueryEngine diimplementasikan dengan menggunakan Tools. Ide dasar dari Tools adalah bahwa mereka adalah opsi yang dapat dieksekusi oleh large language model. Dalam kasus ini, SubQuestionQueryEngine kita bergantung pada QueryEngineTool, yang seperti yang Anda duga adalah sebuah tool untuk menjalankan query pada QueryEngine. Hal ini memungkinkan kita memberikan model opsi untuk melakukan query pada dokumen-dokumen yang berbeda misalnya. Anda juga dapat membayangkan bahwa SubQuestionQueryEngine dapat menggunakan Tool yang mencari sesuatu di web atau mendapatkan jawaban menggunakan Wolfram Alpha.
+
+Anda dapat mempelajari lebih lanjut tentang Tools dengan melihat dokumentasi Python LlamaIndex di https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
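+Sketsa berikut menggambarkan bagaimana `SubQuestionQueryEngine` menerima `QueryEngineTool` (dengan asumsi `queryEngine` sudah dibuat sebelumnya; nama dan deskripsi tool di sini hanya ilustrasi):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine,
+      metadata: {
+        name: "alat_dokumen",
+        description: "Menjawab pertanyaan tentang dokumen yang diindeks",
+      },
+    },
+  ],
+});
+
+// Memecah kueri kompleks menjadi sub-pertanyaan, lalu menggabungkan jawabannya
+const response = await subQuestionEngine.query("Pertanyaan kompleks dengan beberapa bagian");
+```
+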
+"
+
+## Referensi API
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d382a91d06a79029dd36da4f3eebb3bb5d6414d8
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,51 @@
+# Modul Inti
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+LlamaIndex.TS menawarkan beberapa modul inti, yang terbagi menjadi modul tingkat tinggi untuk memulai dengan cepat, dan modul tingkat rendah untuk menyesuaikan komponen kunci sesuai kebutuhan Anda; sketsa singkat setelah daftar modul tingkat rendah menunjukkan bagaimana modul-modul tersebut dirangkai.
+
+## Modul Tingkat Tinggi
+
+- [**Dokumen**](./high_level/documents_and_nodes.md): Sebuah dokumen mewakili file teks, file PDF, atau potongan data yang berkelanjutan lainnya.
+
+- [**Node**](./high_level/documents_and_nodes.md): Blok data dasar. Paling umum, ini adalah bagian dari dokumen yang dibagi menjadi bagian-bagian yang dapat dikelola yang cukup kecil untuk dimasukkan ke dalam model embedding dan LLM.
+
+- [**Pembaca/Pemuat**](./high_level/data_loader.md): Sebuah pembaca atau pemuat adalah sesuatu yang mengambil dokumen di dunia nyata dan mengubahnya menjadi kelas Dokumen yang kemudian dapat digunakan dalam Indeks dan kueri Anda. Saat ini kami mendukung file teks biasa dan PDF dengan banyak fitur lain yang akan datang.
+
+- [**Indeks**](./high_level/data_index.md): Indeks menyimpan Node dan embedding dari node-node tersebut.
+
+- [**Mesin Kueri**](./high_level/query_engine.md): Mesin kueri adalah yang menghasilkan kueri yang Anda masukkan dan memberikan Anda hasilnya. Mesin kueri umumnya menggabungkan prompt yang telah dibangun sebelumnya dengan node-node yang dipilih dari Indeks Anda untuk memberikan konteks yang diperlukan oleh LLM untuk menjawab kueri Anda.
+
+- [**Mesin Obrolan**](./high_level/chat_engine.md): Mesin Obrolan membantu Anda membangun chatbot yang akan berinteraksi dengan Indeks Anda.
+
+## Modul Tingkat Rendah
+
+- [**LLM**](./low_level/llm.md): Kelas LLM adalah antarmuka yang terpadu untuk penyedia model bahasa besar seperti OpenAI GPT-4, Anthropic Claude, atau Meta LLaMA. Anda dapat membuat subkelasnya untuk menulis konektor ke model bahasa besar Anda sendiri.
+
+- [**Embedding**](./low_level/embedding.md): Embedding direpresentasikan sebagai vektor angka floating point. Model embedding default kami adalah text-embedding-ada-002 dari OpenAI dan setiap embedding yang dihasilkannya terdiri dari 1.536 angka floating point. Model embedding populer lainnya adalah BERT yang menggunakan 768 angka floating point untuk merepresentasikan setiap Node. Kami menyediakan beberapa utilitas untuk bekerja dengan embedding termasuk 3 opsi perhitungan kesamaan dan Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Strategi pemisahan teks sangat penting untuk efektivitas pencarian embedding secara keseluruhan. Saat ini, meskipun kami memiliki default, tidak ada solusi yang cocok untuk semua kasus. Tergantung pada dokumen sumber, Anda mungkin ingin menggunakan ukuran dan strategi pemisahan yang berbeda. Saat ini kami mendukung pemisahan berdasarkan ukuran tetap, pemisahan berdasarkan ukuran tetap dengan bagian yang tumpang tindih, pemisahan berdasarkan kalimat, dan pemisahan berdasarkan paragraf. Text splitter digunakan oleh NodeParser saat memisahkan `Document` menjadi `Node`.
+
+- [**Retriever**](./low_level/retriever.md): Retriever adalah yang sebenarnya memilih Node yang akan diambil dari indeks. Di sini, Anda mungkin ingin mencoba mengambil lebih banyak atau lebih sedikit Node per permintaan, mengubah fungsi kesamaan Anda, atau membuat retriever sendiri untuk setiap kasus penggunaan individu dalam aplikasi Anda. Misalnya, Anda mungkin ingin memiliki retriever terpisah untuk konten kode vs konten teks.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer bertanggung jawab untuk mengambil string permintaan, dan menggunakan daftar `Node` untuk menghasilkan respons. Ini dapat berupa berbagai bentuk, seperti mengulang semua konteks dan menyempurnakan jawaban, atau membangun pohon ringkasan dan mengembalikan ringkasan utama.
+
+- [**Storage**](./low_level/storage.md): Pada suatu titik, Anda akan ingin menyimpan indeks, data, dan vektor Anda daripada menjalankan model embedding setiap kali. IndexStore, DocStore, VectorStore, dan KVStore adalah abstraksi yang memungkinkan Anda melakukannya. Digabungkan, mereka membentuk StorageContext. Saat ini, kami memungkinkan Anda menyimpan embedding Anda dalam file di sistem file (atau sistem file virtual di memori), tetapi kami juga sedang aktif menambahkan integrasi ke Vector Databases.
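+
+Sketsa yang mungkin tentang bagaimana modul-modul tingkat rendah ini dirangkai dalam sebuah `ServiceContext` (field dapat berbeda antar versi):
+
+```typescript
+import {
+  OpenAI,
+  OpenAIEmbedding,
+  SimpleNodeParser,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// Mengganti komponen default dengan komponen Anda sendiri
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
+  embedModel: new OpenAIEmbedding(),
+  nodeParser: new SimpleNodeParser(),
+});
+```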
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..e4530c1de31538fb1855bee31bf0a3fb8c3a4b98
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 1
+---
+
+# Menyematkan (Embedding)
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+Model penyemat di LlamaIndex bertanggung jawab untuk membuat representasi numerik dari teks. Secara default, LlamaIndex akan menggunakan model `text-embedding-ada-002` dari OpenAI.
+
+Ini dapat secara eksplisit diatur dalam objek `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Referensi API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..f1218dca6c1b0bc890bf8ce43db31f09a6f60ac9
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+LLM bertanggung jawab untuk membaca teks dan menghasilkan respons bahasa alami terhadap pertanyaan. Secara default, LlamaIndex.TS menggunakan `gpt-3.5-turbo`.
+
+LLM dapat secara eksplisit diatur dalam objek `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## Referensi API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..2c12d585da776cb567b902464a30b5b5f37b4b2e
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+`NodeParser` dalam LlamaIndex bertanggung jawab untuk membagi objek `Document` menjadi objek `Node` yang lebih mudah dikelola. Ketika Anda memanggil `.fromDocuments()`, `NodeParser` dari `ServiceContext` digunakan untuk melakukan ini secara otomatis untuk Anda. Atau, Anda dapat menggunakannya untuk membagi dokumen sebelum waktunya.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Saya berusia 10 tahun. John berusia 20 tahun." }),
+]);
+```
+
+## TextSplitter
+
+Pemisah teks yang mendasarinya akan membagi teks berdasarkan kalimat. Ini juga dapat digunakan sebagai modul mandiri untuk membagi teks mentah.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Halo Dunia");
+```
+
+## Referensi API
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..88ce7fc9d06c8eaa7ec655120c37ddeef446a13c
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+ResponseSynthesizer bertanggung jawab untuk mengirimkan query, node, dan template prompt ke LLM untuk menghasilkan respons. Ada beberapa mode utama untuk menghasilkan respons:
+
+- `Refine`: "membuat dan menyempurnakan" jawaban dengan secara berurutan melalui setiap potongan teks yang ditemukan. Ini membuat panggilan LLM terpisah per Node. Bagus untuk jawaban yang lebih rinci.
+- `CompactAndRefine` (default): "mengompakkan" prompt selama setiap panggilan LLM dengan memasukkan sebanyak mungkin potongan teks yang dapat muat dalam ukuran prompt maksimum. Jika terlalu banyak potongan untuk dimasukkan dalam satu prompt, "membuat dan menyempurnakan" jawaban dengan melalui beberapa prompt yang kompak. Sama seperti `refine`, tetapi seharusnya menghasilkan panggilan LLM yang lebih sedikit.
+- `TreeSummarize`: Diberikan sekumpulan potongan teks dan query, secara rekursif membangun pohon dan mengembalikan node root sebagai respons. Bagus untuk tujuan ringkasan.
+- `SimpleResponseBuilder`: Diberikan sekumpulan potongan teks dan query, menerapkan query ke setiap potongan teks sambil mengumpulkan respons ke dalam sebuah array. Mengembalikan string yang digabungkan dari semua respons. Bagus ketika Anda perlu menjalankan query yang sama secara terpisah terhadap setiap potongan teks.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Saya berusia 10 tahun." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John berusia 20 tahun." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Berapa usia saya?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## Referensi API
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..efcfa9109581cf2a4e0865e765e4cc8a3ea6f056
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+Retriever dalam LlamaIndex adalah yang digunakan untuk mengambil `Node` dari indeks menggunakan string query. Sebuah `VectorIndexRetriever` akan mengambil k node yang paling mirip. Sementara itu, `SummaryIndexRetriever` akan mengambil semua node tanpa memperdulikan query.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Mengambil node!
+const nodesWithScore = await retriever.retrieve("string query");
+```
+
+## Referensi API
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..027d936dcc0ece7ff7b3c51dcb0e73d5373089cd
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Penyimpanan
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+Penyimpanan di LlamaIndex.TS bekerja secara otomatis setelah Anda mengonfigurasi objek `StorageContext`. Cukup konfigurasikan `persistDir` dan lampirkan ke sebuah indeks.
+
+Saat ini, hanya penyimpanan dan pengambilan dari disk yang didukung, dengan integrasi masa depan yang direncanakan!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## Referensi API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..292cdfcb58819f18dc991597e4a76e78645f561e
--- /dev/null
+++ b/apps/docs/i18n/in/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Panduan Pemula
+
+`Dokumentasi ini telah diterjemahkan secara otomatis dan mungkin mengandung kesalahan. Jangan ragu untuk membuka Pull Request untuk mengusulkan perubahan.`
+
+Setelah Anda [menginstal LlamaIndex.TS menggunakan NPM](installation) dan mengatur kunci OpenAI Anda, Anda siap untuk memulai aplikasi pertama Anda:
+
+Di dalam folder baru:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # jika diperlukan
+```
+
+Buat file `example.ts`. Kode ini akan memuat beberapa data contoh, membuat dokumen, mengindeksnya (yang menciptakan embedding menggunakan OpenAI), dan kemudian membuat mesin kueri untuk menjawab pertanyaan tentang data tersebut.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Memuat esai dari abramov.txt di Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Membuat objek Dokumen dengan esai
+  const document = new Document({ text: essay });
+
+  // Memisahkan teks dan membuat embedding. Menyimpannya dalam VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Mengkueri indeks
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Apa yang dilakukan penulis di perguruan tinggi?",
+  );
+
+  // Menampilkan respons
+  console.log(response.toString());
+}
+
+main();
+```
+
+Kemudian Anda dapat menjalankannya menggunakan
+
+```bash
+npx ts-node example.ts
+```
+
+Siap untuk belajar lebih lanjut? Lihat playground NextJS kami di https://llama-playground.vercel.app/. Sumbernya tersedia di https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..95971ed81f79fb473708da40f10c44df4a72aa6a
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Concetti di alto livello
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+LlamaIndex.TS ti aiuta a costruire applicazioni basate su LLM (ad esempio, Q&A, chatbot) su dati personalizzati.
+
+In questa guida ai concetti di alto livello, imparerai:
+
+- come un LLM può rispondere alle domande utilizzando i tuoi dati.
+- concetti chiave e moduli in LlamaIndex.TS per comporre la tua pipeline di interrogazione.
+
+## Rispondere alle domande sui tuoi dati
+
+LlamaIndex utilizza un metodo a due fasi quando si utilizza un LLM con i tuoi dati:
+
+1. **fase di indicizzazione**: preparazione di una base di conoscenza, e
+2. **fase di interrogazione**: recupero del contesto rilevante dalla conoscenza per assistere il LLM nel rispondere a una domanda
+
+![](./_static/concepts/rag.jpg)
+
+Questo processo è anche conosciuto come Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS fornisce il toolkit essenziale per rendere entrambe le fasi estremamente facili.
+
+Esploriamo ogni fase nel dettaglio.
+
+### Fase di indicizzazione
+
+LlamaIndex.TS ti aiuta a preparare la base di conoscenza con una serie di connettori dati e indici.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+Un connettore dati (ad esempio, `Reader`) acquisisce dati da diverse fonti e formati dati in una semplice rappresentazione `Document` (testo e metadati semplici).
+
+[**Documenti / Nodi**](./modules/high_level/documents_and_nodes.md): Un `Document` è un contenitore generico per qualsiasi fonte di dati - ad esempio, un PDF, un output di un'API o dati recuperati da un database. Un `Node` è l'unità atomica di dati in LlamaIndex e rappresenta un "chunk" di un `Document` di origine. È una rappresentazione completa che include metadati e relazioni (con altri nodi) per consentire operazioni di recupero accurate ed espressive.
+
+[**Indici dei dati**](./modules/high_level/data_index.md):
+Una volta che hai acquisito i tuoi dati, LlamaIndex ti aiuta a indicizzare i dati in un formato facilmente recuperabile.
+
+Sotto il cofano, LlamaIndex analizza i documenti grezzi in rappresentazioni intermedie, calcola gli embedding vettoriali e memorizza i tuoi dati in memoria o su disco.
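+
+Ad esempio, uno schizzo minimo della fase di indicizzazione (si assume una chiave OpenAI già configurata):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Acquisisce i dati in un Document e li indicizza
+// (calcola gli embedding e li memorizza nell'indice)
+const document = new Document({ text: "Il testo dei tuoi dati..." });
+const index = await VectorStoreIndex.fromDocuments([document]);
+```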
+
+"
+
+### Fase di interrogazione
+
+Nella fase di interrogazione, la pipeline di interrogazione recupera il contesto più rilevante dato una query dell'utente,
+e lo passa al LLM (insieme alla query) per sintetizzare una risposta.
+
+Ciò fornisce al LLM una conoscenza aggiornata che non è presente nei suoi dati di addestramento originali
+(riducendo anche le allucinazioni).
+
+La sfida principale nella fase di interrogazione è il recupero, l'orchestrazione e il ragionamento su (potenzialmente molte) basi di conoscenza.
+
+LlamaIndex fornisce moduli componibili che ti aiutano a costruire e integrare pipeline RAG per Q&A (motore di interrogazione), chatbot (motore di chat) o come parte di un agente.
+
+Questi blocchi di costruzione possono essere personalizzati per riflettere le preferenze di ranking, nonché composti per ragionare su più basi di conoscenza in modo strutturato.
+
+![](./_static/concepts/querying.jpg)
+
+#### Blocchi di costruzione
+
+[**Retriever**](./modules/low_level/retriever.md):
+Un retriever definisce come recuperare efficientemente il contesto rilevante da una base di conoscenza (cioè un indice) quando viene fornita una query.
+La logica specifica di recupero differisce per diversi indici, il più popolare è il recupero denso su un indice vettoriale.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+Un response synthesizer genera una risposta da un LLM, utilizzando una query dell'utente e un insieme di frammenti di testo recuperati.
+
+"
+
+#### Pipeline
+
+[**Motori di interrogazione**](./modules/high_level/query_engine.md):
+Un motore di interrogazione è una pipeline end-to-end che ti consente di fare domande sui tuoi dati.
+Prende in input una query in linguaggio naturale e restituisce una risposta, insieme al contesto di riferimento recuperato e passato al LLM.
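+
+Ad esempio, uno schizzo minimo della fase di interrogazione (si assume l'`index` creato nella fase di indicizzazione):
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Una domanda sui tuoi dati");
+console.log(response.toString());
+```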
+
+[**Motori di chat**](./modules/high_level/chat_engine.md):
+Un motore di chat è una pipeline end-to-end per avere una conversazione con i tuoi dati
+(più scambi di domande e risposte anziché una singola domanda e risposta).
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..b27f069635872d9643d742884431bdd1244ecb43
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,51 @@
+---
+sidebar_position: 4
+---
+
+# Esempi end-to-end
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Includiamo diversi esempi end-to-end utilizzando LlamaIndex.TS nel repository.
+
+Controlla gli esempi di seguito o provale e completale in pochi minuti con i tutorial interattivi di Github Codespace forniti da Dev-Docs [qui](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Motore di chat](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Leggi un file e chatta al riguardo con il LLM.
+
+## [Indice vettoriale](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Crea un indice vettoriale e interrogalo. L'indice vettoriale utilizzerà gli embedding per recuperare i k nodi più rilevanti (top-k). Per impostazione predefinita, k è 2.
+
+## [Indice di riepilogo](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Crea un indice di riepilogo (list index) e interrogalo. Questo esempio utilizza anche il `LLMRetriever`, che utilizzerà il LLM per selezionare i migliori nodi da utilizzare durante la generazione della risposta.
+
+## [Salva / Carica un indice](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Crea e carica un indice vettoriale. La persistenza su disco in LlamaIndex.TS avviene automaticamente una volta creato un oggetto di contesto di archiviazione.
+
+## [Indice vettoriale personalizzato](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Crea un indice vettoriale e interrogalo, configurando anche il `LLM`, il `ServiceContext` e il `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Crea un OpenAI LLM e utilizzalo direttamente per la chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Crea un Llama-2 LLM e utilizzalo direttamente per la chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Utilizza il `SubQuestionQueryEngine`, che suddivide le query complesse in più sotto-domande e quindi aggrega una risposta a partire dalle risposte a tutte le sotto-domande.
+
+"
+
+## [Moduli a basso livello](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Questo esempio utilizza diversi componenti a basso livello, che eliminano la necessità di un motore di interrogazione effettivo. Questi componenti possono essere utilizzati ovunque, in qualsiasi applicazione, o personalizzati e sottoclassificati per soddisfare le tue esigenze.
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..9f5eabb390ff50beca71c52188a81780291f7870
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Ambienti
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+LlamaIndex attualmente supporta ufficialmente NodeJS 18 e NodeJS 20.
+
+## Router dell'app NextJS
+
+Se stai utilizzando i route handler o le funzioni serverless del router dell'app NextJS, dovrai utilizzare la modalità NodeJS:
+
+```js
+export const runtime = "nodejs"; // predefinito
+```
+
+e dovrai aggiungere un'eccezione per pdf-parse nel tuo next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Mette pdf-parse nella modalità NodeJS effettiva con il router dell'app NextJS
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0f90448636683f3e4f14ebd55d02b8ee06dc9776
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installazione e Configurazione
+
+```Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.```
+
+
+Assicurati di avere NodeJS v18 o versioni successive.
+
+
+## Utilizzo di create-llama
+
+Il modo più semplice per iniziare con LlamaIndex è utilizzare `create-llama`. Questo strumento CLI ti consente di avviare rapidamente la creazione di una nuova applicazione LlamaIndex, con tutto configurato per te.
+
+Esegui semplicemente
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+per iniziare. Una volta generata la tua app, esegui
+
+```bash npm2yarn
+npm run dev
+```
+
+per avviare il server di sviluppo. Puoi quindi visitare [http://localhost:3000](http://localhost:3000) per visualizzare la tua app.
+
+## Installazione da NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Variabili d'ambiente
+
+I nostri esempi utilizzano OpenAI di default. Dovrai configurare la tua chiave OpenAI nel seguente modo:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Sostituisci con la tua chiave da https://platform.openai.com/account/api-keys
+```
+
+Se desideri caricarla automaticamente ogni volta, aggiungila al tuo .zshrc/.bashrc.
+
+ATTENZIONE: non inserire la tua chiave OpenAI nel controllo di versione.
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..a7bd0156b2359e12a7d5434359d6af782e7443ab
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Cos'è LlamaIndex.TS?
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+LlamaIndex.TS è un framework dati per applicazioni LLM per l'ingestione, la strutturazione e l'accesso a dati privati o specifici del dominio. È disponibile anche un pacchetto Python (vedi [qui](https://docs.llamaindex.ai/en/stable/)), ma LlamaIndex.TS offre funzionalità di base in un pacchetto semplice, ottimizzato per l'uso con TypeScript.
+
+## 🚀 Perché LlamaIndex.TS?
+
+Fondamentalmente, le LLM offrono un'interfaccia di linguaggio naturale tra gli esseri umani e i dati inferiti. I modelli ampiamente disponibili vengono pre-addestrati su enormi quantità di dati disponibili pubblicamente, da Wikipedia e mailing list a libri di testo e codice sorgente.
+
+Le applicazioni costruite su LLM spesso richiedono di integrare questi modelli con dati privati o specifici del dominio. Purtroppo, questi dati possono essere distribuiti tra applicazioni e archivi dati isolati. Possono trovarsi dietro API, in database SQL o intrappolati in PDF e presentazioni.
+
+Ed è qui che entra in gioco **LlamaIndex.TS**.
+
+## 🦙 Come può aiutare LlamaIndex.TS?
+
+LlamaIndex.TS fornisce i seguenti strumenti:
+
+- **Caricamento dati** per l'ingestione diretta dei dati esistenti in formato `.txt`, `.pdf`, `.csv`, `.md` e `.docx`.
+- **Indici dati** per la strutturazione dei dati in rappresentazioni intermedie facili e performanti per i LLM.
+- **Motori** che forniscono accesso ai dati in linguaggio naturale. Ad esempio:
+  - I motori di interrogazione sono potenti interfacce di recupero per l'output arricchito di conoscenza.
+  - I motori di chat sono interfacce conversazionali per interazioni "avanti e indietro" a più messaggi con i dati.
+
+## 👨‍👩‍👧‍👦 A chi si rivolge LlamaIndex?
+
+LlamaIndex.TS fornisce un set di strumenti di base, essenziali per chiunque stia costruendo app LLM con JavaScript e TypeScript.
+
+La nostra API di alto livello consente agli utenti principianti di utilizzare LlamaIndex.TS per acquisire e interrogare i loro dati.
+
+Per applicazioni più complesse, le nostre API di livello inferiore consentono agli utenti avanzati di personalizzare ed estendere qualsiasi modulo: connettori di dati, indici, recuperatori e motori di interrogazione, per adattarli alle proprie esigenze.
+
+## Primi passi
+
+`npm install llamaindex`
+
+La nostra documentazione include le [Istruzioni di installazione](./installation.md) e un [Tutorial introduttivo](./starter.md) per creare la tua prima applicazione.
+
+Una volta che sei pronto, i [Concetti di alto livello](./concepts.md) offrono una panoramica dell'architettura modulare di LlamaIndex. Per ulteriori esempi pratici, consulta i nostri [Tutorial end-to-end](./end_to_end.md).
+
+## 🗺️ Ecosistema
+
+Per scaricare o contribuire, trova LlamaIndex su:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Hai bisogno di aiuto? Hai suggerimenti per una nuova funzionalità? Unisciti alla community di LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..627fd5f3732c155f2f618ed8db653c8dc076b243
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Motore di Chat)
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Il motore di chat è un modo rapido e semplice per chattare con i dati nel tuo indice.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// si assume un `index` già creato (ad es. con VectorStoreIndex.fromDocuments)
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// inizia a chattare
+const response = await chatEngine.chat("Di cosa parlano i miei dati?");
+```
+
+## Riferimenti API
+
+- [ContextChatEngine (Motore di Chat di Contesto)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (Motore di Chat per Domande Condensate)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..06a95cecd1d07ee3b8a02dcb2d8216eaf84406f6
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Indice
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Un indice è il contenitore e l'organizzazione di base per i tuoi dati. LlamaIndex.TS supporta due tipi di indici:
+
+- `VectorStoreIndex` - invierà i primi `Node` al LLM quando genera una risposta. Il valore predefinito per i primi è 2.
+- `SummaryIndex` - invierà ogni `Node` presente nell'indice al LLM per generare una risposta.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
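+
+Analogamente, uno schizzo minimo con un `SummaryIndex` (qui si riutilizza lo stesso `document` dell'esempio sopra):
+
+```typescript
+import { SummaryIndex } from "llamaindex";
+
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+```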
+
+## Riferimento API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..e0eb6a8ce95ac211629d1f25fffa4d850877e9d6
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Lettore / Caricatore
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+LlamaIndex.TS supporta il caricamento semplice di file da cartelle utilizzando la classe `SimpleDirectoryReader`. Attualmente, sono supportati i file `.txt`, `.pdf`, `.csv`, `.md` e `.docx`, con ulteriori pianificati per il futuro!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Riferimento API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..569f7edbaa6d95bd40f0a2dec02ab08b9b35b157
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documenti e Nodi
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+I `Documenti` e i `Nodi` sono i blocchi fondamentali di qualsiasi indice. Sebbene l'API per questi oggetti sia simile, gli oggetti `Documenti` rappresentano interi file, mentre i `Nodi` sono pezzi più piccoli di quel documento originale, adatti per un LLM e una Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "testo", metadata: { chiave: "valore" } });
+```
+
+## Riferimento API
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f2aad6e975398f23074fd9b23021f36c53aef28
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Motore di Query)
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Un motore di query avvolge un `Retriever` e un `ResponseSynthesizer` in una pipeline, che utilizzerà la stringa di query per recuperare i nodi e quindi inviarli al LLM per generare una risposta.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("stringa di query");
+```
+
+## Motore di Query per Sotto-Domande
+
+Il concetto di base del Motore di Query per Sotto-Domande è che suddivide una singola query in più query, ottiene una risposta per ciascuna di queste query e quindi combina queste diverse risposte in una singola risposta coerente per l'utente. Puoi pensarlo come la tecnica di "pensare passo dopo passo" ma iterando sulle tue fonti di dati!
+
+### Per iniziare
+
+Il modo più semplice per iniziare a provare il Motore di Query per Sotto-Domande è eseguire il file subquestion.ts in [esempi](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Strumenti
+
+Il Motore di Query per Sotto-Domande è implementato con Strumenti. L'idea di base degli Strumenti è che siano opzioni eseguibili per il grande modello di linguaggio. In questo caso, il nostro Motore di Query per Sotto-Domande si basa su QueryEngineTool, che come avrai intuito è uno strumento per eseguire query su un Motore di Query. Ciò ci consente di fornire al modello un'opzione per interrogare documenti diversi per domande diverse, ad esempio. Potresti immaginare anche che il Motore di Query per Sotto-Domande possa utilizzare uno Strumento che cerca qualcosa sul web o ottiene una risposta utilizzando Wolfram Alpha.
+
+Puoi saperne di più sugli Strumenti consultando la documentazione di LlamaIndex Python su https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
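+
+A titolo di esempio, ecco uno schizzo minimo di come comporre un `SubQuestionQueryEngine` con un `QueryEngineTool` (il nome e la descrizione dello strumento sono ipotetici, e si assume un `index` già creato):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "vector_query_engine", // nome ipotetico dello strumento
+        description: "Utile per rispondere a domande sul documento caricato",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query(
+  "Confronta i temi principali del documento",
+);
+```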
+
+## Riferimento API
+
+- [RetrieverQueryEngine (Motore di Query del Recuperatore)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Motore di Query delle Sotto-domande)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Strumento del Motore di Query)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..1494909f514ca2b0b9abf2baf80a42198b776604
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Moduli principali
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+LlamaIndex.TS offre diversi moduli principali, suddivisi in moduli di alto livello per iniziare rapidamente e moduli di basso livello per personalizzare i componenti chiave come desideri.
+
+## Moduli di alto livello
+
+- [**Documento**](./high_level/documents_and_nodes.md): Un documento rappresenta un file di testo, un file PDF o un'altra porzione di dati contigui.
+
+- [**Nodo**](./high_level/documents_and_nodes.md): Il blocco di base dei dati. Più comunemente, questi sono parti del documento suddivise in pezzi gestibili abbastanza piccoli da poter essere inseriti in un modello di embedding e LLM.
+
+- [**Lettore/Caricatore**](./high_level/data_loader.md): Un lettore o caricatore è qualcosa che prende un documento nel mondo reale e lo trasforma in una classe Document che può quindi essere utilizzata nel tuo indice e nelle tue query. Attualmente supportiamo file di testo semplice e PDF con molti altri formati in arrivo.
+
+- [**Indici**](./high_level/data_index.md): gli indici memorizzano i nodi e gli embedding di quei nodi.
+
+- [**Motore di query**](./high_level/query_engine.md): I motori di query elaborano la query che inserisci e restituiscono il risultato. I motori di query combinano generalmente un prompt predefinito con i nodi selezionati dal tuo indice per fornire al LLM il contesto di cui ha bisogno per rispondere alla tua query.
+
+- [**Motore di chat**](./high_level/chat_engine.md): Un motore di chat ti aiuta a costruire un chatbot che interagirà con i tuoi indici.
+
+## Moduli di basso livello
+
+- [**LLM**](./low_level/llm.md): La classe LLM è un'interfaccia unificata su un grande provider di modelli di linguaggio come OpenAI GPT-4, Anthropic Claude o Meta LLaMA. Puoi sottoclassificarla per scrivere un connettore per il tuo grande modello di linguaggio.
+
+- [**Embedding**](./low_level/embedding.md): Un embedding è rappresentato come un vettore di numeri in virgola mobile. Il modello di embedding predefinito di OpenAI, text-embedding-ada-002, consiste in 1.536 numeri in virgola mobile. Un altro modello di embedding popolare è BERT, che utilizza 768 numeri in virgola mobile per rappresentare ogni nodo. Forniamo una serie di utilità per lavorare con gli embedding, incluse 3 opzioni di calcolo della similarità e la Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Le strategie di divisione del testo sono estremamente importanti per l'efficacia complessiva della ricerca dell'embedding. Attualmente, sebbene abbiamo una divisione predefinita, non esiste una soluzione universale. A seconda dei documenti di origine, potresti voler utilizzare diverse dimensioni e strategie di divisione. Attualmente supportiamo la divisione per dimensione fissa, la divisione per dimensione fissa con sezioni sovrapposte, la divisione per frase e la divisione per paragrafo. Il text splitter viene utilizzato dal NodeParser per dividere i `Document` in `Node`.
+
+- [**Retriever**](./low_level/retriever.md): Il Retriever è ciò che effettivamente sceglie i Node da recuperare dall'indice. Qui, potresti voler provare a recuperare più o meno Node per query, cambiare la tua funzione di similarità o creare il tuo retriever per ogni caso d'uso specifico nella tua applicazione. Ad esempio, potresti voler avere un retriever separato per il contenuto del codice rispetto al contenuto del testo.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): Il ResponseSynthesizer è responsabile di prendere una stringa di query e utilizzare una lista di `Node` per generare una risposta. Questo può assumere molte forme, come iterare su tutto il contesto e affinare una risposta o costruire un albero di riassunti e restituire il riassunto principale.
+
+- [**Storage**](./low_level/storage.md): A un certo punto vorrai archiviare i tuoi indici, dati e vettori anziché eseguire nuovamente i modelli di embedding ogni volta. IndexStore, DocStore, VectorStore e KVStore sono astrazioni che ti consentono di farlo. Insieme, formano il StorageContext. Attualmente, ti consentiamo di persistere i tuoi embedding in file sul filesystem (o in un filesystem virtuale in memoria), ma stiamo anche aggiungendo attivamente integrazioni con Vector Databases.
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..e89fde607bf849640d20c58948fd51ed980ffb1f
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Incorporazione
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Il modello di incorporazione in LlamaIndex è responsabile della creazione di rappresentazioni numeriche del testo. Per impostazione predefinita, LlamaIndex utilizzerà il modello `text-embedding-ada-002` di OpenAI.
+
+Ciò può essere esplicitamente impostato nell'oggetto `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Riferimento API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..cfaf8a002252f2a2fc504c406ec55c44baceadc3
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Il LLM è responsabile della lettura del testo e della generazione di risposte in linguaggio naturale alle query. Per impostazione predefinita, LlamaIndex.TS utilizza `gpt-3.5-turbo`.
+
+Il LLM può essere impostato esplicitamente nell'oggetto `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## Riferimento API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..280667fb59ab81bc8b281064d524ffc868a1d1fa
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Il `NodeParser` in LlamaIndex è responsabile della suddivisione degli oggetti `Document` in oggetti `Node` più gestibili. Quando chiami `.fromDocuments()`, il `NodeParser` del `ServiceContext` viene utilizzato automaticamente per farlo. In alternativa, puoi usarlo per suddividere i documenti in anticipo.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Ho 10 anni. John ha 20 anni." }),
+]);
+```
+
+## TextSplitter
+
+Il text splitter sottostante dividerà il testo in frasi. Può anche essere utilizzato come modulo autonomo per dividere il testo grezzo.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Ciao Mondo");
+```
+
+## Riferimento API
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..f326095d43c8c2a25bebf2962ec4f974336bfc06
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (Sintetizzatore di Risposta)
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Il ResponseSynthesizer è responsabile dell'invio della query, dei nodi e dei modelli di prompt al LLM per generare una risposta. Esistono alcune modalità principali per generare una risposta:
+
+- `Refine` (Raffinare): "crea e raffina" una risposta passando sequenzialmente attraverso ogni frammento di testo recuperato. Questo effettua una chiamata separata al LLM per ogni nodo. Utile per risposte più dettagliate.
+- `CompactAndRefine` (Compatto e Raffinare) (predefinito): "compatta" il prompt durante ogni chiamata al LLM inserendo il maggior numero possibile di frammenti di testo che possono essere inseriti nella dimensione massima del prompt. Se ci sono troppi frammenti da inserire in un solo prompt, "crea e raffina" una risposta passando attraverso più prompt compatti. Lo stesso di `refine`, ma dovrebbe comportare meno chiamate al LLM.
+- `TreeSummarize` (Sommario ad Albero): Dato un insieme di frammenti di testo e la query, costruisce ricorsivamente un albero e restituisce il nodo radice come risposta. Utile per scopi di sommario.
+- `SimpleResponseBuilder` (Costruttore di Risposta Semplice): Dato un insieme di frammenti di testo e la query, applica la query a ciascun frammento di testo accumulando le risposte in un array. Restituisce una stringa concatenata di tutte le risposte. Utile quando è necessario eseguire la stessa query separatamente su ciascun frammento di testo.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Ho 10 anni." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John ha 20 anni." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Quanti anni ho?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## Riferimento API
+
+- [ResponseSynthesizer (Sintetizzatore di Risposta)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Raffinare)](../../api/classes/Refine.md)
+- [CompactAndRefine (Compatto e Raffinare)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (Sommario ad Albero)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (Costruttore di Risposta Semplice)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..32d0ba8f67402667ee645bca12b324d69a3420a7
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Un retriever in LlamaIndex è ciò che viene utilizzato per recuperare i `Node` da un indice utilizzando una stringa di query. Un `VectorIndexRetriever` recupererà i k nodi più simili. Un `SummaryIndexRetriever`, invece, recupererà tutti i nodi indipendentemente dalla query.
+
+```typescript
+// si assume un indice vettoriale già creato (ad es. con VectorStoreIndex.fromDocuments)
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Recupera i nodi!
+const nodesWithScore = await retriever.retrieve("stringa di query");
+```
+
+## Riferimento API
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..e29e0f621798fd4fa65939721cece864a50eefe6
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Archiviazione
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+L'archiviazione in LlamaIndex.TS funziona automaticamente una volta configurato un oggetto `StorageContext`. Basta configurare il `persistDir` e collegarlo a un indice.
+
+Al momento, è supportato solo il salvataggio e il caricamento da disco, con integrazioni future in programma!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testo di prova" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## Riferimento API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..749a44a168ae1ba85070a19b1b6fd12c4cc22215
--- /dev/null
+++ b/apps/docs/i18n/it/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Tutorial di Avvio
+
+`Questa documentazione è stata tradotta automaticamente e può contenere errori. Non esitare ad aprire una Pull Request per suggerire modifiche.`
+
+Una volta che hai [installato LlamaIndex.TS utilizzando NPM](installation) e configurato la tua chiave OpenAI, sei pronto per avviare la tua prima app:
+
+In una nuova cartella:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # se necessario
+```
+
+Crea il file `example.ts`. Questo codice caricherà alcuni dati di esempio, creerà un documento, lo indicizzerà (creando embedding utilizzando OpenAI) e quindi creerà un motore di interrogazione per rispondere alle domande sui dati.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Carica l'articolo da abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Crea un oggetto Document con l'articolo
+  const document = new Document({ text: essay });
+
+  // Dividi il testo e crea gli embedding. Salvali in un VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Interroga l'indice
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Cosa ha fatto l'autore all'università?",
+  );
+
+  // Stampa la risposta
+  console.log(response.toString());
+}
+
+main();
+```
+
+Successivamente puoi eseguirlo utilizzando
+
+```bash
+npx ts-node example.ts
+```
+
+Pronto per saperne di più? Dai un'occhiata al nostro playground NextJS su https://llama-playground.vercel.app/. Il codice sorgente è disponibile su https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..37ccc52847f7c63637e75a959680bfec804b7607
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,79 @@
+---
+sidebar_position: 3
+---
+
+# ハイレベルな概念
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndex.TSは、カスタムデータ上でLLMパワードアプリケーション(例:Q&A、チャットボット)を構築するのに役立ちます。
+
+このハイレベルな概念ガイドでは、次のことを学びます:
+
+- LLMが独自のデータを使用して質問に答える方法
+- LlamaIndex.TSの主要な概念とモジュールを使用して、独自のクエリパイプラインを構築する方法
+
+## データ全体での質問への回答
+
+LlamaIndexは、データとLLMを使用する場合に、2つのステージの方法を使用します:
+
+1. **インデックス作成ステージ**:ナレッジベースの準備
+2. **クエリステージ**:質問に応答するために、ナレッジから関連するコンテキストを取得する
+
+![](./_static/concepts/rag.jpg)
+
+このプロセスは、Retrieval Augmented Generation(RAG)とも呼ばれています。
+
+LlamaIndex.TSは、両方のステップを非常に簡単に行うための必須のツールキットを提供します。
+
+それでは、各ステージを詳しく見てみましょう。
+
+### インデックス作成ステージ
+
+LlamaIndex.TSは、データコネクタとインデックスのスイートを使用して、ナレッジベースを準備するのに役立ちます。
+
+![](./_static/concepts/indexing.jpg)
+
+[**データローダー**](./modules/high_level/data_loader.md):
+データコネクタ(つまり、`Reader`)は、さまざまなデータソースとデータ形式からデータを取り込み、シンプルな`Document`表現(テキストとシンプルなメタデータ)に変換します。
+
+[**ドキュメント/ノード**](./modules/high_level/documents_and_nodes.md):`Document`は、任意のデータソース(例:PDF、APIの出力、データベースからの取得データ)を囲む汎用のコンテナです。`Node`は、LlamaIndexのデータの原子単位であり、ソース`Document`の「チャンク」を表します。これは、メタデータや関係(他のノードへの関連)を含む豊富な表現であり、正確で表現力のある検索操作を可能にします。
+
+[**データインデックス**](./modules/high_level/data_index.md):
+データを取り込んだ後、LlamaIndexはデータを簡単に取得できる形式にインデックス化するのに役立ちます。
+
+LlamaIndexは、生のドキュメントを中間表現に解析し、ベクトル埋め込みを計算し、データをメモリ上またはディスク上に格納します。
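+
+例えば、インデックス作成ステージの最小限のスケッチは次のとおりです(OpenAIキーが設定済みであることを前提としています):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// データをDocumentに取り込み、インデックス化します
+// (埋め込みを計算し、インデックスに格納します)
+const document = new Document({ text: "あなたのデータのテキスト..." });
+const index = await VectorStoreIndex.fromDocuments([document]);
+```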
+
+### クエリステージ
+
+クエリパイプラインでは、ユーザーのクエリに基づいて最も関連性の高いコンテキストを取得し、それをLLMに渡して応答を合成します。
+
+これにより、LLMは元のトレーニングデータにない最新の知識を得ることができます(幻覚も減少します)。
+
+クエリステージの主な課題は、(潜在的に多数の)ナレッジベースに対して検索、オーケストレーション、および推論を行うことです。
+
+LlamaIndexは、Q&A(クエリエンジン)、チャットボット(チャットエンジン)、またはエージェントの一部として使用するためのRAGパイプラインを構築および統合するのに役立つ組み合わせ可能なモジュールを提供します。
+
+これらのビルディングブロックは、ランキングの優先順位を反映させるためにカスタマイズすることもでき、構造化された方法で複数のナレッジベースに対して推論を行うために組み合わせることもできます。
+
+![](./_static/concepts/querying.jpg)
+
+#### ビルディングブロック
+
+[**Retrievers(リトリーバー)**](./modules/low_level/retriever.md):
+リトリーバーは、クエリが与えられたときにナレッジベース(つまりインデックス)から関連するコンテキストを効率的に取得する方法を定義します。
+具体的な検索ロジックは、異なるインデックスによって異なりますが、最も一般的なのはベクトルインデックスに対する密な検索です。
+
+[**Response Synthesizers(レスポンスシンセサイザー)**](./modules/low_level/response_synthesizer.md):
+レスポンスシンセサイザーは、LLMからの応答を生成するために、ユーザークエリと取得したテキストチャンクのセットを使用します。
+
+#### パイプライン
+
+[**クエリエンジン**](./modules/high_level/query_engine.md):
+クエリエンジンは、データに対して質問をするためのエンドツーエンドのパイプラインです。
+自然言語のクエリを受け取り、応答とともにLLMに渡された参照コンテキストを返します。
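+
+例えば、最小限のスケッチは次のとおりです(上のインデックス作成ステージで作成した`index`を前提としています):
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("データに関する質問");
+console.log(response.toString());
+```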
+
+[**チャットエンジン**](./modules/high_level/chat_engine.md):
+チャットエンジンは、単一の質問と回答ではなく、データとの対話を行うためのエンドツーエンドのパイプラインです。
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..426bfa520dff5805ab075df5ad356fb2e524cfb5
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 4
+---
+
+# エンドツーエンドの例
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+リポジトリ内のLlamaIndex.TSを使用したいくつかのエンドツーエンドの例を含めています。
+
+以下の例をチェックしてみるか、Dev-Docsが提供する対話型のGithub Codespaceチュートリアルで数分で試してみてください。[こちら](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json)からアクセスできます。
+
+## [チャットエンジン](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+ファイルを読み込んでLLMとチャットします。
+
+## [ベクトルインデックス](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+ベクトルインデックスを作成し、クエリを実行します。ベクトルインデックスは埋め込みを使用して、トップkの関連ノードを取得します。デフォルトでは、kの値は2です。
+
+## [サマリーインデックス](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+サマリーインデックス(リストインデックス)を作成し、クエリを実行します。この例では、`LLMRetriever`も使用されており、回答を生成する際に使用する最適なノードを選択するためにLLMが使用されます。
+
+## [インデックスの保存/読み込み](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+ベクトルインデックスの作成と読み込み。LlamaIndex.TSでは、ストレージコンテキストオブジェクトが作成されると、ディスクへの永続化が自動的に行われます。
+
+## [カスタマイズされたベクトルインデックス](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+ベクトルインデックスを作成し、クエリを実行すると同時に、`LLM`、`ServiceContext`、および`similarity_top_k`を設定します。
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+OpenAI LLMを作成し、直接チャットに使用します。
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Llama-2 LLMを作成し、直接チャットに使用します。
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+`SubQuestionQueryEngine`を使用しています。これは複雑なクエリを複数の質問に分割し、すべてのサブクエリの回答を集約します。
+
+## [低レベルモジュール](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+この例では、実際のクエリエンジンの必要性をなくすために、いくつかの低レベルのコンポーネントを使用しています。これらのコンポーネントは、どこでも、どのアプリケーションでも使用できるだけでなく、カスタマイズしてサブクラス化して独自のニーズに合わせることもできます。
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..ff2a474e41c1e4b6387c3abae8f90c171dde8107
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# 環境
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndexは現在、公式にNodeJS 18とNodeJS 20をサポートしています。
+
+## NextJSアプリケーションルーター
+
+NextJSアプリケーションルーターのルートハンドラー/サーバーレス関数を使用している場合、NodeJSモードを使用する必要があります。
+
+```js
+export const runtime = "nodejs"; // デフォルト
+```
+
+また、next.config.jsでpdf-parseの例外を追加する必要があります。
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // NextJSアプリルーターでpdf-parseを実際のNodeJSモードで動作させます
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..902b2871351206ee5e8264d6f7502555f73d2132
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# インストールとセットアップ
+
+```このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。```
+
+
+NodeJSのバージョンが18以上であることを確認してください。
+
+
+## create-llamaを使用する
+
+LlamaIndexを始める最も簡単な方法は、`create-llama`を使用することです。このCLIツールを使用すると、すぐに新しいLlamaIndexアプリケーションの構築を開始し、すべてがセットアップされます。
+
+以下を実行してください。
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+開始するために上記を実行してください。アプリケーションが生成されたら、以下を実行して開発サーバーを起動します。
+
+```bash npm2yarn
+npm run dev
+```
+
+開発サーバーを起動した後、[http://localhost:3000](http://localhost:3000)にアクセスしてアプリケーションを確認できます。
+
+## NPMからのインストール
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### 環境変数
+
+デフォルトでは、私たちの例ではOpenAIを使用しています。次のようにOpenAIキーを設定する必要があります。
+
+```bash
+export OPENAI_API_KEY="sk-......" # https://platform.openai.com/account/api-keysから取得したキーに置き換えてください
+```
+
+毎回自動的にロードされるようにしたい場合は、.zshrc/.bashrcに追加してください。
+
+警告: OpenAIキーをバージョン管理にチェックインしないでください。
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..75b8a0dc9c775607c750b621fdcbdb0ba486e085
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# LlamaIndex.TSとは何ですか?
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndex.TSは、LLMアプリケーションがプライベートまたはドメイン固有のデータを取り込み、構造化し、アクセスするためのデータフレームワークです。Pythonパッケージも利用可能ですが([こちら](https://docs.llamaindex.ai/en/stable/)を参照)、LlamaIndex.TSはTypeScriptでの使用に最適化されたシンプルなパッケージで、コア機能を提供しています。
+
+## 🚀 LlamaIndex.TSを選ぶ理由
+
+LLMは、人間と推論データの間の自然言語インターフェースを提供します。広く利用可能なモデルは、Wikipediaやメーリングリスト、教科書、ソースコードなど、公に利用可能な大量のデータで事前にトレーニングされています。
+
+LLMを基に構築されたアプリケーションでは、これらのモデルにプライベートまたはドメイン固有のデータを追加する必要があります。残念ながら、そのデータはアプリケーションやデータストアに分散して存在していることがあります。APIの背後にある、SQLデータベース内にある、またはPDFやスライドデッキに閉じ込められているかもしれません。
+
+それが**LlamaIndex.TS**の役割です。
+
+## 🦙 LlamaIndex.TSはどのように役立ちますか?
+
+LlamaIndex.TSは以下のツールを提供します:
+
+- **データの読み込み**:既存の`.txt`、`.pdf`、`.csv`、`.md`、`.docx`データを直接取り込むことができます。
+- **データのインデックス**:データを中間表現で構造化し、LLMが簡単かつ高速に消費できるようにします。
+- **エンジン**:データへの自然言語アクセスを提供します。例えば:
+  - クエリエンジンは、知識拡張出力のための強力な検索インターフェースです。
+  - チャットエンジンは、データとの「やり取り」を行うための対話型インターフェースです。
+
+## 👨‍👩‍👧‍👦 LlamaIndexは誰のためのものですか?
+
+LlamaIndex.TSは、JavaScriptとTypeScriptを使用してLLMアプリを構築するすべての人にとって必要なツールのコアセットを提供します。
+
+当社のハイレベルAPIを使用すると、初心者のユーザーでもLlamaIndex.TSを使用してデータを取り込み、クエリを実行することができます。
+
+より複雑なアプリケーションでは、低レベルのAPIを使用して、データコネクタ、インデックス、リトリーバ、クエリエンジンなどのモジュールをカスタマイズおよび拡張することができます。これにより、ユーザーのニーズに合わせることができます。
+
+## はじめに
+
+`npm install llamaindex`
+
+私たちのドキュメントには、[インストール手順](./installation.md)と[スターターチュートリアル](./starter.md)が含まれており、最初のアプリケーションの構築をサポートします。
+
+一度準備ができたら、[ハイレベルなコンセプト](./concepts.md)では、LlamaIndexのモジュラーアーキテクチャの概要を説明しています。より実践的な例については、[エンドツーエンドのチュートリアル](./end_to_end.md)を参照してください。
+
+## 🗺️ エコシステム
+
+LlamaIndexをダウンロードしたり、貢献したりするには、以下を参照してください:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## コミュニティ
+
+ヘルプが必要ですか?機能の提案はありますか?LlamaIndexコミュニティに参加しましょう:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..8a12180ff7847f1aa45ca79387dd1533923db46b
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# チャットエンジン (ChatEngine)
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+チャットエンジンは、インデックス内のデータと素早く簡単にチャットするための方法です。
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// チャットを開始する
+const response = await chatEngine.chat(query);
+```
+
+## API リファレンス
+
+- [ContextChatEngine (コンテキストチャットエンジン)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (コンデンスクエスチョンチャットエンジン)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..167479ea797a89681688e71e50405e79d451db11
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# インデックス
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+インデックスは、データの基本的なコンテナと組織です。LlamaIndex.TSでは、2つのインデックスがサポートされています:
+
+- `VectorStoreIndex` - 応答を生成する際に、トップkの`Node`をLLMに送信します。デフォルトのトップkは2です。
+- `SummaryIndex` - 応答を生成するために、インデックス内のすべての`Node`をLLMに送信します。
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "テスト" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## APIリファレンス
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..f735fe5c6a55a179217f8cc714b522fe4e08a445
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# リーダー / ローダー
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndex.TSは、`SimpleDirectoryReader`クラスを使用してフォルダから簡単にファイルを読み込むことができます。現在、`.txt`、`.pdf`、`.csv`、`.md`、`.docx`ファイルがサポートされており、将来的にはさらに多くのファイル形式がサポートされる予定です!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API リファレンス
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..3245359130645c0dc03860cb7387a7c6cf165788
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,20 @@
+---
+sidebar_position: 0
+---
+
+# ドキュメントとノード
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+`Document`と`Node`は、あらゆるインデックスの基本的な構成要素です。これらのオブジェクトのAPIは似ていますが、`Document`オブジェクトがファイル全体を表すのに対し、`Node`は元のドキュメントを小さく分割したもので、LLMとQ&Aに適しています。
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "テキスト", metadata: { key: "val" } });
+```
+
+## APIリファレンス
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..f27aacb2a112f02f618ed919a417d82b52e391ec
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,65 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (クエリエンジン)
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+クエリエンジンは`Retriever`と`ResponseSynthesizer`をパイプラインにラップし、クエリ文字列を使用してノードを取得し、それをLLMに送信して応答を生成します。
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("クエリ文字列");
+```
+
+## サブクエスチョンクエリエンジン (SubQuestionQueryEngine)
+
+サブクエスチョンクエリエンジンの基本的なコンセプトは、単一のクエリを複数のサブクエスチョンに分割し、それぞれに対する回答を取得したうえで、それらを単一の一貫した応答としてユーザーに返すことです。これは、データソースを反復処理しながら「ステップバイステップで考える」プロンプト手法と考えることができます!
+
+### はじめに
+
+サブクエスチョンクエリエンジンを試す最も簡単な方法は、[examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)のsubquestion.tsファイルを実行することです。
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### ツール
+
+サブクエスチョンクエリエンジンはツール(Tools)を使って実装されています。ツールの基本的な考え方は、大規模言語モデルが実行可能な選択肢であるということです。この場合、SubQuestionQueryEngineはQueryEngineTool(QueryEngine上でクエリを実行するためのツール)に依存しています。これにより、質問の内容に応じて異なるドキュメントにクエリを実行する選択肢をモデルに与えることができます。さらに、SubQuestionQueryEngineがウェブ検索やWolfram Alphaで回答を取得するツールを使う、といった構成も考えられます。
+
+ツールについて詳しくは、LlamaIndex Pythonドキュメントを参照してください。https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
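+以下は、QueryEngineToolを使ってSubQuestionQueryEngineを構成する最小限のスケッチです(ツール名や説明文、質問文はこのドキュメントにはない仮の値です):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// index はすでに作成済みのインデックスと仮定します
+const queryEngine = index.asQueryEngine();
+
+// クエリエンジンをツールとしてラップします(name / description は仮の値です)
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine,
+      metadata: {
+        name: "essay_tool",
+        description: "エッセイの内容に関する質問に答えます",
+      },
+    },
+  ],
+});
+
+// 複雑な質問は自動的に複数のサブクエスチョンに分割されます
+const response = await subQuestionEngine.query(
+  "著者は大学で何を学び、その後何をしましたか?",
+);
+```
+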
+## API リファレンス
+
+- [RetrieverQueryEngine (リトリーバークエリエンジン)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (サブクエスチョンクエリエンジン)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (クエリエンジンツール)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f6c64d08e02459bd9e2ade3702fac713f23dfb3
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,59 @@
+# コアモジュール
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndex.TSには、いくつかのコアモジュールがあります。これらは、すぐに始めるための高レベルモジュールと、必要に応じて主要なコンポーネントをカスタマイズするための低レベルモジュールに分かれています。
+
+## 高レベルモジュール
+
+- [**Document**](./high_level/documents_and_nodes.md): ドキュメントは、テキストファイル、PDFファイル、または他の連続したデータを表します。
+
+- [**Node**](./high_level/documents_and_nodes.md): 基本的なデータの構築ブロックです。一般的には、これらはドキュメントの一部であり、埋め込みモデルとLLMに供給するのに十分に小さな管理可能なピースに分割されます。
+
+- [**Reader/Loader**](./high_level/data_loader.md): リーダーまたはローダーは、現実世界のドキュメントを受け取り、Documentクラスに変換してIndexとクエリで使用できるようにするものです。現在、プレーンテキストファイルとPDFをサポートしており、今後さらに多くの形式をサポートする予定です。
+
+- [**Indexes**](./high_level/data_index.md): インデックスは、ノードとそれらのノードの埋め込みを格納します。
+
+- [**QueryEngine**](./high_level/query_engine.md): クエリエンジンは、入力したクエリを生成し、結果を返すものです。クエリエンジンは、通常、事前に構築されたプロンプトとIndexから選択されたノードを組み合わせて、LLMがクエリに答えるために必要なコンテキストを提供します。
+
+- [**ChatEngine**](./high_level/chat_engine.md): ChatEngineは、Indexと対話するチャットボットを構築するのに役立ちます。
+
+## 低レベルモジュール
+
+- [**LLM**](./low_level/llm.md): LLMクラスは、OpenAI GPT-4、Anthropic Claude、またはMeta LLaMAなどの大規模言語モデルプロバイダーに対する統一されたインターフェースです。独自の大規模言語モデルに接続するために、このクラスをサブクラス化することができます。
+
+- [**Embedding**](./low_level/embedding.md): 埋め込みは、浮動小数点数のベクトルとして表されます。OpenAIのtext-embedding-ada-002は、デフォルトの埋め込みモデルであり、生成される各埋め込みは1,536個の浮動小数点数で構成されています。もう1つの人気のある埋め込みモデルはBERTであり、各ノードを表すために768個の浮動小数点数を使用します。埋め込みを使用するためのさまざまなユーティリティを提供しており、3つの類似性計算オプションと最大限のマージナルリレバンスを含んでいます。
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): テキストの分割戦略は、埋め込み検索の全体的な効果に非常に重要です。現在はデフォルトの分割方法がありますが、万能の解決策はありません。ソースドキュメントに応じて、異なる分割サイズや戦略を使い分けるとよいでしょう。現在、固定サイズでの分割、オーバーラップ付きの固定サイズでの分割、文での分割、段落での分割をサポートしています。テキストスプリッターは、`Document`を`Node`に分割する際にNodeParserによって使用されます。
+
+- [**Retriever**](./low_level/retriever.md): Retrieverは、実際にインデックスからノードを選択する役割を果たします。ここでは、クエリごとにより多くまたはより少ないノードを取得したり、類似性関数を変更したり、アプリケーションの個々のユースケースごとに独自のリトリーバーを作成したりすることができます。たとえば、コードコンテンツとテキストコンテンツに対して別々のリトリーバーを使用したい場合があります。
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizerは、クエリ文字列を受け取り、`Node`のリストを使用して応答を生成する役割を担っています。これには、すべてのコンテキストを反復処理して回答を洗練させる方法や、サマリのツリーを構築してルートサマリを返す方法など、さまざまな形式があります。
+
+- [**Storage**](./low_level/storage.md): 埋め込みモデルを毎回実行し直す代わりに、いずれかの時点でインデックス、データ、ベクトルを保存したくなるでしょう。IndexStore、DocStore、VectorStore、およびKVStoreは、それを実現するための抽象化です。これらを組み合わせるとStorageContextが形成されます。現在、埋め込みをファイルシステム上のファイル(または仮想インメモリファイルシステム)に永続化することができますが、ベクトルデータベースへの統合も積極的に追加しています。
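+
+以下は、これらの低レベルモジュールを組み合わせて使用する最小限のスケッチです(仮の例です。各モジュールの詳細はそれぞれのページを参照してください):
+
+```typescript
+import {
+  Document,
+  OpenAI,
+  OpenAIEmbedding,
+  SimpleNodeParser,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// LLM・埋め込みモデル・ノードパーサーを ServiceContext にまとめます
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
+  embedModel: new OpenAIEmbedding(),
+  nodeParser: new SimpleNodeParser(),
+});
+
+// カスタマイズした ServiceContext でインデックスを構築します
+const index = await VectorStoreIndex.fromDocuments(
+  [new Document({ text: "テスト" })],
+  { serviceContext },
+);
+```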
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ca2f81cc7f2eb2a70689051f6c265da5add3908
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 1
+---
+
+# 埋め込み
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndexの埋め込みモデルは、テキストの数値表現を作成する責任を持ちます。デフォルトでは、LlamaIndexはOpenAIの`text-embedding-ada-002`モデルを使用します。
+
+これは、`ServiceContext`オブジェクトで明示的に設定することができます。
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## APIリファレンス
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc4460e223b2863f6b1dab39268b061280eae79f
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LLMはテキストを読み取り、クエリに対して自然言語の応答を生成する責任を持っています。デフォルトでは、LlamaIndex.TSは`gpt-3.5-turbo`を使用します。
+
+LLMは`ServiceContext`オブジェクトで明示的に設定することができます。
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## APIリファレンス
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..f0f529af18c3691a2187475ae35aa30084e2aabb
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser (ノードパーサー)
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+`NodeParser` は LlamaIndex の中で、`Document` オブジェクトをより管理しやすい `Node` オブジェクトに分割する役割を担っています。`.fromDocuments()` を呼び出すと、`ServiceContext` の中の `NodeParser` が自動的にこれを行います。また、事前にドキュメントを分割するためにも使用することができます。
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "私は10歳です。ジョンは20歳です。" }),
+]);
+```
+
+## TextSplitter (テキスト分割器)
+
+デフォルトのテキスト分割器は、文単位でテキストを分割します。生のテキストを分割するためのスタンドアロンモジュールとしても使用できます。
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("こんにちは、世界");
+```
+
+## API リファレンス
+
+- [SimpleNodeParser (シンプルノードパーサー)](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter (センテンススプリッター)](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..6103c3fd43e0e13cc990b0647601ec3ce51c52ce
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (レスポンス合成器)
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+ResponseSynthesizerは、クエリ、ノード、およびプロンプトテンプレートをLLMに送信して応答を生成する責任を持ちます。応答を生成するためのいくつかの主要なモードがあります:
+
+- `Refine`(洗練):各取得したテキストチャンクを順番に処理して回答を「作成および洗練」します。これにより、各ノードごとに別々のLLM呼び出しが行われます。詳細な回答に適しています。
+- `CompactAndRefine`(コンパクトおよび洗練)(デフォルト):各LLM呼び出し中にプロンプトを「コンパクト化」し、最大プロンプトサイズ内に収まるだけのテキストチャンクを詰め込みます。1つのプロンプトに詰め込むチャンクが多すぎる場合は、「作成および洗練」を行い、複数のコンパクトプロンプトを通じて回答を生成します。`refine`と同じですが、LLM呼び出し回数が少なくなるはずです。
+- `TreeSummarize`(ツリー要約):テキストチャンクのセットとクエリが与えられた場合、再帰的にツリーを構築し、ルートノードを応答として返します。要約の目的に適しています。
+- `SimpleResponseBuilder`(シンプルな応答ビルダー):テキストチャンクのセットとクエリが与えられた場合、クエリを各テキストチャンクに適用し、応答を配列に蓄積します。すべての応答の連結された文字列を返します。各テキストチャンクに対して個別に同じクエリを実行する必要がある場合に適しています。
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "私は10歳です。" }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "ジョンは20歳です。" }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "私は何歳ですか?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## APIリファレンス
+
+- [ResponseSynthesizer (レスポンス合成器)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (洗練)](../../api/classes/Refine.md)
+- [CompactAndRefine (コンパクトおよび洗練)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (ツリー要約)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (シンプルな応答ビルダー)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..3d186afc438f2dbbd41e566fc235229c13627458
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# レトリーバー (Retriever)
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndexにおけるレトリーバーは、クエリ文字列を使用してインデックスから`Node`を取得するために使用されます。`VectorIndexRetriever`は、トップ-kの最も類似したノードを取得します。一方、`SummaryIndexRetriever`は、クエリに関係なくすべてのノードを取得します。
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// ノードを取得します!
+const nodesWithScore = await retriever.retrieve("クエリ文字列");
+```
+
+## API リファレンス
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..dfa953fbd451a7ee65a083757771cb4228619590
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 7
+---
+
+# ストレージ
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+LlamaIndex.TSのストレージは、`StorageContext`オブジェクトを設定した後に自動的に機能します。単に`persistDir`を設定し、インデックスにアタッチするだけです。
+
+現時点では、ディスクからの保存と読み込みのみがサポートされており、将来的には他の統合も計画されています!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "テストテキスト" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## APIリファレンス
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..7613b73752eeef75eba1d7eaaab042b170c08946
--- /dev/null
+++ b/apps/docs/i18n/ja/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,56 @@
+---
+sidebar_position: 2
+---
+
+# スターターチュートリアル
+
+`このドキュメントは自動的に翻訳されており、誤りを含んでいる可能性があります。変更を提案するためにプルリクエストを開くことを躊躇しないでください。`
+
+[LlamaIndex.TSをNPMでインストール](installation)し、OpenAIキーを設定したら、最初のアプリを開始する準備ができます。
+
+新しいフォルダで以下のコマンドを実行してください:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # 必要な場合
+```
+
+`example.ts`というファイルを作成します。このコードは、サンプルデータをロードしてドキュメントを作成し、それをインデックス化し(OpenAIを使用して埋め込みを作成します)、データに関する質問に答えるためのクエリエンジンを作成します。
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Nodeでabramov.txtからエッセイをロードする
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // エッセイを含むDocumentオブジェクトを作成する
+  const document = new Document({ text: essay });
+
+  // テキストを分割し、埋め込みを作成します。VectorStoreIndexに保存します
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // インデックスにクエリを実行する
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("著者は大学で何をしましたか?");
+
+  // レスポンスを出力する
+  console.log(response.toString());
+}
+
+main();
+```
+
+その後、以下のコマンドで実行できます:
+
+```bash
+npx ts-node example.ts
+```
+
+もっと学びたいですか?[NextJSプレイグラウンド](https://llama-playground.vercel.app/)をチェックしてみてください。ソースコードは[こちら](https://github.com/run-llama/ts-playground)で入手できます。
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..d86ffb05e43e11a676edee88927fc1b47961ceb1
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,110 @@
+---
+sidebar_position: 3
+---
+
+# 고수준 개념
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex.TS는 사용자 정의 데이터를 사용하여 LLM 기반 애플리케이션 (예: Q&A, 챗봇)을 구축하는 데 도움이 됩니다.
+
+이 고수준 개념 가이드에서는 다음을 배울 수 있습니다:
+
+- LLM이 사용자의 데이터를 사용하여 질문에 답하는 방법.
+- 질의 파이프라인을 구성하기 위한 LlamaIndex.TS의 주요 개념 및 모듈.
+
+## 데이터 전체에서 질문에 답하기
+
+LlamaIndex는 데이터와 함께 LLM을 사용할 때 두 단계 방법을 사용합니다:
+
+1. **인덱싱 단계**: 지식 베이스를 준비하고
+2. **질의 단계**: 질문에 대답하기 위해 LLM에게 관련 컨텍스트를 검색하여 전달
+
+![](./_static/concepts/rag.jpg)
+
+이 프로세스는 검색 증강 생성 (RAG)로도 알려져 있습니다.
+
+LlamaIndex.TS는 이러한 단계를 모두 쉽게 수행할 수 있는 필수 도구를 제공합니다.
+
+자세한 내용을 살펴보겠습니다.
+
+### 인덱싱 단계
+
+LlamaIndex.TS는 데이터 커넥터와 인덱스의 모음을 사용하여 지식 베이스를 준비하는 데 도움이 됩니다.
+
+![](./_static/concepts/indexing.jpg)
+
+[**데이터 로더**](./modules/high_level/data_loader.md):
+데이터 커넥터 (즉, `Reader`)는 다양한 데이터 소스와 데이터 형식에서 데이터를 간단한 `Document` 표현 (텍스트 및 간단한 메타데이터)으로 가져옵니다.
+
+[**문서 / 노드**](./modules/high_level/documents_and_nodes.md): `Document`는 모든 데이터 소스 (예: PDF, API 출력 또는 데이터베이스에서 검색한 데이터)를 감싸는 일반적인 컨테이너입니다. `Node`는 LlamaIndex에서 데이터의 원자 단위이며 소스 `Document`의 "덩어리"를 나타냅니다. 이것은 메타데이터와 관계 (다른 노드와의 관계)를 포함하여 정확하고 표현력있는 검색 작업을 가능하게 하는 풍부한 표현입니다.
+
+[**데이터 인덱스**](./modules/high_level/data_index.md):
+데이터를 가져온 후에는 LlamaIndex가 검색하기 쉬운 형식으로 데이터를 인덱싱하는 데 도움이 됩니다.
+
+LlamaIndex는 내부적으로 원시 문서를 중간 표현으로 파싱하고 벡터 임베딩을 계산하며 데이터를 메모리에 저장하거나 디스크에 저장합니다.
+
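+다음은 인덱싱 단계를 보여주는 최소한의 스케치입니다 (`./data` 경로는 가정된 값입니다):
+
+```typescript
+import { SimpleDirectoryReader, VectorStoreIndex } from "llamaindex";
+
+// 데이터 커넥터로 문서를 로드합니다
+const documents = await new SimpleDirectoryReader().loadData("./data");
+
+// 문서를 파싱하고 임베딩을 계산하여 인덱스를 구축합니다
+const index = await VectorStoreIndex.fromDocuments(documents);
+```
+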
+"
+
+### 질의 단계
+
+질의 단계에서 질의 파이프라인은 사용자 질의에 가장 관련성 높은 컨텍스트를 검색하고,
+그것을 LLM에게 전달하여 응답을 합성합니다.
+
+이를 통해 LLM은 원래의 훈련 데이터에 포함되지 않은 최신 지식을 활용할 수 있으며,
+환각(hallucination) 현상도 줄일 수 있습니다.
+
+질의 단계에서의 주요 도전은 (잠재적으로 많은) 지식 베이스에 대한 검색, 조율 및 추론입니다.
+
+LlamaIndex는 Q&A (질의 엔진), 챗봇 (채팅 엔진) 또는 에이전트의 일부로 사용하기 위해 RAG 파이프라인을 구축하고 통합하는 데 도움이 되는 조립 가능한 모듈을 제공합니다.
+
+이러한 구성 요소는 순위 지정 기본 설정을 반영하고 구조화된 방식으로 여러 지식 베이스에 대한 추론을 수행하기 위해 사용자 정의할 수 있습니다.
+
+![](./_static/concepts/querying.jpg)
+
+#### 구성 요소
+
+[**검색기**](./modules/low_level/retriever.md):
+검색기는 쿼리가 주어졌을 때 지식 베이스 (즉, 인덱스)에서 관련 컨텍스트를 효율적으로 검색하는 방법을 정의합니다.
+구체적인 검색 로직은 다양한 인덱스에 따라 다르며, 가장 인기 있는 것은 벡터 인덱스에 대한 밀집 검색입니다.
+
+[**응답 합성기**](./modules/low_level/response_synthesizer.md):
+응답 합성기는 LLM에서 사용자 쿼리와 검색된 텍스트 청크 세트를 사용하여 응답을 생성합니다.
+
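+다음은 검색기와 응답 합성기를 직접 조합하는 최소한의 스케치입니다 (가정에 기반한 예시이며, 자세한 내용은 각 모듈 문서를 참조하세요):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// index는 이미 생성된 VectorStoreIndex라고 가정합니다
+const retriever = index.asRetriever();
+
+// 쿼리와 가장 관련성이 높은 노드를 검색합니다
+const nodesWithScore = await retriever.retrieve("사용자 질문");
+
+// 검색된 노드를 바탕으로 응답을 합성합니다
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "사용자 질문",
+  nodesWithScore,
+);
+```
+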
+"
+
+#### 파이프라인
+
+[**질의 엔진**](./modules/high_level/query_engine.md):
+질의 엔진은 데이터에 대해 질문을 할 수 있는 종단 간 파이프라인입니다.
+자연어 질의를 입력으로 받아 응답과 함께 LLM에게 전달되는 참조 컨텍스트를 반환합니다.
+
+[**채팅 엔진**](./modules/high_level/chat_engine.md):
+채팅 엔진은 단일 질문 및 답변이 아닌 데이터와의 다중 질문 및 대화를 위한 종단 간 파이프라인입니다.
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..00e4ac3d74cefe31be6c1f8962ad0d26b68029b9
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 4
+---
+
+# 엔드 투 엔드 예제
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+저희는 저장소에 LlamaIndex.TS를 사용한 여러 엔드 투 엔드 예제를 포함하고 있습니다.
+
+아래 예제를 확인하거나 [여기](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json)에서 제공되는 Dev-Docs의 대화형 Github Codespace 튜토리얼을 사용하여 몇 분 안에 시도해보고 완료할 수 있습니다:
+
+## [채팅 엔진](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+파일을 읽고 LLM과 관련하여 채팅하세요.
+
+## [벡터 인덱스](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+벡터 인덱스를 생성하고 쿼리합니다. 벡터 인덱스는 임베딩을 사용하여 가장 관련성이 높은 상위 k개의 노드를 가져옵니다. 기본적으로, 상위 k는 2입니다.
+
+## [요약 인덱스 (Summary Index)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+요약 인덱스를 생성하고 쿼리합니다. 이 예제는 또한 `LLMRetriever`를 사용하며, 답변을 생성할 때 사용할 최적의 노드를 LLM이 선택하도록 합니다.
+
+"
+
+## [인덱스 저장 / 불러오기](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+벡터 인덱스를 생성하고 불러옵니다. LlamaIndex.TS에서는 저장소 컨텍스트 객체가 생성되면 자동으로 디스크에 지속성이 유지됩니다.
+
+## [사용자 정의 벡터 인덱스](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+벡터 인덱스를 생성하고 쿼리하면서 `LLM`, `ServiceContext`, `similarity_top_k`를 구성합니다.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+OpenAI LLM을 생성하고 채팅에 직접 사용하세요.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Llama-2 LLM을 생성하고 채팅에 직접 사용하세요.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+`SubQuestionQueryEngine`를 사용하여 복잡한 쿼리를 여러 개의 하위 질문으로 분할하고, 그에 따라 모든 하위 질문에 대한 응답을 집계합니다.
+
+"
+
+## [저수준 모듈](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+이 예제는 실제 쿼리 엔진이 필요하지 않은 여러 저수준 컴포넌트를 사용합니다. 이러한 컴포넌트는 어디에서나 어떤 애플리케이션에서든 사용할 수 있으며, 필요에 맞게 사용자 정의하거나 서브클래스화하여 사용할 수 있습니다.
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..2da3bba0efacad137db7241b5e86d6066d1ea950
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# 환경
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex는 현재 공식적으로 NodeJS 18과 NodeJS 20을 지원합니다.
+
+## NextJS 앱 라우터
+
+NextJS 앱 라우터 라우트 핸들러/서버리스 함수를 사용하는 경우, NodeJS 모드를 사용해야 합니다:
+
+```js
+export const runtime = "nodejs"; // 기본값
+```
+
+그리고 next.config.js에서 pdf-parse에 대한 예외를 추가해야 합니다.
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // pdf-parse를 실제 NodeJS 모드로 NextJS 앱 라우터에 추가합니다.
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3bffdb38bceab1c574cb0c74243b7c48312d7ce3
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# 설치 및 설정
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+
+NodeJS v18 이상이 설치되어 있는지 확인하세요.
+
+
+## create-llama 사용하기
+
+LlamaIndex를 시작하는 가장 쉬운 방법은 `create-llama`를 사용하는 것입니다. 이 CLI 도구를 사용하면 모든 설정이 완료된 새로운 LlamaIndex 애플리케이션을 빠르게 구축할 수 있습니다.
+
+다음을 실행하세요.
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+시작하려면 위 명령을 실행하세요. 앱이 생성되면 다음을 실행하여 개발 서버를 시작하세요.
+
+```bash npm2yarn
+npm run dev
+```
+
+개발 서버를 시작한 후 [http://localhost:3000](http://localhost:3000)을(를) 방문하여 앱을 확인할 수 있습니다.
+## NPM을 통한 설치
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### 환경 변수
+
+기본적으로 우리의 예제는 OpenAI를 사용합니다. OpenAI 키를 다음과 같이 설정해야 합니다:
+
+```bash
+export OPENAI_API_KEY="sk-......" # https://platform.openai.com/account/api-keys에서 키를 받아서 대체하세요
+```
+
+매번 자동으로 로드되도록 하려면 .zshrc/.bashrc에 추가하세요.
+
+경고: OpenAI 키를 버전 관리에 체크인하지 마세요.
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ca1df54eda4f58c50ab0b3bb8e161f273f78cbb
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# LlamaIndex.TS란 무엇인가요?
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex.TS는 LLM 애플리케이션에서 개인 또는 도메인별 데이터를 수집, 구조화 및 액세스하기 위한 데이터 프레임워크입니다. 파이썬 패키지도 사용할 수 있지만 (여기를 참조하세요: [링크](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS는 TypeScript와 함께 사용하기 위해 최적화된 간단한 패키지로 핵심 기능을 제공합니다.
+
+## 🚀 LlamaIndex.TS를 사용하는 이유는 무엇인가요?
+
+LLM은 인간과 추론된 데이터 간의 자연어 인터페이스를 제공합니다. 널리 사용되는 모델은 Wikipedia, 메일링 리스트, 교과서 및 소스 코드와 같은 대중적으로 사용 가능한 데이터에 대해 사전 훈련되어 있습니다.
+
+LLM을 기반으로 한 애플리케이션은 종종 이러한 모델을 개인 또는 도메인별 데이터로 보강해야 합니다. 그러나 이러한 데이터는 종종 애플리케이션 및 데이터 저장소 간에 분산되어 있습니다. API 뒤에 있거나 SQL 데이터베이스에 저장되어 있거나 PDF 및 슬라이드 덱에 갇혀 있을 수 있습니다.
+
+이럴 때 **LlamaIndex.TS**가 필요합니다.
+
+## 🦙 LlamaIndex.TS는 어떻게 도움이 될까요?
+
+LlamaIndex.TS는 다음과 같은 도구를 제공합니다:
+
+- **데이터 로딩** 기존의 `.txt`, `.pdf`, `.csv`, `.md` 및 `.docx` 데이터를 직접 수집합니다.
+- **데이터 인덱스** 중간 표현으로 데이터를 구조화하여 LLM이 소비하기 쉽고 성능이 우수합니다.
+- **엔진**은 데이터에 대한 자연어 액세스를 제공합니다. 예를 들어:
+  - 쿼리 엔진은 지식 증강 출력을 위한 강력한 검색 인터페이스입니다.
+  - 채팅 엔진은 데이터와의 다중 메시지 "왕복" 상호작용을 위한 대화형 인터페이스입니다.
+
+## 👨‍👩‍👧‍👦 LlamaIndex는 누구를 위한 것인가요?
+
+LlamaIndex.TS는 JavaScript와 TypeScript로 LLM 앱을 개발하는 모든 사람들을 위한 필수 도구 세트를 제공합니다.
+
+우리의 고수준 API를 사용하면 초보 사용자도 LlamaIndex.TS를 사용하여 데이터를 수집하고 쿼리할 수 있습니다.
+
+더 복잡한 애플리케이션의 경우, 저희의 저수준 API를 사용하여 고급 사용자가 모듈 (데이터 커넥터, 인덱스, 리트리버 및 쿼리 엔진)를 사용자의 요구에 맞게 사용자 정의하고 확장할 수 있습니다.
+
+## 시작하기
+
+`npm install llamaindex`
+
+저희 문서에는 [설치 지침](./installation.md)과 [스타터 튜토리얼](./starter.md)이 포함되어 있어 첫 번째 애플리케이션을 빌드할 수 있습니다.
+
+한 번 시작하면, [고수준 개념](./concepts.md)에서 LlamaIndex의 모듈식 아키텍처에 대한 개요를 확인할 수 있습니다. 더 많은 실전 예제를 원하신다면, [End-to-End 튜토리얼](./end_to_end.md)을 참조해주세요.
+
+"
+
+## 🗺️ 생태계
+
+LlamaIndex를 다운로드하거나 기여하려면 다음을 참조하세요:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## 커뮤니티
+
+도움이 필요하신가요? 기능 제안이 있으신가요? LlamaIndex 커뮤니티에 참여해보세요:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..53573abd3652a26533da0c64cb0e80d9c1e4c793
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# 채팅 엔진 (ChatEngine)
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+채팅 엔진은 인덱스 내의 데이터와 채팅하는 빠르고 간단한 방법입니다.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// 채팅 시작
+const response = await chatEngine.chat(query);
+```
+
+## API 참조
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..be6a90113fb727a5c434596f37a0a071a479d980
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# 색인
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+색인은 데이터의 기본 컨테이너이자 조직 방법입니다. LlamaIndex.TS는 두 가지 색인을 지원합니다:
+
+- `VectorStoreIndex` - 응답을 생성할 때 상위 k개의 `Node`를 LLM에 전송합니다. 기본적으로 상위 2개를 전송합니다.
+- `SummaryIndex` - 응답을 생성하기 위해 색인의 모든 `Node`를 LLM에 전송합니다.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API 참조
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..ebe94ef35eee11a12fb809d8974868bf41ff9293
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# 리더 / 로더
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex.TS는 `SimpleDirectoryReader` 클래스를 사용하여 폴더에서 파일을 쉽게 로드할 수 있습니다. 현재 `.txt`, `.pdf`, `.csv`, `.md` 및 `.docx` 파일이 지원되며, 앞으로 더 많은 파일 형식이 지원될 예정입니다!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API 참조
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..25ae68fa5d458ff818f890eb74d80425ea7cd785
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,20 @@
+---
+sidebar_position: 0
+---
+
+# 문서와 노드
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+`문서(Document)`와 `노드(Node)`는 모든 인덱스의 기본 구성 요소입니다. 이 객체들의 API는 유사하지만, `문서(Document)` 객체는 전체 파일을 나타내는 반면, `노드(Node)`는 해당 원본 문서의 작은 조각으로, LLM과 Q&A에 적합합니다.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "텍스트", metadata: { key: "val" } });
+```
+
+## API 참조
+
+- [문서(Document)](../../api/classes/Document.md)
+- [텍스트노드(TextNode)](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..c622b1778d998275f080417b75bc0581066dca4e
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,65 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (쿼리 엔진)
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+쿼리 엔진은 `Retriever`와 `ResponseSynthesizer`를 하나의 파이프라인으로 묶어서, 쿼리 문자열을 사용하여 노드를 가져온 다음 LLM에게 응답을 생성하도록 합니다.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("쿼리 문자열");
+```
+
+## Sub Question Query Engine (하위 질문 질의 엔진)
+
+하위 질문 질의 엔진의 기본 개념은 하나의 질의를 여러 개의 질의로 분할하고, 각 질의에 대한 답변을 가져와서 사용자에게 하나의 일관된 응답으로 결합하는 것입니다. 데이터 소스를 반복적으로 검토하여 "이를 단계별로 생각해보는" 프롬프트 기술로 생각할 수 있습니다!
+
+### 시작하기
+
+하위 질문 쿼리 엔진을 시작하는 가장 쉬운 방법은 [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)의 subquestion.ts 파일을 실행하는 것입니다.
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### 도구 (Tools)
+
+하위 질문 질의 엔진은 도구(Tools)로 구현되었습니다. 도구(Tools)의 기본 아이디어는 대형 언어 모델을 위한 실행 가능한 옵션입니다. 이 경우, 하위 질문 질의 엔진은 QueryEngineTool에 의존합니다. QueryEngineTool은 QueryEngine에서 질의를 실행하기 위한 도구입니다. 이를 통해 모델에게 예를 들어 다른 질문에 대해 다른 문서를 질의할 수 있는 옵션을 제공할 수 있습니다. 또한 하위 질문 질의 엔진은 웹에서 무언가를 검색하거나 Wolfram Alpha를 사용하여 답변을 가져오는 도구를 사용할 수도 있습니다.
+
+도구(Tools)에 대해 더 자세히 알아보려면 LlamaIndex Python 문서를 참조하십시오. https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
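+다음은 QueryEngineTool을 사용해 SubQuestionQueryEngine을 구성하는 최소한의 스케치입니다 (도구 이름과 설명, 질문은 이 문서에 없는 가정된 값입니다):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// index는 이미 생성된 인덱스라고 가정합니다
+const queryEngine = index.asQueryEngine();
+
+// 쿼리 엔진을 도구로 감쌉니다 (name / description은 가정된 값입니다)
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine,
+      metadata: {
+        name: "essay_tool",
+        description: "에세이 내용에 대한 질문에 답합니다",
+      },
+    },
+  ],
+});
+
+// 복잡한 질문은 자동으로 여러 하위 질문으로 분할됩니다
+const response = await subQuestionEngine.query(
+  "저자는 대학에서 무엇을 공부했고, 그 후에 무엇을 했나요?",
+);
+```
+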
+"
+
+## API 참조
+
+- [RetrieverQueryEngine (리트리버 쿼리 엔진)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (하위 질문 쿼리 엔진)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (쿼리 엔진 도구)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a9bb77c10e70f0c5e0bbfc5d1c4794829d94665
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,59 @@
+# 핵심 모듈
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex.TS는 빠르게 시작할 수 있는 고수준 모듈과 필요에 따라 핵심 구성 요소를 사용자 정의할 수 있는 저수준 모듈로 구성되어 있습니다.
+
+## 고수준 모듈
+
+- [**문서 (Document)**](./high_level/documents_and_nodes.md): 문서는 텍스트 파일, PDF 파일 또는 기타 연속적인 데이터를 나타냅니다.
+
+- [**노드 (Node)**](./high_level/documents_and_nodes.md): 기본 데이터 구성 요소입니다. 일반적으로 문서를 관리 가능한 작은 조각으로 분할한 것으로, 임베딩 모델과 LLM에 공급할 수 있는 크기입니다.
+
+- [**리더/로더 (Reader/Loader)**](./high_level/data_loader.md): 리더 또는 로더는 실제 세계에서 문서를 입력으로 받아 Document 클래스로 변환하여 인덱스와 쿼리에서 사용할 수 있도록 합니다. 현재 일반 텍스트 파일과 PDF를 지원하며, 더 많은 형식을 지원할 예정입니다.
+
+- [**인덱스 (Indexes)**](./high_level/data_index.md): 인덱스는 노드와 해당 노드의 임베딩을 저장합니다.
+
+- [**쿼리 엔진 (QueryEngine)**](./high_level/query_engine.md): 쿼리 엔진은 입력한 쿼리를 생성하고 결과를 반환합니다. 쿼리 엔진은 일반적으로 미리 작성된 프롬프트와 인덱스에서 선택한 노드를 결합하여 LLM이 쿼리에 대답하기 위해 필요한 컨텍스트를 제공합니다.
+
+- [**챗 엔진 (ChatEngine)**](./high_level/chat_engine.md): 챗 엔진은 인덱스와 상호 작용하는 챗봇을 구축하는 데 도움을 줍니다.
+
+## 저수준 모듈
+
+- [**LLM**](./low_level/llm.md): LLM 클래스는 OpenAI GPT-4, Anthropic Claude 또는 Meta LLaMA와 같은 대형 언어 모델 제공자를 통합 인터페이스로 제공합니다. 이 클래스를 서브클래스화하여 사용자 고유의 대형 언어 모델에 대한 커넥터를 작성할 수 있습니다.
+
+- [**Embedding**](./low_level/embedding.md): 임베딩은 부동 소수점 숫자의 벡터로 표현됩니다. OpenAI의 text-embedding-ada-002는 기본 임베딩 모델이며, 생성되는 각 임베딩은 1,536개의 부동 소수점 숫자로 구성됩니다. 다른 인기있는 임베딩 모델로는 BERT가 있으며, 각 노드를 표현하기 위해 768개의 부동 소수점 숫자를 사용합니다. 최대 마진 관련성을 포함한 3가지 유사도 계산 옵션과 임베딩 작업에 사용할 수 있는 여러 유틸리티를 제공합니다.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): 텍스트 분할 전략은 임베딩 검색의 전반적인 효과에 매우 중요합니다. 현재는 기본값이 있지만 일반적인 해결책은 없습니다. 소스 문서에 따라 다른 분할 크기와 전략을 사용하고 싶을 수 있습니다. 현재는 고정 크기로 분할, 겹치는 섹션을 포함한 고정 크기로 분할, 문장으로 분할 및 단락으로 분할하는 것을 지원합니다. 텍스트 분할기는 `Document`를 `Node`로 분할할 때 NodeParser에서 사용됩니다.
+
+- [**Retriever**](./low_level/retriever.md): Retriever는 실제로 인덱스에서 검색할 Node를 선택하는 역할을 합니다. 여기에서는 쿼리당 더 많거나 적은 Node를 검색하거나 유사도 함수를 변경하거나 응용 프로그램의 각 개별 사용 사례에 대해 별도의 검색기를 만들고 싶을 수 있습니다. 예를 들어, 코드 콘텐츠와 텍스트 콘텐츠에 대해 별도의 검색기를 사용하고 싶을 수 있습니다.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer는 쿼리 문자열을 가져와 `Node` 목록을 사용하여 응답을 생성하는 역할을 담당합니다. 이는 모든 컨텍스트를 반복하고 답변을 정제하거나 요약의 트리를 구축하고 루트 요약을 반환하는 등 다양한 형태로 이루어질 수 있습니다.
+
+- [**Storage**](./low_level/storage.md): 언젠가는 임베딩 모델을 매번 다시 실행하는 대신 인덱스, 데이터 및 벡터를 저장하고 싶을 것입니다. IndexStore, DocStore, VectorStore 및 KVStore는 이를 가능하게 하는 추상화입니다. 이들은 StorageContext를 형성합니다. 현재는 파일 시스템(또는 가상 인메모리 파일 시스템)에 임베딩을 지속적으로 저장할 수 있도록 허용하지만, Vector Database와의 통합도 활발히 추가하고 있습니다.
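+
+다음은 이러한 저수준 모듈을 조합해 사용하는 최소한의 스케치입니다 (가정에 기반한 예시이며, 자세한 내용은 각 모듈 문서를 참조하세요):
+
+```typescript
+import {
+  Document,
+  OpenAI,
+  OpenAIEmbedding,
+  SimpleNodeParser,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// LLM, 임베딩 모델, 노드 파서를 ServiceContext로 묶습니다
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
+  embedModel: new OpenAIEmbedding(),
+  nodeParser: new SimpleNodeParser(),
+});
+
+// 사용자 정의 ServiceContext로 인덱스를 구축합니다
+const index = await VectorStoreIndex.fromDocuments(
+  [new Document({ text: "테스트" })],
+  { serviceContext },
+);
+```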
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..4a17cb59b953b2e1963c6b5132f896bcae1f426c
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 1
+---
+
+# 임베딩 (Embedding)
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex의 임베딩 모델은 텍스트의 수치적 표현을 생성하는 역할을 담당합니다. 기본적으로 LlamaIndex는 OpenAI의 `text-embedding-ada-002` 모델을 사용합니다.
+
+이는 명시적으로 `ServiceContext` 객체에서 설정할 수 있습니다.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API 참조
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..620513012a18ca0b3a2008c56452f5741059bb19
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LLM은 텍스트를 읽고 질의에 대한 자연어 응답을 생성하는 역할을 담당합니다. 기본적으로 LlamaIndex.TS는 `gpt-3.5-turbo`를 사용합니다.
+
+LLM은 명시적으로 `ServiceContext` 객체에서 설정할 수 있습니다.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API 참조
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..56817a1f7632e6507c617e741d4d738f8c603715
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+`NodeParser`는 LlamaIndex에서 `Document` 객체를 더 작은 `Node` 객체로 분할하는 역할을 담당합니다. `.fromDocuments()`를 호출하면 `ServiceContext`의 `NodeParser`가 자동으로 이 작업을 수행합니다. 또는 문서를 미리 분할하는 데에도 사용할 수 있습니다.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "나는 10살입니다. 존은 20살입니다." }),
+]);
+```
+
+## TextSplitter
+
+기본 텍스트 분할기는 문장 단위로 텍스트를 분할합니다. 원시 텍스트를 분할하는 독립 모듈로도 사용할 수 있습니다.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("안녕하세요 세상");
+```
+
+## API 참조
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..b792306273eaadb8af0ccee051d5e2eb0dc62dc1
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (응답 합성기)
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+ResponseSynthesizer는 쿼리, 노드 및 프롬프트 템플릿을 LLM에 보내 응답을 생성하는 역할을 담당합니다. 응답을 생성하는 몇 가지 주요 모드가 있습니다:
+
+- `Refine` (정제): 각 검색된 텍스트 청크를 순차적으로 통과하여 답변을 "생성 및 정제"합니다. 각 노드에 대해 별도의 LLM 호출을 수행합니다. 자세한 답변에 적합합니다.
+- `CompactAndRefine` (기본값): 각 LLM 호출 중 프롬프트를 "압축"하여 최대 프롬프트 크기 내에 맞을 수 있는 텍스트 청크를 가능한 많이 채웁니다. 하나의 프롬프트에 채울 수 있는 청크가 너무 많은 경우, 여러 개의 압축 프롬프트를 통해 답변을 "생성 및 정제"합니다. `refine`와 동일하지만 LLM 호출 횟수가 적어집니다.
+- `TreeSummarize` (트리 요약): 주어진 텍스트 청크 세트와 쿼리를 사용하여 재귀적으로 트리를 구성하고 루트 노드를 응답으로 반환합니다. 요약 목적에 적합합니다.
+- `SimpleResponseBuilder` (간단한 응답 빌더): 주어진 텍스트 청크 세트와 쿼리를 사용하여 각 텍스트 청크에 쿼리를 적용하면서 응답을 배열에 누적합니다. 모든 응답의 연결된 문자열을 반환합니다. 각 텍스트 청크에 대해 별도로 동일한 쿼리를 실행해야 할 때 유용합니다.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "What age am I?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API 참조
+
+- [ResponseSynthesizer (응답 합성기)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (정제)](../../api/classes/Refine.md)
+- [CompactAndRefine (압축 및 정제)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (트리 요약)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (간단한 응답 빌더)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..e6b328f11e5f8a2c85f387d8262ecf835cdb51da
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# 리트리버 (Retriever)
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex에서 리트리버는 쿼리 문자열을 사용하여 인덱스에서 `Node`를 가져오는 데 사용되는 도구입니다. `VectorIndexRetriever`는 가장 유사한 상위 k개의 노드를 가져옵니다. 한편, `SummaryIndexRetriever`는 쿼리에 관계없이 모든 노드를 가져옵니다.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// 노드를 가져옵니다!
+const nodesWithScore = await retriever.retrieve("쿼리 문자열");
+```
+
+## API 참조
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..11f6b3252df487375b22ddae8f210197cbad6d16
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 7
+---
+
+# 저장소 (Storage)
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+LlamaIndex.TS의 저장소는 `StorageContext` 객체를 구성한 후 자동으로 작동합니다. `persistDir`을 구성하고 인덱스에 연결하기만 하면 됩니다.
+
+현재는 디스크에서의 저장 및 로드만 지원되며, 향후 통합이 계획되어 있습니다!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "테스트 텍스트" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API 참조
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..21c4d5698e438a4f57252ad1d0e32b8645019713
--- /dev/null
+++ b/apps/docs/i18n/ko/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,56 @@
+---
+sidebar_position: 2
+---
+
+# 스타터 튜토리얼
+
+`이 문서는 자동 번역되었으며 오류가 포함될 수 있습니다. 변경 사항을 제안하려면 Pull Request를 열어 주저하지 마십시오.`
+
+[LlamaIndex.TS를 NPM을 사용하여 설치](installation)하고 OpenAI 키를 설정한 후, 첫 번째 앱을 시작할 준비가 되었습니다:
+
+새 폴더에서:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # 필요한 경우
+```
+
+`example.ts` 파일을 생성하세요. 이 코드는 몇 가지 예제 데이터를 로드하고 문서를 생성한 다음 (OpenAI를 사용하여 임베딩을 생성하는) 색인을 만들고 데이터에 대한 질문에 대답하기 위한 쿼리 엔진을 생성합니다.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Node에서 abramov.txt에서 에세이 로드
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // 에세이로 Document 객체 생성
+  const document = new Document({ text: essay });
+
+  // 텍스트를 분할하고 임베딩을 생성하여 VectorStoreIndex에 저장
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // 색인에 쿼리
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("저자는 대학에서 무엇을 했나요?");
+
+  // 응답 출력
+  console.log(response.toString());
+}
+
+main();
+```
+
+그런 다음, 아래 명령으로 실행할 수 있습니다:
+
+```bash
+npx ts-node example.ts
+```
+
+더 알아보려면 [NextJS 플레이그라운드](https://llama-playground.vercel.app/)를 확인하세요. 소스 코드는 [여기](https://github.com/run-llama/ts-playground)에서 확인할 수 있습니다.
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6bc62b582f428f24d71e3b74cf1939688e6668a
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,80 @@
+---
+sidebar_position: 3
+---
+
+# Aukšto lygio sąvokos
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+LlamaIndex.TS padeda jums kurti LLM pagrįstas aplikacijas (pvz., klausimų ir atsakymų sistema, chatbot'as) naudojant pasirinktinius duomenis.
+
+Šiame aukšto lygio sąvokų vadove sužinosite:
+
+- kaip LLM gali atsakyti į klausimus naudojant jūsų pačių duomenis.
+- pagrindines sąvokas ir modulius LlamaIndex.TS, skirtus sudaryti savo užklausų grandinėms.
+
+## Klausimų atsakymas naudojant jūsų duomenis
+
+LlamaIndex naudoja dviejų etapų metodą, naudojant LLM su jūsų duomenimis:
+
+1. **indeksavimo etapas**: pasiruošimas žinių bazei ir
+2. **užklausos etapas**: atitinkamos konteksto iš žinių paieška, kad padėtų LLM atsakyti į klausimą
+
+![](./_static/concepts/rag.jpg)
+
+Šis procesas taip pat žinomas kaip "Retrieval Augmented Generation" (RAG).
+
+LlamaIndex.TS suteikia esminį įrankių rinkinį, kuris padaro abu žingsnius labai paprastus.
+
+Išsamiau išnagrinėkime kiekvieną etapą.
+
+### Indeksavimo etapas
+
+LlamaIndex.TS padeda jums paruošti žinių bazę su duomenų jungiklių ir indeksų rinkiniu.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Duomenų įkėlėjai**](./modules/high_level/data_loader.md):
+Duomenų jungiklis (t. y. `Reader`) įkelia duomenis iš įvairių duomenų šaltinių ir duomenų formatų į paprastą `Document` atstovavimą (tekstą ir paprastus metaduomenis).
+
+[**Dokumentai / Mazgai**](./modules/high_level/documents_and_nodes.md): `Document` yra bendrinis konteineris, apimančio bet kokį duomenų šaltinį - pavyzdžiui, PDF, API išvestį arba gautus duomenis iš duomenų bazės. `Node` yra atomiškas duomenų vienetas LlamaIndex ir atstovauja "gabalui" iš šaltinio `Document`. Tai turtingas atstovavimas, kuris apima metaduomenis ir ryšius (su kitais mazgais), kad būtų galima atlikti tikslų ir išraiškingą atkūrimo operacijas.
+
+[**Duomenų indeksai**](./modules/high_level/data_index.md):
+Kai įkėlėte savo duomenis, LlamaIndex padeda jums indeksuoti duomenis į lengvai atkurtiną formatą.
+
+Po dangčio LlamaIndex analizuoja žalius dokumentus į tarpinį atstovavimą, skaičiuoja vektorinius įdėlius ir saugo jūsų duomenis atmintyje ar diske.
+
+"
+
+### Užklausos etapas
+
+Užklausos etape užklausos grandinė gauna aktualiausią kontekstą pagal vartotojo užklausą
+ir perduoda jį LLM (kartu su užklausa), kad būtų sintezuotas atsakymas.
+
+Tai suteikia LLM naujausias žinias, kurios nėra jo pradinėje mokymo duomenų rinkinyje,
+(taip pat sumažinant halucinaciją).
+
+Pagrindinis iššūkis užklausos etape yra žinių paieška, orkestro organizavimas ir apmąstymas per (galbūt daugelį) žinių pagrindų.
+
+LlamaIndex suteikia suderinamus modulius, kurie padeda jums kurti ir integruoti RAG grandines klausimams ir atsakymams (užklausų variklis), chatbot'ams (pokalbių variklis) arba kaip dalį agento.
+
+Šie statybiniai blokai gali būti pritaikomi atspindėti reitingavimo nuostatas, taip pat suderinti apmąstymui per kelis žinių pagrindus struktūrizuotu būdu.
+
+![](./_static/concepts/querying.jpg)
+
+#### Statybiniai blokai
+
+[**Gavėjai**](./modules/low_level/retriever.md):
+Gavėjas apibrėžia, kaip efektyviai gauti aktualų kontekstą iš žinių pagrindo (t.y. indekso), kai pateikiama užklausa.
+Konkrečios gavimo logikos skiriasi priklausomai nuo indeksų, populiariausias būdamas tankus gavimas pagal vektorinį indeksą.
+
+[**Atsakymo sintezatoriai**](./modules/low_level/response_synthesizer.md):
+Atsakymo sintezatorius generuoja atsakymą iš LLM, naudodamas vartotojo užklausą ir nurodytą rinkinį gautų teksto fragmentų.
+
+"
+
+#### Grandinės
+
+[**Užklausų varikliai**](./modules/high_level/query_engine.md):
+Užklausų variklis yra nuo pradžios iki pabaigos grandinė, kuri leidžia jums užduoti klausimus apie savo duomenis.
+Jis priima natūralios kalbos užklausą ir grąžina atsakymą kartu su gautu ir perduotu kontekstu LLM.
+
+[**Pokalbių varikliai**](./modules/high_level/chat_engine.md):
+Pokalbių variklis yra nuo pradžios iki pabaigos grandinė, skirta pokalbiui su jūsų duomenimis
+(daugybė dialogų, o ne vienas klausimas ir atsakymas).
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..959d7a8b1a57f15e1cc89f5658ffca7985e233bb
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 4
+---
+
+# Pavyzdžiai nuo pradžios iki pabaigos
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Mūsų saugykloje įtraukėme keletą pavyzdžių, naudojant LlamaIndex.TS
+
+Peržiūrėkite žemiau esančius pavyzdžius arba išbandykite juos ir užbaikite per kelias minutes su interaktyviais Github Codespace vadovais, kurie pateikiami Dev-Docs [čia](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Pokalbių variklis (Chat Engine)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Nuskaitykite failą ir aptarkite jį su LLM.
+
+## [Vektorių indeksas](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Sukurkite vektorių indeksą ir užklausykite jį. Vektorių indeksas naudos įdėtis, kad gautų k-ąjį labiausiai susijusį mazgą. Pagal numatytuosius nustatymus, k yra 2.
+
+"
+
+## [Santraukos indeksas](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Sukurkite sąrašo indeksą ir užklausykite jį. Šis pavyzdys taip pat naudoja `LLMRetriever`, kuris naudos LLM, kad pasirinktų geriausius mazgus, kai generuojamas atsakymas.
+
+"
+
+## [Išsaugoti / Įkelti indeksą](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Sukurkite ir įkelkite vektorinį indeksą. LlamaIndex.TS automatiškai vykdo duomenų išsaugojimą į diską, kai sukuriamas saugojimo konteksto objektas.
+
+"
+
+## [Pritaikytas vektorių indeksas](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Sukurkite vektorių indeksą ir užklausykite jį, tuo pačiu konfigūruodami `LLM`, `ServiceContext` ir `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Sukurkite OpenAI LLM ir tiesiogiai naudokite jį pokalbiams.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Sukurkite Llama-2 LLM ir tiesiogiai naudokite jį pokalbiams.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Naudoja `SubQuestionQueryEngine`, kuris sudaro sudėtingus užklausimus į kelis klausimus ir tada sujungia atsakymus į visus subklausimus.
+
+"
+
+## [Žemų lygių modulių](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Šis pavyzdys naudoja keletą žemų lygių komponentų, kurie pašalina poreikį turėti tikrą užklausų variklį. Šiuos komponentus galima naudoti bet kur, bet kurioje programoje arba juos galima pritaikyti ir paveldėti, kad atitiktų jūsų individualius poreikius.
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..d4ed4f788122b3fb3733cf4ae0f84647377ce981
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Aplinkos
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+LlamaIndex šiuo metu oficialiai palaiko NodeJS 18 ir NodeJS 20.
+
+## NextJS Aplikacijos maršrutizatorius
+
+Jei naudojate NextJS Aplikacijos maršrutizatoriaus maršrutų tvarkykles / serverio funkcijas, turėsite naudoti NodeJS režimą:
+
+```js
+export const runtime = "nodejs"; // numatytasis
+```
+
+ir turėsite pridėti išimtį pdf-parse savo next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Prideda pdf-parse į tikrąjį NodeJS režimą su NextJS Aplikacijos maršrutizatoriumi
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2ef2f08e40b2065322c0d786ee4825aa88238a4a
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+# Diegimas ir konfigūracija
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Įsitikinkite, kad turite NodeJS v18 arba naujesnę versiją.
+
+
+## Naudodami create-llama
+
+Paprastiausias būdas pradėti naudoti LlamaIndex yra naudoti `create-llama` įrankį. Šis CLI įrankis leidžia greitai pradėti kurti naują LlamaIndex aplikaciją, viskas jau paruošta jums.
+
+Tiesiog paleiskite
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+kad pradėtumėte. Kai jūsų programa yra sugeneruota, paleiskite
+
+```bash npm2yarn
+npm run dev
+```
+
+kad paleistumėte plėtros serverį. Tada galite apsilankyti [http://localhost:3000](http://localhost:3000), kad pamatytumėte savo programą.
+
+## Diegimas iš NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Aplinkos kintamieji
+
+Mūsų pavyzdžiai pagal nutylėjimą naudoja OpenAI. Jums reikės nustatyti savo OpenAI raktą taip:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Pakeiskite į savo raktą iš https://platform.openai.com/account/api-keys
+```
+
+Jei norite, kad jis būtų automatiškai įkeltas kiekvieną kartą, pridėkite jį į savo .zshrc/.bashrc failą.
+
+ĮSPĖJIMAS: neįkelkite savo OpenAI rakto į versijų kontrolę.
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..a27583c288959317585a5aaa55c417e63c01c26f
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Kas yra LlamaIndex.TS?
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+LlamaIndex.TS yra duomenų pagrindas LLM aplikacijoms, skirtas įkelti, struktūrizuoti ir pasiekti privačius arba domeno specifinius duomenis. Nors taip pat yra prieinamas Python paketas (žr. [čia](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS siūlo pagrindines funkcijas paprastoje paketo versijoje, optimizuotoje naudojimui su TypeScript.
+
+## 🚀 Kodėl LlamaIndex.TS?
+
+Pagrindinėje esmėje LLM (dideli kalbos modeliai) siūlo natūralios kalbos sąsają tarp žmonių ir duomenų. Plačiai prieinami modeliai yra iš anksto apmokyti naudojant didžiulius kiekius viešai prieinamų duomenų, nuo Vikipedijos ir pašto sąrašų iki vadovėlių ir šaltinio kodo.
+
+LLM pagrindu sukurtos programos dažnai reikalauja papildyti šiuos modelius privačiais arba domeno specifiniais duomenimis. Deja, šie duomenys gali būti paskirstyti tarp izoliuotų programų ir duomenų saugyklų. Jie gali būti už API ribų, SQL duomenų bazėse arba užstrigę PDF failuose ir skaidrėse.
+
+Čia ateina **LlamaIndex.TS**.
+
+## 🦙 Kaip gali padėti LlamaIndex.TS?
+
+LlamaIndex.TS teikia šiuos įrankius:
+
+- **Duomenų įkėlimas** - tiesiogiai įkelkite savo esamus `.txt`, `.pdf`, `.csv`, `.md` ir `.docx` duomenis.
+- **Duomenų indeksai** - struktūrizuokite savo duomenis tarpinėse reprezentacijose, kurios yra lengvai naudojamos ir efektyvios LLM'ams.
+- **Varikliai** - suteikia natūralios kalbos prieigą prie jūsų duomenų. Pavyzdžiui:
+  - Užklausų varikliai yra galingos išgavimo sąsajos, skirtos žiniomis papildytam išvesties gavimui.
+  - Pokalbių varikliai yra pokalbių sąsajos, skirtos daugelio žinučių, "atgal ir pirmyn" sąveikai su jūsų duomenimis.
+
+## 👨‍👩‍👧‍👦 Kam skirtas LlamaIndex?
+
+LlamaIndex.TS teikia pagrindinį įrankių rinkinį, būtiną visiems, kurie kuria LLM programas naudodami JavaScript ir TypeScript.
+
+Mūsų aukšto lygio API leidžia pradedantiesiems naudoti LlamaIndex.TS, kad galėtų įkelti ir užklausti savo duomenis.
+
+Sudėtingesnėms programoms mūsų žemesnio lygio API leidžia pažengusiems naudotojams pritaikyti ir išplėsti bet kurį modulį (duomenų jungtis, indeksus, gaviklius ir užklausų variklius) pagal savo poreikius.
+
+## Pradžia
+
+`npm install llamaindex`
+
+Mūsų dokumentacija apima [įdiegimo instrukcijas](./installation.md) ir [pradžios vadovą](./starter.md), skirtą sukurti pirmąją aplikaciją.
+
+Kai jau esate paleidę, [aukšto lygio konceptai](./concepts.md) pateikia LlamaIndex moduliarios architektūros apžvalgą. Norėdami daugiau praktinių pavyzdžių, peržiūrėkite mūsų [nuo pradžios iki pabaigos vadovus](./end_to_end.md).
+
+## 🗺️ Ekosistema
+
+Norėdami atsisiųsti ar prisidėti, rasite LlamaIndex čia:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Bendruomenė
+
+Reikia pagalbos? Turite funkcijos pasiūlymą? Prisijunkite prie LlamaIndex bendruomenės:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf9ada544afd5cc069b8f0852cfe2e89774b148e
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Pokalbių variklis)
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Pokalbių variklis yra greitas ir paprastas būdas bendrauti su duomenimis savo indekse.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// pradėti pokalbį
+const response = await chatEngine.chat(query);
+```
+
+## API nuorodos
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..c056e1892c502811020c8bd5f2833c669d1ae41a
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Indeksas
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Indeksas yra pagrindinis jūsų duomenų konteineris ir organizavimo būdas. LlamaIndex.TS palaiko du indeksus:
+
+- `VectorStoreIndex` - generuojant atsakymą, siųs LLM viršutinius `Node`'us. Numatytasis viršutinių `Node`'ų skaičius yra 2.
+- `SummaryIndex` - generuojant atsakymą, siųs visus indekso `Node`'us LLM
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
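+
+Sukūrus indeksą, jį galima iš karto užklausti. Žemiau pateikiamas minimalus eskizas, naudojantis tą patį `index` objektą iš aukščiau pateikto pavyzdžio:
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Kas yra tekste?");
+console.log(response.toString());
+```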
+
+## API Nuorodos
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..42d1bf74b5070cd9cc53a19fb4f1344b3727ead3
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Skaitytuvas / Įkėlėjas
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+LlamaIndex.TS palaiko lengvą failų įkėlimą iš aplankų naudojant `SimpleDirectoryReader` klasę. Šiuo metu palaikomi `.txt`, `.pdf`, `.csv`, `.md` ir `.docx` failų formatai, o ateityje planuojama palaikyti daugiau!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
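+
+Įkeltus dokumentus galima tiesiogiai perduoti indeksui. Žemiau pateikiamas minimalus eskizas (apie `VectorStoreIndex` žr. skyrių apie indeksus):
+
+```typescript
+import { VectorStoreIndex } from "llamaindex";
+
+// naudojame aukščiau įkeltus `documents`
+const index = await VectorStoreIndex.fromDocuments(documents);
+```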
+
+## API Nuorodos
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..ffc2a87bd89e89bdbc32c01c71904699f7aef2be
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumentai ir Mazgai
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+`Dokumentai` ir `Mazgai` yra pagrindiniai bet kokio indekso statybiniai blokai. Nors šių objektų API yra panašus, `Dokumento` objektai atitinka ištisus failus, o `Mazgai` yra mažesni pradinio dokumento fragmentai, tinkami LLM ir Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "tekstas", metadata: { key: "val" } });
+```
+
+## API Nuorodos
+
+- [Dokumentas](../../api/classes/Document.md)
+- [TekstoMazgas](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a252917d2cd0d987287d45771fc6b56da599d18
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# Užklausos variklis (QueryEngine)
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Užklausos variklis apgaubia `Retriever` ir `ResponseSynthesizer` į vieną grandinę, kuri naudos užklausos eilutę, kad gautų mazgus ir tada juos siųstų į LLM, kad sugeneruotų atsakymą.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("užklausos eilutė");
+```
+
+## Subklausimo užklausos variklis
+
+Subklausimo užklausos variklio pagrindinė koncepcija yra tai, kad jis padalina vieną užklausą į kelias užklausas, gauna atsakymą į kiekvieną iš tų užklausų ir tada sujungia skirtingus atsakymus į vientisą atsakymą vartotojui. Galite tai įsivaizduoti kaip "galvokite apie tai žingsnis po žingsnio" techniką, bet iteruojant per savo duomenų šaltinius!
+
+### Pradžia
+
+Paprastiausias būdas pradėti išbandyti Subklausimo užklausos variklį yra paleisti subquestion.ts failą [pavyzdžiuose](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
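+
+Žemiau pateikiamas apytikslis eskizas, kaip toks variklis sukuriamas kode (daroma prielaida, kad `queryEngine` jau sukurtas iš indekso; įrankio pavadinimas ir aprašymas čia yra tik iliustracija):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine, // prielaida: jau sukurtas užklausų variklis
+      metadata: {
+        name: "dokumentu_variklis",
+        description: "Naudinga atsakant į klausimus apie įkeltus dokumentus",
+      },
+    },
+  ],
+});
+
+const response = await subQuestionEngine.query("Sudėtingas, kelių dalių klausimas");
+```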
+
+"
+
+### Įrankiai
+
+Subklausimo užklausos variklis yra įgyvendintas naudojant įrankius. Pagrindinė įrankių idėja yra ta, kad jie yra vykdomos parinktys dideliam kalbos modeliui. Šiuo atveju mūsų subklausimo užklausos variklis remiasi QueryEngineTool, kuris, kaip galima spėti iš pavadinimo, yra įrankis užklausoms vykdyti QueryEngine atžvilgiu. Tai leidžia modeliui, pavyzdžiui, skirtingiems klausimams užklausti skirtingus dokumentus. Taip pat galima įsivaizduoti, kad subklausimo užklausos variklis naudoja įrankį, kuris ieško informacijos internete arba gauna atsakymą naudodamas Wolfram Alpha.
+
+Daugiau apie įrankius galite sužinoti peržiūrėję LlamaIndex Python dokumentaciją https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+"
+
+## API nuorodos
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..56b02870d5e429b50d6eaefd7fb68ac29889863d
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Pagrindiniai moduliai
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+LlamaIndex.TS siūlo keletą pagrindinių modulių, kurie yra suskirstyti į aukšto lygio modulius, skirtus greitam pradėjimui, ir žemo lygio modulius, skirtus tinkinti pagrindinius komponentus pagal jūsų poreikius.
+
+## Aukšto lygio moduliai
+
+- [**Dokumentas**](./high_level/documents_and_nodes.md): Dokumentas atitinka tekstinį failą, PDF failą ar kitą nuoseklią duomenų dalį.
+
+- [**Mazgas**](./high_level/documents_and_nodes.md): Pagrindinis duomenų statybinis blokas. Dažniausiai tai yra dokumento dalys, suskaidytos į valdomus gabalus, pakankamai mažus, kad juos būtų galima paduoti įterpimo modeliui ir LLM.
+
+- [**Skaitytuvas/Įkėlėjas**](./high_level/data_loader.md): Skaitytuvas ar įkėlėjas yra kas nors, kas priima dokumentą realiame pasaulyje ir jį paverčia į dokumento klasę, kurią galima naudoti jūsų indekse ir užklausose. Šiuo metu palaikome paprastus teksto failus ir daugybę PDF failų.
+
+- [**Indeksai**](./high_level/data_index.md): Indeksai saugo mazgus ir šių mazgų įterpimus.
+
+- [**Užklausų variklis**](./high_level/query_engine.md): Užklausų varikliai yra tie, kurie generuoja užklausą, kurią įvedate, ir grąžina rezultatą. Užklausų varikliai paprastai sujungia iš anksto sukurtą užklausos šabloną su pasirinktais mazgais iš jūsų indekso, kad suteiktų LLM kontekstą, kuris reikalingas atsakyti į jūsų užklausą.
+
+- [**Pokalbių variklis**](./high_level/chat_engine.md): Pokalbių variklis padeda jums sukurti pokalbių robotą, kuris sąveikauja su jūsų indeksais.
+
+## Žemo lygio modulis
+
+- [**LLM**](./low_level/llm.md): LLM klasė yra vieninga sąsaja su didelio kalbos modelio tiekėju, tokiu kaip OpenAI GPT-4, Anthropic Claude ar Meta LLaMA. Ją galite paveldėti, kad sukurtumėte jungtį su savo dideliu kalbos modeliu.
+
+- [**Embedding**](./low_level/embedding.md): Įterpimas yra vaizduojamas kaip slankiojo kablelio skaičių vektorius. Mūsų numatytasis įterpimo modelis yra OpenAI `text-embedding-ada-002`, kurio kiekvienas įterpimas susideda iš 1536 slankiojo kablelio skaičių. Kitas populiarus įterpimo modelis yra BERT, kuris kiekvienam mazgui vaizduoti naudoja 768 slankiojo kablelio skaičius. Mes teikiame keletą įrankių darbui su įterpimais, įskaitant 3 panašumo skaičiavimo variantus ir maksimalų ribinį reikšmingumą.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Teksto skaidymo strategijos yra labai svarbios bendrai įterpimo paieškos efektyvumui. Šiuo metu, nors turime numatytąjį variantą, nėra vieno dydžio sprendimo, tinkamo visiems atvejams. Priklausomai nuo šaltinio dokumentų, galite norėti naudoti skirtingus skaidymo dydžius ir strategijas. Šiuo metu palaikome skaidymą pagal fiksuotą dydį, skaidymą pagal fiksuotą dydį su persidengiančiais skyriais, skaidymą pagal sakinį ir skaidymą pagal pastraipą. Teksto skaidyklė naudojama NodeParser, kai skaidoma `Dokumentai` į `Mazgus`.
+
+- [**Retriever**](./low_level/retriever.md): Atkūrėjas yra tas, kuris iš tikrųjų pasirenka mazgus, kuriuos atkurti iš indekso. Čia galite bandyti atkurti daugiau ar mažiau mazgų užklausai, keisti panašumo funkciją arba sukurti savo atkūrėją kiekvienam atskiram naudojimo atvejui jūsų programoje. Pavyzdžiui, galite norėti turėti atskirą atkūrėją kodo turiniui ir teksto turiniui.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): Atsakymo sintezatorius atsakingas už užklausos eilutės naudojimą ir naudojant `Mazgų` sąrašą generuoja atsakymą. Tai gali būti įvairių formų, pvz., peržiūrint visą kontekstą ir tobulinant atsakymą arba kuriant medį su santraukomis ir grąžinant pagrindinę santrauką.
+
+- [**Storage**](./low_level/storage.md): Iš anksto ar vėliau norėsite saugoti savo indeksus, duomenis ir vektorius, o ne kiekvieną kartą paleisti įterpimo modelius. IndexStore, DocStore, VectorStore ir KVStore yra abstrakcijos, leidžiančios tai padaryti. Kartu jie sudaro StorageContext. Šiuo metu leidžiame jums išsaugoti savo įterpimus failuose failų sistemoje (arba virtualioje atmintinėje failų sistemoje), tačiau taip pat aktyviai įtraukiame integracijas su vektorinėmis duomenų bazėmis.
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..32e53046d3666ef317bca53fc96d577573239075
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Įterpimas
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Įterpimo modelis LlamaIndex atsakingas už teksto skaitinės reprezentacijos kūrimą. Pagal numatytuosius nustatymus, LlamaIndex naudos `text-embedding-ada-002` modelį iš OpenAI.
+
+Tai gali būti aiškiai nustatyta `ServiceContext` objekte.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
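+
+Įterpimo modelį galima naudoti ir tiesiogiai. Žemiau pateikiamas eskizas (daroma prielaida, kad modelio objektas turi `getTextEmbedding` metodą):
+
+```typescript
+const vector = await openaiEmbeds.getTextEmbedding("Sveikas, pasauli");
+console.log(vector.length); // pvz., 1536 skaičių vektorius
+```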
+
+## API nuorodos
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..b33a7b6bfa9373f167ed5b0d4692eb8cff770d47
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+LLM yra atsakingas už teksto skaitymą ir natūralių kalbos atsakymų generavimą į užklausas. Pagal numatytuosius nustatymus, LlamaIndex.TS naudoja `gpt-3.5-turbo`.
+
+LLM gali būti aiškiai nustatytas `ServiceContext` objekte.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
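+
+LLM galima naudoti ir tiesiogiai, be indekso. Žemiau pateikiamas eskizas (daroma prielaida, kad `chat` priima žinučių masyvą):
+
+```typescript
+const result = await openaiLLM.chat([
+  { role: "user", content: "Pasisveikink viena eilute" },
+]);
+console.log(result.message.content);
+```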
+
+## API Nuorodos
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..476ce767b1c60efb61377645564da4d7bfa136f2
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+`NodeParser` LlamaIndex bibliotekoje yra atsakingas už `Document` objektų padalinimą į lengviau tvarkomus `Node` objektus. Kai iškviečiate `.fromDocuments()`, `NodeParser` iš `ServiceContext` yra naudojamas automatiškai tai padaryti už jus. Alternatyviai, jį galite naudoti, kad iš anksto padalintumėte dokumentus.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Man yra 10 metų. Jonas yra 20 metų." }),
+]);
+```
+
+## TextSplitter
+
+Numatytasis teksto skaidytuvas skaido tekstą į sakinius. Jį taip pat galima naudoti kaip atskirą modulį neapdorotam tekstui skaidyti.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Sveikas, pasauli");
+```
+
+## API nuorodos
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..31b7c993ec5692c2a51a6a15ccdde4f2a9ac295c
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (Atsakymo sintezatorius)
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+ResponseSynthesizer (Atsakymo sintezatorius) yra atsakingas už užklausos, mazgų ir šablonų perdavimą LLM (dideliam kalbos modeliui), kad būtų sugeneruotas atsakymas. Yra keletas pagrindinių atsakymo generavimo būdų:
+
+- `Refine` (Tobulinti): "sukurti ir tobulinti" atsakymą, eina per kiekvieną gautą teksto gabalą sekančiai. Tai reiškia atskirą LLM skambutį kiekvienam mazgui. Gerai tinka išsamesniems atsakymams.
+- `CompactAndRefine` (Kompaktiškas ir tobulinti) (numatytasis): "kompaktiškai" sutraukti užklausą kiekviename LLM skambutyje, įkišant kuo daugiau teksto gabalų, kurie telpa maksimalioje užklausos dydžio riboje. Jei yra per daug gabalų, kad tilptų į vieną užklausą, "sukurti ir tobulinti" atsakymą, eina per kelis kompaktiškus užklausos šablonus. Tas pats kaip `refine`, bet turėtų reikšti mažiau LLM skambučių.
+- `TreeSummarize` (Medžio santrauka): Pagal duotą teksto gabalų rinkinį ir užklausą, rekursyviai konstruojamas medis ir grąžinamas šakninis mazgas kaip atsakymas. Gerai tinka santraukos tikslais.
+- `SimpleResponseBuilder` (Paprasto atsakymo kūrėjas): Pagal duotą teksto gabalų rinkinį ir užklausą, taikoma užklausa kiekvienam teksto gabalui, tuo pačiu kaupiant atsakymus į masyvą. Grąžina sujungtą visų atsakymų eilutę. Gerai tinka, kai reikia atskirai paleisti tą pačią užklausą kiekvienam teksto gabalui.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Man yra 10 metų." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "Džonas yra 20 metų." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Kiek man yra metų?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API nuorodos
+
+- [ResponseSynthesizer (Atsakymo sintezatorius)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Tobulinti)](../../api/classes/Refine.md)
+- [CompactAndRefine (Kompaktiškas ir tobulinti)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (Medžio santrauka)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (Paprasto atsakymo kūrėjas)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..38298476d5c96a12066c8e9919d51d87ad62ff17
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Gavėjas (Retriever)
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Gavėjas LlamaIndex'e yra tai, kas naudojama išgauti `Node`'us iš indekso naudojant užklausos eilutę. `VectorIndexRetriever` išgaus top-k panašiausius mazgus. Tuo tarpu `SummaryIndexRetriever` išgaus visus mazgus, nepriklausomai nuo užklausos.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Išgaunami mazgai!
+const nodesWithScore = await retriever.retrieve("užklausos eilutė");
+```
+
+## API nuorodos
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a24e865e25bfc40675f5cb0efe6d1d091d1df5a
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 7
+---
+
+# Saugojimas
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Saugojimas LlamaIndex.TS veikia automatiškai, kai jūs sukonfigūruojate `StorageContext` objektą. Tiesiog sukonfigūruokite `persistDir` ir pridėkite jį prie indekso.
+
+Šiuo metu palaikomas tik išsaugojimas ir įkėlimas iš disko, su planuojamomis ateities integracijomis!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testo tekstas" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
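+
+Vėliau tą patį indeksą galima įkelti iš disko. Žemiau pateikiamas eskizas (daroma prielaida, kad `VectorStoreIndex.init` priima `storageContext` parametrą):
+
+```typescript
+const loadedContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedContext,
+});
+```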
+
+## API Nuorodos
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..c64459fecd5b38d272bb3b251657b29a47809f5c
--- /dev/null
+++ b/apps/docs/i18n/lt/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Pradžios vadovas
+
+`Ši dokumentacija buvo automatiškai išversta ir gali turėti klaidų. Nedvejodami atidarykite Pull Request, jei norite pasiūlyti pakeitimus.`
+
+Kai jūs [įdiegėte LlamaIndex.TS naudodami NPM](installation) ir sukonfigūravote savo OpenAI raktą, jūs esate pasiruošę pradėti savo pirmąją programą:
+
+Naujame aplanke:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # jei reikia
+```
+
+Sukurkite failą `example.ts`. Šis kodas įkels keletą pavyzdinių duomenų, sukurs dokumentą, jį indeksuos (kuriant įdėjimus naudojant OpenAI) ir tada sukurs užklausos variklį, kuris atsakys į duomenų klausimus.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Įkelkite esė iš abramov.txt naudojant Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Sukurkite dokumento objektą su esė
+  const document = new Document({ text: essay });
+
+  // Padalinkite tekstą ir sukurkite įdėjimus. Saugokite juos vektorių saugykloje
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Užklauskite indekso
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("Ką autorius darė koledže?");
+
+  // Išvesti atsakymą
+  console.log(response.toString());
+}
+
+main();
+```
+
+Tada galite paleisti jį naudodami
+
+```bash
+npx ts-node example.ts
+```
+
+Pasiruošęs sužinoti daugiau? Patikrinkite mūsų NextJS žaidimų aikštelę adresu https://llama-playground.vercel.app/. Šaltinis yra prieinamas adresu https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..803483d40bd5bd79de16088f6d3515431a8ae2fe
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Augstā līmeņa koncepti
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+LlamaIndex.TS palīdz jums veidot LLM jaudīgas lietojumprogrammas (piemēram, jautājumu un atbilžu sistēmu, čatbota) ar pielāgotiem datiem.
+
+Šajā augstā līmeņa konceptu rokasgrāmatā jūs iemācīsieties:
+
+- kā LLM var atbildēt uz jautājumiem, izmantojot jūsu pašu datus.
+- galvenos jēdzienus un moduļus LlamaIndex.TS, lai veidotu savu vaicājumu plūsmu.
+
+## Jautājumu atbildēšana pār jūsu datiem
+
+LlamaIndex izmanto divu posmu metodi, izmantojot LLM ar jūsu datiem:
+
+1. **indeksēšanas posms**: zināšanu bāzes sagatavošana, un
+2. **vaicāšanas posms**: atbilstošā konteksta iegūšana no zināšanām, lai palīdzētu LLM atbildēt uz jautājumu.
+
+![](./_static/concepts/rag.jpg)
+
+Šo procesu sauc arī par atgūšanas papildinātu ģenerēšanu (RAG).
+
+LlamaIndex.TS nodrošina būtiskos rīkus, lai abus soļus padarītu ļoti vienkāršus.
+
+Apskatīsim katru posmu detalizēti.
+
+### Indeksēšanas posms
+
+LlamaIndex.TS palīdz jums sagatavot zināšanu bāzi, izmantojot datu savienotājus un indeksus.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Datu ielādētāji**](./modules/high_level/data_loader.md):
+Datu savienotājs (piemēram, `Reader`) iegūst datus no dažādiem datu avotiem un datu formātiem un pārveido tos par vienkāršu `Dokumenta` reprezentāciju (teksts un vienkārša metadati).
+
+[**Dokumenti / mezgli**](./modules/high_level/documents_and_nodes.md): `Dokuments` ir vispārīgs konteiners jebkuram datu avotam - piemēram, PDF, API izvade vai atgūti dati no datu bāzes. `Mezgls` ir atomiskā datu vienība LlamaIndex un pārstāv "gabalu" no avota `Dokumenta`. Tas ir bagātīgs attēlojums, kas ietver metadatus un attiecības (ar citiem mezgliem), lai ļautu precīzas un izteiksmīgas atgūšanas operācijas.
+
+[**Datu indeksi**](./modules/high_level/data_index.md):
+Kad jūs esat ielādējis savus datus, LlamaIndex palīdz jums indeksēt datus tā, lai tos būtu viegli atgūt.
+
+LlamaIndex apstrādā neapstrādātos dokumentus, pārveido tos par starpposmu reprezentācijām, aprēķina vektora iegultnes un saglabā jūsu datus atmiņā vai diskā.
+
+"
+
+### Vaicāšanas posms
+
+Vaicāšanas posmā vaicājumu plūsma atgūst vispiemērotāko kontekstu, ņemot vērā lietotāja vaicājumu,
+un nodod to LLM (kopā ar vaicājumu), lai sintezētu atbildi.
+
+Tas nodrošina LLM ar aktuālām zināšanām, kas nav tās sākotnējā apmācības datu kopā,
+(arī samazinot halucinācijas).
+
+Galvenais izaicinājums vaicāšanas posmā ir atgūšana, orķestrēšana un loģika pār (iespējams, vairākām) zināšanu bāzēm.
+
+LlamaIndex nodrošina komponējamus moduļus, kas palīdz jums veidot un integrēt RAG plūsmas jautājumu un atbilžu sistēmām (vaicājumu dzinējs), čatbotiem (čata dzinējs) vai kā daļu no aģenta.
+
+Šie būvēšanas bloki var tikt pielāgoti, lai atspoguļotu ranžēšanas preferences, kā arī apvienoti, lai strukturēti spriestu pār vairākām zināšanu bāzēm.
+
+![](./_static/concepts/querying.jpg)
+
+#### Būvēšanas bloki
+
+[**Atgūtāji**](./modules/low_level/retriever.md):
+Atgūtājs definē, kā efektīvi atgūt atbilstošu kontekstu no zināšanu bāzes (piemēram, indeksa), ņemot vērā vaicājumu.
+Konkrētā atgūšanas loģika atšķiras atkarībā no indeksiem, populārākais būdams blīva atgūšana pret vektora indeksu.
+
+[**Atbildes sintezatori**](./modules/low_level/response_synthesizer.md):
+Atbildes sintezators ģenerē atbildi no LLM, izmantojot lietotāja vaicājumu un atgūtu teksta fragmentu kopu.
+
+"
+
+#### Plūsmas
+
+[**Vaicājumu dzinēji**](./modules/high_level/query_engine.md):
+Vaicājumu dzinējs ir no sākuma līdz beigām plūsma, kas ļauj jums uzdot jautājumus par jūsu datiem.
+Tas ņem vērā dabiskās valodas vaicājumu un atgriež atbildi, kopā ar atsauces kontekstu, kas iegūts un nodots LLM.
+
+[**Čata dzinēji**](./modules/high_level/chat_engine.md):
+Čata dzinējs ir no sākuma līdz beigām plūsma, kas ļauj jums veikt sarunu ar jūsu datiem
+(vairākkārtēja saruna turp un atpakaļ, nevis viens jautājums un atbilde).
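+
+Zemāk ir minimāla skice, kā abi dzinēji izskatās kodā (pieņemot, ka `index` jau ir izveidots indeksēšanas posmā, kā aprakstīts iepriekš):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Vaicājumu dzinējs: viens jautājums un viena atbilde
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("Jautājums par jūsu datiem");
+
+// Čata dzinējs: vairāku ziņojumu saruna ar kontekstu
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("Pirmais ziņojums");
+```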
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..07cb867f3592aff6aca9424367421be1df864700
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 4
+---
+
+# Galam līdz galam piemēri
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+Mūsu repozitorijā ir iekļauti vairāki galam līdz galam piemēri, izmantojot LlamaIndex.TS
+
+Apskatiet zemāk esošos piemērus vai izmēģiniet tos un pabeidziet tos dažu minūšu laikā, izmantojot interaktīvus Github Codespace pamācības, ko nodrošina Dev-Docs [šeit](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Čata dzinējs](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Nolasiet failu un sarunājieties par to ar LLM.
+
+## [Vektora indekss](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Izveidojiet vektora indeksu un veiciet vaicājumu. Vektora indekss izmantos iegultās vērtības, lai iegūtu k visatbilstošākos mezglus. Pēc noklusējuma k ir 2.
+
+"
+
+## [Kopsavilkuma indekss](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Izveidojiet saraksta indeksu un veiciet vaicājumu. Šajā piemērā tiek izmantots arī `LLMRetriever`, kas izmanto LLM, lai izvēlētos labākos mezglus, kas jāizmanto, veidojot atbildi.
+
+"
+
+## [Saglabāt / Ielādēt indeksu](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Izveidojiet un ielādējiet vektora indeksu. LlamaIndex.TS automātiski saglabā datus diskā, kad tiek izveidots krātuves konteksta objekts.
+
+"
+
+## [Pielāgota vektora indekss](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Izveidojiet vektora indeksu un veiciet vaicājumu, konfigurējot arī `LLM`, `ServiceContext` un `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Izveidojiet OpenAI LLM un izmantojiet to tieši čatam.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Izveidojiet Llama-2 LLM un to izmantojiet tieši čatam.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Izmanto `SubQuestionQueryEngine`, kas sadala sarežģītus vaicājumus vairākos apakšjautājumos un pēc tam apvieno visu apakšjautājumu atbildes vienā atbildē.
+
+"
+
+## [Zemā līmeņa moduļi](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Šis piemērs izmanto vairākus zemā līmeņa komponentus, kas novērš nepieciešamību pēc faktiskas vaicājumu dzinēja. Šos komponentus var izmantot jebkur, jebkurā lietotnē vai pielāgot un apakšklasēt, lai atbilstu jūsu pašu vajadzībām.
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..f24aaa1497381a265c20329883b07241841f5f00
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Vides
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+LlamaIndex pašlaik oficiāli atbalsta NodeJS 18 un NodeJS 20.
+
+## NextJS lietotnes maršrutētājs
+
+Ja izmantojat NextJS lietotnes maršrutētāja maršrutu apstrādātājus / servera funkcijas, jums būs jāizmanto NodeJS režīms:
+
+```js
+export const runtime = "nodejs"; // noklusējums
+```
+
+un jums būs jāpievieno izņēmums pdf-parse savā next.config.js failā
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Ievieto pdf-parse faktiskajā NodeJS režīmā ar NextJS lietotnes maršrutētāju
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e76a406c627781352dc3bb6a7fd0c907847438ab
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+# Instalācija un iestatīšana
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+Pārliecinieties, ka jums ir NodeJS v18 vai jaunāka versija.
+
+
+## Izmantojot create-llama
+
+Vienkāršākais veids, kā sākt darbu ar LlamaIndex, ir izmantot `create-llama`. Šis CLI rīks ļauj jums ātri sākt jaunas LlamaIndex lietotnes izveidi, ar visu nepieciešamo iestatījumu jau sagatavotu.
+
+Vienkārši izpildiet komandu
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+lai sāktu. Kad jūsu lietotne ir izveidota, izpildiet komandu
+
+```bash npm2yarn
+npm run dev
+```
+
+lai startētu izstrādes serveri. Tad jūs varat apmeklēt [http://localhost:3000](http://localhost:3000), lai redzētu savu lietotni.
+
+## Instalēšana no NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Vides mainīgie
+
+Mūsu piemēri pēc noklusējuma izmanto OpenAI. Jums būs jāiestata sava OpenAI atslēga šādi:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Aizvietojiet ar savu atslēgu no https://platform.openai.com/account/api-keys
+```
+
+Ja vēlaties, lai tas tiktu automātiski ielādēts katru reizi, pievienojiet to savam .zshrc/.bashrc failam.
+
+BRĪDINĀJUMS: neiekļaujiet savu OpenAI atslēgu versiju kontroles sistēmā.
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..f009de1eb13e97e062ad92cb2094b264d834ec93
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,62 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Kas ir LlamaIndex.TS?
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+LlamaIndex.TS ir datu pamatstruktūra LLM lietojumprogrammām, lai ievadītu, strukturētu un piekļūtu privātiem vai domēna specifiskiem datiem. Lai gan ir pieejams arī Python pakotne (skatīt [šeit](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS piedāvā pamata funkcijas vienkāršā pakotnē, kas optimizēta lietošanai ar TypeScript.
+
+## 🚀 Kāpēc izvēlēties LlamaIndex.TS?
+
+Būtībā LLM piedāvā dabiskās valodas interfeisu starp cilvēkiem un iegūtajiem datiem. Plaši pieejamie modeļi ir iepriekšapmācīti ar milzīgu daudzumu publiski pieejamu datu, sākot no Vikipēdijas un pasta sarakstiem līdz mācību grāmatām un pirmkoda failiem.
+
+Lietojumprogrammām, kas balstītas uz LLM, bieži ir nepieciešams papildināt šos modeļus ar privātiem vai domēna specifiskiem datiem. Diemžēl šie dati var būt sadalīti starp dažādām lietojumprogrammām un datu glabātuvēm. Tie var atrasties aiz API, SQL datu bāzēs vai būt ieslodzīti PDF failos un slaidu prezentācijās.
+
+Šeit nāk klajā **LlamaIndex.TS**.
+
+## 🦙 Kā LlamaIndex.TS var palīdzēt?
+
+LlamaIndex.TS nodrošina šādus rīkus:
+
+- **Datu ielāde** - tieši ievadiet esošos `.txt`, `.pdf`, `.csv`, `.md` un `.docx` datus
+- **Datu indeksi** - strukturējiet savus datus starpposma reprezentācijās, kas ir viegli un efektīvi lietojami LLM lietojumprogrammām.
+- **Dzinēji** - nodrošina dabiskās valodas piekļuvi jūsu datiem. Piemēram:
+  - Vaicājumu dzinēji ir spēcīgi atgūšanas interfeisi zināšanu papildinātai izvadei.
+  - Sarunu dzinēji ir sarunu interfeisi daudzziņu "turp un atpakaļ" mijiedarbībai ar jūsu datiem.
+
+"
+
+## 👨‍👩‍👧‍👦 Kam ir paredzēts LlamaIndex?
+
+LlamaIndex.TS nodrošina pamata rīkus, kas ir būtiski ikvienam, kas veido LLM lietojumprogrammas ar JavaScript un TypeScript.
+
+Mūsu augsta līmeņa API ļauj iesācējiem lietot LlamaIndex.TS, lai ievadītu un vaicātu savus datus.
+
+Lielākām un sarežģītākām lietojumprogrammām mūsu zemāka līmeņa API ļauj pieredzējušiem lietotājiem pielāgot un paplašināt jebkuru moduli - datu savienotājus, indeksus, atgūtājus un vaicājumu dzinējus, lai pielāgotu tos savām vajadzībām.
+
+## Sākumā
+
+`npm install llamaindex`
+
+Mūsu dokumentācijā ir iekļautas [Instalācijas instrukcijas](./installation.md) un [Sākuma pamācība](./starter.md), lai izveidotu savu pirmo lietojumprogrammu.
+
+Kad esat gatavs, [Augsta līmeņa koncepti](./concepts.md) sniedz pārskatu par LlamaIndex modulāro arhitektūru. Lai iegūtu vairāk praktisku piemēru, apskatiet mūsu [galam līdz galam pamācības](./end_to_end.md).
+
+## 🗺️ Ekosistēma
+
+Lai lejupielādētu vai piedalītos, atradīsiet LlamaIndex šeit:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Kopiena
+
+Vajag palīdzību? Ir ieteikums funkcijai? Pievienojieties LlamaIndex kopienai:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..e335dd4c35fda9cd3696ea3434e904c6fed7f600
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# Čata dzinējs (ChatEngine)
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+Čata dzinējs ir ātrs un vienkāršs veids, kā sazināties ar datiem savā indeksā.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// sākt čatošanu
+const response = await chatEngine.chat(query);
+```
+
+## API atsauces
+
+- [Konteksta čata dzinējs (ContextChatEngine)](../../api/classes/ContextChatEngine.md)
+- [Kompaktā jautājumu čata dzinējs (CondenseQuestionChatEngine)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..62644414664bad9fee7422c2e2cb3b6c1833c9a9
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Indekss
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+Indekss ir pamata konteineris un organizācija jūsu datiem. LlamaIndex.TS atbalsta divus indeksus:
+
+- `VectorStoreIndex` - ģenerējot atbildi, nosūtīs LLM k atbilstošākos `Node` objektus. Pēc noklusējuma k ir 2.
+- `SummaryIndex` - nosūtīs katru `Node` indeksā uz LLM, lai ģenerētu atbildi.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "tests" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
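+
+Pēc indeksa izveides to var uzreiz vaicāt. Zemāk ir minimāla skice, kas izmanto to pašu `index` objektu no iepriekšējā piemēra:
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Kas ir tekstā?");
+console.log(response.toString());
+```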
+
+## API Atsauce
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..02a9e53b9cf2409fcf591a72ec996e9dfae203ad
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Lasītājs / Ielādētājs
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+LlamaIndex.TS atbalsta vieglu failu ielādi no mapēm, izmantojot klasi `SimpleDirectoryReader`. Pašlaik tiek atbalstīti failu formāti `.txt`, `.pdf`, `.csv`, `.md` un `.docx`, bet nākotnē plānots atbalstīt vēl vairāk!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
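+
+Ielādētos dokumentus var tieši nodot indeksam. Zemāk ir minimāla skice (par `VectorStoreIndex` skatiet indeksu sadaļu):
+
+```typescript
+import { VectorStoreIndex } from "llamaindex";
+
+// izmantojam iepriekš ielādētos `documents`
+const index = await VectorStoreIndex.fromDocuments(documents);
+```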
+
+## API Atsauce
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..10d749dfbce58220fc8a70f6e193790b22c7f25e
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 0
+---
+
+# Dokumenti un mezgli
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+`Dokumenti` un `Mezgli` ir pamata būvēšanas bloki jebkuram indeksam. Lai gan šo objektu API ir līdzīgs, `Dokumenta` objekti pārstāv veselus failus, bet `Mezgli` ir mazāki šī oriģinālā dokumenta gabali, kas ir piemēroti LLM un Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({
+  text: "teksts",
+  metadata: { key: "vērtība" },
+});
+```
+
+## API Atsauce
+
+- [Dokuments](../../api/classes/Document.md)
+- [TekstaMezgls](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..f39fec12d9cba1685904fac939eedbfb25ded2e0
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Vaicājumu dzinējs)
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+Vaicājumu dzinējs ietver `Retriever` un `ResponseSynthesizer` komponentes vienā plūsmā, kas izmanto vaicājuma virkni, lai iegūtu mezglus un pēc tam nosūtītu tos LLM, lai ģenerētu atbildi.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("vaicājuma virkne");
+```
+
+## Apakšjautājumu dzinējs
+
+Apakšjautājumu dzinēja pamatideja ir sadalīt vienu vaicājumu vairākos vaicājumos, iegūt atbildi uz katru no šiem vaicājumiem un pēc tam apvienot atšķirīgās atbildes vienā saprotamā atbildē lietotājam. To varētu uzskatīt par "padomāt soli pa solim" tehniku, iterējot pār datu avotiem!
+
+### Sākumā
+
+Vienkāršākais veids, kā sākt izmēģināt Apakšjautājumu vaicājumu dzinēju, ir palaist subquestion.ts failu [piemēros](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
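+
+Zemāk ir aptuvena skice, kā šāds dzinējs tiek izveidots kodā (pieņemot, ka `queryEngine` jau ir izveidots no indeksa; rīka nosaukums un apraksts šeit ir tikai ilustrācija):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine, // pieņēmums: jau izveidots vaicājumu dzinējs
+      metadata: {
+        name: "dokumentu_dzinejs",
+        description: "Noder jautājumiem par ielādētajiem dokumentiem",
+      },
+    },
+  ],
+});
+
+const response = await subQuestionEngine.query("Sarežģīts, vairāku daļu jautājums");
+```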
+
+### Rīki
+
+Apakšjautājumu dzinējs ir ieviests, izmantojot rīkus. Rīku pamatideja ir tā, ka tie ir izpildāmas opcijas lielajam valodas modelim. Šajā gadījumā mūsu Apakšjautājumu dzinējs balstās uz QueryEngineTool, kas, kā jau var nojaust, ir rīks vaicājumu izpildei pret QueryEngine. Tas ļauj modelim, piemēram, dažādiem jautājumiem vaicāt dažādus dokumentus. Varētu arī iedomāties, ka Apakšjautājumu dzinējs izmanto rīku, kas meklē kaut ko tīmeklī vai iegūst atbildi, izmantojot Wolfram Alpha.
+
+Uzziniet vairāk par rīkiem, apskatot LlamaIndex Python dokumentāciju: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+"
+
+## API atsauce
+
+- [RetrieverQueryEngine (Atgūtāja vaicājumu dzinējs)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Apakšjautājumu vaicājumu dzinējs)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Vaicājumu dzinēja rīks)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d6a377ff2770ae715c4ef37b621b3805ace5b2ca
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Pamatmoduļi
+
+`Šis dokuments ir automātiski tulkots un var saturēt kļūdas. Nevilciniet atvērt Pull Request, lai ierosinātu izmaiņas.`
+
+LlamaIndex.TS piedāvā vairākus pamatmoduļus, kas ir sadalīti augsta līmeņa moduļos, lai ātri sāktu darbu, un zemā līmeņa moduļos, lai pielāgotu galvenos komponentus pēc vajadzības.
+
+## Augsta līmeņa moduļi
+
+- [**Dokuments**](./high_level/documents_and_nodes.md): Dokuments pārstāv teksta failu, PDF failu vai citu vienmērīgu datu gabalu.
+
+- [**Mezgls**](./high_level/documents_and_nodes.md): Pamata datu būvēšanas bloks. Visbiežāk tie ir dokumenta daļas, kas sadalītas pārvaldāmos gabalos, pietiekami mazos, lai tos varētu padot iegultajam modelim un LLM.
+
+- [**Lasītājs/Ielādētājs**](./high_level/data_loader.md): Lasītājs vai ielādētājs ir kas, kas ņem dokumentu reālajā pasaulē un pārveido to par Dokumenta klasi, kas pēc tam var tikt izmantota jūsu indeksā un vaicājumos. Pašlaik mēs atbalstām vienkāršus teksta failus un PDF failus, bet nākotnē plānojam atbalstīt vēl daudz vairāk formātus.
+
+- [**Indeksi**](./high_level/data_index.md): Indeksi glabā Mezglus un to iegultās vērtības.
+
+- [**Vaicājumu dzinējs**](./high_level/query_engine.md): Vaicājumu dzinēji ir tie, kas ģenerē vaicājumu, ko ievadāt, un sniedz jums rezultātu. Vaicājumu dzinēji parasti apvieno iepriekš izveidotu norādi ar atlasītiem Mezgliem no jūsu indeksa, lai dotu LLM kontekstu, kas nepieciešams, lai atbildētu uz jūsu vaicājumu.
+
+- [**Čata dzinējs**](./high_level/chat_engine.md): Čata dzinējs palīdz jums izveidot čatbota, kas mijiedarbosies ar jūsu indeksiem.
+
+## Zemā līmeņa modulis
+
+- [**LLM**](./low_level/llm.md): LLM klase ir vienota saskarne pār lielu valodas modeli sniedzēju, piemēram, OpenAI GPT-4, Anthropic Claude vai Meta LLaMA. Jūs varat to apakšklasēt, lai izveidotu savienojumu ar savu lielo valodas modeli.
+
+- [**Iegultās vērtības**](./low_level/embedding.md): Iegultā vērtība ir reprezentēta kā peldošo punktu skaitļu vektors. Mūsu noklusējuma modelis ir OpenAI `text-embedding-ada-002`, un katra iegultā vērtība sastāv no 1536 peldošo punktu skaitļiem. Vēl viens populārs iegulto vērtību modelis ir BERT, kas izmanto 768 peldošo punktu skaitļus, lai reprezentētu katru mezglu. Mēs piedāvājam vairākas utilītas darbam ar iegultajām vērtībām, ieskaitot 3 līdzības aprēķināšanas iespējas un maksimālo marginālo nozīmību.
+
+- [**Teksta sadalītājs/Mezglu parsētājs**](./low_level/node_parser.md): Teksta sadalīšanas stratēģijas ir ļoti svarīgas iegultā meklēšanas kopējai efektivitātei. Pašlaik, lai gan mums ir noklusējuma vērtība, nav vienas izmēra risinājuma, kas derētu visiem. Atkarībā no avota dokumentiem, jūs varat izmantot dažādas sadalīšanas izmērus un stratēģijas. Pašlaik mēs atbalstām sadalīšanu pēc fiksēta izmēra, sadalīšanu pēc fiksēta izmēra ar pārklājošām sadaļām, sadalīšanu pēc teikuma un sadalīšanu pēc rindkopa. Teksta sadalītājs tiek izmantots Mezglu parsētājā, sadalot `Dokumentus` par `Mezgliem`.
+
+- [**Atgūtājs**](./low_level/retriever.md): Atgūtājs ir tas, kas faktiski izvēlas Mezglus, ko atgūt no indeksa. Šeit jūs varat izmēģināt atgūt vairāk vai mazāk Mezglu vienā vaicājumā, mainīt līdzības funkciju vai izveidot savu atgūtāju katram individuālam lietojumam jūsu lietotnē. Piemēram, jūs varat vēlēties atsevišķu atgūtāju koda saturam un teksta saturam.
+
+- [**Atbildes sintezētājs**](./low_level/response_synthesizer.md): Atbildes sintezētājs ir atbildīgs par vaicājuma virknes ņemšanu un saraksta `Mezglu` izmantošanu, lai ģenerētu atbildi. Tas var būt dažādās formās, piemēram, iterējot cauri visam kontekstam un precizējot atbildi vai veidojot koka kopsavilkumu un atgriežot saknes kopsavilkumu.
+
+- [**Krātuve**](./low_level/storage.md): Izmantojot indeksus, datus un vektorus, jūs vēlēsieties tos saglabāt, nevis katru reizi palaist iegultā modeļa. IndexStore, DocStore, VectorStore un KVStore ir abstrakcijas, kas ļauj jums to darīt. Kopā tie veido StorageContext. Pašlaik mēs ļaujam saglabāt iegultās vērtības failos failu sistēmā (vai virtuālā atmiņas failu sistēmā), bet arī aktīvi pievienojam integrācijas ar vektora datu bāzēm.
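+
+To make the high-level flow concrete, here is a minimal sketch assembled from the module APIs documented on the pages above (the sample text and question are placeholders):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Wrap raw text in a Document, index it (computing embeddings),
+  // and answer a question through a query engine.
+  const document = new Document({
+    text: "The quick brown fox jumped over the lazy dog.",
+  });
+  const index = await VectorStoreIndex.fromDocuments([document]);
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("What did the fox do?");
+  console.log(response.toString());
+}
+
+main();
+```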
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf22f869e5959ba7c4ef7ef01334939f0b4c8e93
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..272c869b3efc355c4b4ae56a84b77e88eb8dae84
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..5deb712f100b859a348c7c19797252be4838b96a
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Man ir 10 gadu. Džons ir 20 gadu." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Sveika, pasaule");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..d893d42c7098f72c36f6f43d34d2004652ef0137
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to stuff into one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Man ir 10 gadu." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "Džons ir 20 gadu." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Cik vecs esmu?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..346710c3bcfc3cc678d692fe92b94d318fd9e243
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes, while a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const atgūtājs = vector_index.asRetriever();
+atgūtājs.similarityTopK = 3;
+
+// Fetch nodes!
+const mezgliArRezultātu = await atgūtājs.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..ae9f3fd84154bfc7a2e904b5e1a1748436f51a82
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Simply set the `persistDir` and attach it to an index.
+
+Right now, only saving to and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testa teksts" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..efa1bfd0d3ddf4fcf00c0812dfe56eeeb8fda065
--- /dev/null
+++ b/apps/docs/i18n/lv/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,56 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const eseja = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const dokuments = new Document({ text: eseja });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const indekss = await VectorStoreIndex.fromDocuments([dokuments]);
+
+  // Query the index
+  const vaicājumaDzinējs = indekss.asQueryEngine();
+  const atbilde = await vaicājumaDzinējs.query("What did the author do in college?");
+
+  // Output the response
+  console.log(atbilde.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..4403f771e796dfcc73f17585e1d1b025b590ee0c
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this guide to high-level concepts, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question.
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or to disk.
+
+"
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbots (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs per index, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+"
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query and returns a response, together with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth turns instead of a single question and answer). The sketch below shows both in miniature.
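+
+As a minimal sketch, assuming `index` is an existing `VectorStoreIndex` (both APIs are covered on the module pages linked above):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Query engine: a single question and answer over the index.
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("What is LlamaIndex?");
+
+// Chat engine: a multi-turn conversation grounded in the same data.
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("Tell me more about that.");
+```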
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d7b2d193fb45ea2ceec6fba13ee1cfc464f3032
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,51 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below, or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which uses the LLM to select the best nodes when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and use it directly for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and use it directly for chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple questions, and then aggregates a response from the answers to all sub-questions.
+
+"
+
+## [Low-Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and subclassed to meet your own needs.
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..e8dfa9649bbb115eae1e9c1ae564427477e2ab01
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js:
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..4778e3752307133b325d203a5a7e2993525073a8
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Make sure you have NodeJS v18 or higher installed.
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool lets you quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run the following command:
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run the following command:
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d68601942ff5d7028542f1da050b5c0d04e7fb7
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for use with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading**: ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly.
+- **Data indexes**: structure your data in intermediate representations that are easy and performant for LLMs to consume.
+- **Engines**: provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f8a96bb51dc2f2163a933a338fd47f5a5c775a6
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ef3c6295ea70b779f638ae69ad843944fa4686f
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response (a sketch follows the example below).
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
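+
+A `SummaryIndex` is constructed the same way; the sketch below assumes `SummaryIndex` is importable from `llamaindex` alongside `VectorStoreIndex` (see the API reference below):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+// Every Node in this index is sent to the LLM at query time.
+const document = new Document({ text: "test" });
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+```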
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..d3596f832e372585b3355ea4c725af2740a3a76a
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned for the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documenten = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..9eaefcb259691586c1271ab1487c9bb91e3331e0
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..297cdc135b2e79957f8028e62a6d00e5662b12fa
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,38 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+The SubQuestionQueryEngine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our SubQuestionQueryEngine relies on QueryEngineTool, which, as you guessed, is a tool to run queries on a QueryEngine. This allows us, for example, to give the model the option to query different documents for different questions. You could also imagine the SubQuestionQueryEngine using a Tool that searches the web or gets an answer using Wolfram Alpha. A rough sketch of how the pieces connect follows below.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation at https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
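+
+The sketch below is adapted from the subquestion.ts example linked above; the tool name and description are illustrative, and the exact option shape may differ between versions:
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "An essay about school and college days." });
+const index = await VectorStoreIndex.fromDocuments([document]);
+
+// Wrap the index's query engine as a Tool the LLM can choose to call.
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "essay_tool",
+        description: "Useful for questions about the loaded essay",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query(
+  "Compare the author's school days to their college days",
+);
+```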
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..885ec31ed507b6d1667b9a6258673178d7a16adc
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started, and low-level modules for customizing key components as you need. A sketch after the low-level list shows a retriever and a response synthesizer composed by hand.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and the LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what run the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected Nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. Our default embedding model is OpenAI's text-embedding-ada-002, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. Currently, while we do have a default, there is no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Documents` into `Nodes`.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Nodes` to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you will want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but are also actively adding integrations with Vector Databases.
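+
+As a minimal sketch of the low-level pieces, here is a retriever wired into a response synthesizer by hand, using only APIs shown on the module pages above (`index` is assumed to be an existing `VectorStoreIndex`):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// Choose the Nodes ourselves instead of letting a query engine do it.
+const retriever = index.asRetriever();
+retriever.similarityTopK = 3;
+const nodesWithScore = await retriever.retrieve("query string");
+
+// Hand the retrieved context to the synthesizer to produce an answer.
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "query string",
+  nodesWithScore,
+);
+console.log(response.response);
+```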
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..dbaa427b99e03c45e8c3ff0a26989965074f3a32
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..363c68e2f98e76182018e17ad611746bc599bba5
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..49fce72e522f77dd392f20dbef081c72307c72ea
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Ik ben 10 jaar oud. John is 20 jaar oud." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hallo Wereld");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..c5625509d6191973cfd9790ef39f354965a9f4c0
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,51 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to stuff into one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Ik ben 10 jaar oud." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 jaar oud." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Hoe oud ben ik?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..dc8a98e2c5d3c626dde4c14f2e02636e33896bf3
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes, while a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..f8bee56748cde7343727c4738b2904b800979594
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Simply set the `persistDir` and attach it to an index.
+
+Right now, only saving to and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testtekst" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..659aca93056ce3773762fb9a78a8106dcc4ee4ce
--- /dev/null
+++ b/apps/docs/i18n/nl/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "What did the author do in college?",
+  );
+
+  // Output the response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..f8c8b7555d149e4f8d4d64aee92c331cd0b8f0ac
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,84 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this guide to high-level concepts, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question.
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or to disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbots (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Byggeklosser
+
+[**Retrievers**](./modules/low_level/retriever.md):
+En retriever definerer hvordan man effektivt henter relevant kontekst fra en kunnskapsbase (dvs. indeks) når man har en spørring.
+Den spesifikke hentelogikken varierer for forskjellige indekser, hvor den mest populære er tett henting mot en vektorindeks.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+En response synthesizer genererer et svar fra en LLM ved hjelp av en brukerspørring og en gitt mengde hentede tekstbiter.
+
+"
+
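+Her er en minimal skisse (som en antakelse for illustrasjon: en eksisterende `index`, for eksempel en `VectorStoreIndex`) som viser hvordan disse to byggeklossene kan kombineres manuelt:
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// Hent relevante noder for en spørring (antar en eksisterende `index`)
+const retriever = index.asRetriever();
+const nodesWithScore = await retriever.retrieve("Hvor gammel er jeg?");
+
+// Syntetiser et svar fra de hentede nodene
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "Hvor gammel er jeg?",
+  nodesWithScore,
+);
+```
+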
+#### Pipeliner
+
+[**Spørringsmotorer**](./modules/high_level/query_engine.md):
+En spørringsmotor er en helhetlig pipeline som lar deg stille spørsmål om dine data.
+Den tar imot en naturlig språkspørring og returnerer et svar, sammen med referansekonteksten som er hentet og sendt til LLM-en.
+
+[**Chatmotorer**](./modules/high_level/chat_engine.md):
+En chatmotor er en helhetlig pipeline for å ha en samtale med dine data
+(flere frem-og-tilbake i stedet for et enkelt spørsmål og svar).
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..2376413f6d173826c7033eb071a021b6df5aa0ce
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,53 @@
+---
+sidebar_position: 4
+---
+
+# Eksempler fra start til slutt
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+Vi inkluderer flere eksempler fra start til slutt ved bruk av LlamaIndex.TS i repositoryen.
+
+Sjekk ut eksemplene nedenfor eller prøv dem ut og fullfør dem på få minutter med interaktive Github Codespace-tutorials levert av Dev-Docs [her](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine (ChatEngine)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Les en fil og diskuter den med LLM.
+
+## [Vektorindeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Opprett en vektorindeks og spør den. Vektorindeksen vil bruke innebygde representasjoner for å hente de k mest relevante nodene. Som standard er k lik 2.
+
+"
+
+## [Sammendragsindeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Opprett en listeindeks og spør den. Dette eksempelet bruker også `LLMRetriever`, som vil bruke LLM til å velge de beste nodene å bruke når du genererer svar.
+
+## [Lagre / Last inn en indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Opprett og last inn en vektorindeks. Lagring til disk i LlamaIndex.TS skjer automatisk når et lagringskontekstobjekt er opprettet.
+
+"
+
+## [Tilpasset Vektorindeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Opprett en vektorindeks og spør den, samtidig som du konfigurerer `LLM`, `ServiceContext` og `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Opprett en OpenAI LLM og bruk den direkte til chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Opprett en Llama-2 LLM og bruk den direkte til chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Bruker `SubQuestionQueryEngine`, som bryter komplekse spørringer ned i flere spørsmål, og deretter samler et svar på tvers av svarene på alle delspørsmål.
+
+"
+
+## [Lavnivåmoduler](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Dette eksempelet bruker flere lavnivåkomponenter, som fjerner behovet for en faktisk spørringsmotor. Disse komponentene kan brukes hvor som helst, i hvilken som helst applikasjon, eller tilpasses og underklasses for å dekke dine egne behov.
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab5f6482d3a87f5bf7a3f90fa5d8fcb2ad7d0229
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Miljøer
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+LlamaIndex støtter for øyeblikket offisielt NodeJS 18 og NodeJS 20.
+
+## NextJS App Router
+
+Hvis du bruker rutehåndterere/serverløse funksjoner med NextJS App Router, må du bruke NodeJS-modus:
+
+```js
+export const runtime = "nodejs"; // standard
+```
+
+og du må legge til et unntak for pdf-parse i next.config.js-filen din:
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Setter pdf-parse i faktisk NodeJS-modus med NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
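+
+Som en skisse kan en rutehåndterer da se slik ut (filbanen `app/api/query/route.ts` og innholdet er kun et hypotetisk eksempel):
+
+```typescript
+// app/api/query/route.ts (hypotetisk eksempel)
+export const runtime = "nodejs"; // tvinger NodeJS-modus for denne ruten
+
+export async function POST(request: Request) {
+  const { query } = await request.json();
+  // ... bruk llamaindex her, f.eks. en spørringsmotor ...
+  return Response.json({ query });
+}
+```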
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2886d8323425b1d2f6e33c80556b18baa09b8413
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installasjon og oppsett
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+
+Sørg for at du har NodeJS v18 eller nyere.
+
+
+## Bruke create-llama
+
+Den enkleste måten å komme i gang med LlamaIndex på er å bruke `create-llama`. Dette CLI-verktøyet lar deg raskt starte byggingen av en ny LlamaIndex-applikasjon, med alt satt opp for deg.
+
+Bare kjør
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+for å komme i gang. Når appen din er generert, kjør
+
+```bash npm2yarn
+npm run dev
+```
+
+for å starte utviklingsserveren. Du kan deretter besøke [http://localhost:3000](http://localhost:3000) for å se appen din.
+
+## Installasjon fra NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Miljøvariabler
+
+Våre eksempler bruker OpenAI som standard. Du må sette opp din OpenAI-nøkkel slik:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Erstatt med din nøkkel fra https://platform.openai.com/account/api-keys
+```
+
+Hvis du vil ha den lastet automatisk hver gang, legg den til i din .zshrc/.bashrc.
+
+ADVARSEL: Ikke sjekk inn din OpenAI-nøkkel i versjonskontroll.
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..ed8882bfd14bd69208d2cdd3a0f8d43fd531857c
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Hva er LlamaIndex.TS?
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+LlamaIndex.TS er et data-rammeverk for LLM-applikasjoner for å ta inn, strukturere og få tilgang til private eller domenespesifikke data. Mens det også finnes en Python-pakke tilgjengelig (se [her](https://docs.llamaindex.ai/en/stable/)), tilbyr LlamaIndex.TS kjernefunksjoner i en enkel pakke, optimalisert for bruk med TypeScript.
+
+## 🚀 Hvorfor LlamaIndex.TS?
+
+I kjernen tilbyr LLM-er et naturlig språkgrensesnitt mellom mennesker og infererte data. Bredt tilgjengelige modeller er forhåndstrent på enorme mengder offentlig tilgjengelige data, fra Wikipedia og e-postlister til lærebøker og kildekode.
+
+Applikasjoner bygget på toppen av LLM-er krever ofte å supplere disse modellene med private eller domenespesifikke data. Dessverre kan disse dataene være spredt over isolerte applikasjoner og datalagre. De er bak API-er, i SQL-databaser eller fanget i PDF-er og presentasjoner.
+
+Det er her **LlamaIndex.TS** kommer inn.
+
+## 🦙 Hvordan kan LlamaIndex.TS hjelpe?
+
+LlamaIndex.TS tilbyr følgende verktøy:
+
+- **Datainnlasting** tar inn eksisterende `.txt`, `.pdf`, `.csv`, `.md` og `.docx` data direkte.
+- **Dataindekser** strukturerer dataene dine i mellomliggende representasjoner som er enkle og effektive for LLM-er å bruke.
+- **Motorer** gir naturlig språklig tilgang til dataene dine. For eksempel:
+  - Spørringsmotorer er kraftige grensesnitt for henting av kunnskapsforsterket utdata.
+  - Chatmotorer er samtalegrensesnitt for flermeldings "fram og tilbake"-interaksjoner med dataene dine.
+
+## 👨‍👩‍👧‍👦 Hvem er LlamaIndex for?
+
+LlamaIndex.TS gir et kjernesett med verktøy som er essensielle for alle som bygger LLM-apper med JavaScript og TypeScript.
+
+Vår høynivå-API lar nybegynnere bruke LlamaIndex.TS til å ta inn og spørre dataene sine.
+
+For mer komplekse applikasjoner lar våre lavnivå-APIer avanserte brukere tilpasse og utvide hvilken som helst modul - datakoblinger, indekser, hentere og spørringsmotorer - for å passe deres behov.
+
+## Komme i gang
+
+`npm install llamaindex`
+
+Dokumentasjonen vår inkluderer [Installasjonsinstruksjoner](./installation.md) og en [Starterveiledning](./starter.md) for å bygge din første applikasjon.
+
+Når du er oppe og kjører, gir [Høynivåkonsepter](./concepts.md) en oversikt over LlamaIndex sin modulære arkitektur. For mer praktiske eksempler, kan du se gjennom våre [End-to-End veiledninger](./end_to_end.md).
+
+## 🗺️ Økosystem
+
+For å laste ned eller bidra, finn LlamaIndex på:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Fellesskap
+
+Trenger du hjelp? Har du forslag til funksjoner? Bli med i LlamaIndex-fellesskapet:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..f76adb133e58f432a2143171b76354a55da4ed0c
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Chatmotor)
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+Chatmotoren er en rask og enkel måte å chatte med dataene i indeksen din på.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// begynn å chatte
+const response = await chatEngine.chat(query);
+```
+
+## API-referanser
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..30c910084aa173fdd5596b84ac84f0f3dd516f21
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Indeks
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+En indeks er den grunnleggende beholderen og organisasjonen for dataene dine. LlamaIndex.TS støtter to indekser:
+
+- `VectorStoreIndex` - vil sende de øverste-k `Node`-ene til LLM når du genererer et svar. Standardverdien for øverste-k er 2.
+- `SummaryIndex` - vil sende hver `Node` i indeksen til LLM for å generere et svar.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
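+
+En `SummaryIndex` opprettes på tilsvarende måte (en skisse basert på eksempelet ovenfor):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+// Sender hver Node til LLM-en når et svar genereres
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+```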
+
+## API-referanse
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..ec42aa9bf5582ec03328cc8d6c8a0aea3c4e1162
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Leser / Laster
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+LlamaIndex.TS støtter enkel lasting av filer fra mapper ved hjelp av klassen `SimpleDirectoryReader`. For øyeblikket støttes `.txt`, `.pdf`, `.csv`, `.md` og `.docx` filer, med flere planlagt i fremtiden!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API-referanse
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..4fadfc71ed9a0730545e3c86fc83cc823f0a8dc0
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumenter og Noder
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+`Dokumenter` og `Noder` er de grunnleggende byggeklossene i enhver indeks. Selv om API-et for disse objektene er likt, representerer `Dokument`-objekter hele filer, mens `Noder` er mindre deler av det opprinnelige dokumentet som er egnet for LLM og Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "tekst", metadata: { nøkkel: "verdi" } });
+```
+
+## API-referanse
+
+- [Dokument](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..3742390416f2887d35f30c07564895bd6f50cd12
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Spørringsmotor)
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+En spørringsmotor pakker inn en `Retriever` og en `ResponseSynthesizer` i en pipeline, som vil bruke spørringsstrengen til å hente noder og deretter sende dem til LLM for å generere et svar.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("spørringsstreng");
+```
+
+## Under-spørsmål Spørringsmotor
+
+Det grunnleggende konseptet med Under-spørsmål Spørringsmotoren er at den deler opp en enkelt spørring i flere spørringer, får et svar for hver av disse spørringene, og kombinerer deretter de forskjellige svarene til en sammenhengende respons for brukeren. Du kan tenke på det som en "tenk grundig gjennom" teknikk, men med iterasjon over datakildene dine!
+
+### Komme i gang
+
+Den enkleste måten å begynne å prøve Under-spørsmål Spørringsmotoren på er å kjøre subquestion.ts-filen i [eksemplene](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Verktøy
+
+Under-spørsmål Spørringsmotoren er implementert med Verktøy. Den grunnleggende ideen med Verktøy er at de er utførbare alternativer for den store språkmodellen. I dette tilfellet er vår Under-spørsmål Spørringsmotor avhengig av QueryEngineTool, som, som du kanskje har gjettet, er et verktøy for å kjøre spørringer på en Spørringsmotor. Dette gjør det for eksempel mulig å gi modellen muligheten til å spørre forskjellige dokumenter for forskjellige spørsmål. Du kan også forestille deg at Under-spørsmål Spørringsmotoren kan bruke et Verktøy som søker etter noe på nettet eller henter et svar ved hjelp av Wolfram Alpha.
+
+Du kan lære mer om Verktøy ved å se på LlamaIndex Python-dokumentasjonen https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
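+Som en skisse (verktøyets navn og beskrivelse er kun eksempler, og `queryEngine` antas å finnes fra før, f.eks. fra `index.asQueryEngine()`):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// `queryEngine` antas å finnes fra før
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine,
+      metadata: {
+        name: "dokument_verktoy", // eksempelnavn
+        description: "Nyttig for spørsmål om dokumentet",
+      },
+    },
+  ],
+});
+
+const response = await subQuestionEngine.query(
+  "Et sammensatt spørsmål som kan deles opp",
+);
+```
+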
+## API-referanse
+
+- [RetrieverQueryEngine (RetrieverSpørringsmotor)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (UnderSpørsmålSpørringsmotor)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (SpørringsmotorVerktøy)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..28c7dede865df152df24e1ef43ce2d656884e0a9
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Kjernemoduler
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+LlamaIndex.TS tilbyr flere kjernemoduler, delt inn i høynivåmoduler for å komme raskt i gang, og lavnivåmoduler for å tilpasse nøkkelkomponenter etter behov.
+
+## Høynivåmoduler
+
+- [**Dokument**](./high_level/documents_and_nodes.md): Et dokument representerer en tekstfil, PDF-fil eller annen sammenhengende data.
+
+- [**Node**](./high_level/documents_and_nodes.md): Byggeklossen for data. Vanligvis er dette deler av dokumentet som er delt opp i håndterbare biter som er små nok til å mates inn i en embedding-modell og LLM.
+
+- [**Leser/Laster**](./high_level/data_loader.md): En leser eller laster er noe som tar inn et dokument i den virkelige verden og transformerer det til en Dokumentklasse som deretter kan brukes i indeksen og spørringer. Vi støtter for øyeblikket vanlige tekstfiler og PDF-er, med mange flere som kommer.
+
+- [**Indekser**](./high_level/data_index.md): Indekser lagrer nodene og embeddingene av disse nodene.
+
+- [**Spørringsmotor**](./high_level/query_engine.md): Spørringsmotorer tar imot spørringen du legger inn og gir deg resultatet tilbake. Spørringsmotorer kombinerer vanligvis en forhåndsbygd ledetekst med valgte noder fra indeksen din for å gi LLM-en konteksten den trenger for å svare på spørringen din.
+
+- [**ChatEngine**](./high_level/chat_engine.md): En ChatEngine hjelper deg med å bygge en chatbot som vil samhandle med indeksene dine.
+
+## Lavnivåmoduler
+
+- [**LLM**](./low_level/llm.md): LLM-klassen er et enhetlig grensesnitt over en stor språkmodell-leverandør som OpenAI GPT-4, Anthropic Claude eller Meta LLaMA. Du kan lage en underklasse av den for å skrive en tilkobling til din egen store språkmodell.
+
+- [**Embedding**](./low_level/embedding.md): En embedding representeres som en vektor av flyttall. OpenAI's text-embedding-ada-002 er vår standard embedding-modell, og hver embedding den genererer består av 1 536 flyttall. En annen populær embedding-modell er BERT, som bruker 768 flyttall for å representere hver Node. Vi tilbyr en rekke verktøy for å jobbe med embeddings, inkludert 3 alternativer for beregning av likhet og Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Strategier for tekstdeling er utrolig viktige for den generelle effektiviteten til embedding-søket. Det finnes ingen universalløsning; selv om vi har en standardløsning, kan du avhengig av kildedokumentene ønske å bruke forskjellige delingsstørrelser og strategier. For øyeblikket støtter vi deling etter fast størrelse, deling etter fast størrelse med overlappende seksjoner, deling etter setning og deling etter avsnitt. Tekstsplitteren brukes av NodeParser når den deler `Documenter` inn i `Noder`.
+
+- [**Retriever**](./low_level/retriever.md): Retrieveren er det som faktisk velger Nodene som skal hentes fra indeksen. Her kan du ønske å prøve å hente flere eller færre Noder per spørring, endre likhetsfunksjonen din eller opprette din egen retriever for hver enkelt brukssak i applikasjonen din. For eksempel kan du ønske å ha en separat retriever for kodeinnhold vs. tekstinnhold.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizeren er ansvarlig for å ta en spørringsstreng og bruke en liste over `Noder` for å generere et svar. Dette kan ta mange former, som å iterere over all kontekst og forbedre et svar, eller bygge et tre av sammendrag og returnere rot-sammendraget.
+
+- [**Storage**](./low_level/storage.md): På et tidspunkt vil du ønske å lagre indeksene dine, dataene og vektorene i stedet for å kjøre embedding-modellene hver gang. IndexStore, DocStore, VectorStore og KVStore er abstraksjoner som lar deg gjøre det. Sammen danner de StorageContext. For øyeblikket lar vi deg lagre embeddingene dine i filer på filsystemet (eller et virtuelt minnebasert filsystem), men vi legger også aktivt til integrasjoner for Vector Databaser.
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..09898ae4d09114b24ef80a1a309663aa841bc3db
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Innbygging
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+Innbyggingsmodellen i LlamaIndex er ansvarlig for å opprette numeriske representasjoner av tekst. Som standard vil LlamaIndex bruke modellen `text-embedding-ada-002` fra OpenAI.
+
+Dette kan eksplisitt settes i `ServiceContext`-objektet.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API-referanse
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..ac2211e54eb5761abc14fabe59ab51cf8ae0eb35
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+LLM er ansvarlig for å lese tekst og generere naturlige språksvar på spørringer. Som standard bruker LlamaIndex.TS `gpt-3.5-turbo`.
+
+LLM kan eksplisitt settes i `ServiceContext`-objektet.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API-referanse
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..dc2822e80f42141f0cb049e9abe97a95fae76beb
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+`NodeParser` i LlamaIndex er ansvarlig for å dele opp `Document`-objekter i mer håndterbare `Node`-objekter. Når du kaller `.fromDocuments()`, brukes `NodeParser` fra `ServiceContext` til å gjøre dette automatisk for deg. Alternativt kan du bruke den til å dele opp dokumenter på forhånd.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Jeg er 10 år gammel. John er 20 år gammel." }),
+]);
+```
+
+## TextSplitter
+
+Den underliggende tekstsplitteren deler teksten opp i setninger. Den kan også brukes som en frittstående modul for å dele opp rå tekst.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hei verden");
+```
+
+## API-referanse
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..d8746481cc8166d32e5304394d3ceb4adb6d8f22
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,51 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (SvarSyntetisator)
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+SvarSyntetisatoren er ansvarlig for å sende spørringen, nodene og malene for prompten til LLM for å generere et svar. Det er noen få nøkkelmoduser for å generere et svar:
+
+- `Forbedre`: "opprett og forbedre" et svar ved å gå sekvensielt gjennom hver hentet tekstbit.
+  Dette gjør et separat LLM-kall per Node. Bra for mer detaljerte svar.
+- `KompaktOgForbedre` (standard): "kompakt" prompten under hvert LLM-kall ved å fylle så mange tekstbiter som kan passe innenfor maksimal promptstørrelse. Hvis det er for mange biter til å fylle i én prompt, "opprett og forbedre" et svar ved å gå gjennom flere kompakte prompter. Det samme som `forbedre`, men bør resultere i færre LLM-kall.
+- `TreOppsummering`: Gitt en mengde tekstbiter og spørringen, konstruer en trestruktur rekursivt
+  og returner rotnoden som svaret. Bra for oppsummeringsformål.
+- `EnkelSvarBygger`: Gitt en mengde tekstbiter og spørringen, bruk spørringen på hver tekstbit
+  mens du akkumulerer svarene i en matrise. Returnerer en sammenslått streng av alle
+  svarene. Bra når du trenger å kjøre samme spørring separat mot hver tekstbit.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Jeg er 10 år gammel." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John er 20 år gammel." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Hvor gammel er jeg?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API-referanse
+
+- [ResponseSynthesizer (SvarSyntetisator)](../../api/classes/ResponseSynthesizer.md)
+- [Forbedre (Refine)](../../api/classes/Refine.md)
+- [KompaktOgForbedre (CompactAndRefine)](../../api/classes/CompactAndRefine.md)
+- [TreOppsummering (TreeSummarize)](../../api/classes/TreeSummarize.md)
+- [EnkelSvarBygger (SimpleResponseBuilder)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..d5bc96810776616bc9798b0dcb884af7a66dfa4e
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Henter)
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+En retriever i LlamaIndex er det som brukes for å hente `Node`-er fra en indeks ved hjelp av en spørringsstreng. En `VectorIndexRetriever` vil hente de topp-k mest lignende nodene, mens en `SummaryIndexRetriever` vil hente alle nodene uavhengig av spørringen.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Hent noder!
+const nodesWithScore = await retriever.retrieve("spørringsstreng");
+```
+
+## API-referanse
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..3b4e307104a893265f52ab20b9722a2c20a94aaf
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Lagring
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+Lagring i LlamaIndex.TS fungerer automatisk når du har konfigurert et `StorageContext`-objekt. Bare konfigurer `persistDir` og fest det til en indeks.
+
+Akkurat nå støttes bare lagring og lasting fra disk, med planlagte fremtidige integrasjoner!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testtekst" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
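+
+For å laste inn igjen en lagret indeks kan du (som en skisse; se `storageContext`-eksempelet i repoet for den fullstendige flyten) gjenbruke samme `persistDir`:
+
+```typescript
+// Skisse: last indeksen inn igjen fra samme persistDir
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+```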
+
+## API-referanse
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a576d220bfa952915a9387752ab89fa2e8c14c3
--- /dev/null
+++ b/apps/docs/i18n/no/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Startveiledning
+
+`Denne dokumentasjonen har blitt automatisk oversatt og kan inneholde feil. Ikke nøl med å åpne en Pull Request for å foreslå endringer.`
+
+Når du har [installert LlamaIndex.TS ved hjelp av NPM](installation) og satt opp din OpenAI-nøkkel, er du klar til å starte din første app:
+
+I en ny mappe:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # hvis nødvendig
+```
+
+Opprett filen `example.ts`. Denne koden vil laste inn noen eksempeldata, opprette et dokument, indeksere det (som oppretter embedding-vektorer ved hjelp av OpenAI), og deretter opprette en spørringsmotor for å svare på spørsmål om dataene.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Last inn essay fra abramov.txt i Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Opprett Document-objekt med essay
+  const document = new Document({ text: essay });
+
+  // Del opp teksten og opprett embedding-vektorer. Lagre dem i en VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Spørr indeksen
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Hva gjorde forfatteren på college?",
+  );
+
+  // Skriv ut responsen
+  console.log(response.toString());
+}
+
+main();
+```
+
+Deretter kan du kjøre det ved å bruke
+
+```bash
+npx ts-node example.ts
+```
+
+Klar til å lære mer? Sjekk ut vår NextJS-lekeplass på https://llama-playground.vercel.app/. Kildekoden er tilgjengelig på https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..178cf1c91539c9e5873feafb6ef229cc98a0d8d3
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Wysokopoziomowe koncepcje
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+LlamaIndex.TS pomaga w budowaniu aplikacji opartych na LLM (np. pytania i odpowiedzi, chatbot) na podstawie niestandardowych danych.
+
+W tym przewodniku po wysokopoziomowych koncepcjach dowiesz się:
+
+- jak LLM może odpowiadać na pytania przy użyciu twoich własnych danych.
+- kluczowe koncepcje i moduły w LlamaIndex.TS do tworzenia własnego potoku zapytań.
+
+## Odpowiadanie na pytania w oparciu o Twoje dane
+
+LlamaIndex używa dwuetapowej metody podczas korzystania z LLM wraz z danymi:
+
+1. **etap indeksowania**: przygotowanie bazy wiedzy, oraz
+2. **etap zapytania**: pobieranie odpowiedniego kontekstu z wiedzy, aby pomóc LLM w udzieleniu odpowiedzi na pytanie.
+
+![](./_static/concepts/rag.jpg)
+
+Ten proces jest również znany jako Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS dostarcza niezbędnych narzędzi, aby oba etapy były bardzo proste.
+
+Przeanalizujmy teraz każdy etap szczegółowo.
+
+### Etap indeksowania
+
+LlamaIndex.TS pomaga w przygotowaniu bazy wiedzy za pomocą zestawu konektorów danych i indeksów.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Ładowarki danych**](./modules/high_level/data_loader.md):
+Konektor danych (tj. `Reader`) pobiera dane z różnych źródeł danych i formatów danych do prostej reprezentacji `Document` (tekst i proste metadane).
+
+[**Dokumenty / Węzły**](./modules/high_level/documents_and_nodes.md): `Document` to ogólny kontener dla dowolnego źródła danych - na przykład pliku PDF, wyniku API lub pobranych danych z bazy danych. `Node` to atomowa jednostka danych w LlamaIndex i reprezentuje "kawałek" źródłowego `Document`. Jest to bogata reprezentacja, która zawiera metadane i relacje (do innych węzłów), umożliwiające dokładne i wyraźne operacje wyszukiwania.
+
+[**Indeksy danych**](./modules/high_level/data_index.md):
+Po załadowaniu danych LlamaIndex pomaga w indeksowaniu danych w formacie, który jest łatwy do pobrania.
+
+Pod spodem LlamaIndex analizuje surowe dokumenty na pośrednie reprezentacje, oblicza osadzenia wektorowe i przechowuje Twoje dane w pamięci lub na dysku.
+
+"
+
+### Etap zapytania
+
+W etapie zapytania, potok zapytań pobiera najbardziej odpowiedni kontekst na podstawie zapytania użytkownika,
+a następnie przekazuje go do LLM (wraz z zapytaniem) w celu syntezowania odpowiedzi.
+
+Daje to LLM aktualną wiedzę, która nie znajduje się w jego oryginalnych danych treningowych
+(co również zmniejsza halucynacje).
+
+Największym wyzwaniem na etapie zapytania jest pobieranie, zarządzanie i wnioskowanie na podstawie (potencjalnie wielu) baz wiedzy.
+
+LlamaIndex dostarcza moduły, które można komponować, aby pomóc w budowaniu i integracji potoków RAG dla pytań i odpowiedzi (silnik zapytań), chatbotów (silnik chatu) lub jako część agenta.
+
+Te podstawowe elementy można dostosować, aby odzwierciedlały preferencje rankingowe, a także komponować w celu wnioskowania na podstawie wielu baz wiedzy w strukturalny sposób.
+
+![](./_static/concepts/querying.jpg)
+
+#### Podstawowe elementy
+
+[**Retrievers**](./modules/low_level/retriever.md):
+Retriever definiuje sposób efektywnego pobierania odpowiedniego kontekstu z bazy wiedzy (tj. indeksu) na podstawie zapytania.
+Konkretna logika pobierania różni się w zależności od różnych indeksów, najpopularniejszym jest gęste pobieranie na podstawie indeksu wektorowego.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+Response Synthesizer generuje odpowiedź na podstawie LLM, używając zapytania użytkownika i określonego zestawu pobranych fragmentów tekstu.
+
+"
+
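+Minimalny szkic (dla ilustracji zakładamy istniejący `index`, np. `VectorStoreIndex`) pokazujący, jak te elementy można połączyć ręcznie:
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// Pobierz odpowiednie węzły dla zapytania (zakładamy istniejący `index`)
+const retriever = index.asRetriever();
+const nodesWithScore = await retriever.retrieve("Ile mam lat?");
+
+// Zsyntetyzuj odpowiedź na podstawie pobranych węzłów
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "Ile mam lat?",
+  nodesWithScore,
+);
+```
+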
+#### Potoki
+
+[**Silniki zapytań**](./modules/high_level/query_engine.md):
+Silnik zapytań to potok od początku do końca, który umożliwia zadawanie pytań na podstawie danych.
+Przyjmuje zapytanie w naturalnym języku i zwraca odpowiedź wraz z pobranym kontekstem referencyjnym przekazanym do LLM.
+
+[**Silniki chatu**](./modules/high_level/chat_engine.md):
+Silnik chatu to potok od początku do końca, który umożliwia prowadzenie rozmowy z danymi
+(wielokrotne pytania i odpowiedzi zamiast pojedynczego pytania i odpowiedzi).
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..f6fe052e0a4860de717bf02fcfb3c3dd628734e4
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 4
+---
+
+# Przykłady od początku do końca
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+W repozytorium zawieramy kilka przykładów od początku do końca, korzystając z LlamaIndex.TS.
+
+Sprawdź poniższe przykłady lub wypróbuj je i uzupełnij w kilka minut za pomocą interaktywnych samouczków Github Codespace udostępnionych przez Dev-Docs [tutaj](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Silnik czatu](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Przeczytaj plik i porozmawiaj o nim z LLM.
+
+## [Indeks wektorowy](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Utwórz indeks wektorowy i zapytaj go. Indeks wektorowy będzie używał osadzeń do pobrania k najbardziej istotnych węzłów. Domyślnie, k wynosi 2.
+
+"
+
+## [Indeks podsumowania](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Utwórz indeks listy i zapytaj go. Ten przykład wykorzystuje również `LLMRetriever`, który używa LLM do wyboru najlepszych węzłów do użycia podczas generowania odpowiedzi.
+
+"
+
+## [Zapisz / Wczytaj indeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Utwórz i wczytaj indeks wektorowy. Zapisywanie na dysku w LlamaIndex.TS następuje automatycznie po utworzeniu obiektu kontekstu przechowywania.
+
+"
+
+## [Niestandardowy indeks wektorowy](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Utwórz indeks wektorowy i zapytaj go, konfigurując jednocześnie `LLM`, `ServiceContext` i `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Utwórz OpenAI LLM i użyj go bezpośrednio do czatu.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Utwórz Llama-2 LLM i użyj go bezpośrednio do czatu.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Wykorzystuje `SubQuestionQueryEngine`, który dzieli złożone zapytania na wiele pytań, a następnie agreguje odpowiedzi na wszystkie podpytania.
+
+"
+
+## [Moduły na niskim poziomie](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Ten przykład wykorzystuje kilka komponentów na niskim poziomie, co eliminuje potrzebę posiadania rzeczywistego silnika zapytań. Te komponenty mogą być używane w dowolnym miejscu, w dowolnej aplikacji, lub dostosowane i podklasowane, aby spełnić Twoje własne potrzeby.
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..96b7206709900cee17cd1aca72045371cf54532f
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Środowiska
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+LlamaIndex obecnie oficjalnie obsługuje NodeJS 18 i NodeJS 20.
+
+## Router aplikacji NextJS
+
+Jeśli korzystasz z procedur obsługi tras (route handlers) NextJS App Router lub funkcji bezserwerowych, musisz użyć trybu NodeJS:
+
+```js
+export const runtime = "nodejs"; // domyślnie
+```
+
+i będziesz musiał dodać wyjątek dla pdf-parse w pliku next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Umieszcza pdf-parse w rzeczywistym trybie NodeJS z NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3a49465b5dbf6f5bbeea29af31950ec50b87b264
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Instalacja i konfiguracja
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+
+Upewnij się, że masz zainstalowany NodeJS w wersji 18 lub nowszej.
+
+
+## Użycie create-llama
+
+Najprostszym sposobem na rozpoczęcie pracy z LlamaIndex jest użycie `create-llama`. Narzędzie CLI umożliwia szybkie rozpoczęcie tworzenia nowej aplikacji LlamaIndex, z wszystkim już skonfigurowanym.
+
+Wystarczy uruchomić
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+aby rozpocząć. Po wygenerowaniu aplikacji uruchom
+
+```bash npm2yarn
+npm run dev
+```
+
+aby uruchomić serwer deweloperski. Następnie możesz odwiedzić [http://localhost:3000](http://localhost:3000), aby zobaczyć swoją aplikację.
+
+## Instalacja z NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Zmienne środowiskowe
+
+Nasze przykłady domyślnie korzystają z OpenAI. Musisz skonfigurować swój klucz OpenAI w następujący sposób:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Zastąp swoim kluczem z https://platform.openai.com/account/api-keys
+```
+
+Jeśli chcesz, aby był automatycznie wczytywany za każdym razem, dodaj go do pliku .zshrc/.bashrc.
+
+OSTRZEŻENIE: Nie dodawaj swojego klucza OpenAI do systemu kontroli wersji.
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..499020ea08cb3a77214faefd5e06c365d9a0f4b3
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Czym jest LlamaIndex.TS?
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+LlamaIndex.TS to framework danych dla aplikacji LLM, który umożliwia pobieranie, strukturyzację i dostęp do prywatnych lub specyficznych dla domeny danych. Chociaż dostępny jest również pakiet Python (patrz [tutaj](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS oferuje podstawowe funkcje w prostym pakiecie, zoptymalizowanym do użytku z TypeScript.
+
+## 🚀 Dlaczego LlamaIndex.TS?
+
+W swojej istocie LLM-y oferują naturalny interfejs językowy między ludźmi a wywnioskowanymi danymi. Powszechnie dostępne modele są wstępnie przeszkolone na ogromnych ilościach publicznie dostępnych danych, od Wikipedii i list mailingowych po podręczniki i kod źródłowy.
+
+Aplikacje oparte na LLM-ach często wymagają rozszerzenia tych modeli o prywatne lub specyficzne dla domeny dane. Niestety, te dane mogą być rozproszone w różnych aplikacjach i magazynach danych. Mogą znajdować się za interfejsami API, w bazach danych SQL lub być uwięzione w plikach PDF i prezentacjach.
+
+Właśnie tutaj pojawia się **LlamaIndex.TS**.
+
+## 🦙 Jak LlamaIndex.TS może pomóc?
+
+LlamaIndex.TS udostępnia następujące narzędzia:
+
+- **Wczytywanie danych** - umożliwia wczytywanie istniejących danych w formatach `.txt`, `.pdf`, `.csv`, `.md` i `.docx`
+- **Indeksy danych** - strukturyzują dane w pośrednich reprezentacjach, które są łatwe i wydajne do wykorzystania przez LLM.
+- **Silniki** - zapewniają dostęp do danych za pomocą języka naturalnego. Na przykład:
+  - Silniki zapytań to potężne interfejsy do pobierania wzbogaconych wiedzą wyników.
+  - Silniki czatów to interfejsy konwersacyjne umożliwiające interakcje "tam i z powrotem" z danymi.
+
+## 👨‍👩‍👧‍👦 Dla kogo jest LlamaIndex?
+
+LlamaIndex.TS dostarcza podstawowy zestaw narzędzi, niezbędnych dla wszystkich tworzących aplikacje LLM przy użyciu JavaScript i TypeScript.
+
+Nasze API na wysokim poziomie umożliwia początkującym użytkownikom korzystanie z LlamaIndex.TS do przetwarzania i wyszukiwania danych.
+
+Dla bardziej złożonych aplikacji nasze API niższego poziomu umożliwia zaawansowanym użytkownikom dostosowanie i rozszerzenie dowolnego modułu - łączników danych, indeksów, retrieverów i silników zapytań - aby dopasować je do swoich potrzeb.
+
+## Rozpoczęcie pracy
+
+`npm install llamaindex`
+
+Nasza dokumentacja zawiera [Instrukcje instalacji](./installation.md) oraz [Samouczek dla początkujących](./starter.md), który pomoże Ci zbudować swoją pierwszą aplikację.
+
+Gdy już będziesz gotowy, [Wysokopoziomowe koncepcje](./concepts.md) zawierają przegląd modułowej architektury LlamaIndex. Jeśli chcesz zobaczyć praktyczne przykłady, zapoznaj się z naszymi [Samouczkami od początku do końca](./end_to_end.md).
+
+## 🗺️ Ekosystem
+
+Aby pobrać lub przyczynić się do projektu, odwiedź LlamaIndex na:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Społeczność
+
+Potrzebujesz pomocy? Masz sugestię dotyczącą funkcji? Dołącz do społeczności LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..2284a8c5b9e2f60209fb548bf9c8a05f28ee68b7
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Silnik czatu)
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+Silnik czatu to szybki i prosty sposób na rozmowę z danymi w Twoim indeksie.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// rozpocznij rozmowę
+const response = await chatEngine.chat(query);
+```
+
+## Odwołania do interfejsu API
+
+- [ContextChatEngine (Silnik czatu kontekstowego)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (Silnik czatu skondensowanego pytania)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..30018e81b550dac1d220b9c982add0434df88556
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Indeks
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+Indeks to podstawowy kontener i organizacja dla Twoich danych. LlamaIndex.TS obsługuje dwa indeksy:
+
+- `VectorStoreIndex` - wysyła do LLM k najlepszych `Node` w celu wygenerowania odpowiedzi. Domyślna wartość k wynosi 2.
+- `SummaryIndex` - wysyła do LLM każdy `Node` w indeksie w celu wygenerowania odpowiedzi.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
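+
+`SummaryIndex` tworzy się analogicznie (szkic oparty na powyższym przykładzie):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+// Wysyła każdy Node do LLM podczas generowania odpowiedzi
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+```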
+
+## Dokumentacja API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..7d9661cc33d186effada1eb5c9a46cc4324a4b41
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Czytnik / Ładowarka
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+LlamaIndex.TS umożliwia łatwe wczytywanie plików z folderów za pomocą klasy `SimpleDirectoryReader`. Obecnie obsługiwane są pliki `.txt`, `.pdf`, `.csv`, `.md` i `.docx`, a w przyszłości planowane jest dodanie obsługi kolejnych formatów!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Dokumentacja API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..1431b11fa18186e1d6e33d4005cfde4525b73e07
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Dokumenty i Węzły
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+`Dokumenty` i `Węzły` są podstawowymi elementami budowy każdego indeksu. Podczas gdy API dla tych obiektów jest podobne, obiekty `Dokument` reprezentują całe pliki, podczas gdy `Węzły` są mniejszymi fragmentami tego oryginalnego dokumentu, które są odpowiednie dla LLM i Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "tekst", metadata: { klucz: "wartość" } });
+```
+
+## Dokumentacja API
+
+- [Dokument](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..00318b096f5d1dced626674a404d12b74c6629fc
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,44 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Silnik zapytań)
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+Silnik zapytań łączy `Retriever` oraz `ResponseSynthesizer` w jeden potok, który używa ciągu zapytania do pobrania węzłów, a następnie wysyła je do LLM w celu wygenerowania odpowiedzi.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("ciąg zapytań");
+```
+
+## Silnik zapytań podpytań (Sub Question Query Engine)
+
+Podstawową koncepcją Silnika zapytań podpytań jest podzielenie pojedynczego zapytania na wiele zapytań, uzyskanie odpowiedzi na każde z tych zapytań, a następnie połączenie tych różnych odpowiedzi w jedną spójną odpowiedź dla użytkownika. Można to porównać do techniki "przemyśl to krok po kroku", ale iterującej po źródłach danych!
+
+### Rozpoczęcie pracy
+
+Najłatwiej wypróbować Silnik zapytań podpytań, uruchamiając plik subquestion.ts z folderu [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Narzędzia
+
+Silnik zapytań podpytań jest implementowany za pomocą narzędzi (Tools). Podstawowa idea narzędzi polega na tym, że są to wykonywalne opcje dla dużego modelu językowego. W tym przypadku nasz Silnik zapytań podpytań opiera się na QueryEngineTool, które, jak można się domyślić, jest narzędziem do wykonywania zapytań na Silniku zapytań. Daje to modelowi możliwość kierowania różnych pytań do różnych dokumentów. Można również sobie wyobrazić, że Silnik zapytań podpytań używa narzędzia, które wyszukuje coś w sieci lub uzyskuje odpowiedź za pomocą Wolfram Alpha.
+
+Więcej informacji na temat narzędzi można znaleźć w dokumentacji Pythona LlamaIndex https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
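+
+Poniżej krótki szkic pokazujący, jak złożyć Silnik zapytań podpytań z narzędzia QueryEngineTool; opiera się on na pliku subquestion.ts z przykładów, a nazwa i opis narzędzia są tu wyłącznie ilustracyjne:
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+// Zbuduj zwykły indeks i silnik zapytań dla pojedynczego źródła danych
+const document = new Document({ text: "Tekst dokumentu..." });
+const index = await VectorStoreIndex.fromDocuments([document]);
+
+// Opakuj silnik zapytań w QueryEngineTool; opis podpowiada modelowi,
+// kiedy skorzystać z tego narzędzia
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "dokument",
+        description: "Odpowiada na pytania dotyczące tego dokumentu",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query("Złożone pytanie do danych");
+```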
+
+"
+
+## Dokumentacja interfejsu API
+
+- [RetrieverQueryEngine (Silnik zapytań Retriever)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Silnik zapytań podrzędnych)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Narzędzie silnika zapytań)](../../api/interfaces/QueryEngineTool.md)
+
+"
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..6804a9cae61b7497c1e36103ef960291e791667c
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Moduły podstawowe
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+LlamaIndex.TS oferuje kilka modułów podstawowych, podzielonych na moduły wysokiego poziomu, które umożliwiają szybkie rozpoczęcie pracy, oraz moduły niskiego poziomu, które umożliwiają dostosowanie kluczowych komponentów według potrzeb.
+
+## Moduły wysokiego poziomu
+
+- [**Dokument**](./high_level/documents_and_nodes.md): Dokument reprezentuje plik tekstowy, plik PDF lub inny ciągły fragment danych.
+
+- [**Węzeł**](./high_level/documents_and_nodes.md): Podstawowy element budujący dane. Najczęściej są to części dokumentu podzielone na zarządzalne fragmenty, które są wystarczająco małe, aby można je było przekazać do modelu osadzającego i LLM.
+
+- [**Czytnik/Ładowarka**](./high_level/data_loader.md): Czytnik lub ładowarka to narzędzie, które pobiera dokument ze świata rzeczywistego i przekształca go w klasę Dokumentu, która może być używana w indeksie i zapytaniach. Obecnie obsługujemy pliki tekstowe i pliki PDF, a wkrótce pojawi się wiele innych formatów.
+
+- [**Indeksy**](./high_level/data_index.md): Indeksy przechowują węzły i osadzenia tych węzłów.
+
+- [**Silnik zapytań**](./high_level/query_engine.md): Silniki zapytań generują zapytanie, które wprowadzasz i zwracają wynik. Silniki zapytań zazwyczaj łączą gotowe podpowiedzi z wybranymi węzłami z indeksu, aby dostarczyć LLM kontekstu, który jest potrzebny do odpowiedzi na Twoje zapytanie.
+
+- [**Silnik czatu**](./high_level/chat_engine.md): Silnik czatu pomaga w budowie chatbota, który będzie współpracować z Twoimi indeksami.
+
+## Moduł niskiego poziomu
+
+- [**LLM**](./low_level/llm.md): Klasa LLM to zintegrowane interfejsy dla dostawców dużych modeli językowych, takich jak OpenAI GPT-4, Anthropic Claude lub Meta LLaMA. Można ją podklasować, aby napisać łącznik do własnego dużego modelu językowego.
+
+- [**Embedding**](./low_level/embedding.md): Osadzenie jest reprezentowane jako wektor liczb zmiennoprzecinkowych. Domyślnym modelem osadzania jest `text-embedding-ada-002` od OpenAI, a każde wygenerowane osadzenie składa się z 1536 liczb zmiennoprzecinkowych. Innym popularnym modelem osadzania jest BERT, który używa 768 liczb zmiennoprzecinkowych do reprezentacji każdego węzła. Oferujemy wiele narzędzi do pracy z osadzeniami, w tym 3 opcje obliczania podobieństwa i Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Strategie podziału tekstu są niezwykle ważne dla ogólnej skuteczności wyszukiwania osadzeń. Obecnie, mimo że mamy domyślne ustawienia, nie ma jednego rozwiązania, które pasuje do wszystkich przypadków. W zależności od dokumentów źródłowych, możesz chcieć użyć różnych rozmiarów i strategii podziału. Obecnie obsługujemy podział według stałego rozmiaru, podział według stałego rozmiaru z nakładającymi się sekcjami, podział według zdań i podział według akapitów. Podział tekstu jest używany przez NodeParser do dzielenia `Dokumentów` na `Węzły`.
+
+- [**Retriever**](./low_level/retriever.md): Retriever to element, który faktycznie wybiera Węzły do pobrania z indeksu. Tutaj możesz spróbować pobierać więcej lub mniej Węzłów na zapytanie, zmieniać funkcję podobieństwa lub tworzyć własne retrievery dla każdego indywidualnego przypadku w aplikacji. Na przykład możesz chcieć mieć oddzielny retriever dla treści kodu i treści tekstowych.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer jest odpowiedzialny za przetwarzanie ciągu zapytania i używanie listy `Węzłów` do generowania odpowiedzi. Może to przybrać wiele form, na przykład iterowanie po całym kontekście i dopracowywanie odpowiedzi lub budowanie drzewa podsumowań i zwracanie korzenia podsumowania.
+
+- [**Storage**](./low_level/storage.md): W pewnym momencie będziesz chciał przechowywać swoje indeksy, dane i wektory, zamiast uruchamiać modele osadzające za każdym razem. IndexStore, DocStore, VectorStore i KVStore to abstrakcje, które umożliwiają to. Razem tworzą StorageContext. Obecnie umożliwiamy zapisywanie osadzeń w plikach na systemie plików (lub wirtualnym systemie plików w pamięci), ale aktywnie dodajemy również integracje z bazami danych wektorowymi.
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..d1e19158622a06381573b15ef4b33d31764b819f
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Osadzanie
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+Model osadzania w LlamaIndex jest odpowiedzialny za tworzenie numerycznych reprezentacji tekstu. Domyślnie LlamaIndex będzie używał modelu `text-embedding-ada-002` z OpenAI.
+
+Można to jawnie ustawić w obiekcie `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Dokumentacja interfejsu API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
+
+"
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..b05fb363a886e4e241e3857230795efcfcd0a05b
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+LLM jest odpowiedzialny za odczytywanie tekstu i generowanie naturalnych odpowiedzi językowych na zapytania. Domyślnie LlamaIndex.TS używa `gpt-3.5-turbo`.
+
+LLM można jawnie ustawić w obiekcie `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## Dokumentacja interfejsu API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
+
+"
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..02deb2c5fe22f1e5fa3071e130bdaba49ee7afcb
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+`NodeParser` w LlamaIndex jest odpowiedzialny za podział obiektów `Document` na bardziej zarządzalne obiekty `Node`. Gdy wywołasz `.fromDocuments()`, `NodeParser` z `ServiceContext` jest automatycznie używany do tego. Alternatywnie, możesz go użyć do podziału dokumentów z wyprzedzeniem.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Mam 10 lat. John ma 20 lat." }),
+]);
+```
+
+## TextSplitter
+
+Podstawowy moduł podziału tekstu dzieli tekst na zdania. Może być również używany jako samodzielny moduł do dzielenia surowego tekstu.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Witaj Świecie");
+```
+
+"
+
+## Dokumentacja API
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
+
+"
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4498674e4e4f4e2557ef7c1aa581d51d846176d
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (SyntezatorOdpowiedzi)
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+ResponseSynthesizer jest odpowiedzialny za wysyłanie zapytania, węzłów i szablonów promptów do LLM w celu wygenerowania odpowiedzi. Istnieje kilka kluczowych trybów generowania odpowiedzi:
+
+- `Refine` (Udoskonalanie): "tworzenie i udoskonalanie" odpowiedzi poprzez sekwencyjne przechodzenie przez każdy pobrany fragment tekstu. Wykonuje osobne wywołanie LLM dla każdego węzła. Dobry dla bardziej szczegółowych odpowiedzi.
+- `CompactAndRefine` (KompaktowanieIUdoskonalanie) (domyślny): "kompaktowanie" promptu podczas każdego wywołania LLM poprzez umieszczenie jak największej liczby fragmentów tekstu, które mogą zmieścić się w maksymalnym rozmiarze promptu. Jeśli jest zbyt wiele fragmentów, aby zmieściły się w jednym prompcie, "tworzy i udoskonala" odpowiedź, przechodząc przez wiele kompaktowych promptów. To samo co `refine`, ale powinno skutkować mniejszą liczbą wywołań LLM.
+- `TreeSummarize` (PodsumowanieDrzewa): Na podstawie zestawu fragmentów tekstu i zapytania rekurencyjnie konstruuje drzewo i zwraca węzeł korzenia jako odpowiedź. Dobry do celów podsumowania.
+- `SimpleResponseBuilder` (ProstyBudowniczyOdpowiedzi): Na podstawie zestawu fragmentów tekstu i zapytania stosuje zapytanie do każdego fragmentu tekstu, gromadząc odpowiedzi w tablicy. Zwraca połączony ciąg wszystkich odpowiedzi. Dobry, gdy potrzebujesz osobno uruchomić to samo zapytanie dla każdego fragmentu tekstu.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Mam 10 lat." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John ma 20 lat." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Ile mam lat?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## Dokumentacja interfejsu API
+
+- [ResponseSynthesizer (SyntezatorOdpowiedzi)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Udoskonalanie)](../../api/classes/Refine.md)
+- [CompactAndRefine (KompaktowanieIUdoskonalanie)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (PodsumowanieDrzewa)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (ProstyBudowniczyOdpowiedzi)](../../api/classes/SimpleResponseBuilder.md)
+
+"
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..15f5e38403494c2c9c44bb51709546a058b34d62
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Pobieracz)
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+Pobieracz w LlamaIndex służy do pobierania węzłów (`Node`) z indeksu za pomocą ciągu zapytania. Pobieracz `VectorIndexRetriever` pobierze k najbardziej podobnych węzłów. Natomiast pobieracz `SummaryIndexRetriever` pobierze wszystkie węzły bez względu na zapytanie.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Pobierz węzły!
+const nodesWithScore = await retriever.retrieve("ciąg zapytania");
+```
+
+## Dokumentacja interfejsu API
+
+- [SummaryIndexRetriever (Pobieracz indeksu podsumowania)](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever (Pobieracz indeksu podsumowania LLM)](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever (Pobieracz indeksu wektorowego)](../../api/classes/VectorIndexRetriever.md)
+
+"
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..2e22b5367a48bb94561f43bf935b59df252874b9
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Przechowywanie danych
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+Przechowywanie danych w LlamaIndex.TS działa automatycznie po skonfigurowaniu obiektu `StorageContext`. Wystarczy skonfigurować `persistDir` i dołączyć go do indeksu.
+
+Obecnie obsługiwane jest tylko zapisywanie i wczytywanie z dysku, ale planowane są integracje z innymi źródłami!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testowy tekst" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
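+
+Aby później wczytać zapisany indeks, wystarczy utworzyć `StorageContext` wskazujący na ten sam katalog; poniższy szkic zakłada, że metoda `VectorStoreIndex.init` jest dostępna w używanej wersji biblioteki:
+
+```typescript
+// Wczytaj indeks zapisany wcześniej w katalogu ./storage
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+```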
+
+## Dokumentacja API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
+
+"
diff --git a/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ce1ab70876700922780d63477d42f67103ea1a2
--- /dev/null
+++ b/apps/docs/i18n/pl/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Samouczek dla początkujących
+
+`Ta dokumentacja została przetłumaczona automatycznie i może zawierać błędy. Nie wahaj się otworzyć Pull Request, aby zaproponować zmiany.`
+
+Po zainstalowaniu [LlamaIndex.TS przy użyciu NPM](installation) i skonfigurowaniu klucza OpenAI, jesteś gotowy, aby rozpocząć pracę nad swoją pierwszą aplikacją:
+
+W nowym folderze:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # jeśli jest to konieczne
+```
+
+Utwórz plik `example.ts`. Ten kod załaduje przykładowe dane, utworzy dokument, zaindeksuje go (co utworzy osadzenia przy użyciu OpenAI), a następnie utworzy silnik zapytań, który będzie odpowiadał na pytania dotyczące danych.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Załaduj esej z pliku abramov.txt w Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Utwórz obiekt dokumentu z esejem
+  const document = new Document({ text: essay });
+
+  // Podziel tekst i utwórz osadzenia. Przechowuj je w indeksie VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Zapytaj indeks
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("Co autor robił na studiach?");
+
+  // Wyświetl odpowiedź
+  console.log(response.toString());
+}
+
+main();
+```
+
+Następnie możesz go uruchomić za pomocą
+
+```bash
+npx ts-node example.ts
+```
+
+Gotowy, aby dowiedzieć się więcej? Sprawdź nasz playground NextJS pod adresem https://llama-playground.vercel.app/. Kod źródłowy jest dostępny pod adresem https://github.com/run-llama/ts-playground
+
+"
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ffa0ef972ba73fa302ed5a0617bfc6939fe3573
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,84 @@
+---
+sidebar_position: 3
+---
+
+# Conceitos de Alto Nível
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+LlamaIndex.TS ajuda você a construir aplicativos com LLM (por exemplo, Q&A, chatbot) sobre dados personalizados.
+
+Neste guia de conceitos de alto nível, você aprenderá:
+
+- como um LLM pode responder perguntas usando seus próprios dados.
+- conceitos-chave e módulos em LlamaIndex.TS para compor sua própria sequência de consulta.
+
+## Responder Perguntas em Seus Dados
+
+LlamaIndex utiliza um método de duas etapas ao usar um LLM com seus dados:
+
+1. **etapa de indexação**: preparando uma base de conhecimento, e
+2. **etapa de consulta**: recuperando o contexto relevante do conhecimento para ajudar o LLM a responder a uma pergunta.
+
+![](./_static/concepts/rag.jpg)
+
+Esse processo também é conhecido como Geração Aprimorada por Recuperação (RAG).
+
+LlamaIndex.TS fornece o conjunto de ferramentas essenciais para tornar ambas as etapas super fáceis.
+
+Vamos explorar cada etapa em detalhes.
+
+### Etapa de Indexação
+
+LlamaIndex.TS ajuda você a preparar a base de conhecimento com uma série de conectores de dados e índices.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Carregadores de Dados**](./modules/high_level/data_loader.md):
+Um conector de dados (ou seja, `Reader`) ingere dados de diferentes fontes e formatos de dados em uma representação simples de `Documento` (texto e metadados simples).
+
+[**Documentos / Nós**](./modules/high_level/documents_and_nodes.md): Um `Documento` é um contêiner genérico para qualquer fonte de dados - por exemplo, um PDF, uma saída de API ou dados recuperados de um banco de dados. Um `Nó` é a unidade atômica de dados no LlamaIndex e representa um "pedaço" de um `Documento` de origem. É uma representação rica que inclui metadados e relacionamentos (com outros nós) para permitir operações de recuperação precisas e expressivas.
+
+[**Índices de Dados**](./modules/high_level/data_index.md):
+Depois de ingerir seus dados, o LlamaIndex ajuda você a indexar os dados em um formato fácil de recuperar.
+
+Por baixo dos panos, o LlamaIndex analisa os documentos brutos em representações intermediárias, calcula incorporações vetoriais e armazena seus dados na memória ou em disco.
+
+### Etapa de Consulta
+
+Na etapa de consulta, a sequência de consulta recupera o contexto mais relevante dada uma consulta do usuário
+e o passa para o LLM (juntamente com a consulta) para sintetizar uma resposta.
+
+Isso fornece ao LLM um conhecimento atualizado que não está em seus dados de treinamento originais
+(reduzindo também as alucinações).
+
+O desafio chave na etapa de consulta é a recuperação, orquestração e raciocínio sobre bases de conhecimento (potencialmente muitas).
+
+LlamaIndex fornece módulos componíveis que ajudam você a construir e integrar sequências de consulta RAG para Q&A (motor de consulta), chatbot (motor de chat) ou como parte de um agente.
+
+Esses blocos de construção podem ser personalizados para refletir preferências de classificação, bem como compostos para raciocinar sobre várias bases de conhecimento de maneira estruturada.
+
+![](./_static/concepts/querying.jpg)
+
+#### Blocos de Construção
+
+[**Recuperadores**](./modules/low_level/retriever.md):
+Um recuperador define como recuperar eficientemente o contexto relevante de uma base de conhecimento (ou seja, índice) quando fornecida uma consulta.
+A lógica específica de recuperação difere para diferentes índices, sendo a mais popular a recuperação densa em relação a um índice vetorial.
+
+[**Sintetizadores de Resposta**](./modules/low_level/response_synthesizer.md):
+Um sintetizador de resposta gera uma resposta a partir de um LLM, usando uma consulta do usuário e um conjunto dado de trechos de texto recuperados.
+
+"
+
+#### Sequências de Consulta
+
+[**Motores de Consulta**](./modules/high_level/query_engine.md):
+Um motor de consulta é uma sequência de ponta a ponta que permite fazer perguntas sobre seus dados.
+Ele recebe uma consulta em linguagem natural e retorna uma resposta, juntamente com o contexto de referência recuperado e passado para o LLM.
+
+[**Motores de Chat**](./modules/high_level/chat_engine.md):
+Um motor de chat é uma sequência de ponta a ponta para ter uma conversa com seus dados
+(múltiplas idas e vindas em vez de uma única pergunta e resposta).
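+
+A título de ilustração, um esboço mínimo (assumindo a API do pacote `llamaindex` mostrada no tutorial inicial) de como essas etapas se combinam:
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// etapa de indexação: ingerir os dados e construir um índice
+const document = new Document({ text: "Seus dados aqui" });
+const index = await VectorStoreIndex.fromDocuments([document]);
+
+// etapa de consulta: recuperar o contexto relevante e sintetizar uma resposta
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("Sua pergunta aqui");
+console.log(response.toString());
+```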
+
+"
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..98ae4b12702f9f8cf75c3bf73e0553364fa8f0dd
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,53 @@
+---
+sidebar_position: 4
+---
+
+# Exemplos de Ponta a Ponta
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+Incluímos vários exemplos de ponta a ponta usando o LlamaIndex.TS no repositório.
+
+Confira os exemplos abaixo ou experimente-os e complete-os em minutos com tutoriais interativos do Github Codespace fornecidos pelo Dev-Docs [aqui](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine (Motor de Chat)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Leia um arquivo e converse sobre ele com o LLM.
+
+## [Índice Vetorial](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Crie um índice vetorial e faça consultas nele. O índice vetorial usará embeddings para buscar os k nós mais relevantes. Por padrão, k é 2.
+
+## [Índice de Resumo](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Crie um índice de lista e consulte-o. Este exemplo também usa o `LLMRetriever`, que usará o LLM para selecionar os melhores nós a serem usados ao gerar uma resposta.
+
+## [Salvar / Carregar um Índice](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Crie e carregue um índice de vetor. A persistência no disco no LlamaIndex.TS acontece automaticamente assim que um objeto de contexto de armazenamento é criado.
+
+"
+
+## [Índice Vetorial Personalizado](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Crie um índice vetorial e consulte-o, ao mesmo tempo em que configura o `LLM`, o `ServiceContext` e o `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Crie um OpenAI LLM e use-o diretamente para conversar.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Crie um Llama-2 LLM e use-o diretamente para bate-papo.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Utiliza o `SubQuestionQueryEngine`, que divide consultas complexas em várias perguntas e, em seguida, agrega uma resposta com base nas respostas de todas as subperguntas.
+
+"
+
+## [Módulos de Baixo Nível](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Este exemplo utiliza vários componentes de baixo nível, o que elimina a necessidade de um mecanismo de consulta real. Esses componentes podem ser usados em qualquer lugar, em qualquer aplicativo, ou personalizados e estendidos por meio de subclasses para atender às suas próprias necessidades.
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..faeb70787d3a865fb1dc67e9ebb91167dccb5297
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Ambientes
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O LlamaIndex atualmente suporta oficialmente o NodeJS 18 e o NodeJS 20.
+
+## Roteador de Aplicativos NextJS
+
+Se você estiver usando os manipuladores de rota/funções serverless do Roteador de Aplicativos NextJS, você precisará usar o modo NodeJS:
+
+```js
+export const runtime = "nodejs"; // padrão
+```
+
+e você precisará adicionar uma exceção para o pdf-parse no seu next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Coloca o pdf-parse no modo NodeJS real com o Roteador de Aplicativos NextJS
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..b7e961f95e214fa3130968d366f3e2b195dd86a1
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+# Instalação e Configuração
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+
+Certifique-se de ter o NodeJS v18 ou superior.
+
+
+## Usando create-llama
+
+A maneira mais fácil de começar com o LlamaIndex é usando o `create-llama`. Essa ferramenta de linha de comando permite que você comece rapidamente a construir um novo aplicativo LlamaIndex, com tudo configurado para você.
+
+Basta executar
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+para começar. Depois que o seu aplicativo for gerado, execute
+
+```bash npm2yarn
+npm run dev
+```
+
+para iniciar o servidor de desenvolvimento. Você pode então visitar [http://localhost:3000](http://localhost:3000) para ver o seu aplicativo.
+
+## Instalação via NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Variáveis de ambiente
+
+Nossos exemplos usam o OpenAI por padrão. Você precisará configurar sua chave da OpenAI da seguinte forma:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Substitua pela sua chave obtida em https://platform.openai.com/account/api-keys
+```
+
+Se você deseja carregá-la automaticamente sempre que iniciar, adicione-a ao seu .zshrc/.bashrc.
+
+ATENÇÃO: não inclua sua chave do OpenAI no controle de versão.
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..b9ccfdd992c715b50c5c6e5def79b4f8d6e7a7e1
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# O que é o LlamaIndex.TS?
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O LlamaIndex.TS é um framework de dados para aplicativos LLM para ingestão, estruturação e acesso a dados privados ou específicos de domínio. Embora um pacote Python também esteja disponível (veja [aqui](https://docs.llamaindex.ai/en/stable/)), o LlamaIndex.TS oferece recursos principais em um pacote simples, otimizado para uso com TypeScript.
+
+## 🚀 Por que usar o LlamaIndex.TS?
+
+No seu cerne, os LLMs oferecem uma interface de linguagem natural entre humanos e dados inferidos. Modelos amplamente disponíveis são pré-treinados em grandes quantidades de dados publicamente disponíveis, desde a Wikipedia e listas de discussão até livros didáticos e código-fonte.
+
+Aplicativos construídos em cima dos LLMs frequentemente exigem aprimorar esses modelos com dados privados ou específicos de domínio. Infelizmente, esses dados podem estar distribuídos em aplicativos e bancos de dados isolados. Eles podem estar por trás de APIs, em bancos de dados SQL ou presos em PDFs e apresentações.
+
+É aí que entra o **LlamaIndex.TS**.
+
+## 🦙 Como o LlamaIndex.TS pode ajudar?
+
+O LlamaIndex.TS oferece as seguintes ferramentas:
+
+- **Carregamento de dados** permite a ingestão direta de seus dados existentes em formatos `.txt`, `.pdf`, `.csv`, `.md` e `.docx`.
+- **Índices de dados** estruturam seus dados em representações intermediárias que são fáceis e eficientes para os LLMs consumirem.
+- **Engines** fornecem acesso em linguagem natural aos seus dados. Por exemplo:
+  - Os motores de consulta são interfaces poderosas de recuperação para saída com conhecimento aprimorado.
+  - Os motores de chat são interfaces conversacionais para interações de "ida e volta" com seus dados.
+
+## 👨‍👩‍👧‍👦 Para quem é o LlamaIndex?
+
+O LlamaIndex.TS fornece um conjunto de ferramentas essenciais para qualquer pessoa que esteja construindo aplicativos LLM com JavaScript e TypeScript.
+
+Nossa API de alto nível permite que usuários iniciantes usem o LlamaIndex.TS para ingestão e consulta de seus dados.
+
+Para aplicativos mais complexos, nossas APIs de nível inferior permitem que usuários avançados personalizem e estendam qualquer módulo - conectores de dados, índices, recuperadores e mecanismos de consulta - para atender às suas necessidades.
+
+## Primeiros Passos
+
+`npm install llamaindex`
+
+Nossa documentação inclui [Instruções de Instalação](./installation.md) e um [Tutorial Inicial](./starter.md) para construir seu primeiro aplicativo.
+
+Depois de estar pronto para começar, [Conceitos de Alto Nível](./concepts.md) oferece uma visão geral da arquitetura modular do LlamaIndex. Para exemplos práticos mais detalhados, consulte nossos [Tutoriais de Ponta a Ponta](./end_to_end.md).
+
+## 🗺️ Ecossistema
+
+Para baixar ou contribuir, encontre o LlamaIndex em:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Comunidade
+
+Precisa de ajuda? Tem uma sugestão de recurso? Junte-se à comunidade LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..67d234ac43ac6562e93e473e23a07bc122dc14e8
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Motor de Chat)
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O motor de chat é uma maneira rápida e simples de conversar com os dados em seu índice.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// iniciar conversa
+const response = await chatEngine.chat("Faça uma pergunta sobre os seus dados");
+```
+
+## Referências de API
+
+- [ContextChatEngine (Motor de Chat de Contexto)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (Motor de Chat de Perguntas Condensadas)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d97d2655615b3b5a227c289597ee928f6e3de58f
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Índice
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+Um índice é o recipiente básico e a organização para seus dados. O LlamaIndex.TS suporta dois tipos de índices:
+
+- `VectorStoreIndex` - enviará os k melhores `Node`s para o LLM ao gerar uma resposta. O valor padrão de k é 2.
+- `SummaryIndex` - enviará todos os `Node`s no índice para o LLM a fim de gerar uma resposta.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "teste" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
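+
+Um `SummaryIndex` é construído da mesma forma (esboço mínimo, assumindo que a classe é exportada pelo pacote `llamaindex`):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+// Um SummaryIndex envia todos os nós ao LLM, o que é útil para resumos
+const summaryIndex = await SummaryIndex.fromDocuments([
+  new Document({ text: "teste" }),
+]);
+```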
+
+## Referência da API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..8453283a3ca64e887e985ef3dbfb48bbf9da93fb
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Leitor / Carregador
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O LlamaIndex.TS suporta o carregamento fácil de arquivos de pastas usando a classe `SimpleDirectoryReader`. Atualmente, os arquivos `.txt`, `.pdf`, `.csv`, `.md` e `.docx` são suportados, com mais opções planejadas para o futuro!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Referência da API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..248a28b1f053b550de46bf0a4896e8def399bfd3
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documentos e Nós
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+`Documentos` e `Nós` são os blocos de construção básicos de qualquer índice. Embora a API para esses objetos seja semelhante, os objetos `Documentos` representam arquivos inteiros, enquanto os `Nós` são partes menores desse documento original, adequados para um LLM e Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const documento = new Document({ text: "texto", metadata: { chave: "val" } });
+```
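+
+Para obter `Nós` a partir de um documento, pode-se usar o `SimpleNodeParser` (um esboço baseado na API descrita no módulo NodeParser):
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+// Divide o documento em nós menores, adequados para um LLM
+const parser = new SimpleNodeParser();
+const nodes = parser.getNodesFromDocuments([
+  new Document({ text: "texto", metadata: { chave: "val" } }),
+]);
+```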
+
+## Referência da API
+
+- [Documento](../../api/classes/Document.md)
+- [Nó de Texto](../../api/classes/TextNode.md)
+
+"
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ecf01f9f5ada3af9fbc36b80a3e89b21d234e4b
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Motor de Consulta)
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+Um motor de consulta envolve um `Retriever` e um `ResponseSynthesizer` em um pipeline, que usará a string de consulta para buscar nós e, em seguida, enviá-los para o LLM para gerar uma resposta.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("string de consulta");
+```
+
+## Motor de Consulta de Subperguntas
+
+O conceito básico do Motor de Consulta de Subperguntas é dividir uma única consulta em várias consultas, obter uma resposta para cada uma dessas consultas e, em seguida, combinar essas respostas diferentes em uma única resposta coerente para o usuário. Você pode pensar nisso como a técnica de "pensar passo a passo" mas iterando sobre suas fontes de dados!
+
+### Começando
+
+A maneira mais fácil de começar a experimentar o Motor de Consulta de Subperguntas é executar o arquivo subquestion.ts em [exemplos](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Ferramentas
+
+O SubQuestionQueryEngine é implementado com Ferramentas. A ideia básica das Ferramentas é que elas são opções executáveis para o grande modelo de linguagem. Neste caso, nosso SubQuestionQueryEngine depende do QueryEngineTool, que, como você pode imaginar, é uma ferramenta para executar consultas em um QueryEngine. Isso nos permite dar ao modelo a opção de consultar diferentes documentos para diferentes perguntas, por exemplo. Você também pode imaginar que o SubQuestionQueryEngine poderia usar uma Ferramenta que busca algo na web ou obtém uma resposta usando o Wolfram Alpha.
+
+Você pode aprender mais sobre as Ferramentas dando uma olhada na documentação do LlamaIndex Python em https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
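+
+Abaixo, um esboço mínimo de como montar o SubQuestionQueryEngine a partir de um QueryEngineTool, com base no arquivo subquestion.ts dos exemplos; o nome e a descrição da ferramenta aqui são apenas ilustrativos:
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+// Constrói um índice e um motor de consulta para uma única fonte de dados
+const document = new Document({ text: "Texto do documento..." });
+const index = await VectorStoreIndex.fromDocuments([document]);
+
+// Envolve o motor de consulta em um QueryEngineTool; a descrição ajuda o
+// modelo a decidir quando usar esta ferramenta
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "documento",
+        description: "Responde a perguntas sobre este documento",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query("Uma pergunta complexa sobre os dados");
+```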
+
+"
+
+## Referência da API
+
+- [RetrieverQueryEngine (Motor de Consulta do Retriever)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Motor de Consulta de Subpergunta)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Ferramenta do Motor de Consulta)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..bca94ad78a8bc5bc8b563c09337507429a3f2123
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Módulos Principais
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O LlamaIndex.TS oferece vários módulos principais, separados em módulos de alto nível para começar rapidamente e módulos de baixo nível para personalizar os principais componentes conforme necessário.
+
+## Módulos de Alto Nível
+
+- [**Documento**](./high_level/documents_and_nodes.md): Um documento representa um arquivo de texto, arquivo PDF ou outro pedaço contíguo de dados.
+
+- [**Nó**](./high_level/documents_and_nodes.md): O bloco básico de construção de dados. Mais comumente, esses são partes do documento divididas em pedaços gerenciáveis que são pequenos o suficiente para serem alimentados em um modelo de incorporação e LLM.
+
+- [**Leitor/Carregador**](./high_level/data_loader.md): Um leitor ou carregador é algo que recebe um documento do mundo real e o transforma em uma classe Document que pode ser usada em seu Índice e consultas. Atualmente, oferecemos suporte a arquivos de texto simples e PDFs, com muitos outros a serem adicionados.
+
+- [**Índices**](./high_level/data_index.md): os índices armazenam os Nós e as incorporações desses nós.
+
+- [**Motor de Consulta**](./high_level/query_engine.md): Os motores de consulta são responsáveis por gerar a consulta que você insere e fornecer o resultado. Os motores de consulta geralmente combinam um prompt pré-construído com nós selecionados do seu Índice para fornecer ao LLM o contexto necessário para responder à sua consulta.
+
+- [**Motor de Chat**](./high_level/chat_engine.md): Um Motor de Chat ajuda você a construir um chatbot que interage com seus Índices.
+
+## Módulo de Baixo Nível
+
+- [**LLM**](./low_level/llm.md): A classe LLM é uma interface unificada sobre provedores de grandes modelos de linguagem, como OpenAI GPT-4, Anthropic Claude ou Meta LLaMA. Você pode criar uma subclasse dela para escrever um conector para o seu próprio modelo de linguagem.
+
+- [**Embedding**](./low_level/embedding.md): Uma incorporação é representada como um vetor de números de ponto flutuante. O modelo `text-embedding-ada-002` da OpenAI é nosso modelo de incorporação padrão e cada incorporação que ele gera consiste em 1.536 números de ponto flutuante. Outro modelo de incorporação popular é o BERT, que usa 768 números de ponto flutuante para representar cada nó. Fornecemos várias utilidades para trabalhar com incorporações, incluindo 3 opções de cálculo de similaridade e Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): As estratégias de divisão de texto são incrivelmente importantes para a eficácia geral da pesquisa de incorporação. Atualmente, embora tenhamos um padrão, não há uma solução única para todos os casos. Dependendo dos documentos de origem, você pode querer usar tamanhos e estratégias de divisão diferentes. Atualmente, oferecemos suporte à divisão por tamanho fixo, divisão por tamanho fixo com seções sobrepostas, divisão por sentença e divisão por parágrafo. O divisor de texto é usado pelo NodeParser ao dividir `Documentos` em `Nós`.
+
+- [**Retriever**](./low_level/retriever.md): O Retriever é o responsável por escolher os Nós a serem recuperados do índice. Aqui, você pode querer tentar recuperar mais ou menos Nós por consulta, alterar sua função de similaridade ou criar seu próprio recuperador para cada caso de uso individual em seu aplicativo. Por exemplo, você pode querer ter um recuperador separado para conteúdo de código versus conteúdo de texto.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): O ResponseSynthesizer é responsável por receber uma sequência de consulta e usar uma lista de `Nós` para gerar uma resposta. Isso pode assumir várias formas, como iterar sobre todo o contexto e refinar uma resposta ou construir uma árvore de resumos e retornar o resumo principal.
+
+- [**Storage**](./low_level/storage.md): Em algum momento, você vai querer armazenar seus índices, dados e vetores em vez de executar os modelos de incorporação toda vez. IndexStore, DocStore, VectorStore e KVStore são abstrações que permitem fazer isso. Juntos, eles formam o StorageContext. Atualmente, permitimos que você persista suas incorporações em arquivos no sistema de arquivos (ou em um sistema de arquivos virtual na memória), mas também estamos adicionando ativamente integrações com Bancos de Dados de Vetores.
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..736aaaf8822892850b4ef5cc9e2c96d847bc27ce
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Incorporação
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O modelo de incorporação no LlamaIndex é responsável por criar representações numéricas de texto. Por padrão, o LlamaIndex usará o modelo `text-embedding-ada-002` da OpenAI.
+
+Isso pode ser definido explicitamente no objeto `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Referência da API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
+
+"
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ca00e1ed6ca12b226f4e6029c12d880e055db03
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O LLM é responsável por ler texto e gerar respostas em linguagem natural para consultas. Por padrão, o LlamaIndex.TS usa `gpt-3.5-turbo`.
+
+O LLM pode ser definido explicitamente no objeto `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## Referência da API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
+
+"
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..7996131ca786977c3479b1d3a4bce9878da1b515
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O `NodeParser` no LlamaIndex é responsável por dividir objetos `Document` em objetos `Node` mais gerenciáveis. Quando você chama `.fromDocuments()`, o `NodeParser` do `ServiceContext` é usado para fazer isso automaticamente para você. Alternativamente, você pode usá-lo para dividir documentos antecipadamente.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Eu tenho 10 anos. John tem 20 anos." }),
+]);
+```
+
+## TextSplitter
+
+O divisor de texto subjacente dividirá o texto em frases. Ele também pode ser usado como um módulo independente para dividir texto bruto.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Olá Mundo");
+```
+
+## Referência da API
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
+
+"
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..0012d31ffd5b9153d3ef9bd974182d0db1d108ca
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O ResponseSynthesizer é responsável por enviar a consulta, os nós e os modelos de prompt para o LLM (Language Model) a fim de gerar uma resposta. Existem alguns modos principais para gerar uma resposta:
+
+- `Refine`: "criar e refinar" uma resposta percorrendo sequencialmente cada trecho de texto recuperado. Isso faz uma chamada separada para o LLM por nó. Bom para respostas mais detalhadas.
+- `CompactAndRefine` (padrão): "compactar" o prompt durante cada chamada ao LLM, inserindo o máximo de trechos de texto que couberem no tamanho máximo do prompt. Se houver muitos trechos para caber em um único prompt, "criar e refinar" uma resposta percorrendo vários prompts compactos. O mesmo que `refine`, mas deve resultar em menos chamadas ao LLM.
+- `TreeSummarize`: Dado um conjunto de trechos de texto e a consulta, construir recursivamente uma árvore e retornar o nó raiz como resposta. Bom para fins de sumarização.
+- `SimpleResponseBuilder`: Dado um conjunto de trechos de texto e a consulta, aplicar a consulta a cada trecho de texto enquanto acumula as respostas em uma matriz. Retorna uma string concatenada de todas as respostas. Bom quando você precisa executar a mesma consulta separadamente para cada trecho de texto.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Eu tenho 10 anos." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John tem 20 anos." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Qual é a minha idade?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## Referência da API
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..8745376508370cbc19eafe66d9ebfcfc28c12288
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Recuperador)
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+Um recuperador no LlamaIndex é o que se usa para buscar `Node`s de um índice a partir de uma string de consulta. Um `VectorIndexRetriever` buscará os k nós mais semelhantes (top-k). Já um `SummaryIndexRetriever` buscará todos os nós, independentemente da consulta.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Buscar nós!
+const nodesWithScore = await retriever.retrieve("string de consulta");
+```
+
+## Referência da API
+
+- [SummaryIndexRetriever (Recuperador de Índice de Resumo)](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever (Recuperador de Índice de Resumo LLM)](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever (Recuperador de Índice Vetorial)](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..165f15d61c2f39bce19fe65a4f08429395e35d1b
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Armazenamento
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+O armazenamento no LlamaIndex.TS funciona automaticamente assim que você configurar um objeto `StorageContext`. Basta configurar o `persistDir` e anexá-lo a um índice.
+
+No momento, apenas o salvamento e o carregamento do disco são suportados, com integrações futuras planejadas!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Texto de Teste" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
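+
+Para carregar um índice já persistido, basta criar um `StorageContext` apontando para o mesmo diretório; o esboço abaixo assume que o método `VectorStoreIndex.init` está disponível na versão da biblioteca em uso:
+
+```typescript
+// Carrega o índice persistido anteriormente em ./storage
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+```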
+
+## Referência da API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
+
+"
diff --git a/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..763aeb4f5e2232d0e25bbefa944ac385d09735f2
--- /dev/null
+++ b/apps/docs/i18n/pt/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Tutorial Inicial
+
+`Esta documentação foi traduzida automaticamente e pode conter erros. Não hesite em abrir um Pull Request para sugerir alterações.`
+
+Depois de [instalar o LlamaIndex.TS usando o NPM](installation) e configurar sua chave do OpenAI, você está pronto para iniciar seu primeiro aplicativo:
+
+Em uma nova pasta:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # se necessário
+```
+
+Crie o arquivo `example.ts`. Este código irá carregar alguns dados de exemplo, criar um documento, indexá-lo (o que cria embeddings usando o OpenAI) e, em seguida, criar um mecanismo de consulta para responder perguntas sobre os dados.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Carrega o ensaio de abramov.txt no Node
+  const ensaio = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Cria um objeto Document com o ensaio
+  const documento = new Document({ text: ensaio });
+
+  // Divide o texto e cria embeddings. Armazene-os em um VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([documento]);
+
+  // Consulta o índice
+  const mecanismoConsulta = index.asQueryEngine();
+  const resposta = await mecanismoConsulta.query(
+    "O que o autor fez na faculdade?",
+  );
+
+  // Exibe a resposta
+  console.log(resposta.toString());
+}
+
+main();
+```
+
+Em seguida, você pode executá-lo usando
+
+```bash
+npx ts-node example.ts
+```
+
+Pronto para aprender mais? Confira nosso playground NextJS em https://llama-playground.vercel.app/. O código-fonte está disponível em https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..da2be8ee7ef9695db97104995c913cc073ab263e
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Concepte de nivel înalt
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+LlamaIndex.TS vă ajută să construiți aplicații bazate pe LLM (de exemplu, Q&A, chatbot) peste date personalizate.
+
+În acest ghid de concepte de nivel înalt, veți învăța:
+
+- cum poate un LLM să răspundă la întrebări folosind propriile date.
+- concepte cheie și module în LlamaIndex.TS pentru a compune propriul pipeline de interogare.
+
+## Răspunsuri la întrebări folosind datele dvs.
+
+LlamaIndex folosește o metodă în două etape atunci când folosește un LLM cu datele dvs.:
+
+1. **etapa de indexare**: pregătirea unei baze de cunoștințe și
+2. **etapa de interogare**: recuperarea contextului relevant din baza de cunoștințe, pentru a ajuta LLM să răspundă la o întrebare
+
+![](./_static/concepts/rag.jpg)
+
+Acest proces este cunoscut și sub numele de Generare Augmentată prin Recuperare (Retrieval Augmented Generation, RAG).
+
+LlamaIndex.TS oferă setul de instrumente esențiale pentru a face ambele etape extrem de ușoare.
+
+Să explorăm fiecare etapă în detaliu.
+
+### Etapa de indexare
+
+LlamaIndex.TS vă ajută să pregătiți baza de cunoștințe cu o suită de conectori de date și indexuri.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Încărcătoare de date**](./modules/high_level/data_loader.md):
+Un conector de date (adică un `Reader`) preia date din diferite surse și formate de date într-o reprezentare simplă de tip `Document` (text și metadate simple).
+
+[**Documente / Noduri**](./modules/high_level/documents_and_nodes.md): Un `Document` este un container generic pentru orice sursă de date - de exemplu, un PDF, un rezultat de API sau date preluate dintr-o bază de date. Un `Node` este unitatea atomică de date în LlamaIndex și reprezintă o "bucată" dintr-un `Document` sursă. Este o reprezentare bogată, care include metadate și relații (cu alte noduri), pentru a permite operații de recuperare precise și expresive.
+
+[**Indexuri de date**](./modules/high_level/data_index.md):
+După ce ați preluat datele, LlamaIndex vă ajută să indexați datele într-un format ușor de recuperat.
+
+În spatele scenei, LlamaIndex analizează documentele brute în reprezentări intermediare, calculează înglobări vectoriale și stochează datele în memorie sau pe disc.
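+
+De exemplu, o schiță minimală a etapei de indexare (presupunând cititorul `SimpleDirectoryReader` și indexul `VectorStoreIndex`, descrise în modulele de mai jos):
+
+```typescript
+import { SimpleDirectoryReader, VectorStoreIndex } from "llamaindex";
+
+// Preluați documentele dintr-un folder local
+const documents = await new SimpleDirectoryReader().loadData("./data");
+
+// Indexați-le: textul este împărțit în noduri, iar înglobările sunt calculate și stocate
+const index = await VectorStoreIndex.fromDocuments(documents);
+```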
+
+"
+
+### Etapa de interogare
+
+În etapa de interogare, pipeline-ul de interogare recuperează contextul cel mai relevant dată fiind o interogare a utilizatorului,
+și îl transmite LLM-ului (împreună cu interogarea) pentru a sintetiza un răspuns.
+
+Acest lucru oferă LLM-ului cunoștințe actualizate, care nu se află în datele sale originale de antrenament
+(reducând, de asemenea, halucinațiile).
+
+Provocarea cheie în etapa de interogare este recuperarea, orchestrarea și raționamentul asupra bazelor de cunoștințe (potențial multe).
+
+LlamaIndex oferă module componibile care vă ajută să construiți și să integrați pipeline-uri RAG pentru Q&A (motor de interogare), chatbot (motor de chat) sau ca parte a unui agent.
+
+Aceste blocuri de construcție pot fi personalizate pentru a reflecta preferințele de clasificare, precum și compuse pentru a raționa asupra mai multor baze de cunoștințe într-un mod structurat.
+
+![](./_static/concepts/querying.jpg)
+
+#### Blocuri de construcție
+
+[**Recuperatoare**](./modules/low_level/retriever.md):
+Un recuperator definește modul de recuperare eficientă a contextului relevant dintr-o bază de cunoștințe (adică index) atunci când i se oferă o interogare.
+Logica specifică de recuperare diferă în funcție de tipul de index, cea mai populară fiind recuperarea densă dintr-un index vectorial.
+
+[**Sintetizatoare de răspuns**](./modules/low_level/response_synthesizer.md):
+Un sintetizator de răspuns generează un răspuns dintr-un LLM, folosind o interogare a utilizatorului și un set dat de fragmente de text recuperate.
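+
+O schiță minimală care combină cele două blocuri (presupunând un `index` deja construit; API-urile `asRetriever` și `ResponseSynthesizer` sunt descrise în modulele de nivel scăzut):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// Recuperați contextul relevant din index
+const retriever = index.asRetriever();
+const nodesWithScore = await retriever.retrieve("șir de interogare");
+
+// Sintetizați un răspuns pe baza nodurilor recuperate
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "șir de interogare",
+  nodesWithScore,
+);
+```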
+
+"
+
+#### Pipeline-uri
+
+[**Motoare de interogare**](./modules/high_level/query_engine.md):
+Un motor de interogare este un pipeline de la cap la coadă care vă permite să puneți întrebări despre datele dvs.
+Primește o interogare în limbaj natural și returnează un răspuns, împreună cu contextul de referință recuperat și transmis LLM-ului.
+
+[**Motoare de chat**](./modules/high_level/chat_engine.md):
+Un motor de chat este un pipeline de la cap la coadă pentru a purta o conversație cu datele dvs.
+(mai multe schimburi de mesaje în loc de o singură întrebare și răspuns).
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..55e2b2312bab5b4045b3a54481ff6f9a2294668e
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,57 @@
+---
+sidebar_position: 4
+---
+
+# Exemple de la cap la coadă
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Includem mai multe exemple de la cap la coadă folosind LlamaIndex.TS în depozitul nostru.
+
+Verificați exemplele de mai jos sau încercați-le și finalizați-le în câteva minute cu tutoriale interactive Github Codespace oferite de Dev-Docs [aici](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Motor de chat (Chat Engine)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Citiți un fișier și discutați despre el cu LLM.
+
+## [Index Vectorial](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Creați un index vectorial și interogați-l. Indexul vectorial va utiliza înglobări pentru a obține cele mai relevante k noduri. În mod implicit, k este 2.
+
+## [Index de rezumat](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Creați un index de listă și interogați-l. Acest exemplu utilizează, de asemenea, `LLMRetriever`, care va utiliza LLM pentru a selecta cele mai bune noduri de utilizat la generarea răspunsului.
+
+"
+
+## [Salvarea / Încărcarea unui index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Creați și încărcați un index vectorial. Persistența pe disc în LlamaIndex.TS se întâmplă automat odată ce este creat un obiect de context de stocare.
+
+## [Index personalizat de vectori](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Creați un index de vectori și interogați-l, configurând în același timp `LLM`, `ServiceContext` și `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Creați un OpenAI LLM și utilizați-l direct pentru chat.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Creați un Llama-2 LLM și utilizați-l direct pentru chat.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Folosește `SubQuestionQueryEngine`, care descompune interogările complexe în mai multe întrebări și apoi agregă un răspuns pe baza răspunsurilor la toate sub-întrebările.
+
+"
+
+## [Module de nivel scăzut](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Acest exemplu utilizează mai multe componente de nivel scăzut, care elimină necesitatea unui motor de interogare real. Aceste componente pot fi utilizate oriunde, în orice aplicație, sau personalizate și subclasate pentru a satisface propriile nevoi.
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..67ea4162c754edde56aa0cfbf2046d5c37bab1db
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Medii de lucru
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+LlamaIndex suportă oficial în prezent NodeJS 18 și NodeJS 20.
+
+## NextJS App Router
+
+Dacă utilizați handlerele de rute / funcțiile serverless din NextJS App Router, va trebui să utilizați modul NodeJS:
+
+```js
+export const runtime = "nodejs"; // implicit
+```
+
+și va trebui să adăugați o excepție pentru pdf-parse în fișierul next.config.js:
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Rulează pdf-parse în modul NodeJS real cu NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..6bb0780071957451fd75445da734a8160c727344
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Instalare și Configurare
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+
+Asigurați-vă că aveți NodeJS v18 sau o versiune mai recentă.
+
+
+## Utilizarea create-llama
+
+Cel mai simplu mod de a începe cu LlamaIndex este prin utilizarea `create-llama`. Acest instrument CLI vă permite să începeți rapid construirea unei noi aplicații LlamaIndex, cu totul configurat pentru dumneavoastră.
+
+Rulați pur și simplu
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+pentru a începe. Odată ce aplicația este generată, rulați
+
+```bash npm2yarn
+npm run dev
+```
+
+pentru a porni serverul de dezvoltare. Puteți vizita apoi [http://localhost:3000](http://localhost:3000) pentru a vedea aplicația dumneavoastră.
+
+## Instalare din NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Variabile de mediu
+
+Exemplele noastre utilizează implicit OpenAI. Va trebui să vă configurați cheia OpenAI în felul următor:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Înlocuiți cu cheia dvs. de la https://platform.openai.com/account/api-keys
+```
+
+Dacă doriți să fie încărcată automat de fiecare dată, adăugați-o în .zshrc/.bashrc.
+
+ATENȚIE: nu adăugați cheia dvs. OpenAI în sistemul de control al versiunilor.
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..ca1b63689566c48712796b92f30f7037582268ad
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Ce este LlamaIndex.TS?
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+LlamaIndex.TS este un cadru de date pentru aplicațiile LLM pentru a prelua, structura și accesa date private sau specifice domeniului. În timp ce există și un pachet Python disponibil (vezi [aici](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS oferă funcții de bază într-un pachet simplu, optimizat pentru utilizarea cu TypeScript.
+
+## 🚀 De ce LlamaIndex.TS?
+
+În esență, LLM-urile oferă o interfață de limbaj natural între oameni și datele inferate. Modelele larg disponibile sunt pre-antrenate pe o cantitate mare de date disponibile public, de la Wikipedia și liste de corespondență la manuale și cod sursă.
+
+Aplicațiile construite pe baza LLM-urilor necesită adesea completarea acestor modele cu date private sau specifice domeniului. Din păcate, aceste date pot fi distribuite în aplicații și depozite de date izolate. Ele se află în spatele API-urilor, în baze de date SQL sau sunt blocate în fișiere PDF și prezentări.
+
+Aici intervine **LlamaIndex.TS**.
+
+## 🦙 Cum poate ajuta LlamaIndex.TS?
+
+LlamaIndex.TS oferă următoarele instrumente:
+
+- **Încărcarea datelor**: preluați direct datele existente în format `.txt`, `.pdf`, `.csv`, `.md` și `.docx`.
+- **Indexarea datelor**: structurați datele în reprezentări intermediare, ușor de utilizat și performante pentru LLM-uri.
+- **Motoare**: oferă acces în limbaj natural la datele dvs. De exemplu:
+  - Motoarele de interogare sunt interfețe puternice de recuperare pentru rezultate îmbogățite cu cunoștințe.
+  - Motoarele de chat sunt interfețe conversaționale pentru interacțiuni multi-mesaj cu datele dvs.
+
+## 👨‍👩‍👧‍👦 Pentru cine este LlamaIndex?
+
+LlamaIndex.TS oferă un set de instrumente de bază, esențiale pentru oricine construiește aplicații LLM cu JavaScript și TypeScript.
+
+API-ul nostru de nivel înalt permite utilizatorilor începători să utilizeze LlamaIndex.TS pentru a prelua și interoga datele lor.
+
+Pentru aplicații mai complexe, API-urile noastre de nivel inferior permit utilizatorilor avansați să personalizeze și să extindă orice modul - conectori de date, indici, recuperatori și motoare de interogare - pentru a se potrivi nevoilor lor.
+
+## Primii pași
+
+`npm install llamaindex`
+
+Documentația noastră include [Instrucțiuni de instalare](./installation.md) și un [Tutorial de pornire](./starter.md) pentru a construi prima ta aplicație.
+
+Odată ce ai început, [Concepte de nivel înalt](./concepts.md) oferă o prezentare generală a arhitecturii modulare a LlamaIndex. Pentru mai multe exemple practice, consultă [Tutorialele de la cap la coadă](./end_to_end.md).
+
+## 🗺️ Ecosistem
+
+Pentru a descărca sau contribui, găsiți LlamaIndex pe:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Comunitate
+
+Ai nevoie de ajutor? Ai o sugestie de funcționalitate? Alătură-te comunității LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3e82e4429a4b70bece75f526ba3b842ee98c21c
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Motor de Chat)
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Motorul de chat este o modalitate rapidă și simplă de a comunica cu datele din indexul tău.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// începe conversația
+const response = await chatEngine.chat("Salut!");
+```
+
+## Referințe API
+
+- [ContextChatEngine (Motor de Chat în Context)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (Motor de Chat pentru Întrebări Condensate)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..9965246c6d2bbd48fd22e8bcdef863b5ebe6c63e
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Un index este containerul de bază și modul de organizare a datelor tale. LlamaIndex.TS suportă două tipuri de index:
+
+- `VectorStoreIndex` - va trimite primele k `Node`-uri către LLM atunci când generează un răspuns. Valoarea implicită pentru k este 2.
+- `SummaryIndex` - va trimite fiecare `Node` din index către LLM pentru a genera un răspuns.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## Referință API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..667884aa69e76bceb64be6e1b6c9634a44fca18c
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Cititor / Încărcător
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+LlamaIndex.TS suportă încărcarea ușoară a fișierelor din foldere folosind clasa `SimpleDirectoryReader`. În prezent, sunt suportate fișierele `.txt`, `.pdf`, `.csv`, `.md` și `.docx`, cu planuri pentru suportul altor formate în viitor!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Referință API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..1537061abcf6b92a61dff221ad357d75ecd9f27f
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documente și Noduri
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Obiectele `Document` și `Node` sunt elementele de bază ale oricărui index. Deși API-ul acestor obiecte este similar, obiectele `Document` reprezintă fișiere întregi, în timp ce obiectele `Node` sunt bucăți mai mici din acel document original, potrivite pentru un LLM și pentru Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## Referință API
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..67bb113e0bf29357f4236845b46835b794199467
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,44 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Motor de interogare)
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Un motor de interogare încapsulează un `Retriever` și un `ResponseSynthesizer` într-un pipeline care va utiliza șirul de interogare pentru a prelua noduri și apoi le va trimite către LLM pentru a genera un răspuns.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("șir de interogare");
+```
+
+## Motor de interogare pentru subîntrebări
+
+Conceptul de bază al Motorului de interogare pentru subîntrebări constă în împărțirea unei singure interogări în mai multe interogări, obținerea unui răspuns pentru fiecare dintre aceste interogări și apoi combinarea acestor răspunsuri diferite într-un singur răspuns coerent pentru utilizator. Puteți să-l considerați ca pe o tehnică de prompt "gândește-te la asta pas cu pas", dar iterând prin sursele de date!
+
+### Începerea utilizării
+
+Cel mai simplu mod de a începe să încercați Motorul de interogare pentru subîntrebări este să rulați fișierul subquestion.ts din [exemple](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Instrumente
+
+Motorul de interogare pentru subîntrebări este implementat cu ajutorul Instrumentelor. Ideea de bază a Instrumentelor este că acestea sunt opțiuni executabile pentru modelul de limbă mare. În acest caz, Motorul de interogare pentru subîntrebări se bazează pe QueryEngineTool, care, așa cum ați ghicit, este un instrument pentru a rula interogări pe un Motor de interogare. Acest lucru ne permite să oferim modelului o opțiune de a interoga diferite documente pentru diferite întrebări, de exemplu. De asemenea, puteți să vă imaginați că Motorul de interogare pentru subîntrebări ar putea utiliza un Instrument care caută ceva pe web sau obține un răspuns folosind Wolfram Alpha.
+
+Puteți afla mai multe despre Instrumente consultând documentația Python LlamaIndex https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
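+
+O schiță minimală (după modelul exemplului subquestion.ts; numele și descrierea instrumentului sunt ipotetice):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      // un motor de interogare existent, împachetat ca instrument
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "eseu", // nume ipotetic
+        description: "Răspunde la întrebări despre eseu",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query("întrebare complexă");
+```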
+
+"
+
+## Referință API
+
+- [RetrieverQueryEngine (Motor de interogare Retriever)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Motor de interogare SubQuestion)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Instrument Motor de interogare)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..64f969dfc454b57283e8132f9cb985d17ee130ac
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Module de bază
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+LlamaIndex.TS oferă mai multe module de bază, separate în module de nivel înalt pentru a începe rapid și module de nivel scăzut pentru a personaliza componentele cheie după nevoie.
+
+## Module de nivel înalt
+
+- [**Document**](./high_level/documents_and_nodes.md): Un document reprezintă un fișier text, un fișier PDF sau o altă bucată continuă de date.
+
+- [**Node**](./high_level/documents_and_nodes.md): Blocul de construcție de bază al datelor. Cel mai frecvent, acestea sunt părți ale documentului împărțite în bucăți gestionabile, suficient de mici pentru a fi introduse într-un model de încorporare și LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): Un cititor sau încărcător este ceva care preia un document din lumea reală și îl transformă într-o clasă Document care poate fi apoi utilizată în indexul și interogările dvs. În prezent, suportăm fișiere de text simplu și fișiere PDF, cu multe altele în viitor.
+
+- [**Indexes**](./high_level/data_index.md): indexele stochează nodurile și încorporările acestor noduri.
+
+- [**QueryEngine**](./high_level/query_engine.md): Motoarele de interogare sunt cele care primesc interogarea introdusă de dvs. și vă returnează rezultatul. Motoarele de interogare combină, în general, un prompt pre-construit cu nodurile selectate din indexul dvs., pentru a oferi LLM-ului contextul de care are nevoie pentru a răspunde la interogare.
+
+- [**ChatEngine**](./high_level/chat_engine.md): Un ChatEngine vă ajută să construiți un chatbot care va interacționa cu indexurile dvs.
+
+## Module de nivel scăzut
+
+- [**LLM**](./low_level/llm.md): Clasa LLM este o interfață unificată peste un furnizor de modele de limbaj mare, cum ar fi OpenAI GPT-4, Anthropic Claude sau Meta LLaMA. Puteți crea o clasă derivată pentru a crea un conector către propriul dvs. model de limbaj mare.
+
+- [**Embedding**](./low_level/embedding.md): Un embedding este reprezentat ca un vector de numere reale. Modelul nostru implicit de embedding este text-embedding-ada-002 de la OpenAI, iar fiecare embedding generat constă în 1.536 de numere reale. Un alt model popular de embedding este BERT, care utilizează 768 de numere reale pentru a reprezenta fiecare nod. Oferim o serie de utilități pentru a lucra cu embeddings, inclusiv 3 opțiuni de calcul al similarității și Maximum Marginal Relevance (vezi schița de după această listă).
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Strategiile de împărțire a textului sunt extrem de importante pentru eficacitatea generală a căutării în embedding. În prezent, deși avem o valoare implicită, nu există o soluție care să se potrivească tuturor. În funcție de documentele sursă, este posibil să doriți să utilizați dimensiuni și strategii diferite de împărțire. În prezent, suportăm împărțirea după dimensiune fixă, împărțirea după dimensiune fixă cu secțiuni suprapuse, împărțirea după propoziție și împărțirea după paragraf. TextSplitter este utilizat de NodeParser pentru a împărți `Documente` în `Noduri`.
+
+- [**Retriever**](./low_level/retriever.md): Retriever-ul este cel care selectează efectiv Nodurile de recuperat din index. Aici, puteți încerca să recuperați mai multe sau mai puține Noduri per interogare, să schimbați funcția de similaritate sau să creați propriul dvs. retriever pentru fiecare caz de utilizare individual în aplicația dvs. De exemplu, puteți dori să aveți un retriever separat pentru conținutul codului vs. conținutul text.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer-ul este responsabil pentru preluarea unui șir de interogare și utilizarea unei liste de `Noduri` pentru a genera un răspuns. Acest lucru poate lua mai multe forme, cum ar fi iterarea peste tot contextul și rafinarea unui răspuns sau construirea unui arbore de rezumate și returnarea rezumatului principal.
+
+- [**Storage**](./low_level/storage.md): La un moment dat, veți dori să stocați indexurile, datele și vectorii în loc să rulați modelele de embedding de fiecare dată. IndexStore, DocStore, VectorStore și KVStore sunt abstracții care vă permit să faceți acest lucru. Împreună, ele formează StorageContext-ul. În prezent, vă permitem să persistați embeddings în fișiere pe sistemul de fișiere (sau într-un sistem de fișiere virtual în memorie), dar adăugăm și integrări active la bazele de date vectoriale.
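+
+De exemplu, o schiță minimală de calcul al similarității între două înglobări (presupunând că utilitarul `similarity` și enum-ul `SimilarityType` sunt exportate de pachet):
+
+```typescript
+import { similarity, SimilarityType } from "llamaindex";
+
+// Similaritatea (implicit, de tip cosinus) între doi vectori de înglobare
+const score = similarity([0.1, 0.2, 0.3], [0.1, 0.2, 0.3], SimilarityType.DEFAULT);
+console.log(score); // 1 pentru vectori identici
+```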
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..b2e72bc59dba65633c22029690b02be96ea48b64
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Înglobare (Embedding)
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Modelul de înglobare din LlamaIndex este responsabil pentru crearea reprezentărilor numerice ale textului. În mod implicit, LlamaIndex va utiliza modelul `text-embedding-ada-002` de la OpenAI.
+
+Acest lucru poate fi setat explicit în obiectul `ServiceContext`.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Referință API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..44a9a300b7856ae881d4729bddd94004f5ea84c2
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+LLM-ul este responsabil de citirea textului și generarea de răspunsuri în limbaj natural la interogări. În mod implicit, LlamaIndex.TS utilizează `gpt-3.5-turbo`.
+
+LLM-ul poate fi setat explicit în obiectul `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## Referință API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c6f0d85f385f6db4d038094e18f2976887cbc10
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+`NodeParser` în LlamaIndex este responsabil pentru împărțirea obiectelor `Document` în obiecte `Node` mai ușor de gestionat. Când apelați `.fromDocuments()`, `NodeParser` din `ServiceContext` este utilizat pentru a face acest lucru automat pentru dvs. Alternativ, îl puteți utiliza pentru a împărți documentele în avans.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Am 10 ani. John are 20 de ani." }),
+]);
+```
+
+## TextSplitter
+
+TextSplitter-ul subiacent va împărți textul în propoziții. Poate fi, de asemenea, utilizat ca un modul independent pentru împărțirea textului brut.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Salut, lume!");
+```
+
+## Referință API
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..1aacdf4b0fd7007969a3f0321abe0cd749ed3a39
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,48 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (Sintetizator de răspunsuri)
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Sintetizatorul de răspunsuri este responsabil pentru trimiterea interogării, nodurilor și șabloanelor de prompt către LLM pentru a genera un răspuns. Există câteva moduri cheie de generare a unui răspuns:
+
+- `Refine` (Rafinare): "creează și rafinează" un răspuns trecând secvențial prin fiecare fragment de text recuperat.
+  Acest lucru face un apel LLM separat pentru fiecare nod. Bun pentru răspunsuri mai detaliate.
+- `CompactAndRefine` (Compactare și Rafinare) (implicit): "compactează" promptul în fiecare apel LLM, umplându-l cu cât mai multe fragmente de text care încap în dimensiunea maximă a promptului. Dacă sunt prea multe fragmente de text pentru a încăpea într-un singur prompt, "creează și rafinează" un răspuns trecând prin mai multe prompturi compacte. La fel ca `Refine`, dar ar trebui să rezulte în mai puține apeluri LLM.
+- `TreeSummarize` (Rezumat în Arbore): Având un set de fragmente de text și interogarea, construiește recursiv un arbore și returnează nodul rădăcină ca răspuns. Bun pentru scopuri de rezumat.
+- `SimpleResponseBuilder` (Constructor Simplu de Răspunsuri): Având un set de fragmente de text și interogarea, aplică interogarea la fiecare fragment de text în timp ce acumulează răspunsurile într-un tablou. Returnează un șir concatenat al tuturor răspunsurilor. Bun atunci când trebuie să rulați aceeași interogare separat pentru fiecare fragment de text.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Am 10 ani." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John are 20 de ani." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Ce vârstă am?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## Referință API
+
+- [ResponseSynthesizer (Sintetizator de răspunsuri)](../../api/classes/ResponseSynthesizer.md)
+- [Refine (Rafinare)](../../api/classes/Refine.md)
+- [CompactAndRefine (Compactare și Rafinare)](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize (Rezumat în Arbore)](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder (Constructor Simplu de Răspunsuri)](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..2ff076fa09b884d5ae1159a1bd77d33e18a7f77b
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Recuperator)
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Un recuperator în LlamaIndex este folosit pentru a prelua noduri (`Node`) dintr-un index pe baza unui șir de interogare. Un `VectorIndexRetriever` va prelua primele k noduri cele mai similare. Între timp, un `SummaryIndexRetriever` va prelua toate nodurile, indiferent de interogare.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Preia nodurile!
+const nodesWithScore = await retriever.retrieve("șir de interogare");
+```
+
+## Referință API
+
+- [SummaryIndexRetriever (RecuperatorSummaryIndex)](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever (RecuperatorSummaryIndexLLM)](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever (RecuperatorVectorIndex)](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..c9b21e532978b379181a60ba4ef49ba4c7ed08dd
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Stocare
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+Stocarea în LlamaIndex.TS funcționează automat odată ce ați configurat un obiect `StorageContext`. Doar configurați `persistDir` și atașați-l la un index.
+
+În prezent, este suportată doar salvarea și încărcarea de pe disc, cu integrări viitoare planificate!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "./src";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Text de test" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## Referință API
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..5d05ab8931bf8c7708e88838d227427b779ad1a2
--- /dev/null
+++ b/apps/docs/i18n/ro/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Tutorial de pornire
+
+`Această documentație a fost tradusă automat și poate conține erori. Nu ezitați să deschideți un Pull Request pentru a sugera modificări.`
+
+După ce ați [instalat LlamaIndex.TS folosind NPM](installation) și ați configurat cheia OpenAI, sunteți gata să începeți prima aplicație:
+
+Într-un folder nou:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # dacă este necesar
+```
+
+Creați fișierul `example.ts`. Acest cod va încărca niște date de exemplu, va crea un document, îl va indexa (creând înglobări folosind OpenAI) și apoi va crea un motor de interogare pentru a răspunde la întrebări despre date.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Încărcați eseul din abramov.txt în Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Creați obiectul Document cu eseul
+  const document = new Document({ text: essay });
+
+  // Împărțiți textul și creați înglobări. Stocați-le într-un VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Interogați indexul
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("Ce a făcut autorul în facultate?");
+
+  // Afișați răspunsul
+  console.log(response.toString());
+}
+
+main();
+```
+
+Apoi puteți să-l rulați folosind
+
+```bash
+npx ts-node example.ts
+```
+
+Gata să aflați mai multe? Verificați playground-ul nostru NextJS la adresa https://llama-playground.vercel.app/. Sursa este disponibilă la adresa https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..dedace630a030d08e08f94c41b12fce2444ec6b4
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Высокоуровневые концепции
+
+`Эта документация была автоматически переведена и может содержать ошибки. Не стесняйтесь открывать Pull Request для предложения изменений.`
+
+LlamaIndex.TS помогает вам создавать приложения, основанные на LLM (например, Q&A, чат-бот) с использованием пользовательских данных.
+
+В этом руководстве по высокоуровневым концепциям вы узнаете:
+
+- как LLM может отвечать на вопросы с использованием ваших собственных данных.
+- ключевые концепции и модули в LlamaIndex.TS для построения собственного конвейера запросов.
+
+## Ответы на вопросы по всем вашим данным
+
+LlamaIndex использует двухэтапный метод при использовании LLM с вашими данными:
+
+1. **этап индексации**: подготовка базы знаний, и
+2. **этап запроса**: получение соответствующего контекста из базы знаний для помощи LLM в ответе на вопрос
+
+![](./_static/concepts/rag.jpg)
+
+Этот процесс также известен как Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS предоставляет необходимый инструментарий для облегчения обоих этапов.
+
+Давайте подробнее рассмотрим каждый этап.
+
+### Этап индексации
+
+LlamaIndex.TS помогает вам подготовить базу знаний с помощью набора коннекторов данных и индексов.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Загрузчики данных**](./modules/high_level/data_loader.md):
+Коннектор данных (например, `Reader`) загружает данные из различных источников и форматов в простое представление `Document` (текст и простые метаданные).
+
+[**Документы / Узлы**](./modules/high_level/documents_and_nodes.md): `Document` - это общий контейнер для любого источника данных - например, PDF, вывод API или полученные данные из базы данных. `Node` - это атомарная единица данных в LlamaIndex и представляет собой "кусок" исходного `Document`. Это богатое представление, которое включает метаданные и отношения (к другим узлам), чтобы обеспечить точные и выразительные операции извлечения.
+
+[**Индексы данных**](./modules/high_level/data_index.md):
+После загрузки данных LlamaIndex помогает вам индексировать данные в формате, который легко извлекать.
+
+Под капотом LlamaIndex разбирает исходные документы на промежуточные представления, вычисляет векторные вложения и хранит ваши данные в памяти или на диске.
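+
+Например, минимальный набросок этапа индексации (предполагается загрузчик `SimpleDirectoryReader` и индекс `VectorStoreIndex`, описанные в модулях ниже):
+
+```typescript
+import { SimpleDirectoryReader, VectorStoreIndex } from "llamaindex";
+
+// Загружаем документы из локальной папки
+const documents = await new SimpleDirectoryReader().loadData("./data");
+
+// Индексируем: текст разбивается на узлы, вложения вычисляются и сохраняются
+const index = await VectorStoreIndex.fromDocuments(documents);
+```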
+
+"
+
+### Этап запроса
+
+На этапе запроса конвейер запросов извлекает наиболее релевантный контекст, учитывая запрос пользователя,
+и передает его LLM (вместе с запросом) для синтеза ответа.
+
+Это дает LLM актуальные знания, которых нет в его исходных обучающих данных
+(а также уменьшает галлюцинации).
+
+Основной проблемой на этапе запроса является извлечение, оркестрация и рассуждение над (возможно, множеством) баз знаний.
+
+LlamaIndex предоставляет составные модули, которые помогают вам создавать и интегрировать конвейеры RAG для Q&A (движок запросов), чат-бота (чат-движок) или в качестве части агента.
+
+Эти строительные блоки могут быть настроены для отражения предпочтений ранжирования, а также составлены для рассуждения над несколькими базами знаний структурированным образом.
+
+![](./_static/concepts/querying.jpg)
+
+#### Строительные блоки
+
+[**Извлекатели**](./modules/low_level/retriever.md):
+Извлекатель определяет, как эффективно извлекать соответствующий контекст из базы знаний (т.е. индекса) при заданном запросе.
+Конкретная логика извлечения отличается для разных индексов, наиболее популярным является плотное извлечение из векторного индекса.
+
+[**Синтезаторы ответов**](./modules/low_level/response_synthesizer.md):
+Синтезатор ответов генерирует ответ от LLM с использованием запроса пользователя и заданного набора извлеченных фрагментов текста.
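+
+Минимальный набросок, объединяющий оба строительных блока (предполагается уже созданный `index`; API `asRetriever` и `ResponseSynthesizer` описаны в модулях низкого уровня):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// Извлекаем релевантный контекст из индекса
+const retriever = index.asRetriever();
+const nodesWithScore = await retriever.retrieve("строка запроса");
+
+// Синтезируем ответ на основе извлеченных узлов
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "строка запроса",
+  nodesWithScore,
+);
+```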
+
+"
+
+#### Конвейеры
+
+[**Движки запросов**](./modules/high_level/query_engine.md):
+Движок запросов - это конвейер от начала до конца, который позволяет вам задавать вопросы о ваших данных.
+Он принимает запрос на естественном языке и возвращает ответ вместе с извлеченным контекстом, переданным LLM.
+
+[**Чат-движки**](./modules/high_level/chat_engine.md):
+Чат-движок - это конвейер от начала до конца для ведения разговора с вашими данными
+(несколько обменов сообщениями вместо одного вопроса и ответа).
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..20f596e733219ab4f30cc8222251d481f5742d3b
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 4
+---
+
+# Примеры от начала до конца
+
+`Эта документация была автоматически переведена и может содержать ошибки. Не стесняйтесь открывать Pull Request для предложения изменений.`
+
+Мы включили несколько примеров от начала до конца, используя LlamaIndex.TS в репозитории.
+
+Ознакомьтесь с примерами ниже или попробуйте их и завершите их за несколько минут с помощью интерактивных учебников Github Codespace, предоставленных Dev-Docs [здесь](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Чатовый движок (Chat Engine)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Прочитайте файл и обсудите его с LLM.
+
+## [Векторный индекс](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Создайте векторный индекс и выполните запрос к нему. Векторный индекс будет использовать вложения для получения k наиболее релевантных узлов. По умолчанию k равно 2.
+
+"
+
+## [Индекс сводной информации](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Создайте списочный индекс и выполните запрос к нему. В этом примере также используется `LLMRetriever`, который будет использовать LLM для выбора лучших узлов при генерации ответа.
+
+"
+
+## [Сохранение / Загрузка индекса](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Создайте и загрузите векторный индекс. Сохранение на диск в LlamaIndex.TS происходит автоматически после создания объекта контекста хранения.
+
+"
+
+## [Настроенный векторный индекс](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Создайте векторный индекс и выполните запрос к нему, настроив `LLM`, `ServiceContext` и `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Создайте OpenAI LLM и непосредственно используйте его для чата.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Создайте Llama-2 LLM и непосредственно используйте его для чата.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Использует `SubQuestionQueryEngine`, который разбивает сложный запрос на несколько подвопросов, а затем агрегирует ответ на основе ответов на все подвопросы.
+
+"
+
+## [Модули низкого уровня](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Этот пример использует несколько компонентов низкого уровня, что устраняет необходимость в фактическом движке запросов. Эти компоненты могут быть использованы в любом месте, в любом приложении или настроены и унаследованы для удовлетворения ваших собственных потребностей.
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..751d4e5a203ddb315a70e9c2440b567a4a81d440
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Окружения
+
+`Эта документация была автоматически переведена и может содержать ошибки. Не стесняйтесь открывать Pull Request для предложения изменений.`
+
+LlamaIndex в настоящее время официально поддерживает NodeJS 18 и NodeJS 20.
+
+## NextJS App Router
+
+Если вы используете обработчики маршрутов / бессерверные функции NextJS App Router, вам потребуется использовать режим NodeJS:
+
+```js
+export const runtime = "nodejs"; // по умолчанию
+```
+
+и вам потребуется добавить исключение для pdf-parse в вашем файле next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Запускает pdf-parse в настоящем режиме NodeJS при использовании NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..70df777d8c1df85c7e19566ffd981b0c687e1264
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Установка и настройка
+
+`Эта документация была автоматически переведена и может содержать ошибки. Не стесняйтесь открывать Pull Request для предложения изменений.`
+
+
+Убедитесь, что у вас установлена NodeJS версии 18 или выше.
+
+
+## Использование create-llama
+
+Самый простой способ начать работу с LlamaIndex - использовать `create-llama`. Этот инструмент командной строки позволяет быстро начать создание нового приложения LlamaIndex со всеми необходимыми настройками.
+
+Просто запустите
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+чтобы начать. После генерации вашего приложения выполните команду
+
+```bash npm2yarn
+npm run dev
+```
+
+чтобы запустить сервер разработки. Затем вы можете открыть [http://localhost:3000](http://localhost:3000), чтобы увидеть ваше приложение.
+
+## Установка через NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Переменные окружения
+
+Наши примеры по умолчанию используют OpenAI. Вам нужно будет настроить ваш ключ OpenAI следующим образом:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Замените на свой ключ с https://platform.openai.com/account/api-keys
+```
+
+Если вы хотите, чтобы он автоматически загружался каждый раз, добавьте его в ваш .zshrc/.bashrc.
+
+ПРЕДУПРЕЖДЕНИЕ: не добавляйте свой ключ OpenAI в систему контроля версий.
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..463f972652d3dc20fff32411c6f4f2f761741dda
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Что такое LlamaIndex.TS?
+
+`Эта документация была автоматически переведена и может содержать ошибки. Не стесняйтесь открывать Pull Request для предложения изменений.`
+
+LlamaIndex.TS - это фреймворк данных для LLM-приложений, предназначенный для загрузки, структурирования и доступа к частным или специфичным для предметной области данным. Также доступен пакет на Python (см. [здесь](https://docs.llamaindex.ai/en/stable/)), однако LlamaIndex.TS предлагает основные функции в простом пакете, оптимизированном для использования с TypeScript.
+
+## 🚀 Почему LlamaIndex.TS?
+
+В основе своей LLM предлагают естественный языковой интерфейс между людьми и полученными данными. Широко доступные модели предварительно обучены на огромных объемах общедоступных данных, от Википедии и рассылок до учебников и исходного кода.
+
+Приложения, построенные на основе LLM, часто требуют дополнения этих моделей частными или специфичными для домена данными. К сожалению, эти данные могут быть распределены по изолированным приложениям и хранилищам данных. Они находятся за API, в SQL-базах данных или заперты в PDF-файлах и презентациях.
+
+Именно здесь и приходит на помощь **LlamaIndex.TS**.
+
+## 🦙 Как может помочь LlamaIndex.TS?
+
+LlamaIndex.TS предоставляет следующие инструменты:
+
+- **Загрузка данных** - загружайте ваши существующие данные в форматах `.txt`, `.pdf`, `.csv`, `.md` и `.docx` напрямую.
+- **Индексы данных** - структурируйте ваши данные в промежуточные представления, которые легко и эффективно используются LLM.
+- **Движки** - обеспечивают доступ к вашим данным на естественном языке. Например:
+  - Движки запросов - мощные интерфейсы извлечения для ответов, дополненных знаниями.
+  - Движки чата - разговорные интерфейсы для многоэтапного общения с вашими данными.
+
+## 👨‍👩‍👧‍👦 Для кого предназначен LlamaIndex?
+
+LlamaIndex.TS предоставляет основной набор инструментов, необходимых для создания приложений LLM с использованием JavaScript и TypeScript.
+
+Наш API высокого уровня позволяет начинающим пользователям использовать LlamaIndex.TS для ввода и запроса их данных.
+
+Для более сложных приложений наши API низкого уровня позволяют опытным пользователям настраивать и расширять любой модуль - коннекторы данных, индексы, извлекатели и поисковые движки, чтобы соответствовать их потребностям.
+
+## Начало работы
+
+`npm install llamaindex`
+
+Наша документация включает [Инструкции по установке](./installation.md) и [Стартовое руководство](./starter.md) для создания вашего первого приложения.
+
+Когда вы начнете работу, [Высокоуровневые концепции](./concepts.md) предоставляют обзор модульной архитектуры LlamaIndex. Для более практических примеров руководство [Полный цикл руководств](./end_to_end.md) будет полезно.
+
+## 🗺️ Экосистема
+
+Для загрузки или внесения вклада найдите LlamaIndex по следующим ссылкам:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..e4b703fe10954e44d68bdc6d41dc5c40eca4e6ac
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..7da09e5480695d74ec2bdcd65b32fac672e724ec
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..cc08accea114ab7438c34813ecd8e6bd57fee268
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+// loadData is async and returns an array of Documents
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..a9dd6ba3175ac14ba076c475fa92e12feacc5926
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..badfc5b80e9cfe926e0f061b468ad3253aef7a88
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,44 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start experimenting with the Sub Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on QueryEngineTool, which, as you guessed, is a tool to run queries on a query engine. This allows us to give the model the option to query different documents for different questions, for example. You could also imagine the Sub Question Query Engine using a Tool that searches the web or gets an answer using Wolfram Alpha. A short sketch of this wiring follows below.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+"
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..d154d20f0a3fabccdf9ed23f9a93e613d2cf0efe
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of a document split into manageable pieces that are small enough to be fed into an embedding model and an LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. We currently have a default, but there's no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): A Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): A ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary. (A sketch of how a retriever and a response synthesizer compose follows this list.)
+
+- [**Storage**](./low_level/storage.md): At some point you're going to want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to vector databases.
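+
+As a minimal sketch of how two of the low-level modules compose (the query text is illustrative), a retriever picks the relevant `Node`s and a ResponseSynthesizer turns them into an answer:
+
+```typescript
+import { Document, ResponseSynthesizer, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "..." }), // your source text
+]);
+
+// Retriever: choose which nodes are relevant to the query.
+const retriever = index.asRetriever();
+const nodesWithScore = await retriever.retrieve("an example question");
+
+// ResponseSynthesizer: turn the query plus the retrieved nodes into an answer.
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "an example question",
+  nodesWithScore,
+);
+console.log(response.response);
+```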
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..5dcb8b55f8aaedc93bb54388eb4b498dd281228d
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..239839115c43e2aec86d461605893f8b9a088e4c
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..24a87fe4cc2ef58b191de2cc73cf0efc66b236ba
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..e8c9746d4a78ce0c35d9ca772058e46cf2757565
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to stuff into one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `Refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "What age am I?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..45e1be335838d7ea647e2c28a6b5101b50976d78
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+// assuming an existing VectorStoreIndex
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..1abfc3bfcdfc3175374772e65a911fa4d77faafb
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you've configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..27b27419b81aa859750299f0f024821715d6e340
--- /dev/null
+++ b/apps/docs/i18n/ru/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("What did the author do in college?");
+
+  // Output response
+  console.log(response.toString());
+}
+
+main();
+```
+
+Then you can run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..b4bf5fb8100b44d8fee7fc265e4c680d29b7a6de
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,84 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two-stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question.
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index it into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or to disk.
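+
+As a minimal sketch of this stage (the text is illustrative), ingestion and indexing fit in two calls:
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Wrap raw text in a Document, then index it: this parses the document into
+// Nodes, computes embeddings, and stores them (in memory by default).
+const document = new Document({ text: "..." });
+const index = await VectorStoreIndex.fromDocuments([document]);
+```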
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) given a query.
+The specific retrieval logic differs across indices, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+"
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that lets you ask questions over your data.
+It takes in a natural language query and returns a response, together with the reference context that was retrieved and passed to the LLM. A minimal sketch of both pipelines follows below.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
+
+"
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..f3575f77c3193c9f5f5acb612be4344d0d380e58
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 4
+---
+
+# End to End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple questions, and then aggregates a response across the answers to all the sub-questions.
+
+## [Low Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..70d86c34a5abeab9e188b7a8b0a147897f33256a
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7a44f1f48129d5885dbac961790b39f8c40a6cfe
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+# Installation and Setup
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Make sure you have NodeJS v18 or higher.
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is to use `create-llama`. This CLI tool lets you quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..e33842486830ff91e1069d675154565a69f5ca00
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for use with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading** - ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly.
+- **Data indexes** - structure your data into intermediate representations that are easy and performant for LLMs to consume.
+- **Engines** - provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, back-and-forth interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..e0c6773591cee899f3ca47a18e2d59226940e906
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..966cf9d59e97b8bf98bb381b603b75bb57b9765b
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..72c618474b1001414fc9783f0de4efae1caa7375
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+// loadData is async and returns an array of Documents
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..724e78f766e9c9ce400fb30e44a2f1ee150434cc
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7b1ff5a3496c958415336168a358ad0133d1cff
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start experimenting with the Sub Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on QueryEngineTool, which, as you guessed, is a tool to run queries on a query engine. This allows us to give the model the option to query different documents for different questions, for example (a sketch with two such tools follows below). You could also imagine the Sub Question Query Engine using a Tool that searches the web or gets an answer using Wolfram Alpha.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
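+
+As a minimal sketch of routing between documents (assuming two existing indexes, `salesIndex` and `hrIndex`; the tool names and descriptions are illustrative):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// One tool per document set; the LLM picks a tool for each sub-question.
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: salesIndex.asQueryEngine(),
+      metadata: { name: "sales", description: "Answers questions about sales data" },
+    },
+    {
+      queryEngine: hrIndex.asQueryEngine(),
+      metadata: { name: "hr", description: "Answers questions about HR policies" },
+    },
+  ],
+});
+```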
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6909aed4ff17e16bf8d08dfa18901b68b3f0cd0
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of a document split into manageable pieces that are small enough to be fed into an embedding model and an LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model (a combined customization sketch follows this list).
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. We currently have a default, but there's no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): A Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): A ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you're going to want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
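+
+As a minimal sketch of customizing two of these low-level modules at once (combining the `ServiceContext` snippets from the LLM and Embedding pages):
+
+```typescript
+import { OpenAI, OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+// Swap the default LLM and embedding model by building a ServiceContext.
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }),
+  embedModel: new OpenAIEmbedding(),
+});
+```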
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..da5eb48de76c2752a09234e24966575aac25c4bc
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..7d57403c9f696a89edbab1f5736b1308368a4aca
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..9db6dfa5d60c976b49b01bbe6f7002f7f8afb363
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..4baf1630812a00bc593576f7d7d481b7ba28e868
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to fit in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Imam 10 godina." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John ima 20 godina." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Koliko godina imam?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
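+
+To use a different mode, a different response builder can be supplied. A minimal sketch, assuming the `ResponseSynthesizer` constructor accepts a `responseBuilder` option and that the builder classes listed in the API reference below take a `ServiceContext`:
+
+```typescript
+import { TreeSummarize, serviceContextFromDefaults } from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+
+// Summarize via a recursively built tree instead of the default CompactAndRefine.
+const treeSynthesizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+});
+```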
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
+
+"
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ddd2f2e06b14fcda0d7dee4ce36b64638107a5b
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
+
+"
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..8d5bdd0f623be25efeacdfae3e0f9f30bc65afa0
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Simply configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
+
+"
diff --git a/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..f99eafe3c0e914b5d16566fe3ab1fdeea39be8d3
--- /dev/null
+++ b/apps/docs/i18n/se/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first application:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load the essay from abramov.txt in Node
+  const eseji = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create a Document object with the essay
+  const dokument = new Document({ text: eseji });
+
+  // Split the text and create embeddings. Store them in a VectorStoreIndex
+  const indeks = await VectorStoreIndex.fromDocuments([dokument]);
+
+  // Query the index
+  const upitniMotor = indeks.asQueryEngine();
+  const odgovor = await upitniMotor.query("What did the author do in college?");
+
+  // Output the response
+  console.log(odgovor.toString());
+}
+
+main();
+```
+
+Then you can run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..0fd502ddf0005a5f0d530dfa345e9eb3d09fc38c
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge to assist the LLM in responding to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in-memory or to disk.
+
+"
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data,
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+"
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query, and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth instead of a single question & answer).
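+
+Tying these together, a minimal end-to-end sketch (assuming an `index` built as described in the indexing stage above):
+
+```typescript
+// Retrieval and response synthesis wrapped into a single pipeline.
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("What did the author do in college?");
+console.log(response.toString());
+```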
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..9717da4e8760b6da3ed7a5391b268c1bbfd3acd0
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,55 @@
+---
+sidebar_position: 4
+---
+
+# End to End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which uses the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all sub-questions.
+
+## [Low Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..743f744711a3333aa53e3d48b5a48023a5735079
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e5bcfe4dbfe824add351df89cbe8e234837f9db3
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+```This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.```
+
+
+Make sure you have NodeJS v18 or higher installed.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your Open AI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
+
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..40deafddf6e82e6171547b9800780d843f4fbbe3
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading** ingests your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly.
+- **Data indexes** structure your data in intermediate representations that are easy and performant for LLM applications to consume.
+- **Engines** provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to help you build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..212421c5265b4123c6e0421258edfbbd0f49740f
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// `index` is an existing index; `query` is the user's message.
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat(query);
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..bd83124b4d17293af8b06a89db76e910856d6e97
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
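+
+A `SummaryIndex` is built the same way; a minimal sketch, assuming `SummaryIndex.fromDocuments` mirrors the `VectorStoreIndex` factory shown above:
+
+```typescript
+import { SummaryIndex } from "llamaindex";
+
+// Every node in this index is sent to the LLM when generating a response.
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+```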
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..5abd1c36392b981264f35ef1801be2f0a999fe7c
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..a2e00b7c89de5b8d5659c6bfe6b32f2eabdcb41c
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "value" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..5858d37a567b7408c8a595951cac58a317045da5
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is to run the subquestion.ts file in the [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts) folder.
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our SubQuestionQueryEngine relies on QueryEngineTool, which, as you guessed, is a tool to run queries on a QueryEngine. This allows us to give the model an option to query different documents for different questions, for example. You could also imagine that the SubQuestionQueryEngine could use a Tool that searches for something on the web or gets an answer using Wolfram Alpha.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation at https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
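+
+A minimal sketch of wiring this up, assuming `SubQuestionQueryEngine.fromDefaults` accepts `QueryEngineTool`s (a query engine plus descriptive metadata), as in the subquestion.ts example linked above; `vectorQueryEngine` is a hypothetical, pre-built query engine:
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      // e.g. built via index.asQueryEngine()
+      queryEngine: vectorQueryEngine,
+      metadata: {
+        name: "vector_query_engine",
+        description: "Answers questions about the loaded documents",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query(
+  "Compare what the documents say about X and Y",
+);
+```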
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..835ec01672d0007be0f3ffefe15419cf80e61e64
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,35 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started, and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities to work with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. We currently have a default, but there's no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you're going to want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..c5e1ada5f1d3658af4e85fec2dbced7bbbeed7df
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
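+
+The embedding model can also be called directly to obtain raw vectors, for example to compute similarities yourself. A minimal sketch, assuming the `getTextEmbedding(text)` method exposed by LlamaIndex embedding classes:
+
+```typescript
+// Returns the embedding vector for a piece of text.
+const vector = await openaiEmbeds.getTextEmbedding("Hello, world");
+console.log(vector.length); // 1536 dimensions for text-embedding-ada-002
+```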
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..e15689e9873605b2968933f823e6b84ddba5235c
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..8c4c8e02e47d4474b035797dc08097965b7478c5
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Star sem 10 let. John je star 20 let." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter splits text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello world");
+```
+
+"
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..d3000e67f57ae1ca99cd408e08473609bd9e7560
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,55 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to fit in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Star sem 10 let." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John je star 20 let." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Koliko let imam?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
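+
+To use a different mode, a different response builder can be supplied. A minimal sketch, assuming the `ResponseSynthesizer` constructor accepts a `responseBuilder` option and that the builder classes listed in the API reference below take a `ServiceContext`:
+
+```typescript
+import { TreeSummarize, serviceContextFromDefaults } from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+
+// Summarize via a recursively built tree instead of the default CompactAndRefine.
+const treeSynthesizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+});
+```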
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..cf1edafa4469118ec222c865603d66c37a68db5d
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const pridobitelj = vector_index.asRetriever();
+pridobitelj.similarityTopK = 3;
+
+// Fetch nodes!
+const vozliščaZRezultatom = await pridobitelj.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..606b9c8186838d9a9e170ef848e167bc470077ea
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Simply configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Testni tekst" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
+
+"
diff --git a/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..5e2b665980952f543fa440b6fd9eada4ef6de79c
--- /dev/null
+++ b/apps/docs/i18n/sk/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first application:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load the essay from abramov.txt in Node
+  const eseji = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create a Document object with the essay
+  const dokument = new Document({ text: eseji });
+
+  // Split the text and create embeddings. Store them in a VectorStoreIndex
+  const indeks = await VectorStoreIndex.fromDocuments([dokument]);
+
+  // Query the index
+  const iskalniMotor = indeks.asQueryEngine();
+  const odgovor = await iskalniMotor.query("What did the author do in college?");
+
+  // Output the response
+  console.log(odgovor.toString());
+}
+
+main();
+```
+
+Then you can run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
+
+"
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..0606a386533ff54ce580ef0c93516527969fcc25
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,84 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge to assist the LLM in responding to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that's easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in-memory or to disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data,
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+"
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query, and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth instead of a single question & answer).
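+
+Tying these together, a minimal end-to-end sketch (assuming an `index` built as described in the indexing stage above):
+
+```typescript
+// Retrieval and response synthesis wrapped into a single pipeline.
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("What did the author do in college?");
+console.log(response.toString());
+```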
+
+"
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b94ec6903a1f90f0b171f80585e9af85c1653be
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,55 @@
+---
+sidebar_position: 4
+---
+
+# End to End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which uses the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and directly use it for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and directly use it for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all sub-questions.
+
+## [Low Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..be3e3885e1a85ee71b7707b4b5e84a33327906be
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // predvolené
+```
+
+a budete musieť pridať výnimku pre pdf-parse vo vašom next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..5d5885318b7470e5a0b78ab79d2a4fb5d072b434
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+```This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.```
+
+
+Make sure you have NodeJS v18 or higher installed.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check in your OpenAI key into version control.
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..12e1e006811086a617083f82397e8a4e17ac335b
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading**: ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly
+- **Data indexes**: structure your data into intermediate representations that are easy and performant for LLMs to consume.
+- **Engines**: provide natural language access to your data (see the sketch after this list). For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
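+
+For example, both engine styles can be stood up in a few lines. This is a minimal sketch (the document text and the messages are placeholders; `ContextChatEngine` is documented under the core modules):
+
+```typescript
+import { ContextChatEngine, Document, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "..." }), // placeholder data
+]);
+
+// query engine: one-shot, knowledge-augmented Q&A
+const answer = await index.asQueryEngine().query("a question about your data");
+
+// chat engine: multi-message "back and forth" over the same data
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("a follow-up message");
+```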
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..88eddd1e2d3698362ab638a693d612b9c432053e
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat("query string");
+```
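+
+To carry context across turns explicitly, the engine's `chat` method also accepted prior messages in this version of the API; a minimal sketch, assuming a `chat(message, chatHistory?)` signature with `{ role, content }` messages (the messages themselves are placeholders):
+
+```typescript
+const history = [
+  { role: "user" as const, content: "What does the index contain?" },
+  { role: "assistant" as const, content: "A placeholder summary of the data." },
+];
+
+// continue the conversation with the prior messages supplied explicitly
+const followUp = await chatEngine.chat("Can you elaborate?", history);
+```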
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef42971f836ad1262b4da0a282b7c8d42aab1dc8
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..b878cab10741d18fd0171f7ab68e5b1d40b4950b
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..5e23dd6be8c423255b8b7229ce6a0e1c5a6d5c35
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..f16e7d8b977740cd5135284ef4113b8b7883a817
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is running the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on the QueryEngineTool, which, as you guessed, is a tool to run queries against a query engine. This allows us to give the model an option to query different documents for different questions, for example. You could also imagine that the Sub Question Query Engine could use a Tool that searches for something on the web or gets an answer using Wolfram Alpha.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
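+
+Concretely, a minimal sketch of wiring a `QueryEngineTool` into the engine, loosely following the subquestion.ts example linked above (the document text and the tool name/description are illustrative placeholders):
+
+```typescript
+import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "..." }); // placeholder source text
+const index = await VectorStoreIndex.fromDocuments([document]);
+
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      // the metadata tells the LLM when this tool should be used
+      metadata: {
+        name: "example_docs",
+        description: "Useful for questions about the example document.",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query("a complex, multi-part question");
+```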
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..a2065a1e41b5180f23ae5a189baabe393236aa28
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of a document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities for working with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. Currently, while we do have a default, there is no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you'll want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..b286efb5a4ef9c0c4eef8a56aba822f1b6225071
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..0ed80b3310fd3f7723d7c8a4cc154e652c7d69ea
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..5a9df46fd7eb7a422a3de8183ca151b4a97c5f6e
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello world");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..980bba6d103b41260691fe2a28fd4e59561e3386
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to stuff into one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Mám 10 rokov." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John má 20 rokov." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Koľko mám rokov?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
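+
+To pick a mode other than the default, one approach is to hand the synthesizer a specific response builder. This is a minimal sketch, assuming the `ResponseSynthesizer` constructor accepts a `responseBuilder` option and that the builder classes listed in the API reference below take a `ServiceContext`:
+
+```typescript
+import {
+  CompactAndRefine,
+  ResponseSynthesizer,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults({});
+
+// explicitly select the (default) CompactAndRefine strategy
+const customSynthesizer = new ResponseSynthesizer({
+  responseBuilder: new CompactAndRefine(serviceContext),
+  serviceContext,
+});
+```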
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..b388e433a4df5af4d77b43c43e3d258efdb49592
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..e754bb9c2263edb6916ea078f1d88daebc879347
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with future integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
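+
+Loading the same index back later should only require pointing a fresh storage context at the same directory. This is a minimal sketch, reusing the imports above and assuming `VectorStoreIndex.init` picks the persisted index up from the storage context, as in the storageContext example from the end-to-end examples:
+
+```typescript
+// later, e.g. in another process: reload the persisted index
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+const loadedQueryEngine = loadedIndex.asQueryEngine();
+```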
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..c52cdbea7d5145ff5f038f4dc0244ee39c0d2d46
--- /dev/null
+++ b/apps/docs/i18n/sl/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("What did the author do in college?");
+
+  // Output response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready for more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..57b792e5d093472e3c71699b66de3107d31acb32
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,84 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbot) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question.
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you've ingested your data, LlamaIndex helps you index the data into a format that is easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or to disk.
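+
+In code, the whole indexing stage mirrors the snippets used throughout these docs; a minimal sketch (the document text is a placeholder):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// ingest: wrap the raw source data in a generic Document container
+const document = new Document({ text: "..." }); // placeholder text
+
+// index: parse into Nodes, compute vector embeddings, store in memory
+const index = await VectorStoreIndex.fromDocuments([document]);
+```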
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs across indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.
+
+"
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
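+
+A minimal sketch of both pipelines, reusing the `index` built in the indexing sketch above (`ContextChatEngine` is the class shown on the chat engine page; the queries are placeholders):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// query engine: a single question-and-answer round trip
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("a question about your data");
+
+// chat engine: a multi-turn conversation grounded in the same data
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("a follow-up message");
+```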
+
+"
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..7534608f33a11f4871fe0f06a7d49190b9e3ea1e
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,55 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and use it directly for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and use it directly for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions, and then aggregates a response across the answers to all the sub-questions.
+
+## [Low-Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..d9a7ca7f4fdd12fbc77e399195d28584d2929fea
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..b0d4102b925d3a76c39a7f696f17a7ce61148576
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+```This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.```
+
+
+Make sure you have NodeJS v18 or higher installed.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check in your OpenAI key into version control.
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f4db70c28c057b010b0849939d5ec7c33ab035e
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading**: ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly
+- **Data indexes**: structure your data into intermediate representations that are easy and performant for LLMs to consume.
+- **Engines**: provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools, essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..56bd74988b5c4a34650996f3ae38a228b68e07f0
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat("query string");
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..1371ad72d7c4194bab09e1e62f84a754caa42759
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..b56d219c867c5f9b10a4db65d64533bff097dfcf
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..25c2665252e99f808467f264f65583a75ba48779
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document, suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..16975de011cd6a1e71e1618a92b462bf4a2ed96b
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that will use the query string to fetch nodes and then send them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub Question Query Engine is running the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on the QueryEngineTool, which, as you guessed, is a tool to run queries against a query engine. This allows us to give the model an option to query different documents for different questions, for example. You could also imagine that the Sub Question Query Engine could use a Tool that searches for something on the web or gets an answer using Wolfram Alpha.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..bdb4093e10f4c91d9d6d21692f9e8b3b42892631
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of a document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each Node. We provide a number of utilities for working with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of the embedding search. Currently, while we do have a default, there is no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you'll want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we allow you to persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases.
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..03010d0193cca50f8b684839c3965891d5b55047
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API-referens
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..f1fb3cb1e06fba2734415cec510787dfb77b4e8a
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Denna dokumentation har översatts automatiskt och kan innehålla fel. Tveka inte att öppna en Pull Request för att föreslå ändringar.`
+
+LLM är ansvarig för att läsa text och generera naturliga språksvar på frågor. Som standard använder LlamaIndex.TS `gpt-3.5-turbo`.
+
+LLM kan explicit sättas i `ServiceContext`-objektet.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API-referens
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..2673a00e580aac1f40be3568afb590b0a07a269d
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Denna dokumentation har översatts automatiskt och kan innehålla fel. Tveka inte att öppna en Pull Request för att föreslå ändringar.`
+
+`NodeParser` i LlamaIndex är ansvarig för att dela upp `Document`-objekt i mer hanterbara `Node`-objekt. När du anropar `.fromDocuments()`, används `NodeParser` från `ServiceContext` automatiskt för att göra detta åt dig. Alternativt kan du använda det för att dela upp dokument i förväg.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Jag är 10 år gammal. John är 20 år gammal." }),
+]);
+```
+
+## TextSplitter
+
+Den underliggande textdelaren delar upp texten i meningar. Den kan också användas som en fristående modul för att dela upp råtext.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hej världen");
+```
+
+## API-referens
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..4b0ee2c12979bc18de4ca823cf646654c1bd20e6
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,52 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`Denna dokumentation har översatts automatiskt och kan innehålla fel. Tveka inte att öppna en Pull Request för att föreslå ändringar.`
+
+ResponseSynthesizer är ansvarig för att skicka frågan, noderna och promptmallarna till LLM för att generera ett svar. Det finns några nyckellägen för att generera ett svar:
+
+- `Refine`: "skapa och förbättra" ett svar genom att sekventiellt gå igenom varje hämtad textbit.
+  Detta gör ett separat LLM-anrop per nod. Bra för mer detaljerade svar.
+- `CompactAndRefine` (standard): "kompakta" prompten under varje LLM-anrop genom att fylla på med så
+  många textbitar som får plats inom den maximala promptstorleken. Om det finns
+  för många bitar för att få plats i en prompt, "skapa och förbättra" ett svar genom att gå igenom
+  flera kompakta promptar. Samma som `refine`, men bör resultera i färre LLM-anrop.
+- `TreeSummarize`: Givet en uppsättning textbitar och frågan, konstruera rekursivt ett träd
+  och returnera rotnoden som svar. Bra för sammanfattningssyften.
+- `SimpleResponseBuilder`: Givet en uppsättning textbitar och frågan, tillämpa frågan på varje textbit
+  samtidigt som svaren ackumuleras i en array. Returnerar en sammanslagen sträng av alla
+  svar. Bra när du behöver köra samma fråga separat mot varje textbit.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Jag är 10 år gammal." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John är 20 år gammal." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Hur gammal är jag?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API-referens
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..55a6f4238d3aa52a95b48b6a43e095328a6af10f
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Hämtare)
+
+`Denna dokumentation har översatts automatiskt och kan innehålla fel. Tveka inte att öppna en Pull Request för att föreslå ändringar.`
+
+En hämtare i LlamaIndex används för att hämta `Node`-objekt från ett index med hjälp av en frågesträng. En `VectorIndexRetriever` hämtar de top-k mest relevanta noderna, medan en `SummaryIndexRetriever` hämtar alla noder oavsett fråga.
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// Hämta noder!
+const nodesWithScore = await retriever.retrieve("frågesträng");
+```
+
+## API-referens
+
+- [SummaryIndexRetriever (SammanfattningIndexHämtare)](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever (SammanfattningIndexLLMHämtare)](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever (VektorIndexHämtare)](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..c1c9478b717f8cadbf681f1eb9a2b556741c3dbe
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Lagring
+
+`Denna dokumentation har översatts automatiskt och kan innehålla fel. Tveka inte att öppna en Pull Request för att föreslå ändringar.`
+
+Lagring i LlamaIndex.TS fungerar automatiskt när du väl har konfigurerat ett `StorageContext`-objekt. Konfigurera bara `persistDir` och koppla det till ett index.
+
+För närvarande stöds endast spara och ladda från disk, med framtida integrationer planerade!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Referens
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7e97da290416d8eba2bb69091d3198a42ed420e
--- /dev/null
+++ b/apps/docs/i18n/sv/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Startguide
+
+`Denna dokumentation har översatts automatiskt och kan innehålla fel. Tveka inte att öppna en Pull Request för att föreslå ändringar.`
+
+När du har [installerat LlamaIndex.TS med hjälp av NPM](installation) och konfigurerat din OpenAI-nyckel är du redo att starta din första app:
+
+I en ny mapp:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # om det behövs
+```
+
+Skapa filen `example.ts`. Den här koden kommer att ladda in några exempeldata, skapa ett dokument, indexera det (vilket skapar inbäddningar med hjälp av OpenAI) och sedan skapa en frågemotor för att svara på frågor om datan.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Ladda in essän från abramov.txt i Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Skapa ett Document-objekt med essän
+  const document = new Document({ text: essay });
+
+  // Dela upp texten och skapa inbäddningar. Spara dem i en VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Fråga indexet
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Vad gjorde författaren på college?",
+  );
+
+  // Skriv ut svaret
+  console.log(response.toString());
+}
+
+main();
+```
+
+Sedan kan du köra det med
+
+```bash
+npx ts-node example.ts
+```
+
+Redo att lära dig mer? Kolla in vår NextJS-lekplats på https://llama-playground.vercel.app/. Källkoden finns tillgänglig på https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..f415a054c94eb247d74f98d0ba103df3cc4d6932
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# แนวคิดระดับสูง
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+LlamaIndex.TS ช่วยให้คุณสร้างแอปพลิเคชันที่ใช้ LLM (เช่น Q&A, chatbot) บนข้อมูลที่กำหนดเองได้
+
+ในเอกสารแนวคิดระดับสูงนี้ คุณจะเรียนรู้:
+
+- วิธีการ LLM สามารถตอบคำถามโดยใช้ข้อมูลของคุณเองได้อย่างไร
+- แนวคิดหลักและโมดูลใน LlamaIndex.TS ที่ใช้สร้าง query pipeline ของคุณเอง
+
+## การตอบคำถามทั่วข้อมูลของคุณ
+
+LlamaIndex ใช้วิธีการสองขั้นตอนเมื่อใช้ LLM กับข้อมูลของคุณ:
+
+1. **ขั้นตอนการสร้างดัชนี**: เตรียมฐานความรู้
+2. **ขั้นตอนการค้นหา**: ดึงข้อมูลที่เกี่ยวข้องจากฐานความรู้เพื่อช่วย LLM ในการตอบคำถาม
+
+![](./_static/concepts/rag.jpg)
+
+กระบวนการนี้เรียกว่า Retrieval Augmented Generation (RAG) ด้วย
+
+LlamaIndex.TS มีเครื่องมือสำคัญที่ช่วยให้ทั้งสองขั้นตอนง่ายมาก
+
+มาเรียนรู้เกี่ยวกับแต่ละขั้นตอนในรายละเอียด
+
+### ขั้นตอนการสร้างดัชนี
+
+LlamaIndex.TS ช่วยให้คุณเตรียมฐานความรู้ด้วยชุดของตัวเชื่อมต่อข้อมูลและดัชนี
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+ตัวเชื่อมต่อข้อมูล (เช่น `Reader`) รับข้อมูลจากแหล่งข้อมูลและรูปแบบข้อมูลที่แตกต่างกันเข้าสู่รูปแบบ `Document` ที่เรียบง่าย (ข้อความและข้อมูลเมตาอย่างง่าย)
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): `Document` เป็นคอนเทนเนอร์ทั่วไปสำหรับแหล่งข้อมูลใด ๆ เช่น PDF ผลลัพธ์จาก API หรือข้อมูลที่ดึงมาจากฐานข้อมูล ส่วน `Node` เป็นหน่วยข้อมูลย่อยที่สุดใน LlamaIndex และแทน "ชิ้น" หนึ่งของ `Document` ต้นฉบับ เป็นการแทนข้อมูลที่สมบูรณ์ซึ่งรวมข้อมูลเมตาและความสัมพันธ์ (กับโหนดอื่น ๆ) เพื่อให้สามารถดึงข้อมูลได้อย่างแม่นยำและสื่อความหมาย
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+เมื่อคุณได้รับข้อมูลเข้าสู่ระบบแล้ว LlamaIndex ช่วยคุณดัชนีข้อมูลให้อยู่ในรูปแบบที่ง่ายต่อการเรียกดู
+
+ภายใน LlamaIndex จะแยกวิเคราะห์เอกสารเบื้องต้นเป็นรูปแบบกลาง คำนวณเวกเตอร์ซึ่งเป็นการแทนข้อมูลและจัดเก็บข้อมูลของคุณในหน่วยความจำหรือแผ่นดิสก์
+
+### ขั้นตอนการค้นหา
+
+ในขั้นตอนการค้นหา pipeline ของคำถามจะดึงข้อมูลที่เกี่ยวข้องที่สุดตามคำถามของผู้ใช้
+และส่งข้อมูลนั้นให้กับ LLM (พร้อมกับคำถาม) เพื่อสร้างคำตอบ
+
+นี้จะทำให้ LLM มีความรู้ที่อัปเดตล่าสุดที่ไม่ได้อยู่ในข้อมูลการฝึกอบรมเดิมของมัน
+(ลดอาการหลอน หรือ hallucination ของโมเดล)
+
+ความท้าทายสำคัญในขั้นตอนการค้นหาคือการค้นหา การจัดการ และการแสดงเหตุผลเกี่ยวกับฐานความรู้ (ที่อาจมีหลายฐานความรู้)
+
+LlamaIndex มีโมดูลที่สามารถสร้างและรวมเป็นระบบ RAG pipeline สำหรับ Q&A (query engine), chatbot (chat engine), หรือเป็นส่วนหนึ่งของตัวแทน
+
+ส่วนประกอบเหล่านี้สามารถปรับแต่งให้สอดคล้องกับการจัดอันดับที่ต้องการ และสามารถรวมกันเพื่อแสดงเหตุผลเกี่ยวกับหลายฐานความรู้ในวิธีที่เป็นโครงสร้าง
+
+![](./_static/concepts/querying.jpg)
+
+#### ส่วนประกอบพื้นฐาน
+
+[**Retrievers**](./modules/low_level/retriever.md):
+Retrievers กำหนดวิธีการค้นหาข้อมูลที่เกี่ยวข้องจากฐานความรู้ (เช่นดัชนี) อย่างมีประสิทธิภาพเมื่อมีคำถาม
+ตรรกะการค้นหาเฉพาะของแต่ละดัชนีแตกต่างกัน และดัชนีที่ได้รับความนิยมสูงสุดคือการค้นหาแบบหนาแน่นต่อดัชนีเวกเตอร์
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+Response Synthesizers สร้างคำตอบจาก LLM โดยใช้คำถามของผู้ใช้และชุดข้อความที่ได้รับ
+
+#### ไปป์ไลน์ (Pipelines)
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+Query engine เป็นไปป์ไลน์แบบครบวงจรที่ให้คุณถามคำถามเกี่ยวกับข้อมูลของคุณได้
+มันรับคำถามเป็นภาษาธรรมชาติ แล้วส่งคืนคำตอบพร้อมบริบทอ้างอิงที่ถูกดึงมาและส่งให้กับ LLM
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+Chat engine เป็นไปป์ไลน์แบบครบวงจรสำหรับสนทนากับข้อมูลของคุณ
+(โต้ตอบไปมาหลายรอบแทนการถาม-ตอบเพียงครั้งเดียว) ดูตัวอย่างด้านล่าง
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..0cb4555f58cacc203af2603733bc43abd60638e8
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,61 @@
+---
+sidebar_position: 4
+---
+
+# ตัวอย่าง End to End
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+เรามีตัวอย่าง End-to-End หลายรูปแบบที่ใช้ LlamaIndex.TS ในเรปอสิทอรี
+
+ดูตัวอย่างด้านล่างหรือลองใช้งานและทำตามได้ในไม่กี่นาทีด้วยการสอนแบบ Github Codespace ที่ให้โดย Dev-Docs [ที่นี่](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+อ่านไฟล์และพูดคุยเกี่ยวกับมันกับ LLM.
+
+## [ดัชนีเวกเตอร์](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+สร้างดัชนีเวกเตอร์และสอบถามข้อมูล ดัชนีเวกเตอร์จะใช้เอมเบดดิงเพื่อเรียกดูโหนดที่เกี่ยวข้องมากที่สุด k โหนด โดยค่าเริ่มต้นของ k คือ 2
+
+## [สรุปดัชนี](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+สร้างดัชนีรายการและสอบถามดัชนี ตัวอย่างนี้ยังใช้ `LLMRetriever` ซึ่งจะใช้ LLM เพื่อเลือกโหนดที่ดีที่สุดในการสร้างคำตอบ
+
+## [บันทึก / โหลดดัชนี](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+สร้างและโหลดดัชนีเวกเตอร์ การบันทึกลงดิสก์ใน LlamaIndex.TS จะเกิดขึ้นโดยอัตโนมัติเมื่อมีการสร้างออบเจ็กต์ storage context
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+สร้างดัชนีเวกเตอร์และสอบถามด้วยการกำหนดค่า `LLM`, `ServiceContext`, และ `similarity_top_k`
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+สร้าง OpenAI LLM และใช้งานได้โดยตรงสำหรับการสนทนา.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+สร้าง Llama-2 LLM และใช้งานได้โดยตรงสำหรับการสนทนา.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+ใช้ `SubQuestionQueryEngine` ซึ่งแยกคำถามที่ซับซ้อนเป็นคำถามหลายๆ คำ แล้วรวมผลลัพธ์จากคำตอบของทุกคำถามย่อยเข้าด้วยกัน
+
+## [โมดูลระดับต่ำ](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+ตัวอย่างนี้ใช้คอมโพเนนต์ระดับต่ำหลายอย่างซึ่งลดความจำเป็นในการใช้งานเครื่องมือค้นหาจริง คอมโพเนนต์เหล่านี้สามารถใช้ได้ทุกที่ในแอปพลิเคชันใดก็ได้ หรือปรับแต่งและสร้างคลาสย่อยเพื่อตอบสนองความต้องการของคุณเอง
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..6abaeae5982cbc2bfad94fa457af4759c2582d79
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# สภาพแวดล้อม (Environments)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+LlamaIndex รองรับ NodeJS 18 และ NodeJS 20 อย่างเป็นทางการในปัจจุบัน
+
+## NextJS App Router
+
+หากคุณใช้ NextJS App Router route handlers/serverless functions คุณจะต้องใช้โหมด NodeJS:
+
+```js
+export const runtime = "nodejs"; // ค่าเริ่มต้น
+```
+
+และคุณจะต้องเพิ่มข้อยกเว้นสำหรับ pdf-parse ใน next.config.js ของคุณ
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // ให้ pdf-parse ทำงานในโหมด NodeJS จริงๆ กับ NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..afc4621d0c4555eb743756251559161a6d5fe6af
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# การติดตั้งและการตั้งค่า
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+
+ตรวจสอบให้แน่ใจว่าคุณมี NodeJS เวอร์ชัน 18 หรือสูงกว่า
+
+
+## การใช้ create-llama
+
+วิธีที่ง่ายที่สุดในการเริ่มต้นใช้งาน LlamaIndex คือโดยใช้ `create-llama` โปรแกรมเครื่องมือชุดคำสั่งนี้ช่วยให้คุณสามารถเริ่มสร้างแอปพลิเคชัน LlamaIndex ใหม่ได้อย่างรวดเร็วพร้อมทั้งตั้งค่าที่จำเป็น
+
+เพียงแค่รัน
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+เพื่อเริ่มต้น หลังจากที่แอปของคุณถูกสร้างขึ้นแล้ว รัน
+
+```bash npm2yarn
+npm run dev
+```
+
+เพื่อเริ่มเซิร์ฟเวอร์ในโหมดการพัฒนา คุณสามารถเข้าชม [http://localhost:3000](http://localhost:3000) เพื่อดูแอปของคุณได้
+
+## การติดตั้งจาก NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### ตัวแปรสภาพแวดล้อม
+
+ตัวอย่างของเราใช้ OpenAI เป็นค่าเริ่มต้น คุณจะต้องตั้งค่า Open AI key ของคุณดังนี้:
+
+```bash
+export OPENAI_API_KEY="sk-......" # แทนที่ด้วยคีย์ของคุณจาก https://platform.openai.com/account/api-keys
+```
+
+หากคุณต้องการให้โหลดโดยอัตโนมัติทุกครั้ง เพิ่มไปยัง .zshrc/.bashrc ของคุณ
+
+คำเตือน: อย่าเก็บ OpenAI key ของคุณไว้ในระบบควบคุมเวอร์ชัน
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..80fe5ef53bd9f84390f5c58a2228bfb0e7bb521e
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,62 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# LlamaIndex.TS คืออะไร?
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+LlamaIndex.TS เป็นเฟรมเวิร์กข้อมูลสำหรับแอปพลิเคชัน LLM เพื่อรับเข้าข้อมูลที่เป็นส่วนตัวหรือเฉพาะด้าน โดยในขณะเดียวกันยังมีแพ็คเกจ Python ที่ใช้ได้เช่นกัน (ดูรายละเอียดเพิ่มเติม [ที่นี่](https://docs.llamaindex.ai/en/stable/)) แต่ LlamaIndex.TS นำเสนอคุณสมบัติหลักในแพ็คเกจที่เรียบง่ายและถูกปรับแต่งให้เหมาะสมกับการใช้งานกับ TypeScript
+
+## 🚀 ทำไมต้องใช้ LlamaIndex.TS?
+
+ที่สำคัญที่สุดของ LLMs คือการให้บริการอินเตอร์เฟซภาษาธรรมชาติระหว่างมนุษย์และข้อมูลที่ได้รับการสร้างขึ้น โมเดลที่มีอยู่อย่างแพร่หลายถูกฝึกสอนล่วงหน้าด้วยข้อมูลสาธารณะมากมาย ตั้งแต่ Wikipedia และรายการจดหมายถึงหนังสือเรียนและโค้ดต้นฉบับ
+
+แอปพลิเคชันที่สร้างขึ้นบน LLMs มักต้องการเพิ่มข้อมูลเฉพาะเจาะจงหรือข้อมูลส่วนตัวในโมเดลเหล่านี้ แต่ข้อมูลเหล่านั้นอาจกระจายอยู่ในแอปพลิเคชันและฐานข้อมูลที่แยกต่างหาก อาจอยู่ใน API, ฐานข้อมูล SQL หรือติดอยู่ในไฟล์ PDF และ slide decks
+
+นี่คือจุดที่ **LlamaIndex.TS** เข้ามาช่วยในการแก้ปัญหา
+
+## 🦙 LlamaIndex.TS ช่วยอย่างไร?
+
+LlamaIndex.TS ให้เครื่องมือต่อไปนี้:
+
+- **การโหลดข้อมูล** รับเข้าข้อมูลที่มีอยู่ในรูปแบบ `.txt`, `.pdf`, `.csv`, `.md` และ `.docx` โดยตรง
+- **ดัชนีข้อมูล** โครงสร้างข้อมูลของคุณในรูปแบบกลางที่ง่ายและมีประสิทธิภาพสำหรับ LLMs ในการบริโภค
+- **เอ็นจิน** ให้การเข้าถึงข้อมูลของคุณด้วยภาษาธรรมชาติ เช่น:
+  - เอ็นจินคิวรี่เป็นอินเตอร์เฟซการเรียกคืนที่ทรงพลังสำหรับผลลัพธ์ที่เสริมด้วยความรู้
+  - เอ็นจินแชทเป็นอินเตอร์เฟซการสนทนาสำหรับการโต้ตอบ "ไปมา" หลายข้อความกับข้อมูลของคุณ
+
+## 👨‍👩‍👧‍👦 LlamaIndex เหมาะกับใคร?
+
+LlamaIndex.TS ให้เครื่องมือชุดหลักที่จำเป็นสำหรับผู้ที่กำลังสร้างแอปพลิเคชัน LLM ด้วย JavaScript และ TypeScript
+
+API ระดับสูงของเราช่วยให้ผู้ใช้ผู้เริ่มต้นใช้ LlamaIndex.TS เพื่อรับเข้ารูปแบบและสอบถามข้อมูลของพวกเขา
+
+สำหรับแอปพลิเคชันที่ซับซ้อนมากขึ้น API ระดับต่ำของเราช่วยให้ผู้ใช้ขั้นสูงสามารถปรับแต่งและขยายส่วนของโมดูลใดๆ - ตัวเชื่อมต่อข้อมูล, ดัชนี, เครื่องค้นหาและเครื่องมือสอบถาม - เพื่อให้เหมาะสมกับความต้องการของพวกเขา.
+
+## เริ่มต้นใช้งาน
+
+`npm install llamaindex`
+
+เอกสารของเราประกอบด้วย[คำแนะนำการติดตั้ง](./installation.md)และ[บทแนะนำเบื้องต้น](./starter.md)เพื่อสร้างแอปพลิเคชันครั้งแรกของคุณ
+
+เมื่อคุณเริ่มใช้งานแล้ว [แนวคิดระดับสูง](./concepts.md) จะให้ภาพรวมของสถาปัตยกรรมแบบโมดูลของ LlamaIndex ส่วนตัวอย่างการใช้งานจริงแบบครบวงจร โปรดดูที่[บทแนะนำแบบครบวงจร](./end_to_end.md)
+
+## 🗺️ ระบบนิเวศ
+
+เพื่อดาวน์โหลดหรือร่วมสนับสนุน คุณสามารถค้นหา LlamaIndex ได้ที่:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## ชุมชน
+
+ต้องการความช่วยเหลือหรือมีคำแนะนำเกี่ยวกับคุณลักษณะใหม่ ร่วมกับชุมชน LlamaIndex ได้ที่:
+
+- Twitter: https://twitter.com/llama_index
+- Discord https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..2bdaf7f12608ea44eaaa3255c4615344ce882ef8
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 4
+---
+
+# ตัวเครื่องสนทนา (ChatEngine)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+ตัวเครื่องสนทนาเป็นวิธีที่รวดเร็วและง่ายในการสนทนากับข้อมูลในดัชนีของคุณ.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// เริ่มการสนทนา
+const response = await chatEngine.chat(query);
+```
+
+## การอ้างอิง Api
+
+- [ตัวเครื่องสนทนาแบบ ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [ตัวเครื่องสนทนาแบบ CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..75db747c78e7b026ad42d1703db7fc616d5c0dcc
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# ดัชนี (Index)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+ดัชนีเป็นคอนเทนเนอร์และการจัดระเบียบพื้นฐานสำหรับข้อมูลของคุณ LlamaIndex.TS สนับสนุนดัชนีสองประเภท:
+
+- `VectorStoreIndex` - จะส่ง `Node` ที่ดีที่สุด k ไปยัง LLM เมื่อสร้างการตอบกลับ ค่าเริ่มต้นของ k คือ 2
+- `SummaryIndex` - จะส่งทุก `Node` ในดัชนีไปยัง LLM เพื่อสร้างการตอบกลับ
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "ทดสอบ" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## การอ้างอิง API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf1323b36bc1413c389c759897365a9d42e0943c
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,21 @@
+---
+sidebar_position: 1
+---
+
+# โมดูลอ่าน / โหลด
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+LlamaIndex.TS สนับสนุนการโหลดไฟล์จากโฟลเดอร์อย่างง่ายด้วยคลาส `SimpleDirectoryReader` ในปัจจุบันรองรับไฟล์ประเภท `.txt`, `.pdf`, `.csv`, `.md` และ `.docx` และยังมีแผนที่จะรองรับไฟล์อื่นๆ ในอนาคต!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## การอ้างอิง API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..d6a702cbeb1a700124da52e59d65aa27f623d9d4
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# เอกสารและโหนด
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+`เอกสาร (Document)` และ `โหนด (Node)` เป็นองค์ประกอบพื้นฐานของดัชนีใด ๆ ในการเข้าถึง API สำหรับออบเจ็กต์เหล่านี้คล้ายกัน ออบเจ็กต์ `เอกสาร (Document)` แทนไฟล์ทั้งหมดในขณะที่ `โหนด (Node)` เป็นส่วนย่อยของเอกสารต้นฉบับนั้น ที่เหมาะสำหรับการใช้ใน LLM และ Q&A
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "ข้อความ", metadata: { key: "val" } });
+```
+
+## การอ้างอิง API
+
+- [เอกสาร (Document)](../../api/classes/Document.md)
+- [โหนดข้อความ (TextNode)](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..adfc59160ba4d4ca9bab822a83731a7135256370
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,44 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (เครื่องมือสอบถาม)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+เครื่องมือสอบถาม (QueryEngine) คือการห่อหุ้ม `Retriever` และ `ResponseSynthesizer` เข้าด้วยกันเป็นท่อ (pipeline) ซึ่งจะใช้สตริงคำค้นหาเพื่อเรียกข้อมูลโหนดแล้วส่งไปยัง LLM เพื่อสร้างคำตอบ
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("สตริงคำค้นหา");
+```
+
+## ตัวเครื่องสอบถามคำถามย่อย (Sub Question Query Engine)
+
+แนวคิดพื้นฐานของตัวเครื่องสอบถามคำถามย่อย (Sub Question Query Engine) คือการแบ่งคำถามเดียวเป็นหลายคำถาม แล้วรับคำตอบสำหรับแต่ละคำถามเหล่านั้น แล้วรวมคำตอบที่แตกต่างกันเป็นคำตอบเดียวสำหรับผู้ใช้ คุณสามารถคิดเกี่ยวกับมันเป็นเทคนิค "คิดให้ดีขึ้นขั้นตอนละขั้น" แต่วนซ้ำข้อมูลต้นทางของคุณ!
+
+### เริ่มต้นใช้งาน
+
+วิธีที่ง่ายที่สุดในการเริ่มลองใช้งานเครื่องมือสอบถามคำถามย่อย (Sub Question Query Engine) คือการเรียกใช้ไฟล์ subquestion.ts ในโฟลเดอร์ [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### เครื่องมือ
+
+ตัวเครื่องสอบถามคำถามย่อย (SubQuestionQueryEngine) ถูกนำมาใช้งานด้วยเครื่องมือ (Tools) แนวคิดพื้นฐานของเครื่องมือ (Tools) คือเครื่องมือที่สามารถใช้งานได้สำหรับโมเดลภาษาขนาดใหญ่ ในกรณีนี้ SubQuestionQueryEngine ของเราพึ่ง QueryEngineTool ซึ่งเป็นเครื่องมือในการเรียกใช้คำถามบน QueryEngine นี้ สิ่งนี้ช่วยให้เราสามารถให้โมเดลมีตัวเลือกในการสอบถามเอกสารต่าง ๆ สำหรับคำถามต่าง ๆ ตัวอย่างเช่น คุณยังสามารถจินตนาการได้ว่า SubQuestionQueryEngine อาจใช้เครื่องมือที่ค้นหาสิ่งใดบนเว็บหรือรับคำตอบโดยใช้ Wolfram Alpha
+
+คุณสามารถเรียนรู้เพิ่มเติมเกี่ยวกับเครื่องมือได้โดยดูที่เอกสาร LlamaIndex Python ที่ https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
+## อ้างอิง API
+
+- [RetrieverQueryEngine (เครื่องมือสอบถาม Retriever)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (เครื่องมือสอบถาม SubQuestion)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (เครื่องมือสอบถาม QueryEngine)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea7bd78611dbcbd290565abffeaed217fc170245
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,35 @@
+# โมดูลหลัก
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+LlamaIndex.TS มีโมดูลหลักหลายระดับ แยกเป็นโมดูลระดับสูงสำหรับเริ่มต้นใช้งานได้อย่างรวดเร็ว และโมดูลระดับต่ำสำหรับปรับแต่งส่วนประกอบหลักตามความต้องการของคุณ
+
+## โมดูลระดับสูง
+
+- [**เอกสาร**](./high_level/documents_and_nodes.md): เอกสารแทนไฟล์ข้อความ ไฟล์ PDF หรือข้อมูลต่อเนื่องอื่น ๆ
+
+- [**โหนด**](./high_level/documents_and_nodes.md): ส่วนประกอบข้อมูลพื้นฐาน โดยทั่วไปแล้วเป็นส่วนของเอกสารที่แบ่งออกเป็นส่วนย่อยที่สามารถจัดการได้และเล็กพอที่จะสามารถนำเข้าโมเดลฝังตัวและ LLM
+
+- [**อ่าน/โหลด**](./high_level/data_loader.md): อ่านหรือโหลดเป็นสิ่งที่รับเอกสารในโลกจริงและแปลงเป็นคลาสเอกสารที่สามารถใช้ในดัชนีและคิวรีของคุณได้ เราสนับสนุนไฟล์ข้อความธรรมดาและไฟล์ PDF และอีกมากมาย
+
+- [**ดัชนี**](./high_level/data_index.md): ดัชนีเก็บโหนดและการฝังตัวของโหนดเหล่านั้น
+
+- [**เครื่องมือค้นหา**](./high_level/query_engine.md): เครื่องมือค้นหารับคำค้นที่คุณใส่เข้าไปและให้ผลลัพธ์กลับมา โดยทั่วไปจะรวมพรอมต์ที่สร้างไว้ล่วงหน้ากับโหนดที่เลือกจากดัชนีของคุณ เพื่อให้ LLM มีบริบทที่จำเป็นในการตอบคำถามของคุณ
+
+- [**เครื่องมือแชท**](./high_level/chat_engine.md): เครื่องมือแชทช่วยให้คุณสร้างแชทบอทที่จะปฏิสัมพันธ์กับดัชนีของคุณ
+
+## โมดูลระดับต่ำ
+
+- [**LLM**](./low_level/llm.md): คลาส LLM เป็นอินเตอร์เฟซที่รวมกันของผู้ให้บริการโมเดลภาษาขนาดใหญ่ เช่น OpenAI GPT-4, Anthropic Claude หรือ Meta LLaMA คุณสามารถสร้างคลาสย่อยขึ้นมาเพื่อเขียนตัวเชื่อมต่อกับโมเดลภาษาขนาดใหญ่ของคุณเอง
+
+- [**Embedding**](./low_level/embedding.md): เอมเบดดิงคือการแทนข้อความด้วยเวกเตอร์ของตัวเลขทศนิยม โมเดล text-embedding-ada-002 ของ OpenAI เป็นโมเดลเอมเบดดิงเริ่มต้นของเรา และเอมเบดดิงแต่ละตัวที่สร้างขึ้นประกอบด้วยตัวเลขทศนิยม 1,536 ตัว โมเดลเอมเบดดิงยอดนิยมอีกตัวคือ BERT ซึ่งใช้ตัวเลขทศนิยม 768 ตัวในการแทนแต่ละโหนด เรามีเครื่องมือหลายอย่างสำหรับทำงานกับเอมเบดดิง รวมถึงตัวเลือกการคำนวณความคล้ายคลึง 3 แบบ และ Maximum Marginal Relevance
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): กลยุทธ์ในการแบ่งข้อความเป็นส่วนย่อยๆ เป็นสิ่งสำคัญอย่างมากต่อประสิทธิภาพของการค้นหาข้อมูลที่ฝังอยู่ ในปัจจุบันเรามีค่าเริ่มต้นในการแบ่งข้อความ แต่ไม่มีวิธีที่เหมาะกับทุกกรณี ขึ้นอยู่กับเอกสารต้นฉบับคุณอาจต้องการใช้ขนาดและกลยุทธ์ในการแบ่งที่แตกต่างกัน ในปัจจุบันเราสนับสนุนการแบ่งตามขนาดคงที่ การแบ่งตามขนาดคงที่พร้อมกับส่วนที่ซ้อนทับ การแบ่งตามประโยค และการแบ่งตามย่อหน้า ตัวแบ่งข้อความถูกใช้โดย NodeParser เมื่อแบ่ง `Document` เป็น `Node`
+
+- [**Retriever**](./low_level/retriever.md): Retriever เป็นส่วนที่เลือก Node ที่จะเรียกคืนจากดัชนี ที่นี่คุณอาจต้องการลองเรียกคืน Node มากหรือน้อยกว่านี้ การเปลี่ยนฟังก์ชันความคล้ายคลึงของคุณ หรือสร้าง Retriever เองสำหรับแต่ละกรณีการใช้งานในแอปพลิเคชันของคุณ เช่น คุณอาจต้องการ Retriever แยกสำหรับเนื้อหาของโค้ดกับเนื้อหาข้อความ
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer รับผิดชอบในการรับสตริงคำถาม และใช้รายการ `Node` เพื่อสร้างคำตอบ สามารถทำได้หลายรูปแบบ เช่น การวนซ้ำผ่านทั้งหมดของเนื้อหาและปรับปรุงคำตอบ หรือสร้างต้นไม้ของสรุปและส่งคืนสรุปรากฐาน
+
+- [**Storage**](./low_level/storage.md): ในที่สุดคุณจะต้องการเก็บดัชนี ข้อมูล และเวกเตอร์ของคุณ แทนที่จะรันโมเดลเอมเบดดิงใหม่ทุกครั้ง IndexStore, DocStore, VectorStore และ KVStore เป็นแอ็บสแตรกชันที่ช่วยให้คุณทำเช่นนั้นได้ เมื่อรวมกันจะกลายเป็น StorageContext ปัจจุบันเราให้คุณบันทึกเอมเบดดิงลงไฟล์บนระบบไฟล์ (หรือระบบไฟล์เสมือนในหน่วยความจำ) และกำลังทยอยเพิ่มการเชื่อมต่อกับฐานข้อมูลเวกเตอร์อยู่ ดูสเก็ตช์ท้ายรายการสำหรับการประกอบโมดูลเหล่านี้เข้าด้วยกัน
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..88f8da7656dcaefe4948c168c8f50f1e50f0525c
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# การฝัง (Embedding)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+โมเดลการฝังใน LlamaIndex รับผิดชอบในการสร้างการแสดงตัวเลขของข้อความ โดยค่าเริ่มต้น LlamaIndex จะใช้โมเดล `text-embedding-ada-002` จาก OpenAI
+
+สามารถตั้งค่าได้โดยชัดเจนในอ็อบเจ็กต์ `ServiceContext`
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## การอ้างอิง API
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..24edd19842682e0326ad41401f456155554fc9e1
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+LLM รับผิดชอบในการอ่านข้อความและสร้างการตอบสนองทางภาษาธรรมชาติสำหรับคำถามต่างๆ โดยค่าเริ่มต้น LlamaIndex.TS ใช้ `gpt-3.5-turbo`.
+
+LLM สามารถตั้งค่าได้โดยชัดเจนในอ็อบเจกต์ `ServiceContext`.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## การอ้างอิง API
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..3b4994bfcfad965940e06dea89e1e5b29b7ae547
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser (ตัวแยกโหนด)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+`NodeParser` ใน LlamaIndex รับผิดชอบในการแบ่ง `Document` เป็น `Node` ที่จัดการได้ง่ายมากขึ้น เมื่อคุณเรียกใช้ `.fromDocuments()` `NodeParser` จาก `ServiceContext` จะถูกใช้งานเพื่อทำให้งานนี้เป็นอัตโนมัติสำหรับคุณ หรือถ้าคุณต้องการคุณสามารถใช้งานเพื่อแบ่งเอกสารล่วงหน้าได้
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "ฉันอายุ 10 ปี จอห์นอายุ 20 ปี" }),
+]);
+```
+
+## TextSplitter (ตัวแยกข้อความ)
+
+ตัวแยกข้อความพื้นฐานจะแยกข้อความตามประโยค และยังสามารถใช้เป็นโมดูลเดี่ยวสำหรับแยกข้อความดิบได้ด้วย
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("สวัสดีชาวโลก");
+```
+
+## API Reference (การอ้างอิง API)
+
+- [SimpleNodeParser (ตัวแยกโหนดแบบง่าย)](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter (ตัวแยกประโยค)](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..a0882aa44fe6da4c8b1d6db8afcaba84ca08b85b
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,48 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer (ตัวสังเคราะห์การตอบกลับ)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+ResponseSynthesizer มีหน้าที่ส่งคำถาม, โหนด และแม่แบบข้อความให้กับ LLM เพื่อสร้างคำตอบ มีโหมดหลักๆ สำหรับการสร้างคำตอบดังนี้:
+
+- `Refine` (ปรับปรุง): "สร้างและปรับปรุง" คำตอบโดยการไปทีละชิ้นข้อความที่ได้รับ
+  สร้างการเรียก LLM แยกตามโหนด ใช้สำหรับคำตอบที่ละเอียดมากขึ้น
+- `CompactAndRefine` (คอมแพ็คและปรับปรุง) (ค่าเริ่มต้น): "คอมแพ็ค" แม่แบบระหว่างการเรียก LLM โดยการเติมข้อความที่จะพอดีกับขนาดของแม่แบบสูงสุด หากมีข้อความมากเกินไปที่จะเติมในแม่แบบเดียว ให้ "สร้างและปรับปรุง" คำตอบโดยไปทีละแม่แบบ คล้ายกับ `refine` แต่ควรจะทำให้มีการเรียก LLM น้อยลง
+- `TreeSummarize` (สรุปต้นไม้): โดยให้ชุดข้อความและคำถาม สร้างต้นไม้และส่งโหนดรากเป็นคำตอบ ใช้สำหรับการสรุป
+- `SimpleResponseBuilder` (สร้างคำตอบแบบง่าย): โดยให้ชุดข้อความและคำถาม นำคำถามไปใช้กับแต่ละข้อความ และสะสมคำตอบในอาร์เรย์ ส่งคืนสตริงที่ต่อกันของคำตอบทั้งหมด ใช้เมื่อต้องการเรียกคำถามเดียวกันต่อแต่ละข้อความ
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "ฉันอายุ 10 ปี" }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "จอห์นอายุ 20 ปี" }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "ฉันอายุเท่าไร?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## อ้างอิง API
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..0ef44f8ec292bb36e5761ce9817853ebe607ff70
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (ตัวเรียกคืน)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+Retriever ใน LlamaIndex คือสิ่งที่ใช้ในการเรียกคืน `Node` จากดัชนีโดยใช้ query string ซึ่ง `VectorIndexRetriever` จะเรียกคืนโหนดที่คล้ายกันที่สุด top-k ในขณะที่ `SummaryIndexRetriever` จะเรียกคืนโหนดทั้งหมดไม่ว่าจะเป็น query อะไร
+
+```typescript
+const retriever = vector_index.asRetriever();
+retriever.similarityTopK = 3;
+
+// เรียกคืนโหนด!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference (การอ้างอิง API)
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..712ab9aff1be9862f98af864da981e37f30a07bb
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 7
+---
+
+# การจัดเก็บข้อมูล (Storage)
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+การจัดเก็บข้อมูลใน LlamaIndex.TS ทำงานอัตโนมัติเมื่อคุณกำหนดค่า `StorageContext` object แล้ว แค่กำหนดค่า `persistDir` และเชื่อมต่อกับดัชนี
+
+ในขณะนี้เราสนับสนุนการบันทึกและโหลดข้อมูลจากดิสก์เท่านั้น แต่ยังมีการรวมระบบอื่นๆ ในอนาคต!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "./src";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## การอ้างอิง API (API Reference)
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..036f89452f46a5bae20933594884955281515f8a
--- /dev/null
+++ b/apps/docs/i18n/th/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# คู่มือเริ่มต้น
+
+`เอกสารนี้ได้รับการแปลโดยอัตโนมัติและอาจมีข้อผิดพลาด อย่าลังเลที่จะเปิด Pull Request เพื่อแนะนำการเปลี่ยนแปลง.`
+
+เมื่อคุณ[ติดตั้ง LlamaIndex.TS โดยใช้ NPM](installation)และตั้งค่าคีย์ OpenAI ของคุณเสร็จสิ้น คุณพร้อมที่จะเริ่มต้นแอปพลิเคชันครั้งแรกของคุณแล้ว:
+
+ในโฟลเดอร์ใหม่:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # หากจำเป็น
+```
+
+สร้างไฟล์ `example.ts` โค้ดนี้จะโหลดข้อมูลตัวอย่างบางส่วน สร้างเอกสาร ดัชนี (ซึ่งสร้างเอมเบดด้วย OpenAI) และจากนั้นสร้างเครื่องมือค้นหาเพื่อตอบคำถามเกี่ยวกับข้อมูล
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // โหลดเอสเซย์จาก abramov.txt ใน Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // สร้างออบเจ็กต์เอกสารด้วยเอสเซย์
+  const document = new Document({ text: essay });
+
+  // แยกข้อความและสร้างเอมเบด จัดเก็บใน VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // ค้นหาดัชนี
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "ผู้เขียนทำอะไรในช่วงเวลาที่เขาเรียนมหาวิทยาลัย?",
+  );
+
+  // แสดงผลลัพธ์
+  console.log(response.toString());
+}
+
+main();
+```
+
+จากนั้นคุณสามารถเรียกใช้ได้โดยใช้
+
+```bash
+npx ts-node example.ts
+```
+
+พร้อมที่จะเรียนรู้เพิ่มเติมหรือไม่? ดู NextJS playground ของเราได้ที่ https://llama-playground.vercel.app/ แหล่งที่มาสามารถดูได้ที่ https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..953217330390c50e31a48a2ff2fa2f5057c1c084
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,85 @@
+---
+sidebar_position: 3
+---
+
+# Yüksek Düzeyli Kavramlar
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex.TS, özel veriler üzerinde LLM destekli uygulamalar (örneğin, soru-cevap, sohbet botu) oluşturmanıza yardımcı olur.
+
+Bu yüksek düzeyli kavramlar rehberinde aşağıdakileri öğreneceksiniz:
+
+- LLM'in kendi verilerinizi kullanarak soruları nasıl yanıtlayabileceği.
+- Kendi sorgu boru hattınızı oluşturmak için LlamaIndex.TS'deki temel kavramlar ve modüller.
+
+## Verileriniz Üzerinde Soruları Yanıtlama
+
+LlamaIndex, verilerinizle bir LLM kullanırken iki aşamalı bir yöntem kullanır:
+
+1. **indeksleme aşaması**: bir bilgi tabanını hazırlama ve
+2. **sorgulama aşaması**: bir soruya yanıt vermek için LLM'e yardımcı olacak ilgili bağlamı bilgiden almak
+
+![](./_static/concepts/rag.jpg)
+
+Bu süreç aynı zamanda Retrieval Augmented Generation (RAG) olarak da bilinir.
+
+LlamaIndex.TS, her iki adımı da son derece kolay hale getiren temel araç setini sağlar.
+
+Her aşamayı detaylı olarak inceleyelim.
+
+### İndeksleme Aşaması
+
+LlamaIndex.TS, bir dizi veri bağlayıcı ve indeks ile bilgi tabanınızı hazırlamanıza yardımcı olur.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Veri Yükleyicileri**](./modules/high_level/data_loader.md):
+Bir veri bağlayıcısı (örneğin, `Reader`), farklı veri kaynaklarından ve veri formatlarından basit bir `Document` temsiline (metin ve basit meta veri) veri alır.
+
+[**Belgeler / Düğümler**](./modules/high_level/documents_and_nodes.md): Bir `Document`, herhangi bir veri kaynağı etrafında genel bir konteynerdir - örneğin, bir PDF, bir API çıktısı veya bir veritabanından alınan veriler. Bir `Node`, LlamaIndex'deki verinin atomik bir birimidir ve bir kaynak `Document`'in bir "parçasını" temsil eder. Doğru ve açıklayıcı alım işlemlerini mümkün kılmak için meta verileri ve ilişkileri (diğer düğümlere) içeren zengin bir temsil içerir.
+
+[**Veri İndeksleri**](./modules/high_level/data_index.md):
+Verilerinizi içe aktardıktan sonra, LlamaIndex verilerinizi kolayca alınabilir bir formata dönüştürmenize yardımcı olur.
+
+LlamaIndex, arka planda ham belgeleri ara temsillere ayrıştırır, vektör gömlemelerini hesaplar ve verilerinizi bellekte veya diske depolar.
+
+### Sorgulama Aşaması
+
+Sorgulama aşamasında, sorgu boru hattı, bir kullanıcı sorgusu verildiğinde en uygun bağlamı alır
+ve bunu LLM'ye (sorgu ile birlikte) bir yanıt sentezlemek için iletilir.
+
+Bu, LLM'in orijinal eğitim verilerinde olmayan güncel bilgilere sahip olmasını sağlar
+(ayrıca hayal ürünü azaltır).
+
+Sorgulama aşamasındaki temel zorluk, (potansiyel olarak birçok) bilgi tabanı üzerinde geri alma, düzenleme ve akıl yürütmedir.
+
+LlamaIndex, soru-cevap (sorgu motoru), sohbet botu (sohbet motoru) veya bir ajanın bir parçası olarak RAG boru hatları oluşturmanıza yardımcı olan birleştirilebilir modüller sağlar.
+
+Bu yapı blokları, sıralama tercihlerini yansıtmak ve yapılandırılmış bir şekilde birden fazla bilgi tabanı üzerinde akıl yürütmek için özelleştirilebilir.
+
+![](./_static/concepts/querying.jpg)
+
+#### Yapı Taşları
+
+[**Geri Alıcılar**](./modules/low_level/retriever.md):
+Bir geri alıcı, bir sorgu verildiğinde (yani indeks) bir bilgi tabanından ilgili bağlamı nasıl verimli bir şekilde alacağını tanımlar.
+Belirli geri alma mantığı indekse göre farklılık gösterir; en popüleri, bir vektör indeksi üzerinde yoğun (dense) geri almadır.
+
+[**Yanıt Sentezleyiciler**](./modules/low_level/response_synthesizer.md):
+Bir yanıt sentezleyici, bir LLM'den bir yanıt üretir ve bunun için bir kullanıcı sorgusu ve alınan metin parçalarının belirli bir kümesi kullanılır.
+
+#### Boru Hatları
+
+[**Sorgu Motorları**](./modules/high_level/query_engine.md):
+Bir sorgu motoru, verileriniz üzerinde soru sormak için uçtan uca bir boru hattıdır.
+Doğal dilde bir sorgu alır ve bir yanıtla birlikte, geri alınıp LLM'ye iletilen referans bağlamını döndürür.
+
+[**Sohbet Motorları**](./modules/high_level/chat_engine.md):
+Bir sohbet motoru, tek bir soru ve cevap yerine verilerinizle bir konuşma yapmak için uçtan uca bir boru hattıdır.
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ce75cb1c3f6b77ee88f3c1892e4290277834066
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,61 @@
+---
+sidebar_position: 4
+---
+
+# Uçtan Uca Örnekler
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+Depoda LlamaIndex.TS kullanarak birkaç uçtan uca örnek bulunmaktadır.
+
+Aşağıdaki örnekleri inceleyin veya onları deneyin ve kendi ihtiyaçlarınıza uyacak şekilde özelleştirilebilen interaktif Github Codespace öğreticileriyle dakikalar içinde tamamlayın. Dev-Docs tarafından sağlanan öğreticilere [buradan](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json) erişebilirsiniz:
+
+## [Sohbet Motoru](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Bir dosyayı okuyun ve LLM ile ilgili sohbet edin.
+
+## [Vektör İndeksi](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Bir vektör indeksi oluşturun ve sorgulayın. Vektör indeksi, en ilgili k en üst düğümü getirmek için gömme kullanacaktır. Varsayılan olarak, k değeri 2'dir.
+
+## [Özet İndeks](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Bir liste indeksi oluşturun ve sorgulayın. Bu örnek ayrıca yanıt üretirken kullanılacak en iyi düğümleri seçmek için `LLMRetriever`'ı da kullanır.
+
+## [Bir İndeks Kaydet / Yükle](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Bir vektör indeksi oluşturun ve yükleyin. LlamaIndex.TS'de depolama bağlamı nesnesi oluşturulduğunda diskte kalıcılık otomatik olarak gerçekleşir.
+
+## [Özelleştirilmiş Vektör İndeksi](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Bir vektör indeksi oluşturun ve sorgulayın, aynı zamanda `LLM`, `ServiceContext` ve `similarity_top_k`'yi yapılandırın.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Bir OpenAI LLM oluşturun ve doğrudan sohbet için kullanın.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Bir Llama-2 LLM oluşturun ve doğrudan sohbet için kullanın.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Karmaşık sorguları birden fazla alt soruya bölen ve ardından tüm alt soruların cevaplarına göre bir yanıt toplayan `SubQuestionQueryEngine` kullanır.
+
+## [Düşük Seviye Modüller](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Bu örnek, gerçek bir sorgu motoruna ihtiyaç duymadan birkaç düşük seviye bileşen kullanır. Bu bileşenler herhangi bir uygulamada veya ihtiyaçlarınıza uyacak şekilde özelleştirilebilir ve alt sınıflandırılabilir.
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..52d0f846fca73a752ecac144378d4648e3db7e37
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Ortamlar
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex şu anda resmi olarak NodeJS 18 ve NodeJS 20'yi desteklemektedir.
+
+## NextJS Uygulama Yönlendirici
+
+Eğer NextJS Uygulama Yönlendirici rota işleyicileri/sunucusuz fonksiyonlar kullanıyorsanız, NodeJS modunu kullanmanız gerekecektir:
+
+```js
+export const runtime = "nodejs"; // varsayılan
+```
+
+ve next.config.js dosyanıza pdf-parse için bir istisna eklemeniz gerekecektir:
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // pdf-parse'ı NextJS Uygulama Yönlendirici ile gerçek NodeJS modunda kullanır
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..524c8ab9bde4f9f9e5de163cffe8a3897b715a9b
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,68 @@
+---
+sidebar_position: 1
+---
+
+
+# Kurulum ve Ayarlama
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+
+NodeJS'in 18 veya daha yeni bir sürümüne sahip olduğunuzdan emin olun.
+
+
+## create-llama Kullanma
+
+LlamaIndex ile başlamanın en kolay yolu `create-llama` kullanmaktır. Bu CLI aracı, her şeyi hazır ayarlanmış yeni bir LlamaIndex uygulamasını hızlıca oluşturmanıza olanak tanır.
+
+Sadece şunu çalıştırın:
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+başlamak için. Uygulamanız oluşturulduktan sonra, geliştirme sunucusunu başlatmak için
+
+```bash npm2yarn
+npm run dev
+```
+
+komutunu çalıştırın. Ardından [http://localhost:3000](http://localhost:3000) adresini ziyaret ederek uygulamanızı görebilirsiniz.
+
+## NPM ile Kurulum
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Ortam Değişkenleri
+
+Örneklerimiz varsayılan olarak OpenAI kullanır. OpenAI anahtarınızı aşağıdaki gibi ayarlamanız gerekecektir:
+
+```bash
+export OPENAI_API_KEY="sk-......" # https://platform.openai.com/account/api-keys adresinden anahtarınızla değiştirin
+```
+
+Eğer her seferinde otomatik olarak yüklenmesini istiyorsanız, .zshrc/.bashrc dosyanıza ekleyin.
+
+UYARI: OpenAI anahtarınızı sürüm kontrolüne eklemeyin.
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..9afa80e0cb57ceb1aa58e034ab3ee2abc354e8e7
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,62 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# LlamaIndex.TS Nedir?
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex.TS, LLM uygulamaları için özel veya alana özgü verileri almanızı, yapılandırmanızı ve bunlara erişmenizi sağlayan bir veri çerçevesidir. Bir Python paketi de mevcuttur ([buraya](https://docs.llamaindex.ai/en/stable/) bakın), ancak LlamaIndex.TS, temel özellikleri TypeScript ile kullanım için optimize edilmiş basit bir paket içinde sunar.
+
+## 🚀 Neden LlamaIndex.TS?
+
+LLM'lerin temelinde, insanlar ve çıkarılan veriler arasında doğal dil arayüzü bulunur. Geniş çapta kullanılabilen modeller, Wikipedia, posta listeleri, ders kitapları ve kaynak kodları gibi halka açık verilerin büyük miktarlarında önceden eğitilmiştir.
+
+LLM'lerin üzerine inşa edilen uygulamalar genellikle bu modelleri özel veya alana özgü verilerle genişletmeyi gerektirir. Ne yazık ki bu veriler, birbirinden yalıtılmış uygulamalar ve veri depoları arasında dağılmış olabilir: API'lerin arkasında, SQL veritabanlarında veya PDF'lerde ve slayt sunumlarında sıkışıp kalmış olabilir.
+
+İşte burada **LlamaIndex.TS** devreye giriyor.
+
+## 🦙 LlamaIndex.TS Nasıl Yardımcı Olabilir?
+
+LlamaIndex.TS aşağıdaki araçları sağlar:
+
+- **Veri yükleme** mevcut `.txt`, `.pdf`, `.csv`, `.md` ve `.docx` verilerinizi doğrudan alır
+- **Veri dizinleri** verilerinizi LLM'lerin tüketmesi için kolay ve performanslı ara temsillerde yapılandırır.
+- **Motorlar** verilerinize doğal dil erişimi sağlar. Örneğin:
+  - Sorgu motorları, bilgi artırılmış çıktılar için güçlü geri alma arabirimleridir.
+  - Sohbet motorları, verilerinizle çoklu mesajlı, "ileri geri" etkileşimler için konuşma arabirimleridir.
+
+"
+
+## 👨‍👩‍👧‍👦 LlamaIndex kimler için?
+
+LlamaIndex.TS, JavaScript ve TypeScript ile LLM uygulamaları oluşturan herkes için temel araçlar sağlar.
+
+Yüksek seviyeli API'miz, başlangıç seviyesindeki kullanıcıların verilerini almak ve sorgulamak için LlamaIndex.TS'yi kullanmalarını sağlar.
+
+Daha karmaşık uygulamalar için, düşük seviyeli API'larımız, gelişmiş kullanıcıların ihtiyaçlarına uyacak şekilde herhangi bir modülü - veri bağlayıcıları, indeksler, alıcılar ve sorgu motorları - özelleştirmelerine ve genişletmelerine olanak tanır.
+
+## Başlarken
+
+`npm install llamaindex`
+
+Dökümantasyonumuz, [Kurulum Talimatları](./installation.md) ve ilk uygulamanızı oluşturmanız için bir [Başlangıç Kılavuzu](./starter.md) içerir.
+
+Çalışmaya başladıktan sonra, [Yüksek Düzeyli Kavramlar](./concepts.md) LlamaIndex'in modüler mimarisinin bir genel bakışını sunar. Daha fazla pratik örnek için [Uçtan Uca Öğreticilerimize](./end_to_end.md) göz atabilirsiniz.
+
+## 🗺️ Ekosistem
+
+İndirmek veya katkıda bulunmak için LlamaIndex'i aşağıdaki platformlarda bulabilirsiniz:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Topluluk
+
+Yardıma mı ihtiyacınız var? Bir özellik öneriniz mi var? LlamaIndex topluluğuna katılın:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..8f1e6df7179047daaff938628b6ea3ca332ba65d
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine (Sohbet Motoru)
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+Sohbet motoru, indeksinizdeki verilerle sohbet etmenin hızlı ve basit bir yoludur.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// sohbete başla
+const response = await chatEngine.chat(query);
+```
+
+## API Referansları
+
+- [ContextChatEngine (BağlamChatMotoru)](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine (KısaltılmışSoruChatMotoru)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..3dcd8946f795c8c05773cdae86613c927a1e5e7b
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# İndeks
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+Bir indeks, verilerinizin temel konteyneri ve organizasyonudur. LlamaIndex.TS iki indeks türünü destekler:
+
+- `VectorStoreIndex` - yanıt oluşturulurken en iyi k `Node`'ları LLM'ye gönderir. Varsayılan en iyi k değeri 2'dir.
+- `SummaryIndex` - yanıt oluşturmak için indeksteki her `Node`'u LLM'ye gönderir.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## API Referansı
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..dcc8e293f65ae2866e95197610f6c14446fd72c6
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Okuyucu / Yükleyici
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex.TS, `SimpleDirectoryReader` sınıfını kullanarak klasörlerden dosyaların kolayca yüklenmesini destekler. Şu anda `.txt`, `.pdf`, `.csv`, `.md` ve `.docx` dosyaları desteklenmektedir ve gelecekte daha fazlası planlanmaktadır!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Referansı
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..c9fb5bc3eac90fc6da885260385b89a84ced8be9
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Belgeler ve Düğümler
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+`Belge`ler ve `Düğüm`ler, herhangi bir dizinin temel yapı taşlarıdır. Bu nesnelerin API'si benzer olsa da, `Belge` nesneleri tüm dosyaları temsil ederken, `Düğüm`ler, orijinal belgenin daha küçük parçalarıdır ve LLM ve Q&A için uygundur.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "metin", metadata: { key: "val" } });
+```
+
+## API Referansı
+
+- [Belge](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..9d3fcbc21173379a63014df5a9328c16b5eda33c
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,42 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+Bir sorgu motoru, bir `Retriever` ve bir `ResponseSynthesizer`'ı bir boru hattına sarar ve sorgu dizesini kullanarak düğümleri alır ve ardından yanıt oluşturmak için LLM'ye gönderir.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("sorgu dizesi");
+```
+
+## Alt Soru Sorgu Motoru
+
+Alt Soru Sorgu Motoru'nun temel fikri, tek bir sorguyu birden çok sorguya bölmek, bu sorguların her biri için bir yanıt almak ve ardından bu farklı yanıtları kullanıcı için tek bir tutarlı yanıtta birleştirmektir. Bunu, veri kaynaklarınız üzerinde yineleyerek uygulanan "bunu adım adım düşün" tekniği gibi düşünebilirsiniz!
+
+### Başlarken
+
+Alt Soru Sorgu Motoru'nu denemeye başlamanın en kolay yolu, [örnekler](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts) klasöründe bulunan subquestion.ts dosyasını çalıştırmaktır.
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Araçlar
+
+SubQuestionQueryEngine, Araçlar ile uygulanır. Araçların temel fikri, büyük dil modeli için yürütülebilir seçenekler olmalarıdır. Bu durumda, SubQuestionQueryEngine'imiz, sorgu motorunda sorguları çalıştırmak için bir QueryEngineTool'a dayanır. Bu, modelin farklı sorular için farklı belgelere sorgu yapma seçeneği sunmamızı sağlar. Ayrıca, SubQuestionQueryEngine'in web'de bir şey arayan veya Wolfram Alpha'yı kullanarak bir yanıt alan bir Araç kullanabileceğini hayal edebilirsiniz.
+
+Araçlar hakkında daha fazla bilgi için LlamaIndex Python belgelerine göz atabilirsiniz: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
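+Küçük bir taslak (varsayımlar: `vectorQueryEngine` daha önce oluşturulmuş bir sorgu motorudur; araç adı ve açıklaması yalnızca örnek amaçlıdır):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Her QueryEngineTool, modelin seçebileceği yürütülebilir bir seçenektir
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: vectorQueryEngine, // varsayım: mevcut bir sorgu motoru
+      metadata: {
+        name: "belgeler",
+        description: "Belgelerin içeriğiyle ilgili sorular için kullanışlıdır",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query(
+  "Bu belgelerdeki ana temaları karşılaştırın",
+);
+```
+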
+"
+
+## API Referansı
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b3aa0e9b0a28336793ee01ee68d2bba1ebed297
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Temel Modüller
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex.TS, hızlı başlamanız için yüksek seviyeli modüllere ve ihtiyaçlarınıza göre özelleştirebileceğiniz düşük seviyeli modüllere ayrılmış birkaç temel modül sunar.
+
+## Yüksek Seviyeli Modüller
+
+- [**Belge**](./high_level/documents_and_nodes.md): Bir belge, bir metin dosyası, PDF dosyası veya diğer sürekli veri parçalarını temsil eder.
+
+- [**Düğüm**](./high_level/documents_and_nodes.md): Temel veri birimidir. Genellikle bunlar, bir gömme modeline ve LLM'ye beslenebilecek kadar küçük, yönetilebilir parçalara bölünmüş belge bölümleridir.
+
+- [**Okuyucu/Yükleyici**](./high_level/data_loader.md): Bir okuyucu veya yükleyici, gerçek dünyada bir belgeyi alır ve bir Belge sınıfına dönüştürerek İndeks ve sorgularınızda kullanılabilir hale getirir. Şu anda düz metin dosyalarını ve PDF'leri destekliyoruz ve daha birçok formata destek eklemeye devam ediyoruz.
+
+- [**İndeksler**](./high_level/data_index.md): İndeksler, Düğümleri ve bu düğümlerin gömülerini depolar.
+
+- [**Sorgu Motoru**](./high_level/query_engine.md): Sorgu motorları, girdiğiniz sorguyu oluşturan ve sonucu size veren şeylerdir. Sorgu motorları genellikle, LLM'nin sorgunuzu yanıtlamak için ihtiyaç duyduğu bağlamı sağlamak için önceden oluşturulmuş bir ipucuyla İndeksinizden seçilen düğümleri birleştirir.
+
+- [**Sohbet Motoru**](./high_level/chat_engine.md): Bir Sohbet Motoru, İndekslerinizle etkileşimde bulunacak bir sohbet botu oluşturmanıza yardımcı olur.
+
+## Düşük Seviyeli Modül
+
+- [**LLM**](./low_level/llm.md): LLM sınıfı, OpenAI GPT-4, Anthropic Claude veya Meta LLaMA gibi büyük bir dil modeli sağlayıcısı üzerinde birleşik bir arayüz sağlar. Kendi büyük dil modelinize bir bağlayıcı yazmak için bunu alt sınıf olarak kullanabilirsiniz.
+
+- [**Gömme**](./low_level/embedding.md): Bir gömme, kayan nokta sayılarının bir vektörü olarak temsil edilir. OpenAI'nin varsayılan gömme modeli olan text-embedding-ada-002, her bir gömme için 1.536 kayan nokta sayısı içerir. Başka popüler bir gömme modeli ise her bir Düğümü temsil etmek için 768 kayan nokta sayısı kullanan BERT'tir. Gömme ile çalışmak için 3 benzerlik hesaplama seçeneği ve Maksimum Marjinal Önem dahil bir dizi yardımcı program sağlıyoruz.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Metin bölme stratejileri, gömme aramasının genel etkinliği için son derece önemlidir. Şu anda bir varsayılanımız olsa da, herkesin kullanabileceği tek bir çözüm yoktur. Kaynak belgelere bağlı olarak farklı bölme boyutları ve stratejileri kullanmak isteyebilirsiniz. Şu anda sabit boyuta göre bölme, örtüşen bölümlerle sabit boyuta göre bölme, cümlelere göre bölme ve paragraflara göre bölme gibi bölme yöntemlerini destekliyoruz. Metin bölücü, `Belge`leri `Düğüm`lere bölerken NodeParser tarafından kullanılır.
+
+- [**Retriever**](./low_level/retriever.md): Retriever, dizinden alınacak Düğümleri gerçekten seçen bileşendir. Burada, her sorgu için daha fazla veya daha az Düğüm almayı deneyebilir, benzerlik fonksiyonunuzu değiştirebilir veya uygulamanızdaki her bir özel kullanım durumu için kendi retriever'ınızı oluşturabilirsiniz. Örneğin, kod içeriği ile metin içeriği için ayrı bir retriever'a sahip olmak isteyebilirsiniz.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer, bir sorgu dizesini alır ve bir `Düğüm` listesini kullanarak bir yanıt oluşturur. Bu, bir yanıtı tüm bağlam üzerinde dolaşarak iyileştirme veya özetlerin bir ağacını oluşturarak kök özeti döndürme gibi birçok şekilde gerçekleştirilebilir. (Listenin ardından gelen taslağa bakın.)
+
+- [**Depolama**](./low_level/storage.md): Bir noktada, gömme modellerini her seferinde yeniden çalıştırmak yerine indekslerinizi, verilerinizi ve vektörlerinizi depolamak isteyeceksiniz. IndexStore, DocStore, VectorStore ve KVStore, bunu yapmanıza izin veren soyutlamalardır; birlikte StorageContext'i oluştururlar. Şu anda gömmelerinizi dosyalarda (veya sanal bir bellek içi dosya sisteminde) saklamanıza izin veriyoruz, ancak vektör veritabanlarına entegrasyonlar eklemek için de aktif olarak çalışıyoruz.
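+
+Yukarıdaki düşük seviyeli modüllerden birkaçının birlikte nasıl kullanılabileceğine dair küçük bir taslak (sorgu metni ve `similarityTopK` değeri yalnızca örnek amaçlıdır):
+
+```typescript
+import { Document, ResponseSynthesizer, VectorStoreIndex } from "llamaindex";
+
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "..." }),
+]);
+
+// Retriever, indeksten düğümleri seçer
+const retriever = index.asRetriever();
+retriever.similarityTopK = 3;
+const nodes = await retriever.retrieve("örnek sorgu");
+
+// ResponseSynthesizer, bu düğümleri kullanarak bir yanıt üretir
+const synthesizer = new ResponseSynthesizer();
+const response = await synthesizer.synthesize("örnek sorgu", nodes);
+console.log(response.response);
+```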
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..7bee8d8fe81eeb9269c984854036190aae07faeb
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Gömme (Embedding)
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex içindeki gömme modeli, metnin sayısal temsillerini oluşturmakla sorumludur. Varsayılan olarak, LlamaIndex, OpenAI'den `text-embedding-ada-002` modelini kullanır.
+
+Bu, açıkça `ServiceContext` nesnesinde ayarlanabilir.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Referansı
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..a7008d50036e4803c06bc0d97d9ebd4589ee43d0
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LLM, metinleri okuma ve sorgulara doğal dil yanıtları üretme işlemlerinden sorumludur. Varsayılan olarak, LlamaIndex.TS `gpt-3.5-turbo` kullanır.
+
+LLM, açıkça `ServiceContext` nesnesinde ayarlanabilir.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Referansı
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..7d6e54207ef6c3f513ff8b3b4ab1ed69389cad94
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+`NodeParser`, LlamaIndex içinde `Document` nesnelerini daha yönetilebilir `Node` nesnelerine bölen bir bileşendir. `.fromDocuments()` çağrıldığında, `ServiceContext` içindeki `NodeParser` otomatik olarak bunu yapmak için kullanılır. Alternatif olarak, belgeleri önceden bölmek için de kullanabilirsiniz.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "Ben 10 yaşındayım. John 20 yaşındadır." }),
+]);
+```
+
+## TextSplitter
+
+Altta yatan metin bölücü, metni cümlelere göre böler. Ayrıca ham metni bölmek için bağımsız bir modül olarak da kullanılabilir.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Merhaba Dünya");
+```
+
+"
+
+## API Referansı
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..40dfed4f72092f2b9637c46a3384bd041be1896b
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,50 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+ResponseSynthesizer, sorguyu, düğümleri ve prompt şablonlarını yanıt üretmek için LLM'ye gönderme işlemini üstlenir. Yanıt üretmek için birkaç temel mod vardır:
+
+- `Refine`: Her alınan metin parçası üzerinden sıralı olarak "yanıt oluştur ve iyileştir" işlemi yapar.
+  Bu, her düğüm için ayrı bir LLM çağrısı yapar. Daha detaylı yanıtlar için iyidir.
+- `CompactAndRefine` (varsayılan): Her LLM çağrısı sırasında prompt'u "sıkıştırarak" maksimum prompt boyutu içine sığabilecek kadar çok metin parçası ekler.
+  Bir prompt'a sığmayacak kadar çok parça varsa, birden fazla sıkıştırılmış prompt üzerinden "yanıt oluştur ve iyileştir" işlemi yapar. `refine` ile aynıdır, ancak daha az LLM çağrısı yapar.
+- `TreeSummarize`: Bir metin parçası kümesi ve sorgu verildiğinde, rekürsif olarak bir ağaç oluşturur ve kök düğümü yanıt olarak döndürür. Özetleme amaçlı iyidir.
+- `SimpleResponseBuilder`: Bir metin parçası kümesi ve sorgu verildiğinde, her metin parçasına sorguyu uygulayarak yanıtları bir diziye biriktirir.
+  Tüm yanıtların birleştirilmiş bir dizesini döndürür. Her bir metin parçası için ayrı ayrı sorguyu çalıştırmanız gerektiğinde iyidir.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "Ben 10 yaşındayım." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John 20 yaşındadır." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Kaç yaşındayım?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Referansı
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..07800027d5a16a235499f683be26039a6467ef32
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 5
+---
+
+# Retriever (Alıcı)
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex'te bir alıcı, bir sorgu dizesi kullanarak bir dizinden `Node`'ları almak için kullanılan bir bileşendir. Bir `VectorIndexRetriever` en benzer düğümleri getirecektir. Öte yandan, bir `SummaryIndexRetriever` sorguya bakılmaksızın tüm düğümleri getirecektir.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Düğümleri getir!
+const nodesWithScore = await retriever.retrieve("sorgu dizesi");
+```
+
+## API Referansı
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..5bee31014a0adcda19952e6bf4ca68356011af18
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Depolama
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+LlamaIndex.TS'de depolama, bir `StorageContext` nesnesi yapılandırıldıktan sonra otomatik olarak çalışır. Sadece `persistDir` değerini yapılandırın ve bağlamı bir indekse ekleyin.
+
+Şu anda, yalnızca diskten kaydetme ve yükleme desteklenmektedir, gelecekteki entegrasyonlar planlanmaktadır!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Metni" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Referansı
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..05b7afac0e1339198f260d681f26230f4bed500a
--- /dev/null
+++ b/apps/docs/i18n/tr/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Başlangıç Kılavuzu
+
+`Bu belge otomatik olarak çevrilmiştir ve hatalar içerebilir. Değişiklik önermek için bir Pull Request açmaktan çekinmeyin.`
+
+[LlamaIndex.TS'i NPM kullanarak kurduktan](installation) ve OpenAI anahtarınızı ayarladıktan sonra, ilk uygulamanıza başlamaya hazırsınız:
+
+Yeni bir klasörde:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # gerekirse
+```
+
+`example.ts` adında bir dosya oluşturun. Bu kod, bazı örnek verileri yükleyecek, bir belge oluşturacak, onu dizine ekleyecek (OpenAI kullanarak gömme oluşturacak) ve ardından veriler hakkında soruları yanıtlayacak bir sorgu motoru oluşturacak.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Node'da abramov.txt dosyasından makale yükle
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Makale ile Document nesnesi oluştur
+  const document = new Document({ text: essay });
+
+  // Metni bölecek ve gömme oluşturacak. Bunları VectorStoreIndex içinde sakla
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // İndexe sorgu yap
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("Yazar üniversitede ne yaptı?");
+
+  // Yanıtı çıktıla
+  console.log(response.toString());
+}
+
+main();
+```
+
+Ardından şunu kullanarak çalıştırabilirsiniz
+
+```bash
+npx ts-node example.ts
+```
+
+Daha fazlasını öğrenmeye hazır mısınız? NextJS oyun alanımıza göz atın: https://llama-playground.vercel.app/. Kaynak kodu burada bulunabilir: https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..2cf5fabbd5fba36971aa57071c6874cd35b25653
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# Високорівневі концепції
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+LlamaIndex.TS допомагає вам створювати додатки, що працюють на основі LLM (наприклад, системи питань та відповідей, чат-боти) з власними даними.
+
+У цьому посібнику з високорівневих концепцій ви дізнаєтеся:
+
+- як LLM може відповідати на питання за допомогою ваших власних даних.
+- ключові концепції та модулі в LlamaIndex.TS для створення власного запитового конвеєра.
+
+## Відповіді на питання за допомогою ваших даних
+
+LlamaIndex використовує двоетапний метод при використанні LLM з вашими даними:
+
+1. **етап індексування**: підготовка бази знань, та
+2. **етап запитування**: отримання відповідного контексту знань для допомоги LLM у відповіді на питання
+
+![](./_static/concepts/rag.jpg)
+
+Цей процес також відомий як Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS надає необхідний набір інструментів, щоб зробити обидва ці етапи надзвичайно простими.
+
+Давайте детальніше розглянемо кожен етап.
+
+### Етап індексації
+
+LlamaIndex.TS допомагає вам підготувати базу знань за допомогою набору з'єднувачів даних та індексів.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Завантажувачі даних**](./modules/high_level/data_loader.md):
+З'єднувач даних (тобто `Reader`) впроваджує дані з різних джерел даних та форматів у просте представлення `Document` (текст та прості метадані).
+
+[**Документи / Вузли**](./modules/high_level/documents_and_nodes.md): `Document` є загальним контейнером для будь-якого джерела даних - наприклад, PDF, виводу API або отриманих даних з бази даних. `Node` є атомарною одиницею даних в LlamaIndex і представляє "частину" вихідного `Document`. Це багатогранне представлення, яке включає метадані та відносини (до інших вузлів), що дозволяють здійснювати точні та виразні операції відновлення.
+
+[**Індекси даних**](./modules/high_level/data_index.md):
+Після впровадження даних LlamaIndex допомагає вам індексувати дані у формат, який легко отримати.
+
+Під капотом LlamaIndex розбирає вихідні документи на проміжні представлення, обчислює векторні вкладення та зберігає ваші дані у пам'яті або на диску.
+
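+Мінімальний ескіз етапу індексування (текст документа тут лише ілюстративний, це не остаточна реалізація):
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+// Document — загальний контейнер для вашого джерела даних
+const document = new Document({ text: "..." });
+
+// Індекс розбиває документ на вузли та обчислює векторні вкладення
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+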
+"
+
+### Етап запитування
+
+На етапі запитування конвеєр запитів отримує найбільш відповідний контекст на основі запиту користувача,
+та передає його LLM (разом з запитом) для синтезування відповіді.
+
+Це надає LLM актуальні знання, яких немає у його початкових навчальних даних,
+(а також зменшує галюцинації).
+
+Основним викликом на етапі запитування є отримання, організація та міркування над (можливо, багатьма) базами знань.
+
+LlamaIndex надає модулі, які можна комбінувати, щоб допомогти вам будувати та інтегрувати конвеєри RAG для систем питань та відповідей (двигун запитів), чат-ботів (чат-двигун) або як частину агента.
+
+Ці будівельні блоки можуть бути налаштовані для відображення вподобань ранжування, а також комбіновані для міркування над кількома базами знань у структурований спосіб.
+
+![](./_static/concepts/querying.jpg)
+
+#### Будівельні блоки
+
+[**Витягувачі**](./modules/low_level/retriever.md):
+Витягувач визначає, як ефективно отримати відповідний контекст з бази знань (тобто індексу), коли заданий запит.
+Конкретна логіка витягування відрізняється для різних індексів, найпопулярнішим з яких є щільне витягування з векторним індексом.
+
+[**Синтезатори відповідей**](./modules/low_level/response_synthesizer.md):
+Синтезатор відповідей генерує відповідь від LLM, використовуючи запит користувача та заданий набір витягнутих фрагментів тексту.
+
+"
+
+#### Конвеєри
+
+[**Запитові двигуни**](./modules/high_level/query_engine.md):
+Запитовий двигун - це повний конвеєр, який дозволяє вам задавати питання щодо ваших даних.
+Він приймає запит на природній мові та повертає відповідь, разом з отриманим контекстом, який передається LLM.
+
+[**Чат-двигуни**](./modules/high_level/chat_engine.md):
+Чат-двигун - це повний конвеєр для проведення розмови з вашими даними
+(багатооборотний діалог замість одного питання та відповіді).
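+
+Продовжуючи ескіз з етапу індексування (змінна `index` звідти; запити лише ілюстративні):
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+// Запитовий двигун: одне питання природною мовою — одна відповідь з контекстом
+const queryEngine = index.asQueryEngine();
+const answer = await queryEngine.query("Про що ці дані?");
+
+// Чат-двигун: багатооборотна розмова з тими самими даними
+const chatEngine = new ContextChatEngine({ retriever: index.asRetriever() });
+const reply = await chatEngine.chat("Розкажи докладніше");
+```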
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2085890af7c0633c84218432799d848765506ab
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,59 @@
+---
+sidebar_position: 4
+---
+
+# Приклади з кінця до кінця
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+Ми включили кілька прикладів з використанням LlamaIndex.TS у репозиторії.
+
+Перегляньте наведені нижче приклади або спробуйте їх і завершіть за кілька хвилин з інтерактивними посібниками Github Codespace, наданими Dev-Docs [тут](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Чатовий рушій (Chat Engine)](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Прочитайте файл і обговорюйте його з LLM.
+
+## [Векторний індекс](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Створіть векторний індекс та запитайте його. Векторний індекс буде використовувати вкладення для отримання k найбільш відповідних вузлів. За замовчуванням, k дорівнює 2.
+
+"
+
+## [Індекс підсумків](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Створіть індекс списку та запитайте його. У цьому прикладі також використовується `LLMRetriever`, який використовує LLM для вибору найкращих вузлів для використання при генерації відповіді.
+
+"
+
+## [Збереження / Завантаження індексу](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Створення та завантаження векторного індексу. Автоматичне збереження на диск в LlamaIndex.TS відбувається автоматично після створення об'єкта контексту зберігання.
+
+"
+
+## [Налаштований векторний індекс](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Створіть векторний індекс та запитайте його, налаштувавши `LLM`, `ServiceContext` та `similarity_top_k`.
+
+"
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Створіть OpenAI LLM та безпосередньо використовуйте його для чату.
+
+"
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Створіть Llama-2 LLM та безпосередньо використовуйте його для чату.
+
+"
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Використовує `SubQuestionQueryEngine`, який розбиває складні запити на кілька підзапитів, а потім агрегує відповідь на всі підзапити.
+
+## [Модулі низького рівня](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+Цей приклад використовує кілька компонентів низького рівня, що усуває необхідність у фактичному двигуні запитів. Ці компоненти можуть бути використані будь-де, в будь-якому додатку, або налаштовані та підкласифіковані для відповідності вашим потребам.
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..0ca2731852ea16e7493ac9e9856fc6cdd14dfecb
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Середовища
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+LlamaIndex наразі офіційно підтримує NodeJS 18 та NodeJS 20.
+
+## Маршрутизатор додатків NextJS
+
+Якщо ви використовуєте обробники маршрутів/функції безсерверного режиму NextJS App Router, вам потрібно використовувати режим NodeJS:
+
+```js
+export const runtime = "nodejs"; // за замовчуванням
+```
+
+і вам потрібно додати виняток для pdf-parse у вашому next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Розміщує pdf-parse у фактичному режимі NodeJS з NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c631bfb7f3cb0c8ee94485f0740c216e653b78e3
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# Встановлення та налаштування
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+
+Переконайтеся, що у вас встановлено NodeJS версії 18 або вище.
+
+
+## Використання create-llama
+
+Найпростіший спосіб почати роботу з LlamaIndex - використовувати `create-llama`. Цей інструмент командного рядка дозволяє швидко створити новий додаток LlamaIndex з усім необхідним налаштуванням.
+
+Просто виконайте команду
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+щоб почати. Після створення додатку виконайте команду
+
+```bash npm2yarn
+npm run dev
+```
+
+щоб запустити сервер розробки. Потім ви можете відвідати [http://localhost:3000](http://localhost:3000), щоб переглянути свій додаток.
+
+## Встановлення з NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Змінні середовища
+
+Наші приклади за замовчуванням використовують OpenAI. Вам потрібно налаштувати свій ключ OpenAI наступним чином:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Замініть на свій ключ з https://platform.openai.com/account/api-keys
+```
+
+Якщо ви хочете, щоб він автоматично завантажувався кожного разу, додайте його до вашого .zshrc/.bashrc.
+
+ПОПЕРЕДЖЕННЯ: не додавайте свій ключ OpenAI до системи контролю версій.
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..91047a36d64fdabdc58d764aeefc5e7243a4693e
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# Що таке LlamaIndex.TS?
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+LlamaIndex.TS - це фреймворк для обробки даних в додатках LLM, який дозволяє вводити, структурувати та отримувати доступ до приватних або специфічних для домену даних. Хоча також доступний пакет для Python (див. [тут](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS пропонує основні функціональні можливості в простому пакеті, оптимізованому для використання з TypeScript.
+
+## 🚀 Чому LlamaIndex.TS?
+
+На своєму корені LLM-додатки пропонують інтерфейс природної мови між людьми та виведеними даними. Широко доступні моделі навчаються на величезній кількості публічно доступних даних, від Вікіпедії та списків розсилки до підручників та вихідного коду.
+
+Додатки, побудовані на основі LLM-моделей, часто потребують доповнення цих моделей приватними або специфічними для домену даними. На жаль, ці дані можуть бути розподілені між ізольованими додатками та сховищами даних. Вони можуть бути заховані за API, зберігатися в SQL-базах даних або бути втягнутими в PDF-файли та презентації.
+
+Саме тут і приходить на допомогу **LlamaIndex.TS**.
+
+## 🦙 Як LlamaIndex.TS може допомогти?
+
+LlamaIndex.TS надає наступні інструменти:
+
+- **Завантаження даних** - введення ваших існуючих даних у форматах `.txt`, `.pdf`, `.csv`, `.md` та `.docx` безпосередньо.
+- **Індекси даних** - структурування даних у проміжні представлення, які легкі та продуктивні для використання LLM.
+- **Двигуни** - надають доступ до ваших даних за допомогою природної мови. Наприклад:
+  - Двигуни запитів - потужні інтерфейси для отримання знань з підвищеною видачею.
+  - Двигуни чату - розмовні інтерфейси для багатоповідомленнєвої взаємодії "туди й назад" з вашими даними.
+
+## 👨‍👩‍👧‍👦 Для кого призначений LlamaIndex?
+
+LlamaIndex.TS надає основний набір інструментів, які є необхідними для будь-якого, хто будує додатки LLM з використанням JavaScript та TypeScript.
+
+Наше API високого рівня дозволяє початківцям використовувати LlamaIndex.TS для введення та запитування їх даних.
+
+Для більш складних додатків наші API нижчого рівня дозволяють досвідченим користувачам налаштовувати та розширювати будь-який модуль - з'єднувачі даних, індекси, відновлювачі та двигуни запитів - для відповідності їх потребам.
+
+## Початок роботи
+
+`npm install llamaindex`
+
+Наша документація містить [Інструкції з встановлення](./installation.md) та [Посібник для початківців](./starter.md) для створення вашої першої програми.
+
+Після того, як ви розпочнете роботу, [Високорівневі концепції](./concepts.md) містить огляд модульної архітектури LlamaIndex. Для більш практичних прикладів роботи, перегляньте наші [Посібники з кінця в кінець](./end_to_end.md).
+
+## 🗺️ Екосистема
+
+Для завантаження або співпраці з LlamaIndex, перейдіть за посиланнями:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Спільнота
+
+Потрібна допомога? Є пропозиція щодо функціоналу? Приєднуйтесь до спільноти LlamaIndex:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..9f47623288a561c54e1a5b41d0877e173ea508eb
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# Чатовий двигун (ChatEngine)
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+Чатовий двигун - це швидкий і простий спосіб спілкування з даними у вашому індексі.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// початок спілкування
+const response = await chatEngine.chat(query);
+```
+
+## Посилання на API
+
+- [Чатовий двигун контексту (ContextChatEngine)](../../api/classes/ContextChatEngine.md)
+- [Чатовий двигун стиснення запитань (CondenseQuestionChatEngine)](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..44a3e3c17bfd0f637e666e8a8dab459c2610eba4
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 2
+---
+
+# Індекс
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+Індекс - це основний контейнер і організація для ваших даних. LlamaIndex.TS підтримує два типи індексів:
+
+- `VectorStoreIndex` - надсилає топ-k `Node` до LLM при генерації відповіді. За замовчуванням, top-k дорівнює 2.
+- `SummaryIndex` - надсилає кожен `Node` в індексі до LLM для генерації відповіді.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "тест" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
+
+## Довідник по API
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb253e317b4774a95a5234959bb171b320a8ad78
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Читач / Завантажувач
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+LlamaIndex.TS підтримує просте завантаження файлів з папок за допомогою класу `SimpleDirectoryReader`. Наразі підтримуються файли з розширеннями `.txt`, `.pdf`, `.csv`, `.md` та `.docx`, а в майбутньому планується підтримка ще більшої кількості форматів!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## Довідник API
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..58ffcc72ed50bba008fe61425aee953ab21049ad
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 0
+---
+
+# Документи та Вузли
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+`Документи` та `Вузли` є основними будівельними блоками будь-якого індексу. Хоча API для цих об'єктів схожий, об'єкти `Документ` представляють цілі файли, тоді як `Вузли` є меншими частинами цього початкового документа, які підходять для LLM та Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "текст", metadata: { key: "val" } });
+```
+
+## Довідник по API
+
+- [Документ (Document)](../../api/classes/Document.md)
+- [ТекстовийВузол (TextNode)](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a8f2ee0e7055bb88a9649ca4e424dc5c5d2a3a7
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,40 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine (Запитовий рушій)
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+Запитовий рушій обгортає `Retriever` та `ResponseSynthesizer` в конвеєр, який використовує рядок запиту для отримання вузлів та надсилає їх до LLM для генерації відповіді.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("рядок запиту");
+```
+
+## Запитовий Двигун для Підзапитів
+
+Основна концепція Запитового Двигуна для Підзапитів полягає в тому, що він розбиває один запит на кілька запитів, отримує відповідь для кожного з цих запитів, а потім комбінує ці різні відповіді в одну зв'язну відповідь для користувача. Ви можете уявити це як техніку "подумайте про це крок за кроком", але ітеруючись по джерелах даних!
+
+### Початок роботи
+
+Найпростіший спосіб спробувати Запитовий Двигун для Підзапитів - запустити файл subquestion.ts у папці [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+"
+
+### Інструменти
+
+Запитовий Двигун для Підзапитів реалізований за допомогою Інструментів. Основна ідея Інструментів полягає в тому, що вони є виконавчими опціями для великої мовної моделі. У цьому випадку наш Запитовий Двигун для Підзапитів покладається на QueryEngineTool, який, як ви вже здогадалися, є інструментом для виконання запитів на запитовому двигуні. Це дозволяє нам дати моделі можливість, наприклад, запитувати різні документи для різних питань. Ви також можете уявити, що Запитовий Двигун для Підзапитів використовує Інструмент, який шукає щось в Інтернеті або отримує відповідь за допомогою Wolfram Alpha.
+
+Ви можете дізнатися більше про Інструменти, переглянувши документацію Python LlamaIndex за посиланням https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
+
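+Невеликий ескіз (припущення: `vectorQueryEngine` — це вже створений запитовий двигун; назва та опис інструмента лише ілюстративні):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Кожен QueryEngineTool — це виконавча опція, яку може обрати модель
+const queryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: vectorQueryEngine, // припущення: наявний запитовий двигун
+      metadata: {
+        name: "documents",
+        description: "Корисний для питань про вміст документів",
+      },
+    },
+  ],
+});
+
+const response = await queryEngine.query(
+  "Порівняй основні теми в цих документах",
+);
+```
+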
+## Довідка по API
+
+- [RetrieverQueryEngine (Запитовий рушій Retriever)](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine (Запитовий рушій підзапитів)](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool (Інструмент запитового рушія)](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef4de2172cdc3ffd4cb6b4ce4ef243c5ec402513
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Основні модулі
+
+`Ця документація була автоматично перекладена і може містити помилки. Не соромтеся відкривати Pull Request, щоб запропонувати зміни.`
+
+LlamaIndex.TS пропонує кілька основних модулів, розділених на високорівневі модулі для швидкого початку роботи та низькорівневі модулі для налаштування ключових компонентів за потребою.
+
+## Високорівневі модулі
+
+- [**Документ**](./high_level/documents_and_nodes.md): Документ представляє собою текстовий файл, файл PDF або інший послідовний шматок даних.
+
+- [**Вузол**](./high_level/documents_and_nodes.md): Основний будівельний блок даних. Зазвичай це частини документа, розділені на керовані шматки, які достатньо малі, щоб їх можна було передати в модель вбудовування та LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): Рідер або завантажувач - це щось, що приймає документ у реальному світі та перетворює його на клас Документа, який потім можна використовувати в вашому Індексі та запитах. Наразі ми підтримуємо звичайні текстові файли та PDF-файли, але незабаром буде багато інших форматів.
+
+- [**Індекси**](./high_level/data_index.md): індекси зберігають Вузли та вбудовування цих вузлів.
+
+- [**QueryEngine**](./high_level/query_engine.md): Двигуни запитів - це ті, що генерують запит, який ви вводите, і повертають вам результат. Зазвичай двигуни запитів поєднують попередньо побудований запит з вибраними вузлами з вашого Індексу, щоб надати LLM контекст, який потрібен для відповіді на ваш запит.
+
+- [**ChatEngine**](./high_level/chat_engine.md): ChatEngine допомагає вам створити чат-бота, який буде взаємодіяти з вашими Індексами.
+
+## Низькорівневий модуль
+
+- [**LLM**](./low_level/llm.md): Клас LLM є єдиною інтерфейсною оболонкою над великим постачальником мовних моделей, таких як OpenAI GPT-4, Anthropic Claude або Meta LLaMA. Ви можете успадкувати його, щоб написати з'єднувач до власної великої мовної моделі.
+
+- [**Embedding**](./low_level/embedding.md): Вкладення представляється у вигляді вектора чисел з плаваючою комою. Нашою типовою моделлю вкладення є text-embedding-ada-002 від OpenAI, і кожне вкладення, яке вона генерує, складається з 1536 чисел з плаваючою комою. Ще одна популярна модель вкладення - BERT, яка використовує 768 чисел з плаваючою комою для представлення кожного вузла. Ми надаємо кілька утиліт для роботи з вкладеннями, включаючи 3 варіанти обчислення схожості та максимальної маржинальної релевантності.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Стратегії розбиття тексту на частини мають велике значення для загальної ефективності пошуку вкладень. Наразі, хоча у нас є типове значення, немає універсального рішення. Залежно від джерела документів, ви можете використовувати різні розміри та стратегії розбиття. Наразі ми підтримуємо розбиття за фіксованим розміром, розбиття за фіксованим розміром з перекриваючими секціями, розбиття за реченням та розбиття за абзацем. Розділювач тексту використовується NodeParser-ом при розбитті `Документів` на `Вузли`.
+
+- [**Retriever**](./low_level/retriever.md): Ретрівер насправді вибирає Вузли для отримання з індексу. Тут ви можете спробувати отримати більше або менше Вузлів за запитом, змінити функцію схожості або створити власний ретрівер для кожного окремого випадку використання у вашому додатку. Наприклад, ви можете мати окремого ретрівера для кодового вмісту та текстового вмісту.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): ResponseSynthesizer відповідає за прийняття рядка запиту та використання списку `Вузлів` для генерації відповіді. Це може мати різні форми, наприклад, ітерування по всьому контексту та уточнення відповіді або побудова дерева резюме та повернення кореневого резюме. (Див. ескіз після цього списку.)
+
+- [**Storage**](./low_level/storage.md): Рано чи пізно вам захочеться зберігати ваші індекси, дані та вектори, а не запускати моделі вкладення кожного разу. IndexStore, DocStore, VectorStore та KVStore - це абстракції, які дозволяють вам це зробити. Разом вони утворюють StorageContext. Наразі ми дозволяємо зберігати ваші вкладення у файлах на файловій системі (або віртуальній файловій системі в оперативній пам'яті), але також активно додаємо інтеграції з векторними базами даних.
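+
+To make the high-level/low-level split concrete, here is a minimal sketch of swapping a customized retriever into a query engine. It assumes `RetrieverQueryEngine` can be constructed directly from a retriever, as listed in the API reference; treat it as an illustration rather than the canonical wiring:
+
+```typescript
+import { Document, RetrieverQueryEngine, VectorStoreIndex } from "llamaindex";
+
+// High-level: build an index from a document.
+const index = await VectorStoreIndex.fromDocuments([
+  new Document({ text: "The quick brown fox jumped over the lazy dog." }),
+]);
+
+// Low-level: tune the retriever instead of accepting the defaults.
+const retriever = index.asRetriever();
+retriever.similarityTopK = 5; // fetch 5 nodes instead of the default 2
+
+// Wire the customized retriever into a query engine.
+const queryEngine = new RetrieverQueryEngine(retriever);
+const response = await queryEngine.query("What did the fox do?");
+console.log(response.toString());
+```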
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..22def4fa7f3eb5ab7b3b730f20f2caf3f1bfa3b7
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex uses the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
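+
+To embed a string directly — a minimal sketch, assuming the `getTextEmbedding` method on the embedding class (check the API reference below):
+
+```typescript
+const vector = await openaiEmbeds.getTextEmbedding("hello world");
+console.log(vector.length); // 1536 floats for text-embedding-ada-002
+```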
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..2b092bcdc46c6ad023e7932f53dbc9724017139c
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
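+
+You can also call the model directly, outside of any index — a minimal sketch, assuming a `complete` method that takes a prompt string and returns a response exposing `message.content` (verify against the API reference below):
+
+```typescript
+const completion = await openaiLLM.complete("Finish the sentence: a llama is");
+console.log(completion.message.content);
+```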
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..a26d922bce3de01d5139c75273e3a766167ee11c
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,39 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter splits text into sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..509074d8a5e569307210a1c3c930a802630bd8ad
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,53 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk.
+  This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are
+  too many chunks to fit in one prompt, "create and refine" an answer by going through
+  multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree
+  and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk
+  while accumulating the responses into an array. Returns a concatenated string of all
+  responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Скільки мені років?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
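+
+To pick a non-default mode — a sketch that assumes the `ResponseSynthesizer` constructor accepts a `responseBuilder` option and that `TreeSummarize` takes a service context (double-check both against the API reference below):
+
+```typescript
+import { ResponseSynthesizer, TreeSummarize, serviceContextFromDefaults } from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults();
+const summarizer = new ResponseSynthesizer({
+  responseBuilder: new TreeSummarize(serviceContext),
+});
+```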
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..ee6adad9c0caa9ef6d5b3a3de7e08a28a7739223
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes, while a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch the nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..a2c9dcb407237f5031b6f3d9cedae4ad660680f3
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving to and loading from disk is supported, with more integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
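+
+To load the persisted index back in a later process — a sketch that assumes `VectorStoreIndex.init` accepts the same storage context (check the API reference for the exact signature):
+
+```typescript
+const loadedIndex = await VectorStoreIndex.init({ storageContext });
+const response = await loadedIndex.asQueryEngine().query("query string");
+```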
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..17be86d23c9427729205d7a75c3fa8deca22ae64
--- /dev/null
+++ b/apps/docs/i18n/uk/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you are ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (creating vector embeddings using OpenAI), and then build a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create a Document object with the essay
+  const document = new Document({ text: essay });
+
+  // Split the text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("What did the author do in college?");
+
+  // Output the response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..67e80004aa17b4e30b7821ee13889b0ba0920d9b
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,86 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbots) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two-stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container for any data source - for instance, a PDF, an API output, or data retrieved from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It is a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you have ingested your data, LlamaIndex helps you index the data into a format that is easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or on disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes it to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbots (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) given a query.
+The specific retrieval logic differs between indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using the user query and a set of retrieved text chunks.
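+
+Both building blocks are a few lines of code in LlamaIndex.TS. A minimal sketch, mirroring the low-level example in the repository (the `index` here is assumed to be a `VectorStoreIndex` built during the indexing stage):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+const retriever = index.asRetriever(); // building block 1: retrieval
+const nodes = await retriever.retrieve("user query");
+
+const synthesizer = new ResponseSynthesizer(); // building block 2: synthesis
+const response = await synthesizer.synthesize("user query", nodes);
+```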
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that lets you ask questions over your data.
+It takes in a natural language query and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e97210a10631b289dfec128d5f2b41cd2541e11
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,59 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below, or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which uses the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and use it directly for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and use it directly for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions and then aggregates a response from the answers to all of them.
+
+## [Low-Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, removing the need for an actual query engine. These components can be used anywhere, in any application, or customized and subclassed to meet your own needs.
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..4fb7feb1e3899887aaba59e1ea7e540995f4a976
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you are using NextJS App Router route handlers/serverless functions, you will need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you will need to add an exception for pdf-parse in your next.config.js
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..bfe3e716be7b75088a9553b46701f5dc1440e970
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,71 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+```This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.```
+
+
+Make sure you have NodeJS v18 or higher.
+
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool lets you quickly start building a new LlamaIndex application with everything set up for you.
+
+Just run
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app is generated, run
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installing from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+
+### Environment variables
+
+Our examples use OpenAI by default. You will need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want to have it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..22d3240cf397b2eb7233cae50b0450f1fe005f17
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers the core features in a simple package, optimized for usage with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be spread across siloed applications and data stores. It sits behind APIs, in SQL databases, or is trapped in PDFs and slide decks.
+
+That is where LlamaIndex.TS comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading** ingests your existing `.txt`, `.pdf`, `.csv`, `.md`, and `.docx` data directly.
+- **Data indexes** structure your data into intermediate representations that are easy and performant for LLMs to consume.
+- **Engines** provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools essential for anyone building an LLM app with JavaScript and TypeScript.
+
+Our high-level API allows beginner users to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module (data connectors, indexes, retrievers, and query engines) to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) to build your first application.
+
+Once you are up and running, [High-Level Concepts](./concepts.md) gives an overview of LlamaIndex's modular architecture. For more hands-on, practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+"
+
+## Cộng đồng
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..f524b3284715881f21f84ddf9cbd64b10e4c61c4
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+import { ContextChatEngine } from "llamaindex";
+
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat("What did the author do growing up?");
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..b90b762766ef8557689708b1e2cd012efc4051b4
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,25 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
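+
+A `SummaryIndex` is built the same way from documents — a minimal sketch, assuming `SummaryIndex.fromDocuments` mirrors the vector index API (see the API reference below):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+const summaryIndex = await SummaryIndex.fromDocuments([
+  new Document({ text: "test" }),
+]);
+```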
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..9eea9f7935b08ec4dcb09de3dab55806bfa13c25
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. Currently, `.txt`, `.pdf`, `.csv`, `.md`, and `.docx` files are supported, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..7225bcea576283bfc10b441f9b3c37d05b759118
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,20 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document that are suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..8f16700604fc3a10431377f7114cd13efeb26eb5
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,38 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that uses the query string to fetch nodes and then sends them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub-Question Query Engine
+
+The basic concept of the Sub-Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start trying the Sub-Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+The Sub-Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub-Question Query Engine relies on the QueryEngineTool, a tool to run queries on a query engine. This lets us give the model the option to query different documents for different questions, for example. You could also imagine the Sub-Question Query Engine using a Tool that searches the web or gets an answer using Wolfram Alpha. A sketch of wiring this up follows below.
+
+You can learn more about Tools by looking at the LlamaIndex Python documentation at https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
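+
+A minimal sketch, loosely following the shape of examples/subquestion.ts (the tool `name` and `description` here are made-up placeholders, and `index` is assumed to be an existing index):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+const subQueryEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "essay_tool",
+        description: "Answers questions about the essay",
+      },
+    },
+  ],
+});
+
+const response = await subQueryEngine.query(
+  "How was the author's life different before and after college?",
+);
+```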
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..8d6bd53db59a18aef1afabe2e9175e50ee14f87a
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,33 @@
+# Core Modules
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of a document split into manageable chunks that are small enough to be fed into an embedding model and an LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader is something that takes in a document in the real world and transforms it into a Document class that can then be used in your Index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): Query engines take the query you put in and give you back a result. Query engines generally combine a pre-built prompt with selected nodes from your Index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that will interact with your Indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. text-embedding-ada-002 from OpenAI is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each node. We provide a number of utilities for working with embeddings, including three similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies matter a great deal for the overall efficacy of embedding search. We ship a default, but there is no one-size-fits-all solution: depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): A retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, change your similarity function, or create your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content versus text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, like iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): Sooner or later you will want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we let you persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to vector databases.
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..41562152ffe7d078959ef726e9fe7881789dc80a
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex uses the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..19c5f47c56b11ba1cc71ee4efd194331058ef1e0
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The LLM (large language model) is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..163d1a92cda0207ccead4a6bbfa897b99e1d5111
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,37 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter splits text into sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..1c2a321aa937e6430f06decf42ed297dff2b2a92
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to fit in one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "Tôi bao nhiêu tuổi?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..680aa5c0277134e8061341a3d0148210023df39c
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes, while a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch the nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..d20a65af63dcc56ec9f611a9efd6dc197ef7c6ed
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Storage in LlamaIndex.TS works automatically once you have configured a `StorageContext` object. Just configure the `persistDir` and attach it to an index.
+
+Right now, only saving to and loading from disk is supported, with more integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..30d8abad828f6d9843355aa0beb175caed012735
--- /dev/null
+++ b/apps/docs/i18n/vi/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,60 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you are ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (creating vector embeddings using OpenAI), and then build a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create a Document object with the essay
+  const document = new Document({ text: essay });
+
+  // Split the text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(
+    "Tác giả đã làm gì trong trường đại học?",
+  );
+
+  // Output the response
+  console.log(response.toString());
+}
+
+main();
+```
+
+You can then run it using
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
Binary files /dev/null and b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/indexing.jpg differ
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
Binary files /dev/null and b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/querying.jpg differ
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
Binary files /dev/null and b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/_static/concepts/rag.jpg differ
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/api b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/api
new file mode 120000
index 0000000000000000000000000000000000000000..3513e32979f4abb8b2df2b54c8d0cc480bdd847e
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/api
@@ -0,0 +1 @@
+../../../../docs/api
\ No newline at end of file
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/concepts.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..10391e039fb9dcc039cbf2541169a31c65a06fea
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/concepts.md
@@ -0,0 +1,83 @@
+---
+sidebar_position: 3
+---
+
+# High-Level Concepts
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+LlamaIndex.TS helps you build LLM-powered applications (e.g. Q&A, chatbots) over custom data.
+
+In this high-level concepts guide, you will learn:
+
+- how an LLM can answer questions using your own data.
+- key concepts and modules in LlamaIndex.TS for composing your own query pipeline.
+
+## Answering Questions Across Your Data
+
+LlamaIndex uses a two-stage method when using an LLM with your data:
+
+1. **indexing stage**: preparing a knowledge base, and
+2. **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question.
+
+![](./_static/concepts/rag.jpg)
+
+This process is also known as Retrieval Augmented Generation (RAG).
+
+LlamaIndex.TS provides the essential toolkit for making both steps super easy.
+
+Let's explore each stage in detail.
+
+### Indexing Stage
+
+LlamaIndex.TS helps you prepare the knowledge base with a suite of data connectors and indexes.
+
+![](./_static/concepts/indexing.jpg)
+
+[**Data Loaders**](./modules/high_level/data_loader.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](./modules/high_level/documents_and_nodes.md): A `Document` is a generic container for any data source - for instance, a PDF, an API output, or data retrieved from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It is a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](./modules/high_level/data_index.md):
+Once you have ingested your data, LlamaIndex helps you index the data into a format that is easy to retrieve.
+
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and stores your data in memory or on disk.
+
+### Querying Stage
+
+In the querying stage, the query pipeline retrieves the most relevant context given a user query,
+and passes it to the LLM (along with the query) to synthesize a response.
+
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbots (chat engine), or as part of an agent.
+
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](./_static/concepts/querying.jpg)
+
+#### Building Blocks
+
+[**Retrievers**](./modules/low_level/retriever.md):
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) given a query.
+The specific retrieval logic differs between indexes, the most popular being dense retrieval against a vector index.
+
+[**Response Synthesizers**](./modules/low_level/response_synthesizer.md):
+A response synthesizer generates a response from an LLM, using the user query and a set of retrieved text chunks.
+
+#### Pipelines
+
+[**Query Engines**](./modules/high_level/query_engine.md):
+A query engine is an end-to-end pipeline that lets you ask questions over your data.
+It takes in a natural language query and returns a response, along with the reference context retrieved and passed to the LLM.
+
+[**Chat Engines**](./modules/high_level/chat_engine.md):
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth exchanges instead of a single question and answer).
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/end_to_end.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/end_to_end.md
new file mode 100644
index 0000000000000000000000000000000000000000..0b8d7bc623d7d8f3930e7b5a89a1739ea879699f
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/end_to_end.md
@@ -0,0 +1,47 @@
+---
+sidebar_position: 4
+---
+
+# End-to-End Examples
+
+`This documentation has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to suggest changes.`
+
+We include several end-to-end examples using LlamaIndex.TS in the repository.
+
+Check out the examples below, or try them out and complete them in minutes with the interactive Github Codespace tutorials provided by Dev-Docs [here](https://codespaces.new/team-dev-docs/lits-dev-docs-playground?devcontainer_path=.devcontainer%2Fjavascript_ltsquickstart%2Fdevcontainer.json):
+
+## [Chat Engine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/chatEngine.ts)
+
+Read a file and chat about it with the LLM.
+
+## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndex.ts)
+
+Create a vector index and query it. The vector index will use embeddings to fetch the top-k most relevant nodes. By default, the top k is 2.
+
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/summaryIndex.ts)
+
+Create a list index and query it. This example also uses the `LLMRetriever`, which uses the LLM to select the best nodes to use when generating an answer.
+
+## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/storageContext.ts)
+
+Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
+
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
+## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/openai.ts)
+
+Create an OpenAI LLM and use it directly for chat.
+
+## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/llamadeuce.ts)
+
+Create a Llama-2 LLM and use it directly for chat.
+
+## [SubQuestionQueryEngine](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts)
+
+Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple sub-questions and then aggregates a response from the answers to all of them.
+
+## [Low-Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/lowlevel.ts)
+
+This example uses several low-level components, removing the need for an actual query engine. These components can be used anywhere, in any application, or customized and subclassed to meet your own needs.
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/environments.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/environments.md
new file mode 100644
index 0000000000000000000000000000000000000000..6da17a90812b4dbe1284eb3d63925a8150e98cde
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/environments.md
@@ -0,0 +1,31 @@
+---
+sidebar_position: 5
+---
+
+# Environments
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+LlamaIndex currently officially supports NodeJS 18 and NodeJS 20.
+
+## NextJS App Router
+
+If you're using NextJS App Router route handlers/serverless functions, you'll need to use the NodeJS mode:
+
+```js
+export const runtime = "nodejs"; // default
+```
+
+and you'll need to add an exception for pdf-parse in your next.config.js:
+
+```js
+// next.config.js
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+  experimental: {
+    serverComponentsExternalPackages: ["pdf-parse"], // Puts pdf-parse in actual NodeJS mode with NextJS App Router
+  },
+};
+
+module.exports = nextConfig;
+```
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/installation.mdx b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/installation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..869245c7472c8d453ecb37cdad1ed5c6ffd57e49
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/installation.mdx
@@ -0,0 +1,66 @@
+---
+sidebar_position: 1
+---
+
+
+# Installation and Setup
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+Make sure you have NodeJS v18 or higher.
+
+## Using create-llama
+
+The easiest way to get started with LlamaIndex is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
+
+Just run the following command:
+
+<Tabs>
+<TabItem value="1" label="npm" default>
+
+```bash
+npx create-llama@latest
+```
+
+</TabItem>
+<TabItem value="2" label="Yarn">
+
+```bash
+yarn create llama
+```
+
+</TabItem>
+<TabItem value="3" label="pnpm">
+
+```bash
+pnpm create llama@latest
+```
+
+</TabItem>
+</Tabs>
+
+to get started. Once your app has been generated, run the following command:
+
+```bash npm2yarn
+npm run dev
+```
+
+to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
+
+## Installation from NPM
+
+```bash npm2yarn
+npm install llamaindex
+```
+
+### Environment variables
+
+Our examples use OpenAI by default. You'll need to set up your OpenAI key like so:
+
+```bash
+export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
+```
+
+If you want it loaded automatically every time, add it to your .zshrc/.bashrc.
+
+WARNING: do not check your OpenAI key into version control.
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/introduction.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/introduction.md
new file mode 100644
index 0000000000000000000000000000000000000000..fe51497b3eaf1e370fd1e36de0a963d40db96854
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/introduction.md
@@ -0,0 +1,58 @@
+---
+sidebar_position: 0
+slug: /
+---
+
+# What is LlamaIndex.TS?
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+LlamaIndex.TS is a data framework for LLM applications to ingest, structure, and access private or domain-specific data. While a Python package is also available (see [here](https://docs.llamaindex.ai/en/stable/)), LlamaIndex.TS offers core features in a simple package, optimized for use with TypeScript.
+
+## 🚀 Why LlamaIndex.TS?
+
+At their core, LLMs offer a natural language interface between humans and inferred data. Widely available models come pre-trained on huge amounts of publicly available data, from Wikipedia and mailing lists to textbooks and source code.
+
+Applications built on top of LLMs often require augmenting these models with private or domain-specific data. Unfortunately, that data can be distributed across siloed applications and data stores. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
+
+That's where **LlamaIndex.TS** comes in.
+
+## 🦙 How can LlamaIndex.TS help?
+
+LlamaIndex.TS provides the following tools:
+
+- **Data loading**: ingest your existing `.txt`, `.pdf`, `.csv`, `.md` and `.docx` data directly.
+- **Data indexes**: structure your data in intermediate representations that are easy and performant for LLM applications to consume.
+- **Engines**: provide natural language access to your data. For example:
+  - Query engines are powerful retrieval interfaces for knowledge-augmented output.
+  - Chat engines are conversational interfaces for multi-message, back-and-forth interactions with your data.
+
+## 👨‍👩‍👧‍👦 Who is LlamaIndex for?
+
+LlamaIndex.TS provides a core set of tools that are essential for anyone building LLM apps with JavaScript and TypeScript.
+
+Our high-level API allows beginners to use LlamaIndex.TS to ingest and query their data.
+
+For more complex applications, our lower-level APIs allow advanced users to customize and extend any module - data connectors, indices, retrievers, and query engines - to fit their needs.
+
+## Getting Started
+
+`npm install llamaindex`
+
+Our documentation includes [Installation Instructions](./installation.md) and a [Starter Tutorial](./starter.md) for building your first application.
+
+Once you're up and running, [High-Level Concepts](./concepts.md) gives an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our [End-to-End Tutorials](./end_to_end.md).
+
+## 🗺️ Ecosystem
+
+To download or contribute, find LlamaIndex on:
+
+- Github: https://github.com/run-llama/LlamaIndexTS
+- NPM: https://www.npmjs.com/package/llamaindex
+
+## Community
+
+Need help? Have a feature suggestion? Join the LlamaIndex community:
+
+- Twitter: https://twitter.com/llama_index
+- Discord: https://discord.gg/dGcwcsnxhU
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..31ab71a8651ff7601204f40bff624ab759069d80
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/chat_engine.md
@@ -0,0 +1,22 @@
+---
+sidebar_position: 4
+---
+
+# ChatEngine
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+The chat engine is a quick and simple way to chat with the data in your index.
+
+```typescript
+const retriever = index.asRetriever();
+const chatEngine = new ContextChatEngine({ retriever });
+
+// start chatting
+const response = await chatEngine.chat("query string");
+```
+
+## API Reference
+
+- [ContextChatEngine](../../api/classes/ContextChatEngine.md)
+- [CondenseQuestionChatEngine](../../api/classes/CondenseQuestionChatEngine.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
new file mode 100644
index 0000000000000000000000000000000000000000..5be6c1c31dda13fa6d5a9ae03f9536c76e54bf40
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/data_index.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 2
+---
+
+# Index
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
+
+- `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response.
+
+```typescript
+import { Document, VectorStoreIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const index = await VectorStoreIndex.fromDocuments([document]);
+```
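+
+A `SummaryIndex` can be built from documents in the same way (a sketch using `SummaryIndex.fromDocuments`, which the linked summary index example relies on):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+const summaryIndex = await SummaryIndex.fromDocuments([document]);
+```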
+
+## API Reference
+
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
new file mode 100644
index 0000000000000000000000000000000000000000..78bb05a249fb2e64331f9d7adf507a13cc6a442d
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/data_loader.md
@@ -0,0 +1,19 @@
+---
+sidebar_position: 1
+---
+
+# Reader / Loader
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+LlamaIndex.TS supports easy loading of files from folders using the `SimpleDirectoryReader` class. It currently supports `.txt`, `.pdf`, `.csv`, `.md` and `.docx` files, with more planned in the future!
+
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+
+const documents = await new SimpleDirectoryReader().loadData("./data");
+```
+
+## API Reference
+
+- [SimpleDirectoryReader](../../api/classes/SimpleDirectoryReader.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
new file mode 100644
index 0000000000000000000000000000000000000000..5cb678ec2b5704036ee9dfb6db58ddca6686da8d
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/documents_and_nodes.md
@@ -0,0 +1,20 @@
+---
+sidebar_position: 0
+---
+
+# Documents and Nodes
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+`Document`s and `Node`s are the basic building blocks of any index. While the API for these objects is similar, `Document` objects represent entire files, while `Node`s are smaller pieces of that original document that are suitable for an LLM and Q&A.
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "text", metadata: { key: "val" } });
+```
+
+## API Reference
+
+- [Document](../../api/classes/Document.md)
+- [TextNode](../../api/classes/TextNode.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
new file mode 100644
index 0000000000000000000000000000000000000000..785743061c95c118910cab64c8f9c20dfbfcde4a
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/high_level/query_engine.md
@@ -0,0 +1,61 @@
+---
+sidebar_position: 3
+---
+
+# QueryEngine
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline that uses the query string to fetch nodes and then sends them to the LLM to generate a response.
+
+```typescript
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("query string");
+```
+
+## Sub Question Query Engine
+
+The basic concept of the Sub Question Query Engine is that it splits a single query into multiple queries, gets an answer for each of those queries, and then combines those different answers into a single coherent response for the user. You can think of it as the "think this through step by step" prompt technique, but iterating over your data sources!
+
+### Getting Started
+
+The easiest way to start experimenting with the Sub Question Query Engine is to run the subquestion.ts file in [examples](https://github.com/run-llama/LlamaIndexTS/blob/main/examples/subquestion.ts).
+
+```bash
+npx ts-node subquestion.ts
+```
+
+### Tools
+
+The Sub Question Query Engine is implemented with Tools. The basic idea of Tools is that they are executable options for the large language model. In this case, our Sub Question Query Engine relies on the QueryEngineTool, which, as you guessed, is a tool for running queries against a query engine. This lets us give the model the option of querying different documents for different questions, for example. You could also imagine the Sub Question Query Engine using a Tool that searches the web or gets an answer using Wolfram Alpha. A sketch of this wiring follows below.
+
+You can learn more about Tools by taking a look at the LlamaIndex Python documentation: https://gpt-index.readthedocs.io/en/latest/core_modules/agent_modules/tools/root.html
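+
+As a sketch of how this fits together, mirroring the linked subquestion.ts example (the tool name and description below are illustrative, and `index` is assumed to be an existing index):
+
+```typescript
+import { SubQuestionQueryEngine } from "llamaindex";
+
+// Wrap an existing query engine as a tool the Sub Question Query Engine can call
+const subQuestionEngine = SubQuestionQueryEngine.fromDefaults({
+  queryEngineTools: [
+    {
+      queryEngine: index.asQueryEngine(),
+      metadata: {
+        name: "essay_tool",
+        description: "Useful for questions about the essay",
+      },
+    },
+  ],
+});
+
+// The engine splits the question, runs each sub-question against the tool,
+// then aggregates the answers into a single response
+const response = await subQuestionEngine.query("query string");
+```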
+
+## API Reference
+
+- [RetrieverQueryEngine](../../api/classes/RetrieverQueryEngine.md)
+- [SubQuestionQueryEngine](../../api/classes/SubQuestionQueryEngine.md)
+- [QueryEngineTool](../../api/interfaces/QueryEngineTool.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/index.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..a11c5dc96717693644b8c0dc688e4ef9ed5c7fbb
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/index.md
@@ -0,0 +1,50 @@
+# Core Modules
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+LlamaIndex.TS offers several core modules, separated into high-level modules for quickly getting started and low-level modules for customizing the key components as you need.
+
+## High-Level Modules
+
+- [**Document**](./high_level/documents_and_nodes.md): A document represents a text file, PDF file, or other contiguous piece of data.
+
+- [**Node**](./high_level/documents_and_nodes.md): The basic data building block. Most commonly, these are parts of a document split into manageable pieces that are small enough to be fed to an embedding model and the LLM.
+
+- [**Reader/Loader**](./high_level/data_loader.md): A reader or loader takes in a document from the real world and transforms it into a Document class that can then be used in your index and queries. We currently support plain text files and PDFs, with many more formats to come.
+
+- [**Indexes**](./high_level/data_index.md): Indexes store the Nodes and the embeddings of those nodes.
+
+- [**QueryEngine**](./high_level/query_engine.md): A query engine takes the query you put in and gives you back the result. It generally combines a pre-built prompt with selected nodes from your index to give the LLM the context it needs to answer your query.
+
+- [**ChatEngine**](./high_level/chat_engine.md): A ChatEngine helps you build a chatbot that interacts with your indexes.
+
+## Low-Level Modules
+
+- [**LLM**](./low_level/llm.md): The LLM class is a unified interface over a large language model provider such as OpenAI GPT-4, Anthropic Claude, or Meta LLaMA. You can subclass it to write a connector to your own large language model.
+
+- [**Embedding**](./low_level/embedding.md): An embedding is represented as a vector of floating point numbers. OpenAI's text-embedding-ada-002 is our default embedding model, and each embedding it generates consists of 1,536 floating point numbers. Another popular embedding model is BERT, which uses 768 floating point numbers to represent each node. We provide a number of utilities for working with embeddings, including 3 similarity calculation options and Maximum Marginal Relevance.
+
+- [**TextSplitter/NodeParser**](./low_level/node_parser.md): Text splitting strategies are incredibly important to the overall efficacy of embedding search. Currently, while we do have a default, there is no one-size-fits-all solution. Depending on the source documents, you may want to use different splitting sizes and strategies. Currently we support splitting by fixed size, splitting by fixed size with overlapping sections, splitting by sentence, and splitting by paragraph. The text splitter is used by the NodeParser when splitting `Document`s into `Node`s.
+
+- [**Retriever**](./low_level/retriever.md): The Retriever is what actually chooses the Nodes to retrieve from the index. Here, you may wish to try retrieving more or fewer Nodes per query, changing your similarity function, or creating your own retriever for each individual use case in your application. For example, you may wish to have a separate retriever for code content vs. text content.
+
+- [**ResponseSynthesizer**](./low_level/response_synthesizer.md): The ResponseSynthesizer is responsible for taking a query string and using a list of `Node`s to generate a response. This can take many forms, such as iterating over all the context and refining an answer, or building a tree of summaries and returning the root summary.
+
+- [**Storage**](./low_level/storage.md): At some point you'll want to store your indexes, data, and vectors instead of re-running the embedding models every time. IndexStore, DocStore, VectorStore, and KVStore are abstractions that let you do that. Combined, they form the StorageContext. Currently, we let you persist your embeddings in files on the filesystem (or a virtual in-memory filesystem), but we are also actively adding integrations to Vector Databases. A sketch of how these pieces compose follows this list.
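+
+As a sketch of how these low-level pieces compose by hand (assuming an existing `index`, and using only the `retrieve`/`synthesize` APIs shown on the Retriever and ResponseSynthesizer pages):
+
+```typescript
+import { ResponseSynthesizer } from "llamaindex";
+
+// Retrieve: select the most relevant nodes for the query
+const retriever = index.asRetriever();
+const nodesWithScore = await retriever.retrieve("query string");
+
+// Synthesize: have the LLM answer the query using those nodes as context
+const responseSynthesizer = new ResponseSynthesizer();
+const response = await responseSynthesizer.synthesize(
+  "query string",
+  nodesWithScore,
+);
+```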
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
new file mode 100644
index 0000000000000000000000000000000000000000..859c2361ef4015840bb4c70e7ff66482a04b7247
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/embedding.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 1
+---
+
+# Embedding
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## API Reference
+
+- [OpenAIEmbedding](../../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/llm.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..15c96e2c0df6120c0db45635ada1a041e362a22b
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/llm.md
@@ -0,0 +1,24 @@
+---
+sidebar_position: 0
+---
+
+# LLM
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+
+The LLM can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+## API Reference
+
+- [OpenAI](../../api/classes/OpenAI.md)
+- [ServiceContext](../../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
new file mode 100644
index 0000000000000000000000000000000000000000..a9847a9e8357aab7f9df6380c34357c8bbce2e71
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/node_parser.md
@@ -0,0 +1,35 @@
+---
+sidebar_position: 3
+---
+
+# NodeParser
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `ServiceContext` is used to do this for you automatically. Alternatively, you can use it to split documents ahead of time.
+
+```typescript
+import { Document, SimpleNodeParser } from "llamaindex";
+
+const nodeParser = new SimpleNodeParser();
+const nodes = nodeParser.getNodesFromDocuments([
+  new Document({ text: "I am 10 years old. John is 20 years old." }),
+]);
+```
+
+## TextSplitter
+
+The underlying text splitter splits text by sentences. It can also be used as a standalone module for splitting raw text.
+
+```typescript
+import { SentenceSplitter } from "llamaindex";
+
+const splitter = new SentenceSplitter({ chunkSize: 1 });
+
+const textSplits = splitter.splitText("Hello World");
+```
+
+## API Reference
+
+- [SimpleNodeParser](../../api/classes/SimpleNodeParser.md)
+- [SentenceSplitter](../../api/classes/SentenceSplitter.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
new file mode 100644
index 0000000000000000000000000000000000000000..28f92bac34c68a02f96fb6106b3f9809ffcb5410
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/response_synthesizer.md
@@ -0,0 +1,45 @@
+---
+sidebar_position: 6
+---
+
+# ResponseSynthesizer
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
+
+- `Refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. Good for more detailed answers.
+- `CompactAndRefine` (default): "compact" the prompt during each LLM call by stuffing as many text chunks as will fit within the maximum prompt size. If there are too many chunks to stuff into one prompt, "create and refine" an answer by going through multiple compact prompts. The same as `Refine`, but should result in fewer LLM calls.
+- `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.
+- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text chunk while accumulating the responses into an array. Returns a concatenated string of all responses. Good for when you need to run the same query separately against each text chunk.
+
+```typescript
+import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+
+const responseSynthesizer = new ResponseSynthesizer();
+
+const nodesWithScore: NodeWithScore[] = [
+  {
+    node: new TextNode({ text: "I am 10 years old." }),
+    score: 1,
+  },
+  {
+    node: new TextNode({ text: "John is 20 years old." }),
+    score: 0.5,
+  },
+];
+
+const response = await responseSynthesizer.synthesize(
+  "我幾歲?",
+  nodesWithScore,
+);
+console.log(response.response);
+```
+
+## API Reference
+
+- [ResponseSynthesizer](../../api/classes/ResponseSynthesizer.md)
+- [Refine](../../api/classes/Refine.md)
+- [CompactAndRefine](../../api/classes/CompactAndRefine.md)
+- [TreeSummarize](../../api/classes/TreeSummarize.md)
+- [SimpleResponseBuilder](../../api/classes/SimpleResponseBuilder.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
new file mode 100644
index 0000000000000000000000000000000000000000..f1a16d3a4d162cf9843a1212c2b54194236548e8
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/retriever.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 5
+---
+
+# Retriever
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch the top-k most similar nodes, while a `SummaryIndexRetriever` will fetch all nodes no matter the query.
+
+```typescript
+const retriever = vectorIndex.asRetriever();
+retriever.similarityTopK = 3;
+
+// Fetch nodes!
+const nodesWithScore = await retriever.retrieve("query string");
+```
+
+## API Reference
+
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
+- [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/storage.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce6919a04def3278119eb9db7a9420a0e7294ec1
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/modules/low_level/storage.md
@@ -0,0 +1,41 @@
+---
+sidebar_position: 7
+---
+
+# Storage
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+Storage in LlamaIndex.TS works automatically once you've configured a `StorageContext` object. Just set the `persistDir` and attach it to an index.
+
+Right now, only saving and loading from disk is supported, with more integrations planned!
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+
+const storageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```
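+
+To load what was persisted in a later session, a sketch along these lines should work (this assumes `VectorStoreIndex.init`, pointed at the same `persistDir`):
+
+```typescript
+// Point a fresh storage context at the same directory and rebuild the index from it
+const loadedStorageContext = await storageContextFromDefaults({
+  persistDir: "./storage",
+});
+
+const loadedIndex = await VectorStoreIndex.init({
+  storageContext: loadedStorageContext,
+});
+```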
+
+## API Reference
+
+- [StorageContext](../../api/interfaces/StorageContext.md)
diff --git a/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/starter.md b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/starter.md
new file mode 100644
index 0000000000000000000000000000000000000000..5f64f236724d9e724d9dabba3257e8680ed63d54
--- /dev/null
+++ b/apps/docs/i18n/zh_tw/docusaurus-plugin-content-docs/current/starter.md
@@ -0,0 +1,56 @@
+---
+sidebar_position: 2
+---
+
+# Starter Tutorial
+
+`This document has been automatically translated and may contain errors. Do not hesitate to open a Pull Request to propose changes.`
+
+Once you have [installed LlamaIndex.TS using NPM](installation) and set up your OpenAI key, you're ready to start your first app:
+
+In a new folder:
+
+```bash npm2yarn
+npm install typescript
+npm install @types/node
+npx tsc --init # if needed
+```
+
+Create the file `example.ts`. This code will load some example data, create a document, index it (which creates embeddings using OpenAI), and then create a query engine to answer questions about the data.
+
+```ts
+// example.ts
+import fs from "fs/promises";
+import { Document, VectorStoreIndex } from "llamaindex";
+
+async function main() {
+  // Load essay from abramov.txt in Node
+  const essay = await fs.readFile(
+    "node_modules/llamaindex/examples/abramov.txt",
+    "utf-8",
+  );
+
+  // Create Document object with essay
+  const document = new Document({ text: essay });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query("What did the author do in college?");
+
+  // Output response
+  console.log(response.toString());
+}
+
+main();
+```
+
+Then you can run it using:
+
+```bash
+npx ts-node example.ts
+```
+
+Ready to learn more? Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground.
diff --git a/package.json b/package.json
index bad62f6d2f87ce5dafec03cfa55693c30d12a163..97ffd24f5109fe4f56a71cca63c9d80f2a032658 100644
--- a/package.json
+++ b/package.json
@@ -32,6 +32,6 @@
     }
   },
   "lint-staged": {
-    "*.{js,jsx,ts,tsx,md}": "prettier --write"
+    "(!apps/docs/i18n/**/docusaurus-plugin-content-docs/current/api/*).{js,jsx,ts,tsx,md}": "prettier --write"
   }
 }