diff --git a/.vscode/settings.json b/.vscode/settings.json index 4930aa2d1536d873952d1a10fd765ee5ac9f1289..8d924b71c4d59f3e227fe42dfb4185f664498ca9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -32,7 +32,9 @@ "opendocument", "openrouter", "Qdrant", + "searxng", "Serper", + "Serply", "textgenwebui", "togetherai", "vectordbs", diff --git a/docker/.env.example b/docker/.env.example index a38b4c5a293c81f1fc7ca7e6a0092b76db4e627e..71572cc8eb543f486ad6f40d0b141592d9b4e6f4 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -245,3 +245,6 @@ GID='1000' #------ Serply.io ----------- https://serply.io/ # AGENT_SERPLY_API_KEY= + +#------ SearXNG ----------- https://github.com/searxng/searxng +# AGENT_SEARXNG_API_URL= \ No newline at end of file diff --git a/frontend/src/i18n.js b/frontend/src/i18n.js index 2f5ca580ccdfdaa2616e40206fa96a5e80b484f1..5344e1f9fee7fd144645076fd2eb4eaa01b21537 100644 --- a/frontend/src/i18n.js +++ b/frontend/src/i18n.js @@ -9,7 +9,7 @@ i18next .use(LanguageDetector) .init({ fallbackLng: "en", - debug: true, + debug: import.meta.env.DEV, defaultNS, resources, lowerCaseLng: true, diff --git a/frontend/src/locales/resources.js b/frontend/src/locales/resources.js index d2072d34a49f44161d0dbbd6ebd8de7a1466cd6f..04c06502e0ea54783d19a4f329baa35c7411809c 100644 --- a/frontend/src/locales/resources.js +++ b/frontend/src/locales/resources.js @@ -10,10 +10,15 @@ // to a specific language file as this will break the other languages. Any new keys should be added to english // and the language file you are working on. +// Contributor Notice: If you are adding a translation you MUST locally run `yarn verify:translations` from the root prior to PR. +// please do not submit PR's without first verifying this test passes as it will tell you about missing keys or values +// from the primary dictionary. 
+ import English from "./en/common.js"; import Spanish from "./es/common.js"; import French from "./fr/common.js"; import Mandarin from "./zh/common.js"; +import Russian from "./ru/common.js"; export const defaultNS = "common"; export const resources = { @@ -29,4 +34,7 @@ export const resources = { fr: { common: French, }, + ru: { + common: Russian, + }, }; diff --git a/frontend/src/locales/ru/common.js b/frontend/src/locales/ru/common.js new file mode 100644 index 0000000000000000000000000000000000000000..34f9591c68d33f6f24adacd528994b1ce9e31461 --- /dev/null +++ b/frontend/src/locales/ru/common.js @@ -0,0 +1,415 @@ +const TRANSLATIONS = { + common: { + "workspaces-name": "Ð˜Ð¼Ñ Ñ€Ð°Ð±Ð¾Ñ‡Ð¸Ñ… проÑтранÑтв", + error: "ошибка", + success: "уÑпех", + user: "Пользователь", + selection: "Выбор модели", + saving: "Сохранение...", + save: "Сохранить изменениÑ", + previous: "ÐŸÑ€ÐµÐ´Ñ‹Ð´ÑƒÑ‰Ð°Ñ Ñтраница", + next: "Ð¡Ð»ÐµÐ´ÑƒÑŽÑ‰Ð°Ñ Ñтраница", + }, + settings: { + title: "ÐаÑтройки ÑкземплÑра", + system: "СиÑтемные наÑтройки", + invites: "Приглашение", + users: "Пользователи", + workspaces: "Рабочие проÑтранÑтва", + "workspace-chats": "Чат рабочего проÑтранÑтва", + appearance: "Внешний вид", + "api-keys": "API ключи", + llm: "Предпочтение LLM", + transcription: "Модель транÑкрипции", + embedder: "ÐаÑтройки вÑтраиваниÑ", + "text-splitting": "Разделение и ÑÐµÐ³Ð¼ÐµÐ½Ñ‚Ð°Ñ†Ð¸Ñ Ñ‚ÐµÐºÑта", + "vector-database": "Ð’ÐµÐºÑ‚Ð¾Ñ€Ð½Ð°Ñ Ð±Ð°Ð·Ð° данных", + embeds: "Виджеты вÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ Ñ‡Ð°Ñ‚Ð°", + "embed-chats": "ИÑÑ‚Ð¾Ñ€Ð¸Ñ Ð²ÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ Ñ‡Ð°Ñ‚Ð¾Ð²", + security: "БезопаÑноÑÑ‚ÑŒ", + "event-logs": "Журналы Ñобытий", + privacy: "КонфиденциальноÑÑ‚ÑŒ и данные", + }, + login: { + "multi-user": { + welcome: "Добро пожаловать в", + "placeholder-username": "Ð˜Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ", + "placeholder-password": "Пароль", + login: "Войти", + validating: "Проверка...", + "forgot-pass": "Забыли пароль", + reset: "СброÑить", + }, + "sign-in": { + start: "Войти в ваш", + end: 
"аккаунт.", + }, + }, + "workspaces—settings": { + general: "Общие наÑтройки", + chat: "ÐаÑтройки чата", + vector: "Ð’ÐµÐºÑ‚Ð¾Ñ€Ð½Ð°Ñ Ð±Ð°Ð·Ð° данных", + members: "УчаÑтники", + agent: "ÐšÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ Ð°Ð³ÐµÐ½Ñ‚Ð°", + }, + general: { + vector: { + title: "КоличеÑтво векторов", + description: "Общее количеÑтво векторов в вашей векторной базе данных.", + }, + names: { + description: + "Ðто изменит только отображаемое Ð¸Ð¼Ñ Ð²Ð°ÑˆÐµÐ³Ð¾ рабочего проÑтранÑтва.", + }, + message: { + title: "Предлагаемые ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ñ‡Ð°Ñ‚Ð°", + description: + "ÐаÑтройте ÑообщениÑ, которые будут предложены пользователÑм вашего рабочего проÑтранÑтва.", + add: "Добавить новое Ñообщение", + save: "Сохранить ÑообщениÑ", + heading: "ОбъÑÑните мне", + body: "преимущеÑтва AnythingLLM", + }, + pfp: { + title: "Изображение Ð¿Ñ€Ð¾Ñ„Ð¸Ð»Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰Ð½Ð¸ÐºÐ°", + description: + "ÐаÑтройте изображение Ð¿Ñ€Ð¾Ñ„Ð¸Ð»Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰Ð½Ð¸ÐºÐ° Ð´Ð»Ñ Ñтого рабочего проÑтранÑтва.", + image: "Изображение рабочего проÑтранÑтва", + remove: "Удалить изображение рабочего проÑтранÑтва", + }, + delete: { + delete: "Удалить рабочее проÑтранÑтво", + deleting: "Удаление рабочего проÑтранÑтва...", + "confirm-start": "Ð’Ñ‹ ÑобираетеÑÑŒ удалить веÑÑŒ ваш", + "confirm-end": + "рабочее проÑтранÑтво. Ðто удалит вÑе векторные вÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ Ð² вашей векторной базе данных.\n\nОригинальные иÑходные файлы оÑтанутÑÑ Ð½ÐµÑ‚Ñ€Ð¾Ð½ÑƒÑ‚Ñ‹Ð¼Ð¸. Ðто дейÑтвие необратимо.", + }, + }, + chat: { + llm: { + title: "ПоÑтавщик LLM рабочего проÑтранÑтва", + description: + "Конкретный поÑтавщик и модель LLM, которые будут иÑпользоватьÑÑ Ð´Ð»Ñ Ñтого рабочего проÑтранÑтва. По умолчанию иÑпользуетÑÑ ÑиÑтемный поÑтавщик и наÑтройки LLM.", + search: "ИÑкать вÑех поÑтавщиков LLM", + }, + model: { + title: "Модель чата рабочего проÑтранÑтва", + description: + "ÐšÐ¾Ð½ÐºÑ€ÐµÑ‚Ð½Ð°Ñ Ð¼Ð¾Ð´ÐµÐ»ÑŒ чата, ÐºÐ¾Ñ‚Ð¾Ñ€Ð°Ñ Ð±ÑƒÐ´ÐµÑ‚ иÑпользоватьÑÑ Ð´Ð»Ñ Ñтого рабочего проÑтранÑтва. 
ЕÑли пуÑто, будет иÑпользоватьÑÑ ÑиÑтемное предпочтение LLM.", + wait: "-- ожидание моделей --", + }, + mode: { + title: "Режим чата", + chat: { + title: "Чат", + "desc-start": "будет предоÑтавлÑÑ‚ÑŒ ответы Ñ Ð¾Ð±Ñ‰ÐµÐ¹ информацией LLM", + and: "и", + "desc-end": "найденный контекÑÑ‚ документов.", + }, + query: { + title: "ЗапроÑ", + "desc-start": "будет предоÑтавлÑÑ‚ÑŒ ответы", + only: "только", + "desc-end": "еÑли найден контекÑÑ‚ документов.", + }, + }, + history: { + title: "ИÑÑ‚Ð¾Ñ€Ð¸Ñ Ñ‡Ð°Ñ‚Ð°", + "desc-start": + "КоличеÑтво предыдущих чатов, которые будут включены в краткоÑрочную памÑÑ‚ÑŒ ответа.", + recommend: "Рекомендуем 20.", + "desc-end": + "Любое количеÑтво более 45 может привеÑти к непрерывным ÑбоÑм чата в завиÑимоÑти от размера Ñообщений.", + }, + prompt: { + title: "ПодÑказка", + description: + "ПодÑказка, ÐºÐ¾Ñ‚Ð¾Ñ€Ð°Ñ Ð±ÑƒÐ´ÐµÑ‚ иÑпользоватьÑÑ Ð² Ñтом рабочем проÑтранÑтве. Определите контекÑÑ‚ и инÑтрукции Ð´Ð»Ñ AI Ð´Ð»Ñ ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð°. Ð’Ñ‹ должны предоÑтавить тщательно разработанную подÑказку, чтобы AI мог генерировать релевантный и точный ответ.", + }, + refusal: { + title: "Ответ об отказе в режиме запроÑа", + "desc-start": "Ð’ режиме", + query: "запроÑа", + "desc-end": + "вы можете вернуть пользовательÑкий ответ об отказе, еÑли контекÑÑ‚ не найден.", + }, + temperature: { + title: "Температура LLM", + "desc-start": + "Ðтот параметр контролирует, наÑколько 'креативными' будут ответы вашего LLM.", + "desc-end": + "Чем выше чиÑло, тем более креативные ответы. Ð”Ð»Ñ Ð½ÐµÐºÐ¾Ñ‚Ð¾Ñ€Ñ‹Ñ… моделей Ñто может привеÑти к неÑвÑзным ответам при Ñлишком выÑоких наÑтройках.", + hint: "БольшинÑтво LLM имеют различные допуÑтимые диапазоны значений. 
ПроконÑультируйтеÑÑŒ Ñ Ð²Ð°ÑˆÐ¸Ð¼ поÑтавщиком LLM Ð´Ð»Ñ Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ñтой информации.", + }, + }, + "vector-workspace": { + identifier: "Идентификатор векторной базы данных", + snippets: { + title: "МакÑимальное количеÑтво контекÑтных фрагментов", + description: + "Ðтот параметр контролирует макÑимальное количеÑтво контекÑтных фрагментов, которые будут отправлены LLM Ð´Ð»Ñ ÐºÐ°Ð¶Ð´Ð¾Ð³Ð¾ чата или запроÑа.", + recommend: "Рекомендуемое количеÑтво: 4", + }, + doc: { + title: "Порог ÑходÑтва документов", + description: + "ÐœÐ¸Ð½Ð¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ Ð¾Ñ†ÐµÐ½ÐºÐ° ÑходÑтва, Ð½ÐµÐ¾Ð±Ñ…Ð¾Ð´Ð¸Ð¼Ð°Ñ Ð´Ð»Ñ Ñ‚Ð¾Ð³Ð¾, чтобы иÑточник ÑчиталÑÑ ÑвÑзанным Ñ Ñ‡Ð°Ñ‚Ð¾Ð¼. Чем выше чиÑло, тем более Ñхожим должен быть иÑточник Ñ Ñ‡Ð°Ñ‚Ð¾Ð¼.", + zero: "Без ограничений", + low: "Ðизкий (оценка ÑходÑтва ≥ .25)", + medium: "Средний (оценка ÑходÑтва ≥ .50)", + high: "Ð’Ñ‹Ñокий (оценка ÑходÑтва ≥ .75)", + }, + reset: { + reset: "Ð¡Ð±Ñ€Ð¾Ñ Ð²ÐµÐºÑ‚Ð¾Ñ€Ð½Ð¾Ð¹ базы данных", + resetting: "ОчиÑтка векторов...", + confirm: + "Ð’Ñ‹ ÑобираетеÑÑŒ ÑброÑить векторную базу данных Ñтого рабочего проÑтранÑтва. Ðто удалит вÑе текущие векторные вÑтраиваниÑ.\n\nОригинальные иÑходные файлы оÑтанутÑÑ Ð½ÐµÑ‚Ñ€Ð¾Ð½ÑƒÑ‚Ñ‹Ð¼Ð¸. Ðто дейÑтвие необратимо.", + error: "Ðе удалоÑÑŒ ÑброÑить векторную базу данных рабочего проÑтранÑтва!", + success: "Ð’ÐµÐºÑ‚Ð¾Ñ€Ð½Ð°Ñ Ð±Ð°Ð·Ð° данных рабочего проÑтранÑтва была Ñброшена!", + }, + }, + agent: { + "performance-warning": + "ПроизводительноÑÑ‚ÑŒ LLM, не поддерживающих вызовы инÑтрументов, Ñильно завиÑит от возможноÑтей и точноÑти модели. 
Ðекоторые ÑпоÑобноÑти могут быть ограничены или не функционировать.", + provider: { + title: "ПоÑтавщик LLM агента рабочего проÑтранÑтва", + description: + "Конкретный поÑтавщик и модель LLM, которые будут иÑпользоватьÑÑ Ð´Ð»Ñ Ð°Ð³ÐµÐ½Ñ‚Ð° @agent Ñтого рабочего проÑтранÑтва.", + }, + mode: { + chat: { + title: "Модель чата агента рабочего проÑтранÑтва", + description: + "ÐšÐ¾Ð½ÐºÑ€ÐµÑ‚Ð½Ð°Ñ Ð¼Ð¾Ð´ÐµÐ»ÑŒ чата, ÐºÐ¾Ñ‚Ð¾Ñ€Ð°Ñ Ð±ÑƒÐ´ÐµÑ‚ иÑпользоватьÑÑ Ð´Ð»Ñ Ð°Ð³ÐµÐ½Ñ‚Ð° @agent Ñтого рабочего проÑтранÑтва.", + }, + title: "Модель агента рабочего проÑтранÑтва", + description: + "ÐšÐ¾Ð½ÐºÑ€ÐµÑ‚Ð½Ð°Ñ Ð¼Ð¾Ð´ÐµÐ»ÑŒ LLM, ÐºÐ¾Ñ‚Ð¾Ñ€Ð°Ñ Ð±ÑƒÐ´ÐµÑ‚ иÑпользоватьÑÑ Ð´Ð»Ñ Ð°Ð³ÐµÐ½Ñ‚Ð° @agent Ñтого рабочего проÑтранÑтва.", + wait: "-- ожидание моделей --", + }, + skill: { + title: "Ðавыки агента по умолчанию", + description: + "Улучшите еÑтеÑтвенные ÑпоÑобноÑти агента по умолчанию Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ Ñтих предуÑтановленных навыков. Ðта наÑтройка применÑетÑÑ ÐºÐ¾ вÑем рабочим проÑтранÑтвам.", + rag: { + title: "RAG и Ð´Ð¾Ð»Ð³Ð¾Ð²Ñ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ Ð¿Ð°Ð¼ÑÑ‚ÑŒ", + description: + "Позвольте агенту иÑпользовать ваши локальные документы Ð´Ð»Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð° на Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¸Ð»Ð¸ попроÑите агента 'запомнить' чаÑти контента Ð´Ð»Ñ Ð´Ð¾Ð»Ð³Ð¾Ñрочного Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¸Ð· памÑти.", + }, + view: { + title: "ПроÑмотр и резюмирование документов", + description: + "Позвольте агенту перечиÑлÑÑ‚ÑŒ и резюмировать Ñодержание файлов рабочего проÑтранÑтва, которые в данный момент вÑтроены.", + }, + scrape: { + title: "Сбор данных Ñ Ð²ÐµÐ±-Ñайтов", + description: + "Позвольте агенту поÑещать и Ñобирать Ñодержимое веб-Ñайтов.", + }, + generate: { + title: "Создание диаграмм", + description: + "Включите возможноÑÑ‚ÑŒ ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ€Ð°Ð·Ð»Ð¸Ñ‡Ð½Ñ‹Ñ… типов диаграмм из предоÑтавленных данных или данных, указанных в чате.", + }, + save: { + title: "Создание и Ñохранение файлов в браузер", + description: + "Включите возможноÑÑ‚ÑŒ ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¸ запиÑи файлов, которые можно Ñохранить и загрузить в вашем 
браузере.", + }, + web: { + title: "ПоиÑк в Интернете и проÑмотр в реальном времени", + "desc-start": + "Позвольте вашему агенту иÑкать в Интернете Ð´Ð»Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð° на ваши вопроÑÑ‹, подключаÑÑÑŒ к поÑтавщику поиÑка (SERP).", + "desc-end": + "ПоиÑк в Интернете во Ð²Ñ€ÐµÐ¼Ñ ÑеÑÑий агента не будет работать, пока Ñто не наÑтроено.", + }, + }, + }, + recorded: { + title: "Чаты рабочего проÑтранÑтва", + description: + "Ðто вÑе запиÑанные чаты и ÑообщениÑ, отправленные пользователÑми, упорÑдоченные по дате ÑозданиÑ.", + export: "ÐкÑпорт", + table: { + id: "Идентификатор", + by: "Отправлено", + workspace: "Рабочее проÑтранÑтво", + prompt: "ПодÑказка", + response: "Ответ", + at: "Отправлено в", + }, + }, + appearance: { + title: "Внешний вид", + description: "ÐаÑтройте параметры внешнего вида вашей платформы.", + logo: { + title: "ÐаÑтроить логотип", + description: + "Загрузите Ñвой логотип, чтобы перÑонализировать ваш чат-бот.", + add: "Добавить пользовательÑкий логотип", + recommended: "Рекомендуемый размер: 800 x 200", + remove: "Удалить", + replace: "Заменить", + }, + message: { + title: "ÐаÑтроить ÑообщениÑ", + description: + "ÐаÑтройте автоматичеÑкие ÑообщениÑ, отображаемые вашим пользователÑм.", + new: "Ðовое", + system: "ÑиÑтема", + user: "пользователь", + message: "Ñообщение", + assistant: "Чат-аÑÑиÑтент AnythingLLM", + "double-click": "Дважды щелкните, чтобы редактировать...", + save: "Сохранить ÑообщениÑ", + }, + icons: { + title: "ПользовательÑкие иконки в подвале", + description: + "ÐаÑтройте иконки в подвале, отображаемые внизу боковой панели.", + icon: "Иконка", + link: "СÑылка", + }, + }, + api: { + title: "API ключи", + description: + "API ключи позволÑÑŽÑ‚ владельцу программно получать доÑтуп к Ñтому ÑкземплÑру AnythingLLM и управлÑÑ‚ÑŒ им.", + link: "Прочитать документацию по API", + generate: "Создать новый API ключ", + table: { + key: "API ключ", + by: "Создано", + created: "Создано", + }, + }, + llm: { + title: "Предпочтение LLM", + description: + 
"Ðто учетные данные и наÑтройки Ð´Ð»Ñ Ð²Ð°ÑˆÐµÐ³Ð¾ предпочтительного поÑтавщика чата и вÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ LLM. Важно, чтобы Ñти ключи были актуальными и правильными, иначе AnythingLLM не будет работать должным образом.", + provider: "ПоÑтавщик LLM", + }, + transcription: { + title: "Предпочтение модели транÑкрипции", + description: + "Ðто учетные данные и наÑтройки Ð´Ð»Ñ Ð²Ð°ÑˆÐµÐ³Ð¾ предпочтительного поÑтавщика моделей транÑкрипции. Важно, чтобы Ñти ключи были актуальными и правильными, иначе медиафайлы и аудио не будут транÑкрибироватьÑÑ.", + provider: "ПоÑтавщик транÑкрипции", + "warn-start": + "ИÑпользование локальной модели whisper на машинах Ñ Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð½Ð¾Ð¹ оперативной памÑтью или процеÑÑором может привеÑти к завиÑанию AnythingLLM при обработке медиафайлов.", + "warn-recommend": + "Мы рекомендуем минимум 2ГБ оперативной памÑти и загружать файлы <10МБ.", + "warn-end": + "Ð’ÑÑ‚Ñ€Ð¾ÐµÐ½Ð½Ð°Ñ Ð¼Ð¾Ð´ÐµÐ»ÑŒ будет автоматичеÑки загружена при первом иÑпользовании.", + }, + embedding: { + title: "ÐаÑтройки вÑтраиваниÑ", + "desc-start": + "При иÑпользовании LLM, который не поддерживает вÑтроенный механизм вÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ - возможно, потребуетÑÑ Ð´Ð¾Ð¿Ð¾Ð»Ð½Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾ указать учетные данные Ð´Ð»Ñ Ð²ÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ Ñ‚ÐµÐºÑта.", + "desc-end": + "Ð’Ñтраивание - Ñто процеÑÑ Ð¿Ñ€ÐµÐ²Ñ€Ð°Ñ‰ÐµÐ½Ð¸Ñ Ñ‚ÐµÐºÑта в векторы. 
Ðти учетные данные необходимы Ð´Ð»Ñ Ð¿Ñ€ÐµÐ²Ñ€Ð°Ñ‰ÐµÐ½Ð¸Ñ Ð²Ð°ÑˆÐ¸Ñ… файлов и подÑказок в формат, который AnythingLLM может иÑпользовать Ð´Ð»Ñ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚ÐºÐ¸.", + provider: { + title: "ПоÑтавщик вÑтраиваниÑ", + description: + "Ðет необходимоÑти в наÑтройке при иÑпользовании вÑтроенного механизма вÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ AnythingLLM.", + }, + }, + text: { + title: "ÐаÑтройки Ñ€Ð°Ð·Ð´ÐµÐ»ÐµÐ½Ð¸Ñ Ð¸ Ñегментации текÑта", + "desc-start": + "Иногда может понадобитьÑÑ Ð¸Ð·Ð¼ÐµÐ½Ð¸Ñ‚ÑŒ Ñтандартный ÑпоÑоб Ñ€Ð°Ð·Ð´ÐµÐ»ÐµÐ½Ð¸Ñ Ð¸ Ñегментации новых документов перед их вÑтавкой в векторную базу данных.", + "desc-end": + "Следует изменÑÑ‚ÑŒ Ñтот параметр только при полном понимании работы Ñ€Ð°Ð·Ð´ÐµÐ»ÐµÐ½Ð¸Ñ Ñ‚ÐµÐºÑта и его побочных Ñффектов.", + "warn-start": "Ð˜Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ð·Ð´ÐµÑÑŒ будут применÑÑ‚ÑŒÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ к", + "warn-center": "новым вÑтроенным документам", + "warn-end": ", а не к ÑущеÑтвующим документам.", + size: { + title: "Размер Ñегмента текÑта", + description: + "Ðто макÑÐ¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ Ð´Ð»Ð¸Ð½Ð° Ñимволов, которые могут приÑутÑтвовать в одном векторе.", + recommend: "МакÑÐ¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ Ð´Ð»Ð¸Ð½Ð° модели вÑÑ‚Ñ€Ð°Ð¸Ð²Ð°Ð½Ð¸Ñ ÑоÑтавлÑет", + }, + overlap: { + title: "Перекрытие Ñегментов текÑта", + description: + "Ðто макÑимальное перекрытие Ñимволов, которое проиÑходит при Ñегментации между Ð´Ð²ÑƒÐ¼Ñ Ñмежными Ñегментами текÑта.", + }, + }, + vector: { + title: "Ð’ÐµÐºÑ‚Ð¾Ñ€Ð½Ð°Ñ Ð±Ð°Ð·Ð° данных", + description: + "Ðто учетные данные и наÑтройки Ð´Ð»Ñ Ñ‚Ð¾Ð³Ð¾, как будет функционировать ваш ÑкземплÑÑ€ AnythingLLM. Важно, чтобы Ñти ключи были актуальными и правильными.", + provider: { + title: "ПоÑтавщик векторной базы данных", + description: "ÐаÑтройка Ð´Ð»Ñ LanceDB не требуетÑÑ.", + }, + }, + embeddable: { + title: "Ð’Ñтраиваемые виджеты чата", + description: + "Ð’Ñтраиваемые виджеты чата - Ñто интерфейÑÑ‹ чата, ориентированные на публичное иÑпользование и привÑзанные к одному рабочему проÑтранÑтву. 
Они позволÑÑŽÑ‚ Ñоздавать рабочие проÑтранÑтва, которые затем можно публиковать в Интернете.", + create: "Создать вÑтраивание", + table: { + workspace: "Рабочее проÑтранÑтво", + chats: "Отправленные чаты", + Active: "Ðктивные домены", + }, + }, + "embed-chats": { + title: "Ð’Ñтраивание чатов", + description: + "Ðто вÑе запиÑанные чаты и ÑÐ¾Ð¾Ð±Ñ‰ÐµÐ½Ð¸Ñ Ð¾Ñ‚ любого вÑтраиваниÑ, которое вы опубликовали.", + table: { + embed: "Ð’Ñтраивание", + sender: "Отправитель", + message: "Сообщение", + response: "Ответ", + at: "Отправлено в", + }, + }, + multi: { + title: "МногопользовательÑкий режим", + description: + "ÐаÑтройте ваш ÑкземплÑÑ€ Ð´Ð»Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶ÐºÐ¸ вашей команды, активировав многопользовательÑкий режим.", + enable: { + "is-enable": "МногопользовательÑкий режим включен", + enable: "Включить многопользовательÑкий режим", + description: + "По умолчанию, вы будете единÑтвенным админиÑтратором. Как админиÑтратор, вы должны будете Ñоздавать учетные запиÑи Ð´Ð»Ñ Ð²Ñех новых пользователей или админиÑтраторов. Ðе терÑйте ваш пароль, так как только админиÑтратор может ÑброÑить пароли.", + username: "Ð˜Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ ÑƒÑ‡ÐµÑ‚Ð½Ð¾Ð¹ запиÑи админиÑтратора", + password: "Пароль учетной запиÑи админиÑтратора", + }, + password: { + title: "Защита паролем", + description: + "Защитите ваш ÑкземплÑÑ€ AnythingLLM паролем. ЕÑли вы забудете его, метода воÑÑÑ‚Ð°Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð½Ðµ ÑущеÑтвует, поÑтому убедитеÑÑŒ, что вы Ñохранили Ñтот пароль.", + }, + instance: { + title: "Защитить ÑкземплÑÑ€ паролем", + description: + "По умолчанию, вы будете единÑтвенным админиÑтратором. Как админиÑтратор, вы должны будете Ñоздавать учетные запиÑи Ð´Ð»Ñ Ð²Ñех новых пользователей или админиÑтраторов. 
Ðе терÑйте ваш пароль, так как только админиÑтратор может ÑброÑить пароли.", + password: "Пароль ÑкземплÑра", + }, + }, + event: { + title: "Журналы Ñобытий", + description: + "ПроÑматривайте вÑе дейÑÑ‚Ð²Ð¸Ñ Ð¸ ÑобытиÑ, проиÑходÑщие в Ñтом ÑкземплÑре Ð´Ð»Ñ Ð¼Ð¾Ð½Ð¸Ñ‚Ð¾Ñ€Ð¸Ð½Ð³Ð°.", + clear: "ОчиÑтить журналы Ñобытий", + table: { + type: "Тип ÑобытиÑ", + user: "Пользователь", + occurred: "Произошло в", + }, + }, + privacy: { + title: "КонфиденциальноÑÑ‚ÑŒ и обработка данных", + description: + "Ðто ваша ÐºÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ Ð´Ð»Ñ Ñ‚Ð¾Ð³Ð¾, как подключенные Ñторонние поÑтавщики и AnythingLLM обрабатывают ваши данные.", + llm: "Выбор LLM", + embedding: "Предпочтение вÑтраиваниÑ", + vector: "Ð’ÐµÐºÑ‚Ð¾Ñ€Ð½Ð°Ñ Ð±Ð°Ð·Ð° данных", + anonymous: "ÐÐ½Ð¾Ð½Ð¸Ð¼Ð½Ð°Ñ Ñ‚ÐµÐ»ÐµÐ¼ÐµÑ‚Ñ€Ð¸Ñ Ð²ÐºÐ»ÑŽÑ‡ÐµÐ½Ð°", + }, +}; + +export default TRANSLATIONS; diff --git a/frontend/src/locales/verifyTranslations.mjs b/frontend/src/locales/verifyTranslations.mjs index dccec76d34465a5d44ce14bb2ead52248f19f7c6..93f7eaa68e81e32d0745b7ba859b6a693377d3ca 100644 --- a/frontend/src/locales/verifyTranslations.mjs +++ b/frontend/src/locales/verifyTranslations.mjs @@ -9,12 +9,16 @@ function langDisplayName(lang) { function compareStructures(lang, a, b, subdir = null) { //if a and b aren't the same type, they can't be equal - if (typeof a !== typeof b) { + if (typeof a !== typeof b && a !== null && b !== null) { console.log("Invalid type comparison", [ { lang, a: typeof a, b: typeof b, + values: { + a, + b, + }, ...(!!subdir ? 
{ subdir } : {}), }, ]);
diff --git a/frontend/src/pages/Admin/Agents/WebSearchSelection/SearchProviderOptions/index.jsx b/frontend/src/pages/Admin/Agents/WebSearchSelection/SearchProviderOptions/index.jsx
index 58ceb844741919d3f22f468c0c9818b2d4ec9b0f..c5ccd2607bfe96e91bfddee11886d2ca5963c08b 100644
--- a/frontend/src/pages/Admin/Agents/WebSearchSelection/SearchProviderOptions/index.jsx
+++ b/frontend/src/pages/Admin/Agents/WebSearchSelection/SearchProviderOptions/index.jsx
@@ -182,3 +182,25 @@ export function SerplySearchOptions({ settings }) { </> ); }
+
+export function SearXNGOptions({ settings }) {
+  return (
+    <div className="flex gap-x-4">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          SearXNG API base URL
+        </label>
+        <input
+          type="url"
+          name="env::AgentSearXNGApiUrl"
+          className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="SearXNG API base URL"
+          defaultValue={settings?.AgentSearXNGApiUrl}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
+    </div>
+  );
+}
diff --git a/frontend/src/pages/Admin/Agents/WebSearchSelection/icons/searxng.png b/frontend/src/pages/Admin/Agents/WebSearchSelection/icons/searxng.png
new file mode 100644
index 0000000000000000000000000000000000000000..434e570f8bfe0f8752b186a8fac772e73c40fe18
Binary files /dev/null and b/frontend/src/pages/Admin/Agents/WebSearchSelection/icons/searxng.png differ
diff --git a/frontend/src/pages/Admin/Agents/WebSearchSelection/index.jsx b/frontend/src/pages/Admin/Agents/WebSearchSelection/index.jsx
index 9650c38fbfe7e133e95bb2838ce956ac7a1cc533..438be111404f4c34451d0fa581b3b4a6a00be08a 100644
--- a/frontend/src/pages/Admin/Agents/WebSearchSelection/index.jsx
+++ b/frontend/src/pages/Admin/Agents/WebSearchSelection/index.jsx
@@ -4,6 +4,7 @@ import GoogleSearchIcon from "./icons/google.png"; import SerperDotDevIcon from 
"./icons/serper.png"; import BingSearchIcon from "./icons/bing.png"; import SerplySearchIcon from "./icons/serply.png"; +import SearXNGSearchIcon from "./icons/searxng.png"; import { CaretUpDown, MagnifyingGlass, @@ -17,6 +18,7 @@ import { GoogleSearchOptions, BingSearchOptions, SerplySearchOptions, + SearXNGOptions, } from "./SearchProviderOptions"; const SEARCH_PROVIDERS = [ @@ -60,6 +62,14 @@ const SEARCH_PROVIDERS = [ description: "Serply.io web-search. Free account with a 100 calls/month forever.", }, + { + name: "SearXNG", + value: "searxng-engine", + logo: SearXNGSearchIcon, + options: (settings) => <SearXNGOptions settings={settings} />, + description: + "Free, open-source, internet meta-search engine with no tracking.", + }, ]; export default function AgentWebSearchSelection({ diff --git a/server/.env.example b/server/.env.example index a88a8a039b09ec0b4298e8c9bd7a866c2fd0862c..3a4fb072fea0d101fd526d46edc090bbb1847e61 100644 --- a/server/.env.example +++ b/server/.env.example @@ -241,3 +241,6 @@ TTS_PROVIDER="native" #------ Serply.io ----------- https://serply.io/ # AGENT_SERPLY_API_KEY= + +#------ SearXNG ----------- https://github.com/searxng/searxng +# AGENT_SEARXNG_API_URL= \ No newline at end of file diff --git a/server/endpoints/api/index.js b/server/endpoints/api/index.js index c5a2b8a8d32fd1f6dd190abad62d2f4665a3e7bb..fdf225b85323f198000c98693d33a404d4fed365 100644 --- a/server/endpoints/api/index.js +++ b/server/endpoints/api/index.js @@ -4,6 +4,7 @@ const { apiAuthEndpoints } = require("./auth"); const { apiDocumentEndpoints } = require("./document"); const { apiSystemEndpoints } = require("./system"); const { apiWorkspaceEndpoints } = require("./workspace"); +const { apiWorkspaceThreadEndpoints } = require("./workspaceThread"); const { apiUserManagementEndpoints } = require("./userManagement"); // All endpoints must be documented and pass through the validApiKey Middleware. 
@@ -17,6 +18,7 @@ function developerEndpoints(app, router) { apiSystemEndpoints(router); apiWorkspaceEndpoints(router); apiDocumentEndpoints(router); + apiWorkspaceThreadEndpoints(router); apiUserManagementEndpoints(router); } diff --git a/server/endpoints/api/workspaceThread/index.js b/server/endpoints/api/workspaceThread/index.js new file mode 100644 index 0000000000000000000000000000000000000000..a8c859a80b878aed39d8f6743b424a1c7b8047c9 --- /dev/null +++ b/server/endpoints/api/workspaceThread/index.js @@ -0,0 +1,594 @@ +const { v4: uuidv4 } = require("uuid"); +const { WorkspaceThread } = require("../../../models/workspaceThread"); +const { Workspace } = require("../../../models/workspace"); +const { validApiKey } = require("../../../utils/middleware/validApiKey"); +const { reqBody, multiUserMode } = require("../../../utils/http"); +const { chatWithWorkspace } = require("../../../utils/chats"); +const { + streamChatWithWorkspace, + VALID_CHAT_MODE, +} = require("../../../utils/chats/stream"); +const { Telemetry } = require("../../../models/telemetry"); +const { EventLogs } = require("../../../models/eventLogs"); +const { + writeResponseChunk, + convertToChatHistory, +} = require("../../../utils/helpers/chat/responses"); +const { WorkspaceChats } = require("../../../models/workspaceChats"); +const { User } = require("../../../models/user"); + +function apiWorkspaceThreadEndpoints(app) { + if (!app) return; + + app.post( + "/v1/workspace/:slug/thread/new", + [validApiKey], + async (request, response) => { + /* + #swagger.tags = ['Workspace Threads'] + #swagger.description = 'Create a new workspace thread' + #swagger.parameters['slug'] = { + in: 'path', + description: 'Unique slug of workspace', + required: true, + type: 'string' + } + #swagger.requestBody = { + description: 'Optional userId associated with the thread', + required: false, + type: 'object', + content: { + "application/json": { + example: { + userId: 1 + } + } + } + } + #swagger.responses[200] = { + 
content: { + "application/json": { + schema: { + type: 'object', + example: { + thread: { + "id": 1, + "name": "Thread", + "slug": "thread-uuid", + "user_id": 1, + "workspace_id": 1 + }, + message: null + } + } + } + } + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { slug } = request.params; + const { userId } = reqBody(request); + const workspace = await Workspace.get({ slug }); + + if (!workspace) { + response.sendStatus(400).end(); + return; + } + + const { thread, message } = await WorkspaceThread.new( + workspace, + userId ? Number(userId) : null + ); + + await Telemetry.sendTelemetry("workspace_thread_created", { + multiUserMode: multiUserMode(response), + LLMSelection: process.env.LLM_PROVIDER || "openai", + Embedder: process.env.EMBEDDING_ENGINE || "inherit", + VectorDbSelection: process.env.VECTOR_DB || "lancedb", + }); + await EventLogs.logEvent("api_workspace_thread_created", { + workspaceName: workspace?.name || "Unknown Workspace", + }); + response.status(200).json({ thread, message }); + } catch (e) { + console.log(e.message, e); + response.sendStatus(500).end(); + } + } + ); + + app.post( + "/v1/workspace/:slug/thread/:threadSlug/update", + [validApiKey], + async (request, response) => { + /* + #swagger.tags = ['Workspace Threads'] + #swagger.description = 'Update thread name by its unique slug.' 
+      #swagger.path = '/v1/workspace/{slug}/thread/{threadSlug}/update'
+      #swagger.parameters['slug'] = {
+        in: 'path',
+        description: 'Unique slug of workspace',
+        required: true,
+        type: 'string'
+      }
+      #swagger.parameters['threadSlug'] = {
+        in: 'path',
+        description: 'Unique slug of thread',
+        required: true,
+        type: 'string'
+      }
+      #swagger.requestBody = {
+        description: 'JSON object containing new name to update the thread.',
+        required: true,
+        type: 'object',
+        content: {
+          "application/json": {
+            example: {
+              "name": 'Updated Thread Name'
+            }
+          }
+        }
+      }
+      #swagger.responses[200] = {
+        content: {
+          "application/json": {
+            schema: {
+              type: 'object',
+              example: {
+                thread: {
+                  "id": 1,
+                  "name": "Updated Thread Name",
+                  "slug": "thread-uuid",
+                  "user_id": 1,
+                  "workspace_id": 1
+                },
+                message: null,
+              }
+            }
+          }
+        }
+      }
+      #swagger.responses[403] = {
+        schema: {
+          "$ref": "#/definitions/InvalidAPIKey"
+        }
+      }
+      */
+      try {
+        const { slug, threadSlug } = request.params;
+        const { name } = reqBody(request);
+        const workspace = await Workspace.get({ slug });
+        const thread = workspace // guard: workspace.id on a null workspace threw TypeError -> 500; respond 400 instead
+          ? await WorkspaceThread.get({
+              slug: threadSlug, workspace_id: workspace.id })
+          : null;
+
+        if (!workspace || !thread) {
+          response.sendStatus(400).end();
+          return;
+        }
+
+        const { thread: updatedThread, message } = await WorkspaceThread.update(
+          thread,
+          { name }
+        );
+        response.status(200).json({ thread: updatedThread, message });
+      } catch (e) {
+        console.log(e.message, e);
+        response.sendStatus(500).end();
+      }
+    }
+  );
+
+  app.delete(
+    "/v1/workspace/:slug/thread/:threadSlug",
+    [validApiKey],
+    async (request, response) => {
+      /*
+      #swagger.tags = ['Workspace Threads']
+      #swagger.description = 'Delete a workspace thread'
+      #swagger.parameters['slug'] = {
+        in: 'path',
+        description: 'Unique slug of workspace',
+        required: true,
+        type: 'string'
+      }
+      #swagger.parameters['threadSlug'] = {
+        in: 'path',
+        description: 'Unique slug of thread',
+        required: true,
+        type: 'string'
+      }
+      
#swagger.responses[200] = { + description: 'Thread deleted successfully' + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { slug, threadSlug } = request.params; + const workspace = await Workspace.get({ slug }); + + if (!workspace) { + response.sendStatus(400).end(); + return; + } + + await WorkspaceThread.delete({ + slug: threadSlug, + workspace_id: workspace.id, + }); + response.sendStatus(200).end(); + } catch (e) { + console.log(e.message, e); + response.sendStatus(500).end(); + } + } + ); + + app.get( + "/v1/workspace/:slug/thread/:threadSlug/chats", + [validApiKey], + async (request, response) => { + /* + #swagger.tags = ['Workspace Threads'] + #swagger.description = 'Get chats for a workspace thread' + #swagger.parameters['slug'] = { + in: 'path', + description: 'Unique slug of workspace', + required: true, + type: 'string' + } + #swagger.parameters['threadSlug'] = { + in: 'path', + description: 'Unique slug of thread', + required: true, + type: 'string' + } + #swagger.responses[200] = { + content: { + "application/json": { + schema: { + type: 'object', + example: { + history: [ + { + "role": "user", + "content": "What is AnythingLLM?", + "sentAt": 1692851630 + }, + { + "role": "assistant", + "content": "AnythingLLM is a platform that allows you to convert notes, PDFs, and other source materials into a chatbot. It ensures privacy, cites its answers, and allows multiple people to interact with the same documents simultaneously. It is particularly useful for businesses to enhance the visibility and readability of various written communications such as SOPs, contracts, and sales calls. 
You can try it out with a free trial to see if it meets your business needs.", + "sources": [{"source": "object about source document and snippets used"}] + } + ] + } + } + } + } + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { slug, threadSlug } = request.params; + const workspace = await Workspace.get({ slug }); + const thread = await WorkspaceThread.get({ + slug: threadSlug, + workspace_id: workspace.id, + }); + + if (!workspace || !thread) { + response.sendStatus(400).end(); + return; + } + + const history = await WorkspaceChats.where( + { + workspaceId: workspace.id, + thread_id: thread.id, + include: true, + }, + null, + { id: "asc" } + ); + + response.status(200).json({ history: convertToChatHistory(history) }); + } catch (e) { + console.log(e.message, e); + response.sendStatus(500).end(); + } + } + ); + + app.post( + "/v1/workspace/:slug/thread/:threadSlug/chat", + [validApiKey], + async (request, response) => { + /* + #swagger.tags = ['Workspace Threads'] + #swagger.description = 'Chat with a workspace thread' + #swagger.parameters['slug'] = { + in: 'path', + description: 'Unique slug of workspace', + required: true, + type: 'string' + } + #swagger.parameters['threadSlug'] = { + in: 'path', + description: 'Unique slug of thread', + required: true, + type: 'string' + } + #swagger.requestBody = { + description: 'Send a prompt to the workspace thread and the type of conversation (query or chat).', + required: true, + type: 'object', + content: { + "application/json": { + example: { + message: "What is AnythingLLM?", + mode: "query | chat", + userId: 1 + } + } + } + } + #swagger.responses[200] = { + content: { + "application/json": { + schema: { + type: 'object', + example: { + id: 'chat-uuid', + type: "abort | textResponse", + textResponse: "Response to your query", + sources: [{title: "anythingllm.txt", chunk: "This is a context chunk used in the answer of the prompt by the LLM."}], + close: 
true, + error: "null | text string of the failure mode." + } + } + } + } + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { slug, threadSlug } = request.params; + const { message, mode = "query", userId } = reqBody(request); + const workspace = await Workspace.get({ slug }); + const thread = await WorkspaceThread.get({ + slug: threadSlug, + workspace_id: workspace.id, + }); + + if (!workspace || !thread) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: `Workspace ${slug} or thread ${threadSlug} is not valid.`, + }); + return; + } + + if (!message?.length || !VALID_CHAT_MODE.includes(mode)) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: !message?.length + ? "message parameter cannot be empty." + : `${mode} is not a valid mode.`, + }); + return; + } + + const user = userId ? 
await User.get({ id: Number(userId) }) : null; + const result = await chatWithWorkspace( + workspace, + message, + mode, + user, + thread + ); + await Telemetry.sendTelemetry("sent_chat", { + LLMSelection: process.env.LLM_PROVIDER || "openai", + Embedder: process.env.EMBEDDING_ENGINE || "inherit", + VectorDbSelection: process.env.VECTOR_DB || "lancedb", + }); + await EventLogs.logEvent("api_sent_chat", { + workspaceName: workspace?.name, + chatModel: workspace?.chatModel || "System Default", + threadName: thread?.name, + userId: user?.id, + }); + response.status(200).json({ ...result }); + } catch (e) { + console.log(e.message, e); + response.status(500).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: e.message, + }); + } + } + ); + + app.post( + "/v1/workspace/:slug/thread/:threadSlug/stream-chat", + [validApiKey], + async (request, response) => { + /* + #swagger.tags = ['Workspace Threads'] + #swagger.description = 'Stream chat with a workspace thread' + #swagger.parameters['slug'] = { + in: 'path', + description: 'Unique slug of workspace', + required: true, + type: 'string' + } + #swagger.parameters['threadSlug'] = { + in: 'path', + description: 'Unique slug of thread', + required: true, + type: 'string' + } + #swagger.requestBody = { + description: 'Send a prompt to the workspace thread and the type of conversation (query or chat).', + required: true, + type: 'object', + content: { + "application/json": { + example: { + message: "What is AnythingLLM?", + mode: "query | chat", + userId: 1 + } + } + } + } + #swagger.responses[200] = { + content: { + "text/event-stream": { + schema: { + type: 'array', + example: [ + { + id: 'uuid-123', + type: "abort | textResponseChunk", + textResponse: "First chunk", + sources: [], + close: false, + error: "null | text string of the failure mode." 
+ }, + { + id: 'uuid-123', + type: "abort | textResponseChunk", + textResponse: "chunk two", + sources: [], + close: false, + error: "null | text string of the failure mode." + }, + { + id: 'uuid-123', + type: "abort | textResponseChunk", + textResponse: "final chunk of LLM output!", + sources: [{title: "anythingllm.txt", chunk: "This is a context chunk used in the answer of the prompt by the LLM. This will only return in the final chunk."}], + close: true, + error: "null | text string of the failure mode." + } + ] + } + } + } + } + #swagger.responses[403] = { + schema: { + "$ref": "#/definitions/InvalidAPIKey" + } + } + */ + try { + const { slug, threadSlug } = request.params; + const { message, mode = "query", userId } = reqBody(request); + const workspace = await Workspace.get({ slug }); + const thread = await WorkspaceThread.get({ + slug: threadSlug, + workspace_id: workspace.id, + }); + + if (!workspace || !thread) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: `Workspace ${slug} or thread ${threadSlug} is not valid.`, + }); + return; + } + + if (!message?.length || !VALID_CHAT_MODE.includes(mode)) { + response.status(400).json({ + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: !message?.length + ? "Message is empty" + : `${mode} is not a valid mode.`, + }); + return; + } + + const user = userId ? 
await User.get({ id: Number(userId) }) : null; + + response.setHeader("Cache-Control", "no-cache"); + response.setHeader("Content-Type", "text/event-stream"); + response.setHeader("Access-Control-Allow-Origin", "*"); + response.setHeader("Connection", "keep-alive"); + response.flushHeaders(); + + await streamChatWithWorkspace( + response, + workspace, + message, + mode, + user, + thread + ); + await Telemetry.sendTelemetry("sent_chat", { + LLMSelection: process.env.LLM_PROVIDER || "openai", + Embedder: process.env.EMBEDDING_ENGINE || "inherit", + VectorDbSelection: process.env.VECTOR_DB || "lancedb", + }); + await EventLogs.logEvent("api_sent_chat", { + workspaceName: workspace?.name, + chatModel: workspace?.chatModel || "System Default", + threadName: thread?.name, + userId: user?.id, + }); + response.end(); + } catch (e) { + console.log(e.message, e); + writeResponseChunk(response, { + id: uuidv4(), + type: "abort", + textResponse: null, + sources: [], + close: true, + error: e.message, + }); + response.end(); + } + } + ); +} + +module.exports = { apiWorkspaceThreadEndpoints }; diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index 8d548c7bc6adbcd403fe6f3ac2537ee7bc1436b6..4d998e819b3198404e583f40b3903dab367f3878 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -76,6 +76,7 @@ const SystemSettings = { "serper-dot-dev", "bing-search", "serply-engine", + "searxng-engine", ].includes(update) ) throw new Error("Invalid SERP provider."); @@ -176,10 +177,11 @@ const SystemSettings = { // Agent Settings & Configs // -------------------------------------------------------- AgentGoogleSearchEngineId: process.env.AGENT_GSE_CTX || null, - AgentGoogleSearchEngineKey: process.env.AGENT_GSE_KEY || null, - AgentSerperApiKey: process.env.AGENT_SERPER_DEV_KEY || null, - AgentBingSearchApiKey: process.env.AGENT_BING_SEARCH_API_KEY || null, - AgentSerplyApiKey: process.env.AGENT_SERPLY_API_KEY || null, + 
AgentGoogleSearchEngineKey: !!process.env.AGENT_GSE_KEY || null, + AgentSerperApiKey: !!process.env.AGENT_SERPER_DEV_KEY || null, + AgentBingSearchApiKey: !!process.env.AGENT_BING_SEARCH_API_KEY || null, + AgentSerplyApiKey: !!process.env.AGENT_SERPLY_API_KEY || null, + AgentSearXNGApiUrl: process.env.AGENT_SEARXNG_API_URL || null, }; }, diff --git a/server/models/workspaceChats.js b/server/models/workspaceChats.js index bda40064d5bb59ad307ee4c585a57f308b58b90a..951245204fe42d8bc9ea5d3e6ede5e34d176ac6d 100644 --- a/server/models/workspaceChats.js +++ b/server/models/workspaceChats.js @@ -7,6 +7,7 @@ const WorkspaceChats = { response = {}, user = null, threadId = null, + include = true, }) { try { const chat = await prisma.workspace_chats.create({ @@ -16,6 +17,7 @@ const WorkspaceChats = { response: JSON.stringify(response), user_id: user?.id || null, thread_id: threadId, + include, }, }); return { chat, message: null }; diff --git a/server/swagger/init.js b/server/swagger/init.js index 064814565a37f9524c4b7e3df0edfda855b4e8dc..31edcf1c47c72037947fddd4be4a7d398136c431 100644 --- a/server/swagger/init.js +++ b/server/swagger/init.js @@ -35,6 +35,7 @@ const endpointsFiles = [ "../endpoints/api/document/index.js", "../endpoints/api/workspace/index.js", "../endpoints/api/system/index.js", + "../endpoints/api/workspaceThread/index.js", "../endpoints/api/userManagement/index.js", ]; diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json index 2a1b55437529eceaa8fd24df484420b3e9d82104..d27504aaebc12568bf62cb057a740421dd1a4e39 100644 --- a/server/swagger/openapi.json +++ b/server/swagger/openapi.json @@ -2371,6 +2371,487 @@ } } }, + "/v1/workspace/{slug}/thread/new": { + "post": { + "tags": [ + "Workspace Threads" + ], + "description": "Create a new workspace thread", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of workspace" + } + ], + "responses": { + "200": { 
+ "description": "OK", + "content": { + "application/json": { + "schema": { + "type": "object", + "example": { + "thread": { + "id": 1, + "name": "Thread", + "slug": "thread-uuid", + "user_id": 1, + "workspace_id": 1 + }, + "message": null + } + } + } + } + }, + "400": { + "description": "Bad Request" + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + }, + "500": { + "description": "Internal Server Error" + } + }, + "requestBody": { + "description": "Optional userId associated with the thread", + "required": false, + "type": "object", + "content": { + "application/json": { + "example": { + "userId": 1 + } + } + } + } + } + }, + "/v1/workspace/{slug}/thread/{threadSlug}/update": { + "post": { + "tags": [ + "Workspace Threads" + ], + "description": "Update thread name by its unique slug.", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of workspace" + }, + { + "name": "threadSlug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of thread" + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "type": "object", + "example": { + "thread": { + "id": 1, + "name": "Updated Thread Name", + "slug": "thread-uuid", + "user_id": 1, + "workspace_id": 1 + }, + "message": null + } + } + } + } + }, + "400": { + "description": "Bad Request" + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + }, + "500": { + "description": "Internal Server Error" + } + }, + "requestBody": { + 
"description": "JSON object containing new name to update the thread.", + "required": true, + "type": "object", + "content": { + "application/json": { + "example": { + "name": "Updated Thread Name" + } + } + } + } + } + }, + "/v1/workspace/{slug}/thread/{threadSlug}": { + "delete": { + "tags": [ + "Workspace Threads" + ], + "description": "Delete a workspace thread", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of workspace" + }, + { + "name": "threadSlug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of thread" + } + ], + "responses": { + "200": { + "description": "Thread deleted successfully" + }, + "400": { + "description": "Bad Request" + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/v1/workspace/{slug}/thread/{threadSlug}/chats": { + "get": { + "tags": [ + "Workspace Threads" + ], + "description": "Get chats for a workspace thread", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of workspace" + }, + { + "name": "threadSlug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of thread" + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "type": "object", + "example": { + "history": [ + { + "role": "user", + "content": "What is AnythingLLM?", + "sentAt": 1692851630 + }, + { + "role": "assistant", + "content": "AnythingLLM is a platform that allows you to convert notes, PDFs, and other source materials into a chatbot. 
It ensures privacy, cites its answers, and allows multiple people to interact with the same documents simultaneously. It is particularly useful for businesses to enhance the visibility and readability of various written communications such as SOPs, contracts, and sales calls. You can try it out with a free trial to see if it meets your business needs.", + "sources": [ + { + "source": "object about source document and snippets used" + } + ] + } + ] + } + } + } + } + }, + "400": { + "description": "Bad Request" + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/v1/workspace/{slug}/thread/{threadSlug}/chat": { + "post": { + "tags": [ + "Workspace Threads" + ], + "description": "Chat with a workspace thread", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of workspace" + }, + { + "name": "threadSlug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of thread" + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "type": "object", + "example": { + "id": "chat-uuid", + "type": "abort | textResponse", + "textResponse": "Response to your query", + "sources": [ + { + "title": "anythingllm.txt", + "chunk": "This is a context chunk used in the answer of the prompt by the LLM." + } + ], + "close": true, + "error": "null | text string of the failure mode." 
+ } + } + } + } + }, + "400": { + "description": "Bad Request" + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + }, + "500": { + "description": "Internal Server Error" + } + }, + "requestBody": { + "description": "Send a prompt to the workspace thread and the type of conversation (query or chat).", + "required": true, + "type": "object", + "content": { + "application/json": { + "example": { + "message": "What is AnythingLLM?", + "mode": "query | chat", + "userId": 1 + } + } + } + } + } + }, + "/v1/workspace/{slug}/thread/{threadSlug}/stream-chat": { + "post": { + "tags": [ + "Workspace Threads" + ], + "description": "Stream chat with a workspace thread", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of workspace" + }, + { + "name": "threadSlug", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Unique slug of thread" + } + ], + "responses": { + "200": { + "content": { + "text/event-stream": { + "schema": { + "type": "array", + "example": [ + { + "id": "uuid-123", + "type": "abort | textResponseChunk", + "textResponse": "First chunk", + "sources": [], + "close": false, + "error": "null | text string of the failure mode." + }, + { + "id": "uuid-123", + "type": "abort | textResponseChunk", + "textResponse": "chunk two", + "sources": [], + "close": false, + "error": "null | text string of the failure mode." + }, + { + "id": "uuid-123", + "type": "abort | textResponseChunk", + "textResponse": "final chunk of LLM output!", + "sources": [ + { + "title": "anythingllm.txt", + "chunk": "This is a context chunk used in the answer of the prompt by the LLM. This will only return in the final chunk." 
+ } + ], + "close": true, + "error": "null | text string of the failure mode." + } + ] + } + } + }, + "description": "OK" + }, + "400": { + "description": "Bad Request" + }, + "403": { + "description": "Forbidden", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/InvalidAPIKey" + } + } + } + } + }, + "requestBody": { + "description": "Send a prompt to the workspace thread and the type of conversation (query or chat).", + "required": true, + "type": "object", + "content": { + "application/json": { + "example": { + "message": "What is AnythingLLM?", + "mode": "query | chat", + "userId": 1 + } + } + } + } + } + }, "/v1/users": { "get": { "tags": [ diff --git a/server/utils/agents/aibitat/plugins/web-browsing.js b/server/utils/agents/aibitat/plugins/web-browsing.js index 81314f178f12157b5ef80277de4c1aafa0d9399f..f4269fe139ff2c660085ee0b174c396b2769d8c5 100644 --- a/server/utils/agents/aibitat/plugins/web-browsing.js +++ b/server/utils/agents/aibitat/plugins/web-browsing.js @@ -71,6 +71,9 @@ const webBrowsing = { case "serply-engine": engine = "_serplyEngine"; break; + case "searxng-engine": + engine = "_searXNGEngine"; + break; default: engine = "_googleSearchEngine"; } @@ -102,7 +105,7 @@ const webBrowsing = { query.length > 100 ? 
`${query.slice(0, 100)}...` : query
        }"`
      );
-      const searchResponse = await fetch(searchURL)
+      const data = await fetch(searchURL)
         .then((res) => res.json())
         .then((searchResult) => searchResult?.items || [])
         .then((items) => {
@@ -116,10 +119,15 @@
         })
         .catch((e) => {
           console.log(e);
-          return {};
+          return [];
         });
 
-      return JSON.stringify(searchResponse);
+      if (data.length === 0)
+        return `No information was found online for the search query.`;
+      this.super.introspect(
+        `${this.caller}: I found ${data.length} results - looking over them now.`
+      );
+      return JSON.stringify(data);
     },
 
     /**
@@ -176,6 +184,9 @@
       if (data.length === 0)
         return `No information was found online for the search query.`;
 
+      this.super.introspect(
+        `${this.caller}: I found ${data.length} results - looking over them now.`
+      );
       return JSON.stringify(data);
     },
     _bingWebSearch: async function (query) {
@@ -219,6 +230,9 @@
       if (searchResponse.length === 0)
         return `No information was found online for the search query.`;
 
+      this.super.introspect(
+        `${this.caller}: I found ${searchResponse.length} results - looking over them now.`
+      );
       return JSON.stringify(searchResponse);
     },
     _serplyEngine: async function (
@@ -293,6 +307,71 @@
       if (data.length === 0)
         return `No information was found online for the search query.`;
 
+      this.super.introspect(
+        `${this.caller}: I found ${data.length} results - looking over them now.`
+      );
+      return JSON.stringify(data);
+    },
+    _searXNGEngine: async function (query) {
+      let searchURL;
+      if (!process.env.AGENT_SEARXNG_API_URL) {
+        this.super.introspect(
+          `${this.caller}: I can't use SearXNG searching because the user has not defined the required base URL.\nPlease set this value in the agent skill settings.`
+        );
+        return `Search is disabled and no content was found. This functionality is disabled because the user has not set it up yet.`;
+      }
+
+      try {
+        searchURL = new URL(process.env.AGENT_SEARXNG_API_URL);
+        searchURL.searchParams.append("q", query);
+        searchURL.searchParams.append("format", "json");
+      } catch (e) {
+        this.super.handlerProps.log(`SearXNG Search: ${e.message}`);
+        this.super.introspect(
+          `${this.caller}: I can't use SearXNG searching because the url provided is not a valid URL.`
+        );
+        return `Search is disabled and no content was found. This functionality is disabled because the user has not set it up yet.`;
+      }
+
+      this.super.introspect(
+        `${this.caller}: Using SearXNG to search for "${
+          query.length > 100 ? `${query.slice(0, 100)}...` : query
+        }"`
+      );
+
+      const { response, error } = await fetch(searchURL.toString(), {
+        method: "GET",
+        headers: {
+          "Content-Type": "application/json",
+          "User-Agent": "anything-llm",
+        },
+      })
+        .then((res) => res.json())
+        .then((data) => {
+          return { response: data, error: null };
+        })
+        .catch((e) => {
+          return { response: null, error: e.message };
+        });
+      if (error)
+        return `There was an error searching for content. ${error}`;
+
+      const data = [];
+      response.results?.forEach((searchResult) => {
+        const { url, title, content, publishedDate } = searchResult;
+        data.push({
+          title,
+          link: url,
+          snippet: content,
+          publishedDate,
+        });
+      });
+
+      if (data.length === 0)
+        return `No information was found online for the search query.`;
+      this.super.introspect(
+        `${this.caller}: I found ${data.length} results - looking over them now.`
+      );
       return JSON.stringify(data);
     },
   });
diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js
index b6258c2e336cc6dd90738488dcf89e6285aca2f9..f3e0baae224e8ffca10880d3c13cc91f3e1b8773 100644
--- a/server/utils/chats/index.js
+++ b/server/utils/chats/index.js
@@ -77,15 +77,30 @@ async function chatWithWorkspace(
   // User is trying to query-mode chat a workspace that has no data in it - so
   // we should exit early as no information can be found under these conditions.
   if ((!hasVectorizedSpace || embeddingsCount === 0) && chatMode === "query") {
+    const textResponse =
+      workspace?.queryRefusalResponse ??
+      "There is no relevant information in this workspace to answer your query.";
+
+    await WorkspaceChats.new({
+      workspaceId: workspace.id,
+      prompt: message,
+      response: {
+        text: textResponse,
+        sources: [],
+        type: chatMode,
+      },
+      threadId: thread?.id || null,
+      include: false,
+      user,
+    });
+
     return {
       id: uuid,
       type: "textResponse",
       sources: [],
       close: true,
       error: null,
-      textResponse:
-        workspace?.queryRefusalResponse ??
-        "There is no relevant information in this workspace to answer your query.",
+      textResponse,
     };
   }
 
@@ -172,15 +187,30 @@ async function chatWithWorkspace(
   // If in query mode and no context chunks are found from search, backfill, or pins - do not
   // let the LLM try to hallucinate a response or use general knowledge and exit early
   if (chatMode === "query" && contextTexts.length === 0) {
+    const textResponse =
+      workspace?.queryRefusalResponse ??
+ "There is no relevant information in this workspace to answer your query."; + + await WorkspaceChats.new({ + workspaceId: workspace.id, + prompt: message, + response: { + text: textResponse, + sources: [], + type: chatMode, + }, + threadId: thread?.id || null, + include: false, + user, + }); + return { id: uuid, type: "textResponse", sources: [], close: true, error: null, - textResponse: - workspace?.queryRefusalResponse ?? - "There is no relevant information in this workspace to answer your query.", + textResponse, }; } diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js index ced9a97109430a7b28a4215b9f17357dd14b7d1b..770e6cb6b8253f5a1a42f5bdecf4c9ebaba04a77 100644 --- a/server/utils/chats/stream.js +++ b/server/utils/chats/stream.js @@ -75,16 +75,29 @@ async function streamChatWithWorkspace( // User is trying to query-mode chat a workspace that has no data in it - so // we should exit early as no information can be found under these conditions. if ((!hasVectorizedSpace || embeddingsCount === 0) && chatMode === "query") { + const textResponse = + workspace?.queryRefusalResponse ?? + "There is no relevant information in this workspace to answer your query."; writeResponseChunk(response, { id: uuid, type: "textResponse", - textResponse: - workspace?.queryRefusalResponse ?? 
- "There is no relevant information in this workspace to answer your query.", + textResponse, sources: [], close: true, error: null, }); + await WorkspaceChats.new({ + workspaceId: workspace.id, + prompt: message, + response: { + text: textResponse, + sources: [], + type: chatMode, + }, + threadId: thread?.id || null, + include: false, + user, + }); return; } @@ -177,16 +190,30 @@ async function streamChatWithWorkspace( // If in query mode and no context chunks are found from search, backfill, or pins - do not // let the LLM try to hallucinate a response or use general knowledge and exit early if (chatMode === "query" && contextTexts.length === 0) { + const textResponse = + workspace?.queryRefusalResponse ?? + "There is no relevant information in this workspace to answer your query."; writeResponseChunk(response, { id: uuid, type: "textResponse", - textResponse: - workspace?.queryRefusalResponse ?? - "There is no relevant information in this workspace to answer your query.", + textResponse, sources: [], close: true, error: null, }); + + await WorkspaceChats.new({ + workspaceId: workspace.id, + prompt: message, + response: { + text: textResponse, + sources: [], + type: chatMode, + }, + threadId: thread?.id || null, + include: false, + user, + }); return; } diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index 78d84053995e351237ebaa9ac341faa07df34227..c82cdaf9eda77c787400aaf93b71af5061d47271 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -407,6 +407,10 @@ const KEY_MAPPING = { envKey: "AGENT_SERPLY_API_KEY", checks: [], }, + AgentSearXNGApiUrl: { + envKey: "AGENT_SEARXNG_API_URL", + checks: [], + }, // TTS/STT Integration ENVS TextToSpeechProvider: {