# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml
# Configuration version (required)
version: 1.2.1
# Cache settings: Set to true to enable caching
cache: true
# Custom interface configuration
interface:
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true
  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true
    modalAcceptance: true
    modalTitle: "Terms of Service for LibreChat"
    modalContent: |
      # Terms and Conditions for LibreChat
      Please do not use ChatGPT, since LibreChat is better 😀
      Regards, Vaibhav
  endpointsMenu: true
  modelSelect: true
  parameters: true
  sidePanel: true
  presets: true
  prompts: true
  bookmarks: true
  multiConvo: true
  agents: true
# Example Registration Object Structure (optional)
registration:
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook']
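  # Note: each provider listed above also needs its OAuth credentials configured
  # in .env (e.g., GITHUB_CLIENT_ID / GITHUB_CLIENT_SECRET for 'github'); see the
  # Configuration Guide for the exact variable names your version expects.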
  # allowedDomains:
  #   - "gmail.com"
# speech:
#   tts:
#     openai:
#       url: ''
#       apiKey: '${TTS_API_KEY}'
#       model: ''
#       voices: ['']
#
#   stt:
#     openai:
#       url: ''
#       apiKey: '${STT_API_KEY}'
#       model: ''
# rateLimits:
#   fileUploads:
#     ipMax: 100
#     ipWindowInMinutes: 60 # Rate limit window for file uploads per IP
#     userMax: 50
#     userWindowInMinutes: 60 # Rate limit window for file uploads per user
#   conversationsImport:
#     ipMax: 100
#     ipWindowInMinutes: 60 # Rate limit window for conversation imports per IP
#     userMax: 50
#     userWindowInMinutes: 60 # Rate limit window for conversation imports per user
# Example Actions Object Structure
actions:
  # entries should be bare domains, without a URL scheme
  allowedDomains:
    - "swapi.dev"
    - "librechat.ai"
    - "google.com"
    - "api.e2b.dev"
# Example MCP Servers Object Structure
mcpServers:
  # everything:
  #   type: sse # type can optionally be omitted
  #   url: https://787d-182-69-182-121.ngrok-free.app/
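  # Note: 'sse' servers like the example above are configured with just a url,
  # while 'stdio' servers below spawn a local process via command/args.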
  # memory:
  #   type: stdio
  #   command: npx
  #   args:
  #     - -y
  #     - "@modelcontextprotocol/server-memory"
  #   timeout: 60000000
  # apify:
  #   type: stdio
  #   command: npx
  #   args:
  #     - -y
  #     - "@apify/actors-mcp-server"
  #     - --actors
  #     - apify/screenshot-url,apify/website-content-crawler,apify/puppeteer-scraper,apify/rag-web-browser,jancurn/screenshot-taker,apify/cheerio-scraper,apify/playwright-scraper,apify/ai-web-agent,marco.gullo/page-printer,dz_omar/example-website-screenshot-crawler,apify/legacy-phantomjs-crawler,lukaskrivka/article-extractor-smart
  #   timeout: 60000000
  #   env:
  #     APIFY_TOKEN: "${APIFY_TOKEN}" # keep the real token in .env, not in this file
  #     PATH: "/usr/local/bin:/usr/bin:/bin"
  #     NODE_PATH: "/usr/local/lib/node_modules"
  hfspace:
    type: stdio
    command: npx
    args:
      - -y
      - "@llmindset/mcp-hfspace"
      - --HF_TOKEN=${HF_TOKEN}
      - --work-dir=/app/uploads/temp/
      - Qwen/Qwen2.5-Max-Demo
      - evalstate/FLUX.1-schnell
    timeout: 60000000
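    # Note: MCP timeouts are in milliseconds; 60000000 ms is roughly 16.7 hours,
    # presumably set this high so long-running Space jobs are never cut off.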
    # env:
    #   E2B_API_KEY: "${E2B_API_KEY}" # keep the real key in .env, not in this file
    #   PATH: "/usr/local/bin:/usr/bin:/bin"
    #   NODE_PATH: "/usr/local/lib/node_modules"
  exa:
    type: stdio
    command: npx
    args:
      - -y
      - "/app/exa-mcp-server/build/index.js"
      # - --HF_TOKEN=${HF_TOKEN}
    timeout: 60000000
    env:
      EXA_API_KEY: "${EXA_API_KEY}" # keep the real key in .env, not in this file
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
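    # Note: the arg above points at a locally built script rather than an npm
    # package name; running it with 'command: node' may be more direct than npx,
    # which expects a package specifier.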
  arxiv:
    type: stdio
    command: python
    args:
      - -m
      - mcp_simple_arxiv
    timeout: 60000000
  pubmed:
    type: stdio
    command: python
    args:
      - -m
      - mcp_simple_pubmed
    env:
      PUBMED_EMAIL: "vaibhavarduino@gmail.com"
      PUBMED_API_KEY: "${PUBMED_API_KEY}" # keep the real key in .env, not in this file
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
  memory:
    type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-memory"
    timeout: 60000000
  filesystem:
    # type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-filesystem"
      - /app/
  codesandbox:
    type: stdio
    command: python
    args:
      - tests.py
    timeout: 60000000
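    # Note: 'tests.py' is resolved relative to the LibreChat server's working
    # directory; prefer an absolute path if the server may start from elsewhere.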
# Definition of custom endpoints
endpoints:
  agents:
    recursionLimit: 50
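    # recursionLimit caps the number of sequential steps (model calls and tool
    # invocations) a single agent run may take before it is stopped.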
    disableBuilder: false
    capabilities:
      # - "execute_code"
      - "file_search"
      - "actions"
      - "tools"
  custom:
    # together.ai
    # https://api.together.ai/settings/api-keys
    # Model list: https://docs.together.ai/docs/inference-models
    - name: "together.ai"
      apiKey: '${TOGETHER_API_KEY}' # define this in .env with your together.ai key
      baseURL: "https://api.together.xyz"
      models:
        default: [
          "Gryphe/MythoMax-L2-13b",
          "Gryphe/MythoMax-L2-13b-Lite",
          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
          "Qwen/QwQ-32B-Preview",
          "Qwen/Qwen2-72B-Instruct",
          "Qwen/Qwen2-VL-72B-Instruct",
          "Qwen/Qwen2.5-72B-Instruct-Turbo",
          "Qwen/Qwen2.5-7B-Instruct-Turbo",
          "Qwen/Qwen2.5-Coder-32B-Instruct",
          "databricks/dbrx-instruct",
          "deepseek-ai/DeepSeek-R1",
          "deepseek-ai/DeepSeek-V3",
          "deepseek-ai/deepseek-llm-67b-chat",
          "dev-vfs/Qwen2-VL-72B-Instruct",
          "devuser/test-lora-model-creation-1",
          "devuser/test-lora-model-creation-10",
          "devuser/test-lora-model-creation-2",
          "devuser/test-lora-model-creation-3",
          "devuser/test-lora-model-creation-4",
          "devuser/test-lora-model-creation-5",
          "devuser/test-lora-model-creation-6",
          "devuser/test-lora-model-creation-7",
          "devuser/test-lora-model-creation-8",
          "devuser/test-lora-model-creation-9",
          "google/gemma-2-27b-it",
          "google/gemma-2-9b-it",
          "google/gemma-2b-it",
          "jd/test-lora-model-creation-2",
          "jd/test-min-lora-model-creation-2",
          "justindriemeyer_tai/test-lora-model-creation-3",
          "justindriemeyer_tai/test-lora-model-creation-4",
          "justindriemeyer_tai/test-lora-model-creation-5",
          "justindriemeyer_tai/test-lora-model-creation-6",
          "justindriemeyer_tai/test-lora-model-creation-7",
          "llava-hf/llava-v1.6-mistral-7b-hf",
          "meta-llama/Llama-2-13b-chat-hf",
          "meta-llama/Llama-2-7b-chat-hf",
          "meta-llama/Llama-3-70b-chat-hf",
          "meta-llama/Llama-3-8b-chat-hf",
          "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo",
          "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
          "meta-llama/Llama-Vision-Free",
          "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
          "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
          "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo-128K",
          "microsoft/WizardLM-2-8x22B",
          "mistralai/Mistral-7B-Instruct-v0.1",
          "mistralai/Mistral-7B-Instruct-v0.2",
          "mistralai/Mistral-7B-Instruct-v0.3",
          "mistralai/Mixtral-8x22B-Instruct-v0.1",
          "mistralai/Mixtral-8x7B-Instruct-v0.1",
          "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
          "salesforce/xgen-9b-instruct",
          "scb10x/llama-3-typhoon-v1.5-8b-instruct",
          "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
          "scb10x/scb10x-llama3-typhoon-v1-5x-4f316",
          "togethercomputer/Llama-3-8b-chat-hf-int4",
          "togethercomputer/Llama-3-8b-chat-hf-int8",
          "upstage/SOLAR-10.7B-Instruct-v1.0",
          "vfs/Qwen2-VL-72B-Instruct"
        ]
        fetch: false
      titleConvo: true
      titleModel: "openchat/openchat-3.5-1210"
      summarize: false
      summaryModel: "openchat/openchat-3.5-1210"
      forcePrompt: false
      modelDisplayLabel: "together.ai"
| - name: "OpenRouter" | |
| # For `apiKey` and `baseURL`, you can use environment variables that you define. | |
| # recommended environment variables: | |
| apiKey: "${OPENROUTER_KEY}" # NOT OPENROUTER_API_KEY | |
| baseURL: "https://openrouter.ai/api/v1" | |
| models: | |
| default: ["deepseek/deepseek-chat","deepseek/deepseek-r1:free","google/gemini-2.0-flash-thinking-exp:free","google/gemini-2.0-flash-exp:free","google/gemini-exp-1206:free"] | |
| fetch: true | |
| titleConvo: true | |
| titleModel: "google/gemini-2.0-flash-exp:free" | |
| # Recommended: Drop the stop parameter from the request as Openrouter models use a variety of stop tokens. | |
| dropParams: ["stop"] | |
| modelDisplayLabel: "OpenRouter" | |
    # Groq Example
    - name: 'groq'
      apiKey: '${GROQ_API_KEY}'
      baseURL: 'https://api.groq.com/openai/v1/'
      models:
        default:
          [
            'deepseek-r1-distill-qwen-32b',
            'deepseek-r1-distill-llama-70b',
            'llama-3.3-70b-versatile',
            'mixtral-8x7b-32768',
            'qwen-qwq-32b',
          ]
        fetch: false
      titleConvo: true
      titleModel: 'mixtral-8x7b-32768'
      modelDisplayLabel: 'groq'
    - name: 'Tiny' # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: '${TINY_API_KEY}' # define this in .env; avoid hard-coding keys here
      baseURL: 'https://962f-182-69-182-236.ngrok-free.app'
      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default:
          [
            'gemini-2.0-flash-thinking-exp-01-21',
            'deepseek.r1',
            'deepseek-reasoner',
            'deepseek-chat',
            'gemini-2.0-pro-exp-02-05',
            'deepseek-r1-distill-llama-70b',
            'qwen-qwq-32b',
            'QwQ-32B',
            'llama-3.3-70b-versatile',
            'DeepSeek-V3',
            'DeepSeekV3-togetherAI',
            'DeepSeek-R1',
            'DeepSeekR1-togetherAI',
            'gpt-4o',
            'DeepSeek-R1-dev',
            'DeepSeek-V3-dev',
          ]
        # Fetch option: Set to true to fetch models from API.
        # fetch: true # Defaults to false.
      # Optional configurations
      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation
      modelDisplayLabel: 'AI' # Default is "AI" when not set.
    - name: 'Tiny-DEV' # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: '${TINY_API_KEY}' # define this in .env; avoid hard-coding keys here
      baseURL: 'https://akiko19191-backend.hf.space/'
      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default:
          [
            'gemini-2.0-flash-thinking-exp-01-21',
            'deepseek.r1',
            'deepseek-reasoner',
            'deepseek-chat',
            'gemini-2.0-pro-exp-02-05',
            'deepseek-r1-distill-llama-70b',
            'qwen-qwq-32b',
            'QwQ-32B',
            'llama-3.3-70b-versatile',
            'DeepSeek-V3',
            'DeepSeekV3-togetherAI',
            'DeepSeek-R1',
            'DeepSeekR1-togetherAI',
            'gpt-4o',
            'DeepSeek-R1-dev',
            'DeepSeek-V3-dev',
          ]
        # Fetch option: Set to true to fetch models from API.
        # fetch: false # Defaults to false.
      # Optional configurations
      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation
      modelDisplayLabel: 'Tiny' # Default is "AI" when not set.
    # # Mistral AI Example
    # - name: 'Mistral' # Unique name for the endpoint
    #   # For `apiKey` and `baseURL`, you can use environment variables that you define.
    #   # recommended environment variables:
    #   apiKey: '${MISTRAL_API_KEY}'
    #   baseURL: 'https://api.mistral.ai/v1'
    #   # Models configuration
    #   models:
    #     # List of default models to use. At least one value is required.
    #     default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
    #     # Fetch option: Set to true to fetch models from API.
    #     fetch: true # Defaults to false.
    #   # Optional configurations
    #   # Title Conversation setting
    #   titleConvo: true # Set to true to enable title conversation
    #   # Title Method: Choose between "completion" or "functions".
    #   # titleMethod: "completion" # Defaults to "completion" if omitted.
    #   # Title Model: Specify the model to use for titles.
    #   titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.
    #   # Summarize setting: Set to true to enable summarization.
    #   # summarize: false
    #   # Summary Model: Specify the model to use if summarization is enabled.
    #   # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
    #   # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
    #   # forcePrompt: false
    #   # The label displayed for the AI model in messages.
    #   modelDisplayLabel: 'Mistral' # Default is "AI" when not set.
    #   # Add additional parameters to the request. Default params will be overwritten.
    #   # addParams:
    #   #   safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
    #   # Drop default parameters from the request. See default params in the guide linked below.
    #   # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
    #   dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
    # # OpenRouter Example
    # - name: 'OpenRouter'
    #   # For `apiKey` and `baseURL`, you can use environment variables that you define.
    #   # recommended environment variables:
    #   # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
    #   apiKey: '${OPENROUTER_KEY}'
    #   baseURL: 'https://openrouter.ai/api/v1'
    #   models:
    #     default: ['meta-llama/llama-3-70b-instruct']
    #     fetch: true
    #   titleConvo: true
    #   titleModel: 'meta-llama/llama-3-70b-instruct'
    #   # Recommended: Drop the stop parameter from the request, as OpenRouter models use a variety of stop tokens.
    #   dropParams: ['stop']
    #   modelDisplayLabel: 'OpenRouter'
    # # Portkey AI Example
    # - name: "Portkey"
    #   apiKey: "dummy"
    #   baseURL: 'https://api.portkey.ai/v1'
    #   headers:
    #     x-portkey-api-key: '${PORTKEY_API_KEY}'
    #     x-portkey-virtual-key: '${PORTKEY_OPENAI_VIRTUAL_KEY}'
    #   models:
    #     default: ['gpt-4o-mini', 'gpt-4o', 'chatgpt-4o-latest']
    #     fetch: true
    #   titleConvo: true
    #   titleModel: 'current_model'
    #   summarize: false
    #   summaryModel: 'current_model'
    #   forcePrompt: false
    #   modelDisplayLabel: 'Portkey'
    #   iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf
fileConfig:
  endpoints:
    agents:
      fileLimit: 5
      fileSizeLimit: 100 # Maximum size for an individual file in MB
      totalSizeLimit: 500 # Maximum total size for all files in a single request in MB
      supportedMimeTypes:
        - "image/.*"
        - "application/pdf"
        - "video/.*"
        - "application/vnd.ms-excel"
        - "audio/mp3"
        - "audio/mpeg"
        - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        - "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        - "application/msword"
    # openAI:
    #   disabled: true # Disables file uploading to the OpenAI endpoint
    # default:
    #   totalSizeLimit: 20
    # YourCustomEndpointName:
    #   fileLimit: 2
    #   fileSizeLimit: 5
  # serverFileSizeLimit: 100 # Global server file size limit in MB
  # avatarSizeLimit: 2 # Limit for user avatar image size in MB
# See the Custom Configuration Guide for more information on Assistants Config:
# https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint