ritvit committed on
Commit
fcc40fc
1 Parent(s): 4e30714

Update librechat.yaml

Browse files
Files changed (1) hide show
  1. librechat.yaml +1080 -59
librechat.yaml CHANGED
@@ -1,70 +1,742 @@
1
  # Configuration version (required)
2
  version: 1.0.1
3
 
4
- # Cache settings: Set to true to enable caching
5
  cache: true
6
 
7
- # Definition of custom endpoints
 
 
8
  endpoints:
9
  custom:
10
- # Mistral AI API
11
- - name: "Mistral" # Unique name for the endpoint
12
- # For `apiKey` and `baseURL`, you can use environment variables that you define.
13
- # recommended environment variables:
14
- apiKey: "${MISTRAL_API_KEY}"
15
- baseURL: "https://api.mistral.ai/v1"
16
-
17
- # Models configuration
18
- models:
19
- # List of default models to use. At least one value is required.
20
- default: ["mistral-tiny", "mistral-small", "mistral-medium"]
21
- # Fetch option: Set to true to fetch models from API.
22
- fetch: true # Defaults to false.
23
-
24
- # Optional configurations
25
-
26
- # Title Conversation setting
27
- titleConvo: true # Set to true to enable title conversation
28
-
29
- # Title Method: Choose between "completion" or "functions".
30
- titleMethod: "completion" # Defaults to "completion" if omitted.
 
 
 
 
 
31
 
32
- # Title Model: Specify the model to use for titles.
33
- titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- # Summarize setting: Set to true to enable summarization.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  summarize: false
 
 
 
 
 
 
37
 
38
- # Summary Model: Specify the model to use if summarization is enabled.
39
- summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
- # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  forcePrompt: false
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
- # The label displayed for the AI model in messages.
45
- modelDisplayLabel: "Mistral" # Default is "AI" when not set.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
- # Add additional parameters to the request. Default params will be overwritten.
48
- addParams:
49
- safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
50
-
51
- # Drop Default params parameters from the request. See default params in guide linked below.
52
- dropParams: ["stop", "temperature", "top_p"]
53
- # - stop # dropped since it's not recognized by Mistral AI API
54
- # `temperature` and `top_p` are removed to allow Mistral AI API defaults to be used:
55
- # - temperature
56
- # - top_p
 
 
 
 
 
 
 
 
 
 
 
57
 
58
- # OpenRouter.ai Example
 
 
59
  - name: "OpenRouter"
60
- # For `apiKey` and `baseURL`, you can use environment variables that you define.
61
- # recommended environment variables:
62
- # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
63
  apiKey: "${OPENROUTER_KEY}"
64
  baseURL: "https://openrouter.ai/api/v1"
65
  models:
66
- default: ["nousresearch/nous-capybara-7b:free", "mistralai/mistral-7b-instruct:free", "huggingfaceh4/zephyr-7b-beta:free", "openchat/openchat-7b:free", "gryphe/mythomist-7b:free", "undi95/toppy-m-7b:free", "openrouter/cinematika-7b:free", "openrouter/auto", "nousresearch/nous-capybara-7b", "mistralai/mistral-7b-instruct", "huggingfaceh4/zephyr-7b-beta", "openchat/openchat-7b", "gryphe/mythomist-7b", "openrouter/cinematika-7b", "rwkv/rwkv-5-world-3b", "recursal/rwkv-5-3b-ai-town", "jondurbin/bagel-34b", "jebcarter/psyfighter-13b", "koboldai/psyfighter-13b-2", "neversleep/noromaid-mixtral-8x7b-instruct", "nousresearch/nous-hermes-llama2-13b", "meta-llama/codellama-34b-instruct", "phind/phind-codellama-34b", "intel/neural-chat-7b", "nousresearch/nous-hermes-2-mixtral-8x7b-dpo", "nousresearch/nous-hermes-2-mixtral-8x7b-sft", "haotian-liu/llava-13b", "nousresearch/nous-hermes-2-vision-7b", "meta-llama/llama-2-13b-chat", "gryphe/mythomax-l2-13b", "nousresearch/nous-hermes-llama2-70b", "teknium/openhermes-2-mistral-7b", "teknium/openhermes-2.5-mistral-7b", "undi95/remm-slerp-l2-13b", "undi95/toppy-m-7b", "01-ai/yi-34b-chat", "01-ai/yi-34b", "01-ai/yi-6b", "togethercomputer/stripedhyena-nous-7b", "togethercomputer/stripedhyena-hessian-7b", "mistralai/mixtral-8x7b", "nousresearch/nous-hermes-yi-34b", "open-orca/mistral-7b-openorca", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-1106", "openai/gpt-3.5-turbo-16k", "openai/gpt-4-1106-preview", "openai/gpt-4", "openai/gpt-4-latest", "openai/gpt-4-32k", "openai/gpt-4-vision-preview", "openai/gpt-3.5-turbo-instruct", "google/palm-2-chat-bison", "google/palm-2-codechat-bison", "google/palm-2-chat-bison-32k", "google/palm-2-codechat-bison-32k", "google/gemini-pro", "google/gemini-pro-vision", "perplexity/pplx-70b-online", "perplexity/pplx-7b-online", "perplexity/pplx-7b-chat", "perplexity/pplx-70b-chat", "meta-llama/llama-2-70b-chat", "nousresearch/nous-capybara-34b", "jondurbin/airoboros-l2-70b", "austism/chronos-hermes-13b", 
"migtissera/synthia-70b", "pygmalionai/mythalion-13b", "undi95/remm-slerp-l2-13b-6k", "xwin-lm/xwin-lm-70b", "gryphe/mythomax-l2-13b-8k", "alpindale/goliath-120b ", "lizpreciatior/lzlv-70b-fp16-hf", "neversleep/noromaid-20b", "mistralai/mixtral-8x7b-instruct", "cognitivecomputations/dolphin-mixtral-8x7b", "anthropic/claude-2", "anthropic/claude-2.0", "anthropic/claude-instant-v1", "mancer/weaver", "mistralai/mistral-tiny", "mistralai/mistral-small", "mistralai/mistral-medium"]
67
- fetch: true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  titleConvo: true
69
  titleModel: "gpt-3.5-turbo"
70
  summarize: false
@@ -72,21 +744,370 @@ endpoints:
72
  forcePrompt: false
73
  modelDisplayLabel: "OpenRouter"
74
 
75
- - name: "Reverse Proxy"
76
- # For `apiKey` and `baseURL`, you can use environment variables that you define.
77
- # recommended environment variables:
78
- # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
79
- apiKey: "user_provided"
80
- baseURL: "user_provided"
81
  models:
82
- default: ["gpt-3.5-turbo"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  fetch: true
84
  titleConvo: true
85
- titleModel: "gpt-3.5-turbo"
 
86
  summarize: false
87
- summaryModel: "gpt-3.5-turbo"
88
  forcePrompt: false
89
- modelDisplayLabel: "AI"
 
90
 
91
- # See the Custom Configuration Guide for more information:
92
- # https://docs.librechat.ai/install/configuration/custom_config.html
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # Configuration version (required)
2
  version: 1.0.1
3
 
 
4
  cache: true
5
 
6
+ registration:
7
+ socialLogins: ["discord", "facebook", "github", "google", "openid"]
8
+
9
  endpoints:
10
  custom:
11
+ # Anyscale
12
+ # # Model list: https://console.anyscale.com/v2/playground
13
+ - name: "Anyscale"
14
+ apiKey: "${ANYSCALE_API_KEY}"
15
+ baseURL: "https://api.endpoints.anyscale.com/v1"
16
+ models:
17
+ default: [
18
+ "meta-llama/Llama-2-7b-chat-hf",
19
+ "meta-llama/Llama-2-13b-chat-hf",
20
+ "meta-llama/Llama-2-70b-chat-hf",
21
+ "codellama/CodeLlama-34b-Instruct-hf",
22
+ "codellama/CodeLlama-70b-Instruct-hf",
23
+ "mistralai/Mistral-7B-Instruct-v0.1",
24
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
25
+ "mlabonne/NeuralHermes-2.5-Mistral-7B",
26
+ "Open-Orca/Mistral-7B-OpenOrca",
27
+ "HuggingFaceH4/zephyr-7b-beta",
28
+ "google/gemma-7b-it"
29
+ ]
30
+ fetch: false
31
+ titleConvo: true
32
+ titleModel: "meta-llama/Llama-2-7b-chat-hf"
33
+ summarize: false
34
+ summaryModel: "meta-llama/Llama-2-7b-chat-hf"
35
+ forcePrompt: false
36
+ modelDisplayLabel: "Anyscale"
37
 
38
+ # APIpie
39
+ # https://apipie.ai/dashboard/
40
+ # Script to fetch models: https://github.com/LibreChat-AI/librechat-config-yaml/blob/main/scripts/apipie.py
41
+ - name: "APIpie"
42
+ apiKey: "${APIPIE_API_KEY}"
43
+ baseURL: "https://apipie.ai/v1/"
44
+ models:
45
+ default: [
46
+ "GPT-JT-Moderation-6B",
47
+ "LLaMA-2-7B-32K",
48
+ "Llama-2-13b-chat-hf",
49
+ "Llama-2-13b-hf",
50
+ "Llama-2-70b-chat-hf",
51
+ "Llama-2-70b-hf",
52
+ "Llama-2-7B-32K-Instruct",
53
+ "Llama-2-7b-chat-hf",
54
+ "Llama-2-7b-hf",
55
+ "Meta-Llama-3-70B-Instruct",
56
+ "Meta-Llama-3-8B",
57
+ "Meta-Llama-3-8B-Instruct",
58
+ "Mistral-7B-Instruct-v0.1",
59
+ "Mistral-7B-Instruct-v0.2",
60
+ "Mistral-7B-OpenOrca",
61
+ "Mixtral-8x22B-Instruct-v0.1",
62
+ "Mixtral-8x22B-v0.1",
63
+ "Mixtral-8x7B-Instruct-v0.1",
64
+ "Mixtral-8x7B-v0.1",
65
+ "MythoMax-L2-13b",
66
+ "NexusRaven-V2-13B",
67
+ "Nous-Hermes-2-Mixtral-8x7B-DPO",
68
+ "Nous-Hermes-2-Mixtral-8x7B-SFT",
69
+ "Nous-Hermes-Llama2-13b",
70
+ "Nous-Hermes-llama-2-7b",
71
+ "ReMM-SLERP-L2-13B",
72
+ "RedPajama-INCITE-7B-Base",
73
+ "RedPajama-INCITE-7B-Chat",
74
+ "RedPajama-INCITE-Chat-3B-v1",
75
+ "TinyLlama-1.1B-Chat-v1.0",
76
+ "Toppy-M-7B",
77
+ "WizardLM-2-7B",
78
+ "WizardLM-2-8x22B",
79
+ "Yi-34B-Chat",
80
+ "airoboros-70b",
81
+ "airoboros-l2-70b",
82
+ "alpaca-7b",
83
+ "babbage-002",
84
+ "chat-bison",
85
+ "chatgpt-4o-latest",
86
+ "chronos-hermes-13b",
87
+ "chronos-hermes-13b-v2",
88
+ "claude-1",
89
+ "claude-1.2",
90
+ "claude-2",
91
+ "claude-2.0",
92
+ "claude-2.1",
93
+ "claude-3-5-sonnet",
94
+ "claude-3-5-sonnet-20240620-v1",
95
+ "claude-3-haiku",
96
+ "claude-3-haiku-20240307-v1",
97
+ "claude-3-opus",
98
+ "claude-3-sonnet",
99
+ "claude-3-sonnet-20240229-v1",
100
+ "claude-3.5-sonnet",
101
+ "claude-instant-1",
102
+ "claude-instant-1.0",
103
+ "claude-instant-1.1",
104
+ "claude-instant-1.2",
105
+ "claude-instant-v1",
106
+ "claude-v2",
107
+ "codellama-34b-instruct",
108
+ "codellama-70b-instruct",
109
+ "codestral-mamba",
110
+ "command",
111
+ "command-light",
112
+ "command-light-nightly",
113
+ "command-light-text-v14",
114
+ "command-nightly",
115
+ "command-r",
116
+ "command-r-plus",
117
+ "command-r-plus-v1",
118
+ "command-r-v1",
119
+ "command-text-v14",
120
+ "davinci-002",
121
+ "dbrx-instruct",
122
+ "deepseek-chat",
123
+ "deepseek-coder",
124
+ "dolphin-2.5-mixtral-8x7b",
125
+ "dolphin-2.6-mixtral-8x7b",
126
+ "dolphin-llama-3-70b",
127
+ "dolphin-mixtral-8x22b",
128
+ "dolphin-mixtral-8x7b",
129
+ "eagle-7b",
130
+ "fimbulvetr-11b-v2",
131
+ "firellava-13b",
132
+ "gemini-1.5-flash",
133
+ "gemini-1.5-pro",
134
+ "gemini-flash-1.5",
135
+ "gemini-pro",
136
+ "gemini-pro-1.5",
137
+ "gemini-pro-1.5-exp",
138
+ "gemini-pro-vision",
139
+ "gemma-1.1-7b-it",
140
+ "gemma-2-27b-it",
141
+ "gemma-2-9b-it",
142
+ "gemma-7b-it",
143
+ "goliath-120b",
144
+ "gpt-3.5-turbo",
145
+ "gpt-3.5-turbo-0125",
146
+ "gpt-3.5-turbo-0301",
147
+ "gpt-3.5-turbo-0613",
148
+ "gpt-3.5-turbo-1106",
149
+ "gpt-3.5-turbo-16k",
150
+ "gpt-3.5-turbo-16k-0613",
151
+ "gpt-3.5-turbo-instruct",
152
+ "gpt-3.5-turbo-instruct-0914",
153
+ "gpt-4",
154
+ "gpt-4-0125-preview",
155
+ "gpt-4-0314",
156
+ "gpt-4-0613",
157
+ "gpt-4-1106-preview",
158
+ "gpt-4-1106-vision-preview",
159
+ "gpt-4-32k",
160
+ "gpt-4-32k-0314",
161
+ "gpt-4-turbo",
162
+ "gpt-4-turbo-2024-04-09",
163
+ "gpt-4-turbo-preview",
164
+ "gpt-4-vision-preview",
165
+ "gpt-4o",
166
+ "gpt-4o-2024-05-13",
167
+ "gpt-4o-2024-08-06",
168
+ "gpt-4o-mini",
169
+ "gpt-4o-mini-2024-07-18",
170
+ "hermes-2-pro-llama-3-8b",
171
+ "hermes-2-theta-llama-3-8b",
172
+ "hermes-3-llama-3.1-405b",
173
+ "hermes-3-llama-3.1-70b",
174
+ "j2-grande-instruct",
175
+ "j2-jumbo-instruct",
176
+ "j2-mid",
177
+ "j2-mid-v1",
178
+ "j2-ultra",
179
+ "j2-ultra-v1",
180
+ "jamba-1-5-large",
181
+ "jamba-1-5-mini",
182
+ "jamba-instruct",
183
+ "jamba-instruct-v1",
184
+ "l3-euryale-70b",
185
+ "l3-lunaris-8b",
186
+ "l3-stheno-8b",
187
+ "large-latest",
188
+ "llama-2-13b-chat",
189
+ "llama-2-70b-chat",
190
+ "llama-3-70b",
191
+ "llama-3-70b-instruct",
192
+ "llama-3-8b",
193
+ "llama-3-8b-instruct",
194
+ "llama-3-lumimaid-70b",
195
+ "llama-3-lumimaid-8b",
196
+ "llama-3-sonar-large-32k-chat",
197
+ "llama-3-sonar-large-32k-online",
198
+ "llama-3-sonar-small-32k-chat",
199
+ "llama-3-sonar-small-32k-online",
200
+ "llama-3.1-405b",
201
+ "llama-3.1-405b-instruct",
202
+ "llama-3.1-70b-instruct",
203
+ "llama-3.1-8b-instruct",
204
+ "llama-3.1-sonar-huge-128k-online",
205
+ "llama-3.1-sonar-large-128k-chat",
206
+ "llama-3.1-sonar-large-128k-online",
207
+ "llama-3.1-sonar-small-128k-chat",
208
+ "llama-3.1-sonar-small-128k-online",
209
+ "llama-guard-2-8b",
210
+ "llama2-13b-chat-v1",
211
+ "llama2-70b-chat-v1",
212
+ "llama3-70b-instruct-v1",
213
+ "llama3-70b-instruct-v1:0",
214
+ "llama3-8b-instruct-v1",
215
+ "llama3-8b-instruct-v1:0",
216
+ "llava-1.5-7b-hf",
217
+ "lzlv-70b-fp16-hf",
218
+ "lzlv_70b_fp16_hf",
219
+ "magnum-72b",
220
+ "medium",
221
+ "midnight-rose-70b",
222
+ "mistral-7b-instruct",
223
+ "mistral-7b-instruct-v0",
224
+ "mistral-7b-instruct-v0.1",
225
+ "mistral-7b-instruct-v0.2",
226
+ "mistral-7b-instruct-v0.3",
227
+ "mistral-7b-openorca",
228
+ "mistral-large",
229
+ "mistral-large-2402-v1",
230
+ "mistral-medium",
231
+ "mistral-nemo",
232
+ "mistral-small",
233
+ "mistral-small-2402-v1",
234
+ "mistral-tiny",
235
+ "mixtral-8x22b",
236
+ "mixtral-8x22b-instruct",
237
+ "mixtral-8x7b",
238
+ "mixtral-8x7b-instruct",
239
+ "mixtral-8x7b-instruct-v0",
240
+ "mn-celeste-12b",
241
+ "mn-starcannon-12b",
242
+ "mythalion-13b",
243
+ "mythomax-l2-13b",
244
+ "mythomist-7b",
245
+ "noromaid-20b",
246
+ "nous-capybara-7b",
247
+ "nous-hermes-2-mistral-7b-dpo",
248
+ "nous-hermes-2-mixtral-8x7b-dpo",
249
+ "nous-hermes-2-mixtral-8x7b-sft",
250
+ "nous-hermes-2-vision-7b",
251
+ "nous-hermes-llama2-13b",
252
+ "nous-hermes-yi-34b",
253
+ "olmo-7b-instruct",
254
+ "olympus-premier-v1",
255
+ "openchat-3.5-1210",
256
+ "openchat-7b",
257
+ "openchat-8b",
258
+ "openchat_3.5",
259
+ "openhermes-2-mistral-7b",
260
+ "openhermes-2.5-mistral-7b",
261
+ "palm-2-chat-bison",
262
+ "palm-2-chat-bison-32k",
263
+ "palm-2-codechat-bison",
264
+ "palm-2-codechat-bison-32k",
265
+ "phi-2",
266
+ "phi-3-medium-128k-instruct",
267
+ "phi-3-medium-4k-instruct",
268
+ "phi-3-mini-128k-instruct",
269
+ "phi-3.5-mini-128k-instruct",
270
+ "phind-codellama-34b",
271
+ "pplx-70b-online",
272
+ "pplx-7b-chat",
273
+ "qwen-110b-chat",
274
+ "qwen-14b-chat",
275
+ "qwen-2-72b-instruct",
276
+ "qwen-2-7b-instruct",
277
+ "qwen-32b-chat",
278
+ "qwen-4b-chat",
279
+ "qwen-72b-chat",
280
+ "qwen-7b-chat",
281
+ "remm-slerp-l2-13b",
282
+ "small",
283
+ "snowflake-arctic-instruct",
284
+ "soliloquy-l3",
285
+ "sonar-medium-online",
286
+ "sonar-small-chat",
287
+ "sonar-small-online",
288
+ "stripedhyena-hessian-7b",
289
+ "stripedhyena-nous-7b",
290
+ "text-babbage-002",
291
+ "text-bison",
292
+ "text-davinci-002",
293
+ "tiny",
294
+ "titan-text-express-v1",
295
+ "titan-text-lite-v1",
296
+ "titan-text-premier-v1",
297
+ "titan-tg1-large",
298
+ "toppy-m-7b",
299
+ "vicuna-13b-v1.5",
300
+ "vicuna-7b-v1.5",
301
+ "weaver",
302
+ "wizardlm-2-7b",
303
+ "wizardlm-2-8x22b",
304
+ "xwin-lm-70b",
305
+ "yi-1.5-34b-chat",
306
+ "yi-34b",
307
+ "yi-34b-chat",
308
+ "yi-6b",
309
+ "yi-large",
310
+ "yi-large-fc",
311
+ "yi-large-turbo",
312
+ "yi-vision",
313
+ "zephyr-7b-beta",
314
+ "zephyr-orpo-141b-A35b-v0.1"
315
+ ]
316
+ fetch: false
317
+ titleConvo: true
318
+ titleModel: "claude-3-haiku"
319
+ summarize: false
320
+ summaryModel: "claude-3-haiku"
321
+ dropParams: ["stream"]
322
+ modelDisplayLabel: "APIpie"
323
+ iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/APIpie.png"
324
+
325
+ # cohere
326
+ # Model list: https://dashboard.cohere.com/playground/chat
327
+ - name: "cohere"
328
+ apiKey: "${COHERE_API_KEY}"
329
+ baseURL: "https://api.cohere.ai/v1"
330
+ models:
331
+ default: [
332
+ "c4ai-aya-23-35b",
333
+ "c4ai-aya-23-8b",
334
+ "command",
335
+ "command-light",
336
+ "command-light-nightly",
337
+ "command-nightly",
338
+ "command-r",
339
+ "command-r-plus",
340
+ ]
341
+ fetch: false
342
+ modelDisplayLabel: "cohere"
343
+ titleModel: "command"
344
+ dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p"]
345
 
346
+ # DEEPNIGHT
347
+ # https://github.com/brahmai-research/aiforcause
348
+ # Model list: https://aiforcause.deepnight.tech/models
349
+ - name: "DEEPNIGHT"
350
+ apiKey: "sk-free1234"
351
+ baseURL: "https://aiforcause.deepnight.tech/openai/"
352
+ models:
353
+ default: [
354
+ "gpt-35-turbo",
355
+ "gpt-35-turbo-16k",
356
+ "gpt-4-turbo"
357
+ ]
358
+ fetch: false
359
+ titleConvo: true
360
+ titleModel: "gpt-35-turbo"
361
  summarize: false
362
+ summaryModel: "gpt-35-turbo"
363
+ forcePrompt: false
364
+ modelDisplayLabel: "DEEPNIGHT"
365
+ addParams:
366
+ stream: true
367
+ iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/DEEPNIGHT.png"
368
 
369
+ # deepseek
370
+ # https://platform.deepseek.com/api_keys
371
+ # Model list: https://platform.deepseek.com/api-docs/pricing
372
+ - name: "deepseek"
373
+ apiKey: "${DEEPSEEK_API_KEY}"
374
+ baseURL: "https://api.deepseek.com"
375
+ models:
376
+ default: [
377
+ "deepseek-chat",
378
+ "deepseek-coder"
379
+ ]
380
+ fetch: false
381
+ titleConvo: true
382
+ titleModel: "deepseek-chat"
383
+ summarize: false
384
+ summaryModel: "deepseek-chat"
385
+ forcePrompt: false
386
+ modelDisplayLabel: "DeepSeek"
387
 
388
+ # Fireworks.ai
389
+ # Models: https://fireworks.ai/models?show=Serverless
390
+ - name: "Fireworks"
391
+ apiKey: "${FIREWORKS_API_KEY}"
392
+ baseURL: "https://api.fireworks.ai/inference/v1"
393
+ models:
394
+ default: [
395
+ "accounts/fireworks/models/devashisht-test-v2",
396
+ "accounts/fireworks/models/dt-fc-rc-v1",
397
+ "accounts/fireworks/models/firefunction-v1",
398
+ "accounts/fireworks/models/firefunction-v2",
399
+ "accounts/fireworks/models/firellava-13b",
400
+ "accounts/devashisht-72fdad/models/function-calling-v11",
401
+ "accounts/fireworks/models/fw-function-call-34b-v0",
402
+ "accounts/stability/models/japanese-stablelm-instruct-beta-70b",
403
+ "accounts/stability/models/japanese-stablelm-instruct-gamma-7b",
404
+ "accounts/fireworks/models/japanese-stable-vlm",
405
+ "accounts/fireworks/models/gemma2-9b-it",
406
+ "accounts/fireworks/models/llama-v3p1-405b-instruct",
407
+ "accounts/fireworks/models/llama-v3p1-70b-instruct",
408
+ "accounts/fireworks/models/llama-v3p1-8b-instruct",
409
+ "accounts/fireworks/models/llama-v3-70b-instruct",
410
+ "accounts/fireworks/models/llama-v3-70b-instruct-hf",
411
+ "accounts/fireworks/models/llama-v3-8b-hf",
412
+ "accounts/fireworks/models/llama-v3-8b-instruct",
413
+ "accounts/fireworks/models/llama-v3-8b-instruct-hf",
414
+ "accounts/fireworks/models/llama-v2-13b-chat",
415
+ "accounts/fireworks/models/llama-v2-13b-code-instruct",
416
+ "accounts/fireworks/models/llama-v2-34b-code-instruct",
417
+ "accounts/fireworks/models/llama-v2-70b-chat",
418
+ "accounts/fireworks/models/llama-v2-70b-code-instruct",
419
+ "accounts/fireworks/models/llama-v2-7b-chat",
420
+ "accounts/fireworks/models/deepseek-coder-v2-instruct",
421
+ "accounts/fireworks/models/deepseek-coder-v2-lite-instruct",
422
+ "accounts/fireworks/models/llava-v15-13b-fireworks",
423
+ "accounts/fireworks/models/mistral-7b-instruct-4k",
424
+ "accounts/dev-e24710/models/mistral-spellbound-format",
425
+ "accounts/fireworks/models/mixtral-8x22b-instruct",
426
+ "accounts/fireworks/models/mixtral-8x7b-instruct",
427
+ "accounts/fireworks/models/mixtral-8x7b-instruct-hf",
428
+ "accounts/fireworks/models/new-mixtral-chat",
429
+ "accounts/fireworks/models/qwen-14b-chat",
430
+ "accounts/fireworks/models/qwen-1-8b-chat",
431
+ "accounts/fireworks/models/qwen-72b-chat",
432
+ "accounts/stability/models/stablelm-zephyr-3b",
433
+ "accounts/fireworks/models/yi-34b-200k-capybara",
434
+ ]
435
+ fetch: false
436
+ titleConvo: true
437
+ titleModel: "accounts/fireworks/models/llama-v2-7b-chat"
438
+ summarize: false
439
+ summaryModel: "accounts/fireworks/models/llama-v2-7b-chat"
440
  forcePrompt: false
441
+ modelDisplayLabel: "Fireworks"
442
+ dropParams: ["user"]
443
+
444
+ # groq
445
+ # Model list: https://console.groq.com/settings/limits
446
+ - name: "groq"
447
+ apiKey: "${GROQ_API_KEY}"
448
+ baseURL: "https://api.groq.com/openai/v1/"
449
+ models:
450
+ default: [
451
+ "llama-3.1-405b-reasoning",
452
+ "llama-3.1-70b-versatile",
453
+ "llama-3.1-8b-instant",
454
+ "llama3-groq-70b-8192-tool-use-preview",
455
+ "llama3-groq-8b-8192-tool-use-preview",
456
+ "llama3-70b-8192",
457
+ "llama3-8b-8192",
458
+ "mixtral-8x7b-32768",
459
+ "gemma-7b-it",
460
+ "gemma2-9b-it"
461
+ ]
462
+ fetch: false
463
+ titleConvo: true
464
+ titleModel: "mixtral-8x7b-32768"
465
+ modelDisplayLabel: "groq"
466
 
467
+ # HuggingFace
468
+ # https://huggingface.co/settings/tokens
469
+ - name: 'HuggingFace'
470
+ apiKey: '${HUGGINGFACE_TOKEN}'
471
+ baseURL: 'https://api-inference.huggingface.co/v1'
472
+ models:
473
+ default: [
474
+ "codellama/CodeLlama-34b-Instruct-hf",
475
+ "google/gemma-1.1-2b-it",
476
+ "google/gemma-1.1-7b-it",
477
+ "HuggingFaceH4/starchat2-15b-v0.1",
478
+ "HuggingFaceH4/zephyr-7b-beta",
479
+ "meta-llama/Meta-Llama-3-8B-Instruct",
480
+ "microsoft/Phi-3-mini-4k-instruct",
481
+ "mistralai/Mistral-7B-Instruct-v0.1",
482
+ "mistralai/Mistral-7B-Instruct-v0.2",
483
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
484
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
485
+ ]
486
+ fetch: true
487
+ titleConvo: true
488
+ titleModel: "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
489
+ dropParams: ["top_p"]
490
 
491
+ # Mistral AI API
492
+ # Model list: https://docs.mistral.ai/getting-started/models/
493
+ - name: "Mistral"
494
+ apiKey: "${MISTRAL_API_KEY}"
495
+ baseURL: "https://api.mistral.ai/v1"
496
+ models:
497
+ default: [
498
+ "mistral-tiny",
499
+ "mistral-small",
500
+ "mistral-medium",
501
+ "mistral-large-latest"
502
+ ]
503
+ fetch: false
504
+ titleConvo: true
505
+ titleMethod: "completion"
506
+ titleModel: "mistral-tiny"
507
+ summarize: false
508
+ summaryModel: "mistral-tiny"
509
+ forcePrompt: false
510
+ modelDisplayLabel: "Mistral"
511
+ dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
512
 
513
+ # OpenRouter.ai
514
+ # Model list: https://openrouter.ai/models
515
+ # Script to fetch models: https://github.com/LibreChat-AI/librechat-config-yaml/blob/main/scripts/openrouter.py
516
  - name: "OpenRouter"
 
 
 
517
  apiKey: "${OPENROUTER_KEY}"
518
  baseURL: "https://openrouter.ai/api/v1"
519
  models:
520
+ default: [
521
+ "openrouter/auto",
522
+ "---FREE---",
523
+ "google/gemma-2-9b-it:free",
524
+ "google/gemma-7b-it:free",
525
+ "gryphe/mythomist-7b:free",
526
+ "huggingfaceh4/zephyr-7b-beta:free",
527
+ "meta-llama/llama-3-8b-instruct:free",
528
+ "meta-llama/llama-3.1-8b-instruct:free",
529
+ "microsoft/phi-3-medium-128k-instruct:free",
530
+ "microsoft/phi-3-mini-128k-instruct:free",
531
+ "mistralai/mistral-7b-instruct:free",
532
+ "nousresearch/nous-capybara-7b:free",
533
+ "openchat/openchat-7b:free",
534
+ "qwen/qwen-2-7b-instruct:free",
535
+ "undi95/toppy-m-7b:free",
536
+ "---NITRO---",
537
+ "google/gemma-7b-it:nitro",
538
+ "gryphe/mythomax-l2-13b:nitro",
539
+ "meta-llama/llama-3-70b-instruct:nitro",
540
+ "meta-llama/llama-3-8b-instruct:nitro",
541
+ "mistralai/mistral-7b-instruct:nitro",
542
+ "mistralai/mixtral-8x7b-instruct:nitro",
543
+ "undi95/toppy-m-7b:nitro",
544
+ "---BETA---",
545
+ "anthropic/claude-2.0:beta",
546
+ "anthropic/claude-2.1:beta",
547
+ "anthropic/claude-2:beta",
548
+ "anthropic/claude-3-haiku:beta",
549
+ "anthropic/claude-3-opus:beta",
550
+ "anthropic/claude-3-sonnet:beta",
551
+ "anthropic/claude-3.5-sonnet:beta",
552
+ "anthropic/claude-instant-1:beta",
553
+ "---EXTENDED---",
554
+ "gryphe/mythomax-l2-13b:extended",
555
+ "meta-llama/llama-3-8b-instruct:extended",
556
+ "neversleep/llama-3-lumimaid-8b:extended",
557
+ "nousresearch/hermes-3-llama-3.1-405b:extended",
558
+ "openai/gpt-4o:extended",
559
+ "undi95/remm-slerp-l2-13b:extended",
560
+ "---01-AI---",
561
+ "01-ai/yi-1.5-34b-chat",
562
+ "01-ai/yi-34b",
563
+ "01-ai/yi-34b-chat",
564
+ "01-ai/yi-6b",
565
+ "01-ai/yi-large",
566
+ "01-ai/yi-large-fc",
567
+ "01-ai/yi-large-turbo",
568
+ "01-ai/yi-vision",
569
+ "---AI21---",
570
+ "ai21/jamba-1-5-large",
571
+ "ai21/jamba-1-5-mini",
572
+ "ai21/jamba-instruct",
573
+ "---ANTHROPIC---",
574
+ "anthropic/claude-1",
575
+ "anthropic/claude-1.2",
576
+ "anthropic/claude-2",
577
+ "anthropic/claude-2.0",
578
+ "anthropic/claude-2.1",
579
+ "anthropic/claude-3-haiku",
580
+ "anthropic/claude-3-opus",
581
+ "anthropic/claude-3-sonnet",
582
+ "anthropic/claude-3.5-sonnet",
583
+ "anthropic/claude-instant-1",
584
+ "anthropic/claude-instant-1.0",
585
+ "anthropic/claude-instant-1.1",
586
+ "---COGNITIVECOMPUTATIONS---",
587
+ "cognitivecomputations/dolphin-llama-3-70b",
588
+ "cognitivecomputations/dolphin-mixtral-8x22b",
589
+ "cognitivecomputations/dolphin-mixtral-8x7b",
590
+ "---COHERE---",
591
+ "cohere/command",
592
+ "cohere/command-r",
593
+ "cohere/command-r-plus",
594
+ "---GOOGLE---",
595
+ "google/gemini-flash-1.5",
596
+ "google/gemini-pro",
597
+ "google/gemini-pro-1.5",
598
+ "google/gemini-pro-1.5-exp",
599
+ "google/gemini-pro-vision",
600
+ "google/gemma-2-27b-it",
601
+ "google/gemma-2-9b-it",
602
+ "google/gemma-7b-it",
603
+ "google/palm-2-chat-bison",
604
+ "google/palm-2-chat-bison-32k",
605
+ "google/palm-2-codechat-bison",
606
+ "google/palm-2-codechat-bison-32k",
607
+ "---META-LLAMA---",
608
+ "meta-llama/codellama-34b-instruct",
609
+ "meta-llama/codellama-70b-instruct",
610
+ "meta-llama/llama-2-13b-chat",
611
+ "meta-llama/llama-2-70b-chat",
612
+ "meta-llama/llama-3-70b",
613
+ "meta-llama/llama-3-70b-instruct",
614
+ "meta-llama/llama-3-8b",
615
+ "meta-llama/llama-3-8b-instruct",
616
+ "meta-llama/llama-3.1-405b",
617
+ "meta-llama/llama-3.1-405b-instruct",
618
+ "meta-llama/llama-3.1-70b-instruct",
619
+ "meta-llama/llama-3.1-8b-instruct",
620
+ "meta-llama/llama-guard-2-8b",
621
+ "---MICROSOFT---",
622
+ "microsoft/phi-3-medium-128k-instruct",
623
+ "microsoft/phi-3-medium-4k-instruct",
624
+ "microsoft/phi-3-mini-128k-instruct",
625
+ "microsoft/phi-3.5-mini-128k-instruct",
626
+ "microsoft/wizardlm-2-7b",
627
+ "microsoft/wizardlm-2-8x22b",
628
+ "---MISTRALAI---",
629
+ "mistralai/codestral-mamba",
630
+ "mistralai/mistral-7b-instruct",
631
+ "mistralai/mistral-7b-instruct-v0.1",
632
+ "mistralai/mistral-7b-instruct-v0.2",
633
+ "mistralai/mistral-7b-instruct-v0.3",
634
+ "mistralai/mistral-large",
635
+ "mistralai/mistral-medium",
636
+ "mistralai/mistral-nemo",
637
+ "mistralai/mistral-small",
638
+ "mistralai/mistral-tiny",
639
+ "mistralai/mixtral-8x22b",
640
+ "mistralai/mixtral-8x22b-instruct",
641
+ "mistralai/mixtral-8x7b",
642
+ "mistralai/mixtral-8x7b-instruct",
643
+ "---NEVERSLEEP---",
644
+ "neversleep/llama-3-lumimaid-70b",
645
+ "neversleep/llama-3-lumimaid-8b",
646
+ "neversleep/noromaid-20b",
647
+ "---NOUSRESEARCH---",
648
+ "nousresearch/hermes-2-pro-llama-3-8b",
649
+ "nousresearch/hermes-2-theta-llama-3-8b",
650
+ "nousresearch/hermes-3-llama-3.1-405b",
651
+ "nousresearch/hermes-3-llama-3.1-70b",
652
+ "nousresearch/nous-capybara-7b",
653
+ "nousresearch/nous-hermes-2-mistral-7b-dpo",
654
+ "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
655
+ "nousresearch/nous-hermes-2-mixtral-8x7b-sft",
656
+ "nousresearch/nous-hermes-llama2-13b",
657
+ "nousresearch/nous-hermes-yi-34b",
658
+ "---OPENAI---",
659
+ "openai/chatgpt-4o-latest",
660
+ "openai/gpt-3.5-turbo",
661
+ "openai/gpt-3.5-turbo-0125",
662
+ "openai/gpt-3.5-turbo-0301",
663
+ "openai/gpt-3.5-turbo-0613",
664
+ "openai/gpt-3.5-turbo-1106",
665
+ "openai/gpt-3.5-turbo-16k",
666
+ "openai/gpt-3.5-turbo-instruct",
667
+ "openai/gpt-4",
668
+ "openai/gpt-4-0314",
669
+ "openai/gpt-4-1106-preview",
670
+ "openai/gpt-4-32k",
671
+ "openai/gpt-4-32k-0314",
672
+ "openai/gpt-4-turbo",
673
+ "openai/gpt-4-turbo-preview",
674
+ "openai/gpt-4-vision-preview",
675
+ "openai/gpt-4o",
676
+ "openai/gpt-4o-2024-05-13",
677
+ "openai/gpt-4o-2024-08-06",
678
+ "openai/gpt-4o-mini",
679
+ "openai/gpt-4o-mini-2024-07-18",
680
+ "---PERPLEXITY---",
681
+ "perplexity/llama-3-sonar-large-32k-chat",
682
+ "perplexity/llama-3-sonar-large-32k-online",
683
+ "perplexity/llama-3-sonar-small-32k-chat",
684
+ "perplexity/llama-3-sonar-small-32k-online",
685
+ "perplexity/llama-3.1-sonar-huge-128k-online",
686
+ "perplexity/llama-3.1-sonar-large-128k-chat",
687
+ "perplexity/llama-3.1-sonar-large-128k-online",
688
+ "perplexity/llama-3.1-sonar-small-128k-chat",
689
+ "perplexity/llama-3.1-sonar-small-128k-online",
690
+ "---QWEN---",
691
+ "qwen/qwen-110b-chat",
692
+ "qwen/qwen-14b-chat",
693
+ "qwen/qwen-2-72b-instruct",
694
+ "qwen/qwen-2-7b-instruct",
695
+ "qwen/qwen-32b-chat",
696
+ "qwen/qwen-4b-chat",
697
+ "qwen/qwen-72b-chat",
698
+ "qwen/qwen-7b-chat",
699
+ "---SAO10K---",
700
+ "sao10k/fimbulvetr-11b-v2",
701
+ "sao10k/l3-euryale-70b",
702
+ "sao10k/l3-lunaris-8b",
703
+ "sao10k/l3-stheno-8b",
704
+ "---OTHERS---",
705
+ "aetherwiing/mn-starcannon-12b",
706
+ "allenai/olmo-7b-instruct",
707
+ "alpindale/goliath-120b",
708
+ "alpindale/magnum-72b",
709
+ "austism/chronos-hermes-13b",
710
+ "databricks/dbrx-instruct",
711
+ "deepseek/deepseek-chat",
712
+ "deepseek/deepseek-coder",
713
+ "gryphe/mythomax-l2-13b",
714
+ "gryphe/mythomist-7b",
715
+ "jondurbin/airoboros-l2-70b",
716
+ "lizpreciatior/lzlv-70b-fp16-hf",
717
+ "mancer/weaver",
718
+ "nothingiisreal/mn-celeste-12b",
719
+ "open-orca/mistral-7b-openorca",
720
+ "openchat/openchat-7b",
721
+ "openchat/openchat-8b",
722
+ "openrouter/flavor-of-the-week",
723
+ "phind/phind-codellama-34b",
724
+ "pygmalionai/mythalion-13b",
725
+ "recursal/eagle-7b",
726
+ "recursal/rwkv-5-3b-ai-town",
727
+ "rwkv/rwkv-5-world-3b",
728
+ "snowflake/snowflake-arctic-instruct",
729
+ "sophosympatheia/midnight-rose-70b",
730
+ "teknium/openhermes-2-mistral-7b",
731
+ "teknium/openhermes-2.5-mistral-7b",
732
+ "togethercomputer/stripedhyena-hessian-7b",
733
+ "togethercomputer/stripedhyena-nous-7b",
734
+ "undi95/remm-slerp-l2-13b",
735
+ "undi95/toppy-m-7b",
736
+ "xwin-lm/xwin-lm-70b"
737
+ ]
738
+ fetch: false
739
+ dropParams: ["stop"]
740
  titleConvo: true
741
  titleModel: "gpt-3.5-turbo"
742
  summarize: false
 
744
  forcePrompt: false
745
  modelDisplayLabel: "OpenRouter"
746
 
747
+ # Perplexity
748
+ # Model list: https://docs.perplexity.ai/docs/model-cards
749
+ - name: "Perplexity"
750
+ apiKey: "${PERPLEXITY_API_KEY}"
751
+ baseURL: "https://api.perplexity.ai/"
 
752
  models:
753
+ default: [
754
+ "llama-3.1-sonar-small-128k-chat",
755
+ "llama-3.1-sonar-small-128k-online",
756
+ "llama-3.1-sonar-large-128k-chat",
757
+ "llama-3.1-sonar-large-128k-online",
758
+ "llama-3.1-sonar-huge-128k-online",
759
+ "llama-3.1-8b-instruct",
760
+ "llama-3.1-70b-instruct"
761
+ ]
762
+ fetch: false # fetching list of models is not supported
763
+ titleConvo: true
764
+ titleModel: "llama-3.1-sonar-small-128k-chat"
765
+ summarize: false
766
+ summaryModel: "llama-3.1-sonar-small-128k-chat"
767
+ forcePrompt: false
768
+ dropParams: ["stop", "frequency_penalty"]
769
+ modelDisplayLabel: "Perplexity"
770
+
771
+ # ShuttleAI API
772
+ - name: "ShuttleAI"
773
+ apiKey: "${SHUTTLEAI_API_KEY}"
774
+ baseURL: "https://api.shuttleai.app/v1"
775
+ models:
776
+ default: [
777
+ "shuttle-2-turbo",
778
+ "shuttle-turbo",
779
+ "gpt-4o-2024-05-13",
780
+ "gpt-4o",
781
+ "im-also-a-good-gpt2-chatbot",
782
+ "gpt-4-turbo-2024-04-09",
783
+ "gpt-4-turbo",
784
+ "gpt-4-0125-preview",
785
+ "gpt-4-turbo-preview",
786
+ "gpt-4-1106-preview",
787
+ "gpt-4-1106-vision-preview",
788
+ "gpt-4-vision-preview",
789
+ "gpt-4-0613",
790
+ "gpt-4",
791
+ "gpt-4-bing",
792
+ "gpt-4-turbo-bing",
793
+ "gpt-4-32k-0613",
794
+ "gpt-4-32k",
795
+ "gpt-3.5-turbo-0125",
796
+ "gpt-3.5-turbo",
797
+ "gpt-3.5-turbo-1106",
798
+ "claude-3-opus-20240229",
799
+ "claude-3-opus",
800
+ "claude-3-sonnet-20240229",
801
+ "claude-3-sonnet",
802
+ "claude-3-haiku-20240307",
803
+ "claude-3-haiku",
804
+ "claude-2.1",
805
+ "claude-2.0",
806
+ "claude-2",
807
+ "claude-instant-1.2",
808
+ "claude-instant-1.1",
809
+ "claude-instant-1.0",
810
+ "claude-instant",
811
+ "meta-llama-3-70b-instruct",
812
+ "llama-3-70b-instruct",
813
+ "meta-llama-3-8b-instruct",
814
+ "llama-3-8b-instruct",
815
+ "llama-3-sonar-large-32k-online",
816
+ "llama-3-sonar-small-32k-online",
817
+ "llama-3-sonar-large-32k-chat",
818
+ "llama-3-sonar-small-32k-chat",
819
+ "blackbox",
820
+ "blackbox-code",
821
+ "wizardlm-2-8x22b",
822
+ "wizardlm-2-70b",
823
+ "dolphin-2.6-mixtral-8x7b",
824
+ "dolphin-mixtral-8x7b",
825
+ "mistral-large",
826
+ "mistral-next",
827
+ "mistral-medium",
828
+ "mistral-small",
829
+ "mistral-tiny",
830
+ "mixtral-8x7b-instruct-v0.1",
831
+ "mixtral-8x7b-instruct",
832
+ "mixtral-8x22b-instruct-v0.1",
833
+ "mixtral-8x22b-instruct",
834
+ "mistral-7b-instruct-v0.2",
835
+ "mistral-7b-instruct-2",
836
+ "mistral-7b-instruct-v0.1",
837
+ "mistral-7b-instruct",
838
+ "nous-hermes-2-mixtral-8x7b",
839
+ "gemini-1.5-pro-latest",
840
+ "gemini-1.5-pro",
841
+ "gemini-1.0-pro-latest",
842
+ "gemini-1.0-pro",
843
+ "gemini-pro",
844
+ "gemini-1.0-pro-vision",
845
+ "gemini-pro-vision",
846
+ "lzlv-70b",
847
+ "figgs-rp",
848
+ "cinematika-7b"
849
+ ]
850
  fetch: true
851
  titleConvo: true
852
+ titleMethod: "completion"
853
+ titleModel: "shuttle-2-turbo"
854
  summarize: false
855
+ summaryModel: "shuttle-2-turbo"
856
  forcePrompt: false
857
+ dropParams: ["user", "frequency_penalty", "presence_penalty", "repetition_penalty"]
858
+ modelDisplayLabel: "ShuttleAI"
859
 
860
+ - name: "together.ai"
861
+ apiKey: "${TOGETHERAI_API_KEY}"
862
+ baseURL: "https://api.together.xyz"
863
+ models:
864
+ default: [
865
+ "Austism/chronos-hermes-13b",
866
+ "Gryphe/MythoMax-L2-13b",
867
+ "HuggingFaceH4/zephyr-7b-beta",
868
+ "NousResearch/Hermes-2-Theta-Llama-3-70B",
869
+ "NousResearch/Nous-Capybara-7B-V1p9",
870
+ "NousResearch/Nous-Hermes-2-Mistral-7B-DPO",
871
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
872
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
873
+ "NousResearch/Nous-Hermes-2-Yi-34B",
874
+ "NousResearch/Nous-Hermes-Llama2-13b",
875
+ "NousResearch/Nous-Hermes-Llama2-70b",
876
+ "NousResearch/Nous-Hermes-llama-2-7b",
877
+ "Open-Orca/Mistral-7B-OpenOrca",
878
+ "Qwen/Qwen1.5-0.5B-Chat",
879
+ "Qwen/Qwen1.5-1.8B-Chat",
880
+ "Qwen/Qwen1.5-110B-Chat",
881
+ "Qwen/Qwen1.5-14B-Chat",
882
+ "Qwen/Qwen1.5-32B-Chat",
883
+ "Qwen/Qwen1.5-4B-Chat",
884
+ "Qwen/Qwen1.5-72B-Chat",
885
+ "Qwen/Qwen1.5-7B-Chat",
886
+ "Qwen/Qwen2-1.5B-Instruct",
887
+ "Qwen/Qwen2-72B-Instruct",
888
+ "Qwen/Qwen2-7B-Instruct",
889
+ "Snowflake/snowflake-arctic-instruct",
890
+ "Undi95/ReMM-SLERP-L2-13B",
891
+ "Undi95/Toppy-M-7B",
892
+ "WizardLM/WizardLM-13B-V1.2",
893
+ "allenai/OLMo-7B-Instruct",
894
+ "carson/ml31405bit",
895
+ "carson/ml3170bit",
896
+ "carson/ml318bit",
897
+ "carson/ml318br",
898
+ "codellama/CodeLlama-13b-Instruct-hf",
899
+ "codellama/CodeLlama-34b-Instruct-hf",
900
+ "codellama/CodeLlama-70b-Instruct-hf",
901
+ "codellama/CodeLlama-7b-Instruct-hf",
902
+ "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
903
+ "databricks/dbrx-instruct",
904
+ "deepseek-ai/deepseek-coder-33b-instruct",
905
+ "deepseek-ai/deepseek-llm-67b-chat",
906
+ "garage-bAInd/Platypus2-70B-instruct",
907
+ "google/gemma-2-27b-it",
908
+ "google/gemma-2-9b-it",
909
+ "google/gemma-2b-it",
910
+ "google/gemma-7b-it",
911
+ "gradientai/Llama-3-70B-Instruct-Gradient-1048k",
912
+ "lmsys/vicuna-13b-v1.3",
913
+ "lmsys/vicuna-13b-v1.5",
914
+ "lmsys/vicuna-13b-v1.5-16k",
915
+ "lmsys/vicuna-7b-v1.3",
916
+ "lmsys/vicuna-7b-v1.5",
917
+ "meta-llama/Llama-2-13b-chat-hf",
918
+ "meta-llama/Llama-2-70b-chat-hf",
919
+ "meta-llama/Llama-2-7b-chat-hf",
920
+ "meta-llama/Llama-3-70b-chat-hf",
921
+ "meta-llama/Llama-3-8b-chat-hf",
922
+ "meta-llama/Meta-Llama-3-70B-Instruct",
923
+ "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
924
+ "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
925
+ "meta-llama/Meta-Llama-3-8B-Instruct",
926
+ "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
927
+ "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
928
+ "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
929
+ "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
930
+ "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
931
+ "meta-llama/Meta-Llama-3.1-70B-Reference",
932
+ "meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",
933
+ "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
934
+ "microsoft/WizardLM-2-8x22B",
935
+ "mistralai/Mistral-7B-Instruct-v0.1",
936
+ "mistralai/Mistral-7B-Instruct-v0.2",
937
+ "mistralai/Mistral-7B-Instruct-v0.3",
938
+ "mistralai/Mixtral-8x22B-Instruct-v0.1",
939
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
940
+ "openchat/openchat-3.5-1210",
941
+ "snorkelai/Snorkel-Mistral-PairRM-DPO",
942
+ "teknium/OpenHermes-2-Mistral-7B",
943
+ "teknium/OpenHermes-2p5-Mistral-7B",
944
+ "togethercomputer/CodeLlama-13b-Instruct",
945
+ "togethercomputer/CodeLlama-34b-Instruct",
946
+ "togethercomputer/CodeLlama-7b-Instruct",
947
+ "togethercomputer/Koala-13B",
948
+ "togethercomputer/Koala-7B",
949
+ "togethercomputer/Llama-2-7B-32K-Instruct",
950
+ "togethercomputer/Llama-3-8b-chat-hf-int4",
951
+ "togethercomputer/Llama-3-8b-chat-hf-int8",
952
+ "togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4",
953
+ "togethercomputer/StripedHyena-Nous-7B",
954
+ "togethercomputer/alpaca-7b",
955
+ "togethercomputer/guanaco-13b",
956
+ "togethercomputer/guanaco-33b",
957
+ "togethercomputer/guanaco-65b",
958
+ "togethercomputer/guanaco-7b",
959
+ "togethercomputer/llama-2-13b-chat",
960
+ "togethercomputer/llama-2-70b-chat",
961
+ "togethercomputer/llama-2-7b-chat",
962
+ "upstage/SOLAR-10.7B-Instruct-v1.0",
963
+ "zero-one-ai/Yi-34B-Chat"
964
+ ]
965
+ fetch: false
966
+ titleConvo: true
967
+ titleModel: "togethercomputer/llama-2-7b-chat"
968
+ summarize: false
969
+ summaryModel: "togethercomputer/llama-2-7b-chat"
970
+ forcePrompt: false
971
+ modelDisplayLabel: "together.ai"
972
+
973
+ # Unify
974
+ # Model list: https://unify.ai/chat
975
+ - name: "Unify"
976
+ apiKey: "${UNIFY_API_KEY}"
977
+ baseURL: "https://api.unify.ai/v0/"
978
+ models:
979
+ default: [
980
+ "router@q:1|c:2.12e-01|t:5.00e-04|i:2.78e-04",
981
+ "chatgpt-4o-latest@openai",
982
+ "gpt-3.5-turbo@openai",
983
+ "gpt-4-turbo@openai",
984
+ "gpt-4@openai",
985
+ "gpt-4o-2024-08-06@openai",
986
+ "gpt-4o-mini@openai",
987
+ "gpt-4o@openai",
988
+ "claude-3-haiku@anthropic",
989
+ "claude-3-opus@anthropic",
990
+ "claude-3-sonnet@anthropic",
991
+ "claude-3.5-sonnet@anthropic",
992
+ "claude-3-haiku@aws-bedrock",
993
+ "claude-3-opus@aws-bedrock",
994
+ "claude-3-sonnet@aws-bedrock",
995
+ "claude-3.5-sonnet@aws-bedrock",
996
+ "command-r-plus@aws-bedrock",
997
+ "llama-3-70b-chat@aws-bedrock",
998
+ "llama-3-8b-chat@aws-bedrock",
999
+ "llama-3.1-405b-chat@aws-bedrock",
1000
+ "llama-3.1-70b-chat@aws-bedrock",
1001
+ "llama-3.1-8b-chat@aws-bedrock",
1002
+ "mistral-7b-instruct-v0.2@aws-bedrock",
1003
+ "mistral-large@aws-bedrock",
1004
+ "mixtral-8x7b-instruct-v0.1@aws-bedrock",
1005
+ "codellama-13b-instruct@fireworks-ai",
1006
+ "codellama-34b-instruct@fireworks-ai",
1007
+ "gemma-2-9b-it@fireworks-ai",
1008
+ "gemma-7b-it@fireworks-ai",
1009
+ "llama-3-70b-chat@fireworks-ai",
1010
+ "llama-3-8b-chat@fireworks-ai",
1011
+ "llama-3.1-405b-chat@fireworks-ai",
1012
+ "llama-3.1-70b-chat@fireworks-ai",
1013
+ "llama-3.1-8b-chat@fireworks-ai",
1014
+ "mistral-7b-instruct-v0.1@fireworks-ai",
1015
+ "mistral-7b-instruct-v0.2@fireworks-ai",
1016
+ "mistral-7b-instruct-v0.3@fireworks-ai",
1017
+ "mistral-nemo@fireworks-ai",
1018
+ "mixtral-8x22b-instruct-v0.1@fireworks-ai",
1019
+ "mixtral-8x7b-instruct-v0.1@fireworks-ai",
1020
+ "qwen-2-72b-instruct@fireworks-ai",
1021
+ "codellama-13b-instruct@octoai",
1022
+ "codellama-34b-instruct@octoai",
1023
+ "codellama-7b-instruct@octoai",
1024
+ "llama-3-70b-chat@octoai",
1025
+ "llama-3-8b-chat@octoai",
1026
+ "llama-3.1-405b-chat@octoai",
1027
+ "llama-3.1-70b-chat@octoai",
1028
+ "llama-3.1-8b-chat@octoai",
1029
+ "mistral-7b-instruct-v0.2@octoai",
1030
+ "mistral-7b-instruct-v0.3@octoai",
1031
+ "mixtral-8x22b-instruct-v0.1@octoai",
1032
+ "mixtral-8x7b-instruct-v0.1@octoai",
1033
+ "qwen-2-7b-instruct@octoai",
1034
+ "codellama-13b-instruct@together-ai",
1035
+ "codellama-34b-instruct@together-ai",
1036
+ "codellama-70b-instruct@together-ai",
1037
+ "codellama-7b-instruct@together-ai",
1038
+ "deepseek-coder-33b-instruct@together-ai",
1039
+ "gemma-2b-it@together-ai",
1040
+ "gemma-7b-it@together-ai",
1041
+ "llama-3-70b-chat@together-ai",
1042
+ "llama-3-8b-chat@together-ai",
1043
+ "llama-3.1-405b-chat@together-ai",
1044
+ "llama-3.1-70b-chat@together-ai",
1045
+ "llama-3.1-8b-chat@together-ai",
1046
+ "mistral-7b-instruct-v0.1@together-ai",
1047
+ "mistral-7b-instruct-v0.2@together-ai",
1048
+ "mistral-7b-instruct-v0.3@together-ai",
1049
+ "mixtral-8x22b-instruct-v0.1@together-ai",
1050
+ "mixtral-8x7b-instruct-v0.1@together-ai",
1051
+ "phind-codellama-34b-v2@together-ai",
1052
+ "qwen-2-72b-instruct@together-ai",
1053
+ "codellama-34b-instruct@deepinfra",
1054
+ "gemma-2-27b-it@deepinfra",
1055
+ "gemma-2-9b-it@deepinfra",
1056
+ "gemma-7b-it@deepinfra",
1057
+ "llama-3-70b-chat@deepinfra",
1058
+ "llama-3-8b-chat@deepinfra",
1059
+ "llama-3.1-405b-chat@deepinfra",
1060
+ "llama-3.1-70b-chat@deepinfra",
1061
+ "llama-3.1-8b-chat@deepinfra",
1062
+ "mistral-7b-instruct-v0.1@deepinfra",
1063
+ "mistral-7b-instruct-v0.3@deepinfra",
1064
+ "mixtral-8x22b-instruct-v0.1@deepinfra",
1065
+ "mixtral-8x7b-instruct-v0.1@deepinfra",
1066
+ "nemotron-4-340b-instruct@deepinfra",
1067
+ "phi-3-medium-4k-instruct@deepinfra",
1068
+ "phind-codellama-34b-v2@deepinfra",
1069
+ "qwen-2-72b-instruct@deepinfra",
1070
+ "qwen-2-7b-instruct@deepinfra",
1071
+ "codellama-34b-instruct@perplexity-ai",
1072
+ "llama-3.1-70b-chat@perplexity-ai",
1073
+ "llama-3.1-8b-chat@perplexity-ai",
1074
+ "mistral-7b-instruct-v0.2@perplexity-ai",
1075
+ "mixtral-8x7b-instruct-v0.1@perplexity-ai",
1076
+ "gemini-1.5-flash@vertex-ai",
1077
+ "gemini-1.5-pro@vertex-ai",
1078
+ "gemma-2-9b-it@vertex-ai",
1079
+ "gemma-2-9b-it@groq",
1080
+ "gemma-7b-it@groq",
1081
+ "llama-3-70b-chat@groq",
1082
+ "llama-3-8b-chat@groq",
1083
+ "mixtral-8x7b-instruct-v0.1@groq",
1084
+ "gemma-7b-it@lepton-ai",
1085
+ "llama-3-70b-chat@lepton-ai",
1086
+ "llama-3-8b-chat@lepton-ai",
1087
+ "llama-3.1-405b-chat@lepton-ai",
1088
+ "llama-3.1-70b-chat@lepton-ai",
1089
+ "llama-3.1-8b-chat@lepton-ai",
1090
+ "mistral-7b-instruct-v0.3@lepton-ai",
1091
+ "mixtral-8x7b-instruct-v0.1@lepton-ai",
1092
+ "gpt-4o-mini@azure-ai",
1093
+ "gpt-4o@azure-ai",
1094
+ "llama-3.1-405b-chat@azure-ai",
1095
+ "llama-3.1-70b-chat@azure-ai",
1096
+ "llama-3.1-8b-chat@azure-ai",
1097
+ "llama-3-70b-chat@replicate",
1098
+ "llama-3-8b-chat@replicate",
1099
+ "llama-3.1-405b-chat@replicate",
1100
+ "mistral-7b-instruct-v0.2@replicate",
1101
+ "mixtral-8x7b-instruct-v0.1@replicate",
1102
+ "mistral-7b-instruct-v0.2@mistral-ai",
1103
+ "mistral-7b-instruct-v0.3@mistral-ai",
1104
+ "mistral-large@mistral-ai",
1105
+ "mistral-nemo@mistral-ai",
1106
+ "mistral-small@mistral-ai",
1107
+ "mixtral-8x22b-instruct-v0.1@mistral-ai",
1108
+ "mixtral-8x7b-instruct-v0.1@mistral-ai"
1109
+ ]
1110
+ fetch: false
1111
+ titleConvo: true
1112
+ titleModel: "router@q:1|c:2.12e-01|t:5.00e-04|i:2.78e-04"
1113
+ dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]