Commit 73a5c0d (parent: bbbedb7), committed by Luke and nsarrazin (HF staff)

Several QoL contributions (#760)


* allow customizing the disclaimer via `PUBLIC_APP_DISCLAIMER_MESSAGE`

* support passing `defaultHeaders` to the `openai` endpoint

* add Azure OpenAI, Claude, and Mistral examples using `defaultHeaders` and the `openai` endpoint

* fix streaming being buffered behind Cloudflare Tunnel

might help relieve issue #598

* support newlines in model descriptions

* don't automatically generate a `modelUrl` pointing to Hugging Face

fixes broken links for self-hosted or custom-named models

* add `PUBLIC_APP_DISCLAIMER_MESSAGE` to `.env`

* `npm run format`

---------

Co-authored-by: Nathan Sarrazin <[email protected]>

.env CHANGED

```diff
@@ -120,6 +120,7 @@ PUBLIC_APP_COLOR=blue # can be any of tailwind colors: https://tailwindcss.com/d
 PUBLIC_APP_DESCRIPTION=# description used throughout the app (if not set, a default one will be used)
 PUBLIC_APP_DATA_SHARING=#set to 1 to enable options & text regarding data sharing
 PUBLIC_APP_DISCLAIMER=#set to 1 to show a disclaimer on login page
+PUBLIC_APP_DISCLAIMER_MESSAGE="Disclaimer: AI is an area of active research with known problems such as biased generation and misinformation. Do not use this application for high-stakes decisions or advice."
 LLM_SUMMERIZATION=true
 
 EXPOSE_API=true
```
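As a usage sketch (values are illustrative, not from the commit), a self-hosted instance can now override the text, e.g. in its `.env.local`:

```
PUBLIC_APP_DISCLAIMER=1
PUBLIC_APP_DISCLAIMER_MESSAGE="This is a demo instance. Answers may be inaccurate; verify anything important."
```

The default added to `.env` above reproduces the text that was previously hard-coded in `DisclaimerModal.svelte`.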
.env.template CHANGED

```diff
@@ -228,6 +228,7 @@ PUBLIC_APP_NAME=HuggingChat
 PUBLIC_APP_ASSETS=huggingchat
 PUBLIC_APP_COLOR=yellow
 PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone."
+PUBLIC_APP_DISCLAIMER_MESSAGE="Disclaimer: AI is an area of active research with known problems such as biased generation and misinformation. Do not use this application for high-stakes decisions or advice."
 PUBLIC_APP_DATA_SHARING=1
 PUBLIC_APP_DISCLAIMER=1
 
```
README.md CHANGED

````diff
@@ -316,6 +316,75 @@ MODELS=`[{
 }]`
 ```
 
+You may also consume any model provider that exposes a compatible OpenAI API endpoint. For example, you can self-host the [Portkey](https://github.com/Portkey-AI/gateway) gateway and experiment with Claude or with GPT models offered by Azure OpenAI. Example for Claude from Anthropic:
+
+```
+MODELS=`[{
+  "name": "claude-2.1",
+  "displayName": "Claude 2.1",
+  "description": "Anthropic has been founded by former OpenAI researchers...",
+  "parameters": {
+    "temperature": 0.5,
+    "max_new_tokens": 4096,
+  },
+  "endpoints": [
+    {
+      "type": "openai",
+      "baseURL": "https://gateway.example.com/v1",
+      "defaultHeaders": {
+        "x-portkey-config": '{"provider":"anthropic","api_key":"sk-ant-abc...xyz"}'
+      }
+    }
+  ]
+}]`
+```
+
+Example for GPT 4 deployed on Azure OpenAI:
+
+```
+MODELS=`[{
+  "id": "gpt-4-1106-preview",
+  "name": "gpt-4-1106-preview",
+  "displayName": "gpt-4-1106-preview",
+  "parameters": {
+    "temperature": 0.5,
+    "max_new_tokens": 4096,
+  },
+  "endpoints": [
+    {
+      "type": "openai",
+      "baseURL": "https://gateway.example.com/v1",
+      "defaultHeaders": {
+        "x-portkey-config": '{"provider":"azure-openai","resource_name":"abc-fr","deployment_id":"gpt-4-1106-preview","api_version":"2023-03-15-preview","api_key":"abc...xyz"}'
+      }
+    }
+  ]
+}]`
+```
+
+Or try Mistral from [Deepinfra](https://deepinfra.com/mistralai/Mistral-7B-Instruct-v0.1/api?example=openai-http):
+
+> Note: the apiKey can be set per endpoint, or globally via the `OPENAI_API_KEY` variable.
+
+```
+MODELS=`[{
+  "name": "mistral-7b",
+  "displayName": "Mistral 7B",
+  "description": "A 7B dense Transformer, fast-deployed and easily customisable. Small, yet powerful for a variety of use cases. Supports English and code, and an 8k context window.",
+  "parameters": {
+    "temperature": 0.5,
+    "max_new_tokens": 4096,
+  },
+  "endpoints": [
+    {
+      "type": "openai",
+      "baseURL": "https://api.deepinfra.com/v1/openai",
+      "apiKey": "abc...xyz"
+    }
+  ]
+}]`
+```
+
 ##### Llama.cpp API server
 
 chat-ui also supports the llama.cpp API server directly without the need for an adapter. You can do this using the `llamacpp` endpoint type.
````
src/lib/components/DisclaimerModal.svelte CHANGED

```diff
@@ -1,7 +1,11 @@
 <script lang="ts">
   import { base } from "$app/paths";
   import { page } from "$app/stores";
-  import { PUBLIC_APP_DESCRIPTION, PUBLIC_APP_NAME } from "$env/static/public";
+  import {
+    PUBLIC_APP_DESCRIPTION,
+    PUBLIC_APP_NAME,
+    PUBLIC_APP_DISCLAIMER_MESSAGE,
+  } from "$env/static/public";
   import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte";
   import Modal from "$lib/components/Modal.svelte";
   import { useSettingsStore } from "$lib/stores/settings";
@@ -25,8 +29,7 @@
   </p>
 
   <p class="text-sm text-gray-500">
-    Disclaimer: AI is an area of active research with known problems such as biased generation and
-    misinformation. Do not use this application for high-stakes decisions or advice.
+    {PUBLIC_APP_DISCLAIMER_MESSAGE}
   </p>
 
   <div class="flex w-full flex-col items-center gap-2">
```
src/lib/server/endpoints/openai/endpointOai.ts CHANGED

```diff
@@ -15,12 +15,14 @@ export const endpointOAIParametersSchema = z.object({
   completion: z
     .union([z.literal("completions"), z.literal("chat_completions")])
     .default("chat_completions"),
+  defaultHeaders: z.record(z.string()).optional(),
 });
 
 export async function endpointOai(
   input: z.input<typeof endpointOAIParametersSchema>
 ): Promise<Endpoint> {
-  const { baseURL, apiKey, completion, model } = endpointOAIParametersSchema.parse(input);
+  const { baseURL, apiKey, completion, model, defaultHeaders } =
+    endpointOAIParametersSchema.parse(input);
   let OpenAI;
   try {
     OpenAI = (await import("openai")).OpenAI;
@@ -31,6 +33,7 @@ export async function endpointOai(
   const openai = new OpenAI({
     apiKey: apiKey ?? "sk-",
     baseURL,
+    defaultHeaders,
   });
 
   if (completion === "completions") {
```
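In practice the new option maps straight onto the `openai` npm client, which attaches `defaultHeaders` to every request it makes. A minimal standalone sketch (the gateway URL and the `x-portkey-config` header are illustrative assumptions, not code from this repo):

```ts
import { OpenAI } from "openai";

// Hypothetical gateway endpoint; the header is what a Portkey-style
// gateway would read to pick the upstream provider.
const openai = new OpenAI({
  apiKey: "sk-", // placeholder; the gateway reads its own header instead
  baseURL: "https://gateway.example.com/v1",
  defaultHeaders: {
    "x-portkey-config": '{"provider":"anthropic","api_key":"sk-ant-..."}',
  },
});

// Stream a chat completion through the gateway.
const stream = await openai.chat.completions.create({
  model: "claude-2.1",
  messages: [{ role: "user", content: "Hello!" }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```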
src/routes/conversation/[id]/+server.ts CHANGED

```diff
@@ -381,7 +381,11 @@ export async function POST({ request, locals, params, getClientAddress }) {
   });
 
   // Todo: maybe we should wait for the message to be saved before ending the response - in case of errors
-  return new Response(stream);
+  return new Response(stream, {
+    headers: {
+      "Content-Type": "text/event-stream",
+    },
+  });
 }
 
 export async function DELETE({ locals, params }) {
```
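Why this helps: Cloudflare and most reverse proxies typically pass `text/event-stream` responses through chunk by chunk, whereas a body with no declared content type may be buffered until it completes. A minimal sketch of the same pattern in a standalone SvelteKit endpoint (hypothetical route, not code from this repo):

```ts
// src/routes/demo/+server.ts (hypothetical): emit a few SSE chunks.
export async function GET() {
  const encoder = new TextEncoder();
  const stream = new ReadableStream({
    async start(controller) {
      for (const word of ["streams", "arrive", "incrementally"]) {
        controller.enqueue(encoder.encode(`data: ${word}\n\n`));
        await new Promise((resolve) => setTimeout(resolve, 250));
      }
      controller.close();
    },
  });
  // Declaring the body as server-sent events signals intermediaries
  // not to buffer it.
  return new Response(stream, {
    headers: { "Content-Type": "text/event-stream" },
  });
}
```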
src/routes/settings/[...model]/+page.svelte CHANGED

```diff
@@ -34,22 +34,24 @@
   </h2>
 
   {#if model.description}
-    <p class=" text-gray-600">
+    <p class="whitespace-pre-wrap text-gray-600">
       {model.description}
     </p>
   {/if}
 </div>
 
 <div class="flex flex-wrap items-center gap-2 md:gap-4">
-  <a
-    href={model.modelUrl || "https://huggingface.co/" + model.name}
-    target="_blank"
-    rel="noreferrer"
-    class="flex items-center truncate underline underline-offset-2"
-  >
-    <CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " />
-    Model page
-  </a>
+  {#if model.modelUrl}
+    <a
+      href={model.modelUrl || "https://huggingface.co/" + model.name}
+      target="_blank"
+      rel="noreferrer"
+      class="flex items-center truncate underline underline-offset-2"
+    >
+      <CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " />
+      Model page
+    </a>
+  {/if}
 
   {#if model.datasetName || model.datasetUrl}
     <a
```
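Together, the last two UI changes mean a hypothetical self-hosted entry like the one below (all names and URLs invented for illustration) renders its description on two lines, and shows a "Model page" link only because `modelUrl` is set explicitly; without it, no huggingface.co URL is guessed from the model name anymore:

```
MODELS=`[{
  "name": "my-finetune",
  "displayName": "My Finetune",
  "description": "An in-house fine-tune.\nNot published on the Hugging Face Hub.",
  "modelUrl": "https://models.example.com/my-finetune",
  "endpoints": [{ "type": "openai", "baseURL": "http://localhost:8000/v1" }]
}]`
```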