Mishig nsarrazin HF staff committed on
Commit
50d8483
1 Parent(s): c6129c3

Count system prompt tokens (#850)

Browse files

* Count system prompt tokens

* show error only once

* fix

* fix css

* simplify

* fix

* simplify

* Revert "simplify"

This reverts commit 61c0a2281f9795c4a4351d9ad473993a3c01c59f.

* `model.tokenizer` config & fix reactivity issues

* rm gated tokenizer

* use `truncate`

---------

Co-authored-by: Nathan Sarrazin <[email protected]>

.env.template CHANGED
@@ -7,6 +7,7 @@ MODELS=`[
7
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png",
8
  "websiteUrl" : "https://mistral.ai/news/mixtral-of-experts/",
9
  "modelUrl": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1",
 
10
  "preprompt" : "",
11
  "chatPromptTemplate": "<s> {{#each messages}}{{#ifUser}}[INST]{{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}} {{content}}</s> {{/ifAssistant}}{{/each}}",
12
  "parameters" : {
@@ -63,7 +64,6 @@ MODELS=`[
63
  "description": "The latest and biggest model from Meta, fine-tuned for chat.",
64
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/meta-logo.png",
65
  "websiteUrl": "https://ai.meta.com/llama/",
66
- "modelUrl": "https://huggingface.co/meta-llama/Llama-2-70b-chat-hf",
67
  "preprompt": " ",
68
  "chatPromptTemplate" : "<s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}}",
69
  "promptExamples": [
@@ -94,6 +94,7 @@ MODELS=`[
94
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/nous-logo.png",
95
  "websiteUrl" : "https://nousresearch.com/",
96
  "modelUrl": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
 
97
  "chatPromptTemplate" : "{{#if @root.preprompt}}<|im_start|>system\n{{@root.preprompt}}<|im_end|>\n{{/if}}{{#each messages}}{{#ifUser}}<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n{{/ifUser}}{{#ifAssistant}}{{content}}<|im_end|>\n{{/ifAssistant}}{{/each}}",
98
  "promptExamples": [
99
  {
@@ -155,6 +156,7 @@ MODELS=`[
155
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png",
156
  "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
157
  "modelUrl": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1",
 
158
  "preprompt": "",
159
  "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
160
  "parameters": {
@@ -187,6 +189,7 @@ MODELS=`[
187
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png",
188
  "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
189
  "modelUrl": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
 
190
  "preprompt": "",
191
  "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
192
  "parameters": {
@@ -218,6 +221,7 @@ MODELS=`[
218
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/openchat-logo.png",
219
  "websiteUrl": "https://huggingface.co/openchat/openchat-3.5-0106",
220
  "modelUrl": "https://huggingface.co/openchat/openchat-3.5-0106",
 
221
  "preprompt": "",
222
  "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}GPT4 Correct User: {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}}<|end_of_turn|>GPT4 Correct Assistant:{{/ifUser}}{{#ifAssistant}}{{content}}<|end_of_turn|>{{/ifAssistant}}{{/each}}",
223
  "parameters": {
 
7
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png",
8
  "websiteUrl" : "https://mistral.ai/news/mixtral-of-experts/",
9
  "modelUrl": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1",
10
+ "tokenizer": "mistralai/Mixtral-8x7B-Instruct-v0.1",
11
  "preprompt" : "",
12
  "chatPromptTemplate": "<s> {{#each messages}}{{#ifUser}}[INST]{{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}} {{content}}</s> {{/ifAssistant}}{{/each}}",
13
  "parameters" : {
 
64
  "description": "The latest and biggest model from Meta, fine-tuned for chat.",
65
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/meta-logo.png",
66
  "websiteUrl": "https://ai.meta.com/llama/",
 
67
  "preprompt": " ",
68
  "chatPromptTemplate" : "<s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}}",
69
  "promptExamples": [
 
94
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/nous-logo.png",
95
  "websiteUrl" : "https://nousresearch.com/",
96
  "modelUrl": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
97
+ "tokenizer": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
98
  "chatPromptTemplate" : "{{#if @root.preprompt}}<|im_start|>system\n{{@root.preprompt}}<|im_end|>\n{{/if}}{{#each messages}}{{#ifUser}}<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n{{/ifUser}}{{#ifAssistant}}{{content}}<|im_end|>\n{{/ifAssistant}}{{/each}}",
99
  "promptExamples": [
100
  {
 
156
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png",
157
  "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
158
  "modelUrl": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1",
159
+ "tokenizer": "mistralai/Mistral-7B-Instruct-v0.1",
160
  "preprompt": "",
161
  "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
162
  "parameters": {
 
189
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/mistral-logo.png",
190
  "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/",
191
  "modelUrl": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2",
192
+ "tokenizer": "mistralai/Mistral-7B-Instruct-v0.2",
193
  "preprompt": "",
194
  "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}",
195
  "parameters": {
 
221
  "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/openchat-logo.png",
222
  "websiteUrl": "https://huggingface.co/openchat/openchat-3.5-0106",
223
  "modelUrl": "https://huggingface.co/openchat/openchat-3.5-0106",
224
+ "tokenizer": "openchat/openchat-3.5-0106",
225
  "preprompt": "",
226
  "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}GPT4 Correct User: {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}}<|end_of_turn|>GPT4 Correct Assistant:{{/ifUser}}{{#ifAssistant}}{{content}}<|end_of_turn|>{{/ifAssistant}}{{/each}}",
227
  "parameters": {
src/lib/components/AssistantSettings.svelte CHANGED
@@ -12,6 +12,7 @@
12
 
13
  import { useSettingsStore } from "$lib/stores/settings";
14
  import { isHuggingChat } from "$lib/utils/isHuggingChat";
 
15
 
16
  type ActionData = {
17
  error: boolean;
@@ -28,8 +29,10 @@
28
  export let models: Model[] = [];
29
 
30
  let files: FileList | null = null;
31
-
32
  const settings = useSettingsStore();
 
 
 
33
 
34
  let compress: typeof readAndCompressImage | null = null;
35
 
@@ -238,7 +241,11 @@
238
 
239
  <label>
240
  <div class="mb-1 font-semibold">Model</div>
241
- <select name="modelId" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2">
 
 
 
 
242
  {#each models.filter((model) => !model.unlisted) as model}
243
  <option
244
  value={model.id}
@@ -390,12 +397,25 @@
390
 
391
  <div class="col-span-1 flex h-full flex-col">
392
  <span class="mb-1 text-sm font-semibold"> Instructions (system prompt) </span>
393
- <textarea
394
- name="preprompt"
395
- class="mb-20 min-h-[8lh] flex-1 rounded-lg border-2 border-gray-200 bg-gray-100 p-2 text-sm"
396
- placeholder="You'll act as..."
397
- value={assistant?.preprompt ?? ""}
398
- />
 
 
 
 
 
 
 
 
 
 
 
 
 
399
  <p class="text-xs text-red-500">{getError("preprompt", form)}</p>
400
  </div>
401
  </div>
 
12
 
13
  import { useSettingsStore } from "$lib/stores/settings";
14
  import { isHuggingChat } from "$lib/utils/isHuggingChat";
15
+ import TokensCounter from "./TokensCounter.svelte";
16
 
17
  type ActionData = {
18
  error: boolean;
 
29
  export let models: Model[] = [];
30
 
31
  let files: FileList | null = null;
 
32
  const settings = useSettingsStore();
33
+ let modelId =
34
+ assistant?.modelId ?? models.find((_model) => _model.id === $settings.activeModel)?.name;
35
+ let systemPrompt = assistant?.preprompt ?? "";
36
 
37
  let compress: typeof readAndCompressImage | null = null;
38
 
 
241
 
242
  <label>
243
  <div class="mb-1 font-semibold">Model</div>
244
+ <select
245
+ name="modelId"
246
+ class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
247
+ bind:value={modelId}
248
+ >
249
  {#each models.filter((model) => !model.unlisted) as model}
250
  <option
251
  value={model.id}
 
397
 
398
  <div class="col-span-1 flex h-full flex-col">
399
  <span class="mb-1 text-sm font-semibold"> Instructions (system prompt) </span>
400
+ <div class="relative mb-20 flex min-h-[8lh] flex-1 grow flex-col">
401
+ <textarea
402
+ name="preprompt"
403
+ class="flex-1 rounded-lg border-2 border-gray-200 bg-gray-100 p-2 text-sm"
404
+ placeholder="You'll act as..."
405
+ bind:value={systemPrompt}
406
+ />
407
+ {#if modelId}
408
+ {@const model = models.find((_model) => _model.id === modelId)}
409
+ {#if model?.tokenizer && systemPrompt}
410
+ <TokensCounter
411
+ classNames="absolute bottom-2 right-2"
412
+ prompt={systemPrompt}
413
+ modelTokenizer={model.tokenizer}
414
+ truncate={model?.parameters?.truncate}
415
+ />
416
+ {/if}
417
+ {/if}
418
+ </div>
419
  <p class="text-xs text-red-500">{getError("preprompt", form)}</p>
420
  </div>
421
  </div>
src/lib/components/TokensCounter.svelte ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <script lang="ts">
2
+ import type { Model } from "$lib/types/Model";
3
+ import { AutoTokenizer, PreTrainedTokenizer } from "@xenova/transformers";
4
+
5
+ export let classNames = "";
6
+ export let prompt = "";
7
+ export let modelTokenizer: Exclude<Model["tokenizer"], undefined>;
8
+ export let truncate: number | undefined = undefined;
9
+
10
+ let tokenizer: PreTrainedTokenizer | undefined = undefined;
11
+
12
+ async function getTokenizer(_modelTokenizer: Exclude<Model["tokenizer"], undefined>) {
13
+ if (typeof _modelTokenizer === "string") {
14
+ // return auto tokenizer
15
+ return await AutoTokenizer.from_pretrained(_modelTokenizer);
16
+ }
17
+ {
18
+ // construct & return pretrained tokenizer
19
+ const { tokenizerUrl, tokenizerConfigUrl } = _modelTokenizer satisfies {
20
+ tokenizerUrl: string;
21
+ tokenizerConfigUrl: string;
22
+ };
23
+ const tokenizerJSON = await (await fetch(tokenizerUrl)).json();
24
+ const tokenizerConfig = await (await fetch(tokenizerConfigUrl)).json();
25
+ return new PreTrainedTokenizer(tokenizerJSON, tokenizerConfig);
26
+ }
27
+ }
28
+
29
+ async function tokenizeText(_prompt: string) {
30
+ if (!tokenizer) {
31
+ return;
32
+ }
33
+ const { input_ids } = await tokenizer(_prompt);
34
+ return input_ids.size;
35
+ }
36
+
37
+ $: (async () => {
38
+ tokenizer = await getTokenizer(modelTokenizer);
39
+ })();
40
+ </script>
41
+
42
+ {#if tokenizer}
43
+ {#await tokenizeText(prompt) then nTokens}
44
+ <p class="text-sm opacity-60 hover:opacity-80 {classNames}">
45
+ {nTokens}{truncate ? `/${truncate}` : ""}
46
+ </p>
47
+ {/await}
48
+ {/if}
src/lib/server/models.ts CHANGED
@@ -28,6 +28,15 @@ const modelConfig = z.object({
28
  logoUrl: z.string().url().optional(),
29
  websiteUrl: z.string().url().optional(),
30
  modelUrl: z.string().url().optional(),
 
 
 
 
 
 
 
 
 
31
  datasetName: z.string().min(1).optional(),
32
  datasetUrl: z.string().url().optional(),
33
  userMessageToken: z.string().default(""),
 
28
  logoUrl: z.string().url().optional(),
29
  websiteUrl: z.string().url().optional(),
30
  modelUrl: z.string().url().optional(),
31
+ tokenizer: z
32
+ .union([
33
+ z.string(),
34
+ z.object({
35
+ tokenizerUrl: z.string().url(),
36
+ tokenizerConfigUrl: z.string().url(),
37
+ }),
38
+ ])
39
+ .optional(),
40
  datasetName: z.string().min(1).optional(),
41
  datasetUrl: z.string().url().optional(),
42
  userMessageToken: z.string().default(""),
src/lib/types/Model.ts CHANGED
@@ -12,6 +12,7 @@ export type Model = Pick<
12
  | "description"
13
  | "logoUrl"
14
  | "modelUrl"
 
15
  | "datasetUrl"
16
  | "preprompt"
17
  | "multimodal"
 
12
  | "description"
13
  | "logoUrl"
14
  | "modelUrl"
15
+ | "tokenizer"
16
  | "datasetUrl"
17
  | "preprompt"
18
  | "multimodal"
src/routes/+layout.server.ts CHANGED
@@ -158,6 +158,7 @@ export const load: LayoutServerLoad = async ({ locals, depends }) => {
158
  name: model.name,
159
  websiteUrl: model.websiteUrl,
160
  modelUrl: model.modelUrl,
 
161
  datasetName: model.datasetName,
162
  datasetUrl: model.datasetUrl,
163
  displayName: model.displayName,
 
158
  name: model.name,
159
  websiteUrl: model.websiteUrl,
160
  modelUrl: model.modelUrl,
161
+ tokenizer: model.tokenizer,
162
  datasetName: model.datasetName,
163
  datasetUrl: model.datasetUrl,
164
  displayName: model.displayName,
src/routes/api/models/+server.ts CHANGED
@@ -6,6 +6,7 @@ export async function GET() {
6
  name: model.name,
7
  websiteUrl: model.websiteUrl,
8
  modelUrl: model.modelUrl,
 
9
  datasetName: model.datasetName,
10
  datasetUrl: model.datasetUrl,
11
  displayName: model.displayName,
 
6
  name: model.name,
7
  websiteUrl: model.websiteUrl,
8
  modelUrl: model.modelUrl,
9
+ tokenizer: model.tokenizer,
10
  datasetName: model.datasetName,
11
  datasetUrl: model.datasetUrl,
12
  displayName: model.displayName,
src/routes/settings/(nav)/[...model]/+page.svelte CHANGED
@@ -5,6 +5,7 @@
5
  import type { BackendModel } from "$lib/server/models";
6
  import { useSettingsStore } from "$lib/stores/settings";
7
  import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte";
 
8
  import CarbonArrowUpRight from "~icons/carbon/arrow-up-right";
9
  import CarbonLink from "~icons/carbon/link";
10
 
@@ -99,7 +100,7 @@
99
  {isActive ? "Active model" : "Activate"}
100
  </button>
101
 
102
- <div class="flex w-full flex-col gap-2">
103
  <div class="flex w-full flex-row content-between">
104
  <h3 class="mb-1.5 text-lg font-semibold text-gray-800">System Prompt</h3>
105
  {#if hasCustomPreprompt}
@@ -117,5 +118,13 @@
117
  class="w-full resize-none rounded-md border-2 bg-gray-100 p-2"
118
  bind:value={$settings.customPrompts[$page.params.model]}
119
  />
 
 
 
 
 
 
 
 
120
  </div>
121
  </div>
 
5
  import type { BackendModel } from "$lib/server/models";
6
  import { useSettingsStore } from "$lib/stores/settings";
7
  import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte";
8
+ import TokensCounter from "$lib/components/TokensCounter.svelte";
9
  import CarbonArrowUpRight from "~icons/carbon/arrow-up-right";
10
  import CarbonLink from "~icons/carbon/link";
11
 
 
100
  {isActive ? "Active model" : "Activate"}
101
  </button>
102
 
103
+ <div class="relative flex w-full flex-col gap-2">
104
  <div class="flex w-full flex-row content-between">
105
  <h3 class="mb-1.5 text-lg font-semibold text-gray-800">System Prompt</h3>
106
  {#if hasCustomPreprompt}
 
118
  class="w-full resize-none rounded-md border-2 bg-gray-100 p-2"
119
  bind:value={$settings.customPrompts[$page.params.model]}
120
  />
121
+ {#if model.tokenizer && $settings.customPrompts[$page.params.model]}
122
+ <TokensCounter
123
+ classNames="absolute bottom-2 right-2"
124
+ prompt={$settings.customPrompts[$page.params.model]}
125
+ modelTokenizer={model.tokenizer}
126
+ truncate={model?.parameters?.truncate}
127
+ />
128
+ {/if}
129
  </div>
130
  </div>