Commit 5ee3e16 (parent: a2706d6), committed by machineuser

Sync widgets demo

packages/tasks/src/snippets/curl.ts
CHANGED
@@ -1,8 +1,8 @@
-import type { ModelData } from "../model-data.js";
 import type { PipelineType } from "../pipelines.js";
 import { getModelInputSnippet } from "./inputs.js";
+import type { ModelDataMinimal } from "./types.js";
 
-export const snippetBasic = (model: ModelData, accessToken: string): string =>
+export const snippetBasic = (model: ModelDataMinimal, accessToken: string): string =>
 	`curl https://api-inference.huggingface.co/models/${model.id} \\
 	-X POST \\
 	-d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
@@ -10,7 +10,7 @@ export const snippetBasic = (model: ModelData, accessToken: string): string =>
 	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
 `;
 
-export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string =>
+export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
 	`curl https://api-inference.huggingface.co/models/${model.id} \\
 	-X POST \\
 	-d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
@@ -18,14 +18,14 @@ export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string =>
 	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
 `;
 
-export const snippetFile = (model: ModelData, accessToken: string): string =>
+export const snippetFile = (model: ModelDataMinimal, accessToken: string): string =>
 	`curl https://api-inference.huggingface.co/models/${model.id} \\
 	-X POST \\
 	--data-binary '@${getModelInputSnippet(model, true, true)}' \\
 	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
 `;
 
-export const curlSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
+export const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>> = {
 	// Same order as in js/src/lib/interfaces/Types.ts
 	"text-classification": snippetBasic,
 	"token-classification": snippetBasic,
@@ -51,12 +51,12 @@ export const curlSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
 	"image-segmentation": snippetFile,
 };
 
-export function getCurlInferenceSnippet(model: ModelData, accessToken: string): string {
+export function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
 	return model.pipeline_tag && model.pipeline_tag in curlSnippets
 		? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
 		: "";
 }
 
-export function hasCurlInferenceSnippet(model: ModelData): boolean {
+export function hasCurlInferenceSnippet(model: Pick<ModelDataMinimal, "pipeline_tag">): boolean {
 	return !!model.pipeline_tag && model.pipeline_tag in curlSnippets;
 }
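For context, a minimal usage sketch of the updated curl helpers. This is not part of the commit: the import paths and the example model descriptor are assumptions, and mask_token / library_name are omitted on the assumption that they are optional in ModelData (and therefore in the Pick).

import { getCurlInferenceSnippet, hasCurlInferenceSnippet } from "./curl.js";
import type { ModelDataMinimal } from "./types.js";

// Hypothetical model descriptor with only the picked fields this call path reads.
const model: ModelDataMinimal = {
    id: "my-org/my-text-classifier",
    pipeline_tag: "text-classification",
};

if (hasCurlInferenceSnippet(model)) {
    // An empty accessToken keeps the `{API_TOKEN}` placeholder in the generated command.
    console.log(getCurlInferenceSnippet(model, ""));
}
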
packages/tasks/src/snippets/inputs.ts
CHANGED
@@ -1,5 +1,5 @@
-import type { ModelData } from "../model-data";
 import type { PipelineType } from "../pipelines";
+import type { ModelDataMinimal } from "./types";
 
 const inputsZeroShotClassification = () =>
 	`"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`;
@@ -44,7 +44,7 @@ const inputsTextGeneration = () => `"Can you please let us know more details abo
 
 const inputsText2TextGeneration = () => `"The answer to the universe is"`;
 
-const inputsFillMask = (model: ModelData) => `"The answer to the universe is ${model.mask_token}."`;
+const inputsFillMask = (model: ModelDataMinimal) => `"The answer to the universe is ${model.mask_token}."`;
 
 const inputsSentenceSimilarity = () =>
 	`{
@@ -84,7 +84,7 @@ const inputsTabularPrediction = () =>
 const inputsZeroShotImageClassification = () => `"cats.jpg"`;
 
 const modelInputSnippets: {
-	[key in PipelineType]?: (model: ModelData) => string;
+	[key in PipelineType]?: (model: ModelDataMinimal) => string;
 } = {
 	"audio-to-audio": inputsAudioToAudio,
 	"audio-classification": inputsAudioClassification,
@@ -116,7 +116,7 @@ const modelInputSnippets: {
 
 // Use noWrap to put the whole snippet on a single line (removing new lines and tabulations)
 // Use noQuotes to strip quotes from start & end (example: "abc" -> abc)
-export function getModelInputSnippet(model: ModelData, noWrap = false, noQuotes = false): string {
+export function getModelInputSnippet(model: ModelDataMinimal, noWrap = false, noQuotes = false): string {
 	if (model.pipeline_tag) {
 		const inputs = modelInputSnippets[model.pipeline_tag];
 		if (inputs) {
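A short sketch of how the input helper behaves for a fill-mask model, based on the inputsFillMask template and the noWrap/noQuotes comments above. Not part of the commit; the model descriptor is hypothetical and assumes "fill-mask" is registered in modelInputSnippets.

import { getModelInputSnippet } from "./inputs.js";
import type { ModelDataMinimal } from "./types.js";

// Hypothetical fill-mask descriptor; mask_token is the field inputsFillMask interpolates.
const model: ModelDataMinimal = {
    id: "my-org/my-masked-lm",
    pipeline_tag: "fill-mask",
    mask_token: "[MASK]",
};

// Default: a quoted snippet, e.g. "The answer to the universe is [MASK]." per the template above.
const quoted = getModelInputSnippet(model);

// noWrap collapses newlines/tabs, noQuotes strips the surrounding quotes; this is the
// (model, true, true) form the file-upload snippets call (see snippetFile in curl.ts).
const bare = getModelInputSnippet(model, true, true);
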
packages/tasks/src/snippets/js.ts
CHANGED
@@ -1,8 +1,8 @@
-import type { ModelData } from "../model-data.js";
 import type { PipelineType } from "../pipelines.js";
 import { getModelInputSnippet } from "./inputs.js";
+import type { ModelDataMinimal } from "./types.js";
 
-export const snippetBasic = (model: ModelData, accessToken: string): string =>
+export const snippetBasic = (model: ModelDataMinimal, accessToken: string): string =>
 	`async function query(data) {
 	    const response = await fetch(
 	        "https://api-inference.huggingface.co/models/${model.id}",
@@ -20,7 +20,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
     console.log(JSON.stringify(response));
 });`;
 
-export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string =>
+export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
 	`async function query(data) {
 	    const response = await fetch(
 	        "https://api-inference.huggingface.co/models/${model.id}",
@@ -40,7 +40,7 @@ query({"inputs": ${getModelInputSnippet(
     console.log(JSON.stringify(response));
 });`;
 
-export const snippetTextToImage = (model: ModelData, accessToken: string): string =>
+export const snippetTextToImage = (model: ModelDataMinimal, accessToken: string): string =>
 	`async function query(data) {
 	    const response = await fetch(
 	        "https://api-inference.huggingface.co/models/${model.id}",
@@ -57,7 +57,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
     // Use image
 });`;
 
-export const snippetTextToAudio = (model: ModelData, accessToken: string): string => {
+export const snippetTextToAudio = (model: ModelDataMinimal, accessToken: string): string => {
 	const commonSnippet = `async function query(data) {
 	    const response = await fetch(
 	        "https://api-inference.huggingface.co/models/${model.id}",
@@ -93,7 +93,7 @@ export const snippetTextToAudio = (model: ModelData, accessToken: string): string => {
 	}
 };
 
-export const snippetFile = (model: ModelData, accessToken: string): string =>
+export const snippetFile = (model: ModelDataMinimal, accessToken: string): string =>
 	`async function query(filename) {
 	    const data = fs.readFileSync(filename);
 	    const response = await fetch(
@@ -112,7 +112,7 @@ query(${getModelInputSnippet(model)}).then((response) => {
     console.log(JSON.stringify(response));
 });`;
 
-export const jsSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
+export const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>> = {
 	// Same order as in js/src/lib/interfaces/Types.ts
 	"text-classification": snippetBasic,
 	"token-classification": snippetBasic,
@@ -138,12 +138,12 @@ export const jsSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
 	"image-segmentation": snippetFile,
 };
 
-export function getJsInferenceSnippet(model: ModelData, accessToken: string): string {
+export function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
 	return model.pipeline_tag && model.pipeline_tag in jsSnippets
 		? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
 		: "";
 }
 
-export function hasJsInferenceSnippet(model: ModelData): boolean {
+export function hasJsInferenceSnippet(model: ModelDataMinimal): boolean {
 	return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
 }
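Worth noting about the narrowed parameter type: since ModelDataMinimal is a Pick of ModelData, existing callers that hold a full ModelData keep working unchanged under TypeScript's structural typing, while lighter callers only need the picked fields. A hedged sketch (the extra field, the token, and the assumption that the non-id picked fields are optional are all mine, not part of the commit):

import { getJsInferenceSnippet } from "./js.js";

// A richer object than ModelDataMinimal is still accepted; unrelated fields are simply ignored.
const richModel = {
    id: "my-org/my-text-classifier",
    pipeline_tag: "text-classification" as const,
    tags: ["pytorch", "transformers"], // hypothetical extra field
};

const snippet = getJsInferenceSnippet(richModel, "hf_xxx");
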
packages/tasks/src/snippets/python.ts
CHANGED
@@ -1,8 +1,8 @@
-import type { ModelData } from "../model-data.js";
 import type { PipelineType } from "../pipelines.js";
 import { getModelInputSnippet } from "./inputs.js";
+import type { ModelDataMinimal } from "./types.js";
 
-export const snippetZeroShotClassification = (model: ModelData): string =>
+export const snippetZeroShotClassification = (model: ModelDataMinimal): string =>
 	`def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.json()
@@ -12,7 +12,7 @@ output = query({
     "parameters": {"candidate_labels": ["refund", "legal", "faq"]},
 })`;
 
-export const snippetZeroShotImageClassification = (model: ModelData): string =>
+export const snippetZeroShotImageClassification = (model: ModelDataMinimal): string =>
 	`def query(data):
     with open(data["image_path"], "rb") as f:
         img = f.read()
@@ -28,7 +28,7 @@ output = query({
     "parameters": {"candidate_labels": ["cat", "dog", "llama"]},
 })`;
 
-export const snippetBasic = (model: ModelData): string =>
+export const snippetBasic = (model: ModelDataMinimal): string =>
 	`def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.json()
@@ -37,7 +37,7 @@ output = query({
     "inputs": ${getModelInputSnippet(model)},
 })`;
 
-export const snippetFile = (model: ModelData): string =>
+export const snippetFile = (model: ModelDataMinimal): string =>
 	`def query(filename):
     with open(filename, "rb") as f:
         data = f.read()
@@ -46,7 +46,7 @@ export const snippetFile = (model: ModelData): string =>
 
 output = query(${getModelInputSnippet(model)})`;
 
-export const snippetTextToImage = (model: ModelData): string =>
+export const snippetTextToImage = (model: ModelDataMinimal): string =>
 	`def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.content
@@ -58,7 +58,7 @@ import io
 from PIL import Image
 image = Image.open(io.BytesIO(image_bytes))`;
 
-export const snippetTabular = (model: ModelData): string =>
+export const snippetTabular = (model: ModelDataMinimal): string =>
 	`def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.content
@@ -66,7 +66,7 @@ response = query({
     "inputs": {"data": ${getModelInputSnippet(model)}},
 })`;
 
-export const snippetTextToAudio = (model: ModelData): string => {
+export const snippetTextToAudio = (model: ModelDataMinimal): string => {
 	// Transformers TTS pipeline and api-inference-community (AIC) pipeline outputs are diverged
 	// with the latest update to inference-api (IA).
 	// Transformers IA returns a byte object (wav file), whereas AIC returns wav and sampling_rate.
@@ -95,7 +95,7 @@ Audio(audio, rate=sampling_rate)`;
 	}
 };
 
-export const snippetDocumentQuestionAnswering = (model: ModelData): string =>
+export const snippetDocumentQuestionAnswering = (model: ModelDataMinimal): string =>
 	`def query(payload):
     with open(payload["image"], "rb") as f:
         img = f.read()
@@ -107,7 +107,7 @@ output = query({
     "inputs": ${getModelInputSnippet(model)},
 })`;
 
-export const pythonSnippets: Partial<Record<PipelineType, (model: ModelData) => string>> = {
+export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal) => string>> = {
 	// Same order as in tasks/src/pipelines.ts
 	"text-classification": snippetBasic,
 	"token-classification": snippetBasic,
@@ -137,7 +137,7 @@ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelData) => string>> = {
 	"zero-shot-image-classification": snippetZeroShotImageClassification,
 };
 
-export function getPythonInferenceSnippet(model: ModelData, accessToken: string): string {
+export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
 	const body =
 		model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model) ?? "" : "";
 
@@ -149,6 +149,6 @@ headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
 ${body}`;
 }
 
-export function hasPythonInferenceSnippet(model: ModelData): boolean {
+export function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean {
 	return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets;
 }
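One asymmetry visible in the signatures above: the per-pipeline Python builders now take only a ModelDataMinimal, while getPythonInferenceSnippet still takes accessToken, because the Authorization header is emitted in the wrapper it prepends around the body. A hedged call sketch (import path and descriptor are assumptions, with only id assumed required, as before):

import { getPythonInferenceSnippet } from "./python.js";

// Hypothetical descriptor; "text-classification" maps to snippetBasic in pythonSnippets.
const model = { id: "my-org/my-sentiment-model", pipeline_tag: "text-classification" as const };

// With an empty token the generated header falls back to the Bearer {API_TOKEN} placeholder
// form shown in the hunk at old line 149 above.
const pySnippet = getPythonInferenceSnippet(model, "");
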
packages/tasks/src/snippets/types.ts
ADDED
@@ -0,0 +1,8 @@
+import type { ModelData } from "../model-data";
+
+/**
+ * Minimal model data required for snippets.
+ *
+ * Add more fields as needed.
+ */
+export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name">;
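Since the new type is just a Pick over ModelData, following the "Add more fields as needed" note means widening the set of picked keys; consumers construct only those fields. A small sketch (field optionality and the extra key are assumptions, not part of the commit):

import type { ModelDataMinimal } from "./types";

// The four picked fields; optionality follows ModelData, where only `id` is assumed required.
const minimal: ModelDataMinimal = {
    id: "my-org/my-model",
    pipeline_tag: "fill-mask",
    mask_token: "<mask>",
    library_name: "transformers",
};

// Hypothetical future extension: pick one more key from ModelData when a snippet needs it, e.g.
// export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;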