machineuser committed
Commit • 6c7ce80
Parent(s): 9c0be59

Sync widgets demo
Files changed:
- packages/tasks/package.json +1 -1
- packages/tasks/src/const.ts +2 -0
- packages/tasks/src/index.ts +5 -1
- packages/tasks/src/modelLibraries.ts +4 -0
- packages/tasks/src/pipelines.ts +17 -0
- packages/tasks/src/tags.ts +15 -0
- packages/tasks/src/tasksData.ts +2 -0
- packages/tasks/src/video-classification/about.md +8 -28
- packages/widgets/src/lib/components/Icons/IconMaskGeneration.svelte +22 -0
- packages/widgets/src/lib/components/Icons/IconZeroShotObjectDetection.svelte +27 -0
- packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte +4 -0
- packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte +2 -2
- packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte +4 -0
- packages/widgets/src/routes/+page.svelte +2 -2
- pnpm-lock.yaml +0 -0
packages/tasks/package.json
CHANGED
```diff
@@ -1,7 +1,7 @@
 {
 	"name": "@huggingface/tasks",
 	"packageManager": "[email protected]",
-	"version": "0.0.
+	"version": "0.0.6",
 	"description": "List of ML tasks for huggingface.co/tasks",
 	"repository": "https://github.com/huggingface/huggingface.js.git",
 	"publishConfig": {
```
packages/tasks/src/const.ts
CHANGED
```diff
@@ -19,6 +19,7 @@ export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
 	"image-to-image": [],
 	"image-to-text": ["transformers.js"],
 	"video-classification": [],
+	"mask-generation": ["transformers"],
 	"multiple-choice": ["transformers"],
 	"object-detection": ["transformers", "transformers.js"],
 	other: [],
@@ -56,4 +57,5 @@ export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
 	"voice-activity-detection": [],
 	"zero-shot-classification": ["transformers", "transformers.js"],
 	"zero-shot-image-classification": ["transformers.js"],
+	"zero-shot-object-detection": ["transformers"],
 };
```
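`TASKS_MODEL_LIBRARIES` maps each pipeline type to the model libraries that can serve it, so the two new keys become queryable like any other entry. A minimal sketch of a lookup, assuming the constant is exported from the package root (only part of `index.ts` appears in this diff, so that export is an assumption):

```ts
import { TASKS_MODEL_LIBRARIES } from "@huggingface/tasks"; // assumed root export
import type { ModelLibraryKey } from "@huggingface/tasks";

// Look up which libraries back the pipeline types added in this commit.
const maskGenLibs: ModelLibraryKey[] = TASKS_MODEL_LIBRARIES["mask-generation"];
const zsodLibs: ModelLibraryKey[] = TASKS_MODEL_LIBRARIES["zero-shot-object-detection"];
console.log(maskGenLibs, zsodLibs); // ["transformers"] ["transformers"]
```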
packages/tasks/src/index.ts
CHANGED
```diff
@@ -8,6 +8,10 @@ export {
 	type Modality,
 	MODALITIES,
 	MODALITY_LABELS,
+	SUBTASK_TYPES,
+	PIPELINE_TYPES_SET,
 } from "./pipelines";
-export { ModelLibrary } from "./modelLibraries";
+export { ModelLibrary, ALL_DISPLAY_MODEL_LIBRARY_KEYS } from "./modelLibraries";
 export type { ModelLibraryKey } from "./modelLibraries";
+
+export { TAG_NFAA_CONTENT, OTHER_TAGS_SUGGESTIONS, TAG_TEXT_GENERATION_INFERENCE, TAG_CUSTOM_CODE } from "./tags";
```
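With these re-exports, downstream consumers can pull the new symbols from the package root instead of deep-importing `pipelines.ts`, `modelLibraries.ts`, or `tags.ts`. A minimal sketch of the resulting import surface:

```ts
import {
	SUBTASK_TYPES,
	PIPELINE_TYPES_SET,
	ALL_DISPLAY_MODEL_LIBRARY_KEYS,
	TAG_NFAA_CONTENT,
	OTHER_TAGS_SUGGESTIONS,
} from "@huggingface/tasks";

// Everything above now resolves from the package root.
console.log(PIPELINE_TYPES_SET.has("mask-generation")); // true
console.log(OTHER_TAGS_SUGGESTIONS.includes(TAG_NFAA_CONTENT)); // true
```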
packages/tasks/src/modelLibraries.ts
CHANGED
```diff
@@ -41,3 +41,7 @@ export enum ModelLibrary {
 }
 
 export type ModelLibraryKey = keyof typeof ModelLibrary;
+
+export const ALL_DISPLAY_MODEL_LIBRARY_KEYS = Object.keys(ModelLibrary).filter(
+	(k) => !["doctr", "k2", "mindspore", "tensorflowtts"].includes(k)
+);
```
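`ALL_DISPLAY_MODEL_LIBRARY_KEYS` is the enum's key list minus a few libraries (`doctr`, `k2`, `mindspore`, `tensorflowtts`) that are evidently not meant to be surfaced in UI pickers. A hedged sketch of how a frontend might consume it; the dropdown wiring is hypothetical, not part of this commit:

```ts
import { ModelLibrary, ALL_DISPLAY_MODEL_LIBRARY_KEYS } from "@huggingface/tasks";
import type { ModelLibraryKey } from "@huggingface/tasks";

// Object.keys() returns string[], so narrow back to the enum's keys.
const keys = ALL_DISPLAY_MODEL_LIBRARY_KEYS as ModelLibraryKey[];

// Hypothetical: options for a "filter by library" dropdown.
const options = keys.map((key) => ({
	value: key,
	label: ModelLibrary[key], // the enum's display string for that key
}));
```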
packages/tasks/src/pipelines.ts
CHANGED
```diff
@@ -606,6 +606,16 @@ export const PIPELINE_DATA = {
 		modality: "multimodal",
 		color: "green",
 	},
+	"mask-generation": {
+		name: "Mask Generation",
+		modality: "cv",
+		color: "indigo",
+	},
+	"zero-shot-object-detection": {
+		name: "Zero-Shot Object Detection",
+		modality: "cv",
+		color: "yellow",
+	},
 	other: {
 		name: "Other",
 		modality: "other",
@@ -616,4 +626,11 @@ export const PIPELINE_DATA = {
 } satisfies Record<string, PipelineData>;
 
 export type PipelineType = keyof typeof PIPELINE_DATA;
+
 export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[];
+
+export const SUBTASK_TYPES = Object.values(PIPELINE_DATA)
+	.flatMap((data) => ("subtasks" in data ? data.subtasks : []))
+	.map((s) => s.type);
+
+export const PIPELINE_TYPES_SET = new Set(PIPELINE_TYPES);
```
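`SUBTASK_TYPES` flattens the `subtasks` arrays declared in `PIPELINE_DATA` into one list of subtask identifiers, and `PIPELINE_TYPES_SET` gives O(1) membership checks instead of scanning the `PIPELINE_TYPES` array. A minimal sketch of how a consumer might validate a tag; the helper itself is hypothetical:

```ts
import { PIPELINE_TYPES_SET, SUBTASK_TYPES } from "@huggingface/tasks";
import type { PipelineType } from "@huggingface/tasks";

// Hypothetical helper: classify a free-form model tag.
function classifyTag(tag: string): "pipeline" | "subtask" | "unknown" {
	if (PIPELINE_TYPES_SET.has(tag as PipelineType)) return "pipeline";
	if (SUBTASK_TYPES.includes(tag)) return "subtask";
	return "unknown";
}

classifyTag("zero-shot-object-detection"); // "pipeline" — added in this commit
```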
packages/tasks/src/tags.ts
ADDED
```diff
@@ -0,0 +1,15 @@
+export const TAG_NFAA_CONTENT = "not-for-all-audiences";
+export const OTHER_TAGS_SUGGESTIONS = [
+	"chemistry",
+	"biology",
+	"finance",
+	"legal",
+	"music",
+	"art",
+	"code",
+	"climate",
+	"medical",
+	TAG_NFAA_CONTENT,
+];
+export const TAG_TEXT_GENERATION_INFERENCE = "text-generation-inference";
+export const TAG_CUSTOM_CODE = "custom_code";
```
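The new module centralizes a handful of tag constants. A hedged sketch of the kind of checks a UI could build on them; the model tag list below is made up for illustration:

```ts
import { TAG_NFAA_CONTENT, TAG_CUSTOM_CODE, OTHER_TAGS_SUGGESTIONS } from "@huggingface/tasks";

// Hypothetical model tags, for illustration only.
const modelTags = ["legal", "custom_code"];

const isNFAA = modelTags.includes(TAG_NFAA_CONTENT); // gate not-for-all-audiences content
const hasCustomCode = modelTags.includes(TAG_CUSTOM_CODE); // model ships custom modeling code
// Suggest domain tags the model does not carry yet.
const suggestions = OTHER_TAGS_SUGGESTIONS.filter((t) => !modelTags.includes(t));
```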
packages/tasks/src/tasksData.ts
CHANGED
```diff
@@ -54,6 +54,7 @@ export const TASKS_DATA: Record<PipelineType, TaskData | undefined> = {
 	"image-segmentation": getData("image-segmentation", imageSegmentation),
 	"image-to-image": getData("image-to-image", imageToImage),
 	"image-to-text": getData("image-to-text", imageToText),
+	"mask-generation": getData("mask-generation", placeholder),
 	"multiple-choice": undefined,
 	"object-detection": getData("object-detection", objectDetection),
 	"video-classification": getData("video-classification", videoClassification),
@@ -84,6 +85,7 @@ export const TASKS_DATA: Record<PipelineType, TaskData | undefined> = {
 	"voice-activity-detection": undefined,
 	"zero-shot-classification": getData("zero-shot-classification", zeroShotClassification),
 	"zero-shot-image-classification": getData("zero-shot-image-classification", zeroShotImageClassification),
+	"zero-shot-object-detection": getData("zero-shot-object-detection", placeholder),
 } as const;
 
 /**
```
packages/tasks/src/video-classification/about.md
CHANGED
````diff
@@ -15,34 +15,14 @@ Models trained in video classification can improve user experience by organizing
 Below you can find code for inferring with a pre-trained video classification model.
 
 ```python
-from transformers import
-
-
-
-
-#
-
-
-
-# Sub-sample a fixed set of frames and convert them to a NumPy array.
-num_frames = 16
-subsampler = UniformTemporalSubsample(num_frames)
-subsampled_frames = subsampler(video_data)
-video_data_np = subsampled_frames.numpy().transpose(1, 2, 3, 0)
-
-# Preprocess the video frames.
-inputs = feature_extractor(list(video_data_np), return_tensors="pt")
-
-# Run inference
-with torch.no_grad():
-    outputs = model(**inputs)
-    logits = outputs.logits
-
-# Model predicts one of the 400 Kinetics 400 classes
-predicted_label = logits.argmax(-1).item()
-print(model.config.id2label[predicted_label])
-# `eating spaghetti` (if you chose this video:
-# https://hf.co/datasets/nielsr/video-demo/resolve/main/eating_spaghetti.mp4)
+from transformers import pipeline
+
+pipe = pipeline(task = "video-classification", model="nateraw/videomae-base-finetuned-ucf101-subset")
+pipe("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/basketball.avi?download=true")
+
+#[{'score': 0.90, 'label': 'BasketballDunk'},
+# {'score': 0.02, 'label': 'BalanceBeam'},
+# ... ]
 ```
 
 ## Useful Resources
````
packages/widgets/src/lib/components/Icons/IconMaskGeneration.svelte
ADDED
```diff
@@ -0,0 +1,22 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 12 12"
+	><path
+		fill="currentColor"
+		fill-rule="evenodd"
+		d="M1.84.73h6.63a.7.7 0 0 1 .7.7v4.36h-.7V1.43H1.84v3.54l.9-.9a.7.7 0 0 1 1 0l1.74 1.75a.79.79 0 0 0-.52.47L3.24 4.57l-1.4 1.4v2.08h3.07v.7H1.84a.7.7 0 0 1-.7-.7V1.43a.7.7 0 0 1 .7-.7Zm5.38 4.74.32.32H5.91l.32-.32a.7.7 0 0 1 .99 0Zm-.61-1.43A1.05 1.05 0 1 1 5.45 2.3 1.05 1.05 0 0 1 6.6 4.04Zm-.39-1.16a.35.35 0 1 0-.39.58.35.35 0 0 0 .4-.58Zm3.99 8.43a.65.65 0 0 0 .56-.64v-1.3h-.65.65v1.3a.65.65 0 0 1-.56.64Zm-.09-.64h-1.3 1.3Zm-1.33.68h1.33a.68.68 0 0 0 .68-.68V9.34h-.7v1.3H8.8v.71ZM6.22 8.43v-1.3 1.3Zm1.3-1.3v-.66h-1.3 1.3v.65ZM5.54 8.45h.7v-1.3h1.3v-.72H6.23a.68.68 0 0 0-.68.68v1.34Zm1.98 2.86v-.65h-1.3v-1.3h-.65.65v1.3h1.3v.65ZM5.54 9.34v1.33a.68.68 0 0 0 .68.68h1.33v-.71h-1.3v-1.3h-.71Zm5.23-.91v-1.3a.65.65 0 0 0-.65-.66h-1.3 1.3a.65.65 0 0 1 .65.65v1.3ZM8.8 6.44v.71h1.3v1.3h.71V7.13a.68.68 0 0 0-.68-.68H8.8Z"
+		clip-rule="evenodd"
+	/></svg
+>
```
packages/widgets/src/lib/components/Icons/IconZeroShotObjectDetection.svelte
ADDED
```diff
@@ -0,0 +1,27 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 12 12"
+	><path
+		fill="currentColor"
+		fill-rule="evenodd"
+		d="M1.84.73h6.63a.7.7 0 0 1 .7.7v4.36h-.7V1.43H1.84v3.54l.9-.9a.7.7 0 0 1 1 0l1.74 1.75a.79.79 0 0 0-.52.47L3.24 4.57l-1.4 1.4v2.08h3.07v.7H1.84a.7.7 0 0 1-.7-.7V1.43a.7.7 0 0 1 .7-.7Zm5.38 4.74.32.32H5.91l.32-.32a.7.7 0 0 1 .99 0Zm-.61-1.43A1.05 1.05 0 1 1 5.45 2.3 1.05 1.05 0 0 1 6.6 4.04Zm-.39-1.16a.35.35 0 1 0-.39.58.35.35 0 0 0 .4-.58Z"
+		clip-rule="evenodd"
+	/><path
+		fill="currentColor"
+		fill-rule="evenodd"
+		d="M7.77 7.07h-1.6v1.42h1.6V7.07Zm-2.2-.6v2.62h2.8V6.47h-2.8ZM8.53 10.17H6.17v.43h2.36v-.43Zm-2.96-.6v1.63h3.55V9.57H5.58ZM10.16 7.07h-.72v1.42h.72V7.07Zm-1.32-.6v2.62h1.92V6.47H8.84Z"
+		clip-rule="evenodd"
+	/></svg
+>
```
packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte
CHANGED
```diff
@@ -27,6 +27,7 @@
 	import ZeroShotClassificationWidget from "./widgets/ZeroShowClassificationWidget/ZeroShotClassificationWidget.svelte";
 	import ZeroShotImageClassificationWidget from "./widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte";
 	import type { PipelineType } from "@huggingface/tasks";
+	import WidgetInfo from "./shared/WidgetInfo/WidgetInfo.svelte";
 
 	export let apiToken: WidgetProps["apiToken"] = undefined;
 	export let callApiOnMount = false;
@@ -97,4 +98,7 @@
 
 {#if widgetComponent}
 	<svelte:component this={widgetComponent} {...widgetProps} />
+{:else}
+	<!-- Still show widget error (such as "pipeline not support", etc.) when there is no widget for a task -->
+	<WidgetInfo {model} />
 {/if}
```
packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte
CHANGED
```diff
@@ -5,8 +5,8 @@
 	import IconInfo from "$lib/components/Icons/IconInfo.svelte";
 
 	export let model: WidgetProps["model"];
-	export let computeTime: string;
-	export let error: string;
+	export let computeTime: string = "";
+	export let error: string = "";
 	export let modelLoadInfo: ModelLoadInfo | undefined = undefined;
 	export let modelTooBig = false;
 
```
packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte
CHANGED
```diff
@@ -35,6 +35,8 @@
 	import IconUnconditionalImageGeneration from "../Icons/IconUnconditionalImageGeneration.svelte";
 	import IconDocumentQuestionAnswering from "../Icons/IconDocumentQuestionAnswering.svelte";
 	import IconGraphML from "../Icons/IconGraphML.svelte";
+	import IconZeroShotObjectDetection from "../Icons/IconZeroShotClassification.svelte";
+	import IconMaskGeneration from "../Icons/IconMaskGeneration.svelte";
 	import type { PipelineType } from "@huggingface/tasks";
 
 	export let classNames = "";
@@ -80,6 +82,8 @@
 	"tabular-regression": IconTabularRegression,
 	"text-to-video": IconTextToVideo,
 	"document-question-answering": IconDocumentQuestionAnswering,
+	"mask-generation": IconMaskGeneration,
+	"zero-shot-object-detection": IconZeroShotObjectDetection,
 };
 
 $: iconComponent =
```
packages/widgets/src/routes/+page.svelte
CHANGED
```diff
@@ -442,8 +442,8 @@
 	const modelsDisabled: ModelData[] = [
 		{
 			id: "gpt2",
-			pipeline_tag:
-			inference: InferenceDisplayability.
+			pipeline_tag: undefined,
+			inference: InferenceDisplayability.PipelineNotDetected,
 		},
 		{
 			id: "gpt2",
```
pnpm-lock.yaml
CHANGED
The diff for this file is too large to render.