jbilcke-hf HF staff commited on
Commit
193dbf1
1 Parent(s): 17ee0ab

fix replicate

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. bun.lockb +0 -0
  2. package.json +13 -13
  3. packages/api-client/package.json +2 -0
  4. packages/app/package.json +1 -1
  5. packages/app/src/app/api/resolve/providers/replicate/index.ts +57 -14
  6. packages/app/src/app/api/resolve/route.ts +3 -0
  7. packages/app/src/components/toolbars/top-menu/lists/AssistantWorkflows.tsx +11 -9
  8. packages/app/src/components/toolbars/top-menu/lists/ImageDepthWorkflows.tsx +11 -9
  9. packages/app/src/components/toolbars/top-menu/lists/ImageGenerationWorkflows.tsx +80 -26
  10. packages/app/src/components/toolbars/top-menu/lists/ImageSegmentationWorkflows.tsx +15 -9
  11. packages/app/src/components/toolbars/top-menu/lists/ImageUpscalingWorkflows.tsx +13 -9
  12. packages/app/src/components/toolbars/top-menu/lists/LoraModelList.tsx +66 -0
  13. packages/app/src/components/toolbars/top-menu/lists/MusicGenerationWorkflows.tsx +13 -9
  14. packages/app/src/components/toolbars/top-menu/lists/SoundGenerationWorkflows.tsx +13 -9
  15. packages/app/src/components/toolbars/top-menu/lists/VideoDepthWorkflows.tsx +11 -9
  16. packages/app/src/components/toolbars/top-menu/lists/VideoGenerationWorkflows.tsx +13 -9
  17. packages/app/src/components/toolbars/top-menu/lists/VideoSegmentationWorkflows.tsx +15 -9
  18. packages/app/src/components/toolbars/top-menu/lists/VideoUpscalingWorkflows.tsx +13 -9
  19. packages/app/src/components/toolbars/top-menu/lists/VoiceGenerationWorkflows.tsx +13 -9
  20. packages/app/src/components/toolbars/top-menu/lists/getWorkflowProviders.ts +10 -8
  21. packages/app/src/lib/utils/decodeOutput.ts +1 -0
  22. packages/app/src/lib/utils/fetchContentToBase64.ts +18 -1
  23. packages/app/src/services/editors/filter-editor/filters/analogLens.ts +10 -0
  24. packages/app/src/services/editors/filter-editor/filters/cinematic.ts +6 -0
  25. packages/app/src/services/editors/filter-editor/filters/colorMapping.ts +4 -0
  26. packages/app/src/services/editors/filter-editor/filters/colorTemperature.ts +3 -0
  27. packages/app/src/services/editors/filter-editor/filters/crossProcessing.ts +4 -0
  28. packages/app/src/services/editors/filter-editor/filters/filmDegradation.ts +9 -0
  29. packages/app/src/services/editors/filter-editor/filters/infrared.ts +4 -0
  30. packages/app/src/services/editors/filter-editor/filters/lomography.ts +5 -0
  31. packages/app/src/services/editors/filter-editor/filters/splitToning.ts +5 -0
  32. packages/app/src/services/editors/filter-editor/filters/toneMapping.ts +6 -0
  33. packages/app/src/services/editors/filter-editor/filters/vintageFilm.ts +8 -0
  34. packages/app/src/services/editors/workflow-editor/workflows/aitube/index.ts +2 -0
  35. packages/app/src/services/editors/workflow-editor/workflows/anthropic/index.ts +3 -0
  36. packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts +1 -0
  37. packages/app/src/services/editors/workflow-editor/workflows/cohere/index.ts +4 -0
  38. packages/app/src/services/editors/workflow-editor/workflows/comfyicu/index.ts +1 -0
  39. packages/app/src/services/editors/workflow-editor/workflows/comfyui/index.ts +6 -0
  40. packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts +44 -7
  41. packages/app/src/services/editors/workflow-editor/workflows/common/loras/canWorkflowUseLora.ts +9 -0
  42. packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowInputField.ts +8 -0
  43. packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora.ts +28 -0
  44. packages/app/src/services/editors/workflow-editor/workflows/common/loras/index.ts +73 -0
  45. packages/app/src/services/editors/workflow-editor/workflows/common/types.ts +32 -0
  46. packages/app/src/services/editors/workflow-editor/workflows/elevenlabs/index.ts +2 -0
  47. packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts +41 -0
  48. packages/app/src/services/editors/workflow-editor/workflows/replicate/defaultWorkflows.ts +35 -3
  49. packages/app/src/services/settings/useSettings.ts +143 -148
  50. packages/app/src/services/settings/workflows/parseWorkflow.ts +76 -0
bun.lockb CHANGED
Binary files a/bun.lockb and b/bun.lockb differ
 
package.json CHANGED
@@ -1,22 +1,12 @@
1
  {
2
  "name": "@aitube/clapper-monorepo",
3
  "version": "0.2.4",
4
- "private": true,
5
  "description": "A monorepo for the Clapper project. Individual packages are in the packages directory.",
6
- "workspaces": [
7
- "packages/clap",
8
- "packages/timeline",
9
- "packages/api-client",
10
- "packages/io",
11
- "packages/colors",
12
- "packages/engine",
13
- "packages/broadway",
14
- "packages/clapper-services",
15
- "packages/app"
16
- ],
17
  "engines": {
18
  "bun": ">=1.0.0"
19
  },
 
 
20
  "scripts": {
21
  "dev": "bun run --cwd packages/app dev",
22
  "start": "bun run --cwd packages/app start",
@@ -36,10 +26,20 @@
36
  "test:all": "bun run --cwd packages/clap test && bun run --cwd packages/timeline test && bun run --cwd packages/api-client test && bun run --cwd packages/io test && bun run --cwd packages/colors test && bun run --cwd packages/engine test && bun run --cwd packages/broadway test && bun run --cwd packages/clapper-services test && bun run --cwd packages/app test",
37
  "format": "bun run --cwd packages/app format"
38
  },
39
- "packageManager": "[email protected]",
40
  "trustedDependencies": [
41
  "@aitube/clapper",
42
  "onnxruntime-node",
43
  "protobufjs"
 
 
 
 
 
 
 
 
 
 
 
44
  ]
45
  }
 
1
  {
2
  "name": "@aitube/clapper-monorepo",
3
  "version": "0.2.4",
 
4
  "description": "A monorepo for the Clapper project. Individual packages are in the packages directory.",
 
 
 
 
 
 
 
 
 
 
 
5
  "engines": {
6
  "bun": ">=1.0.0"
7
  },
8
+ "packageManager": "[email protected]",
9
+ "private": true,
10
  "scripts": {
11
  "dev": "bun run --cwd packages/app dev",
12
  "start": "bun run --cwd packages/app start",
 
26
  "test:all": "bun run --cwd packages/clap test && bun run --cwd packages/timeline test && bun run --cwd packages/api-client test && bun run --cwd packages/io test && bun run --cwd packages/colors test && bun run --cwd packages/engine test && bun run --cwd packages/broadway test && bun run --cwd packages/clapper-services test && bun run --cwd packages/app test",
27
  "format": "bun run --cwd packages/app format"
28
  },
 
29
  "trustedDependencies": [
30
  "@aitube/clapper",
31
  "onnxruntime-node",
32
  "protobufjs"
33
+ ],
34
+ "workspaces": [
35
+ "packages/clap",
36
+ "packages/timeline",
37
+ "packages/api-client",
38
+ "packages/io",
39
+ "packages/colors",
40
+ "packages/engine",
41
+ "packages/broadway",
42
+ "packages/clapper-services",
43
+ "packages/app"
44
  ]
45
  }
packages/api-client/package.json CHANGED
@@ -39,6 +39,8 @@
39
  "dist/**/*.d.ts"
40
  ],
41
  "dependencies": {
 
 
42
  "query-string": "^9.0.0"
43
  }
44
  }
 
39
  "dist/**/*.d.ts"
40
  ],
41
  "dependencies": {
42
+ "@aitube/clap": "workspace:*",
43
+ "@types/bun": "latest",
44
  "query-string": "^9.0.0"
45
  }
46
  }
packages/app/package.json CHANGED
@@ -41,7 +41,7 @@
41
  "@aitube/clapper-services": "workspace:*",
42
  "@aitube/engine": "workspace:*",
43
  "@aitube/timeline": "workspace:*",
44
- "@fal-ai/serverless-client": "^0.13.0",
45
  "@ffmpeg/ffmpeg": "^0.12.10",
46
  "@ffmpeg/util": "^0.12.1",
47
  "@gradio/client": "^1.5.0",
 
41
  "@aitube/clapper-services": "workspace:*",
42
  "@aitube/engine": "workspace:*",
43
  "@aitube/timeline": "workspace:*",
44
+ "@fal-ai/serverless-client": "^0.14.2",
45
  "@ffmpeg/ffmpeg": "^0.12.10",
46
  "@ffmpeg/util": "^0.12.1",
47
  "@gradio/client": "^1.5.0",
packages/app/src/app/api/resolve/providers/replicate/index.ts CHANGED
@@ -1,8 +1,11 @@
1
  import Replicate from 'replicate'
2
 
3
- import { ClapSegmentCategory } from '@aitube/clap'
4
  import { ResolveRequest } from '@aitube/clapper-services'
5
  import { TimelineSegment } from '@aitube/timeline'
 
 
 
6
 
7
  export async function resolveSegment(
8
  request: ResolveRequest
@@ -12,22 +15,28 @@ export async function resolveSegment(
12
  }
13
  const replicate = new Replicate({ auth: request.settings.replicateApiKey })
14
 
15
- if (request.segment.category !== ClapSegmentCategory.STORYBOARD) {
16
- throw new Error(
17
- `Clapper doesn't support ${request.segment.category} generation for provider "Replicate". Please open a pull request with (working code) to solve this!`
18
- )
19
- }
20
-
21
  const segment = request.segment
22
 
23
- // this mapping isn't great, we should use something auto-adapting
24
- // like we are doing for Hugging Face (match the fields etc)
25
- if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
 
 
 
26
  let params: object = {
27
  prompt: request.prompts.image.positive,
28
  width: request.meta.width,
29
  height: request.meta.height,
 
30
  }
 
 
 
 
 
 
 
 
31
  if (
32
  request.settings.imageGenerationWorkflow.data === 'fofr/pulid-lightning'
33
  ) {
@@ -35,6 +44,28 @@ export async function resolveSegment(
35
  ...params,
36
  face_image: request.prompts.image.identity,
37
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  } else if (
39
  request.settings.imageGenerationWorkflow.data === 'zsxkib/pulid'
40
  ) {
@@ -43,11 +74,21 @@ export async function resolveSegment(
43
  main_face_image: request.prompts.image.identity,
44
  }
45
  }
 
 
 
 
 
 
 
46
  const response = (await replicate.run(
47
- request.settings.imageGenerationWorkflow as any,
48
  { input: params }
49
  )) as any
50
- segment.assetUrl = `${response.output || ''}`
 
 
 
51
  } else if (request.segment.category === ClapSegmentCategory.DIALOGUE) {
52
  const response = (await replicate.run(
53
  request.settings.voiceGenerationWorkflow.data as any,
@@ -55,20 +96,22 @@ export async function resolveSegment(
55
  input: {
56
  text: request.prompts.voice.positive,
57
  audio: request.prompts.voice.identity,
 
58
  },
59
  }
60
  )) as any
61
- segment.assetUrl = `${response.output || ''}`
62
  } else if (request.segment.category === ClapSegmentCategory.VIDEO) {
63
  const response = (await replicate.run(
64
  request.settings.videoGenerationWorkflow.data as any,
65
  {
66
  input: {
67
  image: request.prompts.video.image,
 
68
  },
69
  }
70
  )) as any
71
- segment.assetUrl = `${response.output || ''}`
72
  } else {
73
  throw new Error(
74
  `Clapper doesn't support ${request.segment.category} generation for provider "Replicate". Please open a pull request with (working code) to solve this!`
 
1
  import Replicate from 'replicate'
2
 
3
+ import { ClapMediaOrientation, ClapSegmentCategory } from '@aitube/clap'
4
  import { ResolveRequest } from '@aitube/clapper-services'
5
  import { TimelineSegment } from '@aitube/timeline'
6
+ import { getWorkflowInputValues } from '../getWorkflowInputValues'
7
+ import { defaultLoraModels } from '@/services/editors/workflow-editor/workflows/common/loras'
8
+ import { getWorkflowLora } from '@/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora'
9
 
10
  export async function resolveSegment(
11
  request: ResolveRequest
 
15
  }
16
  const replicate = new Replicate({ auth: request.settings.replicateApiKey })
17
 
 
 
 
 
 
 
18
  const segment = request.segment
19
 
20
+ if (request.segment.category == ClapSegmentCategory.STORYBOARD) {
21
+
22
+ const { workflowValues } = getWorkflowInputValues(
23
+ request.settings.imageGenerationWorkflow
24
+ )
25
+
26
  let params: object = {
27
  prompt: request.prompts.image.positive,
28
  width: request.meta.width,
29
  height: request.meta.height,
30
+ disable_safety_checker: !request.settings.censorNotForAllAudiencesContent,
31
  }
32
+
33
+ const aspectRatio =
34
+ request.meta.orientation === ClapMediaOrientation.SQUARE
35
+ ? "1:1"
36
+ : request.meta.orientation === ClapMediaOrientation.PORTRAIT
37
+ ? "9:16"
38
+ : "16:9"
39
+
40
  if (
41
  request.settings.imageGenerationWorkflow.data === 'fofr/pulid-lightning'
42
  ) {
 
44
  ...params,
45
  face_image: request.prompts.image.identity,
46
  }
47
+ } else if (
48
+ request.settings.imageGenerationWorkflow.data === 'lucataco/flux-dev-lora'
49
+ ) {
50
+
51
+ // note: this isn't the right place to do this, because maybe the LoRAs are dynamic
52
+ const loraModel = getWorkflowLora(request.settings.imageGenerationWorkflow)
53
+
54
+ params = {
55
+ // for some reason this model doesn't support arbitrary width and height,
56
+ // at least not at the time of writing..
57
+ aspect_ratio: aspectRatio,
58
+
59
+ hf_lora: workflowValues['hf_lora'] || '',
60
+
61
+ prompt: [
62
+ loraModel?.trigger,
63
+ request.prompts.image.positive
64
+ ].filter(x => x).join(' '),
65
+
66
+ disable_safety_checker: !request.settings.censorNotForAllAudiencesContent,
67
+ }
68
+
69
  } else if (
70
  request.settings.imageGenerationWorkflow.data === 'zsxkib/pulid'
71
  ) {
 
74
  main_face_image: request.prompts.image.identity,
75
  }
76
  }
77
+
78
+ /*
79
+ console.log("debug:", {
80
+ model: request.settings.imageGenerationWorkflow.data,
81
+ params,
82
+ })
83
+ */
84
  const response = (await replicate.run(
85
+ request.settings.imageGenerationWorkflow.data as any,
86
  { input: params }
87
  )) as any
88
+
89
+
90
+ segment.assetUrl = `${response[0] || ''}`
91
+
92
  } else if (request.segment.category === ClapSegmentCategory.DIALOGUE) {
93
  const response = (await replicate.run(
94
  request.settings.voiceGenerationWorkflow.data as any,
 
96
  input: {
97
  text: request.prompts.voice.positive,
98
  audio: request.prompts.voice.identity,
99
+ disable_safety_checker: !request.settings.censorNotForAllAudiencesContent,
100
  },
101
  }
102
  )) as any
103
+ segment.assetUrl = `${response[0] || ''}`
104
  } else if (request.segment.category === ClapSegmentCategory.VIDEO) {
105
  const response = (await replicate.run(
106
  request.settings.videoGenerationWorkflow.data as any,
107
  {
108
  input: {
109
  image: request.prompts.video.image,
110
+ disable_safety_checker: !request.settings.censorNotForAllAudiencesContent,
111
  },
112
  }
113
  )) as any
114
+ segment.assetUrl = `${response[0] || ''}`
115
  } else {
116
  throw new Error(
117
  `Clapper doesn't support ${request.segment.category} generation for provider "Replicate". Please open a pull request with (working code) to solve this!`
packages/app/src/app/api/resolve/route.ts CHANGED
@@ -132,6 +132,9 @@ export async function POST(req: NextRequest) {
132
  segment.outputType === ClapOutputType.AUDIO ||
133
  segment.outputType === ClapOutputType.VIDEO
134
  ) {
 
 
 
135
  const { durationInMs, hasAudio } = await getMediaInfo(segment.assetUrl)
136
  segment.assetDurationInMs = durationInMs
137
 
 
132
  segment.outputType === ClapOutputType.AUDIO ||
133
  segment.outputType === ClapOutputType.VIDEO
134
  ) {
135
+
136
+
137
+ // TODO this should be down in the browser side, so that we can scale better
138
  const { durationInMs, hasAudio } = await getMediaInfo(segment.assetUrl)
139
  segment.assetDurationInMs = durationInMs
140
 
packages/app/src/components/toolbars/top-menu/lists/AssistantWorkflows.tsx CHANGED
@@ -21,18 +21,20 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function AssistantWorkflows() {
26
- const workflowId = useSettings((s) => s.assistantWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setAssistantWorkflow)
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.ASSISTANT }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +67,7 @@ export function AssistantWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +75,7 @@ export function AssistantWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.ASSISTANT
27
 
28
  export function AssistantWorkflows() {
29
+ const assistantWorkflow = useSettings((s) => s.assistantWorkflow)
30
+ const setAssistantWorkflow = useSettings((s) => s.setAssistantWorkflow)
31
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
32
 
33
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
34
+ category,
35
+ })
 
36
 
37
+ const workflow = parseWorkflow(assistantWorkflow, category)
38
 
39
  if (!nbProviders) {
40
  return null
 
67
  {workflows?.map((w) => (
68
  <MenubarCheckboxItem
69
  key={w.id}
70
+ checked={workflow.id === w.id}
71
  disabled={hasNoPublicAPI(w)}
72
  onClick={(e) => {
73
  if (hasNoPublicAPI(w)) {
 
75
  e.preventDefault()
76
  return false
77
  }
78
+ setAssistantWorkflow(w)
79
  e.stopPropagation()
80
  e.preventDefault()
81
  return false
packages/app/src/components/toolbars/top-menu/lists/ImageDepthWorkflows.tsx CHANGED
@@ -21,18 +21,20 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function ImageDepthWorkflows() {
26
- const workflowId = useSettings((s) => s.imageDepthWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setImageDepthWorkflow)
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.IMAGE_DEPTH_MAPPING }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +67,7 @@ export function ImageDepthWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +75,7 @@ export function ImageDepthWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.IMAGE_DEPTH_MAPPING
27
 
28
  export function ImageDepthWorkflows() {
29
+ const imageDepthWorkflow = useSettings((s) => s.imageDepthWorkflow)
30
+ const setImageDepthWorkflow = useSettings((s) => s.setImageDepthWorkflow)
31
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
32
 
33
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
34
+ category,
35
+ })
 
36
 
37
+ const workflow = parseWorkflow(imageDepthWorkflow, category)
38
 
39
  if (!nbProviders) {
40
  return null
 
67
  {workflows?.map((w) => (
68
  <MenubarCheckboxItem
69
  key={w.id}
70
+ checked={workflow.id === w.id}
71
  disabled={hasNoPublicAPI(w)}
72
  onClick={(e) => {
73
  if (hasNoPublicAPI(w)) {
 
75
  e.preventDefault()
76
  return false
77
  }
78
+ setImageDepthWorkflow(w)
79
  e.stopPropagation()
80
  e.preventDefault()
81
  return false
packages/app/src/components/toolbars/top-menu/lists/ImageGenerationWorkflows.tsx CHANGED
@@ -1,6 +1,10 @@
1
  'use client'
2
 
3
- import { ClapWorkflowCategory, ClapWorkflowProvider } from '@aitube/clap'
 
 
 
 
4
 
5
  import {
6
  MenubarCheckboxItem,
@@ -21,23 +25,33 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
 
24
 
25
- export function ImageGenerationWorkflows() {
26
- const workflowId = useSettings((s) => s.imageGenerationWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setImageGenerationWorkflow)
28
- const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.IMAGE_GENERATION }
 
 
 
33
  )
 
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
 
 
36
 
37
  if (!nbProviders) {
38
  return null
39
  }
40
 
 
 
 
41
  return (
42
  <MenubarSub>
43
  <MenubarSubTrigger>
@@ -62,26 +76,66 @@ export function ImageGenerationWorkflows() {
62
  </ClapWorkflowProviderName>
63
  </MenubarSubTrigger>
64
  <MenubarSubContent>
65
- {workflows?.map((w) => (
66
- <MenubarCheckboxItem
67
- key={w.id}
68
- checked={workflowId === w.id}
69
- disabled={hasNoPublicAPI(w)}
70
- onClick={(e) => {
71
- if (hasNoPublicAPI(w)) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  e.stopPropagation()
73
  e.preventDefault()
74
  return false
75
- }
76
- setWorkflowId(w.id)
77
- e.stopPropagation()
78
- e.preventDefault()
79
- return false
80
- }}
81
- >
82
- {w.label}
83
- </MenubarCheckboxItem>
84
- ))}
85
  </MenubarSubContent>
86
  </MenubarSub>
87
  ))}
 
1
  'use client'
2
 
3
+ import {
4
+ ClapInputCategory,
5
+ ClapWorkflowCategory,
6
+ ClapWorkflowProvider,
7
+ } from '@aitube/clap'
8
 
9
  import {
10
  MenubarCheckboxItem,
 
25
  ClapWorkflowProviderLogo,
26
  ClapWorkflowProviderName,
27
  } from '@/components/core/providers'
28
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
29
+ import { Lora } from '@/services/editors/workflow-editor/workflows/common/types'
30
+ import { getWorkflowLora } from '@/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora'
31
+ import { getWorkflowInputField } from '@/services/editors/workflow-editor/workflows/common/loras/getWorkflowInputField'
32
 
33
+ import { LoraModelList } from './LoraModelList'
 
 
 
34
 
35
+ const category = ClapWorkflowCategory.IMAGE_GENERATION
36
+
37
+ export function ImageGenerationWorkflows() {
38
+ const imageGenerationWorkflow = useSettings((s) => s.imageGenerationWorkflow)
39
+ const setImageGenerationWorkflow = useSettings(
40
+ (s) => s.setImageGenerationWorkflow
41
  )
42
+ const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
43
 
44
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
45
+ category,
46
+ })
47
 
48
  if (!nbProviders) {
49
  return null
50
  }
51
 
52
+ const workflow = parseWorkflow(imageGenerationWorkflow, category)
53
+ const workflowLora = getWorkflowLora(workflow)
54
+
55
  return (
56
  <MenubarSub>
57
  <MenubarSubTrigger>
 
76
  </ClapWorkflowProviderName>
77
  </MenubarSubTrigger>
78
  <MenubarSubContent>
79
+ {workflows?.map((w) => {
80
+ // if this workflow has at least one field of type lora
81
+ const loraFieldName = getWorkflowInputField(
82
+ w,
83
+ ClapInputCategory.LORA
84
+ )?.id
85
+ if (loraFieldName) {
86
+ return (
87
+ <LoraModelList
88
+ key={`wf_${w.id}`}
89
+ workflow={w}
90
+ currentLora={workflowLora}
91
+ onChange={(newLora?: Lora) => {
92
+
93
+ console.log(`onChange:`, {
94
+ w,
95
+ newLora,
96
+ loraFieldName,
97
+ repoUrl: newLora?.repoOrUrl,
98
+ newWorkflowValue: {
99
+ ...w,
100
+ inputValues: {
101
+ ...w.inputValues,
102
+ [loraFieldName]: newLora?.repoOrUrl || '',
103
+ },
104
+ },
105
+ })
106
+ setImageGenerationWorkflow({
107
+ ...w,
108
+ inputValues: {
109
+ ...w.inputValues,
110
+ [loraFieldName]: newLora?.repoOrUrl || '',
111
+ },
112
+ })
113
+ }}
114
+ />
115
+ )
116
+ }
117
+
118
+ return (
119
+ <MenubarCheckboxItem
120
+ key={`wf_${w.id}`}
121
+ checked={workflow.id === w.id}
122
+ disabled={hasNoPublicAPI(w)}
123
+ onClick={(e) => {
124
+ if (hasNoPublicAPI(w)) {
125
+ e.stopPropagation()
126
+ e.preventDefault()
127
+ return false
128
+ }
129
+ setImageGenerationWorkflow(w)
130
  e.stopPropagation()
131
  e.preventDefault()
132
  return false
133
+ }}
134
+ >
135
+ {w.label}
136
+ </MenubarCheckboxItem>
137
+ )
138
+ })}
 
 
 
 
139
  </MenubarSubContent>
140
  </MenubarSub>
141
  ))}
packages/app/src/components/toolbars/top-menu/lists/ImageSegmentationWorkflows.tsx CHANGED
@@ -21,18 +21,24 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function ImageSegmentationWorkflows() {
26
- const workflowId = useSettings((s) => s.imageSegmentationWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setImageSegmentationWorkflow)
 
 
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.IMAGE_SEGMENTATION }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +71,7 @@ export function ImageSegmentationWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +79,7 @@ export function ImageSegmentationWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.IMAGE_SEGMENTATION
27
 
28
  export function ImageSegmentationWorkflows() {
29
+ const imageSegmentationWorkflow = useSettings(
30
+ (s) => s.imageSegmentationWorkflow
31
+ )
32
+ const setImageSegmentationWorkflow = useSettings(
33
+ (s) => s.setImageSegmentationWorkflow
34
+ )
35
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
36
 
37
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
38
+ category,
39
+ })
 
40
 
41
+ const workflow = parseWorkflow(imageSegmentationWorkflow, category)
42
 
43
  if (!nbProviders) {
44
  return null
 
71
  {workflows?.map((w) => (
72
  <MenubarCheckboxItem
73
  key={w.id}
74
+ checked={workflow.id === w.id}
75
  disabled={hasNoPublicAPI(w)}
76
  onClick={(e) => {
77
  if (hasNoPublicAPI(w)) {
 
79
  e.preventDefault()
80
  return false
81
  }
82
+ setImageSegmentationWorkflow(w)
83
  e.stopPropagation()
84
  e.preventDefault()
85
  return false
packages/app/src/components/toolbars/top-menu/lists/ImageUpscalingWorkflows.tsx CHANGED
@@ -21,18 +21,22 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function ImageUpscalingWorkflows() {
26
- const workflowId = useSettings((s) => s.imageUpscalingWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setImageUpscalingWorkflow)
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.IMAGE_UPSCALING }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +69,7 @@ export function ImageUpscalingWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +77,7 @@ export function ImageUpscalingWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.IMAGE_UPSCALING
27
 
28
  export function ImageUpscalingWorkflows() {
29
+ const imageUpscalingWorkflow = useSettings((s) => s.imageUpscalingWorkflow)
30
+ const setImageUpscalingWorkflow = useSettings(
31
+ (s) => s.setImageUpscalingWorkflow
32
+ )
33
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
34
 
35
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
36
+ category,
37
+ })
 
38
 
39
+ const workflow = parseWorkflow(imageUpscalingWorkflow, category)
40
 
41
  if (!nbProviders) {
42
  return null
 
69
  {workflows?.map((w) => (
70
  <MenubarCheckboxItem
71
  key={w.id}
72
+ checked={workflow.id === w.id}
73
  disabled={hasNoPublicAPI(w)}
74
  onClick={(e) => {
75
  if (hasNoPublicAPI(w)) {
 
77
  e.preventDefault()
78
  return false
79
  }
80
+ setImageUpscalingWorkflow(w)
81
  e.stopPropagation()
82
  e.preventDefault()
83
  return false
packages/app/src/components/toolbars/top-menu/lists/LoraModelList.tsx ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 'use client'
2
+
3
+ import { ClapWorkflow } from '@aitube/clap'
4
+
5
+ import {
6
+ MenubarCheckboxItem,
7
+ MenubarContent,
8
+ MenubarItem,
9
+ MenubarMenu,
10
+ MenubarSeparator,
11
+ MenubarSub,
12
+ MenubarSubContent,
13
+ MenubarSubTrigger,
14
+ } from '@/components/ui/menubar'
15
+
16
+ import { defaultLoraModels } from '@/services/editors/workflow-editor/workflows/common/loras'
17
+ import { Lora } from '@/services/editors/workflow-editor/workflows/common/types'
18
+
19
+ export function LoraModelList({
20
+ workflow,
21
+ currentLora,
22
+ onChange,
23
+ }: {
24
+ workflow: ClapWorkflow
25
+ currentLora?: Lora
26
+ onChange: (lora?: Lora) => void
27
+ }) {
28
+ return (
29
+ <MenubarSub>
30
+ <MenubarSubTrigger>
31
+ <div className="pl-6">
32
+ <span>{workflow.label}</span>
33
+ <span className="ml-1 opacity-70">{currentLora ? `(${currentLora.label})` : `(no lora selected)`}</span>
34
+ </div>
35
+ </MenubarSubTrigger>
36
+ <MenubarSubContent>
37
+ <MenubarCheckboxItem
38
+ key={'no_lora'}
39
+ checked={!currentLora?.id}
40
+ onClick={(e) => {
41
+ onChange(undefined)
42
+ e.stopPropagation()
43
+ e.preventDefault()
44
+ return false
45
+ }}
46
+ >
47
+ No LoRA
48
+ </MenubarCheckboxItem>
49
+ {defaultLoraModels.map((lora: Lora) => (
50
+ <MenubarCheckboxItem
51
+ key={lora.id}
52
+ checked={currentLora?.id === lora.id}
53
+ onClick={(e) => {
54
+ onChange(lora)
55
+ e.stopPropagation()
56
+ e.preventDefault()
57
+ return false
58
+ }}
59
+ >
60
+ {lora.label}
61
+ </MenubarCheckboxItem>
62
+ ))}
63
+ </MenubarSubContent>
64
+ </MenubarSub>
65
+ )
66
+ }
packages/app/src/components/toolbars/top-menu/lists/MusicGenerationWorkflows.tsx CHANGED
@@ -21,18 +21,22 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function MusicGenerationWorkflows() {
26
- const workflowId = useSettings((s) => s.musicGenerationWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setMusicGenerationWorkflow)
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.MUSIC_GENERATION }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +69,7 @@ export function MusicGenerationWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +77,7 @@ export function MusicGenerationWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.MUSIC_GENERATION
27
 
28
  export function MusicGenerationWorkflows() {
29
+ const musicGenerationWorkflow = useSettings((s) => s.musicGenerationWorkflow)
30
+ const setMusicGenerationWorkflow = useSettings(
31
+ (s) => s.setMusicGenerationWorkflow
32
+ )
33
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
34
 
35
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
36
+ category,
37
+ })
 
38
 
39
+ const workflow = parseWorkflow(musicGenerationWorkflow, category)
40
 
41
  if (!nbProviders) {
42
  return null
 
69
  {workflows?.map((w) => (
70
  <MenubarCheckboxItem
71
  key={w.id}
72
+ checked={workflow.id === w.id}
73
  disabled={hasNoPublicAPI(w)}
74
  onClick={(e) => {
75
  if (hasNoPublicAPI(w)) {
 
77
  e.preventDefault()
78
  return false
79
  }
80
+ setMusicGenerationWorkflow(w)
81
  e.stopPropagation()
82
  e.preventDefault()
83
  return false
packages/app/src/components/toolbars/top-menu/lists/SoundGenerationWorkflows.tsx CHANGED
@@ -21,18 +21,22 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function SoundGenerationWorkflows() {
26
- const workflowId = useSettings((s) => s.soundGenerationWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setSoundGenerationWorkflow)
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.SOUND_GENERATION }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +69,7 @@ export function SoundGenerationWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +77,7 @@ export function SoundGenerationWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.SOUND_GENERATION
27
 
28
  export function SoundGenerationWorkflows() {
29
+ const soundGenerationWorkflow = useSettings((s) => s.soundGenerationWorkflow)
30
+ const setSoundGenerationWorkflow = useSettings(
31
+ (s) => s.setSoundGenerationWorkflow
32
+ )
33
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
34
 
35
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
36
+ category,
37
+ })
 
38
 
39
+ const workflow = parseWorkflow(soundGenerationWorkflow, category)
40
 
41
  if (!nbProviders) {
42
  return null
 
69
  {workflows?.map((w) => (
70
  <MenubarCheckboxItem
71
  key={w.id}
72
+ checked={workflow.id === w.id}
73
  disabled={hasNoPublicAPI(w)}
74
  onClick={(e) => {
75
  if (hasNoPublicAPI(w)) {
 
77
  e.preventDefault()
78
  return false
79
  }
80
+ setSoundGenerationWorkflow(w)
81
  e.stopPropagation()
82
  e.preventDefault()
83
  return false
packages/app/src/components/toolbars/top-menu/lists/VideoDepthWorkflows.tsx CHANGED
@@ -21,18 +21,20 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function VideoDepthWorkflows() {
26
- const workflowId = useSettings((s) => s.videoDepthWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setVideoDepthWorkflow)
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.VIDEO_DEPTH_MAPPING }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +67,7 @@ export function VideoDepthWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +75,7 @@ export function VideoDepthWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.VIDEO_DEPTH_MAPPING
27
 
28
  export function VideoDepthWorkflows() {
29
+ const videoDepthWorkflow = useSettings((s) => s.videoDepthWorkflow)
30
+ const setVideoDepthWorkflow = useSettings((s) => s.setVideoDepthWorkflow)
31
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
32
 
33
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
34
+ category,
35
+ })
 
36
 
37
+ const workflow = parseWorkflow(videoDepthWorkflow, category)
38
 
39
  if (!nbProviders) {
40
  return null
 
67
  {workflows?.map((w) => (
68
  <MenubarCheckboxItem
69
  key={w.id}
70
+ checked={workflow.id === w.id}
71
  disabled={hasNoPublicAPI(w)}
72
  onClick={(e) => {
73
  if (hasNoPublicAPI(w)) {
 
75
  e.preventDefault()
76
  return false
77
  }
78
+ setVideoDepthWorkflow(w)
79
  e.stopPropagation()
80
  e.preventDefault()
81
  return false
packages/app/src/components/toolbars/top-menu/lists/VideoGenerationWorkflows.tsx CHANGED
@@ -21,18 +21,22 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function VideoGenerationWorkflows() {
26
- const workflowId = useSettings((s) => s.videoGenerationWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setVideoGenerationWorkflow)
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.VIDEO_GENERATION }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +69,7 @@ export function VideoGenerationWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +77,7 @@ export function VideoGenerationWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.VIDEO_GENERATION
27
 
28
  export function VideoGenerationWorkflows() {
29
+ const videoGenerationWorkflow = useSettings((s) => s.videoGenerationWorkflow)
30
+ const setVideoGenerationWorkflow = useSettings(
31
+ (s) => s.setVideoGenerationWorkflow
32
+ )
33
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
34
 
35
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
36
+ category,
37
+ })
 
38
 
39
+ const workflow = parseWorkflow(videoGenerationWorkflow, category)
40
 
41
  if (!nbProviders) {
42
  return null
 
69
  {workflows?.map((w) => (
70
  <MenubarCheckboxItem
71
  key={w.id}
72
+ checked={workflow.id === w.id}
73
  disabled={hasNoPublicAPI(w)}
74
  onClick={(e) => {
75
  if (hasNoPublicAPI(w)) {
 
77
  e.preventDefault()
78
  return false
79
  }
80
+ setVideoGenerationWorkflow(w)
81
  e.stopPropagation()
82
  e.preventDefault()
83
  return false
packages/app/src/components/toolbars/top-menu/lists/VideoSegmentationWorkflows.tsx CHANGED
@@ -21,18 +21,24 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function VideoSegmentationWorkflows() {
26
- const workflowId = useSettings((s) => s.videoSegmentationWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setVideoSegmentationWorkflow)
 
 
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.VIDEO_SEGMENTATION }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +71,7 @@ export function VideoSegmentationWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +79,7 @@ export function VideoSegmentationWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.VIDEO_SEGMENTATION
27
 
28
  export function VideoSegmentationWorkflows() {
29
+ const videoSegmentationWorkflow = useSettings(
30
+ (s) => s.videoSegmentationWorkflow
31
+ )
32
+ const setVideoSegmentationWorkflow = useSettings(
33
+ (s) => s.setVideoSegmentationWorkflow
34
+ )
35
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
36
 
37
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
38
+ category: ClapWorkflowCategory.VIDEO_SEGMENTATION,
39
+ })
 
40
 
41
+ const workflow = parseWorkflow(videoSegmentationWorkflow, category)
42
 
43
  if (!nbProviders) {
44
  return null
 
71
  {workflows?.map((w) => (
72
  <MenubarCheckboxItem
73
  key={w.id}
74
+ checked={workflow.id === w.id}
75
  disabled={hasNoPublicAPI(w)}
76
  onClick={(e) => {
77
  if (hasNoPublicAPI(w)) {
 
79
  e.preventDefault()
80
  return false
81
  }
82
+ setVideoSegmentationWorkflow(w)
83
  e.stopPropagation()
84
  e.preventDefault()
85
  return false
packages/app/src/components/toolbars/top-menu/lists/VideoUpscalingWorkflows.tsx CHANGED
@@ -21,18 +21,22 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function VideoUpscalingWorkflows() {
26
- const workflowId = useSettings((s) => s.videoUpscalingWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setVideoUpscalingWorkflow)
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.VIDEO_UPSCALING }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +69,7 @@ export function VideoUpscalingWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +77,7 @@ export function VideoUpscalingWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.VIDEO_UPSCALING
27
 
28
  export function VideoUpscalingWorkflows() {
29
+ const videoUpscalingWorkflow = useSettings((s) => s.videoUpscalingWorkflow)
30
+ const setVideoUpscalingWorkflow = useSettings(
31
+ (s) => s.setVideoUpscalingWorkflow
32
+ )
33
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
34
 
35
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
36
+ category,
37
+ })
 
38
 
39
+ const workflow = parseWorkflow(videoUpscalingWorkflow, category)
40
 
41
  if (!nbProviders) {
42
  return null
 
69
  {workflows?.map((w) => (
70
  <MenubarCheckboxItem
71
  key={w.id}
72
+ checked={workflow.id === w.id}
73
  disabled={hasNoPublicAPI(w)}
74
  onClick={(e) => {
75
  if (hasNoPublicAPI(w)) {
 
77
  e.preventDefault()
78
  return false
79
  }
80
+ setVideoUpscalingWorkflow(w)
81
  e.stopPropagation()
82
  e.preventDefault()
83
  return false
packages/app/src/components/toolbars/top-menu/lists/VoiceGenerationWorkflows.tsx CHANGED
@@ -21,18 +21,22 @@ import {
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
 
 
 
24
 
25
  export function VoiceGenerationWorkflows() {
26
- const workflowId = useSettings((s) => s.voiceGenerationWorkflow)
27
- const setWorkflowId = useSettings((s) => s.setVoiceGenerationWorkflow)
 
 
28
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
29
 
30
- const { workflows, providers, nbProviders } = findWorkflows(
31
- availableWorkflows,
32
- { category: ClapWorkflowCategory.VOICE_GENERATION }
33
- )
34
 
35
- const { workflow } = findWorkflows(workflows, { workflowId })
36
 
37
  if (!nbProviders) {
38
  return null
@@ -65,7 +69,7 @@ export function VoiceGenerationWorkflows() {
65
  {workflows?.map((w) => (
66
  <MenubarCheckboxItem
67
  key={w.id}
68
- checked={workflowId === w.id}
69
  disabled={hasNoPublicAPI(w)}
70
  onClick={(e) => {
71
  if (hasNoPublicAPI(w)) {
@@ -73,7 +77,7 @@ export function VoiceGenerationWorkflows() {
73
  e.preventDefault()
74
  return false
75
  }
76
- setWorkflowId(w.id)
77
  e.stopPropagation()
78
  e.preventDefault()
79
  return false
 
21
  ClapWorkflowProviderLogo,
22
  ClapWorkflowProviderName,
23
  } from '@/components/core/providers'
24
+ import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow'
25
+
26
+ const category = ClapWorkflowCategory.VOICE_GENERATION
27
 
28
  export function VoiceGenerationWorkflows() {
29
+ const voiceGenerationWorkflow = useSettings((s) => s.voiceGenerationWorkflow)
30
+ const setVoiceGenerationWorkflow = useSettings(
31
+ (s) => s.setVoiceGenerationWorkflow
32
+ )
33
  const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows)
34
 
35
+ const { providers, nbProviders } = findWorkflows(availableWorkflows, {
36
+ category,
37
+ })
 
38
 
39
+ const workflow = parseWorkflow(voiceGenerationWorkflow, category)
40
 
41
  if (!nbProviders) {
42
  return null
 
69
  {workflows?.map((w) => (
70
  <MenubarCheckboxItem
71
  key={w.id}
72
+ checked={workflow.id === w.id}
73
  disabled={hasNoPublicAPI(w)}
74
  onClick={(e) => {
75
  if (hasNoPublicAPI(w)) {
 
77
  e.preventDefault()
78
  return false
79
  }
80
+ setVoiceGenerationWorkflow(w)
81
  e.stopPropagation()
82
  e.preventDefault()
83
  return false
packages/app/src/components/toolbars/top-menu/lists/getWorkflowProviders.ts CHANGED
@@ -5,6 +5,15 @@ import {
5
  ClapWorkflowProvider,
6
  } from '@aitube/clap'
7
 
 
 
 
 
 
 
 
 
 
8
  /**
9
  * Helper to find workflows by id, category, provider or engine
10
  *
@@ -21,14 +30,7 @@ export function findWorkflows(
21
  provider?: ClapWorkflowProvider
22
  engine?: ClapWorkflowEngine
23
  }
24
- ): {
25
- workflow?: ClapWorkflow
26
- workflows: ClapWorkflow[]
27
- nbWorkflows: number
28
- providers: Partial<Record<ClapWorkflowProvider, ClapWorkflow[]>>
29
- nbProviders: number
30
- workflowIds: Record<string, ClapWorkflow>
31
- } {
32
  const workflows: ClapWorkflow[] = []
33
  const providers: Partial<Record<ClapWorkflowProvider, ClapWorkflow[]>> = {}
34
  const workflowIds: Record<string, ClapWorkflow> = {}
 
5
  ClapWorkflowProvider,
6
  } from '@aitube/clap'
7
 
8
+ export type WorkflowSearchResults = {
9
+ workflow?: ClapWorkflow
10
+ workflows: ClapWorkflow[]
11
+ nbWorkflows: number
12
+ providers: Partial<Record<ClapWorkflowProvider, ClapWorkflow[]>>
13
+ nbProviders: number
14
+ workflowIds: Record<string, ClapWorkflow>
15
+ }
16
+
17
  /**
18
  * Helper to find workflows by id, category, provider or engine
19
  *
 
30
  provider?: ClapWorkflowProvider
31
  engine?: ClapWorkflowEngine
32
  }
33
+ ): WorkflowSearchResults {
 
 
 
 
 
 
 
34
  const workflows: ClapWorkflow[] = []
35
  const providers: Partial<Record<ClapWorkflowProvider, ClapWorkflow[]>> = {}
36
  const workflowIds: Record<string, ClapWorkflow> = {}
packages/app/src/lib/utils/decodeOutput.ts CHANGED
@@ -14,6 +14,7 @@ export async function decodeOutput(input: any): Promise<string> {
14
  ? urlOrBase64
15
  : await fetchContentToBase64(urlOrBase64)
16
 
 
17
  if (base64Url.startsWith('data:image/')) {
18
  if (
19
  base64Url.startsWith('data:image/jpeg') ||
 
14
  ? urlOrBase64
15
  : await fetchContentToBase64(urlOrBase64)
16
 
17
+
18
  if (base64Url.startsWith('data:image/')) {
19
  if (
20
  base64Url.startsWith('data:image/jpeg') ||
packages/app/src/lib/utils/fetchContentToBase64.ts CHANGED
@@ -1,4 +1,7 @@
1
  export async function fetchContentToBase64(url: string) {
 
 
 
2
  const res = await fetch(url, {
3
  method: 'GET',
4
  headers: {
@@ -11,5 +14,19 @@ export async function fetchContentToBase64(url: string) {
11
  const blob = await res.blob()
12
  const buffer = Buffer.from(await blob.arrayBuffer())
13
 
14
- return 'data:' + blob.type + ';base64,' + buffer.toString('base64')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  }
 
1
  export async function fetchContentToBase64(url: string) {
2
+
3
+ const predictedFormat = url.split(".").pop()?.trim().toLowerCase()
4
+
5
  const res = await fetch(url, {
6
  method: 'GET',
7
  headers: {
 
14
  const blob = await res.blob()
15
  const buffer = Buffer.from(await blob.arrayBuffer())
16
 
17
+ // some providers such as Replicate return a generic octet-stream type in the headers
18
+ const type = blob.type === "application/octet-stream"
19
+ ? (predictedFormat === "webp" ? "image/webp" :
20
+ predictedFormat === "jpeg" ? "image/jpeg" :
21
+ predictedFormat === "jpg" ? "image/jpeg" :
22
+ predictedFormat === "png" ? "image/png" :
23
+ predictedFormat === "avif" ? "image/avif" :
24
+ predictedFormat === "heic" ? "image/heic" :
25
+ predictedFormat === "mp4" ? "video/mp4" :
26
+ predictedFormat === "mp3" ? "audio/mp3" :
27
+ predictedFormat === "wav" ? "audio/wav" :
28
+ "application/octet-stream"
29
+ ) : blob.type
30
+
31
+ return 'data:' + type + ';base64,' + buffer.toString('base64')
32
  }
packages/app/src/services/editors/filter-editor/filters/analogLens.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const analogLensSimulator: Filter = {
@@ -8,6 +9,7 @@ export const analogLensSimulator: Filter = {
8
  id: 'chromaticAberration',
9
  label: 'Chromatic aberration',
10
  description: 'Chromatic aberration strength',
 
11
  type: 'number',
12
  minValue: 0,
13
  maxValue: 0.05,
@@ -17,6 +19,7 @@ export const analogLensSimulator: Filter = {
17
  id: 'vignetteStrength',
18
  label: 'Vignette strength',
19
  description: 'Vignette strength',
 
20
  type: 'number',
21
  minValue: 0,
22
  maxValue: 1,
@@ -26,6 +29,7 @@ export const analogLensSimulator: Filter = {
26
  id: 'vignetteRadius',
27
  label: 'Vignette radius',
28
  description: 'Vignette radius',
 
29
  type: 'number',
30
  minValue: 0,
31
  maxValue: 1,
@@ -35,6 +39,7 @@ export const analogLensSimulator: Filter = {
35
  id: 'distortion',
36
  label: 'Distortion',
37
  description: 'Lens distortion',
 
38
  type: 'number',
39
  minValue: -1,
40
  maxValue: 1,
@@ -44,6 +49,7 @@ export const analogLensSimulator: Filter = {
44
  id: 'bloomStrength',
45
  label: 'Bloom strength',
46
  description: 'Bloom strength',
 
47
  type: 'number',
48
  minValue: 0,
49
  maxValue: 1,
@@ -53,6 +59,7 @@ export const analogLensSimulator: Filter = {
53
  id: 'bloomRadius',
54
  label: 'Bloom radius',
55
  description: 'Bloom radius',
 
56
  type: 'number',
57
  minValue: 1,
58
  maxValue: 10,
@@ -62,6 +69,7 @@ export const analogLensSimulator: Filter = {
62
  id: 'dofFocusDistance',
63
  label: 'DOF focus distance',
64
  description: 'Depth of field focus distance',
 
65
  type: 'number',
66
  minValue: 0,
67
  maxValue: 1,
@@ -71,6 +79,7 @@ export const analogLensSimulator: Filter = {
71
  id: 'dofFocusRange',
72
  label: 'DOF focus range',
73
  description: 'Depth of field focus range',
 
74
  type: 'number',
75
  minValue: 0.01,
76
  maxValue: 1,
@@ -80,6 +89,7 @@ export const analogLensSimulator: Filter = {
80
  id: 'dofBlurStrength',
81
  label: 'DOF blur strength',
82
  description: 'Depth of field blur strength',
 
83
  type: 'number',
84
  minValue: 0,
85
  maxValue: 1,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const analogLensSimulator: Filter = {
 
9
  id: 'chromaticAberration',
10
  label: 'Chromatic aberration',
11
  description: 'Chromatic aberration strength',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'number',
14
  minValue: 0,
15
  maxValue: 0.05,
 
19
  id: 'vignetteStrength',
20
  label: 'Vignette strength',
21
  description: 'Vignette strength',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: 0,
25
  maxValue: 1,
 
29
  id: 'vignetteRadius',
30
  label: 'Vignette radius',
31
  description: 'Vignette radius',
32
+ category: ClapInputCategory.UNKNOWN,
33
  type: 'number',
34
  minValue: 0,
35
  maxValue: 1,
 
39
  id: 'distortion',
40
  label: 'Distortion',
41
  description: 'Lens distortion',
42
+ category: ClapInputCategory.UNKNOWN,
43
  type: 'number',
44
  minValue: -1,
45
  maxValue: 1,
 
49
  id: 'bloomStrength',
50
  label: 'Bloom strength',
51
  description: 'Bloom strength',
52
+ category: ClapInputCategory.UNKNOWN,
53
  type: 'number',
54
  minValue: 0,
55
  maxValue: 1,
 
59
  id: 'bloomRadius',
60
  label: 'Bloom radius',
61
  description: 'Bloom radius',
62
+ category: ClapInputCategory.UNKNOWN,
63
  type: 'number',
64
  minValue: 1,
65
  maxValue: 10,
 
69
  id: 'dofFocusDistance',
70
  label: 'DOF focus distance',
71
  description: 'Depth of field focus distance',
72
+ category: ClapInputCategory.UNKNOWN,
73
  type: 'number',
74
  minValue: 0,
75
  maxValue: 1,
 
79
  id: 'dofFocusRange',
80
  label: 'DOF focus range',
81
  description: 'Depth of field focus range',
82
+ category: ClapInputCategory.UNKNOWN,
83
  type: 'number',
84
  minValue: 0.01,
85
  maxValue: 1,
 
89
  id: 'dofBlurStrength',
90
  label: 'DOF blur strength',
91
  description: 'Depth of field blur strength',
92
+ category: ClapInputCategory.UNKNOWN,
93
  type: 'number',
94
  minValue: 0,
95
  maxValue: 1,
packages/app/src/services/editors/filter-editor/filters/cinematic.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const cinematic: Filter = {
@@ -8,6 +9,7 @@ export const cinematic: Filter = {
8
  id: 'preset',
9
  label: 'Preset',
10
  description: 'Cinematic color preset',
 
11
  type: 'string',
12
  allowedValues: [
13
  'Blade Runner',
@@ -22,6 +24,7 @@ export const cinematic: Filter = {
22
  id: 'intensity',
23
  label: 'Intensity',
24
  description: 'Intensity of the cinematic effect',
 
25
  type: 'number',
26
  minValue: 0,
27
  maxValue: 1,
@@ -31,6 +34,7 @@ export const cinematic: Filter = {
31
  id: 'contrast',
32
  label: 'Contrast',
33
  description: 'Image contrast',
 
34
  type: 'number',
35
  minValue: 0.5,
36
  maxValue: 2,
@@ -40,6 +44,7 @@ export const cinematic: Filter = {
40
  id: 'grain',
41
  label: 'Film Grain',
42
  description: 'Intensity of film grain effect',
 
43
  type: 'number',
44
  minValue: 0,
45
  maxValue: 1,
@@ -49,6 +54,7 @@ export const cinematic: Filter = {
49
  id: 'blur',
50
  label: 'Blur',
51
  description: 'Slight blur effect',
 
52
  type: 'number',
53
  minValue: 0,
54
  maxValue: 1,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const cinematic: Filter = {
 
9
  id: 'preset',
10
  label: 'Preset',
11
  description: 'Cinematic color preset',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'string',
14
  allowedValues: [
15
  'Blade Runner',
 
24
  id: 'intensity',
25
  label: 'Intensity',
26
  description: 'Intensity of the cinematic effect',
27
+ category: ClapInputCategory.UNKNOWN,
28
  type: 'number',
29
  minValue: 0,
30
  maxValue: 1,
 
34
  id: 'contrast',
35
  label: 'Contrast',
36
  description: 'Image contrast',
37
+ category: ClapInputCategory.UNKNOWN,
38
  type: 'number',
39
  minValue: 0.5,
40
  maxValue: 2,
 
44
  id: 'grain',
45
  label: 'Film Grain',
46
  description: 'Intensity of film grain effect',
47
+ category: ClapInputCategory.UNKNOWN,
48
  type: 'number',
49
  minValue: 0,
50
  maxValue: 1,
 
54
  id: 'blur',
55
  label: 'Blur',
56
  description: 'Slight blur effect',
57
+ category: ClapInputCategory.UNKNOWN,
58
  type: 'number',
59
  minValue: 0,
60
  maxValue: 1,
packages/app/src/services/editors/filter-editor/filters/colorMapping.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const colorMapping: Filter = {
@@ -8,6 +9,7 @@ export const colorMapping: Filter = {
8
  id: 'redMultiplier',
9
  label: 'Red multiplier',
10
  description: 'Red channel multiplier',
 
11
  type: 'number',
12
  minValue: 0,
13
  maxValue: 2,
@@ -17,6 +19,7 @@ export const colorMapping: Filter = {
17
  id: 'greenMultiplier',
18
  label: 'Green multiplier',
19
  description: 'Green channel multiplier',
 
20
  type: 'number',
21
  minValue: 0,
22
  maxValue: 2,
@@ -26,6 +29,7 @@ export const colorMapping: Filter = {
26
  id: 'blueMultiplier',
27
  label: 'Blue multiplier',
28
  description: 'Blue channel multiplier',
 
29
  type: 'number',
30
  minValue: 0,
31
  maxValue: 2,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const colorMapping: Filter = {
 
9
  id: 'redMultiplier',
10
  label: 'Red multiplier',
11
  description: 'Red channel multiplier',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'number',
14
  minValue: 0,
15
  maxValue: 2,
 
19
  id: 'greenMultiplier',
20
  label: 'Green multiplier',
21
  description: 'Green channel multiplier',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: 0,
25
  maxValue: 2,
 
29
  id: 'blueMultiplier',
30
  label: 'Blue multiplier',
31
  description: 'Blue channel multiplier',
32
+ category: ClapInputCategory.UNKNOWN,
33
  type: 'number',
34
  minValue: 0,
35
  maxValue: 2,
packages/app/src/services/editors/filter-editor/filters/colorTemperature.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const colorTemperature: Filter = {
@@ -9,6 +10,7 @@ export const colorTemperature: Filter = {
9
  label: 'Temperature',
10
  description: 'Color temperature in Kelvin',
11
  type: 'number',
 
12
  minValue: 1000,
13
  maxValue: 40000,
14
  defaultValue: 6500,
@@ -17,6 +19,7 @@ export const colorTemperature: Filter = {
17
  id: 'tint',
18
  label: 'Tint',
19
  description: 'Green-Magenta tint',
 
20
  type: 'number',
21
  minValue: -100,
22
  maxValue: 100,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const colorTemperature: Filter = {
 
10
  label: 'Temperature',
11
  description: 'Color temperature in Kelvin',
12
  type: 'number',
13
+ category: ClapInputCategory.UNKNOWN,
14
  minValue: 1000,
15
  maxValue: 40000,
16
  defaultValue: 6500,
 
19
  id: 'tint',
20
  label: 'Tint',
21
  description: 'Green-Magenta tint',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: -100,
25
  maxValue: 100,
packages/app/src/services/editors/filter-editor/filters/crossProcessing.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const crossProcessing: Filter = {
@@ -9,6 +10,7 @@ export const crossProcessing: Filter = {
9
  label: 'Intensity',
10
  description: 'Intensity of the cross-processing effect',
11
  type: 'number',
 
12
  minValue: 0,
13
  maxValue: 1,
14
  defaultValue: 0.5,
@@ -17,6 +19,7 @@ export const crossProcessing: Filter = {
17
  id: 'contrastBoost',
18
  label: 'Contrast boost',
19
  description: 'Amount of contrast boost',
 
20
  type: 'number',
21
  minValue: 0,
22
  maxValue: 1,
@@ -26,6 +29,7 @@ export const crossProcessing: Filter = {
26
  id: 'colorShift',
27
  label: 'Color shift',
28
  description: 'Direction of color shift',
 
29
  type: 'string',
30
  allowedValues: ['Cool', 'Warm'],
31
  defaultValue: 'Cool',
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const crossProcessing: Filter = {
 
10
  label: 'Intensity',
11
  description: 'Intensity of the cross-processing effect',
12
  type: 'number',
13
+ category: ClapInputCategory.UNKNOWN,
14
  minValue: 0,
15
  maxValue: 1,
16
  defaultValue: 0.5,
 
19
  id: 'contrastBoost',
20
  label: 'Contrast boost',
21
  description: 'Amount of contrast boost',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: 0,
25
  maxValue: 1,
 
29
  id: 'colorShift',
30
  label: 'Color shift',
31
  description: 'Direction of color shift',
32
+ category: ClapInputCategory.UNKNOWN,
33
  type: 'string',
34
  allowedValues: ['Cool', 'Warm'],
35
  defaultValue: 'Cool',
packages/app/src/services/editors/filter-editor/filters/filmDegradation.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const filmDegradation: Filter = {
@@ -8,6 +9,7 @@ export const filmDegradation: Filter = {
8
  id: 'scratchesIntensity',
9
  label: 'Scratches intensity',
10
  description: 'Intensity of film scratches',
 
11
  type: 'number',
12
  minValue: 0,
13
  maxValue: 1,
@@ -17,6 +19,7 @@ export const filmDegradation: Filter = {
17
  id: 'dustIntensity',
18
  label: 'Dust intensity',
19
  description: 'Intensity of dust and spots',
 
20
  type: 'number',
21
  minValue: 0,
22
  maxValue: 1,
@@ -26,6 +29,7 @@ export const filmDegradation: Filter = {
26
  id: 'grainIntensity',
27
  label: 'Grain intensity',
28
  description: 'Intensity of film grain',
 
29
  type: 'number',
30
  minValue: 0,
31
  maxValue: 1,
@@ -35,6 +39,7 @@ export const filmDegradation: Filter = {
35
  id: 'colorFading',
36
  label: 'Color fading',
37
  description: 'Color fading effect',
 
38
  type: 'number',
39
  minValue: 0,
40
  maxValue: 1,
@@ -44,6 +49,7 @@ export const filmDegradation: Filter = {
44
  id: 'vignettingIntensity',
45
  label: 'Vignetting intensity',
46
  description: 'Intensity of vignetting effect',
 
47
  type: 'number',
48
  minValue: 0,
49
  maxValue: 1,
@@ -53,6 +59,7 @@ export const filmDegradation: Filter = {
53
  id: 'flickerIntensity',
54
  label: 'Flicker intensity',
55
  description: 'Intensity of light flickering',
 
56
  type: 'number',
57
  minValue: 0,
58
  maxValue: 1,
@@ -62,6 +69,7 @@ export const filmDegradation: Filter = {
62
  id: 'lightLeakIntensity',
63
  label: 'Light leak intensity',
64
  description: 'Intensity of light leaks',
 
65
  type: 'number',
66
  minValue: 0,
67
  maxValue: 1,
@@ -71,6 +79,7 @@ export const filmDegradation: Filter = {
71
  id: 'filmType',
72
  label: 'Film type',
73
  description: 'Type of film to simulate',
 
74
  type: 'string',
75
  allowedValues: ['color', 'blackAndWhite', 'sepia'],
76
  defaultValue: 'color',
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const filmDegradation: Filter = {
 
9
  id: 'scratchesIntensity',
10
  label: 'Scratches intensity',
11
  description: 'Intensity of film scratches',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'number',
14
  minValue: 0,
15
  maxValue: 1,
 
19
  id: 'dustIntensity',
20
  label: 'Dust intensity',
21
  description: 'Intensity of dust and spots',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: 0,
25
  maxValue: 1,
 
29
  id: 'grainIntensity',
30
  label: 'Grain intensity',
31
  description: 'Intensity of film grain',
32
+ category: ClapInputCategory.UNKNOWN,
33
  type: 'number',
34
  minValue: 0,
35
  maxValue: 1,
 
39
  id: 'colorFading',
40
  label: 'Color fading',
41
  description: 'Color fading effect',
42
+ category: ClapInputCategory.UNKNOWN,
43
  type: 'number',
44
  minValue: 0,
45
  maxValue: 1,
 
49
  id: 'vignettingIntensity',
50
  label: 'Vignetting intensity',
51
  description: 'Intensity of vignetting effect',
52
+ category: ClapInputCategory.UNKNOWN,
53
  type: 'number',
54
  minValue: 0,
55
  maxValue: 1,
 
59
  id: 'flickerIntensity',
60
  label: 'Flicker intensity',
61
  description: 'Intensity of light flickering',
62
+ category: ClapInputCategory.UNKNOWN,
63
  type: 'number',
64
  minValue: 0,
65
  maxValue: 1,
 
69
  id: 'lightLeakIntensity',
70
  label: 'Light leak intensity',
71
  description: 'Intensity of light leaks',
72
+ category: ClapInputCategory.UNKNOWN,
73
  type: 'number',
74
  minValue: 0,
75
  maxValue: 1,
 
79
  id: 'filmType',
80
  label: 'Film type',
81
  description: 'Type of film to simulate',
82
+ category: ClapInputCategory.UNKNOWN,
83
  type: 'string',
84
  allowedValues: ['color', 'blackAndWhite', 'sepia'],
85
  defaultValue: 'color',
packages/app/src/services/editors/filter-editor/filters/infrared.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const infraredBlackAndWhite: Filter = {
@@ -8,6 +9,7 @@ export const infraredBlackAndWhite: Filter = {
8
  id: 'contrast',
9
  label: 'Contrast',
10
  description: 'Image contrast',
 
11
  type: 'number',
12
  minValue: 0.5,
13
  maxValue: 2.0,
@@ -17,6 +19,7 @@ export const infraredBlackAndWhite: Filter = {
17
  id: 'grain',
18
  label: 'Grain',
19
  description: 'Film grain intensity',
 
20
  type: 'number',
21
  minValue: 0,
22
  maxValue: 1,
@@ -26,6 +29,7 @@ export const infraredBlackAndWhite: Filter = {
26
  id: 'glow',
27
  label: 'Glow',
28
  description: 'Infrared glow effect',
 
29
  type: 'number',
30
  minValue: 0,
31
  maxValue: 1,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const infraredBlackAndWhite: Filter = {
 
9
  id: 'contrast',
10
  label: 'Contrast',
11
  description: 'Image contrast',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'number',
14
  minValue: 0.5,
15
  maxValue: 2.0,
 
19
  id: 'grain',
20
  label: 'Grain',
21
  description: 'Film grain intensity',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: 0,
25
  maxValue: 1,
 
29
  id: 'glow',
30
  label: 'Glow',
31
  description: 'Infrared glow effect',
32
+ category: ClapInputCategory.UNKNOWN,
33
  type: 'number',
34
  minValue: 0,
35
  maxValue: 1,
packages/app/src/services/editors/filter-editor/filters/lomography.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const lomography: Filter = {
@@ -8,6 +9,7 @@ export const lomography: Filter = {
8
  id: 'saturation',
9
  label: 'Saturation',
10
  description: 'Color saturation',
 
11
  type: 'number',
12
  minValue: 0,
13
  maxValue: 2,
@@ -17,6 +19,7 @@ export const lomography: Filter = {
17
  id: 'contrast',
18
  label: 'Contrast',
19
  description: 'Image contrast',
 
20
  type: 'number',
21
  minValue: 0.5,
22
  maxValue: 2,
@@ -26,6 +29,7 @@ export const lomography: Filter = {
26
  id: 'vignetteIntensity',
27
  label: 'Vignette intensity',
28
  description: 'Intensity of vignette effect',
 
29
  type: 'number',
30
  minValue: 0,
31
  maxValue: 1,
@@ -35,6 +39,7 @@ export const lomography: Filter = {
35
  id: 'lightLeakIntensity',
36
  label: 'Light leak intensity',
37
  description: 'Intensity of light leak effect',
 
38
  type: 'number',
39
  minValue: 0,
40
  maxValue: 1,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const lomography: Filter = {
 
9
  id: 'saturation',
10
  label: 'Saturation',
11
  description: 'Color saturation',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'number',
14
  minValue: 0,
15
  maxValue: 2,
 
19
  id: 'contrast',
20
  label: 'Contrast',
21
  description: 'Image contrast',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: 0.5,
25
  maxValue: 2,
 
29
  id: 'vignetteIntensity',
30
  label: 'Vignette intensity',
31
  description: 'Intensity of vignette effect',
32
+ category: ClapInputCategory.UNKNOWN,
33
  type: 'number',
34
  minValue: 0,
35
  maxValue: 1,
 
39
  id: 'lightLeakIntensity',
40
  label: 'Light leak intensity',
41
  description: 'Intensity of light leak effect',
42
+ category: ClapInputCategory.UNKNOWN,
43
  type: 'number',
44
  minValue: 0,
45
  maxValue: 1,
packages/app/src/services/editors/filter-editor/filters/splitToning.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const splitToning: Filter = {
@@ -8,6 +9,7 @@ export const splitToning: Filter = {
8
  id: 'highlightColor',
9
  label: 'Highlight color',
10
  description: 'Color for highlights',
 
11
  type: 'string',
12
  allowedValues: ['Red', 'Green', 'Blue', 'Yellow', 'Cyan', 'Magenta'],
13
  defaultValue: 'Yellow',
@@ -16,6 +18,7 @@ export const splitToning: Filter = {
16
  id: 'shadowColor',
17
  label: 'Shadow color',
18
  description: 'Color for shadows',
 
19
  type: 'string',
20
  allowedValues: ['Red', 'Green', 'Blue', 'Yellow', 'Cyan', 'Magenta'],
21
  defaultValue: 'Blue',
@@ -24,6 +27,7 @@ export const splitToning: Filter = {
24
  id: 'balance',
25
  label: 'Balance',
26
  description: 'Balance between highlights and shadows',
 
27
  type: 'number',
28
  minValue: -1,
29
  maxValue: 1,
@@ -33,6 +37,7 @@ export const splitToning: Filter = {
33
  id: 'intensity',
34
  label: 'Intensity',
35
  description: 'Intensity of the split toning effect',
 
36
  type: 'number',
37
  minValue: 0,
38
  maxValue: 1,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const splitToning: Filter = {
 
9
  id: 'highlightColor',
10
  label: 'Highlight color',
11
  description: 'Color for highlights',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'string',
14
  allowedValues: ['Red', 'Green', 'Blue', 'Yellow', 'Cyan', 'Magenta'],
15
  defaultValue: 'Yellow',
 
18
  id: 'shadowColor',
19
  label: 'Shadow color',
20
  description: 'Color for shadows',
21
+ category: ClapInputCategory.UNKNOWN,
22
  type: 'string',
23
  allowedValues: ['Red', 'Green', 'Blue', 'Yellow', 'Cyan', 'Magenta'],
24
  defaultValue: 'Blue',
 
27
  id: 'balance',
28
  label: 'Balance',
29
  description: 'Balance between highlights and shadows',
30
+ category: ClapInputCategory.UNKNOWN,
31
  type: 'number',
32
  minValue: -1,
33
  maxValue: 1,
 
37
  id: 'intensity',
38
  label: 'Intensity',
39
  description: 'Intensity of the split toning effect',
40
+ category: ClapInputCategory.UNKNOWN,
41
  type: 'number',
42
  minValue: 0,
43
  maxValue: 1,
packages/app/src/services/editors/filter-editor/filters/toneMapping.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const hdrToneMapping: Filter = {
@@ -8,6 +9,7 @@ export const hdrToneMapping: Filter = {
8
  id: 'exposure',
9
  label: 'Exposure',
10
  description: 'Exposure adjustment',
 
11
  type: 'number',
12
  minValue: -2,
13
  maxValue: 2,
@@ -17,6 +19,7 @@ export const hdrToneMapping: Filter = {
17
  id: 'contrast',
18
  label: 'Contrast',
19
  description: 'Contrast adjustment',
 
20
  type: 'number',
21
  minValue: 0.5,
22
  maxValue: 2,
@@ -26,6 +29,7 @@ export const hdrToneMapping: Filter = {
26
  id: 'saturation',
27
  label: 'Saturation',
28
  description: 'Color saturation',
 
29
  type: 'number',
30
  minValue: 0,
31
  maxValue: 2,
@@ -35,6 +39,7 @@ export const hdrToneMapping: Filter = {
35
  id: 'highlights',
36
  label: 'Highlights',
37
  description: 'Highlight adjustment',
 
38
  type: 'number',
39
  minValue: -1,
40
  maxValue: 1,
@@ -44,6 +49,7 @@ export const hdrToneMapping: Filter = {
44
  id: 'shadows',
45
  label: 'Shadows',
46
  description: 'Shadow adjustment',
 
47
  type: 'number',
48
  minValue: -1,
49
  maxValue: 1,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const hdrToneMapping: Filter = {
 
9
  id: 'exposure',
10
  label: 'Exposure',
11
  description: 'Exposure adjustment',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'number',
14
  minValue: -2,
15
  maxValue: 2,
 
19
  id: 'contrast',
20
  label: 'Contrast',
21
  description: 'Contrast adjustment',
22
+ category: ClapInputCategory.UNKNOWN,
23
  type: 'number',
24
  minValue: 0.5,
25
  maxValue: 2,
 
29
  id: 'saturation',
30
  label: 'Saturation',
31
  description: 'Color saturation',
32
+ category: ClapInputCategory.UNKNOWN,
33
  type: 'number',
34
  minValue: 0,
35
  maxValue: 2,
 
39
  id: 'highlights',
40
  label: 'Highlights',
41
  description: 'Highlight adjustment',
42
+ category: ClapInputCategory.UNKNOWN,
43
  type: 'number',
44
  minValue: -1,
45
  maxValue: 1,
 
49
  id: 'shadows',
50
  label: 'Shadows',
51
  description: 'Shadow adjustment',
52
+ category: ClapInputCategory.UNKNOWN,
53
  type: 'number',
54
  minValue: -1,
55
  maxValue: 1,
packages/app/src/services/editors/filter-editor/filters/vintageFilm.ts CHANGED
@@ -1,3 +1,4 @@
 
1
  import { Filter } from '@aitube/clapper-services'
2
 
3
  export const vintageFilm: Filter = {
@@ -8,6 +9,7 @@ export const vintageFilm: Filter = {
8
  id: 'preset',
9
  label: 'Preset',
10
  description: 'Vintage film stock preset',
 
11
  type: 'string',
12
  allowedValues: [
13
  'Kodachrome 64',
@@ -47,6 +49,7 @@ export const vintageFilm: Filter = {
47
  id: 'intensity',
48
  label: 'Intensity',
49
  description: 'Intensity of the film stock effect',
 
50
  type: 'number',
51
  minValue: 0,
52
  maxValue: 1,
@@ -56,6 +59,7 @@ export const vintageFilm: Filter = {
56
  id: 'grain',
57
  label: 'Grain',
58
  description: 'Film grain intensity',
 
59
  type: 'number',
60
  minValue: 0,
61
  maxValue: 1,
@@ -65,6 +69,7 @@ export const vintageFilm: Filter = {
65
  id: 'ageEffect',
66
  label: 'Age effect',
67
  description: 'Simulated age of the film',
 
68
  type: 'number',
69
  minValue: 0,
70
  maxValue: 1,
@@ -74,6 +79,7 @@ export const vintageFilm: Filter = {
74
  id: 'colorShift',
75
  label: 'Color shift',
76
  description: 'Color shift adjustment',
 
77
  type: 'number',
78
  minValue: -1,
79
  maxValue: 1,
@@ -83,6 +89,7 @@ export const vintageFilm: Filter = {
83
  id: 'contrast',
84
  label: 'Contrast',
85
  description: 'Contrast adjustment',
 
86
  type: 'number',
87
  minValue: 0.5,
88
  maxValue: 2,
@@ -92,6 +99,7 @@ export const vintageFilm: Filter = {
92
  id: 'saturation',
93
  label: 'Saturation',
94
  description: 'Saturation adjustment',
 
95
  type: 'number',
96
  minValue: 0,
97
  maxValue: 2,
 
1
+ import { ClapInputCategory } from '@aitube/clap'
2
  import { Filter } from '@aitube/clapper-services'
3
 
4
  export const vintageFilm: Filter = {
 
9
  id: 'preset',
10
  label: 'Preset',
11
  description: 'Vintage film stock preset',
12
+ category: ClapInputCategory.UNKNOWN,
13
  type: 'string',
14
  allowedValues: [
15
  'Kodachrome 64',
 
49
  id: 'intensity',
50
  label: 'Intensity',
51
  description: 'Intensity of the film stock effect',
52
+ category: ClapInputCategory.UNKNOWN,
53
  type: 'number',
54
  minValue: 0,
55
  maxValue: 1,
 
59
  id: 'grain',
60
  label: 'Grain',
61
  description: 'Film grain intensity',
62
+ category: ClapInputCategory.UNKNOWN,
63
  type: 'number',
64
  minValue: 0,
65
  maxValue: 1,
 
69
  id: 'ageEffect',
70
  label: 'Age effect',
71
  description: 'Simulated age of the film',
72
+ category: ClapInputCategory.UNKNOWN,
73
  type: 'number',
74
  minValue: 0,
75
  maxValue: 1,
 
79
  id: 'colorShift',
80
  label: 'Color shift',
81
  description: 'Color shift adjustment',
82
+ category: ClapInputCategory.UNKNOWN,
83
  type: 'number',
84
  minValue: -1,
85
  maxValue: 1,
 
89
  id: 'contrast',
90
  label: 'Contrast',
91
  description: 'Contrast adjustment',
92
+ category: ClapInputCategory.UNKNOWN,
93
  type: 'number',
94
  minValue: 0.5,
95
  maxValue: 2,
 
99
  id: 'saturation',
100
  label: 'Saturation',
101
  description: 'Saturation adjustment',
102
+ category: ClapInputCategory.UNKNOWN,
103
  type: 'number',
104
  minValue: 0,
105
  maxValue: 2,
packages/app/src/services/editors/workflow-editor/workflows/aitube/index.ts CHANGED
@@ -25,6 +25,7 @@ export const aitubeWorkflows: ClapWorkflow[] = [
25
  author: 'AiTube.at',
26
  thumbnailUrl: '',
27
  nonCommercial: false,
 
28
  engine: ClapWorkflowEngine.OPENCLAP,
29
  category: ClapWorkflowCategory.IMAGE_GENERATION,
30
  provider: ClapWorkflowProvider.AITUBE,
@@ -45,6 +46,7 @@ export const aitubeWorkflows: ClapWorkflow[] = [
45
  author: 'AiTube.at',
46
  thumbnailUrl: '',
47
  nonCommercial: false,
 
48
  engine: ClapWorkflowEngine.OPENCLAP,
49
  category: ClapWorkflowCategory.MUSIC_GENERATION,
50
  provider: ClapWorkflowProvider.AITUBE,
 
25
  author: 'AiTube.at',
26
  thumbnailUrl: '',
27
  nonCommercial: false,
28
+ canSupportLora: false,
29
  engine: ClapWorkflowEngine.OPENCLAP,
30
  category: ClapWorkflowCategory.IMAGE_GENERATION,
31
  provider: ClapWorkflowProvider.AITUBE,
 
46
  author: 'AiTube.at',
47
  thumbnailUrl: '',
48
  nonCommercial: false,
49
+ canSupportLora: false,
50
  engine: ClapWorkflowEngine.OPENCLAP,
51
  category: ClapWorkflowCategory.MUSIC_GENERATION,
52
  provider: ClapWorkflowProvider.AITUBE,
packages/app/src/services/editors/workflow-editor/workflows/anthropic/index.ts CHANGED
@@ -25,6 +25,7 @@ export const anthropicWorkflows: ClapWorkflow[] = [
25
  author: 'Anthropic',
26
  thumbnailUrl: '',
27
  nonCommercial: false,
 
28
  engine: ClapWorkflowEngine.REST_API,
29
  category: ClapWorkflowCategory.ASSISTANT,
30
  provider: ClapWorkflowProvider.ANTHROPIC,
@@ -43,6 +44,7 @@ export const anthropicWorkflows: ClapWorkflow[] = [
43
  author: 'Anthropic',
44
  thumbnailUrl: '',
45
  nonCommercial: false,
 
46
  engine: ClapWorkflowEngine.REST_API,
47
  category: ClapWorkflowCategory.ASSISTANT,
48
  provider: ClapWorkflowProvider.ANTHROPIC,
@@ -61,6 +63,7 @@ export const anthropicWorkflows: ClapWorkflow[] = [
61
  author: 'Anthropic',
62
  thumbnailUrl: '',
63
  nonCommercial: false,
 
64
  engine: ClapWorkflowEngine.REST_API,
65
  category: ClapWorkflowCategory.ASSISTANT,
66
  provider: ClapWorkflowProvider.ANTHROPIC,
 
25
  author: 'Anthropic',
26
  thumbnailUrl: '',
27
  nonCommercial: false,
28
+ canSupportLora: false,
29
  engine: ClapWorkflowEngine.REST_API,
30
  category: ClapWorkflowCategory.ASSISTANT,
31
  provider: ClapWorkflowProvider.ANTHROPIC,
 
44
  author: 'Anthropic',
45
  thumbnailUrl: '',
46
  nonCommercial: false,
47
+ canSupportLora: false,
48
  engine: ClapWorkflowEngine.REST_API,
49
  category: ClapWorkflowCategory.ASSISTANT,
50
  provider: ClapWorkflowProvider.ANTHROPIC,
 
63
  author: 'Anthropic',
64
  thumbnailUrl: '',
65
  nonCommercial: false,
66
+ canSupportLora: false,
67
  engine: ClapWorkflowEngine.REST_API,
68
  category: ClapWorkflowCategory.ASSISTANT,
69
  provider: ClapWorkflowProvider.ANTHROPIC,
packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts CHANGED
@@ -15,6 +15,7 @@ export const bigModelWorkflows: ClapWorkflow[] = [
15
  author: '',
16
  thumbnailUrl: '',
17
  nonCommercial: false,
 
18
  engine: ClapWorkflowEngine.REST_API,
19
  provider: ClapWorkflowProvider.BIGMODEL,
20
  category: ClapWorkflowCategory.VIDEO_GENERATION,
 
15
  author: '',
16
  thumbnailUrl: '',
17
  nonCommercial: false,
18
+ canSupportLora: false,
19
  engine: ClapWorkflowEngine.REST_API,
20
  provider: ClapWorkflowProvider.BIGMODEL,
21
  category: ClapWorkflowCategory.VIDEO_GENERATION,
packages/app/src/services/editors/workflow-editor/workflows/cohere/index.ts CHANGED
@@ -56,6 +56,7 @@ export const cohereWorkflows: ClapWorkflow[] = [
56
  author: 'Cohere',
57
  thumbnailUrl: '',
58
  nonCommercial: false,
 
59
  engine: ClapWorkflowEngine.REST_API,
60
  category: ClapWorkflowCategory.ASSISTANT,
61
  provider: ClapWorkflowProvider.COHERE,
@@ -74,6 +75,7 @@ export const cohereWorkflows: ClapWorkflow[] = [
74
  author: 'Cohere',
75
  thumbnailUrl: '',
76
  nonCommercial: false,
 
77
  engine: ClapWorkflowEngine.REST_API,
78
  category: ClapWorkflowCategory.ASSISTANT,
79
  provider: ClapWorkflowProvider.COHERE,
@@ -92,6 +94,7 @@ export const cohereWorkflows: ClapWorkflow[] = [
92
  author: 'Cohere',
93
  thumbnailUrl: '',
94
  nonCommercial: false,
 
95
  engine: ClapWorkflowEngine.REST_API,
96
  category: ClapWorkflowCategory.ASSISTANT,
97
  provider: ClapWorkflowProvider.COHERE,
@@ -110,6 +113,7 @@ export const cohereWorkflows: ClapWorkflow[] = [
110
  author: 'Cohere',
111
  thumbnailUrl: '',
112
  nonCommercial: false,
 
113
  engine: ClapWorkflowEngine.REST_API,
114
  category: ClapWorkflowCategory.ASSISTANT,
115
  provider: ClapWorkflowProvider.COHERE,
 
56
  author: 'Cohere',
57
  thumbnailUrl: '',
58
  nonCommercial: false,
59
+ canSupportLora: false,
60
  engine: ClapWorkflowEngine.REST_API,
61
  category: ClapWorkflowCategory.ASSISTANT,
62
  provider: ClapWorkflowProvider.COHERE,
 
75
  author: 'Cohere',
76
  thumbnailUrl: '',
77
  nonCommercial: false,
78
+ canSupportLora: false,
79
  engine: ClapWorkflowEngine.REST_API,
80
  category: ClapWorkflowCategory.ASSISTANT,
81
  provider: ClapWorkflowProvider.COHERE,
 
94
  author: 'Cohere',
95
  thumbnailUrl: '',
96
  nonCommercial: false,
97
+ canSupportLora: false,
98
  engine: ClapWorkflowEngine.REST_API,
99
  category: ClapWorkflowCategory.ASSISTANT,
100
  provider: ClapWorkflowProvider.COHERE,
 
113
  author: 'Cohere',
114
  thumbnailUrl: '',
115
  nonCommercial: false,
116
+ canSupportLora: false,
117
  engine: ClapWorkflowEngine.REST_API,
118
  category: ClapWorkflowCategory.ASSISTANT,
119
  provider: ClapWorkflowProvider.COHERE,
packages/app/src/services/editors/workflow-editor/workflows/comfyicu/index.ts CHANGED
@@ -30,6 +30,7 @@ export const comfyicuWorkflows: ClapWorkflow[] = [
30
  author: 'BFL (https://BlackForestLabs.ai)',
31
  thumbnailUrl: '',
32
  nonCommercial: false,
 
33
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
34
  provider: ClapWorkflowProvider.COMFYICU,
35
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
30
  author: 'BFL (https://BlackForestLabs.ai)',
31
  thumbnailUrl: '',
32
  nonCommercial: false,
33
+ canSupportLora: false,
34
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
35
  provider: ClapWorkflowProvider.COMFYICU,
36
  category: ClapWorkflowCategory.IMAGE_GENERATION,
packages/app/src/services/editors/workflow-editor/workflows/comfyui/index.ts CHANGED
@@ -20,6 +20,7 @@ export const comfyuiWorkflows: ClapWorkflow[] = [
20
  author: '',
21
  thumbnailUrl: '',
22
  nonCommercial: false,
 
23
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
24
  provider: ClapWorkflowProvider.COMFYUI,
25
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -52,6 +53,7 @@ export async function getDynamicComfyuiWorkflows(): Promise<ClapWorkflow[]> {
52
  author: 'You',
53
  thumbnailUrl: '',
54
  nonCommercial: false,
 
55
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
56
  provider: ClapWorkflowProvider.COMFYUI,
57
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -70,6 +72,7 @@ export async function getDynamicComfyuiWorkflows(): Promise<ClapWorkflow[]> {
70
  author: 'You',
71
  thumbnailUrl: '',
72
  nonCommercial: false,
 
73
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
74
  provider: ClapWorkflowProvider.COMFYUI,
75
  category: ClapWorkflowCategory.VIDEO_GENERATION,
@@ -88,6 +91,7 @@ export async function getDynamicComfyuiWorkflows(): Promise<ClapWorkflow[]> {
88
  author: 'You',
89
  thumbnailUrl: '',
90
  nonCommercial: false,
 
91
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
92
  provider: ClapWorkflowProvider.COMFYUI,
93
  category: ClapWorkflowCategory.VOICE_GENERATION,
@@ -106,6 +110,7 @@ export async function getDynamicComfyuiWorkflows(): Promise<ClapWorkflow[]> {
106
  author: 'You',
107
  thumbnailUrl: '',
108
  nonCommercial: false,
 
109
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
110
  provider: ClapWorkflowProvider.COMFYUI,
111
  category: ClapWorkflowCategory.MUSIC_GENERATION,
@@ -124,6 +129,7 @@ export async function getDynamicComfyuiWorkflows(): Promise<ClapWorkflow[]> {
124
  author: 'You',
125
  thumbnailUrl: '',
126
  nonCommercial: false,
 
127
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
128
  provider: ClapWorkflowProvider.COMFYUI,
129
  category: ClapWorkflowCategory.SOUND_GENERATION,
 
20
  author: '',
21
  thumbnailUrl: '',
22
  nonCommercial: false,
23
+ canSupportLora: false,
24
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
25
  provider: ClapWorkflowProvider.COMFYUI,
26
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
53
  author: 'You',
54
  thumbnailUrl: '',
55
  nonCommercial: false,
56
+ canSupportLora: false,
57
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
58
  provider: ClapWorkflowProvider.COMFYUI,
59
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
72
  author: 'You',
73
  thumbnailUrl: '',
74
  nonCommercial: false,
75
+ canSupportLora: false,
76
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
77
  provider: ClapWorkflowProvider.COMFYUI,
78
  category: ClapWorkflowCategory.VIDEO_GENERATION,
 
91
  author: 'You',
92
  thumbnailUrl: '',
93
  nonCommercial: false,
94
+ canSupportLora: false,
95
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
96
  provider: ClapWorkflowProvider.COMFYUI,
97
  category: ClapWorkflowCategory.VOICE_GENERATION,
 
110
  author: 'You',
111
  thumbnailUrl: '',
112
  nonCommercial: false,
113
+ canSupportLora: false,
114
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
115
  provider: ClapWorkflowProvider.COMFYUI,
116
  category: ClapWorkflowCategory.MUSIC_GENERATION,
 
129
  author: 'You',
130
  thumbnailUrl: '',
131
  nonCommercial: false,
132
+ canSupportLora: false,
133
  engine: ClapWorkflowEngine.COMFYUI_WORKFLOW,
134
  provider: ClapWorkflowProvider.COMFYUI,
135
  category: ClapWorkflowCategory.SOUND_GENERATION,
packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts CHANGED
@@ -1,4 +1,5 @@
1
  import { ClapInputField } from '@aitube/clap'
 
2
 
3
  // IMPORTANT: do NOT modify those default fields,
4
  // otherwise you might break the workflow of someone else!
@@ -9,6 +10,7 @@ export const genericInput: ClapInputField = {
9
  id: 'input',
10
  label: 'Input',
11
  description: 'Input',
 
12
  type: 'string',
13
  allowedValues: [],
14
  defaultValue: '',
@@ -18,6 +20,7 @@ export const genericText: ClapInputField = {
18
  id: 'text',
19
  label: 'Text',
20
  description: 'Text',
 
21
  type: 'string',
22
  allowedValues: [],
23
  defaultValue: '',
@@ -27,6 +30,7 @@ export const genericPrompt: ClapInputField = {
27
  id: 'prompt',
28
  label: 'Prompt',
29
  description: 'Prompt',
 
30
  type: 'string',
31
  allowedValues: [],
32
  defaultValue: '',
@@ -36,6 +40,7 @@ export const genericRatio: ClapInputField = {
36
  id: 'ratio',
37
  label: 'Image ratio',
38
  description: 'Image ratio (default to 1:1)',
 
39
  type: 'string',
40
  allowedValues: ['1:1', '16:9', '9:16'],
41
  defaultValue: '1:1',
@@ -45,16 +50,38 @@ export const genericSeed: ClapInputField = {
45
  id: 'seed',
46
  label: 'Seed',
47
  description: 'Seed',
48
- type: 'number',
 
49
  minValue: 0,
50
  maxValue: Math.pow(2, 31),
51
  defaultValue: 0,
52
  }
53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  export const genericImage: ClapInputField = {
55
  id: 'image',
56
  label: 'Image',
57
  description: 'Image',
 
58
  type: 'string',
59
  allowedValues: [],
60
  defaultValue: '',
@@ -64,6 +91,7 @@ export const genericImageUrl: ClapInputField = {
64
  id: 'image_url',
65
  label: 'Image URL',
66
  description: 'Image URL',
 
67
  type: 'string',
68
  allowedValues: [],
69
  defaultValue: '',
@@ -73,6 +101,7 @@ export const genericVideo: ClapInputField = {
73
  id: 'video',
74
  label: 'Video',
75
  description: 'Video',
 
76
  type: 'string',
77
  allowedValues: [],
78
  defaultValue: '',
@@ -82,6 +111,7 @@ export const genericVideoUrl: ClapInputField = {
82
  id: 'video_url',
83
  label: 'Video URL',
84
  description: 'Video URL',
 
85
  type: 'string',
86
  allowedValues: [],
87
  defaultValue: '',
@@ -91,6 +121,7 @@ export const genericVoice: ClapInputField = {
91
  id: 'voice',
92
  label: 'Voice',
93
  description: 'Voice',
 
94
  type: 'string',
95
  allowedValues: [],
96
  defaultValue: '',
@@ -100,6 +131,7 @@ export const genericAudio: ClapInputField = {
100
  id: 'audio',
101
  label: 'Audio',
102
  description: 'Audio',
 
103
  type: 'string',
104
  allowedValues: [],
105
  defaultValue: '',
@@ -109,6 +141,7 @@ export const genericInferenceSteps: ClapInputField = {
109
  id: 'num_inference_steps',
110
  label: 'Inference steps',
111
  description: 'Number of inference steps',
 
112
  type: 'number',
113
  minValue: 1,
114
  maxValue: 50,
@@ -119,9 +152,8 @@ export const genericUpscalingFactor: ClapInputField = {
119
  id: 'upscaling_factor',
120
  label: 'Upscaling Factor',
121
  description: 'Upscaling Factor (2, 3, 4..)',
122
- // <-- TODO: we should be able to have type: 'integer'
123
- // that is not a big issue, however (the implementation can do the rounding)
124
- type: 'number',
125
  minValue: 2,
126
  maxValue: 4,
127
  defaultValue: 2,
@@ -132,8 +164,7 @@ export const genericOverlappingTiles: ClapInputField = {
132
  label: 'Overlapping Tiles',
133
  description:
134
  'Overlapping tiles should reduce visible seams, but doubles the inference time.',
135
- // <-- TODO: we should be able to have type: 'integer'
136
- // that is not a big issue, however (the implementation can do the rounding)
137
  type: 'boolean',
138
  defaultValue: true,
139
  }
@@ -143,7 +174,8 @@ export const genericMotionBucketId: ClapInputField = {
143
  label: 'Motion Bucket ID',
144
  description:
145
  'The motion bucket ID determines the motion of the generated video. The higher the number, the more motion there will be.',
146
- type: 'number',
 
147
  minValue: 0,
148
  maxValue: 255,
149
  defaultValue: 127,
@@ -154,6 +186,7 @@ export const genericConditioningAugmentation: ClapInputField = {
154
  label: 'Conditioning Augmentation',
155
  description:
156
  'The conditoning augmentation determines the amount of noise that will be added to the conditioning frame. The higher the number, the more noise there will be, and the less the video will look like the initial image. Increase it for more motion.',
 
157
  type: 'number',
158
  minValue: 0,
159
  maxValue: 1,
@@ -164,6 +197,7 @@ export const genericWidth1024: ClapInputField = {
164
  id: 'width',
165
  label: 'Width',
166
  description: 'Width',
 
167
  type: 'number',
168
  minValue: 256,
169
  maxValue: 1024,
@@ -174,6 +208,7 @@ export const genericWidth2048: ClapInputField = {
174
  id: 'width',
175
  label: 'Width',
176
  description: 'Width',
 
177
  type: 'number',
178
  minValue: 256,
179
  maxValue: 2048,
@@ -184,6 +219,7 @@ export const genericHeight1024: ClapInputField = {
184
  id: 'height',
185
  label: 'Height',
186
  description: 'Height',
 
187
  type: 'number',
188
  minValue: 256,
189
  maxValue: 1024,
@@ -194,6 +230,7 @@ export const genericHeight2048: ClapInputField = {
194
  id: 'height',
195
  label: 'Height',
196
  description: 'Height',
 
197
  type: 'number',
198
  minValue: 256,
199
  maxValue: 2048,
 
1
  import { ClapInputField } from '@aitube/clap'
2
+ import { ClapInputCategory } from '@aitube/clap'
3
 
4
  // IMPORTANT: do NOT modify those default fields,
5
  // otherwise you might break the workflow of someone else!
 
10
  id: 'input',
11
  label: 'Input',
12
  description: 'Input',
13
+ category: ClapInputCategory.PROMPT,
14
  type: 'string',
15
  allowedValues: [],
16
  defaultValue: '',
 
20
  id: 'text',
21
  label: 'Text',
22
  description: 'Text',
23
+ category: ClapInputCategory.PROMPT,
24
  type: 'string',
25
  allowedValues: [],
26
  defaultValue: '',
 
30
  id: 'prompt',
31
  label: 'Prompt',
32
  description: 'Prompt',
33
+ category: ClapInputCategory.PROMPT,
34
  type: 'string',
35
  allowedValues: [],
36
  defaultValue: '',
 
40
  id: 'ratio',
41
  label: 'Image ratio',
42
  description: 'Image ratio (default to 1:1)',
43
+ category: ClapInputCategory.UNKNOWN,
44
  type: 'string',
45
  allowedValues: ['1:1', '16:9', '9:16'],
46
  defaultValue: '1:1',
 
50
  id: 'seed',
51
  label: 'Seed',
52
  description: 'Seed',
53
+ category: ClapInputCategory.SEED,
54
+ type: 'number', // <-- TODO: replace by 'integer' (might break stuff)
55
  minValue: 0,
56
  maxValue: Math.pow(2, 31),
57
  defaultValue: 0,
58
  }
59
 
60
+ export const genericLora: ClapInputField = {
61
+ id: 'lora',
62
+ label: 'Lora URL',
63
+ description: 'Lora URL',
64
+ category: ClapInputCategory.LORA,
65
+ type: 'string',
66
+ allowedValues: [],
67
+ defaultValue: '',
68
+ }
69
+
70
+ export const genericLoraUrl: ClapInputField = {
71
+ id: 'lora_url',
72
+ label: 'Lora URL',
73
+ description: 'Lora URL',
74
+ category: ClapInputCategory.LORA,
75
+ type: 'string',
76
+ allowedValues: [],
77
+ defaultValue: '',
78
+ }
79
+
80
  export const genericImage: ClapInputField = {
81
  id: 'image',
82
  label: 'Image',
83
  description: 'Image',
84
+ category: ClapInputCategory.IMAGE_URL,
85
  type: 'string',
86
  allowedValues: [],
87
  defaultValue: '',
 
91
  id: 'image_url',
92
  label: 'Image URL',
93
  description: 'Image URL',
94
+ category: ClapInputCategory.IMAGE_URL,
95
  type: 'string',
96
  allowedValues: [],
97
  defaultValue: '',
 
101
  id: 'video',
102
  label: 'Video',
103
  description: 'Video',
104
+ category: ClapInputCategory.VIDEO_URL,
105
  type: 'string',
106
  allowedValues: [],
107
  defaultValue: '',
 
111
  id: 'video_url',
112
  label: 'Video URL',
113
  description: 'Video URL',
114
+ category: ClapInputCategory.VIDEO_URL,
115
  type: 'string',
116
  allowedValues: [],
117
  defaultValue: '',
 
121
  id: 'voice',
122
  label: 'Voice',
123
  description: 'Voice',
124
+ category: ClapInputCategory.SOUND_URL,
125
  type: 'string',
126
  allowedValues: [],
127
  defaultValue: '',
 
131
  id: 'audio',
132
  label: 'Audio',
133
  description: 'Audio',
134
+ category: ClapInputCategory.SOUND_URL,
135
  type: 'string',
136
  allowedValues: [],
137
  defaultValue: '',
 
141
  id: 'num_inference_steps',
142
  label: 'Inference steps',
143
  description: 'Number of inference steps',
144
+ category: ClapInputCategory.INFERENCE_STEPS,
145
  type: 'number',
146
  minValue: 1,
147
  maxValue: 50,
 
152
  id: 'upscaling_factor',
153
  label: 'Upscaling Factor',
154
  description: 'Upscaling Factor (2, 3, 4..)',
155
+ category: ClapInputCategory.UPSCALING_FACTOR,
156
+ type: 'number', // <-- TODO: replace by 'integer' (might break stuff)
 
157
  minValue: 2,
158
  maxValue: 4,
159
  defaultValue: 2,
 
164
  label: 'Overlapping Tiles',
165
  description:
166
  'Overlapping tiles should reduce visible seams, but doubles the inference time.',
167
+ category: ClapInputCategory.CUSTOM,
 
168
  type: 'boolean',
169
  defaultValue: true,
170
  }
 
174
  label: 'Motion Bucket ID',
175
  description:
176
  'The motion bucket ID determines the motion of the generated video. The higher the number, the more motion there will be.',
177
+ category: ClapInputCategory.CUSTOM,
178
+ type: 'number', // <-- TODO: replace by 'integer' (might break stuff)
179
  minValue: 0,
180
  maxValue: 255,
181
  defaultValue: 127,
 
186
  label: 'Conditioning Augmentation',
187
  description:
188
  'The conditoning augmentation determines the amount of noise that will be added to the conditioning frame. The higher the number, the more noise there will be, and the less the video will look like the initial image. Increase it for more motion.',
189
+ category: ClapInputCategory.CUSTOM,
190
  type: 'number',
191
  minValue: 0,
192
  maxValue: 1,
 
197
  id: 'width',
198
  label: 'Width',
199
  description: 'Width',
200
+ category: ClapInputCategory.WIDTH,
201
  type: 'number',
202
  minValue: 256,
203
  maxValue: 1024,
 
208
  id: 'width',
209
  label: 'Width',
210
  description: 'Width',
211
+ category: ClapInputCategory.WIDTH,
212
  type: 'number',
213
  minValue: 256,
214
  maxValue: 2048,
 
219
  id: 'height',
220
  label: 'Height',
221
  description: 'Height',
222
+ category: ClapInputCategory.HEIGHT,
223
  type: 'number',
224
  minValue: 256,
225
  maxValue: 1024,
 
230
  id: 'height',
231
  label: 'Height',
232
  description: 'Height',
233
+ category: ClapInputCategory.HEIGHT,
234
  type: 'number',
235
  minValue: 256,
236
  maxValue: 2048,
packages/app/src/services/editors/workflow-editor/workflows/common/loras/canWorkflowUseLora.ts ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import { ClapInputCategory, ClapWorkflow } from '@aitube/clap/dist/types'
2
+
3
+ export function canWorkflowUseLora(workflow: ClapWorkflow): boolean {
4
+ return workflow.inputFields.some(
5
+ ({ category }) => category === ClapInputCategory.LORA
6
+ // category === ClapInputCategory.LORA_HF_MODEL ||
7
+ // category === ClapInputCategory.LORA_WEIGHT_URL
8
+ )
9
+ }
packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowInputField.ts ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ import { ClapInputCategory, ClapInputField, ClapWorkflow } from '@aitube/clap'
2
+
3
+ export function getWorkflowInputField(
4
+ workflow: ClapWorkflow,
5
+ category: ClapInputCategory
6
+ ): ClapInputField | undefined {
7
+ return workflow.inputFields.find((field) => field.category === category)
8
+ }
packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora.ts ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { ClapInputCategory, ClapWorkflow } from '@aitube/clap'
2
+ import { Lora } from '@/services/editors/workflow-editor/workflows/common/types'
3
+ import { defaultLoraModels } from '@/services/editors/workflow-editor/workflows/common/loras'
4
+
5
+ import { getWorkflowInputField } from './getWorkflowInputField'
6
+
7
+ export function getWorkflowLora(workflow: ClapWorkflow): Lora | undefined {
8
+ const inputField = getWorkflowInputField(workflow, ClapInputCategory.LORA)
9
+
10
+ if (!inputField) {
11
+ return undefined
12
+ }
13
+
14
+ const loraRepoOrUrl: string = workflow.inputValues[inputField.id]
15
+
16
+ if (!loraRepoOrUrl) {
17
+ return undefined
18
+ }
19
+
20
+ const loraModel = defaultLoraModels.find((lora) => (
21
+ lora.repoOrUrl === loraRepoOrUrl
22
+ ))
23
+
24
+ if (!loraModel) {
25
+ return undefined
26
+ }
27
+ return loraModel
28
+ }
packages/app/src/services/editors/workflow-editor/workflows/common/loras/index.ts ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
// a list of FLUX.1 LoRA models that can be used with various providers

import { LoraBaseModel, Lora } from '../types'

// Static catalog of LoRA models offered in the UI.
// `repoOrUrl` is what gets sent to the provider (HF repo name or weights URL);
// `trigger` / `extensions` are prompt fragments suggested by each author.
export const defaultLoraModels: Lora[] = [
  {
    id: 'lora://hf.co/models/alvdansen/flux-koda',
    label: 'Koda',
    baseModel: LoraBaseModel.FLUX,
    description:
      "Koda captures the nostalgic essence of early 1990s photography, evoking memories of disposable cameras and carefree travels. It specializes in creating images with a distinct vintage quality, characterized by slightly washed-out colors, soft focus, and the occasional light leak or film grain. The model excels at producing slice-of-life scenes that feel spontaneous and candid, as if plucked from a family photo album or a backpacker's travel diary.",

    thumbnailUrl:
      'https://hf.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png',

    projectUrl: 'https://hf.co/alvdansen/flux-koda',

    author: '@alvdansen',

    // trigger (usually some kind of unique string sequence, eg TOK)
    trigger: 'flmft style',

    // extra keywords recommended by the author to steer the style
    extensions:
      'kodachrome, blurry, realistic, still life, depth of field, scenery, no humans, monochrome, greyscale, traditional media, horizon, looking at viewer, light particles, shadow',

    repoOrUrl: 'alvdansen/flux-koda',
  },
  {
    id: 'lora://hf.co/models/veryVANYA/ps1-style-flux',
    label: 'PS1 Style',
    baseModel: LoraBaseModel.FLUX,
    description: `late 90s/early 2000s ps1/n64 console graphics.

5000 steps

trained on 15 gpt4o captioned and adjusted ps1/n64 game screenshots using https://github.com/ostris/ai-toolkit/tree/main`,

    thumbnailUrl:
      'https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24440109.jpeg',

    projectUrl: 'https://hf.co/veryVANYA/ps1-style-flux',
    author: '@veryVANYA',

    // trigger (usually some kind of unique string sequence, eg TOK)
    trigger: 'ps1',

    // extra keywords recommended by the author to steer the style
    extensions: 'ps1 game screenshot',

    repoOrUrl: 'veryVANYA/ps1-style-flux',
  },
  // kept for reference: internal-only test model, intentionally disabled
  /*
  {
    id: 'lora://hf.co/models/jbilcke-hf/experimental-model-1',
    label: 'Experimental Model 1',
    baseModel: LoraBaseModel.FLUX,
    description: 'A model for internal testing',

    thumbnailUrl: '',

    projectUrl: 'clapper.app',

    author: '@jbilcke-hf',

    // trigger (usually some kind of unique string sequence, eg TOK)
    trigger: 'TKE1',

    extensions:
      'movie screencap from <YEAR>, in <color|black & white>, with film grain.',

    repoOrUrl: 'jbilcke-hf/experimental-model-1',
  },
  */
]
packages/app/src/services/editors/workflow-editor/workflows/common/types.ts CHANGED
@@ -1,3 +1,35 @@
1
  import { ClapWorkflow } from '@aitube/clap'
2
 
3
  export type DynamicClapWorkflow = () => Promise<ClapWorkflow[]>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import { ClapWorkflow } from '@aitube/clap'
2
 
3
  export type DynamicClapWorkflow = () => Promise<ClapWorkflow[]>
4
+
5
+ export enum LoraBaseModel {
6
+ FLUX = 'FLUX',
7
+ SDXL = 'SDXL',
8
+ }
9
+
10
+ export type Lora = {
11
+ id: string
12
+
13
+ label: string
14
+
15
+ baseModel: LoraBaseModel.FLUX
16
+
17
+ description: string
18
+
19
+ thumbnailUrl: string
20
+
21
+ // URL to the page presenting the LoRA (eg. HF model page)
22
+ projectUrl: string
23
+
24
+ author: string
25
+
26
+ // trigger (usually some kind of unique string sequence, eg TOK)
27
+ trigger: string
28
+
29
+ // additional keywords suggested by the author
30
+ extensions: string
31
+
32
+ // name of the model repository on Hugging Face
33
+ // or direct URL to the weights
34
+ repoOrUrl: string
35
+ }
packages/app/src/services/editors/workflow-editor/workflows/elevenlabs/index.ts CHANGED
@@ -19,6 +19,7 @@ export const elevenlabsWorkflows: ClapWorkflow[] = [
19
  author: 'ElevenLabs',
20
  thumbnailUrl: '',
21
  nonCommercial: false,
 
22
  engine: ClapWorkflowEngine.REST_API,
23
  provider: ClapWorkflowProvider.ELEVENLABS,
24
  category: ClapWorkflowCategory.VOICE_GENERATION,
@@ -40,6 +41,7 @@ export const elevenlabsWorkflows: ClapWorkflow[] = [
40
  author: 'ElevenLabs',
41
  thumbnailUrl: '',
42
  nonCommercial: false,
 
43
  engine: ClapWorkflowEngine.REST_API,
44
  provider: ClapWorkflowProvider.ELEVENLABS,
45
  category: ClapWorkflowCategory.SOUND_GENERATION,
 
19
  author: 'ElevenLabs',
20
  thumbnailUrl: '',
21
  nonCommercial: false,
22
+ canSupportLora: false,
23
  engine: ClapWorkflowEngine.REST_API,
24
  provider: ClapWorkflowProvider.ELEVENLABS,
25
  category: ClapWorkflowCategory.VOICE_GENERATION,
 
41
  author: 'ElevenLabs',
42
  thumbnailUrl: '',
43
  nonCommercial: false,
44
+ canSupportLora: false,
45
  engine: ClapWorkflowEngine.REST_API,
46
  provider: ClapWorkflowProvider.ELEVENLABS,
47
  category: ClapWorkflowCategory.SOUND_GENERATION,
packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts CHANGED
@@ -22,6 +22,7 @@ import {
22
  genericOverlappingTiles,
23
  genericInferenceSteps,
24
  genericImageUrl,
 
25
  } from '../common/defaultValues'
26
  import { sampleDrivingVideo, sampleVoice } from '@/lib/core/constants'
27
 
@@ -45,6 +46,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
45
  author: '',
46
  thumbnailUrl: '',
47
  nonCommercial: false,
 
48
  engine: ClapWorkflowEngine.REST_API,
49
  provider: ClapWorkflowProvider.FALAI,
50
  category: ClapWorkflowCategory.VIDEO_GENERATION,
@@ -65,6 +67,38 @@ export const defaultWorkflows: ClapWorkflow[] = [
65
  cond_aug: 0.02,
66
  },
67
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  {
69
  id: 'falai://fal-ai/flux-realism',
70
  label: 'Flux Realism LoRA',
@@ -73,6 +107,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
73
  author: '',
74
  thumbnailUrl: '',
75
  nonCommercial: false,
 
76
  engine: ClapWorkflowEngine.REST_API,
77
  provider: ClapWorkflowProvider.FALAI,
78
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -102,6 +137,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
102
  author: 'BFL (https://BlackForestLabs.ai)',
103
  thumbnailUrl: '',
104
  nonCommercial: false,
 
105
  engine: ClapWorkflowEngine.REST_API,
106
  provider: ClapWorkflowProvider.FALAI,
107
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -133,6 +169,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
133
  author: 'BFL (https://BlackForestLabs.ai)',
134
  thumbnailUrl: '',
135
  nonCommercial: false,
 
136
  engine: ClapWorkflowEngine.REST_API,
137
  provider: ClapWorkflowProvider.FALAI,
138
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -153,6 +190,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
153
  author: 'BFL (https://BlackForestLabs.ai)',
154
  thumbnailUrl: '',
155
  nonCommercial: false,
 
156
  engine: ClapWorkflowEngine.REST_API,
157
  provider: ClapWorkflowProvider.FALAI,
158
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -184,6 +222,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
184
  author: 'Stability AI',
185
  thumbnailUrl: '',
186
  nonCommercial: false,
 
187
  engine: ClapWorkflowEngine.REST_API,
188
  provider: ClapWorkflowProvider.FALAI,
189
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -215,6 +254,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
215
  author: 'Stability AI',
216
  thumbnailUrl: '',
217
  nonCommercial: false,
 
218
  engine: ClapWorkflowEngine.REST_API,
219
  provider: ClapWorkflowProvider.FALAI,
220
  category: ClapWorkflowCategory.IMAGE_GENERATION,
@@ -246,6 +286,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
246
  author: 'AuraSR',
247
  thumbnailUrl: '',
248
  nonCommercial: false,
 
249
  engine: ClapWorkflowEngine.REST_API,
250
  provider: ClapWorkflowProvider.FALAI,
251
  category: ClapWorkflowCategory.IMAGE_UPSCALING,
 
22
  genericOverlappingTiles,
23
  genericInferenceSteps,
24
  genericImageUrl,
25
+ genericLora,
26
  } from '../common/defaultValues'
27
  import { sampleDrivingVideo, sampleVoice } from '@/lib/core/constants'
28
 
 
46
  author: '',
47
  thumbnailUrl: '',
48
  nonCommercial: false,
49
+ canSupportLora: false,
50
  engine: ClapWorkflowEngine.REST_API,
51
  provider: ClapWorkflowProvider.FALAI,
52
  category: ClapWorkflowCategory.VIDEO_GENERATION,
 
67
  cond_aug: 0.02,
68
  },
69
  },
70
+ /*
71
+ {
72
+ id: 'falai://fal-ai/flux-general',
73
+ label: 'Flux.1-[DEV] with LoRAs',
74
+ description: '',
75
+ tags: ['Flux', 'LoRA'],
76
+ author: '',
77
+ thumbnailUrl: '',
78
+ nonCommercial: false,
79
+ canSupportLora: false,
80
+ engine: ClapWorkflowEngine.REST_API,
81
+ provider: ClapWorkflowProvider.FALAI,
82
+ category: ClapWorkflowCategory.IMAGE_GENERATION,
83
+ data: 'fal-ai/flux-general',
84
+ schema: '',
85
+ inputFields: [
86
+ genericPrompt,
87
+ genericWidth2048,
88
+ genericHeight2048,
89
+ genericInferenceSteps,
90
+ ],
91
+ inputValues: {
92
+ [genericPrompt.id]: genericPrompt.defaultValue,
93
+ [genericWidth2048.id]: genericWidth2048.defaultValue,
94
+ [genericHeight2048.id]: genericHeight2048.defaultValue,
95
+ [genericInferenceSteps.id]: genericInferenceSteps.defaultValue,
96
+
97
+ // support LoRA for this model is a bit tricky, as the parameter must be in JSON
98
+ // (this is an array of LoraWeight objects, see: https://fal.ai/models/fal-ai/flux-general/playground)
99
+ },
100
+ },
101
+ */
102
  {
103
  id: 'falai://fal-ai/flux-realism',
104
  label: 'Flux Realism LoRA',
 
107
  author: '',
108
  thumbnailUrl: '',
109
  nonCommercial: false,
110
+ canSupportLora: false,
111
  engine: ClapWorkflowEngine.REST_API,
112
  provider: ClapWorkflowProvider.FALAI,
113
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
137
  author: 'BFL (https://BlackForestLabs.ai)',
138
  thumbnailUrl: '',
139
  nonCommercial: false,
140
+ canSupportLora: false,
141
  engine: ClapWorkflowEngine.REST_API,
142
  provider: ClapWorkflowProvider.FALAI,
143
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
169
  author: 'BFL (https://BlackForestLabs.ai)',
170
  thumbnailUrl: '',
171
  nonCommercial: false,
172
+ canSupportLora: false,
173
  engine: ClapWorkflowEngine.REST_API,
174
  provider: ClapWorkflowProvider.FALAI,
175
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
190
  author: 'BFL (https://BlackForestLabs.ai)',
191
  thumbnailUrl: '',
192
  nonCommercial: false,
193
+ canSupportLora: false,
194
  engine: ClapWorkflowEngine.REST_API,
195
  provider: ClapWorkflowProvider.FALAI,
196
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
222
  author: 'Stability AI',
223
  thumbnailUrl: '',
224
  nonCommercial: false,
225
+ canSupportLora: false,
226
  engine: ClapWorkflowEngine.REST_API,
227
  provider: ClapWorkflowProvider.FALAI,
228
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
254
  author: 'Stability AI',
255
  thumbnailUrl: '',
256
  nonCommercial: false,
257
+ canSupportLora: false,
258
  engine: ClapWorkflowEngine.REST_API,
259
  provider: ClapWorkflowProvider.FALAI,
260
  category: ClapWorkflowCategory.IMAGE_GENERATION,
 
286
  author: 'AuraSR',
287
  thumbnailUrl: '',
288
  nonCommercial: false,
289
+ canSupportLora: false,
290
  engine: ClapWorkflowEngine.REST_API,
291
  provider: ClapWorkflowProvider.FALAI,
292
  category: ClapWorkflowCategory.IMAGE_UPSCALING,
packages/app/src/services/editors/workflow-editor/workflows/replicate/defaultWorkflows.ts CHANGED
@@ -9,6 +9,7 @@ import {
9
  genericHeight1024,
10
  genericHeight2048,
11
  genericImage,
 
12
  genericPrompt,
13
  genericVideo,
14
  genericWidth1024,
@@ -16,9 +17,40 @@ import {
16
  } from '../common/defaultValues'
17
 
18
  export const defaultWorkflows: ClapWorkflow[] = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  {
20
  id: 'replicate://black-forest-labs/flux-pro',
21
- label: 'FLUX.1 [pro]',
22
  description: '',
23
  tags: ['flux'],
24
  author: 'BFL (https://BlackForestLabs.ai)',
@@ -47,7 +79,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
47
  },
48
  {
49
  id: 'replicate://black-forest-labs/flux-schnell',
50
- label: 'FLUX.1 [schnell]',
51
  description: '',
52
  tags: ['flux'],
53
  author: 'BFL (https://BlackForestLabs.ai)',
@@ -68,7 +100,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
68
  },
69
  {
70
  id: 'replicate://black-forest-labs/flux-dev',
71
- label: 'FLUX.1 [dev]',
72
  description: '',
73
  tags: ['flux'],
74
  author: 'BFL (https://BlackForestLabs.ai)',
 
9
  genericHeight1024,
10
  genericHeight2048,
11
  genericImage,
12
+ genericLora,
13
  genericPrompt,
14
  genericVideo,
15
  genericWidth1024,
 
17
  } from '../common/defaultValues'
18
 
19
  export const defaultWorkflows: ClapWorkflow[] = [
20
+ {
21
+ id: 'replicate://lucataco/flux-dev-lora',
22
+ label: 'Flux-dev-lora',
23
+ description: '',
24
+ tags: ['flux'],
25
+ author: '@lucataco',
26
+ thumbnailUrl: '',
27
+ engine: ClapWorkflowEngine.REST_API,
28
+ category: ClapWorkflowCategory.IMAGE_GENERATION,
29
+ provider: ClapWorkflowProvider.REPLICATE,
30
+ data: 'lucataco/flux-dev-lora',
31
+ // data: 'lucataco/flux-dev-lora:94a0c19e55e36f75d657ecf9eada9f16a233b5329fb9cdf8e2b9ecd093e5c97e',
32
+ /**
33
+ * Inputs of the workflow (this is used to build an UI for the automatically)
34
+ */
35
+ inputFields: [
36
+ genericPrompt,
37
+ genericWidth2048,
38
+ genericHeight2048,
39
+ {
40
+ ...genericLora,
41
+ id: 'hf_lora',
42
+ },
43
+ ],
44
+ inputValues: {
45
+ prompt: genericPrompt.defaultValue,
46
+ width: genericWidth2048.defaultValue,
47
+ height: genericHeight2048.defaultValue,
48
+ hf_lora: genericLora.defaultValue,
49
+ },
50
+ },
51
  {
52
  id: 'replicate://black-forest-labs/flux-pro',
53
+ label: 'Flux-pro',
54
  description: '',
55
  tags: ['flux'],
56
  author: 'BFL (https://BlackForestLabs.ai)',
 
79
  },
80
  {
81
  id: 'replicate://black-forest-labs/flux-schnell',
82
+ label: 'Flux-schnell',
83
  description: '',
84
  tags: ['flux'],
85
  author: 'BFL (https://BlackForestLabs.ai)',
 
100
  },
101
  {
102
  id: 'replicate://black-forest-labs/flux-dev',
103
+ label: 'Flux-dev',
104
  description: '',
105
  tags: ['flux'],
106
  author: 'BFL (https://BlackForestLabs.ai)',
packages/app/src/services/settings/useSettings.ts CHANGED
@@ -2,7 +2,12 @@
2
 
3
  import { create } from 'zustand'
4
  import { persist } from 'zustand/middleware'
5
- import { getValidNumber, ClapWorkflowProvider } from '@aitube/clap'
 
 
 
 
 
6
  import { parseRenderingStrategy, RenderingStrategy } from '@aitube/timeline'
7
  import {
8
  ComfyIcuAccelerator,
@@ -18,10 +23,7 @@ import { getDefaultSettingsState } from './getDefaultSettingsState'
18
  import { getValidComfyWorkflowTemplate } from '@/lib/utils/getValidComfyWorkflowTemplate'
19
  import { parseComfyIcuAccelerator } from '@/lib/utils/parseComfyIcuAccelerator'
20
 
21
- // that may not be the best way to do this,
22
- // and importing useWorkflowEditor here is a bit tricky
23
- import { findWorkflows } from '@/components/toolbars/top-menu/lists/getWorkflowProviders'
24
- import { useWorkflowEditor } from '../editors/workflow-editor/useWorkflowEditor'
25
 
26
  export const useSettings = create<SettingsStore>()(
27
  persist(
@@ -303,147 +305,137 @@ export const useSettings = create<SettingsStore>()(
303
  ),
304
  })
305
  },
306
- setAssistantWorkflow: (assistantWorkflow?: string) => {
307
  const { assistantWorkflow: defaultAssistantWorkflow } =
308
  getDefaultSettingsState()
309
  set({
310
- assistantWorkflow: getValidString(
311
- assistantWorkflow,
312
- defaultAssistantWorkflow
313
- ),
314
  })
315
  },
316
- setAssistantTurboWorkflow: (assistantTurboWorkflow?: string) => {
317
  const { assistantTurboWorkflow: defaultAssistantTurboWorkflow } =
318
  getDefaultSettingsState()
319
  set({
320
- assistantTurboWorkflow: getValidString(
321
- assistantTurboWorkflow,
322
- defaultAssistantTurboWorkflow
323
- ),
324
  })
325
  },
326
- setImageGenerationWorkflow: (imageGenerationWorkflow?: string) => {
327
  const { imageGenerationWorkflow: defaultImageGenerationWorkflow } =
328
  getDefaultSettingsState()
329
  set({
330
- imageGenerationWorkflow: getValidString(
331
- imageGenerationWorkflow,
332
- defaultImageGenerationWorkflow
333
- ),
334
  })
335
  },
336
  setImageGenerationTurboWorkflow: (
337
- imageGenerationTurboWorkflow?: string
338
  ) => {
339
  const {
340
  imageGenerationTurboWorkflow: defaultImageGenerationTurboWorkflow,
341
  } = getDefaultSettingsState()
342
  set({
343
- imageGenerationTurboWorkflow: getValidString(
344
- imageGenerationTurboWorkflow,
345
- defaultImageGenerationTurboWorkflow
346
- ),
347
  })
348
  },
349
- setImageUpscalingWorkflow: (imageUpscalingWorkflow?: string) => {
350
  const { imageUpscalingWorkflow: defaultImageUpscalingWorkflow } =
351
  getDefaultSettingsState()
352
  set({
353
- imageUpscalingWorkflow: getValidString(
354
- imageUpscalingWorkflow,
355
- defaultImageUpscalingWorkflow
356
- ),
357
  })
358
  },
359
- setImageDepthWorkflow: (imageDepthWorkflow?: string) => {
360
  const { imageDepthWorkflow: defaultImageDepthWorkflow } =
361
  getDefaultSettingsState()
362
  set({
363
- imageDepthWorkflow: getValidString(
364
- imageDepthWorkflow,
365
- defaultImageDepthWorkflow
366
- ),
367
  })
368
  },
369
- setImageSegmentationWorkflow: (imageSegmentationWorkflow?: string) => {
 
 
370
  const { imageSegmentationWorkflow: defaultImageSegmentationWorkflow } =
371
  getDefaultSettingsState()
372
  set({
373
- imageSegmentationWorkflow: getValidString(
374
- imageSegmentationWorkflow,
375
- defaultImageSegmentationWorkflow
376
- ),
377
  })
378
  },
379
- setVideoGenerationWorkflow: (videoGenerationWorkflow?: string) => {
380
  const { videoGenerationWorkflow: defaultVideoGenerationWorkflow } =
381
  getDefaultSettingsState()
382
  set({
383
- videoGenerationWorkflow: getValidString(
384
- videoGenerationWorkflow,
385
- defaultVideoGenerationWorkflow
386
- ),
387
  })
388
  },
389
- setVideoUpscalingWorkflow: (videoUpscalingWorkflow?: string) => {
390
  const { videoUpscalingWorkflow: defaultVideoUpscalingWorkflow } =
391
  getDefaultSettingsState()
392
  set({
393
- videoUpscalingWorkflow: getValidString(
394
- videoUpscalingWorkflow,
395
- defaultVideoUpscalingWorkflow
396
- ),
397
  })
398
  },
399
- setVideoDepthWorkflow: (videoDepthWorkflow?: string) => {
400
  const { videoDepthWorkflow: defaultVideoDepthWorkflow } =
401
  getDefaultSettingsState()
402
  set({
403
- videoDepthWorkflow: getValidString(
404
- videoDepthWorkflow,
405
- defaultVideoDepthWorkflow
406
- ),
407
  })
408
  },
409
- setVideoSegmentationWorkflow: (videoSegmentationWorkflow?: string) => {
 
 
410
  const { videoSegmentationWorkflow: defaultVideoSegmentationWorkflow } =
411
  getDefaultSettingsState()
412
  set({
413
- videoSegmentationWorkflow: getValidString(
414
- videoSegmentationWorkflow,
415
- defaultVideoSegmentationWorkflow
416
- ),
417
  })
418
  },
419
- setSoundGenerationWorkflow: (soundGenerationWorkflow?: string) => {
420
  const { soundGenerationWorkflow: defaultSoundGenerationWorkflow } =
421
  getDefaultSettingsState()
422
  set({
423
- soundGenerationWorkflow: getValidString(
424
- soundGenerationWorkflow,
425
- defaultSoundGenerationWorkflow
426
- ),
427
  })
428
  },
429
- setVoiceGenerationWorkflow: (voiceGenerationWorkflow?: string) => {
430
  const { voiceGenerationWorkflow: defaultVoiceGenerationWorkflow } =
431
  getDefaultSettingsState()
432
  set({
433
- voiceGenerationWorkflow: getValidString(
434
- voiceGenerationWorkflow,
435
- defaultVoiceGenerationWorkflow
436
- ),
437
  })
438
  },
439
- setMusicGenerationWorkflow: (musicGenerationWorkflow?: string) => {
440
  const { musicGenerationWorkflow: defaultVoiceGenerationWorkflow } =
441
  getDefaultSettingsState()
442
  set({
443
- musicGenerationWorkflow: getValidString(
444
- musicGenerationWorkflow,
445
- defaultVoiceGenerationWorkflow
446
- ),
447
  })
448
  },
449
  setImageRenderingStrategy: (
@@ -735,82 +727,86 @@ export const useSettings = create<SettingsStore>()(
735
  const state = get()
736
  const defaultSettings = getDefaultSettingsState()
737
 
738
- // I think this is causing some issues,
739
- // with the settings having a dependency over the workflows,
740
- // which creates a loop
741
- //
742
- // we should probably this step else where
743
- const availableWorkflows =
744
- useWorkflowEditor.getState().availableWorkflows
745
 
746
- const assistantWorkflowId =
747
- state.assistantWorkflow || defaultSettings.assistantWorkflow
 
 
 
748
 
749
- const assistantTurboWorkflowId =
750
- state.assistantTurboWorkflow || defaultSettings.assistantTurboWorkflow
751
-
752
- const imageGenerationWorkflowId =
753
  state.imageGenerationWorkflow ||
754
- defaultSettings.imageGenerationWorkflow
 
 
755
 
756
- const imageGenerationTurboWorkflowId =
757
  state.imageGenerationTurboWorkflow ||
758
- defaultSettings.imageGenerationTurboWorkflow
759
-
760
- const imageUpscalingWorkflowId =
761
- state.imageUpscalingWorkflow || defaultSettings.imageUpscalingWorkflow
762
-
763
- const imageDepthWorkflowId =
764
- state.imageDepthWorkflow || defaultSettings.imageDepthWorkflow
765
-
766
- const imageSegmentationWorkflowId =
 
 
 
 
 
 
 
767
  state.imageSegmentationWorkflow ||
768
- defaultSettings.imageSegmentationWorkflow
 
 
769
 
770
- const videoGenerationWorkflowId =
771
  state.videoGenerationWorkflow ||
772
- defaultSettings.videoGenerationWorkflow
 
 
773
 
774
- const videoDepthWorkflowId =
775
- state.videoDepthWorkflow || defaultSettings.videoDepthWorkflow
 
 
776
 
777
- const videoSegmentationWorkflowId =
778
  state.videoSegmentationWorkflow ||
779
- defaultSettings.videoSegmentationWorkflow
 
 
780
 
781
- const videoUpscalingWorkflowId =
782
- state.videoUpscalingWorkflow || defaultSettings.videoUpscalingWorkflow
 
 
 
783
 
784
- const soundGenerationWorkflowId =
785
  state.soundGenerationWorkflow ||
786
- defaultSettings.soundGenerationWorkflow
 
 
787
 
788
- const voiceGenerationWorkflowId =
789
  state.voiceGenerationWorkflow ||
790
- defaultSettings.voiceGenerationWorkflow
 
 
791
 
792
- const musicGenerationWorkflowId =
793
  state.musicGenerationWorkflow ||
794
- defaultSettings.musicGenerationWorkflow
795
-
796
- const { workflowIds } = findWorkflows(availableWorkflows, {
797
- workflowIds: [
798
- assistantWorkflowId,
799
- assistantTurboWorkflowId,
800
- imageGenerationWorkflowId,
801
- imageGenerationTurboWorkflowId,
802
- imageUpscalingWorkflowId,
803
- imageDepthWorkflowId,
804
- imageSegmentationWorkflowId,
805
- videoGenerationWorkflowId,
806
- videoDepthWorkflowId,
807
- videoSegmentationWorkflowId,
808
- videoUpscalingWorkflowId,
809
- soundGenerationWorkflowId,
810
- voiceGenerationWorkflowId,
811
- musicGenerationWorkflowId,
812
- ],
813
- })
814
 
815
  return {
816
  // why do we need those fallbacks? because some users will leave the fields empty,
@@ -884,21 +880,20 @@ export const useSettings = create<SettingsStore>()(
884
  videoNegativePrompt:
885
  state.videoNegativePrompt || defaultSettings.videoNegativePrompt,
886
 
887
- assistantWorkflow: workflowIds[assistantWorkflowId],
888
- assistantTurboWorkflow: workflowIds[assistantTurboWorkflowId],
889
- imageGenerationWorkflow: workflowIds[imageGenerationWorkflowId],
890
- imageGenerationTurboWorkflow:
891
- workflowIds[imageGenerationTurboWorkflowId],
892
- imageUpscalingWorkflow: workflowIds[imageUpscalingWorkflowId],
893
- imageDepthWorkflow: workflowIds[imageDepthWorkflowId],
894
- imageSegmentationWorkflow: workflowIds[imageSegmentationWorkflowId],
895
- videoGenerationWorkflow: workflowIds[videoGenerationWorkflowId],
896
- videoDepthWorkflow: workflowIds[videoDepthWorkflowId],
897
- videoSegmentationWorkflow: workflowIds[videoSegmentationWorkflowId],
898
- videoUpscalingWorkflow: workflowIds[videoUpscalingWorkflowId],
899
- soundGenerationWorkflow: workflowIds[soundGenerationWorkflowId],
900
- voiceGenerationWorkflow: workflowIds[voiceGenerationWorkflowId],
901
- musicGenerationWorkflow: workflowIds[musicGenerationWorkflowId],
902
 
903
  imageRenderingStrategy:
904
  state.imageRenderingStrategy ||
 
2
 
3
  import { create } from 'zustand'
4
  import { persist } from 'zustand/middleware'
5
+ import {
6
+ getValidNumber,
7
+ ClapWorkflowProvider,
8
+ ClapWorkflow,
9
+ ClapWorkflowCategory,
10
+ } from '@aitube/clap'
11
  import { parseRenderingStrategy, RenderingStrategy } from '@aitube/timeline'
12
  import {
13
  ComfyIcuAccelerator,
 
23
  import { getValidComfyWorkflowTemplate } from '@/lib/utils/getValidComfyWorkflowTemplate'
24
  import { parseComfyIcuAccelerator } from '@/lib/utils/parseComfyIcuAccelerator'
25
 
26
+ import { parseWorkflow } from './workflows/parseWorkflow'
 
 
 
27
 
28
  export const useSettings = create<SettingsStore>()(
29
  persist(
 
305
  ),
306
  })
307
  },
308
+ setAssistantWorkflow: (assistantWorkflow?: ClapWorkflow) => {
309
  const { assistantWorkflow: defaultAssistantWorkflow } =
310
  getDefaultSettingsState()
311
  set({
312
+ assistantWorkflow: assistantWorkflow
313
+ ? JSON.stringify(assistantWorkflow)
314
+ : defaultAssistantWorkflow,
 
315
  })
316
  },
317
+ setAssistantTurboWorkflow: (assistantTurboWorkflow?: ClapWorkflow) => {
318
  const { assistantTurboWorkflow: defaultAssistantTurboWorkflow } =
319
  getDefaultSettingsState()
320
  set({
321
+ assistantTurboWorkflow: assistantTurboWorkflow
322
+ ? JSON.stringify(assistantTurboWorkflow)
323
+ : defaultAssistantTurboWorkflow,
 
324
  })
325
  },
326
+ setImageGenerationWorkflow: (imageGenerationWorkflow?: ClapWorkflow) => {
327
  const { imageGenerationWorkflow: defaultImageGenerationWorkflow } =
328
  getDefaultSettingsState()
329
  set({
330
+ imageGenerationWorkflow: imageGenerationWorkflow
331
+ ? JSON.stringify(imageGenerationWorkflow)
332
+ : defaultImageGenerationWorkflow,
 
333
  })
334
  },
335
  setImageGenerationTurboWorkflow: (
336
+ imageGenerationTurboWorkflow?: ClapWorkflow
337
  ) => {
338
  const {
339
  imageGenerationTurboWorkflow: defaultImageGenerationTurboWorkflow,
340
  } = getDefaultSettingsState()
341
  set({
342
+ imageGenerationTurboWorkflow: imageGenerationTurboWorkflow
343
+ ? JSON.stringify(imageGenerationTurboWorkflow)
344
+ : defaultImageGenerationTurboWorkflow,
 
345
  })
346
  },
347
+ setImageUpscalingWorkflow: (imageUpscalingWorkflow?: ClapWorkflow) => {
348
  const { imageUpscalingWorkflow: defaultImageUpscalingWorkflow } =
349
  getDefaultSettingsState()
350
  set({
351
+ imageUpscalingWorkflow: imageUpscalingWorkflow
352
+ ? JSON.stringify(imageUpscalingWorkflow)
353
+ : defaultImageUpscalingWorkflow,
 
354
  })
355
  },
356
+ setImageDepthWorkflow: (imageDepthWorkflow?: ClapWorkflow) => {
357
  const { imageDepthWorkflow: defaultImageDepthWorkflow } =
358
  getDefaultSettingsState()
359
  set({
360
+ imageDepthWorkflow: imageDepthWorkflow
361
+ ? JSON.stringify(imageDepthWorkflow)
362
+ : defaultImageDepthWorkflow,
 
363
  })
364
  },
365
+ setImageSegmentationWorkflow: (
366
+ imageSegmentationWorkflow?: ClapWorkflow
367
+ ) => {
368
  const { imageSegmentationWorkflow: defaultImageSegmentationWorkflow } =
369
  getDefaultSettingsState()
370
  set({
371
+ imageSegmentationWorkflow: imageSegmentationWorkflow
372
+ ? JSON.stringify(imageSegmentationWorkflow)
373
+ : defaultImageSegmentationWorkflow,
 
374
  })
375
  },
376
+ setVideoGenerationWorkflow: (videoGenerationWorkflow?: ClapWorkflow) => {
377
  const { videoGenerationWorkflow: defaultVideoGenerationWorkflow } =
378
  getDefaultSettingsState()
379
  set({
380
+ videoGenerationWorkflow: videoGenerationWorkflow
381
+ ? JSON.stringify(videoGenerationWorkflow)
382
+ : defaultVideoGenerationWorkflow,
 
383
  })
384
  },
385
+ setVideoUpscalingWorkflow: (videoUpscalingWorkflow?: ClapWorkflow) => {
386
  const { videoUpscalingWorkflow: defaultVideoUpscalingWorkflow } =
387
  getDefaultSettingsState()
388
  set({
389
+ videoUpscalingWorkflow: videoUpscalingWorkflow
390
+ ? JSON.stringify(videoUpscalingWorkflow)
391
+ : defaultVideoUpscalingWorkflow,
 
392
  })
393
  },
394
+ setVideoDepthWorkflow: (videoDepthWorkflow?: ClapWorkflow) => {
395
  const { videoDepthWorkflow: defaultVideoDepthWorkflow } =
396
  getDefaultSettingsState()
397
  set({
398
+ videoDepthWorkflow: videoDepthWorkflow
399
+ ? JSON.stringify(videoDepthWorkflow)
400
+ : defaultVideoDepthWorkflow,
 
401
  })
402
  },
403
+ setVideoSegmentationWorkflow: (
404
+ videoSegmentationWorkflow?: ClapWorkflow
405
+ ) => {
406
  const { videoSegmentationWorkflow: defaultVideoSegmentationWorkflow } =
407
  getDefaultSettingsState()
408
  set({
409
+ videoSegmentationWorkflow: videoSegmentationWorkflow
410
+ ? JSON.stringify(videoSegmentationWorkflow)
411
+ : defaultVideoSegmentationWorkflow,
 
412
  })
413
  },
414
+ setSoundGenerationWorkflow: (soundGenerationWorkflow?: ClapWorkflow) => {
415
  const { soundGenerationWorkflow: defaultSoundGenerationWorkflow } =
416
  getDefaultSettingsState()
417
  set({
418
+ soundGenerationWorkflow: soundGenerationWorkflow
419
+ ? JSON.stringify(soundGenerationWorkflow)
420
+ : defaultSoundGenerationWorkflow,
 
421
  })
422
  },
423
+ setVoiceGenerationWorkflow: (voiceGenerationWorkflow?: ClapWorkflow) => {
424
  const { voiceGenerationWorkflow: defaultVoiceGenerationWorkflow } =
425
  getDefaultSettingsState()
426
  set({
427
+ voiceGenerationWorkflow: voiceGenerationWorkflow
428
+ ? JSON.stringify(voiceGenerationWorkflow)
429
+ : defaultVoiceGenerationWorkflow,
 
430
  })
431
  },
432
+ setMusicGenerationWorkflow: (musicGenerationWorkflow?: ClapWorkflow) => {
433
  const { musicGenerationWorkflow: defaultVoiceGenerationWorkflow } =
434
  getDefaultSettingsState()
435
  set({
436
+ musicGenerationWorkflow: musicGenerationWorkflow
437
+ ? JSON.stringify(musicGenerationWorkflow)
438
+ : defaultVoiceGenerationWorkflow,
 
439
  })
440
  },
441
  setImageRenderingStrategy: (
 
727
  const state = get()
728
  const defaultSettings = getDefaultSettingsState()
729
 
730
+ const assistantWorkflow = parseWorkflow(
731
+ state.assistantWorkflow || defaultSettings.assistantWorkflow,
732
+ ClapWorkflowCategory.ASSISTANT
733
+ )
 
 
 
734
 
735
+ const assistantTurboWorkflow = parseWorkflow(
736
+ state.assistantTurboWorkflow ||
737
+ defaultSettings.assistantTurboWorkflow,
738
+ ClapWorkflowCategory.ASSISTANT
739
+ )
740
 
741
+ const imageGenerationWorkflow = parseWorkflow(
 
 
 
742
  state.imageGenerationWorkflow ||
743
+ defaultSettings.imageGenerationWorkflow,
744
+ ClapWorkflowCategory.IMAGE_GENERATION
745
+ )
746
 
747
+ const imageGenerationTurboWorkflow = parseWorkflow(
748
  state.imageGenerationTurboWorkflow ||
749
+ defaultSettings.imageGenerationTurboWorkflow,
750
+ ClapWorkflowCategory.IMAGE_GENERATION
751
+ )
752
+
753
+ const imageUpscalingWorkflow = parseWorkflow(
754
+ state.imageUpscalingWorkflow ||
755
+ defaultSettings.imageUpscalingWorkflow,
756
+ ClapWorkflowCategory.IMAGE_UPSCALING
757
+ )
758
+
759
+ const imageDepthWorkflow = parseWorkflow(
760
+ state.imageDepthWorkflow || defaultSettings.imageDepthWorkflow,
761
+ ClapWorkflowCategory.IMAGE_DEPTH_MAPPING
762
+ )
763
+
764
+ const imageSegmentationWorkflow = parseWorkflow(
765
  state.imageSegmentationWorkflow ||
766
+ defaultSettings.imageSegmentationWorkflow,
767
+ ClapWorkflowCategory.IMAGE_SEGMENTATION
768
+ )
769
 
770
+ const videoGenerationWorkflow = parseWorkflow(
771
  state.videoGenerationWorkflow ||
772
+ defaultSettings.videoGenerationWorkflow,
773
+ ClapWorkflowCategory.VIDEO_GENERATION
774
+ )
775
 
776
+ const videoDepthWorkflow = parseWorkflow(
777
+ state.videoDepthWorkflow || defaultSettings.videoDepthWorkflow,
778
+ ClapWorkflowCategory.VIDEO_DEPTH_MAPPING
779
+ )
780
 
781
+ const videoSegmentationWorkflow = parseWorkflow(
782
  state.videoSegmentationWorkflow ||
783
+ defaultSettings.videoSegmentationWorkflow,
784
+ ClapWorkflowCategory.VIDEO_SEGMENTATION
785
+ )
786
 
787
+ const videoUpscalingWorkflow = parseWorkflow(
788
+ state.videoUpscalingWorkflow ||
789
+ defaultSettings.videoUpscalingWorkflow,
790
+ ClapWorkflowCategory.VIDEO_UPSCALING
791
+ )
792
 
793
+ const soundGenerationWorkflow = parseWorkflow(
794
  state.soundGenerationWorkflow ||
795
+ defaultSettings.soundGenerationWorkflow,
796
+ ClapWorkflowCategory.SOUND_GENERATION
797
+ )
798
 
799
+ const voiceGenerationWorkflow = parseWorkflow(
800
  state.voiceGenerationWorkflow ||
801
+ defaultSettings.voiceGenerationWorkflow,
802
+ ClapWorkflowCategory.VOICE_GENERATION
803
+ )
804
 
805
+ const musicGenerationWorkflow = parseWorkflow(
806
  state.musicGenerationWorkflow ||
807
+ defaultSettings.musicGenerationWorkflow,
808
+ ClapWorkflowCategory.MUSIC_GENERATION
809
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
810
 
811
  return {
812
  // why do we need those fallbacks? because some users will leave the fields empty,
 
880
  videoNegativePrompt:
881
  state.videoNegativePrompt || defaultSettings.videoNegativePrompt,
882
 
883
+ assistantWorkflow,
884
+ assistantTurboWorkflow,
885
+ imageGenerationWorkflow,
886
+ imageGenerationTurboWorkflow,
887
+ imageUpscalingWorkflow,
888
+ imageDepthWorkflow,
889
+ imageSegmentationWorkflow,
890
+ videoGenerationWorkflow,
891
+ videoDepthWorkflow,
892
+ videoSegmentationWorkflow,
893
+ videoUpscalingWorkflow,
894
+ soundGenerationWorkflow,
895
+ voiceGenerationWorkflow,
896
+ musicGenerationWorkflow,
 
897
 
898
  imageRenderingStrategy:
899
  state.imageRenderingStrategy ||
packages/app/src/services/settings/workflows/parseWorkflow.ts ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ findWorkflows,
3
+ WorkflowSearchResults,
4
+ } from '@/components/toolbars/top-menu/lists/getWorkflowProviders'
5
+ import { useWorkflowEditor } from '@/services/editors'
6
+ import {
7
+ ClapWorkflow,
8
+ ClapWorkflowCategory,
9
+ ClapWorkflowEngine,
10
+ ClapWorkflowProvider,
11
+ } from '@aitube/clap'
12
+ import { WorkflowEditorStore } from '@aitube/clapper-services'
13
+
14
+ export function parseWorkflow(
15
+ input: string,
16
+ category: ClapWorkflowCategory
17
+ ): ClapWorkflow {
18
+ const noWorkflow: ClapWorkflow = {
19
+ id: `empty://${category}`,
20
+ label: 'No workflow',
21
+ description: '',
22
+ tags: [],
23
+ author: '',
24
+ thumbnailUrl: '',
25
+ nonCommercial: false,
26
+ canSupportLora: false,
27
+ engine: ClapWorkflowEngine.DEFAULT,
28
+ category,
29
+ provider: ClapWorkflowProvider.NONE,
30
+ data: '',
31
+ schema: '',
32
+ inputFields: [],
33
+ inputValues: {},
34
+ }
35
+
36
+ // console.log("parseWorkflow:", { input })
37
+
38
+ try {
39
+ const maybeWorkflow =
40
+ typeof input === 'string'
41
+ ? (JSON.parse(input) as ClapWorkflow)
42
+ : (input as ClapWorkflow) // fallback in case some users had a bad version which didn't serialize to JSON
43
+ // console.log("maybeWorkflow:", { maybeWorkflow })
44
+ const looksValid =
45
+ typeof maybeWorkflow?.id === 'string' &&
46
+ typeof maybeWorkflow?.label === 'string' &&
47
+ typeof maybeWorkflow?.description === 'string' &&
48
+ typeof maybeWorkflow?.author === 'string' &&
49
+ typeof maybeWorkflow?.thumbnailUrl === 'string' &&
50
+ typeof maybeWorkflow?.data === 'string' &&
51
+ Array.isArray(maybeWorkflow?.inputFields) &&
52
+ typeof maybeWorkflow?.inputValues === 'object'
53
+ if (!looksValid) {
54
+ throw new Error(`the workflow data seems invalid`)
55
+ }
56
+ return maybeWorkflow
57
+ } catch (err) {
58
+ // console.log("error:", err)
59
+ // MIGRATION OF OLDER SETTINGS
60
+ // in case the user has an old version of the settings, the "workflow"
61
+ // will be a simple ID. So we try to recover that
62
+ const results: WorkflowSearchResults = findWorkflows(
63
+ useWorkflowEditor.getState().availableWorkflows,
64
+ { workflowId: input }
65
+ )
66
+
67
+ if (results.workflow) {
68
+ return results.workflow
69
+ }
70
+
71
+ // for now let's assume we ave two cases:
72
+ // 1. the user has an old version of the settings, and we need to migrate it
73
+ // 2. the user has an empty
74
+ return noWorkflow
75
+ }
76
+ }