jbilcke-hf (HF staff) committed
Commit
6186d0e
1 Parent(s): 6318758

add new providers

Files changed (41)
  1. packages/app/public/images/providers/bigmodel.jpeg +3 -0
  2. packages/app/public/images/providers/letzai.png +3 -0
  3. packages/app/public/images/providers/piapi.jpg +3 -0
  4. packages/app/src/app/api/resolve/providers/bigmodel/callCogVideoX.ts +75 -0
  5. packages/app/src/app/api/resolve/providers/bigmodel/index.ts +56 -0
  6. packages/app/src/app/api/resolve/providers/bigmodel/types.ts +44 -0
  7. packages/app/src/app/api/resolve/providers/falai/index.ts +2 -5
  8. packages/app/src/app/api/resolve/providers/index.ts +8 -5
  9. packages/app/src/app/api/resolve/providers/letzai/callCreateImage.ts +52 -0
  10. packages/app/src/app/api/resolve/providers/letzai/index.ts +72 -0
  11. packages/app/src/app/api/resolve/providers/letzai/types.ts +43 -0
  12. packages/app/src/app/api/resolve/providers/piapi/index.ts +100 -0
  13. packages/app/src/app/api/resolve/providers/piapi/lumalabs/createAndFetchDreamMachineVideo.ts +59 -0
  14. packages/app/src/app/api/resolve/providers/piapi/lumalabs/createDreamMachineVideo.ts +37 -0
  15. packages/app/src/app/api/resolve/providers/piapi/lumalabs/fetchDreamMachineVideoResult.ts +35 -0
  16. packages/app/src/app/api/resolve/providers/piapi/lumalabs/types.ts +50 -0
  17. packages/app/src/app/api/resolve/providers/piapi/midjourney/createImage.ts +53 -0
  18. packages/app/src/app/api/resolve/providers/piapi/midjourney/createMidjourneyImage.ts +37 -0
  19. packages/app/src/app/api/resolve/providers/piapi/midjourney/fetchMidjourneyResult.ts +37 -0
  20. packages/app/src/app/api/resolve/providers/piapi/midjourney/types.ts +66 -0
  21. packages/app/src/app/api/resolve/route.ts +12 -3
  22. packages/app/src/components/core/providers/logos.ts +23 -13
  23. packages/app/src/components/forms/FormArea.tsx +89 -0
  24. packages/app/src/components/forms/index.ts +1 -0
  25. packages/app/src/components/settings/image.tsx +14 -3
  26. packages/app/src/components/settings/index.tsx +0 -2
  27. packages/app/src/components/settings/music.tsx +16 -6
  28. packages/app/src/components/settings/provider.tsx +33 -0
  29. packages/app/src/components/settings/sound.tsx +16 -5
  30. packages/app/src/components/settings/video.tsx +14 -2
  31. packages/app/src/components/settings/voice.tsx +16 -5
  32. packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts +26 -0
  33. packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts +27 -0
  34. packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts +9 -23
  35. packages/app/src/services/editors/workflow-editor/workflows/huggingface/index.ts +19 -0
  36. packages/app/src/services/editors/workflow-editor/workflows/letzai/index.ts +32 -0
  37. packages/app/src/services/editors/workflow-editor/workflows/piapi/index.ts +58 -0
  38. packages/app/src/services/settings/getDefaultSettingsState.ts +3 -0
  39. packages/app/src/services/settings/useSettings.ts +29 -1
  40. packages/clap/src/types.ts +3 -0
  41. packages/clapper-services/src/settings.ts +6 -0
packages/app/public/images/providers/bigmodel.jpeg ADDED

Git LFS Details

  • SHA256: 3c2ea43259fb8e8f9b6d736535cb6c7d95481e91bb4dfef1cff54287f74a3f81
  • Pointer size: 128 Bytes
  • Size of remote file: 721 Bytes
packages/app/public/images/providers/letzai.png ADDED

Git LFS Details

  • SHA256: b63b008d252d0b6c1b0cbd4928e049e69f1e1c420fd829d41433501f4ed59874
  • Pointer size: 128 Bytes
  • Size of remote file: 124 Bytes
packages/app/public/images/providers/piapi.jpg ADDED

Git LFS Details

  • SHA256: 69030e64f87e3f85504c3dc4b29687fe81ae368f3906714cc599bf84066bdf89
  • Pointer size: 129 Bytes
  • Size of remote file: 5.41 kB
packages/app/src/app/api/resolve/providers/bigmodel/callCogVideoX.ts ADDED
@@ -0,0 +1,75 @@
+import {
+  TaskResultResponse,
+  VideoGenerationParams,
+  VideoGenerationResponse,
+} from './types'
+
+/**
+ * Asynchronous function to generate a video using the CogVideoX API and retrieve the result.
+ * @param apiKey - The API key for authentication.
+ * @param params - The parameters for video generation.
+ * @returns A promise that resolves to the video generation result.
+ */
+export async function callCogVideoX(
+  apiKey: string,
+  params: VideoGenerationParams
+): Promise<TaskResultResponse> {
+  const baseUrl = 'https://open.bigmodel.cn/api/paas/v4'
+  const headers = {
+    'Content-Type': 'application/json',
+    Authorization: `Bearer ${apiKey}`,
+  }
+
+  try {
+    // Step 1: Initialize video generation
+    const generationResponse = await fetch(`${baseUrl}/videos/generations`, {
+      method: 'POST',
+      headers: headers,
+      body: JSON.stringify(params),
+    })
+
+    if (!generationResponse.ok) {
+      throw new Error(`HTTP error! status: ${generationResponse.status}`)
+    }
+
+    const generationData: VideoGenerationResponse =
+      await generationResponse.json()
+    const { id } = generationData
+
+    // Step 2: Poll for the task result
+    let taskResult: TaskResultResponse
+    do {
+      const resultResponse = await fetch(`${baseUrl}/async-result/${id}`, {
+        method: 'GET',
+        headers: headers,
+      })
+
+      if (!resultResponse.ok) {
+        throw new Error(`HTTP error! status: ${resultResponse.status}`)
+      }
+
+      taskResult = await resultResponse.json()
+
+      if (taskResult.task_status === 'PROCESSING') {
+        // Wait for 5 seconds before polling again
+        await new Promise((resolve) => setTimeout(resolve, 5000))
+      }
+    } while (taskResult.task_status === 'PROCESSING')
+
+    return taskResult
+  } catch (error) {
+    console.error('Error in video generation:', error)
+    throw error
+  }
+}
+
+// Example usage
+// const apiKey = 'your-api-key-here';
+// const params: VideoGenerationParams = {
+//   model: 'cogvideox',
+//   prompt: 'Peter Rabbit drives a small car, wandering on the road, with a face full of happiness and joy.'
+// };
+//
+// callCogVideoX(apiKey, params)
+//   .then(result => console.log(result))
+//   .catch(error => console.error(error));
packages/app/src/app/api/resolve/providers/bigmodel/index.ts ADDED
@@ -0,0 +1,56 @@
+import { TimelineSegment } from '@aitube/timeline'
+import { ResolveRequest } from '@aitube/clapper-services'
+import { ClapSegmentCategory } from '@aitube/clap'
+
+import { callCogVideoX } from './callCogVideoX'
+
+export async function resolveSegment(
+  request: ResolveRequest
+): Promise<TimelineSegment> {
+  if (!request.settings.bigModelApiKey) {
+    throw new Error(`Missing API key for "BigModel.cn"`)
+  }
+
+  const segment: TimelineSegment = request.segment
+
+  let model = request.settings.imageGenerationWorkflow.data || ''
+
+  if (request.segment.category === ClapSegmentCategory.VIDEO) {
+    model = request.settings.videoGenerationWorkflow.data || ''
+
+    /*
+    note
+    if (!request.prompts.image.positive) {
+      console.error(
+        `resolveSegment: cannot resolve a storyboard with an empty prompt`
+      )
+      return segment
+    }
+    */
+
+    if (!request.prompts.video.image) {
+      throw new Error(
+        `cannot generate a video without a storyboard (the concept of Clapper is to use storyboards)`
+      )
+    }
+
+    // https://bigmodel.cn/dev/api#cogvideox
+    const result = await callCogVideoX(request.settings.bigModelApiKey, {
+      model,
+      image_url: request.prompts.video.image,
+    })
+
+    const video = result.video_result.at(0)
+    if (!video) {
+      throw new Error(`Failed to generate at least one video`)
+    }
+
+    segment.assetUrl = video.url
+  } else {
+    throw new Error(
+      `Clapper doesn't support ${request.segment.category} generation for provider "BigModel.cn". Please open a pull request with (working code) to solve this!`
+    )
+  }
+
+  return segment
+}
packages/app/src/app/api/resolve/providers/bigmodel/types.ts ADDED
@@ -0,0 +1,44 @@
+/**
+ * Represents the parameters for the video generation request.
+ */
+export type VideoGenerationParams = {
+  model: string
+  request_id?: string
+  user_id?: string
+} & (
+  | {
+      prompt?: string
+    }
+  | {
+      image_url?: string
+    }
+)
+
+/**
+ * Represents the response from the video generation request.
+ */
+export type VideoGenerationResponse = {
+  request_id: string
+  id: string
+  model: string
+  task_status: 'PROCESSING' | 'SUCCESS' | 'FAIL'
+}
+
+/**
+ * Represents the video result in the task result query.
+ */
+export type VideoResult = {
+  url: string
+  cover_image_url: string
+}
+
+/**
+ * Represents the response from the task result query.
+ */
+export type TaskResultResponse = {
+  model: string
+  video_result: VideoResult[]
+  task_status: 'PROCESSING' | 'SUCCESS' | 'FAIL'
+  request_id: string
+  id: string
+}
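A note for readers wiring this up: the resolver above calls CogVideoX with the `image_url` variant of `VideoGenerationParams`, while the inline example in callCogVideoX.ts uses the `prompt` variant. A minimal sketch of the image-to-video path (not part of this commit; the API key and image URL are placeholders):

import { callCogVideoX } from './callCogVideoX'
import { VideoGenerationParams } from './types'

async function imageToVideoSketch() {
  // placeholder credentials and storyboard URL, for illustration only
  const apiKey = 'your-api-key-here'
  const params: VideoGenerationParams = {
    model: 'cogvideox',
    image_url: 'https://example.com/storyboard.png',
  }

  // callCogVideoX submits the job, then polls /async-result/{id} until it leaves PROCESSING
  const result = await callCogVideoX(apiKey, params)
  const video = result.video_result.at(0)
  console.log(video?.url)
}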
packages/app/src/app/api/resolve/providers/falai/index.ts CHANGED
@@ -136,10 +136,7 @@ export async function resolveSegment(
       )
     }

-    const storyboard = request.segments.find(
-      (s) => s.category === ClapSegmentCategory.STORYBOARD
-    )
-    if (!storyboard) {
+    if (!request.prompts.video.image) {
       throw new Error(
         `cannot generate a video without a storyboard (the concept of Clapper is to use storyboards)`
       )
@@ -149,7 +146,7 @@ export async function resolveSegment(
       input: {
        ...getWorkflowInputValues(request.settings.videoGenerationWorkflow),

-        image_url: storyboard.assetUrl,
+        image_url: request.prompts.video.image,

        sync_mode: true,
        enable_safety_checker: request.settings.censorNotForAllAudiencesContent,
packages/app/src/app/api/resolve/providers/index.ts CHANGED
@@ -1,10 +1,13 @@
-export { resolveSegment as resolveSegmentUsingHuggingFace } from './huggingface'
+export { resolveSegment as resolveSegmentUsingAiTube } from './aitube'
+export { resolveSegment as resolveSegmentUsingBigModel } from './bigmodel'
+export { resolveSegment as resolveSegmentUsingComfyDeploy } from './comfy-comfydeploy'
+export { resolveSegment as resolveSegmentUsingComfyIcu } from './comfy-comfyicu'
 export { resolveSegment as resolveSegmentUsingComfyReplicate } from './comfy-replicate'
-export { resolveSegment as resolveSegmentUsingReplicate } from './replicate'
 export { resolveSegment as resolveSegmentUsingComfyUI } from './comfyui'
-export { resolveSegment as resolveSegmentUsingComfyIcu } from './comfy-comfyicu'
-export { resolveSegment as resolveSegmentUsingComfyDeploy } from './comfy-comfydeploy'
 export { resolveSegment as resolveSegmentUsingFalAi } from './falai'
-export { resolveSegment as resolveSegmentUsingAiTube } from './aitube'
+export { resolveSegment as resolveSegmentUsingHuggingFace } from './huggingface'
+export { resolveSegment as resolveSegmentUsingLetzAi } from './letzai'
 export { resolveSegment as resolveSegmentUsingModelsLab } from './modelslab'
+export { resolveSegment as resolveSegmentUsingPiApi } from './piapi'
+export { resolveSegment as resolveSegmentUsingReplicate } from './replicate'
 export { resolveSegment as resolveSegmentUsingStabilityAi } from './stabilityai'
packages/app/src/app/api/resolve/providers/letzai/callCreateImage.ts ADDED
@@ -0,0 +1,52 @@
+import { ImageCreationParams, ImageCreationResponse } from './types'
+
+/**
+ * Asynchronous function to create an image using the LetzAI API.
+ * @param apiKey - The API key for authentication.
+ * @param params - The parameters for image creation.
+ * @returns A promise that resolves to the image creation result.
+ */
+export async function callCreateImage(
+  apiKey: string,
+  params: ImageCreationParams
+): Promise<ImageCreationResponse> {
+  const baseUrl = 'https://api.letz.ai'
+  const headers = {
+    'Content-Type': 'application/json',
+    Authorization: `Bearer ${apiKey}`,
+  }
+
+  try {
+    const response = await fetch(`${baseUrl}/images`, {
+      method: 'POST',
+      headers: headers,
+      body: JSON.stringify(params),
+    })
+
+    if (!response.ok) {
+      throw new Error(`HTTP error! status: ${response.status}`)
+    }
+
+    const data: ImageCreationResponse = await response.json()
+    return data
+  } catch (error) {
+    console.error('Error in image creation:', error)
+    throw error
+  }
+}
+
+// Example usage
+// const apiKey = 'your-api-key-here';
+// const params: ImageCreationParams = {
+//   prompt: "A beautiful sunset over a calm ocean",
+//   model: "sd_xl_base_1.0",
+//   width: 1024,
+//   height: 1024,
+//   steps: 30,
+//   guidance: 7.5,
+//   outputFormat: "png"
+// };
+//
+// callCreateImage(apiKey, params)
+//   .then(result => console.log(result))
+//   .catch(error => console.error(error));
packages/app/src/app/api/resolve/providers/letzai/index.ts ADDED
@@ -0,0 +1,72 @@
+import { TimelineSegment } from '@aitube/timeline'
+import { ResolveRequest } from '@aitube/clapper-services'
+import { ClapSegmentCategory, generateSeed } from '@aitube/clap'
+
+import { getWorkflowInputValues } from '../getWorkflowInputValues'
+import { callCreateImage } from './callCreateImage'
+
+export async function resolveSegment(
+  request: ResolveRequest
+): Promise<TimelineSegment> {
+  if (!request.settings.letzAiApiKey) {
+    throw new Error(`Missing API key for "LetzAi"`)
+  }
+
+  const segment: TimelineSegment = request.segment
+
+  let model = request.settings.imageGenerationWorkflow.data || ''
+
+  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+    model = request.settings.imageGenerationWorkflow.data || ''
+
+    if (!request.prompts.image.positive) {
+      console.error(
+        `resolveSegment: cannot resolve a storyboard with an empty prompt`
+      )
+      return segment
+    }
+
+    const { workflowDefaultValues, workflowValues } = getWorkflowInputValues(
+      request.settings.imageGenerationWorkflow
+    )
+
+    const result = await callCreateImage(request.settings.letzAiApiKey, {
+      prompt: request.prompts.image.positive,
+      negativePrompt: request.prompts.image.negative,
+      // model: string;
+      width:
+        request.meta.width ||
+        workflowValues.width ||
+        workflowDefaultValues.width,
+      height:
+        request.meta.height ||
+        workflowValues.height ||
+        workflowDefaultValues.height,
+      // steps: number;
+      // guidance: number;
+      seed: generateSeed(),
+      // scheduler: string;
+      // outputFormat: string;
+    })
+
+    if (request.settings.censorNotForAllAudiencesContent) {
+      if (result.nsfw) {
+        throw new Error(
+          `The generated content has been filtered according to your safety settings`
+        )
+      }
+    }
+
+    if (!result.output) {
+      throw new Error(`Failed to generate at least one image`)
+    }
+
+    segment.assetUrl = `${result.output || ''}`
+  } else {
+    throw new Error(
+      `Clapper doesn't support ${request.segment.category} generation for provider "LetzAI". Please open a pull request with (working code) to solve this!`
+    )
+  }
+
+  return segment
+}
packages/app/src/app/api/resolve/providers/letzai/types.ts ADDED
@@ -0,0 +1,43 @@
+/**
+ * Represents the parameters for the image creation request.
+ */
+export type ImageCreationParams = {
+  prompt: string
+  negativePrompt?: string
+  model?: string
+  width?: number
+  height?: number
+  steps?: number
+  guidance?: number
+  seed?: number
+  scheduler?: string
+  outputFormat?: string
+}
+
+/**
+ * Represents the response from the image creation request.
+ */
+export type ImageCreationResponse = {
+  id: string
+  createdAt: string
+  updatedAt: string
+  prompt: string
+  negativePrompt: string | null
+  model: string
+  width: number
+  height: number
+  steps: number
+  guidance: number
+  seed: number
+  scheduler: string
+  status: string
+  error: string | null
+  progress: number
+  outputFormat: string
+  output: string | null
+  nsfw: boolean
+  user: {
+    id: string
+    username: string
+  }
+}
packages/app/src/app/api/resolve/providers/piapi/index.ts ADDED
@@ -0,0 +1,100 @@
+import { TimelineSegment } from '@aitube/timeline'
+import { ResolveRequest } from '@aitube/clapper-services'
+import { ClapSegmentCategory } from '@aitube/clap'
+
+import { getWorkflowInputValues } from '../getWorkflowInputValues'
+import { createImage } from './midjourney/createImage'
+import { createAndFetchDreamMachineVideo } from './lumalabs/createAndFetchDreamMachineVideo'
+
+export async function resolveSegment(
+  request: ResolveRequest
+): Promise<TimelineSegment> {
+  if (!request.settings.piApiApiKey) {
+    throw new Error(`Missing API key for "PiApi"`)
+  }
+
+  const segment: TimelineSegment = request.segment
+
+  let model = request.settings.imageGenerationWorkflow.data || ''
+
+  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+    model = request.settings.imageGenerationWorkflow.data || ''
+
+    if (!request.prompts.image.positive) {
+      console.error(
+        `resolveSegment: cannot resolve a storyboard with an empty prompt`
+      )
+      return segment
+    }
+
+    const { workflowDefaultValues, workflowValues } = getWorkflowInputValues(
+      request.settings.imageGenerationWorkflow
+    )
+
+    const width =
+      request.meta.width || workflowValues.width || workflowDefaultValues.width
+
+    const height =
+      request.meta.height ||
+      workflowValues.height ||
+      workflowDefaultValues.height
+
+    const aspectRatio =
+      width > height ? '16:9' : height > width ? '9:16' : '1:1'
+
+    const result = await createImage(request.settings.piApiApiKey, {
+      prompt: request.prompts.image.positive,
+      aspect_ratio: aspectRatio,
+      // skip_prompt_check?: boolean;
+      process_mode: 'relax', // | 'fast' | 'turbo';
+      // webhook_endpoint?: string;
+      // webhook_secret?: string;
+      // bot_id?: number;
+    })
+
+    if (!result.task_result.image_url) {
+      throw new Error(`Failed to generate at least one image`)
+    }
+
+    segment.assetUrl = `${result.task_result.image_url || ''}`
+  } else if (request.segment.category === ClapSegmentCategory.VIDEO) {
+    model = request.settings.videoGenerationWorkflow.data || ''
+
+    if (!request.prompts.video.image) {
+      console.error(
+        `resolveSegment: cannot generate video without a storyboard`
+      )
+      return segment
+    }
+
+    const { workflowDefaultValues, workflowValues } = getWorkflowInputValues(
+      request.settings.videoGenerationWorkflow
+    )
+
+    const result = await createAndFetchDreamMachineVideo(
+      request.settings.piApiApiKey,
+      {
+        prompt: request.prompts.image.positive,
+        expand_prompt: false,
+        image_url: request.prompts.video.image,
+
+        // nice feature! we should use it :)
+        // image_end_url?: string;
+      }
+    )
+
+    result.data.generation.video
+
+    if (!result.data.generation.video) {
+      throw new Error(`Failed to generate at least one video`)
+    }
+
+    segment.assetUrl = `${result.data.generation.video || ''}`
+  } else {
+    throw new Error(
+      `Clapper doesn't support ${request.segment.category} generation for provider "PiApi". Please open a pull request with (working code) to solve this!`
+    )
+  }
+
+  return segment
+}
packages/app/src/app/api/resolve/providers/piapi/lumalabs/createAndFetchDreamMachineVideo.ts ADDED
@@ -0,0 +1,59 @@
+import { createDreamMachineVideo } from './createDreamMachineVideo'
+import { fetchDreamMachineVideoResult } from './fetchDreamMachineVideoResult'
+import { VideoFetchResponse, VideoGenerationParams } from './types'
+
+/**
+ * Asynchronous function to create a video and wait for the result.
+ * @param apiKey - The API key for authentication.
+ * @param params - The parameters for video generation.
+ * @param maxAttempts - Maximum number of fetch attempts (default: 60).
+ * @param delayMs - Delay between fetch attempts in milliseconds (default: 5000).
+ * @returns A promise that resolves to the final fetch response.
+ */
+export async function createAndFetchDreamMachineVideo(
+  apiKey: string,
+  params: VideoGenerationParams,
+  maxAttempts: number = 60,
+  delayMs: number = 5000
+): Promise<VideoFetchResponse> {
+  const creationResponse = await createDreamMachineVideo(apiKey, params)
+
+  let attempts = 0
+  while (attempts < maxAttempts) {
+    const fetchResponse = await fetchDreamMachineVideoResult(
+      apiKey,
+      creationResponse.data.task_id
+    )
+
+    if (
+      fetchResponse.data.status === 'finished' ||
+      fetchResponse.data.generation.state === 'completed'
+    ) {
+      return fetchResponse
+    }
+
+    if (
+      fetchResponse.data.status === 'failed' ||
+      fetchResponse.data.generation.state === 'failed'
+    ) {
+      throw new Error('Video generation failed')
+    }
+
+    attempts++
+    await new Promise((resolve) => setTimeout(resolve, delayMs))
+  }
+
+  throw new Error('Max attempts reached, video generation timed out')
+}
+
+// Example usage
+// const apiKey = 'your-api-key-here';
+// const params: VideoGenerationParams = {
+//   prompt: "a cute puppy",
+//   expand_prompt: false,
+//   image_url: "https://example.com/image.jpg"
+// };
+//
+// createAndFetchDreamMachineVideo(apiKey, params)
+//   .then(result => console.log(result))
+//   .catch(error => console.error(error));
packages/app/src/app/api/resolve/providers/piapi/lumalabs/createDreamMachineVideo.ts ADDED
@@ -0,0 +1,37 @@
+import { VideoGenerationParams, VideoGenerationResponse } from './types'
+
+/**
+ * Asynchronous function to initiate video generation using the PiAPI Dream Machine API.
+ * @param apiKey - The API key for authentication.
+ * @param params - The parameters for video generation.
+ * @returns A promise that resolves to the video generation response.
+ */
+export async function createDreamMachineVideo(
+  apiKey: string,
+  params: VideoGenerationParams
+): Promise<VideoGenerationResponse> {
+  const baseUrl = 'https://api.piapi.ai'
+  const headers = {
+    Accept: 'application/json',
+    'Content-Type': 'application/json',
+    'X-API-Key': apiKey,
+  }
+
+  try {
+    const response = await fetch(`${baseUrl}/api/luma/v1/video`, {
+      method: 'POST',
+      headers: headers,
+      body: JSON.stringify(params),
+    })
+
+    if (!response.ok) {
+      throw new Error(`HTTP error! status: ${response.status}`)
+    }
+
+    const data: VideoGenerationResponse = await response.json()
+    return data
+  } catch (error) {
+    console.error('Error in video generation:', error)
+    throw error
+  }
+}
packages/app/src/app/api/resolve/providers/piapi/lumalabs/fetchDreamMachineVideoResult.ts ADDED
@@ -0,0 +1,35 @@
+import { VideoFetchResponse } from './types'
+
+/**
+ * Asynchronous function to fetch the result of a Dream Machine video generation task.
+ * @param apiKey - The API key for authentication.
+ * @param taskId - The ID of the task to fetch.
+ * @returns A promise that resolves to the fetch response.
+ */
+export async function fetchDreamMachineVideoResult(
+  apiKey: string,
+  taskId: string
+): Promise<VideoFetchResponse> {
+  const baseUrl = 'https://api.piapi.ai'
+  const headers = {
+    Accept: 'application/json',
+    'X-API-Key': apiKey,
+  }
+
+  try {
+    const response = await fetch(`${baseUrl}/api/luma/v1/video/${taskId}`, {
+      method: 'GET',
+      headers: headers,
+    })
+
+    if (!response.ok) {
+      throw new Error(`HTTP error! status: ${response.status}`)
+    }
+
+    const data: VideoFetchResponse = await response.json()
+    return data
+  } catch (error) {
+    console.error('Error in fetching video result:', error)
+    throw error
+  }
+}
packages/app/src/app/api/resolve/providers/piapi/lumalabs/types.ts ADDED
@@ -0,0 +1,50 @@
+/**
+ * Represents the parameters for the video generation request.
+ */
+export type VideoGenerationParams = {
+  prompt: string
+  expand_prompt: boolean
+  image_url?: string
+  image_end_url?: string
+  loop?: boolean
+}
+
+/**
+ * Represents the response from the video generation request.
+ */
+export type VideoGenerationResponse = {
+  code: number
+  data: {
+    task_id: string
+  }
+  message: string
+}
+
+/**
+ * Represents the response from the fetch video result request.
+ */
+export type VideoFetchResponse = {
+  code: number
+  data: {
+    task_id: string
+    input: string
+    status: string
+    metadata: {
+      created_at: string
+      started_at: string
+      ended_at: string
+      quota_frozen: number
+      quota_usage: number
+    }
+    generation: {
+      id: string
+      prompt: string
+      state: string
+      created_at: string
+      video: string | null
+      like: string | null
+      estimate_wait_seconds: number | null
+    }
+  }
+  message: string
+}
packages/app/src/app/api/resolve/providers/piapi/midjourney/createImage.ts ADDED
@@ -0,0 +1,53 @@
+import { createMidjourneyImage } from './createMidjourneyImage'
+import { fetchMidjourneyResult } from './fetchMidjourneyResult'
+import { FetchResponse, ImageCreationParams } from './types'
+
+/**
+ * Asynchronous function to create an image and wait for the result.
+ * @param apiKey - The API key for authentication.
+ * @param params - The parameters for image creation.
+ * @param maxAttempts - Maximum number of fetch attempts (default: 10).
+ * @param delayMs - Delay between fetch attempts in milliseconds (default: 5000).
+ * @returns A promise that resolves to the final fetch response.
+ */
+export async function createImage(
+  apiKey: string,
+  params: ImageCreationParams,
+  maxAttempts: number = 10,
+  delayMs: number = 5000
+): Promise<FetchResponse> {
+  const creationResponse = await createMidjourneyImage(apiKey, params)
+
+  let attempts = 0
+  while (attempts < maxAttempts) {
+    const fetchResponse = await fetchMidjourneyResult(
+      apiKey,
+      creationResponse.task_id
+    )
+
+    if (fetchResponse.status === 'finished') {
+      return fetchResponse
+    }
+
+    if (fetchResponse.status === 'failed') {
+      throw new Error('Image creation failed')
+    }
+
+    attempts++
+    await new Promise((resolve) => setTimeout(resolve, delayMs))
+  }
+
+  throw new Error('Max attempts reached, image creation timed out')
+}
+
+// Example usage
+// const apiKey = 'your-api-key-here';
+// const params: ImageCreationParams = {
+//   prompt: "a mountain",
+//   process_mode: "fast",
+//   aspect_ratio: "1:1"
+// };
+//
+// createImage(apiKey, params)
+//   .then(result => console.log(result))
+//   .catch(error => console.error(error));
packages/app/src/app/api/resolve/providers/piapi/midjourney/createMidjourneyImage.ts ADDED
@@ -0,0 +1,37 @@
+import { ImageCreationParams, ImageCreationResponse } from './types'
+
+/**
+ * Asynchronous function to create an image using the PiAPI Midjourney API.
+ * @param apiKey - The API key for authentication.
+ * @param params - The parameters for image creation.
+ * @returns A promise that resolves to the image creation response.
+ */
+export async function createMidjourneyImage(
+  apiKey: string,
+  params: ImageCreationParams
+): Promise<ImageCreationResponse> {
+  const baseUrl = 'https://api.piapi.ai'
+  const headers = {
+    Accept: 'application/json',
+    'Content-Type': 'application/json',
+    'X-API-KEY': apiKey,
+  }
+
+  try {
+    const response = await fetch(`${baseUrl}/mj/v2/imagine`, {
+      method: 'POST',
+      headers: headers,
+      body: JSON.stringify(params),
+    })
+
+    if (!response.ok) {
+      throw new Error(`HTTP error! status: ${response.status}`)
+    }
+
+    const data: ImageCreationResponse = await response.json()
+    return data
+  } catch (error) {
+    console.error('Error in image creation:', error)
+    throw error
+  }
+}
packages/app/src/app/api/resolve/providers/piapi/midjourney/fetchMidjourneyResult.ts ADDED
@@ -0,0 +1,37 @@
+import { FetchResponse } from './types'
+
+/**
+ * Asynchronous function to fetch the result of a Midjourney task.
+ * @param apiKey - The API key for authentication.
+ * @param taskId - The ID of the task to fetch.
+ * @returns A promise that resolves to the fetch response.
+ */
+export async function fetchMidjourneyResult(
+  apiKey: string,
+  taskId: string
+): Promise<FetchResponse> {
+  const baseUrl = 'https://api.piapi.ai'
+  const headers = {
+    Accept: 'application/json',
+    'Content-Type': 'application/json',
+    'X-API-KEY': apiKey,
+  }
+
+  try {
+    const response = await fetch(`${baseUrl}/mj/v2/fetch`, {
+      method: 'POST',
+      headers: headers,
+      body: JSON.stringify({ task_id: taskId }),
+    })
+
+    if (!response.ok) {
+      throw new Error(`HTTP error! status: ${response.status}`)
+    }
+
+    const data: FetchResponse = await response.json()
+    return data
+  } catch (error) {
+    console.error('Error in fetching result:', error)
+    throw error
+  }
+}
packages/app/src/app/api/resolve/providers/piapi/midjourney/types.ts ADDED
@@ -0,0 +1,66 @@
+/**
+ * Represents the parameters for the image creation request.
+ */
+export type ImageCreationParams = {
+  prompt: string
+  skip_prompt_check?: boolean
+  process_mode?: 'relax' | 'fast' | 'turbo'
+  aspect_ratio?: string
+  webhook_endpoint?: string
+  webhook_secret?: string
+  bot_id?: number
+}
+
+/**
+ * Represents the response from the image creation request.
+ */
+export type ImageCreationResponse = {
+  task_id: string
+  status: string
+  message: string
+}
+
+/**
+ * Represents the task result in the fetch response.
+ */
+export type TaskResult = {
+  discord_image_url?: string
+  image_url?: string
+  image_urls?: string[]
+  permanent_url?: string
+  task_progress?: number
+  intermediate_image_urls?: string[] | null
+  image_id?: string
+  seed?: string
+  result_message_id?: string
+  quota_used?: number
+  credit?: number
+  message?: string
+  warning?: string
+  error_messages?: string[]
+  need_retry?: boolean
+  actions?: string[]
+}
+
+/**
+ * Represents the response from the fetch request.
+ */
+export type FetchResponse = {
+  task_id: string
+  status: string
+  process_time: number
+  retry_count: number
+  meta: {
+    account_id: string
+    task_type: string
+    process_mode: string
+    created_at: number
+    created_at_utc: string
+    started_at: number
+    started_at_utc: string
+    ended_at: number
+    ended_at_utc: string
+    [key: string]: any // For other possible meta fields
+  }
+  task_result: TaskResult
+}
packages/app/src/app/api/resolve/route.ts CHANGED
@@ -21,6 +21,9 @@ import {
   resolveSegmentUsingModelsLab,
   resolveSegmentUsingStabilityAi,
   resolveSegmentUsingComfyUI,
+  resolveSegmentUsingLetzAi,
+  resolveSegmentUsingBigModel,
+  resolveSegmentUsingPiApi,
 } from './providers'

 import { ResolveRequest } from '@aitube/clapper-services'
@@ -88,9 +91,15 @@ export async function POST(req: NextRequest) {
             ? resolveSegmentUsingFalAi
             : provider === ClapWorkflowProvider.MODELSLAB
               ? resolveSegmentUsingModelsLab
-              : provider === ClapWorkflowProvider.AITUBE
-                ? resolveSegmentUsingAiTube
-                : null
+              : provider === ClapWorkflowProvider.LETZAI
+                ? resolveSegmentUsingLetzAi
+                : provider === ClapWorkflowProvider.BIGMODEL
+                  ? resolveSegmentUsingBigModel
+                  : provider === ClapWorkflowProvider.PIAPI
+                    ? resolveSegmentUsingPiApi
+                    : provider === ClapWorkflowProvider.AITUBE
+                      ? resolveSegmentUsingAiTube
+                      : null

   if (!resolveSegment) {
     throw new Error(
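The ternary chain above grows by two lines per provider; an equivalent lookup table is one possible way to express the same dispatch. A sketch only (it reuses the imports and enum members from the diff above, but is not what the commit actually does):

import { ClapWorkflowProvider } from '@aitube/clap'
import {
  resolveSegmentUsingBigModel,
  resolveSegmentUsingFalAi,
  resolveSegmentUsingLetzAi,
  resolveSegmentUsingPiApi,
} from './providers'

// map each provider to its resolver; providers left out fall through to null
const resolvers: Partial<
  Record<ClapWorkflowProvider, typeof resolveSegmentUsingFalAi>
> = {
  [ClapWorkflowProvider.FALAI]: resolveSegmentUsingFalAi,
  [ClapWorkflowProvider.LETZAI]: resolveSegmentUsingLetzAi,
  [ClapWorkflowProvider.BIGMODEL]: resolveSegmentUsingBigModel,
  [ClapWorkflowProvider.PIAPI]: resolveSegmentUsingPiApi,
}

function pickResolver(provider: ClapWorkflowProvider) {
  return resolvers[provider] ?? null
}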
packages/app/src/components/core/providers/logos.ts CHANGED
@@ -1,16 +1,20 @@
 import { ClapWorkflowProvider } from '@aitube/clap'

 const none = '/images/providers/none.png'
-const builtin = '/images/providers/none.png' // <-- TODO put Clapper logo here
-const comfyui = '/images/providers/comfyui.png'
+
+// ------------
+
+const aitube = '/images/providers/none.png' // <- TODO
 const anthropic = '/images/providers/anthropic.png'
+const bigmodel = '/images/providers/bigmodel.jpeg'
+const builtin = '/images/providers/none.png' // <-- TODO put Clapper logo here
 const cohere = '/images/providers/cohere.png'
-const comfyicu = '/images/providers/comfyicu.png'
 const comfydeploy = '/images/providers/none.png' // <- feel free to open a PR
+const comfyicu = '/images/providers/comfyicu.png'
+const comfyui = '/images/providers/comfyui.png'
 const elevenlabs = '/images/providers/elevenlabs.png'
 const everartai = '/images/providers/everartai.png'
 const falai = '/images/providers/falai.png'
-const aitube = '/images/providers/none.png' // <- TODO
 const fireworks = '/images/providers/fireworks.png'
 const google = '/images/providers/google.png'
 const groq = '/images/providers/groq.png'
@@ -19,11 +23,13 @@ const huggingface = '/images/providers/huggingface.png'
 const kitsai = '/images/providers/kitsai.png'
 const kuaishou = '/images/providers/kuaishou.png'
 const leonardoai = '/images/providers/leonardoai.png'
+const letzai = '/images/providers/letzai.png'
 const lumalabs = '/images/providers/lumalabs.png'
 const midjourney = '/images/providers/midjourney.png'
 const mistralai = '/images/providers/mistralai.png'
 const modelslab = '/images/providers/modelslab.jpeg'
 const openai = '/images/providers/openai.png'
+const piapi = '/images/providers/piapi.jpg'
 const replicate = '/images/providers/replicate.jpeg'
 const runwayml = '/images/providers/runwayml.png'
 const stabilityai = '/images/providers/stabilityai.png'
@@ -33,33 +39,37 @@ const udio = '/images/providers/udio.png'
 export const ClapWorkflowProvidersLogos: Record<ClapWorkflowProvider, string> =
   {
     [ClapWorkflowProvider.NONE]: none,
-    [ClapWorkflowProvider.BUILTIN]: builtin,
-    [ClapWorkflowProvider.COMFYUI]: comfyui,
+    // ----
+    [ClapWorkflowProvider.AITUBE]: aitube,
     [ClapWorkflowProvider.ANTHROPIC]: anthropic,
+    [ClapWorkflowProvider.BIGMODEL]: bigmodel,
+    [ClapWorkflowProvider.BUILTIN]: builtin,
     [ClapWorkflowProvider.COHERE]: cohere,
+    [ClapWorkflowProvider.COMFYDEPLOY]: comfydeploy,
+    [ClapWorkflowProvider.COMFYICU]: comfyicu,
+    [ClapWorkflowProvider.COMFYUI]: comfyui,
+    [ClapWorkflowProvider.CUSTOM]: none,
     [ClapWorkflowProvider.ELEVENLABS]: elevenlabs,
+    [ClapWorkflowProvider.EVERARTAI]: everartai,
     [ClapWorkflowProvider.FALAI]: falai,
-    [ClapWorkflowProvider.AITUBE]: aitube,
     [ClapWorkflowProvider.FIREWORKSAI]: fireworks,
     [ClapWorkflowProvider.GOOGLE]: google,
     [ClapWorkflowProvider.GROQ]: groq,
     [ClapWorkflowProvider.HEDRA]: hedra,
     [ClapWorkflowProvider.HUGGINGFACE]: huggingface,
     [ClapWorkflowProvider.KITSAI]: kitsai,
+    [ClapWorkflowProvider.KUAISHOU]: kuaishou,
+    [ClapWorkflowProvider.LEONARDOAI]: leonardoai,
+    [ClapWorkflowProvider.LETZAI]: letzai,
     [ClapWorkflowProvider.LUMALABS]: lumalabs,
     [ClapWorkflowProvider.MIDJOURNEY]: midjourney,
     [ClapWorkflowProvider.MISTRALAI]: mistralai,
     [ClapWorkflowProvider.MODELSLAB]: modelslab,
     [ClapWorkflowProvider.OPENAI]: openai,
+    [ClapWorkflowProvider.PIAPI]: piapi,
     [ClapWorkflowProvider.REPLICATE]: replicate,
     [ClapWorkflowProvider.RUNWAYML]: runwayml,
     [ClapWorkflowProvider.STABILITYAI]: stabilityai,
     [ClapWorkflowProvider.SUNO]: suno,
     [ClapWorkflowProvider.UDIO]: udio,
-    [ClapWorkflowProvider.CUSTOM]: none,
-    [ClapWorkflowProvider.COMFYDEPLOY]: comfydeploy,
-    [ClapWorkflowProvider.COMFYICU]: comfyicu,
-    [ClapWorkflowProvider.KUAISHOU]: kuaishou,
-    [ClapWorkflowProvider.LEONARDOAI]: leonardoai,
-    [ClapWorkflowProvider.EVERARTAI]: everartai,
   }
packages/app/src/components/forms/FormArea.tsx ADDED
@@ -0,0 +1,89 @@
+import {
+  ChangeEvent,
+  HTMLInputTypeAttribute,
+  ReactNode,
+  useMemo,
+  useRef,
+} from 'react'
+
+import { cn, getValidNumber, isValidNumber } from '@/lib/utils'
+
+import { Input } from '../ui/input'
+
+import { FormField } from './FormField'
+import { useTheme } from '@/services'
+import { Textarea } from '../ui/textarea'
+
+export function FormArea<T>(
+  {
+    label,
+    className,
+    placeholder,
+    value,
+    minValue,
+    maxValue,
+    defaultValue,
+    disabled,
+    onChange,
+    type,
+    rows = 4,
+    ...props
+  }: {
+    label?: ReactNode
+    className?: string
+    placeholder?: ReactNode
+    value?: T
+    minValue?: T
+    maxValue?: T
+    defaultValue?: T
+    disabled?: boolean
+    onChange?: (newValue: T) => void
+    type?: HTMLInputTypeAttribute
+    rows?: number
+    props?: any
+  }
+  // & Omit<ComponentProps<typeof Input>, "value" | "defaultValue" | "placeholder" | "type" | "className" | "disabled" | "onChange">
+  // & ComponentProps<typeof Input>
+) {
+  const theme = useTheme()
+
+  const ref = useRef<HTMLTextAreaElement>(null)
+
+  const handleChange = useMemo(
+    () => (event: ChangeEvent<HTMLTextAreaElement>) => {
+      if (disabled) {
+        return
+      }
+      if (!onChange) {
+        return
+      }
+
+      const rawStringValue = `${event.currentTarget.value || ''}`
+
+      onChange(rawStringValue as T)
+    },
+    [defaultValue, disabled, onChange]
+  )
+
+  return (
+    <FormField label={label}>
+      <Textarea
+        ref={ref}
+        placeholder={`${placeholder || defaultValue || ''}`}
+        className={cn(`w-full`, `font-mono text-xs font-light`, className)}
+        disabled={disabled}
+        onChange={handleChange}
+        rows={rows}
+        // {...props}
+        value={`${value || defaultValue}`}
+        // since we are controlling the element with value=*, we should not use defaultValue=*
+        // defaultValue={`${defaultValue || ""}`}
+
+        style={{
+          borderRadius: theme.formInputRadius || '8px',
+        }}
+        {...props}
+      />
+    </FormField>
+  )
+}
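FormArea is consumed as a controlled component: the parent owns the string value and receives every keystroke through onChange, as the settings panels below do via the settings store. A minimal local-state sketch (the wrapper component is hypothetical, not part of the commit):

import { useState } from 'react'
import { FormArea } from '@/components/forms'

function WorkflowJsonField() {
  // controlled usage: value comes from state, onChange writes it back
  const [workflow, setWorkflow] = useState('')
  return (
    <FormArea
      label="Custom ComfyUI workflow"
      value={workflow}
      onChange={setWorkflow}
      rows={8}
    />
  )
}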
packages/app/src/components/forms/index.ts CHANGED
@@ -1,3 +1,4 @@
+export { FormArea } from './FormArea'
 export { FormDir } from './FormDir'
 export { FormField } from './FormField'
 export { FormFile } from './FormFile'
packages/app/src/components/settings/image.tsx CHANGED
@@ -1,11 +1,14 @@
-import { FormSection } from '@/components/forms/FormSection'
+import { FormArea, FormInput, FormSection } from '@/components/forms'
 import { getDefaultSettingsState, useSettings } from '@/services/settings'

-import { FormInput } from '../forms/FormInput'
-
 export function SettingsSectionImage() {
   const defaultSettings = getDefaultSettingsState()

+  const comfyWorkflowForImage = useSettings((s) => s.comfyWorkflowForImage)
+  const setComfyWorkflowForImage = useSettings(
+    (s) => s.setComfyWorkflowForImage
+  )
+
   const imagePromptPrefix = useSettings((s) => s.imagePromptPrefix)
   const setImagePromptPrefix = useSettings((s) => s.setImagePromptPrefix)

@@ -52,6 +55,14 @@ export function SettingsSectionImage() {
           defaultValue={defaultSettings.imageNegativePrompt}
           onChange={setImageNegativePrompt}
         />
+
+        <FormArea
+          label="Custom ComfyUI workflows for images"
+          value={comfyWorkflowForImage}
+          defaultValue={defaultSettings.comfyWorkflowForImage}
+          onChange={setComfyWorkflowForImage}
+          rows={8}
+        />
       </FormSection>
     </div>
   )
packages/app/src/components/settings/index.tsx CHANGED
@@ -1,5 +1,3 @@
-import { useState, useTransition } from 'react'
-
 import { Button } from '@/components/ui/button'
 import { Dialog, DialogContent, DialogFooter } from '@/components/ui/dialog'
 import { ScrollArea } from '@/components/ui/scroll-area'
packages/app/src/components/settings/music.tsx CHANGED
@@ -1,14 +1,24 @@
-import { FormSection } from '@/components/forms/FormSection'
-import { getDefaultSettingsState } from '@/services/settings'
+import { FormArea, FormSection } from '@/components/forms'
+import { getDefaultSettingsState, useSettings } from '@/services/settings'

 export function SettingsSectionMusic() {
+  const defaultSettings = getDefaultSettingsState()
+
+  const comfyWorkflowForMusic = useSettings((s) => s.comfyWorkflowForMusic)
+  const setComfyWorkflowForMusic = useSettings(
+    (s) => s.setComfyWorkflowForMusic
+  )
+
   return (
     <div className="flex flex-col justify-between space-y-6">
       <FormSection label="Music rendering">
-        <p>No settings for the music yet.</p>
-        <p>
-          (In the future we might add a system prompt or prompt template here)
-        </p>
+        <FormArea
+          label="Custom ComfyUI workflows for music"
+          value={comfyWorkflowForMusic}
+          defaultValue={defaultSettings.comfyWorkflowForMusic}
+          onChange={setComfyWorkflowForMusic}
+          rows={8}
+        />
       </FormSection>
     </div>
   )
packages/app/src/components/settings/provider.tsx CHANGED
@@ -92,6 +92,15 @@ export function SettingsSectionProvider() {
   const kitsAiApiKey = useSettings((s) => s.kitsAiApiKey)
   const setKitsAiApiKey = useSettings((s) => s.setKitsAiApiKey)

+  const letzAiApiKey = useSettings((s) => s.letzAiApiKey)
+  const setLetzAiApiKey = useSettings((s) => s.setLetzAiApiKey)
+
+  const bigModelApiKey = useSettings((s) => s.bigModelApiKey)
+  const setBigModelApiKey = useSettings((s) => s.setBigModelApiKey)
+
+  const piApiApiKey = useSettings((s) => s.piApiApiKey)
+  const setPiApiApiKey = useSettings((s) => s.setPiApiApiKey)
+
   const apiKeyType = showApiKeys ? 'text' : 'password'

   return (
@@ -296,6 +305,30 @@ export function SettingsSectionProvider() {
           onChange={setKitsAiApiKey}
          type={apiKeyType}
        />
+
+        <FormInput
+          label="LetzAI API Key"
+          value={letzAiApiKey}
+          defaultValue={defaultSettings.letzAiApiKey}
+          onChange={setLetzAiApiKey}
+          type={apiKeyType}
+        />
+
+        <FormInput
+          label="BigModel API Key"
+          value={bigModelApiKey}
+          defaultValue={defaultSettings.bigModelApiKey}
+          onChange={setBigModelApiKey}
+          type={apiKeyType}
+        />
+
+        <FormInput
+          label="PiApi API Key"
+          value={piApiApiKey}
+          defaultValue={defaultSettings.piApiApiKey}
+          onChange={setPiApiApiKey}
+          type={apiKeyType}
+        />
       </FormSection>
     </div>
   )
packages/app/src/components/settings/sound.tsx CHANGED
@@ -1,13 +1,24 @@
-import { FormSection } from '@/components/forms/FormSection'
+import { FormArea, FormSection } from '@/components/forms'
+import { getDefaultSettingsState, useSettings } from '@/services/settings'

 export function SettingsSectionSound() {
+  const defaultSettings = getDefaultSettingsState()
+
+  const comfyWorkflowForSound = useSettings((s) => s.comfyWorkflowForSound)
+  const setComfyWorkflowForSound = useSettings(
+    (s) => s.setComfyWorkflowForSound
+  )
+
   return (
     <div className="flex flex-col justify-between space-y-6">
       <FormSection label="Sound rendering">
-        <p>No settings for the sound yet.</p>
-        <p>
-          (In the future we might add a system prompt or prompt template here)
-        </p>
+        <FormArea
+          label="Custom ComfyUI workflows for sound"
+          value={comfyWorkflowForSound}
+          defaultValue={defaultSettings.comfyWorkflowForSound}
+          onChange={setComfyWorkflowForSound}
+          rows={8}
+        />
       </FormSection>
     </div>
   )
packages/app/src/components/settings/video.tsx CHANGED
@@ -1,10 +1,14 @@
-import { FormSection } from '@/components/forms/FormSection'
+import { FormArea, FormInput, FormSection } from '@/components/forms'
 import { getDefaultSettingsState, useSettings } from '@/services/settings'
-import { FormInput } from '../forms/FormInput'

 export function SettingsSectionVideo() {
   const defaultSettings = getDefaultSettingsState()

+  const comfyWorkflowForVideo = useSettings((s) => s.comfyWorkflowForVideo)
+  const setComfyWorkflowForVideo = useSettings(
+    (s) => s.setComfyWorkflowForVideo
+  )
+
   const videoPromptPrefix = useSettings((s) => s.videoPromptPrefix)
   const setVideoPromptPrefix = useSettings((s) => s.setVideoPromptPrefix)

@@ -51,6 +55,14 @@ export function SettingsSectionVideo() {
           defaultValue={defaultSettings.videoNegativePrompt}
           onChange={setVideoNegativePrompt}
         />
+
+        <FormArea
+          label="Custom ComfyUI workflows for video"
+          value={comfyWorkflowForVideo}
+          defaultValue={defaultSettings.comfyWorkflowForVideo}
+          onChange={setComfyWorkflowForVideo}
+          rows={8}
+        />
       </FormSection>
     </div>
   )
packages/app/src/components/settings/voice.tsx CHANGED
@@ -1,13 +1,24 @@
-import { FormSection } from '@/components/forms/FormSection'
+import { FormArea, FormSection } from '@/components/forms'
+import { getDefaultSettingsState, useSettings } from '@/services/settings'

 export function SettingsSectionVoice() {
+  const defaultSettings = getDefaultSettingsState()
+
+  const comfyWorkflowForVoice = useSettings((s) => s.comfyWorkflowForVoice)
+  const setComfyWorkflowForVoice = useSettings(
+    (s) => s.setComfyWorkflowForVoice
+  )
+
   return (
     <div className="flex flex-col justify-between space-y-6">
       <FormSection label="Voice rendering">
-        <p>No settings for the voice yet.</p>
-        <p>
-          (In the future we might add a system prompt or prompt template here)
-        </p>
+        <FormArea
+          label="Custom ComfyUI workflows for voice"
+          value={comfyWorkflowForVoice}
+          defaultValue={defaultSettings.comfyWorkflowForVoice}
+          onChange={setComfyWorkflowForVoice}
+          rows={8}
+        />
       </FormSection>
     </div>
   )
packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts ADDED
@@ -0,0 +1,26 @@
+import {
+  ClapWorkflow,
+  ClapWorkflowCategory,
+  ClapWorkflowEngine,
+  ClapWorkflowProvider,
+} from '@aitube/clap'
+import { genericImage, genericImageUrl } from '../common/defaultValues'
+
+export const bigModelWorkflows: ClapWorkflow[] = [
+  {
+    id: 'bigmodel://api/paas/v4/cogvideox',
+    label: 'CogVideoX',
+    description: '',
+    tags: ['video'],
+    author: '',
+    thumbnailUrl: '',
+    engine: ClapWorkflowEngine.REST_API,
+    provider: ClapWorkflowProvider.BIGMODEL,
+    category: ClapWorkflowCategory.VIDEO_GENERATION,
+    data: 'cogvideox', // <- "code" of the model, see: https://bigmodel.cn/dev/api#cogvideox
+    inputFields: [genericImageUrl],
+    inputValues: {
+      [genericImageUrl.id]: genericImageUrl.defaultValue,
+    },
+  },
+]
packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts CHANGED
@@ -32,6 +32,15 @@ export const genericPrompt: ClapInputField = {
  defaultValue: '',
  }

+ export const genericRatio: ClapInputField = {
+ id: 'ratio',
+ label: 'Image ratio',
+ description: 'Image ratio (defaults to 1:1)',
+ type: 'string',
+ allowedValues: ['1:1', '16:9', '9:16'],
+ defaultValue: '1:1',
+ }
+
  export const genericSeed: ClapInputField = {
  id: 'seed',
  label: 'Seed',
@@ -51,6 +60,15 @@ export const genericImage: ClapInputField = {
  defaultValue: '',
  }

+ export const genericImageUrl: ClapInputField = {
+ id: 'image_url',
+ label: 'Image URL',
+ description: 'Image URL',
+ type: 'string',
+ allowedValues: [],
+ defaultValue: '',
+ }
+
  export const genericVideo: ClapInputField = {
  id: 'video',
  label: 'Video',
@@ -60,6 +78,15 @@ export const genericVideo: ClapInputField = {
  defaultValue: '',
  }

+ export const genericVideoUrl: ClapInputField = {
+ id: 'video_url',
+ label: 'Video URL',
+ description: 'Video URL',
+ type: 'string',
+ allowedValues: [],
+ defaultValue: '',
+ }
+
  export const genericVoice: ClapInputField = {
  id: 'voice',
  label: 'Voice',
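
Since every workflow below repeats the `[field.id]: field.defaultValue` pattern, a small hypothetical helper (not part of this commit) could derive the default `inputValues` from `inputFields`:

import { ClapInputField } from '@aitube/clap'

// Hypothetical helper (not in this commit): build the default inputValues
// record from a list of input fields.
export function getDefaultInputValues(
  fields: ClapInputField[]
): Record<string, ClapInputField['defaultValue']> {
  const values: Record<string, ClapInputField['defaultValue']> = {}
  for (const field of fields) {
    values[field.id] = field.defaultValue
  }
  return values
}

// e.g. inputValues: getDefaultInputValues([genericPrompt, genericImageUrl])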
packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts CHANGED
@@ -13,7 +13,7 @@ import {
  genericInput,
  genericSeed,
  genericPrompt,
- genericVideo,
+ genericVideoUrl,
  genericWidth1024,
  genericWidth2048,
  genericMotionBucketId,
@@ -21,6 +21,7 @@ import {
  genericUpscalingFactor,
  genericOverlappingTiles,
  genericInferenceSteps,
+ genericImageUrl,
  } from '../common/defaultValues'
  import { sampleDrivingVideo, sampleVoice } from '@/lib/core/constants'

@@ -48,10 +49,7 @@ export const defaultWorkflows: ClapWorkflow[] = [
  category: ClapWorkflowCategory.VIDEO_GENERATION,
  data: 'fal-ai/stable-video',
  inputFields: [
- {
- ...genericImage,
- id: 'image_url',
- },
+ genericImageUrl,
  genericSeed,
  genericMotionBucketId,
  {
@@ -60,7 +58,7 @@
  },
  ],
  inputValues: {
- image_url: genericImage.defaultValue,
+ [genericImageUrl.id]: genericImageUrl.defaultValue,
  [genericMotionBucketId.id]: 55,
  cond_aug: 0.02,
  },
@@ -238,15 +236,12 @@ export const defaultWorkflows: ClapWorkflow[] = [
  category: ClapWorkflowCategory.IMAGE_UPSCALING,
  data: 'fal-ai/aura-sr',
  inputFields: [
- {
- ...genericImage,
- id: 'image_url',
- },
+ genericImageUrl,
  genericUpscalingFactor,
  genericOverlappingTiles,
  ],
  inputValues: {
- image_url: genericImage.defaultValue,
+ [genericImageUrl.id]: genericImageUrl.defaultValue,
  [genericUpscalingFactor.id]: genericUpscalingFactor.defaultValue,
  [genericOverlappingTiles.id]: genericOverlappingTiles.defaultValue,
  },
@@ -312,19 +307,10 @@ export const defaultWorkflows: ClapWorkflow[] = [
  provider: ClapWorkflowProvider.FALAI,
  category: ClapWorkflowCategory.VIDEO_GENERATION,
  data: 'fal-ai/live-portrait',
- inputFields: [
- {
- ...genericImage,
- id: 'image_url',
- },
- {
- ...genericVideo,
- id: 'video_url',
- },
- ],
+ inputFields: [genericImageUrl, genericVideoUrl],
  inputValues: {
- image_url: genericImage.defaultValue,
- video_url: sampleDrivingVideo,
+ [genericImageUrl.id]: genericImageUrl.defaultValue,
+ [genericVideoUrl.id]: sampleDrivingVideo,
  },
  },
  ]
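
The refactor above swaps the inline `{ ...genericImage, id: 'image_url' }` spreads for the shared `genericImageUrl` field; as far as the field id is concerned the two forms are interchangeable (illustrative sketch, not commit code):

// Both forms expose the same field id; only label/description differ.
const inlineField = { ...genericImage, id: 'image_url' } // previous style
const sharedField = genericImageUrl // new shared constant
console.log(inlineField.id === sharedField.id) // true: both are 'image_url'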
packages/app/src/services/editors/workflow-editor/workflows/huggingface/index.ts CHANGED
@@ -58,6 +58,25 @@ export const huggingfaceWorkflows: ClapWorkflow[] = [
  // TODO: add guidance scale and number of steps
  },
  },
+ {
+ id: 'huggingface://models/jbilcke-hf/flux-dev-panorama-lora-2',
+ label: 'FLUX.1-[dev] Panorama Lora (v2)',
+ description: 'Generate 360° panoramas using Flux (non-commercial use)',
+ tags: ['flux', '360°', 'panorama'],
+ author: '@jbilcke-hf',
+ // TODO add specific field about licensing?
+ thumbnailUrl: '',
+ engine: ClapWorkflowEngine.REST_API,
+ provider: ClapWorkflowProvider.HUGGINGFACE,
+ category: ClapWorkflowCategory.IMAGE_GENERATION,
+ data: 'jbilcke-hf/flux-dev-panorama-lora-2',
+ inputFields: [genericPrompt, genericWidth2048, genericHeight2048],
+ inputValues: {
+ prompt: genericPrompt.defaultValue,
+ width: genericWidth2048.defaultValue,
+ height: genericHeight2048.defaultValue,
+ },
+ },
  {
  id: 'huggingface://models/coqui/XTTS-v2',
  label: 'Coqui XTTS-v2',
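
A hedged sketch of how the model id stored in `data` could be called through the public Hugging Face Inference API; whether `width`/`height` are honored depends on the model's pipeline, and this is an assumption rather than the resolver shipped in this commit:

// Hedged sketch, not commit code.
async function generatePanorama(
  hfApiKey: string,
  prompt: string,
  width: number,
  height: number
): Promise<Blob> {
  const res = await fetch(
    'https://api-inference.huggingface.co/models/jbilcke-hf/flux-dev-panorama-lora-2',
    {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${hfApiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        inputs: prompt,
        parameters: { width, height }, // assumed to be forwarded to the pipeline
      }),
    }
  )
  if (!res.ok) {
    throw new Error(`Inference API error: ${res.status}`)
  }
  return res.blob() // the response body is the generated image
}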
packages/app/src/services/editors/workflow-editor/workflows/letzai/index.ts ADDED
@@ -0,0 +1,32 @@
+ import {
+ ClapWorkflow,
+ ClapWorkflowCategory,
+ ClapWorkflowEngine,
+ ClapWorkflowProvider,
+ } from '@aitube/clap'
+ import {
+ genericHeight1024,
+ genericPrompt,
+ genericWidth1024,
+ } from '../common/defaultValues'
+
+ export const letzAiWorkflows: ClapWorkflow[] = [
+ {
+ id: 'letzai://api/images',
+ label: 'Create image with LetzAI',
+ image: '',
+ tags: ['image'],
+ author: '',
+ thumbnailUrl: '',
+ engine: ClapWorkflowEngine.REST_API,
+ provider: ClapWorkflowProvider.LETZAI,
+ category: ClapWorkflowCategory.IMAGE_GENERATION,
+ data: 'api/images', // <- this value isn't really used, it's just to put something here
+ inputFields: [genericPrompt, genericWidth1024, genericHeight1024],
+ inputValues: {
+ [genericPrompt.id]: genericPrompt.defaultValue,
+ [genericWidth1024.id]: genericWidth1024.defaultValue,
+ [genericHeight1024.id]: genericHeight1024.defaultValue,
+ },
+ },
+ ]
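
A hedged sketch of the asynchronous LetzAI image creation flow that the `letzai://api/images` id hints at; the base URL, auth header and response shape are assumptions about LetzAI's public API, not the resolver added in this commit:

// Hedged sketch, not commit code.
async function createLetzAiImage(
  apiKey: string,
  prompt: string,
  width = 1024,
  height = 1024
): Promise<string> {
  const res = await fetch('https://api.letz.ai/images', {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ prompt, width, height }),
  })
  if (!res.ok) {
    throw new Error(`LetzAI request failed: ${res.status}`)
  }
  const job = (await res.json()) as { id: string }
  return job.id // the image is generated asynchronously and polled by id
}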
packages/app/src/services/editors/workflow-editor/workflows/piapi/index.ts ADDED
@@ -0,0 +1,58 @@
+ import {
+ ClapWorkflow,
+ ClapWorkflowCategory,
+ ClapWorkflowEngine,
+ ClapWorkflowProvider,
+ } from '@aitube/clap'
+ import { genericImageUrl, genericPrompt } from '../common/defaultValues'
+
+ export const piApiWorkflows: ClapWorkflow[] = [
+ {
+ id: 'piapi://Midjourney/Imagine',
+ label: 'Midjourney Imagine',
+ image: '',
+ tags: ['Midjourney'],
+ author: '',
+ thumbnailUrl: '',
+ engine: ClapWorkflowEngine.REST_API,
+ provider: ClapWorkflowProvider.PIAPI,
+ category: ClapWorkflowCategory.IMAGE_GENERATION,
+ data: 'mj/v2/imagine', // <- this value isn't really used, it's just to put something here
+ inputFields: [
+ genericPrompt,
+ // genericRatio
+ // genericWidth1024,
+ // genericHeight1024,
+ ],
+ inputValues: {
+ [genericPrompt.id]: genericPrompt.defaultValue,
+ // genericRatio
+ // [genericWidth1024.id]: genericWidth1024.defaultValue,
+ // [genericHeight1024.id]: genericHeight1024.defaultValue,
+ },
+ },
+ {
+ id: 'piapi://LumaLabs/DreamMachine',
+ label: 'Luma Labs Dream Machine',
+ image: '',
+ tags: ['Dream Machine'],
+ author: '',
+ thumbnailUrl: '',
+ engine: ClapWorkflowEngine.REST_API,
+ provider: ClapWorkflowProvider.PIAPI,
+ category: ClapWorkflowCategory.VIDEO_GENERATION,
+ data: 'luma/v1/video', // <- this value isn't really used, it's just to put something here
+ inputFields: [
+ genericImageUrl,
+ // genericRatio
+ // genericWidth1024,
+ // genericHeight1024,
+ ],
+ inputValues: {
+ [genericImageUrl.id]: genericImageUrl.defaultValue,
+ // genericRatio
+ // [genericWidth1024.id]: genericWidth1024.defaultValue,
+ // [genericHeight1024.id]: genericHeight1024.defaultValue,
+ },
+ },
+ ]
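
A hedged sketch of the Midjourney "Imagine" task creation that `mj/v2/imagine` refers to; the base URL, `X-API-Key` header and body fields are assumptions about PiAPI's public API and may not match the resolver shipped in this commit:

// Hedged sketch, not commit code.
async function createMidjourneyImagineTask(
  apiKey: string,
  prompt: string
): Promise<string> {
  const res = await fetch('https://api.piapi.ai/mj/v2/imagine', {
    method: 'POST',
    headers: {
      'X-API-Key': apiKey, // assumed header name
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      prompt,
      process_mode: 'fast', // assumed option
    }),
  })
  if (!res.ok) {
    throw new Error(`PiAPI request failed: ${res.status}`)
  }
  const task = (await res.json()) as { task_id: string }
  return task.task_id // the task id is polled later to retrieve the image grid
}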
packages/app/src/services/settings/getDefaultSettingsState.ts CHANGED
@@ -26,6 +26,9 @@ export function getDefaultSettingsState(): SettingsState {
  mistralAiApiKey: '',
  stabilityAiApiKey: '',
  fireworksAiApiKey: '',
+ letzAiApiKey: '',
+ bigModelApiKey: '',
+ piApiApiKey: '',

  broadcastObsServerHost: '192.168.1.22',
  broadcastObsServerPort: 4455,
packages/app/src/services/settings/useSettings.ts CHANGED
@@ -210,6 +210,31 @@ export const useSettings = create<SettingsStore>()(
  })
  },

+ setLetzAiApiKey: (letzAiApiKey?: string) => {
+ set({
+ letzAiApiKey: getValidString(
+ letzAiApiKey,
+ getDefaultSettingsState().letzAiApiKey
+ ),
+ })
+ },
+ setBigModelApiKey: (bigModelApiKey?: string) => {
+ set({
+ bigModelApiKey: getValidString(
+ bigModelApiKey,
+ getDefaultSettingsState().bigModelApiKey
+ ),
+ })
+ },
+ setPiApiApiKey: (piApiApiKey?: string) => {
+ set({
+ piApiApiKey: getValidString(
+ piApiApiKey,
+ getDefaultSettingsState().piApiApiKey
+ ),
+ })
+ },
+
  setBroadcastObsServerHost: (broadcastObsServerHost: string) => {
  set({ broadcastObsServerHost })
  },
@@ -829,7 +854,10 @@
  state.fireworksAiApiKey || defaultSettings.fireworksAiApiKey,
  stabilityAiApiKey:
  state.stabilityAiApiKey || defaultSettings.stabilityAiApiKey,
-
+ letzAiApiKey: state.letzAiApiKey || defaultSettings.letzAiApiKey,
+ bigModelApiKey:
+ state.bigModelApiKey || defaultSettings.bigModelApiKey,
+ piApiApiKey: state.piApiApiKey || defaultSettings.piApiApiKey,
  broadcastObsServerHost:
  state.broadcastObsServerHost ||
  defaultSettings.broadcastObsServerHost,
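
The three new setters follow the existing pattern: sanitize the value through `getValidString` and fall back to the default. A short usage sketch with the store's imperative API:

// Usage sketch: reading and writing the new provider keys outside React.
import { useSettings } from '@/services/settings'

useSettings.getState().setLetzAiApiKey('my-letzai-key')
useSettings.getState().setPiApiApiKey(undefined) // resets to the default ('')

const { letzAiApiKey, bigModelApiKey, piApiApiKey } = useSettings.getState()
console.log({ letzAiApiKey, bigModelApiKey, piApiApiKey })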
packages/clap/src/types.ts CHANGED
@@ -527,6 +527,9 @@ export enum ClapWorkflowProvider {
  HEDRA = "HEDRA",
  LEONARDOAI = "LEONARDOAI",
  EVERARTAI = "EVERARTAI",
+ LETZAI = "LETZAI",
+ BIGMODEL = "BIGMODEL",
+ PIAPI = "PIAPI",
  }


packages/clapper-services/src/settings.ts CHANGED
@@ -25,6 +25,9 @@ export type BaseSettings = {
  mistralAiApiKey: string
  stabilityAiApiKey: string
  fireworksAiApiKey: string
+ letzAiApiKey: string
+ bigModelApiKey: string
+ piApiApiKey: string

  broadcastObsServerHost: string
  broadcastObsServerPort: number
@@ -140,6 +143,9 @@ export type SettingsControls = {
  setMistralAiApiKey: (mistralAiApiKey?: string) => void
  setKitsAiApiKey: (kitsAiApiKey?: string) => void
  setStabilityAiApiKey: (stabilityAiApiKey?: string) => void
+ setLetzAiApiKey: (letzAiApiKey?: string) => void
+ setBigModelApiKey: (bigModelApiKey?: string) => void
+ setPiApiApiKey: (piApiApiKey?: string) => void

  setCensorNotForAllAudiencesContent: (censorNotForAllAudiencesContent?: boolean) => void
  setImagePromptPrefix: (imagePromptPrefix?: string) => void