diff --git a/bun.lockb b/bun.lockb index b283d9475692797ac901b78e8538e87c885e4f42..1d4fb537aa7b7f36bdf94f0ba7f96ec8f0574c4b 100755 Binary files a/bun.lockb and b/bun.lockb differ diff --git a/package.json b/package.json index f775ffc5482c3d7a0b5a7135eec78d9a863d7e85..4949d9eb5ad79e2b13c3b20dc105f67d663702ee 100644 --- a/package.json +++ b/package.json @@ -1,22 +1,12 @@ { "name": "@aitube/clapper-monorepo", "version": "0.2.4", - "private": true, "description": "A monorepo for the Clapper project. Individual packages are in the packages directory.", - "workspaces": [ - "packages/clap", - "packages/timeline", - "packages/api-client", - "packages/io", - "packages/colors", - "packages/engine", - "packages/broadway", - "packages/clapper-services", - "packages/app" - ], "engines": { "bun": ">=1.0.0" }, + "packageManager": "bun@1.0.25", + "private": true, "scripts": { "dev": "bun run --cwd packages/app dev", "start": "bun run --cwd packages/app start", @@ -36,10 +26,20 @@ "test:all": "bun run --cwd packages/clap test && bun run --cwd packages/timeline test && bun run --cwd packages/api-client test && bun run --cwd packages/io test && bun run --cwd packages/colors test && bun run --cwd packages/engine test && bun run --cwd packages/broadway test && bun run --cwd packages/clapper-services test && bun run --cwd packages/app test", "format": "bun run --cwd packages/app format" }, - "packageManager": "bun@1.0.25", "trustedDependencies": [ "@aitube/clapper", "onnxruntime-node", "protobufjs" + ], + "workspaces": [ + "packages/clap", + "packages/timeline", + "packages/api-client", + "packages/io", + "packages/colors", + "packages/engine", + "packages/broadway", + "packages/clapper-services", + "packages/app" ] } \ No newline at end of file diff --git a/packages/api-client/package.json b/packages/api-client/package.json index 67462a2f526df65666c3117f90a0086554f579ef..bb4bb01b611fcbd9ef5b1e73f7447503a3a496b0 100644 --- a/packages/api-client/package.json +++ b/packages/api-client/package.json @@ -39,6 +39,8 @@ "dist/**/*.d.ts" ], "dependencies": { + "@aitube/clap": "workspace:*", + "@types/bun": "latest", "query-string": "^9.0.0" } } diff --git a/packages/app/package.json b/packages/app/package.json index f58653b14aec5c74a9be1657b722e98f3e4fd316..f10d3c01e6f2a9d3fe1dcbf424ca9072618a99c5 100644 --- a/packages/app/package.json +++ b/packages/app/package.json @@ -41,7 +41,7 @@ "@aitube/clapper-services": "workspace:*", "@aitube/engine": "workspace:*", "@aitube/timeline": "workspace:*", - "@fal-ai/serverless-client": "^0.13.0", + "@fal-ai/serverless-client": "^0.14.2", "@ffmpeg/ffmpeg": "^0.12.10", "@ffmpeg/util": "^0.12.1", "@gradio/client": "^1.5.0", diff --git a/packages/app/src/app/api/resolve/providers/replicate/index.ts b/packages/app/src/app/api/resolve/providers/replicate/index.ts index d3cfe32ec10e03f84cc783113f0a792851303df8..ce5b72cad4ee8f2c8ec919b7044d6fa9aed141f0 100644 --- a/packages/app/src/app/api/resolve/providers/replicate/index.ts +++ b/packages/app/src/app/api/resolve/providers/replicate/index.ts @@ -1,8 +1,11 @@ import Replicate from 'replicate' -import { ClapSegmentCategory } from '@aitube/clap' +import { ClapMediaOrientation, ClapSegmentCategory } from '@aitube/clap' import { ResolveRequest } from '@aitube/clapper-services' import { TimelineSegment } from '@aitube/timeline' +import { getWorkflowInputValues } from '../getWorkflowInputValues' +import { defaultLoraModels } from '@/services/editors/workflow-editor/workflows/common/loras' +import { getWorkflowLora } from 
'@/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora' export async function resolveSegment( request: ResolveRequest @@ -12,22 +15,28 @@ export async function resolveSegment( } const replicate = new Replicate({ auth: request.settings.replicateApiKey }) - if (request.segment.category !== ClapSegmentCategory.STORYBOARD) { - throw new Error( - `Clapper doesn't support ${request.segment.category} generation for provider "Replicate". Please open a pull request with (working code) to solve this!` - ) - } - const segment = request.segment - // this mapping isn't great, we should use something auto-adapting - // like we are doing for Hugging Face (match the fields etc) - if (request.segment.category === ClapSegmentCategory.STORYBOARD) { + if (request.segment.category == ClapSegmentCategory.STORYBOARD) { + + const { workflowValues } = getWorkflowInputValues( + request.settings.imageGenerationWorkflow + ) + let params: object = { prompt: request.prompts.image.positive, width: request.meta.width, height: request.meta.height, + disable_safety_checker: !request.settings.censorNotForAllAudiencesContent, } + + const aspectRatio = + request.meta.orientation === ClapMediaOrientation.SQUARE + ? "1:1" + : request.meta.orientation === ClapMediaOrientation.PORTRAIT + ? "9:16" + : "16:9" + if ( request.settings.imageGenerationWorkflow.data === 'fofr/pulid-lightning' ) { @@ -35,6 +44,28 @@ export async function resolveSegment( ...params, face_image: request.prompts.image.identity, } + } else if ( + request.settings.imageGenerationWorkflow.data === 'lucataco/flux-dev-lora' + ) { + + // note: this isn't the right place to do this, because maybe the LoRAs are dynamic + const loraModel = getWorkflowLora(request.settings.imageGenerationWorkflow) + + params = { + // for some reason this model doesn't support arbitrary width and height, + // at least not at the time of writing.. 
+ aspect_ratio: aspectRatio, + + hf_lora: workflowValues['hf_lora'] || '', + + prompt: [ + loraModel?.trigger, + request.prompts.image.positive + ].filter(x => x).join(' '), + + disable_safety_checker: !request.settings.censorNotForAllAudiencesContent, + } + } else if ( request.settings.imageGenerationWorkflow.data === 'zsxkib/pulid' ) { @@ -43,11 +74,21 @@ export async function resolveSegment( main_face_image: request.prompts.image.identity, } } + + /* + console.log("debug:", { + model: request.settings.imageGenerationWorkflow.data, + params, + }) + */ const response = (await replicate.run( - request.settings.imageGenerationWorkflow as any, + request.settings.imageGenerationWorkflow.data as any, { input: params } )) as any - segment.assetUrl = `${response.output || ''}` + + + segment.assetUrl = `${response[0] || ''}` + } else if (request.segment.category === ClapSegmentCategory.DIALOGUE) { const response = (await replicate.run( request.settings.voiceGenerationWorkflow.data as any, @@ -55,20 +96,22 @@ export async function resolveSegment( input: { text: request.prompts.voice.positive, audio: request.prompts.voice.identity, + disable_safety_checker: !request.settings.censorNotForAllAudiencesContent, }, } )) as any - segment.assetUrl = `${response.output || ''}` + segment.assetUrl = `${response[0] || ''}` } else if (request.segment.category === ClapSegmentCategory.VIDEO) { const response = (await replicate.run( request.settings.videoGenerationWorkflow.data as any, { input: { image: request.prompts.video.image, + disable_safety_checker: !request.settings.censorNotForAllAudiencesContent, }, } )) as any - segment.assetUrl = `${response.output || ''}` + segment.assetUrl = `${response[0] || ''}` } else { throw new Error( `Clapper doesn't support ${request.segment.category} generation for provider "Replicate". 
Please open a pull request with (working code) to solve this!` diff --git a/packages/app/src/app/api/resolve/route.ts b/packages/app/src/app/api/resolve/route.ts index 24ec2b15b5f90bf988478f592984a396f2cc4345..7983abedbc550bad0b6fdd179addfec1aca3fe5f 100644 --- a/packages/app/src/app/api/resolve/route.ts +++ b/packages/app/src/app/api/resolve/route.ts @@ -132,6 +132,9 @@ export async function POST(req: NextRequest) { segment.outputType === ClapOutputType.AUDIO || segment.outputType === ClapOutputType.VIDEO ) { + + + // TODO this should be down in the browser side, so that we can scale better const { durationInMs, hasAudio } = await getMediaInfo(segment.assetUrl) segment.assetDurationInMs = durationInMs diff --git a/packages/app/src/components/toolbars/top-menu/lists/AssistantWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/AssistantWorkflows.tsx index d34915ccc0ad1fa1c65d50b82f59f6ecc0f85c33..72b72075a87c66ca871c32cf6c03fd3b6d1ae1e4 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/AssistantWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/AssistantWorkflows.tsx @@ -21,18 +21,20 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.ASSISTANT export function AssistantWorkflows() { - const workflowId = useSettings((s) => s.assistantWorkflow) - const setWorkflowId = useSettings((s) => s.setAssistantWorkflow) + const assistantWorkflow = useSettings((s) => s.assistantWorkflow) + const setAssistantWorkflow = useSettings((s) => s.setAssistantWorkflow) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.ASSISTANT } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(assistantWorkflow, category) if (!nbProviders) { return null @@ -65,7 +67,7 @@ export function AssistantWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +75,7 @@ export function AssistantWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setAssistantWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/ImageDepthWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/ImageDepthWorkflows.tsx index 44948c785730d7cfa9f2313b42da2cdaa16eccfe..7dde58fc45879cdef7325998ed82ed03e6d20e42 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/ImageDepthWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/ImageDepthWorkflows.tsx @@ -21,18 +21,20 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.IMAGE_DEPTH_MAPPING export function ImageDepthWorkflows() { - const workflowId = useSettings((s) => s.imageDepthWorkflow) - const setWorkflowId = useSettings((s) => s.setImageDepthWorkflow) + const imageDepthWorkflow = useSettings((s) => s.imageDepthWorkflow) + const setImageDepthWorkflow = useSettings((s) => s.setImageDepthWorkflow) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, 
nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.IMAGE_DEPTH_MAPPING } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(imageDepthWorkflow, category) if (!nbProviders) { return null @@ -65,7 +67,7 @@ export function ImageDepthWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +75,7 @@ export function ImageDepthWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setImageDepthWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/ImageGenerationWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/ImageGenerationWorkflows.tsx index e851e87d862078b4ea194400e657ee99aedfca3a..005cf11fdbe545a9e233491ffb3a6696fa766e82 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/ImageGenerationWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/ImageGenerationWorkflows.tsx @@ -1,6 +1,10 @@ 'use client' -import { ClapWorkflowCategory, ClapWorkflowProvider } from '@aitube/clap' +import { + ClapInputCategory, + ClapWorkflowCategory, + ClapWorkflowProvider, +} from '@aitube/clap' import { MenubarCheckboxItem, @@ -21,23 +25,33 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' +import { Lora } from '@/services/editors/workflow-editor/workflows/common/types' +import { getWorkflowLora } from '@/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora' +import { getWorkflowInputField } from '@/services/editors/workflow-editor/workflows/common/loras/getWorkflowInputField' -export function ImageGenerationWorkflows() { - const workflowId = useSettings((s) => s.imageGenerationWorkflow) - const setWorkflowId = useSettings((s) => s.setImageGenerationWorkflow) - const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) +import { LoraModelList } from './LoraModelList' - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.IMAGE_GENERATION } +const category = ClapWorkflowCategory.IMAGE_GENERATION + +export function ImageGenerationWorkflows() { + const imageGenerationWorkflow = useSettings((s) => s.imageGenerationWorkflow) + const setImageGenerationWorkflow = useSettings( + (s) => s.setImageGenerationWorkflow ) + const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflow } = findWorkflows(workflows, { workflowId }) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) if (!nbProviders) { return null } + const workflow = parseWorkflow(imageGenerationWorkflow, category) + const workflowLora = getWorkflowLora(workflow) + return ( @@ -62,26 +76,66 @@ export function ImageGenerationWorkflows() { - {workflows?.map((w) => ( - { - if (hasNoPublicAPI(w)) { + {workflows?.map((w) => { + // if this workflow has at least one field of type lora + const loraFieldName = getWorkflowInputField( + w, + ClapInputCategory.LORA + )?.id + if (loraFieldName) { + return ( + { + + console.log(`onChange:`, { + w, + newLora, + loraFieldName, + repoUrl: newLora?.repoOrUrl, + newWorkflowValue: { + ...w, + inputValues: { + ...w.inputValues, + [loraFieldName]: newLora?.repoOrUrl || '', + }, + }, + }) + 
setImageGenerationWorkflow({ + ...w, + inputValues: { + ...w.inputValues, + [loraFieldName]: newLora?.repoOrUrl || '', + }, + }) + }} + /> + ) + } + + return ( + { + if (hasNoPublicAPI(w)) { + e.stopPropagation() + e.preventDefault() + return false + } + setImageGenerationWorkflow(w) e.stopPropagation() e.preventDefault() return false - } - setWorkflowId(w.id) - e.stopPropagation() - e.preventDefault() - return false - }} - > - {w.label} - - ))} + }} + > + {w.label} + + ) + })} ))} diff --git a/packages/app/src/components/toolbars/top-menu/lists/ImageSegmentationWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/ImageSegmentationWorkflows.tsx index 6e217d267004412ab90a791140cec3cfc5c9ded6..f3d794c0c8510f9fae91c093de4a61a6d9aa4e3d 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/ImageSegmentationWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/ImageSegmentationWorkflows.tsx @@ -21,18 +21,24 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.IMAGE_SEGMENTATION export function ImageSegmentationWorkflows() { - const workflowId = useSettings((s) => s.imageSegmentationWorkflow) - const setWorkflowId = useSettings((s) => s.setImageSegmentationWorkflow) + const imageSegmentationWorkflow = useSettings( + (s) => s.imageSegmentationWorkflow + ) + const setImageSegmentationWorkflow = useSettings( + (s) => s.setImageSegmentationWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.IMAGE_SEGMENTATION } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(imageSegmentationWorkflow, category) if (!nbProviders) { return null @@ -65,7 +71,7 @@ export function ImageSegmentationWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +79,7 @@ export function ImageSegmentationWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setImageSegmentationWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/ImageUpscalingWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/ImageUpscalingWorkflows.tsx index fc8e46df2d2d9d4fb27b50c1eb3e9c7f1b08a3af..5dc329c7fa2e2cfb618682d0baf2eb0e41b5f0b4 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/ImageUpscalingWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/ImageUpscalingWorkflows.tsx @@ -21,18 +21,22 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.IMAGE_UPSCALING export function ImageUpscalingWorkflows() { - const workflowId = useSettings((s) => s.imageUpscalingWorkflow) - const setWorkflowId = useSettings((s) => s.setImageUpscalingWorkflow) + const imageUpscalingWorkflow = useSettings((s) => s.imageUpscalingWorkflow) + const setImageUpscalingWorkflow = useSettings( + (s) => s.setImageUpscalingWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( 
- availableWorkflows, - { category: ClapWorkflowCategory.IMAGE_UPSCALING } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(imageUpscalingWorkflow, category) if (!nbProviders) { return null @@ -65,7 +69,7 @@ export function ImageUpscalingWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +77,7 @@ export function ImageUpscalingWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setImageUpscalingWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/LoraModelList.tsx b/packages/app/src/components/toolbars/top-menu/lists/LoraModelList.tsx new file mode 100644 index 0000000000000000000000000000000000000000..93b1ad03f86fc49d002d550cfde8b96de3e334e8 --- /dev/null +++ b/packages/app/src/components/toolbars/top-menu/lists/LoraModelList.tsx @@ -0,0 +1,66 @@ +'use client' + +import { ClapWorkflow } from '@aitube/clap' + +import { + MenubarCheckboxItem, + MenubarContent, + MenubarItem, + MenubarMenu, + MenubarSeparator, + MenubarSub, + MenubarSubContent, + MenubarSubTrigger, +} from '@/components/ui/menubar' + +import { defaultLoraModels } from '@/services/editors/workflow-editor/workflows/common/loras' +import { Lora } from '@/services/editors/workflow-editor/workflows/common/types' + +export function LoraModelList({ + workflow, + currentLora, + onChange, +}: { + workflow: ClapWorkflow + currentLora?: Lora + onChange: (lora?: Lora) => void +}) { + return ( + + +
+ <MenubarSub> + <MenubarSubTrigger> + {workflow.label} + {currentLora ? `(${currentLora.label})` : `(no lora selected)`} + </MenubarSubTrigger> + <MenubarSubContent> + <MenubarItem + onClick={(e) => { + onChange(undefined) + e.stopPropagation() + e.preventDefault() + return false + }} + > + No LoRA + </MenubarItem> + {defaultLoraModels.map((lora: Lora) => ( + <MenubarCheckboxItem + key={lora.repoOrUrl} + checked={currentLora?.repoOrUrl === lora.repoOrUrl} + onClick={(e) => { + onChange(lora) + e.stopPropagation() + e.preventDefault() + return false + }} + > + {lora.label} + </MenubarCheckboxItem> + ))} + </MenubarSubContent> + </MenubarSub>
+ ) +} diff --git a/packages/app/src/components/toolbars/top-menu/lists/MusicGenerationWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/MusicGenerationWorkflows.tsx index 26afac98d429d92a3e50d6937b2b897852521623..85b4806bc6341d82a83667e5bb88225920ce4d4b 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/MusicGenerationWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/MusicGenerationWorkflows.tsx @@ -21,18 +21,22 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.MUSIC_GENERATION export function MusicGenerationWorkflows() { - const workflowId = useSettings((s) => s.musicGenerationWorkflow) - const setWorkflowId = useSettings((s) => s.setMusicGenerationWorkflow) + const musicGenerationWorkflow = useSettings((s) => s.musicGenerationWorkflow) + const setMusicGenerationWorkflow = useSettings( + (s) => s.setMusicGenerationWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.MUSIC_GENERATION } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(musicGenerationWorkflow, category) if (!nbProviders) { return null @@ -65,7 +69,7 @@ export function MusicGenerationWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +77,7 @@ export function MusicGenerationWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setMusicGenerationWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/SoundGenerationWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/SoundGenerationWorkflows.tsx index 78c5edc46ce2b28f42b0f09a41117da8894b07c2..ad22d9bd1f85247b3c821ff7b884996290cd59d5 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/SoundGenerationWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/SoundGenerationWorkflows.tsx @@ -21,18 +21,22 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.SOUND_GENERATION export function SoundGenerationWorkflows() { - const workflowId = useSettings((s) => s.soundGenerationWorkflow) - const setWorkflowId = useSettings((s) => s.setSoundGenerationWorkflow) + const soundGenerationWorkflow = useSettings((s) => s.soundGenerationWorkflow) + const setSoundGenerationWorkflow = useSettings( + (s) => s.setSoundGenerationWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.SOUND_GENERATION } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(soundGenerationWorkflow, category) if (!nbProviders) { return null @@ -65,7 +69,7 @@ export function SoundGenerationWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +77,7 @@ export function 
SoundGenerationWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setSoundGenerationWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/VideoDepthWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/VideoDepthWorkflows.tsx index e81a9c0a190d5364899713de7a45c02f9df0b9df..db00363c43fd899b1fb2f77e4b816f96d1c1988f 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/VideoDepthWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/VideoDepthWorkflows.tsx @@ -21,18 +21,20 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.VIDEO_DEPTH_MAPPING export function VideoDepthWorkflows() { - const workflowId = useSettings((s) => s.videoDepthWorkflow) - const setWorkflowId = useSettings((s) => s.setVideoDepthWorkflow) + const videoDepthWorkflow = useSettings((s) => s.videoDepthWorkflow) + const setVideoDepthWorkflow = useSettings((s) => s.setVideoDepthWorkflow) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.VIDEO_DEPTH_MAPPING } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(videoDepthWorkflow, category) if (!nbProviders) { return null @@ -65,7 +67,7 @@ export function VideoDepthWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +75,7 @@ export function VideoDepthWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setVideoDepthWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/VideoGenerationWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/VideoGenerationWorkflows.tsx index 16b81af49c469afdd4aff6a067c3dbd768bdefb5..94e90b06a97ea675a7a9652eaa1964c7998be9b5 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/VideoGenerationWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/VideoGenerationWorkflows.tsx @@ -21,18 +21,22 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.VIDEO_GENERATION export function VideoGenerationWorkflows() { - const workflowId = useSettings((s) => s.videoGenerationWorkflow) - const setWorkflowId = useSettings((s) => s.setVideoGenerationWorkflow) + const videoGenerationWorkflow = useSettings((s) => s.videoGenerationWorkflow) + const setVideoGenerationWorkflow = useSettings( + (s) => s.setVideoGenerationWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.VIDEO_GENERATION } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(videoGenerationWorkflow, category) if (!nbProviders) { return null @@ -65,7 +69,7 @@ export function VideoGenerationWorkflows() { {workflows?.map((w) => 
( { if (hasNoPublicAPI(w)) { @@ -73,7 +77,7 @@ export function VideoGenerationWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setVideoGenerationWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/VideoSegmentationWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/VideoSegmentationWorkflows.tsx index e0eab94e357c7820dbf79ab4a150b035a4580a1e..8415219adc2c169415dbfce17f13d8d1ebbb05ea 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/VideoSegmentationWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/VideoSegmentationWorkflows.tsx @@ -21,18 +21,24 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.VIDEO_SEGMENTATION export function VideoSegmentationWorkflows() { - const workflowId = useSettings((s) => s.videoSegmentationWorkflow) - const setWorkflowId = useSettings((s) => s.setVideoSegmentationWorkflow) + const videoSegmentationWorkflow = useSettings( + (s) => s.videoSegmentationWorkflow + ) + const setVideoSegmentationWorkflow = useSettings( + (s) => s.setVideoSegmentationWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.VIDEO_SEGMENTATION } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category: ClapWorkflowCategory.VIDEO_SEGMENTATION, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(videoSegmentationWorkflow, category) if (!nbProviders) { return null @@ -65,7 +71,7 @@ export function VideoSegmentationWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +79,7 @@ export function VideoSegmentationWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setVideoSegmentationWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/VideoUpscalingWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/VideoUpscalingWorkflows.tsx index ebc055df4f3ccf0ccfa902d1bc13cdc0fd6c86ad..c47023e57199cf0485d593e6fe5c3d6faf31e80b 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/VideoUpscalingWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/VideoUpscalingWorkflows.tsx @@ -21,18 +21,22 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.VIDEO_UPSCALING export function VideoUpscalingWorkflows() { - const workflowId = useSettings((s) => s.videoUpscalingWorkflow) - const setWorkflowId = useSettings((s) => s.setVideoUpscalingWorkflow) + const videoUpscalingWorkflow = useSettings((s) => s.videoUpscalingWorkflow) + const setVideoUpscalingWorkflow = useSettings( + (s) => s.setVideoUpscalingWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.VIDEO_UPSCALING } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = 
findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(videoUpscalingWorkflow, category) if (!nbProviders) { return null @@ -65,7 +69,7 @@ export function VideoUpscalingWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +77,7 @@ export function VideoUpscalingWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setVideoUpscalingWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/VoiceGenerationWorkflows.tsx b/packages/app/src/components/toolbars/top-menu/lists/VoiceGenerationWorkflows.tsx index 5e03d99292428ac3d89bbde3ac6543a73aa6c0cb..9085e480512b51fec5840dfa4845c7401f9a3b63 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/VoiceGenerationWorkflows.tsx +++ b/packages/app/src/components/toolbars/top-menu/lists/VoiceGenerationWorkflows.tsx @@ -21,18 +21,22 @@ import { ClapWorkflowProviderLogo, ClapWorkflowProviderName, } from '@/components/core/providers' +import { parseWorkflow } from '@/services/settings/workflows/parseWorkflow' + +const category = ClapWorkflowCategory.VOICE_GENERATION export function VoiceGenerationWorkflows() { - const workflowId = useSettings((s) => s.voiceGenerationWorkflow) - const setWorkflowId = useSettings((s) => s.setVoiceGenerationWorkflow) + const voiceGenerationWorkflow = useSettings((s) => s.voiceGenerationWorkflow) + const setVoiceGenerationWorkflow = useSettings( + (s) => s.setVoiceGenerationWorkflow + ) const availableWorkflows = useWorkflowEditor((s) => s.availableWorkflows) - const { workflows, providers, nbProviders } = findWorkflows( - availableWorkflows, - { category: ClapWorkflowCategory.VOICE_GENERATION } - ) + const { providers, nbProviders } = findWorkflows(availableWorkflows, { + category, + }) - const { workflow } = findWorkflows(workflows, { workflowId }) + const workflow = parseWorkflow(voiceGenerationWorkflow, category) if (!nbProviders) { return null @@ -65,7 +69,7 @@ export function VoiceGenerationWorkflows() { {workflows?.map((w) => ( { if (hasNoPublicAPI(w)) { @@ -73,7 +77,7 @@ export function VoiceGenerationWorkflows() { e.preventDefault() return false } - setWorkflowId(w.id) + setVoiceGenerationWorkflow(w) e.stopPropagation() e.preventDefault() return false diff --git a/packages/app/src/components/toolbars/top-menu/lists/getWorkflowProviders.ts b/packages/app/src/components/toolbars/top-menu/lists/getWorkflowProviders.ts index 6c0d896cf00840bdfa8547c6d9da72c05f552b2c..9894310ba62c4b5cd347c370ebf71162fbc2f07c 100644 --- a/packages/app/src/components/toolbars/top-menu/lists/getWorkflowProviders.ts +++ b/packages/app/src/components/toolbars/top-menu/lists/getWorkflowProviders.ts @@ -5,6 +5,15 @@ import { ClapWorkflowProvider, } from '@aitube/clap' +export type WorkflowSearchResults = { + workflow?: ClapWorkflow + workflows: ClapWorkflow[] + nbWorkflows: number + providers: Partial> + nbProviders: number + workflowIds: Record +} + /** * Helper to find workflows by id, category, provider or engine * @@ -21,14 +30,7 @@ export function findWorkflows( provider?: ClapWorkflowProvider engine?: ClapWorkflowEngine } -): { - workflow?: ClapWorkflow - workflows: ClapWorkflow[] - nbWorkflows: number - providers: Partial> - nbProviders: number - workflowIds: Record -} { +): WorkflowSearchResults { const workflows: ClapWorkflow[] = [] const providers: Partial> = {} const workflowIds: Record = {} diff --git a/packages/app/src/lib/utils/decodeOutput.ts b/packages/app/src/lib/utils/decodeOutput.ts index 
346bbf57bed7240b00e94b4425095bb6e097e26c..ae078d6ebd9580febff7f00aa230e95e490fadc9 100644 --- a/packages/app/src/lib/utils/decodeOutput.ts +++ b/packages/app/src/lib/utils/decodeOutput.ts @@ -14,6 +14,7 @@ export async function decodeOutput(input: any): Promise { ? urlOrBase64 : await fetchContentToBase64(urlOrBase64) + if (base64Url.startsWith('data:image/')) { if ( base64Url.startsWith('data:image/jpeg') || diff --git a/packages/app/src/lib/utils/fetchContentToBase64.ts b/packages/app/src/lib/utils/fetchContentToBase64.ts index 1bf93e71ea15c5d6892fe28901d9de1b269427e3..8e687a1ae1244cf62cb5837f9e6d6b3bdb13994d 100644 --- a/packages/app/src/lib/utils/fetchContentToBase64.ts +++ b/packages/app/src/lib/utils/fetchContentToBase64.ts @@ -1,4 +1,7 @@ export async function fetchContentToBase64(url: string) { + + const predictedFormat = url.split(".").pop()?.trim().toLowerCase() + const res = await fetch(url, { method: 'GET', headers: { @@ -11,5 +14,19 @@ export async function fetchContentToBase64(url: string) { const blob = await res.blob() const buffer = Buffer.from(await blob.arrayBuffer()) - return 'data:' + blob.type + ';base64,' + buffer.toString('base64') + // some providers such as Replicate return a generic octet-stream type in the headers + const type = blob.type === "application/octet-stream" + ? (predictedFormat === "webp" ? "image/webp" : + predictedFormat === "jpeg" ? "image/jpeg" : + predictedFormat === "jpg" ? "image/jpeg" : + predictedFormat === "png" ? "image/png" : + predictedFormat === "avif" ? "image/avif" : + predictedFormat === "heic" ? "image/heic" : + predictedFormat === "mp4" ? "video/mp4" : + predictedFormat === "mp3" ? "audio/mp3" : + predictedFormat === "wav" ? "audio/wav" : + "application/octet-stream" + ) : blob.type + + return 'data:' + type + ';base64,' + buffer.toString('base64') } diff --git a/packages/app/src/services/editors/filter-editor/filters/analogLens.ts b/packages/app/src/services/editors/filter-editor/filters/analogLens.ts index 9221752b901ec2c7e18da134abe916bbbbc1654c..c314f1be7a77c47f17776f126764600f1b94dc06 100644 --- a/packages/app/src/services/editors/filter-editor/filters/analogLens.ts +++ b/packages/app/src/services/editors/filter-editor/filters/analogLens.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const analogLensSimulator: Filter = { @@ -8,6 +9,7 @@ export const analogLensSimulator: Filter = { id: 'chromaticAberration', label: 'Chromatic aberration', description: 'Chromatic aberration strength', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 0.05, @@ -17,6 +19,7 @@ export const analogLensSimulator: Filter = { id: 'vignetteStrength', label: 'Vignette strength', description: 'Vignette strength', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -26,6 +29,7 @@ export const analogLensSimulator: Filter = { id: 'vignetteRadius', label: 'Vignette radius', description: 'Vignette radius', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -35,6 +39,7 @@ export const analogLensSimulator: Filter = { id: 'distortion', label: 'Distortion', description: 'Lens distortion', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: -1, maxValue: 1, @@ -44,6 +49,7 @@ export const analogLensSimulator: Filter = { id: 'bloomStrength', label: 'Bloom strength', description: 'Bloom strength', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -53,6 
+59,7 @@ export const analogLensSimulator: Filter = { id: 'bloomRadius', label: 'Bloom radius', description: 'Bloom radius', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 1, maxValue: 10, @@ -62,6 +69,7 @@ export const analogLensSimulator: Filter = { id: 'dofFocusDistance', label: 'DOF focus distance', description: 'Depth of field focus distance', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -71,6 +79,7 @@ export const analogLensSimulator: Filter = { id: 'dofFocusRange', label: 'DOF focus range', description: 'Depth of field focus range', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0.01, maxValue: 1, @@ -80,6 +89,7 @@ export const analogLensSimulator: Filter = { id: 'dofBlurStrength', label: 'DOF blur strength', description: 'Depth of field blur strength', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, diff --git a/packages/app/src/services/editors/filter-editor/filters/cinematic.ts b/packages/app/src/services/editors/filter-editor/filters/cinematic.ts index debfaa68cb0f6a3cbb71307da4ed057270df7b7a..ecb8f8670a29bbb583bf7f4bb46272192d70db56 100644 --- a/packages/app/src/services/editors/filter-editor/filters/cinematic.ts +++ b/packages/app/src/services/editors/filter-editor/filters/cinematic.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const cinematic: Filter = { @@ -8,6 +9,7 @@ export const cinematic: Filter = { id: 'preset', label: 'Preset', description: 'Cinematic color preset', + category: ClapInputCategory.UNKNOWN, type: 'string', allowedValues: [ 'Blade Runner', @@ -22,6 +24,7 @@ export const cinematic: Filter = { id: 'intensity', label: 'Intensity', description: 'Intensity of the cinematic effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -31,6 +34,7 @@ export const cinematic: Filter = { id: 'contrast', label: 'Contrast', description: 'Image contrast', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0.5, maxValue: 2, @@ -40,6 +44,7 @@ export const cinematic: Filter = { id: 'grain', label: 'Film Grain', description: 'Intensity of film grain effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -49,6 +54,7 @@ export const cinematic: Filter = { id: 'blur', label: 'Blur', description: 'Slight blur effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, diff --git a/packages/app/src/services/editors/filter-editor/filters/colorMapping.ts b/packages/app/src/services/editors/filter-editor/filters/colorMapping.ts index 1b9333e6be9a8d50dd6c0be4050ed538e08ffcca..e8d7b981c1872adc8bd847f9672c3d995aa53594 100644 --- a/packages/app/src/services/editors/filter-editor/filters/colorMapping.ts +++ b/packages/app/src/services/editors/filter-editor/filters/colorMapping.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const colorMapping: Filter = { @@ -8,6 +9,7 @@ export const colorMapping: Filter = { id: 'redMultiplier', label: 'Red multiplier', description: 'Red channel multiplier', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 2, @@ -17,6 +19,7 @@ export const colorMapping: Filter = { id: 'greenMultiplier', label: 'Green multiplier', description: 'Green channel multiplier', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 2, @@ -26,6 +29,7 @@ 
export const colorMapping: Filter = { id: 'blueMultiplier', label: 'Blue multiplier', description: 'Blue channel multiplier', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 2, diff --git a/packages/app/src/services/editors/filter-editor/filters/colorTemperature.ts b/packages/app/src/services/editors/filter-editor/filters/colorTemperature.ts index f61a027cd0cf9a572acdd2e1281e27d435dce071..628243285d957d64334f990d7f13074b8afd99c2 100644 --- a/packages/app/src/services/editors/filter-editor/filters/colorTemperature.ts +++ b/packages/app/src/services/editors/filter-editor/filters/colorTemperature.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const colorTemperature: Filter = { @@ -9,6 +10,7 @@ export const colorTemperature: Filter = { label: 'Temperature', description: 'Color temperature in Kelvin', type: 'number', + category: ClapInputCategory.UNKNOWN, minValue: 1000, maxValue: 40000, defaultValue: 6500, @@ -17,6 +19,7 @@ export const colorTemperature: Filter = { id: 'tint', label: 'Tint', description: 'Green-Magenta tint', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: -100, maxValue: 100, diff --git a/packages/app/src/services/editors/filter-editor/filters/crossProcessing.ts b/packages/app/src/services/editors/filter-editor/filters/crossProcessing.ts index 8b230a8f2c1208365eab9ee54b2dbf8038e8b09b..3e1a5000ce2fb54d479f2c26a08be043915b19dd 100644 --- a/packages/app/src/services/editors/filter-editor/filters/crossProcessing.ts +++ b/packages/app/src/services/editors/filter-editor/filters/crossProcessing.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const crossProcessing: Filter = { @@ -9,6 +10,7 @@ export const crossProcessing: Filter = { label: 'Intensity', description: 'Intensity of the cross-processing effect', type: 'number', + category: ClapInputCategory.UNKNOWN, minValue: 0, maxValue: 1, defaultValue: 0.5, @@ -17,6 +19,7 @@ export const crossProcessing: Filter = { id: 'contrastBoost', label: 'Contrast boost', description: 'Amount of contrast boost', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -26,6 +29,7 @@ export const crossProcessing: Filter = { id: 'colorShift', label: 'Color shift', description: 'Direction of color shift', + category: ClapInputCategory.UNKNOWN, type: 'string', allowedValues: ['Cool', 'Warm'], defaultValue: 'Cool', diff --git a/packages/app/src/services/editors/filter-editor/filters/filmDegradation.ts b/packages/app/src/services/editors/filter-editor/filters/filmDegradation.ts index caf3a3c041c699d2fcc58d0fd2f801b1b1c29ecf..42cae0a97905f7b8f31bcbe29590cd3a8e440bf9 100644 --- a/packages/app/src/services/editors/filter-editor/filters/filmDegradation.ts +++ b/packages/app/src/services/editors/filter-editor/filters/filmDegradation.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const filmDegradation: Filter = { @@ -8,6 +9,7 @@ export const filmDegradation: Filter = { id: 'scratchesIntensity', label: 'Scratches intensity', description: 'Intensity of film scratches', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -17,6 +19,7 @@ export const filmDegradation: Filter = { id: 'dustIntensity', label: 'Dust intensity', description: 'Intensity of dust and spots', + category: ClapInputCategory.UNKNOWN, type: 'number', 
minValue: 0, maxValue: 1, @@ -26,6 +29,7 @@ export const filmDegradation: Filter = { id: 'grainIntensity', label: 'Grain intensity', description: 'Intensity of film grain', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -35,6 +39,7 @@ export const filmDegradation: Filter = { id: 'colorFading', label: 'Color fading', description: 'Color fading effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -44,6 +49,7 @@ export const filmDegradation: Filter = { id: 'vignettingIntensity', label: 'Vignetting intensity', description: 'Intensity of vignetting effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -53,6 +59,7 @@ export const filmDegradation: Filter = { id: 'flickerIntensity', label: 'Flicker intensity', description: 'Intensity of light flickering', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -62,6 +69,7 @@ export const filmDegradation: Filter = { id: 'lightLeakIntensity', label: 'Light leak intensity', description: 'Intensity of light leaks', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -71,6 +79,7 @@ export const filmDegradation: Filter = { id: 'filmType', label: 'Film type', description: 'Type of film to simulate', + category: ClapInputCategory.UNKNOWN, type: 'string', allowedValues: ['color', 'blackAndWhite', 'sepia'], defaultValue: 'color', diff --git a/packages/app/src/services/editors/filter-editor/filters/infrared.ts b/packages/app/src/services/editors/filter-editor/filters/infrared.ts index e6a3db00c09a9b7f0434507de3a13c382e9ff485..7135673562df3a114b63e29e32087e6fee9c4928 100644 --- a/packages/app/src/services/editors/filter-editor/filters/infrared.ts +++ b/packages/app/src/services/editors/filter-editor/filters/infrared.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const infraredBlackAndWhite: Filter = { @@ -8,6 +9,7 @@ export const infraredBlackAndWhite: Filter = { id: 'contrast', label: 'Contrast', description: 'Image contrast', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0.5, maxValue: 2.0, @@ -17,6 +19,7 @@ export const infraredBlackAndWhite: Filter = { id: 'grain', label: 'Grain', description: 'Film grain intensity', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -26,6 +29,7 @@ export const infraredBlackAndWhite: Filter = { id: 'glow', label: 'Glow', description: 'Infrared glow effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, diff --git a/packages/app/src/services/editors/filter-editor/filters/lomography.ts b/packages/app/src/services/editors/filter-editor/filters/lomography.ts index c634b0543e4997c3c1fb7b1fe1d560ec34804ed4..1a25a8f6bfc74e3928a5bf54f7520de292ab48bb 100644 --- a/packages/app/src/services/editors/filter-editor/filters/lomography.ts +++ b/packages/app/src/services/editors/filter-editor/filters/lomography.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const lomography: Filter = { @@ -8,6 +9,7 @@ export const lomography: Filter = { id: 'saturation', label: 'Saturation', description: 'Color saturation', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 2, @@ -17,6 +19,7 @@ export const lomography: Filter = { id: 'contrast', label: 'Contrast', description: 'Image contrast', + category: 
ClapInputCategory.UNKNOWN, type: 'number', minValue: 0.5, maxValue: 2, @@ -26,6 +29,7 @@ export const lomography: Filter = { id: 'vignetteIntensity', label: 'Vignette intensity', description: 'Intensity of vignette effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -35,6 +39,7 @@ export const lomography: Filter = { id: 'lightLeakIntensity', label: 'Light leak intensity', description: 'Intensity of light leak effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, diff --git a/packages/app/src/services/editors/filter-editor/filters/splitToning.ts b/packages/app/src/services/editors/filter-editor/filters/splitToning.ts index d85dda8451233d024e5fbc6d733cf3738a608cf1..b29887bb52281a553139d24fe5a25c5b1fa09e9a 100644 --- a/packages/app/src/services/editors/filter-editor/filters/splitToning.ts +++ b/packages/app/src/services/editors/filter-editor/filters/splitToning.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const splitToning: Filter = { @@ -8,6 +9,7 @@ export const splitToning: Filter = { id: 'highlightColor', label: 'Highlight color', description: 'Color for highlights', + category: ClapInputCategory.UNKNOWN, type: 'string', allowedValues: ['Red', 'Green', 'Blue', 'Yellow', 'Cyan', 'Magenta'], defaultValue: 'Yellow', @@ -16,6 +18,7 @@ export const splitToning: Filter = { id: 'shadowColor', label: 'Shadow color', description: 'Color for shadows', + category: ClapInputCategory.UNKNOWN, type: 'string', allowedValues: ['Red', 'Green', 'Blue', 'Yellow', 'Cyan', 'Magenta'], defaultValue: 'Blue', @@ -24,6 +27,7 @@ export const splitToning: Filter = { id: 'balance', label: 'Balance', description: 'Balance between highlights and shadows', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: -1, maxValue: 1, @@ -33,6 +37,7 @@ export const splitToning: Filter = { id: 'intensity', label: 'Intensity', description: 'Intensity of the split toning effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, diff --git a/packages/app/src/services/editors/filter-editor/filters/toneMapping.ts b/packages/app/src/services/editors/filter-editor/filters/toneMapping.ts index 50f0e6dad6c2482f0b50e7f31233cef6d9c19668..036fcb5b0c0e3c4e558d988e6691758adf07b49f 100644 --- a/packages/app/src/services/editors/filter-editor/filters/toneMapping.ts +++ b/packages/app/src/services/editors/filter-editor/filters/toneMapping.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const hdrToneMapping: Filter = { @@ -8,6 +9,7 @@ export const hdrToneMapping: Filter = { id: 'exposure', label: 'Exposure', description: 'Exposure adjustment', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: -2, maxValue: 2, @@ -17,6 +19,7 @@ export const hdrToneMapping: Filter = { id: 'contrast', label: 'Contrast', description: 'Contrast adjustment', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0.5, maxValue: 2, @@ -26,6 +29,7 @@ export const hdrToneMapping: Filter = { id: 'saturation', label: 'Saturation', description: 'Color saturation', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 2, @@ -35,6 +39,7 @@ export const hdrToneMapping: Filter = { id: 'highlights', label: 'Highlights', description: 'Highlight adjustment', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: -1, maxValue: 1, @@ -44,6 +49,7 @@ 
export const hdrToneMapping: Filter = { id: 'shadows', label: 'Shadows', description: 'Shadow adjustment', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: -1, maxValue: 1, diff --git a/packages/app/src/services/editors/filter-editor/filters/vintageFilm.ts b/packages/app/src/services/editors/filter-editor/filters/vintageFilm.ts index ed4c782a554f50d47031b3bf58d75757713e4d53..7f5098ae009c33688db164a6915b921e2bb62a11 100644 --- a/packages/app/src/services/editors/filter-editor/filters/vintageFilm.ts +++ b/packages/app/src/services/editors/filter-editor/filters/vintageFilm.ts @@ -1,3 +1,4 @@ +import { ClapInputCategory } from '@aitube/clap' import { Filter } from '@aitube/clapper-services' export const vintageFilm: Filter = { @@ -8,6 +9,7 @@ export const vintageFilm: Filter = { id: 'preset', label: 'Preset', description: 'Vintage film stock preset', + category: ClapInputCategory.UNKNOWN, type: 'string', allowedValues: [ 'Kodachrome 64', @@ -47,6 +49,7 @@ export const vintageFilm: Filter = { id: 'intensity', label: 'Intensity', description: 'Intensity of the film stock effect', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -56,6 +59,7 @@ export const vintageFilm: Filter = { id: 'grain', label: 'Grain', description: 'Film grain intensity', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -65,6 +69,7 @@ export const vintageFilm: Filter = { id: 'ageEffect', label: 'Age effect', description: 'Simulated age of the film', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 1, @@ -74,6 +79,7 @@ export const vintageFilm: Filter = { id: 'colorShift', label: 'Color shift', description: 'Color shift adjustment', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: -1, maxValue: 1, @@ -83,6 +89,7 @@ export const vintageFilm: Filter = { id: 'contrast', label: 'Contrast', description: 'Contrast adjustment', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0.5, maxValue: 2, @@ -92,6 +99,7 @@ export const vintageFilm: Filter = { id: 'saturation', label: 'Saturation', description: 'Saturation adjustment', + category: ClapInputCategory.UNKNOWN, type: 'number', minValue: 0, maxValue: 2, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/aitube/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/aitube/index.ts index 1b262784274f2463b0c8135c2436c1891765787b..20bbac018dd3630029dc82460b64be376e542949 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/aitube/index.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/aitube/index.ts @@ -25,6 +25,7 @@ export const aitubeWorkflows: ClapWorkflow[] = [ author: 'AiTube.at', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.OPENCLAP, category: ClapWorkflowCategory.IMAGE_GENERATION, provider: ClapWorkflowProvider.AITUBE, @@ -45,6 +46,7 @@ export const aitubeWorkflows: ClapWorkflow[] = [ author: 'AiTube.at', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.OPENCLAP, category: ClapWorkflowCategory.MUSIC_GENERATION, provider: ClapWorkflowProvider.AITUBE, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/anthropic/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/anthropic/index.ts index 997f57a8ebf7a5ac958c04e06af984391275bbf5..e2f05b538c3646ca74f085d89c71db84f0259407 100644 --- 
a/packages/app/src/services/editors/workflow-editor/workflows/anthropic/index.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/anthropic/index.ts @@ -25,6 +25,7 @@ export const anthropicWorkflows: ClapWorkflow[] = [ author: 'Anthropic', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, category: ClapWorkflowCategory.ASSISTANT, provider: ClapWorkflowProvider.ANTHROPIC, @@ -43,6 +44,7 @@ export const anthropicWorkflows: ClapWorkflow[] = [ author: 'Anthropic', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, category: ClapWorkflowCategory.ASSISTANT, provider: ClapWorkflowProvider.ANTHROPIC, @@ -61,6 +63,7 @@ export const anthropicWorkflows: ClapWorkflow[] = [ author: 'Anthropic', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, category: ClapWorkflowCategory.ASSISTANT, provider: ClapWorkflowProvider.ANTHROPIC, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts index 0c3c8b90e51cf22bc71aed58852afe5323b95661..def1b90b86c86d09c0f31cd29d3cd2fc731412e2 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/bigmodel/index.ts @@ -15,6 +15,7 @@ export const bigModelWorkflows: ClapWorkflow[] = [ author: '', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.BIGMODEL, category: ClapWorkflowCategory.VIDEO_GENERATION, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/cohere/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/cohere/index.ts index 5f2de36bf59f7d3ee871d1113ff20dbec18cb7a5..b3e04148fd5ece8c949cc405eb750df0da9fccb6 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/cohere/index.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/cohere/index.ts @@ -56,6 +56,7 @@ export const cohereWorkflows: ClapWorkflow[] = [ author: 'Cohere', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, category: ClapWorkflowCategory.ASSISTANT, provider: ClapWorkflowProvider.COHERE, @@ -74,6 +75,7 @@ export const cohereWorkflows: ClapWorkflow[] = [ author: 'Cohere', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, category: ClapWorkflowCategory.ASSISTANT, provider: ClapWorkflowProvider.COHERE, @@ -92,6 +94,7 @@ export const cohereWorkflows: ClapWorkflow[] = [ author: 'Cohere', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, category: ClapWorkflowCategory.ASSISTANT, provider: ClapWorkflowProvider.COHERE, @@ -110,6 +113,7 @@ export const cohereWorkflows: ClapWorkflow[] = [ author: 'Cohere', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, category: ClapWorkflowCategory.ASSISTANT, provider: ClapWorkflowProvider.COHERE, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/comfyicu/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/comfyicu/index.ts index 9d7158ca7a2bf9f318592c23955198399cbb3455..113b8be8347f7aa445e0916ce6c7aec5ffbdab5f 100644 --- 
a/packages/app/src/services/editors/workflow-editor/workflows/comfyicu/index.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/comfyicu/index.ts @@ -30,6 +30,7 @@ export const comfyicuWorkflows: ClapWorkflow[] = [ author: 'BFL (https://BlackForestLabs.ai)', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.COMFYUI_WORKFLOW, provider: ClapWorkflowProvider.COMFYICU, category: ClapWorkflowCategory.IMAGE_GENERATION, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/comfyui/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/comfyui/index.ts index 1ee4593fd1d9a64e00c5ae8f0c8454e4c8b3de51..de95c4edd859299cf3d0fb4036c84ecf816a6e23 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/comfyui/index.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/comfyui/index.ts @@ -20,6 +20,7 @@ export const comfyuiWorkflows: ClapWorkflow[] = [ author: '', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.COMFYUI_WORKFLOW, provider: ClapWorkflowProvider.COMFYUI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -52,6 +53,7 @@ export async function getDynamicComfyuiWorkflows(): Promise { author: 'You', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.COMFYUI_WORKFLOW, provider: ClapWorkflowProvider.COMFYUI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -70,6 +72,7 @@ export async function getDynamicComfyuiWorkflows(): Promise { author: 'You', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.COMFYUI_WORKFLOW, provider: ClapWorkflowProvider.COMFYUI, category: ClapWorkflowCategory.VIDEO_GENERATION, @@ -88,6 +91,7 @@ export async function getDynamicComfyuiWorkflows(): Promise { author: 'You', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.COMFYUI_WORKFLOW, provider: ClapWorkflowProvider.COMFYUI, category: ClapWorkflowCategory.VOICE_GENERATION, @@ -106,6 +110,7 @@ export async function getDynamicComfyuiWorkflows(): Promise { author: 'You', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.COMFYUI_WORKFLOW, provider: ClapWorkflowProvider.COMFYUI, category: ClapWorkflowCategory.MUSIC_GENERATION, @@ -124,6 +129,7 @@ export async function getDynamicComfyuiWorkflows(): Promise { author: 'You', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.COMFYUI_WORKFLOW, provider: ClapWorkflowProvider.COMFYUI, category: ClapWorkflowCategory.SOUND_GENERATION, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts b/packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts index b380769f1d333944c5942ae4fa3c1e6224f3891a..c2f6db164ed2946b84b4e4e78afea4ec702e1334 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/common/defaultValues.ts @@ -1,4 +1,5 @@ import { ClapInputField } from '@aitube/clap' +import { ClapInputCategory } from '@aitube/clap' // IMPORTANT: do NOT modify those default fields, // otherwise you might break the workflow of someone else! 
@@ -9,6 +10,7 @@ export const genericInput: ClapInputField = { id: 'input', label: 'Input', description: 'Input', + category: ClapInputCategory.PROMPT, type: 'string', allowedValues: [], defaultValue: '', @@ -18,6 +20,7 @@ export const genericText: ClapInputField = { id: 'text', label: 'Text', description: 'Text', + category: ClapInputCategory.PROMPT, type: 'string', allowedValues: [], defaultValue: '', @@ -27,6 +30,7 @@ export const genericPrompt: ClapInputField = { id: 'prompt', label: 'Prompt', description: 'Prompt', + category: ClapInputCategory.PROMPT, type: 'string', allowedValues: [], defaultValue: '', @@ -36,6 +40,7 @@ export const genericRatio: ClapInputField = { id: 'ratio', label: 'Image ratio', description: 'Image ratio (default to 1:1)', + category: ClapInputCategory.UNKNOWN, type: 'string', allowedValues: ['1:1', '16:9', '9:16'], defaultValue: '1:1', @@ -45,16 +50,38 @@ export const genericSeed: ClapInputField = { id: 'seed', label: 'Seed', description: 'Seed', - type: 'number', + category: ClapInputCategory.SEED, + type: 'number', // <-- TODO: replace by 'integer' (might break stuff) minValue: 0, maxValue: Math.pow(2, 31), defaultValue: 0, } +export const genericLora: ClapInputField = { + id: 'lora', + label: 'Lora URL', + description: 'Lora URL', + category: ClapInputCategory.LORA, + type: 'string', + allowedValues: [], + defaultValue: '', +} + +export const genericLoraUrl: ClapInputField = { + id: 'lora_url', + label: 'Lora URL', + description: 'Lora URL', + category: ClapInputCategory.LORA, + type: 'string', + allowedValues: [], + defaultValue: '', +} + export const genericImage: ClapInputField = { id: 'image', label: 'Image', description: 'Image', + category: ClapInputCategory.IMAGE_URL, type: 'string', allowedValues: [], defaultValue: '', @@ -64,6 +91,7 @@ export const genericImageUrl: ClapInputField = { id: 'image_url', label: 'Image URL', description: 'Image URL', + category: ClapInputCategory.IMAGE_URL, type: 'string', allowedValues: [], defaultValue: '', @@ -73,6 +101,7 @@ export const genericVideo: ClapInputField = { id: 'video', label: 'Video', description: 'Video', + category: ClapInputCategory.VIDEO_URL, type: 'string', allowedValues: [], defaultValue: '', @@ -82,6 +111,7 @@ export const genericVideoUrl: ClapInputField = { id: 'video_url', label: 'Video URL', description: 'Video URL', + category: ClapInputCategory.VIDEO_URL, type: 'string', allowedValues: [], defaultValue: '', @@ -91,6 +121,7 @@ export const genericVoice: ClapInputField = { id: 'voice', label: 'Voice', description: 'Voice', + category: ClapInputCategory.SOUND_URL, type: 'string', allowedValues: [], defaultValue: '', @@ -100,6 +131,7 @@ export const genericAudio: ClapInputField = { id: 'audio', label: 'Audio', description: 'Audio', + category: ClapInputCategory.SOUND_URL, type: 'string', allowedValues: [], defaultValue: '', @@ -109,6 +141,7 @@ export const genericInferenceSteps: ClapInputField = { id: 'num_inference_steps', label: 'Inference steps', description: 'Number of inference steps', + category: ClapInputCategory.INFERENCE_STEPS, type: 'number', minValue: 1, maxValue: 50, @@ -119,9 +152,8 @@ export const genericUpscalingFactor: ClapInputField = { id: 'upscaling_factor', label: 'Upscaling Factor', description: 'Upscaling Factor (2, 3, 4..)', - // <-- TODO: we should be able to have type: 'integer' - // that is not a big issue, however (the implementation can do the rounding) - type: 'number', + category: ClapInputCategory.UPSCALING_FACTOR, + type: 'number', // <-- TODO: replace by 'integer' 
(might break stuff) minValue: 2, maxValue: 4, defaultValue: 2, @@ -132,8 +164,7 @@ export const genericOverlappingTiles: ClapInputField = { label: 'Overlapping Tiles', description: 'Overlapping tiles should reduce visible seams, but doubles the inference time.', - // <-- TODO: we should be able to have type: 'integer' - // that is not a big issue, however (the implementation can do the rounding) + category: ClapInputCategory.CUSTOM, type: 'boolean', defaultValue: true, } @@ -143,7 +174,8 @@ export const genericMotionBucketId: ClapInputField = { label: 'Motion Bucket ID', description: 'The motion bucket ID determines the motion of the generated video. The higher the number, the more motion there will be.', - type: 'number', + category: ClapInputCategory.CUSTOM, + type: 'number', // <-- TODO: replace by 'integer' (might break stuff) minValue: 0, maxValue: 255, defaultValue: 127, @@ -154,6 +186,7 @@ export const genericConditioningAugmentation: ClapInputField = { label: 'Conditioning Augmentation', description: 'The conditoning augmentation determines the amount of noise that will be added to the conditioning frame. The higher the number, the more noise there will be, and the less the video will look like the initial image. Increase it for more motion.', + category: ClapInputCategory.CUSTOM, type: 'number', minValue: 0, maxValue: 1, @@ -164,6 +197,7 @@ export const genericWidth1024: ClapInputField = { id: 'width', label: 'Width', description: 'Width', + category: ClapInputCategory.WIDTH, type: 'number', minValue: 256, maxValue: 1024, @@ -174,6 +208,7 @@ export const genericWidth2048: ClapInputField = { id: 'width', label: 'Width', description: 'Width', + category: ClapInputCategory.WIDTH, type: 'number', minValue: 256, maxValue: 2048, @@ -184,6 +219,7 @@ export const genericHeight1024: ClapInputField = { id: 'height', label: 'Height', description: 'Height', + category: ClapInputCategory.HEIGHT, type: 'number', minValue: 256, maxValue: 1024, @@ -194,6 +230,7 @@ export const genericHeight2048: ClapInputField = { id: 'height', label: 'Height', description: 'Height', + category: ClapInputCategory.HEIGHT, type: 'number', minValue: 256, maxValue: 2048, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/common/loras/canWorkflowUseLora.ts b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/canWorkflowUseLora.ts new file mode 100644 index 0000000000000000000000000000000000000000..425ea8dbac1e3fb01c3b2ee5144c7f5410fa68bd --- /dev/null +++ b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/canWorkflowUseLora.ts @@ -0,0 +1,9 @@ +import { ClapInputCategory, ClapWorkflow } from '@aitube/clap/dist/types' + +export function canWorkflowUseLora(workflow: ClapWorkflow): boolean { + return workflow.inputFields.some( + ({ category }) => category === ClapInputCategory.LORA + // category === ClapInputCategory.LORA_HF_MODEL || + // category === ClapInputCategory.LORA_WEIGHT_URL + ) +} diff --git a/packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowInputField.ts b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowInputField.ts new file mode 100644 index 0000000000000000000000000000000000000000..7e8e995d29add87e18f1e56019e30de8f89d7c92 --- /dev/null +++ b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowInputField.ts @@ -0,0 +1,8 @@ +import { ClapInputCategory, ClapInputField, ClapWorkflow } from '@aitube/clap' + +export function 
getWorkflowInputField( + workflow: ClapWorkflow, + category: ClapInputCategory +): ClapInputField | undefined { + return workflow.inputFields.find((field) => field.category === category) +} diff --git a/packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora.ts b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora.ts new file mode 100644 index 0000000000000000000000000000000000000000..057c7904bcde334cc013e24978c5e705fe192671 --- /dev/null +++ b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/getWorkflowLora.ts @@ -0,0 +1,28 @@ +import { ClapInputCategory, ClapWorkflow } from '@aitube/clap' +import { Lora } from '@/services/editors/workflow-editor/workflows/common/types' +import { defaultLoraModels } from '@/services/editors/workflow-editor/workflows/common/loras' + +import { getWorkflowInputField } from './getWorkflowInputField' + +export function getWorkflowLora(workflow: ClapWorkflow): Lora | undefined { + const inputField = getWorkflowInputField(workflow, ClapInputCategory.LORA) + + if (!inputField) { + return undefined + } + + const loraRepoOrUrl: string = workflow.inputValues[inputField.id] + + if (!loraRepoOrUrl) { + return undefined + } + + const loraModel = defaultLoraModels.find((lora) => ( + lora.repoOrUrl === loraRepoOrUrl + )) + + if (!loraModel) { + return undefined + } + return loraModel +} diff --git a/packages/app/src/services/editors/workflow-editor/workflows/common/loras/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..25f4a7f5d94b001d0db41fed26c9e6a2a8ea2231 --- /dev/null +++ b/packages/app/src/services/editors/workflow-editor/workflows/common/loras/index.ts @@ -0,0 +1,73 @@ +// a list of FLUX.1 LoRA models that can be used with various providers + +import { LoraBaseModel, Lora } from '../types' + +export const defaultLoraModels: Lora[] = [ + { + id: 'lora://hf.co/models/alvdansen/flux-koda', + label: 'Koda', + baseModel: LoraBaseModel.FLUX, + description: + "Koda captures the nostalgic essence of early 1990s photography, evoking memories of disposable cameras and carefree travels. It specializes in creating images with a distinct vintage quality, characterized by slightly washed-out colors, soft focus, and the occasional light leak or film grain. The model excels at producing slice-of-life scenes that feel spontaneous and candid, as if plucked from a family photo album or a backpacker's travel diary.", + + thumbnailUrl: + 'https://hf.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png', + + projectUrl: 'https://hf.co/alvdansen/flux-koda', + + author: '@alvdansen', + + // trigger (usually some kind of unique string sequence, eg TOK) + trigger: 'flmft style', + + extensions: + 'kodachrome, blurry, realistic, still life, depth of field, scenery, no humans, monochrome, greyscale, traditional media, horizon, looking at viewer, light particles, shadow', + + repoOrUrl: 'alvdansen/flux-koda', + }, + { + id: 'lora://hf.co/models/veryVANYA/ps1-style-flux', + label: 'PS1 Style', + baseModel: LoraBaseModel.FLUX, + description: `late 90s/early 2000s ps1/n64 console graphics. 
+ +5000 steps + +trained on 15 gpt4o captioned and adjusted ps1/n64 game screenshots using https://github.com/ostris/ai-toolkit/tree/main`, + + thumbnailUrl: + 'https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24440109.jpeg', + + projectUrl: 'https://hf.co/veryVANYA/ps1-style-flux', + author: '@veryVANYA', + + // trigger (usually some kind of unique string sequence, eg TOK) + trigger: 'ps1', + + extensions: 'ps1 game screenshot', + + repoOrUrl: 'veryVANYA/ps1-style-flux', + }, + /* + { + id: 'lora://hf.co/models/jbilcke-hf/experimental-model-1', + label: 'Experimental Model 1', + baseModel: LoraBaseModel.FLUX, + description: 'A model for internal testing', + + thumbnailUrl: '', + + projectUrl: 'clapper.app', + + author: '@jbilcke-hf', + + // trigger (usually some kind of unique string sequence, eg TOK) + trigger: 'TKE1', + + extensions: + 'movie screencap from , in , with film grain.', + + repoOrUrl: 'jbilcke-hf/experimental-model-1', + }, + */ +] diff --git a/packages/app/src/services/editors/workflow-editor/workflows/common/types.ts b/packages/app/src/services/editors/workflow-editor/workflows/common/types.ts index 071c443a3880496922fcea2031cf6cd422e36c01..0759d5eb0b76d5a6cc05d1565a84808efab97f2d 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/common/types.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/common/types.ts @@ -1,3 +1,35 @@ import { ClapWorkflow } from '@aitube/clap' export type DynamicClapWorkflow = () => Promise + +export enum LoraBaseModel { + FLUX = 'FLUX', + SDXL = 'SDXL', +} + +export type Lora = { + id: string + + label: string + + baseModel: LoraBaseModel.FLUX + + description: string + + thumbnailUrl: string + + // URL to the page presenting the LoRA (eg. HF model page) + projectUrl: string + + author: string + + // trigger (usually some kind of unique string sequence, eg TOK) + trigger: string + + // additional keywords suggested by the author + extensions: string + + // name of the model repository on Hugging Face + // or direct URL to the weights + repoOrUrl: string +} diff --git a/packages/app/src/services/editors/workflow-editor/workflows/elevenlabs/index.ts b/packages/app/src/services/editors/workflow-editor/workflows/elevenlabs/index.ts index a60623fbfa8b1c1ab558f0a72d3a00f6c3f3e1ce..0db86550e6376e2c1f71f0f65d8896c17b4e8532 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/elevenlabs/index.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/elevenlabs/index.ts @@ -19,6 +19,7 @@ export const elevenlabsWorkflows: ClapWorkflow[] = [ author: 'ElevenLabs', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.ELEVENLABS, category: ClapWorkflowCategory.VOICE_GENERATION, @@ -40,6 +41,7 @@ export const elevenlabsWorkflows: ClapWorkflow[] = [ author: 'ElevenLabs', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.ELEVENLABS, category: ClapWorkflowCategory.SOUND_GENERATION, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts b/packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts index df1476205938d43e9745b235dc8b6782a5df304f..a214f1dc3cfd232053301fb745ffe2e9bd24e546 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts +++ 
b/packages/app/src/services/editors/workflow-editor/workflows/falai/defaultWorkflows.ts @@ -22,6 +22,7 @@ import { genericOverlappingTiles, genericInferenceSteps, genericImageUrl, + genericLora, } from '../common/defaultValues' import { sampleDrivingVideo, sampleVoice } from '@/lib/core/constants' @@ -45,6 +46,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: '', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.VIDEO_GENERATION, @@ -65,6 +67,38 @@ export const defaultWorkflows: ClapWorkflow[] = [ cond_aug: 0.02, }, }, + /* + { + id: 'falai://fal-ai/flux-general', + label: 'Flux.1-[DEV] with LoRAs', + description: '', + tags: ['Flux', 'LoRA'], + author: '', + thumbnailUrl: '', + nonCommercial: false, + canSupportLora: false, + engine: ClapWorkflowEngine.REST_API, + provider: ClapWorkflowProvider.FALAI, + category: ClapWorkflowCategory.IMAGE_GENERATION, + data: 'fal-ai/flux-general', + schema: '', + inputFields: [ + genericPrompt, + genericWidth2048, + genericHeight2048, + genericInferenceSteps, + ], + inputValues: { + [genericPrompt.id]: genericPrompt.defaultValue, + [genericWidth2048.id]: genericWidth2048.defaultValue, + [genericHeight2048.id]: genericHeight2048.defaultValue, + [genericInferenceSteps.id]: genericInferenceSteps.defaultValue, + + // support LoRA for this model is a bit tricky, as the parameter must be in JSON + // (this is an array of LoraWeight objects, see: https://fal.ai/models/fal-ai/flux-general/playground) + }, + }, + */ { id: 'falai://fal-ai/flux-realism', label: 'Flux Realism LoRA', @@ -73,6 +107,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: '', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -102,6 +137,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: 'BFL (https://BlackForestLabs.ai)', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -133,6 +169,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: 'BFL (https://BlackForestLabs.ai)', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -153,6 +190,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: 'BFL (https://BlackForestLabs.ai)', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -184,6 +222,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: 'Stability AI', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -215,6 +254,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: 'Stability AI', thumbnailUrl: '', nonCommercial: false, + canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.IMAGE_GENERATION, @@ -246,6 +286,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ author: 'AuraSR', thumbnailUrl: '', nonCommercial: false, + 
canSupportLora: false, engine: ClapWorkflowEngine.REST_API, provider: ClapWorkflowProvider.FALAI, category: ClapWorkflowCategory.IMAGE_UPSCALING, diff --git a/packages/app/src/services/editors/workflow-editor/workflows/replicate/defaultWorkflows.ts b/packages/app/src/services/editors/workflow-editor/workflows/replicate/defaultWorkflows.ts index bfcde08e8dc2c4d0f670d7f5e7149b4bb90780f8..16ba166f7c1141b972d30705118e2d7917c0f1fd 100644 --- a/packages/app/src/services/editors/workflow-editor/workflows/replicate/defaultWorkflows.ts +++ b/packages/app/src/services/editors/workflow-editor/workflows/replicate/defaultWorkflows.ts @@ -9,6 +9,7 @@ import { genericHeight1024, genericHeight2048, genericImage, + genericLora, genericPrompt, genericVideo, genericWidth1024, @@ -16,9 +17,40 @@ import { } from '../common/defaultValues' export const defaultWorkflows: ClapWorkflow[] = [ + { + id: 'replicate://lucataco/flux-dev-lora', + label: 'Flux-dev-lora', + description: '', + tags: ['flux'], + author: '@lucataco', + thumbnailUrl: '', + engine: ClapWorkflowEngine.REST_API, + category: ClapWorkflowCategory.IMAGE_GENERATION, + provider: ClapWorkflowProvider.REPLICATE, + data: 'lucataco/flux-dev-lora', + // data: 'lucataco/flux-dev-lora:94a0c19e55e36f75d657ecf9eada9f16a233b5329fb9cdf8e2b9ecd093e5c97e', + /** + * Inputs of the workflow (this is used to build a UI for it automatically) + */ + inputFields: [ + genericPrompt, + genericWidth2048, + genericHeight2048, + { + ...genericLora, + id: 'hf_lora', + }, + ], + inputValues: { + prompt: genericPrompt.defaultValue, + width: genericWidth2048.defaultValue, + height: genericHeight2048.defaultValue, + hf_lora: genericLora.defaultValue, + }, + }, { id: 'replicate://black-forest-labs/flux-pro', - label: 'FLUX.1 [pro]', + label: 'Flux-pro', description: '', tags: ['flux'], author: 'BFL (https://BlackForestLabs.ai)', @@ -47,7 +79,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ }, { id: 'replicate://black-forest-labs/flux-schnell', - label: 'FLUX.1 [schnell]', + label: 'Flux-schnell', description: '', tags: ['flux'], author: 'BFL (https://BlackForestLabs.ai)', @@ -68,7 +100,7 @@ export const defaultWorkflows: ClapWorkflow[] = [ }, { id: 'replicate://black-forest-labs/flux-dev', - label: 'FLUX.1 [dev]', + label: 'Flux-dev', description: '', tags: ['flux'], author: 'BFL (https://BlackForestLabs.ai)', diff --git a/packages/app/src/services/settings/useSettings.ts b/packages/app/src/services/settings/useSettings.ts index aec8840318d89070826774b0f49b2f42e50548cd..bf8a07d1e7f8bda26fca53c98aa8f02eebb4a6cd 100644 --- a/packages/app/src/services/settings/useSettings.ts +++ b/packages/app/src/services/settings/useSettings.ts @@ -2,7 +2,12 @@ import { create } from 'zustand' import { persist } from 'zustand/middleware' -import { getValidNumber, ClapWorkflowProvider } from '@aitube/clap' +import { + getValidNumber, + ClapWorkflowProvider, + ClapWorkflow, + ClapWorkflowCategory, +} from '@aitube/clap' import { parseRenderingStrategy, RenderingStrategy } from '@aitube/timeline' import { ComfyIcuAccelerator, @@ -18,10 +23,7 @@ import { getDefaultSettingsState } from './getDefaultSettingsState' import { getValidComfyWorkflowTemplate } from '@/lib/utils/getValidComfyWorkflowTemplate' import { parseComfyIcuAccelerator } from '@/lib/utils/parseComfyIcuAccelerator' -// that may not be the best way to do this, -// and importing useWorkflowEditor here is a bit tricky -import { findWorkflows } from '@/components/toolbars/top-menu/lists/getWorkflowProviders' -import { 
useWorkflowEditor } from '../editors/workflow-editor/useWorkflowEditor' +import { parseWorkflow } from './workflows/parseWorkflow' export const useSettings = create()( persist( @@ -303,147 +305,137 @@ export const useSettings = create()( ), }) }, - setAssistantWorkflow: (assistantWorkflow?: string) => { + setAssistantWorkflow: (assistantWorkflow?: ClapWorkflow) => { const { assistantWorkflow: defaultAssistantWorkflow } = getDefaultSettingsState() set({ - assistantWorkflow: getValidString( - assistantWorkflow, - defaultAssistantWorkflow - ), + assistantWorkflow: assistantWorkflow + ? JSON.stringify(assistantWorkflow) + : defaultAssistantWorkflow, }) }, - setAssistantTurboWorkflow: (assistantTurboWorkflow?: string) => { + setAssistantTurboWorkflow: (assistantTurboWorkflow?: ClapWorkflow) => { const { assistantTurboWorkflow: defaultAssistantTurboWorkflow } = getDefaultSettingsState() set({ - assistantTurboWorkflow: getValidString( - assistantTurboWorkflow, - defaultAssistantTurboWorkflow - ), + assistantTurboWorkflow: assistantTurboWorkflow + ? JSON.stringify(assistantTurboWorkflow) + : defaultAssistantTurboWorkflow, }) }, - setImageGenerationWorkflow: (imageGenerationWorkflow?: string) => { + setImageGenerationWorkflow: (imageGenerationWorkflow?: ClapWorkflow) => { const { imageGenerationWorkflow: defaultImageGenerationWorkflow } = getDefaultSettingsState() set({ - imageGenerationWorkflow: getValidString( - imageGenerationWorkflow, - defaultImageGenerationWorkflow - ), + imageGenerationWorkflow: imageGenerationWorkflow + ? JSON.stringify(imageGenerationWorkflow) + : defaultImageGenerationWorkflow, }) }, setImageGenerationTurboWorkflow: ( - imageGenerationTurboWorkflow?: string + imageGenerationTurboWorkflow?: ClapWorkflow ) => { const { imageGenerationTurboWorkflow: defaultImageGenerationTurboWorkflow, } = getDefaultSettingsState() set({ - imageGenerationTurboWorkflow: getValidString( - imageGenerationTurboWorkflow, - defaultImageGenerationTurboWorkflow - ), + imageGenerationTurboWorkflow: imageGenerationTurboWorkflow + ? JSON.stringify(imageGenerationTurboWorkflow) + : defaultImageGenerationTurboWorkflow, }) }, - setImageUpscalingWorkflow: (imageUpscalingWorkflow?: string) => { + setImageUpscalingWorkflow: (imageUpscalingWorkflow?: ClapWorkflow) => { const { imageUpscalingWorkflow: defaultImageUpscalingWorkflow } = getDefaultSettingsState() set({ - imageUpscalingWorkflow: getValidString( - imageUpscalingWorkflow, - defaultImageUpscalingWorkflow - ), + imageUpscalingWorkflow: imageUpscalingWorkflow + ? JSON.stringify(imageUpscalingWorkflow) + : defaultImageUpscalingWorkflow, }) }, - setImageDepthWorkflow: (imageDepthWorkflow?: string) => { + setImageDepthWorkflow: (imageDepthWorkflow?: ClapWorkflow) => { const { imageDepthWorkflow: defaultImageDepthWorkflow } = getDefaultSettingsState() set({ - imageDepthWorkflow: getValidString( - imageDepthWorkflow, - defaultImageDepthWorkflow - ), + imageDepthWorkflow: imageDepthWorkflow + ? JSON.stringify(imageDepthWorkflow) + : defaultImageDepthWorkflow, }) }, - setImageSegmentationWorkflow: (imageSegmentationWorkflow?: string) => { + setImageSegmentationWorkflow: ( + imageSegmentationWorkflow?: ClapWorkflow + ) => { const { imageSegmentationWorkflow: defaultImageSegmentationWorkflow } = getDefaultSettingsState() set({ - imageSegmentationWorkflow: getValidString( - imageSegmentationWorkflow, - defaultImageSegmentationWorkflow - ), + imageSegmentationWorkflow: imageSegmentationWorkflow + ? 
JSON.stringify(imageSegmentationWorkflow) + : defaultImageSegmentationWorkflow, }) }, - setVideoGenerationWorkflow: (videoGenerationWorkflow?: string) => { + setVideoGenerationWorkflow: (videoGenerationWorkflow?: ClapWorkflow) => { const { videoGenerationWorkflow: defaultVideoGenerationWorkflow } = getDefaultSettingsState() set({ - videoGenerationWorkflow: getValidString( - videoGenerationWorkflow, - defaultVideoGenerationWorkflow - ), + videoGenerationWorkflow: videoGenerationWorkflow + ? JSON.stringify(videoGenerationWorkflow) + : defaultVideoGenerationWorkflow, }) }, - setVideoUpscalingWorkflow: (videoUpscalingWorkflow?: string) => { + setVideoUpscalingWorkflow: (videoUpscalingWorkflow?: ClapWorkflow) => { const { videoUpscalingWorkflow: defaultVideoUpscalingWorkflow } = getDefaultSettingsState() set({ - videoUpscalingWorkflow: getValidString( - videoUpscalingWorkflow, - defaultVideoUpscalingWorkflow - ), + videoUpscalingWorkflow: videoUpscalingWorkflow + ? JSON.stringify(videoUpscalingWorkflow) + : defaultVideoUpscalingWorkflow, }) }, - setVideoDepthWorkflow: (videoDepthWorkflow?: string) => { + setVideoDepthWorkflow: (videoDepthWorkflow?: ClapWorkflow) => { const { videoDepthWorkflow: defaultVideoDepthWorkflow } = getDefaultSettingsState() set({ - videoDepthWorkflow: getValidString( - videoDepthWorkflow, - defaultVideoDepthWorkflow - ), + videoDepthWorkflow: videoDepthWorkflow + ? JSON.stringify(videoDepthWorkflow) + : defaultVideoDepthWorkflow, }) }, - setVideoSegmentationWorkflow: (videoSegmentationWorkflow?: string) => { + setVideoSegmentationWorkflow: ( + videoSegmentationWorkflow?: ClapWorkflow + ) => { const { videoSegmentationWorkflow: defaultVideoSegmentationWorkflow } = getDefaultSettingsState() set({ - videoSegmentationWorkflow: getValidString( - videoSegmentationWorkflow, - defaultVideoSegmentationWorkflow - ), + videoSegmentationWorkflow: videoSegmentationWorkflow + ? JSON.stringify(videoSegmentationWorkflow) + : defaultVideoSegmentationWorkflow, }) }, - setSoundGenerationWorkflow: (soundGenerationWorkflow?: string) => { + setSoundGenerationWorkflow: (soundGenerationWorkflow?: ClapWorkflow) => { const { soundGenerationWorkflow: defaultSoundGenerationWorkflow } = getDefaultSettingsState() set({ - soundGenerationWorkflow: getValidString( - soundGenerationWorkflow, - defaultSoundGenerationWorkflow - ), + soundGenerationWorkflow: soundGenerationWorkflow + ? JSON.stringify(soundGenerationWorkflow) + : defaultSoundGenerationWorkflow, }) }, - setVoiceGenerationWorkflow: (voiceGenerationWorkflow?: string) => { + setVoiceGenerationWorkflow: (voiceGenerationWorkflow?: ClapWorkflow) => { const { voiceGenerationWorkflow: defaultVoiceGenerationWorkflow } = getDefaultSettingsState() set({ - voiceGenerationWorkflow: getValidString( - voiceGenerationWorkflow, - defaultVoiceGenerationWorkflow - ), + voiceGenerationWorkflow: voiceGenerationWorkflow + ? JSON.stringify(voiceGenerationWorkflow) + : defaultVoiceGenerationWorkflow, }) }, - setMusicGenerationWorkflow: (musicGenerationWorkflow?: string) => { + setMusicGenerationWorkflow: (musicGenerationWorkflow?: ClapWorkflow) => { const { musicGenerationWorkflow: defaultVoiceGenerationWorkflow } = getDefaultSettingsState() set({ - musicGenerationWorkflow: getValidString( - musicGenerationWorkflow, - defaultVoiceGenerationWorkflow - ), + musicGenerationWorkflow: musicGenerationWorkflow + ? 
JSON.stringify(musicGenerationWorkflow) + : defaultVoiceGenerationWorkflow, }) }, setImageRenderingStrategy: ( @@ -735,82 +727,86 @@ export const useSettings = create()( const state = get() const defaultSettings = getDefaultSettingsState() - // I think this is causing some issues, - // with the settings having a dependency over the workflows, - // which creates a loop - // - // we should probably this step else where - const availableWorkflows = - useWorkflowEditor.getState().availableWorkflows + const assistantWorkflow = parseWorkflow( + state.assistantWorkflow || defaultSettings.assistantWorkflow, + ClapWorkflowCategory.ASSISTANT + ) - const assistantWorkflowId = - state.assistantWorkflow || defaultSettings.assistantWorkflow + const assistantTurboWorkflow = parseWorkflow( + state.assistantTurboWorkflow || + defaultSettings.assistantTurboWorkflow, + ClapWorkflowCategory.ASSISTANT + ) - const assistantTurboWorkflowId = - state.assistantTurboWorkflow || defaultSettings.assistantTurboWorkflow - - const imageGenerationWorkflowId = + const imageGenerationWorkflow = parseWorkflow( state.imageGenerationWorkflow || - defaultSettings.imageGenerationWorkflow + defaultSettings.imageGenerationWorkflow, + ClapWorkflowCategory.IMAGE_GENERATION + ) - const imageGenerationTurboWorkflowId = + const imageGenerationTurboWorkflow = parseWorkflow( state.imageGenerationTurboWorkflow || - defaultSettings.imageGenerationTurboWorkflow - - const imageUpscalingWorkflowId = - state.imageUpscalingWorkflow || defaultSettings.imageUpscalingWorkflow - - const imageDepthWorkflowId = - state.imageDepthWorkflow || defaultSettings.imageDepthWorkflow - - const imageSegmentationWorkflowId = + defaultSettings.imageGenerationTurboWorkflow, + ClapWorkflowCategory.IMAGE_GENERATION + ) + + const imageUpscalingWorkflow = parseWorkflow( + state.imageUpscalingWorkflow || + defaultSettings.imageUpscalingWorkflow, + ClapWorkflowCategory.IMAGE_UPSCALING + ) + + const imageDepthWorkflow = parseWorkflow( + state.imageDepthWorkflow || defaultSettings.imageDepthWorkflow, + ClapWorkflowCategory.IMAGE_DEPTH_MAPPING + ) + + const imageSegmentationWorkflow = parseWorkflow( state.imageSegmentationWorkflow || - defaultSettings.imageSegmentationWorkflow + defaultSettings.imageSegmentationWorkflow, + ClapWorkflowCategory.IMAGE_SEGMENTATION + ) - const videoGenerationWorkflowId = + const videoGenerationWorkflow = parseWorkflow( state.videoGenerationWorkflow || - defaultSettings.videoGenerationWorkflow + defaultSettings.videoGenerationWorkflow, + ClapWorkflowCategory.VIDEO_GENERATION + ) - const videoDepthWorkflowId = - state.videoDepthWorkflow || defaultSettings.videoDepthWorkflow + const videoDepthWorkflow = parseWorkflow( + state.videoDepthWorkflow || defaultSettings.videoDepthWorkflow, + ClapWorkflowCategory.VIDEO_DEPTH_MAPPING + ) - const videoSegmentationWorkflowId = + const videoSegmentationWorkflow = parseWorkflow( state.videoSegmentationWorkflow || - defaultSettings.videoSegmentationWorkflow + defaultSettings.videoSegmentationWorkflow, + ClapWorkflowCategory.VIDEO_SEGMENTATION + ) - const videoUpscalingWorkflowId = - state.videoUpscalingWorkflow || defaultSettings.videoUpscalingWorkflow + const videoUpscalingWorkflow = parseWorkflow( + state.videoUpscalingWorkflow || + defaultSettings.videoUpscalingWorkflow, + ClapWorkflowCategory.VIDEO_UPSCALING + ) - const soundGenerationWorkflowId = + const soundGenerationWorkflow = parseWorkflow( state.soundGenerationWorkflow || - defaultSettings.soundGenerationWorkflow + 
defaultSettings.soundGenerationWorkflow, + ClapWorkflowCategory.SOUND_GENERATION + ) - const voiceGenerationWorkflowId = + const voiceGenerationWorkflow = parseWorkflow( state.voiceGenerationWorkflow || - defaultSettings.voiceGenerationWorkflow + defaultSettings.voiceGenerationWorkflow, + ClapWorkflowCategory.VOICE_GENERATION + ) - const musicGenerationWorkflowId = + const musicGenerationWorkflow = parseWorkflow( state.musicGenerationWorkflow || - defaultSettings.musicGenerationWorkflow - - const { workflowIds } = findWorkflows(availableWorkflows, { - workflowIds: [ - assistantWorkflowId, - assistantTurboWorkflowId, - imageGenerationWorkflowId, - imageGenerationTurboWorkflowId, - imageUpscalingWorkflowId, - imageDepthWorkflowId, - imageSegmentationWorkflowId, - videoGenerationWorkflowId, - videoDepthWorkflowId, - videoSegmentationWorkflowId, - videoUpscalingWorkflowId, - soundGenerationWorkflowId, - voiceGenerationWorkflowId, - musicGenerationWorkflowId, - ], - }) + defaultSettings.musicGenerationWorkflow, + ClapWorkflowCategory.MUSIC_GENERATION + ) return { // why do we need those fallbacks? because some users will leave the fields empty, @@ -884,21 +880,20 @@ export const useSettings = create()( videoNegativePrompt: state.videoNegativePrompt || defaultSettings.videoNegativePrompt, - assistantWorkflow: workflowIds[assistantWorkflowId], - assistantTurboWorkflow: workflowIds[assistantTurboWorkflowId], - imageGenerationWorkflow: workflowIds[imageGenerationWorkflowId], - imageGenerationTurboWorkflow: - workflowIds[imageGenerationTurboWorkflowId], - imageUpscalingWorkflow: workflowIds[imageUpscalingWorkflowId], - imageDepthWorkflow: workflowIds[imageDepthWorkflowId], - imageSegmentationWorkflow: workflowIds[imageSegmentationWorkflowId], - videoGenerationWorkflow: workflowIds[videoGenerationWorkflowId], - videoDepthWorkflow: workflowIds[videoDepthWorkflowId], - videoSegmentationWorkflow: workflowIds[videoSegmentationWorkflowId], - videoUpscalingWorkflow: workflowIds[videoUpscalingWorkflowId], - soundGenerationWorkflow: workflowIds[soundGenerationWorkflowId], - voiceGenerationWorkflow: workflowIds[voiceGenerationWorkflowId], - musicGenerationWorkflow: workflowIds[musicGenerationWorkflowId], + assistantWorkflow, + assistantTurboWorkflow, + imageGenerationWorkflow, + imageGenerationTurboWorkflow, + imageUpscalingWorkflow, + imageDepthWorkflow, + imageSegmentationWorkflow, + videoGenerationWorkflow, + videoDepthWorkflow, + videoSegmentationWorkflow, + videoUpscalingWorkflow, + soundGenerationWorkflow, + voiceGenerationWorkflow, + musicGenerationWorkflow, imageRenderingStrategy: state.imageRenderingStrategy || diff --git a/packages/app/src/services/settings/workflows/parseWorkflow.ts b/packages/app/src/services/settings/workflows/parseWorkflow.ts new file mode 100644 index 0000000000000000000000000000000000000000..e3b04425b2ecf054f0edd5568e0580c8981aadf2 --- /dev/null +++ b/packages/app/src/services/settings/workflows/parseWorkflow.ts @@ -0,0 +1,76 @@ +import { + findWorkflows, + WorkflowSearchResults, +} from '@/components/toolbars/top-menu/lists/getWorkflowProviders' +import { useWorkflowEditor } from '@/services/editors' +import { + ClapWorkflow, + ClapWorkflowCategory, + ClapWorkflowEngine, + ClapWorkflowProvider, +} from '@aitube/clap' +import { WorkflowEditorStore } from '@aitube/clapper-services' + +export function parseWorkflow( + input: string, + category: ClapWorkflowCategory +): ClapWorkflow { + const noWorkflow: ClapWorkflow = { + id: `empty://${category}`, + label: 'No workflow', + 
description: '', + tags: [], + author: '', + thumbnailUrl: '', + nonCommercial: false, + canSupportLora: false, + engine: ClapWorkflowEngine.DEFAULT, + category, + provider: ClapWorkflowProvider.NONE, + data: '', + schema: '', + inputFields: [], + inputValues: {}, + } + + // console.log("parseWorkflow:", { input }) + + try { + const maybeWorkflow = + typeof input === 'string' + ? (JSON.parse(input) as ClapWorkflow) + : (input as ClapWorkflow) // fallback in case some users had a bad version which didn't serialize to JSON + // console.log("maybeWorkflow:", { maybeWorkflow }) + const looksValid = + typeof maybeWorkflow?.id === 'string' && + typeof maybeWorkflow?.label === 'string' && + typeof maybeWorkflow?.description === 'string' && + typeof maybeWorkflow?.author === 'string' && + typeof maybeWorkflow?.thumbnailUrl === 'string' && + typeof maybeWorkflow?.data === 'string' && + Array.isArray(maybeWorkflow?.inputFields) && + typeof maybeWorkflow?.inputValues === 'object' + if (!looksValid) { + throw new Error(`the workflow data seems invalid`) + } + return maybeWorkflow + } catch (err) { + // console.log("error:", err) + // MIGRATION OF OLDER SETTINGS + // in case the user has an old version of the settings, the "workflow" + // will be a simple ID. So we try to recover that + const results: WorkflowSearchResults = findWorkflows( + useWorkflowEditor.getState().availableWorkflows, + { workflowId: input } + ) + + if (results.workflow) { + return results.workflow + } + + // for now let's assume we have two cases: + // 1. the user has an old version of the settings, and we need to migrate it + // 2. the user has an empty value + return noWorkflow + } +} diff --git a/packages/app/tsconfig.json b/packages/app/tsconfig.json index c2cc2f8731a7e72aac9f70b92e382f65defa826c..737ee26f47d9fef904e494326578509bfa07b91e 100644 --- a/packages/app/tsconfig.json +++ b/packages/app/tsconfig.json @@ -21,8 +21,18 @@ "paths": { "@/*": ["./src/*"] }, - "types": ["@webgpu/types", "@types/dom-speech-recognition"] + "types": [ + "@types/fluent-ffmpeg", + "@webgpu/types", + "@types/dom-speech-recognition" + ] }, - "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "vitest.config.mts"], + "include": [ + "next-env.d.ts", + "**/*.ts", + "**/*.tsx", + ".next/types/**/*.ts", + "vitest.config.mts" + ], "exclude": ["node_modules"] } diff --git a/packages/clap/src/index.ts b/packages/clap/src/index.ts index 46e8d8b198511c3f7b3053fcb4174a9397db45c3..0bfe226ba01b5827bdcdc2364b75dcf14f45ce39 100644 --- a/packages/clap/src/index.ts +++ b/packages/clap/src/index.ts @@ -24,6 +24,7 @@ export { ClapTracks, ClapVoice, ClapCompletionMode, + ClapInputCategory, ClapInputField, ClapInputFieldNumber, ClapInputFieldInteger, diff --git a/packages/clap/src/types.ts b/packages/clap/src/types.ts index 2ae2bbb663a461c56eb3fd7f6a183dba751428a4..238b9392a3a130d38d7b65cd0eca192efab14ce5 100644 --- a/packages/clap/src/types.ts +++ b/packages/clap/src/types.ts @@ -434,6 +434,28 @@ export type ClapSegment = { seed: number } +// to help automatically map Clapper fields to any kind of workflow, +// we are going to use those enums +// that will help determine the semantic meaning of each field +export enum ClapInputCategory { + PROMPT = "PROMPT", + IMAGE_URL = "IMAGE_URL", + SOUND_URL = "SOUND_URL", + VIDEO_URL = "VIDEO_URL", + WIDTH = "WIDTH", + HEIGHT = "HEIGHT", + SEED = "SEED", + LORA = "LORA", + //LORA_HF_MODEL = "LORA_HF_MODEL", + //LORA_WEIGHT_URL = "LORA_WEIGHT_URL", + //MISC_HF_MODEL = "MISC_HF_MODEL", + // MISC_WEIGHT_URL = 
"MISC_WEIGHT_URL", + ITERATION_STEPS = "ITERATION_STEPS", + GUIDANCE_SCALE = "GUIDANCE_SCALE", + UPSCALING_FACTOR = "UPSCALING_FACTOR", + UNKNOWN = "UNKNOWN", +} + export type ClapInputFieldNumber = { type: 'number' minValue: number @@ -479,6 +501,15 @@ export type ClapInputField = { * Description of what the input field does */ description: string + + /** + * The category of the input + * This helps us associate an arbitrary parameter name + * to one of the category recognized by clapper + * + * eg "hf_model", "hf_model_repo" -> ClapInputCategory.LORA + */ + category: ClapInputCategory } & ( | ClapInputFieldNumber | ClapInputFieldInteger @@ -629,7 +660,7 @@ export type ClapWorkflow = { category: ClapWorkflowCategory provider: ClapWorkflowProvider - + /** * The workflow data itself * diff --git a/packages/clapper-services/src/settings.ts b/packages/clapper-services/src/settings.ts index 1a4553c16cb9e84fa9d054fb64141172ba9eead0..6332260ac1eed3acc72f311b4bdd3a9117915cbc 100644 --- a/packages/clapper-services/src/settings.ts +++ b/packages/clapper-services/src/settings.ts @@ -155,20 +155,20 @@ export type SettingsControls = { setVideoPromptSuffix: (videoPromptSuffix?: string) => void setVideoNegativePrompt: (videoNegativePrompt?: string) => void - setAssistantWorkflow: (assistantWorkflow?: string) => void - setAssistantTurboWorkflow: (assistantTurboWorkflow?: string) => void - setImageGenerationWorkflow: (imageGenerationWorkflow?: string) => void - setImageGenerationTurboWorkflow: (imageGenerationTurboWorkflow?: string) => void - setImageUpscalingWorkflow: (imageUpscalingWorkflow?: string) => void - setImageDepthWorkflow: (imageDepthWorkflow?: string) => void - setImageSegmentationWorkflow: (imageSegmentationWorkflow?: string) => void - setVideoGenerationWorkflow: (videoGenerationWorkflow?: string) => void - setVideoDepthWorkflow: (videoDepthWorkflow?: string) => void - setVideoSegmentationWorkflow: (videoSegmentationWorkflow?: string) => void - setVideoUpscalingWorkflow: (videoUpscalingWorkflow?: string) => void - setSoundGenerationWorkflow: (soundGenerationWorkflow?: string) => void - setVoiceGenerationWorkflow: (voiceGenerationWorkflow?: string) => void - setMusicGenerationWorkflow: (musicGenerationWorkflow?: string) => void + setAssistantWorkflow: (assistantWorkflow?: ClapWorkflow) => void + setAssistantTurboWorkflow: (assistantTurboWorkflow?: ClapWorkflow) => void + setImageGenerationWorkflow: (imageGenerationWorkflow?: ClapWorkflow) => void + setImageGenerationTurboWorkflow: (imageGenerationTurboWorkflow?: ClapWorkflow) => void + setImageUpscalingWorkflow: (imageUpscalingWorkflow?: ClapWorkflow) => void + setImageDepthWorkflow: (imageDepthWorkflow?: ClapWorkflow) => void + setImageSegmentationWorkflow: (imageSegmentationWorkflow?: ClapWorkflow) => void + setVideoGenerationWorkflow: (videoGenerationWorkflow?: ClapWorkflow) => void + setVideoDepthWorkflow: (videoDepthWorkflow?: ClapWorkflow) => void + setVideoSegmentationWorkflow: (videoSegmentationWorkflow?: ClapWorkflow) => void + setVideoUpscalingWorkflow: (videoUpscalingWorkflow?: ClapWorkflow) => void + setSoundGenerationWorkflow: (soundGenerationWorkflow?: ClapWorkflow) => void + setVoiceGenerationWorkflow: (voiceGenerationWorkflow?: ClapWorkflow) => void + setMusicGenerationWorkflow: (musicGenerationWorkflow?: ClapWorkflow) => void setImageRenderingStrategy: (imageRenderingStrategy?: RenderingStrategy) => void setImageUpscalingRenderingStrategy: (imageUpscalingRenderingStrategy?: RenderingStrategy) => void