radames committed
Commit cd80689
1 Parent(s): 1d3190d

remove img2img app

Files changed (2)
  1. app-img2img.py +0 -271
  2. static/img2img.html +0 -383
app-img2img.py DELETED
@@ -1,271 +0,0 @@
-import asyncio
-import json
-import logging
-import traceback
-from pydantic import BaseModel
-
-from fastapi import FastAPI, WebSocket, HTTPException, WebSocketDisconnect
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import (
-    StreamingResponse,
-    JSONResponse,
-    HTMLResponse,
-    FileResponse,
-)
-
-from diffusers import AutoPipelineForImage2Image, AutoencoderTiny
-from compel import Compel
-import torch
-
-try:
-    import intel_extension_for_pytorch as ipex
-except:
-    pass
-from PIL import Image
-import numpy as np
-import gradio as gr
-import io
-import uuid
-import os
-import time
-import psutil
-
-MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
-TIMEOUT = float(os.environ.get("TIMEOUT", 0))
-SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
-TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
-
-WIDTH = 512
-HEIGHT = 512
-# disable tiny autoencoder for better quality speed tradeoff
-USE_TINY_AUTOENCODER = True
-
-# check if MPS is available OSX only M1/M2/M3 chips
-mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
-xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available()
-device = torch.device(
-    "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu"
-)
-torch_device = device
-
-# change to torch.float16 to save GPU memory
-torch_dtype = torch.float32
-
-print(f"TIMEOUT: {TIMEOUT}")
-print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
-print(f"MAX_QUEUE_SIZE: {MAX_QUEUE_SIZE}")
-print(f"device: {device}")
-
-if mps_available:
-    device = torch.device("mps")
-    torch_device = "cpu"
-    torch_dtype = torch.float32
-
-if SAFETY_CHECKER == "True":
-    pipe = AutoPipelineForImage2Image.from_pretrained(
-        "SimianLuo/LCM_Dreamshaper_v7",
-    )
-else:
-    pipe = AutoPipelineForImage2Image.from_pretrained(
-        "SimianLuo/LCM_Dreamshaper_v7",
-        safety_checker=None,
-    )
-
-if USE_TINY_AUTOENCODER:
-    pipe.vae = AutoencoderTiny.from_pretrained(
-        "madebyollin/taesd", torch_dtype=torch_dtype, use_safetensors=True
-    )
-pipe.set_progress_bar_config(disable=True)
-pipe.to(device=torch_device, dtype=torch_dtype).to(device)
-pipe.unet.to(memory_format=torch.channels_last)
-
-if psutil.virtual_memory().total < 64 * 1024**3:
-    pipe.enable_attention_slicing()
-
-if TORCH_COMPILE:
-    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
-
-    pipe(prompt="warmup", image=[Image.new("RGB", (512, 512))])
-
-compel_proc = Compel(
-    tokenizer=pipe.tokenizer,
-    text_encoder=pipe.text_encoder,
-    truncate_long_prompts=False,
-)
-user_queue_map = {}
-
-
-class InputParams(BaseModel):
-    seed: int = 2159232
-    prompt: str
-    guidance_scale: float = 8.0
-    strength: float = 0.5
-    steps: int = 4
-    lcm_steps: int = 50
-    width: int = WIDTH
-    height: int = HEIGHT
-
-
-def predict(
-    input_image: Image.Image, params: InputParams, prompt_embeds: torch.Tensor = None
-):
-    generator = torch.manual_seed(params.seed)
-    results = pipe(
-        prompt_embeds=prompt_embeds,
-        generator=generator,
-        image=input_image,
-        strength=params.strength,
-        num_inference_steps=params.steps,
-        guidance_scale=params.guidance_scale,
-        width=params.width,
-        height=params.height,
-        original_inference_steps=params.lcm_steps,
-        output_type="pil",
-    )
-    nsfw_content_detected = (
-        results.nsfw_content_detected[0]
-        if "nsfw_content_detected" in results
-        else False
-    )
-    if nsfw_content_detected:
-        return None
-    return results.images[0]
-
-
-app = FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-
[email protected]("/ws")
-async def websocket_endpoint(websocket: WebSocket):
-    await websocket.accept()
-    if MAX_QUEUE_SIZE > 0 and len(user_queue_map) >= MAX_QUEUE_SIZE:
-        print("Server is full")
-        await websocket.send_json({"status": "error", "message": "Server is full"})
-        await websocket.close()
-        return
-
-    try:
-        uid = str(uuid.uuid4())
-        print(f"New user connected: {uid}")
-        await websocket.send_json(
-            {"status": "success", "message": "Connected", "userId": uid}
-        )
-        user_queue_map[uid] = {"queue": asyncio.Queue()}
-        await websocket.send_json(
-            {"status": "start", "message": "Start Streaming", "userId": uid}
-        )
-        await handle_websocket_data(websocket, uid)
-    except WebSocketDisconnect as e:
-        logging.error(f"WebSocket Error: {e}, {uid}")
-        traceback.print_exc()
-    finally:
-        print(f"User disconnected: {uid}")
-        queue_value = user_queue_map.pop(uid, None)
-        queue = queue_value.get("queue", None)
-        if queue:
-            while not queue.empty():
-                try:
-                    queue.get_nowait()
-                except asyncio.QueueEmpty:
-                    continue
-
-
[email protected]("/queue_size")
-async def get_queue_size():
-    queue_size = len(user_queue_map)
-    return JSONResponse({"queue_size": queue_size})
-
-
[email protected]("/stream/{user_id}")
-async def stream(user_id: uuid.UUID):
-    uid = str(user_id)
-    try:
-        user_queue = user_queue_map[uid]
-        queue = user_queue["queue"]
-
-        async def generate():
-            last_prompt: str = None
-            prompt_embeds: torch.Tensor = None
-            while True:
-                data = await queue.get()
-                input_image = data["image"]
-                params = data["params"]
-                if input_image is None:
-                    continue
-                # avoid recalculate prompt embeds
-                if last_prompt != params.prompt:
-                    print("new prompt")
-                    prompt_embeds = compel_proc(params.prompt)
-                    last_prompt = params.prompt
-
-                image = predict(
-                    input_image,
-                    params,
-                    prompt_embeds,
-                )
-                if image is None:
-                    continue
-                frame_data = io.BytesIO()
-                image.save(frame_data, format="JPEG")
-                frame_data = frame_data.getvalue()
-                if frame_data is not None and len(frame_data) > 0:
-                    yield b"--frame\r\nContent-Type: image/jpeg\r\n\r\n" + frame_data + b"\r\n"
-
-                await asyncio.sleep(1.0 / 120.0)
-
-        return StreamingResponse(
-            generate(), media_type="multipart/x-mixed-replace;boundary=frame"
-        )
-    except Exception as e:
-        logging.error(f"Streaming Error: {e}, {user_queue_map}")
-        traceback.print_exc()
-        return HTTPException(status_code=404, detail="User not found")
-
-
-async def handle_websocket_data(websocket: WebSocket, user_id: uuid.UUID):
-    uid = str(user_id)
-    user_queue = user_queue_map[uid]
-    queue = user_queue["queue"]
-    if not queue:
-        return HTTPException(status_code=404, detail="User not found")
-    last_time = time.time()
-    try:
-        while True:
-            data = await websocket.receive_bytes()
-            params = await websocket.receive_json()
-            params = InputParams(**params)
-            pil_image = Image.open(io.BytesIO(data))
-
-            while not queue.empty():
-                try:
-                    queue.get_nowait()
-                except asyncio.QueueEmpty:
-                    continue
-            await queue.put({"image": pil_image, "params": params})
-            if TIMEOUT > 0 and time.time() - last_time > TIMEOUT:
-                await websocket.send_json(
-                    {
-                        "status": "timeout",
-                        "message": "Your session has ended",
-                        "userId": uid,
-                    }
-                )
-                await websocket.close()
-                return
-
-    except Exception as e:
-        logging.error(f"Error: {e}")
-        traceback.print_exc()
-
-
[email protected]("/", response_class=HTMLResponse)
-async def root():
-    return FileResponse("./static/img2img.html")
 
static/img2img.html DELETED
@@ -1,383 +0,0 @@
-<!doctype html>
-<html>
-
-<head>
-    <meta charset="UTF-8">
-    <title>Real-Time Latent Consistency Model</title>
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <script
-        src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
-    <script src="https://cdn.jsdelivr.net/npm/[email protected]/piexif.min.js"></script>
-    <script src="https://cdn.tailwindcss.com"></script>
-    <style type="text/tailwindcss">
-        .button {
-            @apply bg-gray-700 hover:bg-gray-800 text-white font-normal p-2 rounded disabled:bg-gray-300 dark:disabled:bg-gray-700 disabled:cursor-not-allowed dark:disabled:text-black
-        }
-    </style>
-    <script type="module">
-        const getValue = (id) => {
-            const el = document.querySelector(`${id}`)
-            if (el.type === "checkbox")
-                return el.checked;
-            return el.value;
-        }
-
-        const startBtn = document.querySelector("#start");
-        const stopBtn = document.querySelector("#stop");
-        const videoEl = document.querySelector("#webcam");
-        const imageEl = document.querySelector("#player");
-        const queueSizeEl = document.querySelector("#queue_size");
-        const errorEl = document.querySelector("#error");
-        const snapBtn = document.querySelector("#snap");
-        const webcamsEl = document.querySelector("#webcams");
-
-        function LCMLive(webcamVideo, liveImage) {
-            let websocket;
-
-            async function start() {
-                return new Promise((resolve, reject) => {
-                    const websocketURL = `${window.location.protocol === "https:" ? "wss" : "ws"
-                        }:${window.location.host}/ws`;
-
-                    const socket = new WebSocket(websocketURL);
-                    socket.onopen = () => {
-                        console.log("Connected to websocket");
-                    };
-                    socket.onclose = () => {
-                        console.log("Disconnected from websocket");
-                        stop();
-                        resolve({ "status": "disconnected" });
-                    };
-                    socket.onerror = (err) => {
-                        console.error(err);
-                        reject(err);
-                    };
-                    socket.onmessage = (event) => {
-                        const data = JSON.parse(event.data);
-                        switch (data.status) {
-                            case "success":
-                                break;
-                            case "start":
-                                const userId = data.userId;
-                                initVideoStream(userId);
-                                break;
-                            case "timeout":
-                                stop();
-                                resolve({ "status": "timeout" });
-                            case "error":
-                                stop();
-                                reject(data.message);
-
-                        }
-                    };
-                    websocket = socket;
-                })
-            }
-            function switchCamera() {
-                const constraints = {
-                    audio: false,
-                    video: { width: 1024, height: 1024, deviceId: mediaDevices[webcamsEl.value].deviceId }
-                };
-                navigator.mediaDevices
-                    .getUserMedia(constraints)
-                    .then((mediaStream) => {
-                        webcamVideo.removeEventListener("timeupdate", videoTimeUpdateHandler);
-                        webcamVideo.srcObject = mediaStream;
-                        webcamVideo.onloadedmetadata = () => {
-                            webcamVideo.play();
-                            webcamVideo.addEventListener("timeupdate", videoTimeUpdateHandler);
-                        };
-                    })
-                    .catch((err) => {
-                        console.error(`${err.name}: ${err.message}`);
-                    });
-            }
-
-            async function videoTimeUpdateHandler() {
-                const dimension = getValue("input[name=dimension]:checked");
-                const [WIDTH, HEIGHT] = JSON.parse(dimension);
-
-                const canvas = new OffscreenCanvas(WIDTH, HEIGHT);
-                const videoW = webcamVideo.videoWidth;
-                const videoH = webcamVideo.videoHeight;
-                const aspectRatio = WIDTH / HEIGHT;
-
-                const ctx = canvas.getContext("2d");
-                ctx.drawImage(webcamVideo, videoW / 2 - videoH * aspectRatio / 2, 0, videoH * aspectRatio, videoH, 0, 0, WIDTH, HEIGHT)
-                const blob = await canvas.convertToBlob({ type: "image/jpeg", quality: 1 });
-                websocket.send(blob);
-                websocket.send(JSON.stringify({
-                    "seed": getValue("#seed"),
-                    "prompt": getValue("#prompt"),
-                    "guidance_scale": getValue("#guidance-scale"),
-                    "strength": getValue("#strength"),
-                    "steps": getValue("#steps"),
-                    "lcm_steps": getValue("#lcm_steps"),
-                    "width": WIDTH,
-                    "height": HEIGHT,
-                }));
-            }
-            let mediaDevices = [];
-            async function initVideoStream(userId) {
-                liveImage.src = `/stream/${userId}`;
-                await navigator.mediaDevices.enumerateDevices()
-                    .then(devices => {
-                        const cameras = devices.filter(device => device.kind === 'videoinput');
-                        mediaDevices = cameras;
-                        webcamsEl.innerHTML = "";
-                        cameras.forEach((camera, index) => {
-                            const option = document.createElement("option");
-                            option.value = index;
-                            option.innerText = camera.label;
-                            webcamsEl.appendChild(option);
-                            option.selected = index === 0;
-                        });
-                        webcamsEl.addEventListener("change", switchCamera);
-                    })
-                    .catch(err => {
-                        console.error(err);
-                    });
-                const constraints = {
-                    audio: false,
-                    video: { width: 1024, height: 1024, deviceId: mediaDevices[0].deviceId }
-                };
-                navigator.mediaDevices
-                    .getUserMedia(constraints)
-                    .then((mediaStream) => {
-                        webcamVideo.srcObject = mediaStream;
-                        webcamVideo.onloadedmetadata = () => {
-                            webcamVideo.play();
-                            webcamVideo.addEventListener("timeupdate", videoTimeUpdateHandler);
-                        };
-                    })
-                    .catch((err) => {
-                        console.error(`${err.name}: ${err.message}`);
-                    });
-            }
-
-            async function stop() {
-                websocket.close();
-                navigator.mediaDevices.getUserMedia({ video: true }).then((mediaStream) => {
-                    mediaStream.getTracks().forEach((track) => track.stop());
-                });
-                webcamVideo.removeEventListener("timeupdate", videoTimeUpdateHandler);
-                webcamsEl.removeEventListener("change", switchCamera);
-                webcamVideo.srcObject = null;
-            }
-            return {
-                start,
-                stop
-            }
-        }
-        function toggleMessage(type) {
-            errorEl.hidden = false;
-            errorEl.scrollIntoView();
-            switch (type) {
-                case "error":
-                    errorEl.innerText = "To many users are using the same GPU, please try again later.";
-                    errorEl.classList.toggle("bg-red-300", "text-red-900");
-                    break;
-                case "success":
-                    errorEl.innerText = "Your session has ended, please start a new one.";
-                    errorEl.classList.toggle("bg-green-300", "text-green-900");
-                    break;
-            }
-            setTimeout(() => {
-                errorEl.hidden = true;
-            }, 2000);
-        }
-        function snapImage() {
-            try {
-                const zeroth = {};
-                const exif = {};
-                const gps = {};
-                zeroth[piexif.ImageIFD.Make] = "LCM Image-to-Image";
-                zeroth[piexif.ImageIFD.ImageDescription] = `prompt: ${getValue("#prompt")} | seed: ${getValue("#seed")} | guidance_scale: ${getValue("#guidance-scale")} | strength: ${getValue("#strength")} | lcm_steps: ${getValue("#lcm_steps")} | steps: ${getValue("#steps")}`;
-                zeroth[piexif.ImageIFD.Software] = "https://github.com/radames/Real-Time-Latent-Consistency-Model";
-
-                exif[piexif.ExifIFD.DateTimeOriginal] = new Date().toISOString();
-
-                const exifObj = { "0th": zeroth, "Exif": exif, "GPS": gps };
-                const exifBytes = piexif.dump(exifObj);
-
-                const canvas = document.createElement("canvas");
-                canvas.width = imageEl.naturalWidth;
-                canvas.height = imageEl.naturalHeight;
-                const ctx = canvas.getContext("2d");
-                ctx.drawImage(imageEl, 0, 0);
-                const dataURL = canvas.toDataURL("image/jpeg");
-                const withExif = piexif.insert(exifBytes, dataURL);
-
-                const a = document.createElement("a");
-                a.href = withExif;
-                a.download = `lcm_txt_2_img${Date.now()}.png`;
-                a.click();
-            } catch (err) {
-                console.log(err);
-            }
-        }
-
-
-        const lcmLive = LCMLive(videoEl, imageEl);
-        startBtn.addEventListener("click", async () => {
-            try {
-                startBtn.disabled = true;
-                snapBtn.disabled = false;
-                const res = await lcmLive.start();
-                startBtn.disabled = false;
-                if (res.status === "timeout")
-                    toggleMessage("success")
-            } catch (err) {
-                console.log(err);
-                toggleMessage("error")
-                startBtn.disabled = false;
-            }
-        });
-        stopBtn.addEventListener("click", () => {
-            lcmLive.stop();
-        });
-        window.addEventListener("beforeunload", () => {
-            lcmLive.stop();
-        });
-        snapBtn.addEventListener("click", snapImage);
-        setInterval(() =>
-            fetch("/queue_size")
-                .then((res) => res.json())
-                .then((data) => {
-                    queueSizeEl.innerText = data.queue_size;
-                })
-                .catch((err) => {
-                    console.log(err);
-                })
-            , 5000);
-    </script>
-</head>
-
-<body class="text-black dark:bg-gray-900 dark:text-white">
-    <div class="fixed right-2 top-2 p-4 font-bold text-sm rounded-lg max-w-xs text-center" id="error">
-    </div>
-    <main class="container mx-auto px-4 py-4 max-w-4xl flex flex-col gap-4">
-        <article class="text-center max-w-xl mx-auto">
-            <h1 class="text-3xl font-bold">Real-Time Latent Consistency Model</h1>
-            <h2 class="text-2xl font-bold mb-4">Image to Image</h2>
-            <p class="text-sm">
-                This demo showcases
-                <a href="https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7" target="_blank"
-                    class="text-blue-500 underline hover:no-underline">LCM</a> Image to Image pipeline
-                using
-                <a href="https://github.com/huggingface/diffusers/tree/main/examples/community#latent-consistency-pipeline"
-                    target="_blank" class="text-blue-500 underline hover:no-underline">Diffusers</a> with a MJPEG
-                stream server.
-            </p>
-            <p class="text-sm">
-                There are <span id="queue_size" class="font-bold">0</span> user(s) sharing the same GPU, affecting
-                real-time performance. Maximum queue size is 4. <a
-                    href="https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model?duplicate=true"
-                    target="_blank" class="text-blue-500 underline hover:no-underline">Duplicate</a> and run it on your
-                own GPU.
-            </p>
-        </article>
-        <div>
-            <h2 class="font-medium">Prompt</h2>
-            <p class="text-sm text-gray-500">
-                Change the prompt to generate different images, accepts <a
-                    href="https://github.com/damian0815/compel/blob/main/doc/syntax.md" target="_blank"
-                    class="text-blue-500 underline hover:no-underline">Compel</a> syntax.
-            </p>
-            <div class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center">
-                <textarea type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 outline-none dark:text-black"
-                    title="Prompt, this is an example, feel free to modify"
-                    placeholder="Add your prompt here...">Portrait of The Terminator with , glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5, cinematic, masterpiece</textarea>
-            </div>
-
-        </div>
-        <div class="">
-            <details>
-                <summary class="font-medium cursor-pointer">Advanced Options</summary>
-                <div class="grid grid-cols-3 sm:grid-cols-6 items-center gap-3 py-3">
-                    <label for="webcams" class="text-sm font-medium">Camera Options: </label>
-                    <select id="webcams" class="text-sm border-2 border-gray-500 rounded-md font-light dark:text-black">
-                    </select>
-                    <div></div>
-                    <label class="text-sm font-medium " for="steps">Inference Steps
-                    </label>
-                    <input type="range" id="steps" name="steps" min="1" max="20" value="4"
-                        oninput="this.nextElementSibling.value = Number(this.value)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        4</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="lcm_steps">LCM Inference Steps
-                    </label>
-                    <input type="range" id="lcm_steps" name="lcm_steps" min="2" max="60" value="50"
-                        oninput="this.nextElementSibling.value = Number(this.value)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        50</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="guidance-scale">Guidance Scale
-                    </label>
-                    <input type="range" id="guidance-scale" name="guidance-scale" min="0" max="30" step="0.001"
-                        value="8.0" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        8.0</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="strength">Strength</label>
-                    <input type="range" id="strength" name="strength" min="0.1" max="1" step="0.001" value="0.50"
-                        oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)">
-                    <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md">
-                        0.5</output>
-                    <!-- -->
-                    <label class="text-sm font-medium" for="seed">Seed</label>
-                    <input type="number" id="seed" name="seed" value="299792458"
-                        class="font-light border border-gray-700 text-right rounded-md p-2 dark:text-black">
-                    <button
-                        onclick="document.querySelector('#seed').value = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)"
-                        class="button">
-                        Rand
-                    </button>
-                    <!-- -->
-                    <!-- -->
-                    <label class="text-sm font-medium" for="dimension">Image Dimensions</label>
-                    <div class="col-span-2 flex gap-2">
-                        <div class="flex gap-1">
-                            <input type="radio" id="dimension512" name="dimension" value="[512,512]" checked
-                                class="cursor-pointer">
-                            <label for="dimension512" class="text-sm cursor-pointer">512x512</label>
-                        </div>
-                        <div class="flex gap-1">
-                            <input type="radio" id="dimension768" name="dimension" value="[768,768]"
-                                lass="cursor-pointer">
-                            <label for="dimension768" class="text-sm cursor-pointer">768x768</label>
-                        </div>
-                    </div>
-                    <!-- -->
-                </div>
-            </details>
-        </div>
-        <div class="flex gap-3">
-            <button id="start" class="button">
-                Start
-            </button>
-            <button id="stop" class="button">
-                Stop
-            </button>
-            <button id="snap" disabled class="button ml-auto">
-                Snapshot
-            </button>
-        </div>
-        <div class="relative rounded-lg border border-slate-300 overflow-hidden">
-            <img id="player" class="w-full aspect-square rounded-lg "
-                src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=">
-            <div class="absolute top-0 left-0 w-1/4 aspect-square">
-                <video id="webcam" class="w-full aspect-square relative z-10 object-cover" playsinline autoplay muted
-                    loop></video>
-                <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 448" width="100"
-                    class="w-full p-4 absolute top-0 opacity-20 z-0">
-                    <path fill="currentColor"
-                        d="M224 256a128 128 0 1 0 0-256 128 128 0 1 0 0 256zm-45.7 48A178.3 178.3 0 0 0 0 482.3 29.7 29.7 0 0 0 29.7 512h388.6a29.7 29.7 0 0 0 29.7-29.7c0-98.5-79.8-178.3-178.3-178.3h-91.4z" />
-                </svg>
-            </div>
-        </div>
-    </main>
-</body>
-
-</html>