|
|
|
|
|
from __future__ import annotations |
|
|
|
import gradio as gr |
|
import torch |
|
import re |
|
import openai |
|
from cairosvg import svg2png |
|
|
|
from app_canny import create_demo as create_demo_canny |
|
from app_depth import create_demo as create_demo_depth |
|
from app_ip2p import create_demo as create_demo_ip2p |
|
from app_lineart import create_demo as create_demo_lineart |
|
from app_mlsd import create_demo as create_demo_mlsd |
|
from app_normal import create_demo as create_demo_normal |
|
from app_openpose import create_demo as create_demo_openpose |
|
from app_scribble import create_demo as create_demo_scribble |
|
from app_scribble_interactive import create_demo as create_demo_scribble_interactive |
|
from app_segmentation import create_demo as create_demo_segmentation |
|
from app_shuffle import create_demo as create_demo_shuffle |
|
from app_softedge import create_demo as create_demo_softedge |
|
from model import Model |
|
from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON |
|
|
|
# Markdown warning appended to the description when no GPU is present:
# every ControlNet pipeline here requires CUDA.
_CPU_NOTICE = "\n<p>Running on CPU ๐ฅถ This demo does not work on CPU.</p>"

# Page description; gains the CPU notice on machines without CUDA.
DESCRIPTION = "# ControlNet v1.1" + ("" if torch.cuda.is_available() else _CPU_NOTICE)

# Single shared model wrapper for all tabs; starts on the default base
# model with the "Canny" task pre-loaded.
model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny")
|
|
|
def gpt_control(apikey, prompt):
    """Render *prompt* to a PNG by asking GPT-4 to write SVG code.

    Sends the prompt to the OpenAI chat API, extracts the SVG/XML tag
    lines from the reply, rasterizes them with cairosvg, and writes the
    result to ``output.png``.

    Parameters
    ----------
    apikey : str
        OpenAI API key (set globally on the ``openai`` module).
    prompt : str
        Natural-language description of the desired image.

    Returns
    -------
    str
        Path of the rendered image file, always ``'output.png'``.
    """
    openai.api_key = apikey

    # BUG FIX: the user message previously began "Provide only the shell
    # command without any explanations." — a leftover from another
    # project. It now asks for SVG code, consistent with the system
    # prompt and with the SVG parsing below.
    messages = [
        {
            "role": "system",
            "content": (
                "You are an SVG expert with years of experience and multiple "
                "contributions to the SVG project. Based on the prompt and the "
                "description, please generate the corresponding SVG code."
            ),
        },
        {
            "role": "user",
            "content": f"""Provide only the SVG code without any explanations.
The current objective is below. Reply with the SVG code only:
OBJECTIVE: {prompt}
YOUR SVG CODE:
""",
        },
    ]

    completion = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
    )
    chat_response = completion.choices[0].message.content

    # Keep only lines that look like markup: the greedy '<.*>' captures
    # from the first '<' to the last '>' on each line, dropping any prose
    # the model wrapped around the SVG.
    code = re.findall(r'<.*>', chat_response)
    code_new = '\n'.join(code)

    svg_code = f"""
{code_new}
"""
    # cairosvg accepts str input for `bytestring` (it encodes internally).
    svg2png(bytestring=svg_code, write_to='output.png')

    return 'output.png'
|
|
|
|
|
# Top-level Gradio UI: one GPT-4 -> SVG -> image tab, one tab per
# ControlNet v1.1 preprocessor, and an accordion for inspecting or
# changing the base Stable Diffusion model.
with gr.Blocks(css="style.css") as demo:
    # Page header (NOTE(review): emoji in these literals look
    # mojibake-encoded — likely an encoding artifact; left untouched).
    gr.HTML("<center>"
            "<h1>๐๐ช๐ - ControlNet with GPT-4</h1>"
            "</center>")

    gr.Markdown("## <center>๐ Born to Create: Controllable Text-to-Image Generation with GPT-4</center>")

    # "Duplicate this Space" button; visibility is controlled by settings.
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=SHOW_DUPLICATE_BUTTON,
    )

    # --- GPT-4 control tab: API key + prompt in, rendered PNG out. ---
    with gr.Tab("GPT-4 Control"):
        with gr.Row():
            with gr.Column():
                inp1 = gr.Textbox(label="OpenAI API Key", type="password")
                inp2 = gr.Textbox(label="Position Prompt (as simple as possible)")
                btn1 = gr.Button("GPT-4 Control", variant="primary")
            with gr.Column():
                out1 = gr.Image(label="Output Image", type="pil", interactive=True)

        # gpt_control returns a file path; gr.Image loads and displays it.
        btn1.click(gpt_control, [inp1, inp2], [out1])

    # --- One tab per ControlNet conditioning mode. Each create_demo_*
    # factory builds its own sub-UI wired to the matching Model method. ---
    with gr.Tabs():
        with gr.TabItem("Canny"):
            create_demo_canny(model.process_canny)
        with gr.TabItem("MLSD"):
            create_demo_mlsd(model.process_mlsd)
        with gr.TabItem("Scribble"):
            create_demo_scribble(model.process_scribble)
        with gr.TabItem("Scribble Interactive"):
            create_demo_scribble_interactive(model.process_scribble_interactive)
        with gr.TabItem("SoftEdge"):
            create_demo_softedge(model.process_softedge)
        with gr.TabItem("OpenPose"):
            create_demo_openpose(model.process_openpose)
        with gr.TabItem("Segmentation"):
            create_demo_segmentation(model.process_segmentation)
        with gr.TabItem("Depth"):
            create_demo_depth(model.process_depth)
        with gr.TabItem("Normal map"):
            create_demo_normal(model.process_normal)
        with gr.TabItem("Lineart"):
            create_demo_lineart(model.process_lineart)
        with gr.TabItem("Content Shuffle"):
            create_demo_shuffle(model.process_shuffle)
        with gr.TabItem("Instruct Pix2Pix"):
            create_demo_ip2p(model.process_ip2p)

    # --- Base-model controls: show the current base model and optionally
    # swap it for another SD-v1.5-compatible checkpoint. ---
    with gr.Accordion(label="Base model", open=False):
        with gr.Row():
            with gr.Column(scale=5):
                current_base_model = gr.Text(label="Current base model")
            with gr.Column(scale=1):
                check_base_model_button = gr.Button("Check current base model")
        with gr.Row():
            with gr.Column(scale=5):
                new_base_model_id = gr.Text(
                    label="New base model",
                    max_lines=1,
                    placeholder="runwayml/stable-diffusion-v1-5",
                    info="The base model must be compatible with Stable Diffusion v1.5.",
                    interactive=ALLOW_CHANGING_BASE_MODEL,
                )
            with gr.Column(scale=1):
                change_base_model_button = gr.Button("Change base model", interactive=ALLOW_CHANGING_BASE_MODEL)
        # Explain why the field is read-only when changing is disabled.
        if not ALLOW_CHANGING_BASE_MODEL:
            gr.Markdown(
                """The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space."""
            )

    # Read-only lookup; skips the queue and is exposed on the API.
    check_base_model_button.click(
        fn=lambda: model.base_model_id,
        outputs=current_base_model,
        queue=False,
        api_name="check_base_model",
    )
    # Swap the base model on Enter or button click; not exposed on the API.
    gr.on(
        triggers=[new_base_model_id.submit, change_base_model_button.click],
        fn=model.set_base_model,
        inputs=new_base_model_id,
        outputs=current_base_model,
        api_name=False,
    )
|
|
|
if __name__ == "__main__":
    # Enable request queuing (at most 20 waiting jobs), then start the app.
    app = demo.queue(max_size=20)
    app.launch()
|
|