Spaces:
Running
Running
JarvisLabs
committed on
Commit
•
16786bc
1
Parent(s):
ef578ea
Upload 3 files
Browse files
- app.py +12 -2
- train_tab.py +7 -4
app.py
CHANGED
@@ -1,10 +1,20 @@
|
|
1 |
import gradio as gr
|
2 |
from gen_tab import create_gen_tab
|
3 |
from train_tab import create_train_tab
|
|
|
|
|
4 |
|
5 |
-
|
|
|
|
|
|
|
|
|
|
|
6 |
with gr.Tabs() as tabs:
|
7 |
create_gen_tab()
|
8 |
create_train_tab()
|
|
|
|
|
|
|
9 |
|
10 |
-
demo.launch(debug=True)
|
|
|
1 |
import gradio as gr
|
2 |
from gen_tab import create_gen_tab
|
3 |
from train_tab import create_train_tab
|
4 |
+
from dotenv import load_dotenv, find_dotenv
|
5 |
+
import os
|
6 |
|
7 |
+
_ = load_dotenv(find_dotenv())
|
8 |
+
with gr.Blocks(theme=gr.themes.Soft(
|
9 |
+
radius_size=gr.themes.sizes.radius_none,
|
10 |
+
primary_hue=gr.themes.colors.emerald, secondary_hue=gr.themes.colors.green
|
11 |
+
|
12 |
+
)) as demo:
|
13 |
with gr.Tabs() as tabs:
|
14 |
create_gen_tab()
|
15 |
create_train_tab()
|
16 |
+
# with gr.TabItem("Theme builder"):
|
17 |
+
# gr.themes.builder()
|
18 |
+
|
19 |
|
20 |
+
demo.launch(share=True,debug=True) #,auth=[("username", "password"),(os.getenv("APP_USER"),os.getenv("APP_PW"))])
|
train_tab.py
CHANGED
@@ -8,7 +8,7 @@ def create_train_tab():
|
|
8 |
gr.Markdown("# Image Importing & Auto captions")
|
9 |
with gr.Row():
|
10 |
input_images = gr.File(file_count="multiple", type="filepath", label="Upload Images")
|
11 |
-
label_model = gr.Dropdown(["blip", "llava-16","img2prompt"], label="Caption model", info="Auto caption model")
|
12 |
token_string= gr.Textbox(label="Token string",value="TOK",interactive=True,
|
13 |
info="A unique string that will be trained to refer to the concept in the input images. Can be anything, but TOK works well.")
|
14 |
context_text = gr.Textbox(label="Context Text", info="Context Text for auto caption",value=" I want a description caption for this image")
|
@@ -39,7 +39,7 @@ def create_train_tab():
|
|
39 |
gr.Markdown("# Training on replicate")
|
40 |
with gr.Row():
|
41 |
traning_model = gr.Dropdown(["flux"], label="Caption model", info="Auto caption model")
|
42 |
-
traning_destination = gr.Textbox(label="destination",info="add in replicate model destination")
|
43 |
seed = gr.Number(label="Seed", value=42,info="Random seed integer for reproducible training. Leave empty to use a random seed.")
|
44 |
max_train_steps =gr.Number(label="max_train_steps", value= 1000, info="Number of individual training steps. Takes precedence over num_train_epochs.")
|
45 |
|
@@ -51,8 +51,11 @@ def create_train_tab():
|
|
51 |
|
52 |
train_button.click(fn=traning_function, inputs=[zip_output,traning_model,traning_destination,seed,token_string,max_train_steps],
|
53 |
outputs=[training_logs,traning_finnal],queue=True)
|
54 |
-
|
55 |
-
|
|
|
|
|
|
|
56 |
|
57 |
# traning_finnal.change(
|
58 |
# fn=update_dropdown,
|
|
|
8 |
gr.Markdown("# Image Importing & Auto captions")
|
9 |
with gr.Row():
|
10 |
input_images = gr.File(file_count="multiple", type="filepath", label="Upload Images")
|
11 |
+
label_model = gr.Dropdown(["None","blip", "llava-16","img2prompt"],value="None", label="Caption model", info="Auto caption model")
|
12 |
token_string= gr.Textbox(label="Token string",value="TOK",interactive=True,
|
13 |
info="A unique string that will be trained to refer to the concept in the input images. Can be anything, but TOK works well.")
|
14 |
context_text = gr.Textbox(label="Context Text", info="Context Text for auto caption",value=" I want a description caption for this image")
|
|
|
39 |
gr.Markdown("# Training on replicate")
|
40 |
with gr.Row():
|
41 |
traning_model = gr.Dropdown(["flux"], label="Caption model", info="Auto caption model")
|
42 |
+
traning_destination = gr.Textbox(label="destination",info="add in replicate model destination, format [user]/[model_name]")
|
43 |
seed = gr.Number(label="Seed", value=42,info="Random seed integer for reproducible training. Leave empty to use a random seed.")
|
44 |
max_train_steps =gr.Number(label="max_train_steps", value= 1000, info="Number of individual training steps. Takes precedence over num_train_epochs.")
|
45 |
|
|
|
51 |
|
52 |
train_button.click(fn=traning_function, inputs=[zip_output,traning_model,traning_destination,seed,token_string,max_train_steps],
|
53 |
outputs=[training_logs,traning_finnal],queue=True)
|
54 |
+
|
55 |
+
process_button.click(fn=process_images, inputs=[input_images,label_model,context_text,token_string],
|
56 |
+
outputs=[image_output,text_output,zip_output],queue=True)
|
57 |
+
|
58 |
+
btn_update_zip.click(fn=create_zip, inputs=[input_images,text_output,token_string],outputs=zip_output)
|
59 |
|
60 |
# traning_finnal.change(
|
61 |
# fn=update_dropdown,
|