JarvisLabs committed
Commit d6f10f4 • 1 Parent(s): 353926b

Upload 3 files

Browse files:
- app.py +144 -237
- model_dict.json +7 -0
- requirments.txt +6 -0
app.py
CHANGED
@@ -1,238 +1,145 @@
- [old lines 1–144: imports and the opening of the inline traning_function, up to its replicate.trainings.create(...) input dict; content truncated in this diff view]
-                "batch_size": 1,
-                "autocaption": True,
-                "trigger_word": token_string,
-                "learning_rate": 0.0004,
-                "seed": seed,
-                "input_images": zip_path
-            },
-        )
-
-        training_logs = f"Training started with model: {training_model}\n"
-        training_logs += f"Destination: {training_destination}\n"
-        training_logs += f"Seed: {seed}\n"
-        training_logs += f"Token string: {token_string}\n"
-        training_logs += f"Max train steps: {max_train_steps}\n"
-
-        # Poll the training status
-        while training.status != "succeeded":
-            training.reload()
-            training_logs += f"Training status: {training.status}\n"
-            training_logs += f"{training.logs}\n"
-            yield training_logs, None
-            time.sleep(10)  # Wait for 10 seconds before checking again
-
-        training_logs += "Training completed!\n"
-        if hf_repo_id and hf_token:
-            training_logs += f"Uploading to Hugging Face repo: {hf_repo_id}\n"
-            # Here you would implement the logic to upload to Hugging Face
-
-        # In a real scenario, you might want to download and display some result images
-        # For now, we'll just return the original images
-        images = [Image.open(file) for file in files]
-
-        yield training_logs, images
-
-    except Exception as e:
-        yield f"An error occurred: {str(e)}", None
-
-with gr.Blocks() as demo:
-    gr.Markdown("# Image Captioning")
-    with gr.Row():
-        input_images = gr.File(file_count="multiple", type="filepath", label="Upload Images")
-        label_model = gr.Dropdown(["blip", "llava-16", "img2prompt"], label="Caption model", info="Auto caption model")
-        context_text = gr.Textbox(label="Context Text", info="Context Text for auto catpion", value=" I want a description caption for this image")
-    # Replicate API Key input
-    replicate_api_key = gr.Textbox(
-        label="Replicate API Key",
-        info="API key for Replicate",
-        type="password"
-    )
-    api_key_status = gr.Textbox(label="API Key Status", interactive=False)
-
-    with gr.Row():
-        process_button = gr.Button("Process Images")
-    # Image outputs
-    with gr.Row():
-        gr.Markdown("# Captions")
-    with gr.Row():
-        with gr.Column():
-            image_output = gr.Gallery(type="pil", object_fit="fill")
-        with gr.Column():
-            text_output = gr.Textbox(interactive=True)
-    # Training options
-    with gr.Row():
-        gr.Markdown("# Training on replicate")
-    with gr.Row():
-        traning_model = gr.Dropdown(["flux", "SDXL", ""], label="Caption model", info="Auto caption model")
-        traning_destination = gr.Textbox(label="destination", info="add in replicate model destination")
-        seed = gr.Number(label="Seed", value=42, info="Random seed integer for reproducible training. Leave empty to use a random seed.")
-        token_stringn = "TOK"  # gr.Textbox(label="Token string", value="TOK", info="A unique string that will be trained to refer to the concept in the input images. Can be anything, but TOK works well.")
-        max_train_steps = gr.Number(label="max_train_steps", value=1000, info="Number of individual training steps. Takes precedence over num_train_epochs.")
-    with gr.Row():
-        hf_repo_id = gr.Textbox(label="Hugging face repo id", info="Hugging Face repository ID, if you'd like to upload the trained LoRA to Hugging Face. For example, lucataco/flux-dev-lora.")
-        hf_token = gr.Textbox(label="Hugging face write token", info="Hugging Face token, if you'd like to upload the trained LoRA to Hugging Face.")
-    with gr.Row():
-        train_button = gr.Button("Train")
-    with gr.Row():
-        training_logs = gr.Textbox(label="Training logs")
-        training_images = gr.Gallery(label="Training images")
-
-    train_button.click(fn=traning_function, inputs=[input_images, text_output, traning_model, traning_destination, seed, token_stringn, max_train_steps, hf_repo_id, hf_token],
-                       outputs=[image_output, text_output])
-
-    process_button.click(fn=process_images, inputs=[input_images, label_model, context_text], outputs=[image_output, text_output])
-    # Add event listener for API key changes
-    replicate_api_key.change(
-        fn=update_replicate_api_key,
-        inputs=[replicate_api_key],
-        outputs=[api_key_status]
-    )
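The training routine removed above was moved out of app.py into src/rep_api.py, which this commit does not show. As a rough sketch only: assuming the relocated traning_function kept the removed polling loop and now feeds the training_logs and traning_finnal textboxes wired up in the new app.py below, it plausibly looks like this (the trainer version string and the final yielded value are assumptions, not code from this commit):

```python
# Hypothetical sketch of traning_function in src/rep_api.py, inferred from the
# removed inline loop above and the new train_button.click wiring below.
import time
import replicate

def traning_function(zip_path, traning_model, destination, seed, token_string, max_train_steps):
    # Generator: Gradio streams each yielded (logs, final) pair into the
    # "Training logs" and "Training final" textboxes.
    training = replicate.trainings.create(
        version="ostris/flux-dev-lora-trainer:<version-hash>",  # placeholder, not in this commit
        destination=destination,
        input={
            "steps": max_train_steps,
            "batch_size": 1,
            "trigger_word": token_string,
            "learning_rate": 0.0004,
            "seed": seed,
            "input_images": open(zip_path, "rb"),
        },
    )
    logs = f"Training started with model: {traning_model}\n"
    while training.status not in ("succeeded", "failed", "canceled"):
        training.reload()
        yield logs + f"Training status: {training.status}\n{training.logs or ''}", None
        time.sleep(10)  # poll every 10 seconds, as the old inline loop did
    # The final value drives update_dropdown, so something identifying the
    # trained model (e.g. "owner/name:version") is a reasonable guess here.
    yield logs + f"Training finished: {training.status}\n", str(training.output)
```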
+from dotenv import load_dotenv, find_dotenv
+_ = load_dotenv(find_dotenv())
+from src.utils import create_zip, add_to_prompt, update_dropdown
+from src.rep_api import replicate_caption_api, generate_image_replicate, traning_function, update_replicate_api_key
+import gradio as gr
+from PIL import Image
+import os
+import time
+import json
+
+
+# Prompt-builder suggestions surfaced as buttons in the "Prompt Support" accordion
+prompt_dict = {
+    "Character": ["Asian girl with black hair", "A man with blond hair", "A Cat girl anime character with purple hair", "A Green Alien with big black eyes"],
+    "Clothes": ["Wearing a blue jacket", "Wearing a black business suit", "Wearing a purple jumpsuit", "Wearing shorts and a white T-shirt"],
+    "Pose": ["Close up portrait", "Standing doing a peace sign", "Folding arms", "holding a phone"],
+    "Style": ["Simple white background", "Fashion runway", "Inside a business conference", "Inside a spaceship"]
+}
+style_json = "model_dict.json"
+model_dict = json.load(open(style_json, "r"))
+
+def process_images(files, model, context_text, token_string):
+    # token_string parameter added to match the process_button.click() wiring
+    # below, so the zip is built with the chosen trigger word rather than a
+    # hardcoded "TOK"
+    images = []
+    textbox = ""
+    for file in files:
+        print(file)
+        image = Image.open(file)
+        caption = replicate_caption_api(image, model, context_text)
+        textbox += f"Tags: {caption}, file: " + os.path.basename(file) + "\n"
+        images.append(image)
+    zip_path = create_zip(files, textbox, token_string)
+
+    return images, textbox, zip_path
+
+
+with gr.Blocks(theme="NoCrypt/miku") as demo:
+
+    with gr.Tabs() as tabs:
+        with gr.TabItem("Image Generator"):
+            gr.Markdown("# Image Generator")
+            with gr.Row():
+                with gr.Column():
+                    inp = gr.Textbox(label="Prompt")
+                    btn = gr.Button("Generate")
+                with gr.Column():
+                    ar = gr.Dropdown(["1:1", "16:9", "9:16", "5:3"], label="Aspect Ratio", info="Aspect Ratio")
+                    style_mode = gr.Dropdown(list(model_dict.keys()), label="Style LoRA")
+                    api_path = gr.Textbox(label="API_route", info="replicate api route goes here")
+
+            with gr.Accordion("Prompt Support", open=False):
+                for key, values in prompt_dict.items():
+                    with gr.Row():
+                        gr.Button(key, interactive=False)
+                        for value in values:
+                            # hidden Textbox carries the button's text into the callback
+                            gr.Button(value).click(add_to_prompt, inputs=[inp, gr.Textbox(value, visible=False)], outputs=inp)
+
+            with gr.Row():
+                gen_out = gr.Image(label="Generated Image", type="filepath")
+
+            btn.click(generate_image_replicate, inputs=[inp, api_path], outputs=gen_out, queue=True)
+
+        with gr.TabItem("Model Trainer"):
+            gr.Markdown("# Image Importing & Auto captions")
+            with gr.Row():
+                input_images = gr.File(file_count="multiple", type="filepath", label="Upload Images")
+                label_model = gr.Dropdown(["blip", "llava-16", "img2prompt"], label="Caption model", info="Auto caption model")
+                token_string = gr.Textbox(label="Token string", value="TOK", interactive=True,
+                                          info="A unique string that will be trained to refer to the concept in the input images. Can be anything, but TOK works well.")
+                context_text = gr.Textbox(label="Context Text", info="Context text for auto caption", value=" I want a description caption for this image")
+            # Replicate API Key input
+            replicate_api_key = gr.Textbox(
+                label="Replicate API Key",
+                info="API key for Replicate",
+                value=os.environ.get("REPLICATE_API_TOKEN", ""),
+                type="password"
+            )
+            api_key_status = gr.Textbox(label="API Key Status", interactive=False)
+
+            with gr.Row():
+                process_button = gr.Button("Process Images")
+            # Image outputs
+            with gr.Row():
+                gr.Markdown("# Training Captions Data")
+            with gr.Row():
+                with gr.Column():
+                    image_output = gr.Gallery(type="pil", object_fit="fill")
+                with gr.Column():
+                    text_output = gr.Textbox(interactive=True)
+            with gr.Row():
+                zip_output = gr.File(label="Zip file")
+                btn_update_zip = gr.Button("Update zip file")
+
+            # Training options
+            with gr.Row():
+                gr.Markdown("# Training on replicate")
+            with gr.Row():
+                traning_model = gr.Dropdown(["flux"], label="Training model", info="Base model to fine-tune")
+                traning_destination = gr.Textbox(label="destination", info="add in replicate model destination")
+                seed = gr.Number(label="Seed", value=42, info="Random seed integer for reproducible training. Leave empty to use a random seed.")
+                max_train_steps = gr.Number(label="max_train_steps", value=1000, info="Number of individual training steps. Takes precedence over num_train_epochs.")
+            # with gr.Row():
+            #     hf_repo_id = gr.Textbox(label="Hugging face repo id", info="Hugging Face repository ID, if you'd like to upload the trained LoRA to Hugging Face. For example, lucataco/flux-dev-lora.")
+            #     hf_token = gr.Textbox(label="Hugging face write token", info="Hugging Face token, if you'd like to upload the trained LoRA to Hugging Face.")
+            with gr.Row():
+                train_button = gr.Button("Train")
+            with gr.Row():
+                training_logs = gr.Textbox(label="Training logs")
+                traning_finnal = gr.Textbox(label="Training final")
+                # training_images = gr.Gallery(label="Training images")
+
+            # gr.Textbox("TOK", visible=False) was added to deal with oddities of the token string being a Gradio class
+            train_button.click(fn=traning_function, inputs=[zip_output, traning_model, traning_destination, seed, token_string, max_train_steps],  # ,hf_repo_id,hf_token
+                               outputs=[training_logs, traning_finnal], queue=True)
+            process_button.click(fn=process_images, inputs=[input_images, label_model, context_text, token_string], outputs=[image_output, text_output, zip_output], queue=True)
+            btn_update_zip.click(fn=create_zip, inputs=[image_output, text_output, token_string], outputs=zip_output)
+            # Refresh the Style LoRA dropdown once training reports its final output
+            traning_finnal.change(
+                fn=update_dropdown,
+                inputs=[traning_finnal, token_string],
+                outputs=style_mode
+            )
+            # Add event listener for API key changes
+            replicate_api_key.change(
+                fn=update_replicate_api_key,
+                inputs=[replicate_api_key],
+                outputs=[api_key_status]
+            )
+# jarvis-labs2024/sioux-flux
+demo.queue()  # Queue for concurrent users
 demo.launch(debug=True)
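The new app.py also depends on three helpers from src/utils.py that are not part of this commit. Purely as a guess at their shape from the call sites above (create_zip(files, textbox, token_string) returns a zip path; add_to_prompt(prompt, value) returns the updated prompt; update_dropdown(traning_finnal, token_string) refreshes the style dropdown), they might look like:

```python
# Speculative sketches of the src/utils.py helpers; signatures follow the
# call sites in app.py, the bodies are assumptions.
import json
import os
import zipfile
import gradio as gr

def create_zip(files, textbox, token_string, zip_path="training_data.zip"):
    # Assumption: pack each image plus a matching <stem>.txt caption that
    # starts with the trigger word -- the layout flux LoRA trainers accept.
    lines = textbox.splitlines()
    with zipfile.ZipFile(zip_path, "w") as zf:
        for file, line in zip(files, lines):
            name = os.path.basename(file)
            zf.write(file, name)
            zf.writestr(os.path.splitext(name)[0] + ".txt", f"{token_string}, {line}")
    return zip_path

def add_to_prompt(prompt, value):
    # Append a Prompt Support button's text to the current prompt.
    return f"{prompt}, {value}" if prompt else value

def update_dropdown(traning_finnal, token_string):
    # Assumption: persist the newly trained model ref in model_dict.json and
    # refresh the "Style LoRA" dropdown with it selected.
    with open("model_dict.json") as fh:
        d = json.load(fh)
    d[token_string] = traning_finnal
    with open("model_dict.json", "w") as fh:
        json.dump(d, fh, indent=2)
    return gr.update(choices=list(d.keys()), value=token_string)
```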
model_dict.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "Base": "black-forest-labs/flux-dev",
+  "Raylean": "jarvis-labs2024/flux-raylene:5574556226d11e0f10855a957d91f118a9178c8fc77e7e7b18830627ce3184f1",
+  "Alice": "jarvis-labs2024/flux-raylene:5574556226d11e0f10855a957d91f118a9178c8fc77e7e7b18830627ce3184f1",
+  "AppleSeed": "jarvis-labs2024/flux-appleseed:0aecb9fdfb17a2517112cc70b4a1898aa7791da84a010419782ce7043481edec",
+  "console_cowboy_flux": "jarvis-labs2024/console_cowboy_flux:53ff894d719f73dc11ca54fdb6ecf044d7d202aa30fce43236fbfda30b19ef62"
+}
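For context, each model_dict.json value is a Replicate model ref: a bare owner/name for the base flux-dev model, or owner/name:version for a trained LoRA. A minimal sketch of how an entry could be run (the real call lives in src/rep_api.py's generate_image_replicate and may differ; the input keys here are assumptions):

```python
# Illustrative only: running a model_dict.json entry on Replicate.
import json
import replicate

model_dict = json.load(open("model_dict.json"))

def generate(prompt, style="Base", aspect_ratio="1:1"):
    ref = model_dict[style]  # "owner/name" or "owner/name:version"
    output = replicate.run(ref, input={"prompt": prompt, "aspect_ratio": aspect_ratio})
    return output[0]  # first generated image (URL or file-like object)

# e.g. generate("portrait photo of TOK, studio lighting", style="AppleSeed")
```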
requirments.txt
ADDED
@@ -0,0 +1,6 @@
+gradio
+fal
+fal-client
+numpy
+replicate
+python-dotenv