ahmedfaiyaz
committed
Commit • 396d9d8
Parent(s): 5629976
added zero gpu support
app.py CHANGED
@@ -2,6 +2,7 @@ from diffusers import DiffusionPipeline
 from typing import List, Optional, Tuple, Union
 import torch
 import gradio as gr
+import spaces
 css="""
 #input-panel{
     align-items:center;
@@ -443,13 +444,13 @@ character_mappings_model_wise={
 }
 
 
-
+@spaces.GPU
 def generate(modelname:str,input_text:str,batch_size:int,inference_steps:int):
     batch_size=int(batch_size)
     inference_steps=int(inference_steps)
     print(f"Generating image with label:{character_mappings_model_wise[current_model][input_text]} batch size:{batch_size}")
     label=int(character_mappings_model_wise[current_model][input_text])
-    pipeline.embedding=torch.tensor([label],device="
+    pipeline.embedding=torch.tensor([label],device="cuda") #testing zero gpu
     generate_image=pipeline(batch_size=batch_size,num_inference_steps=inference_steps).images
     return generate_image
 
@@ -457,6 +458,8 @@ def generate(modelname:str,input_text:str,batch_size:int,inference_steps:int):
 def switch_pipeline(modelname:str):
     global pipeline
     pipeline = DiffusionPipeline.from_pretrained(modelname,custom_pipeline="ahmedfaiyaz/OkkhorDiffusion",embedding=torch.int16)
+    pipeline.to('cuda')
+
     global current_model
     current_model=modelname
     return f"""
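For context, the change follows the usual Hugging Face ZeroGPU pattern: import the spaces package, decorate the GPU-bound inference function with @spaces.GPU, and place the pipeline on CUDA outside that function. The sketch below restates that pattern in isolation; MODEL_ID and the simplified generate signature are placeholders, not the Space's actual app.py.

import torch
import spaces  # available on Hugging Face ZeroGPU Spaces
from diffusers import DiffusionPipeline

MODEL_ID = "your-username/your-checkpoint"  # placeholder, not the Space's real repo id

# Load the custom OkkhorDiffusion pipeline once at startup (pipeline id taken from the diff above).
pipeline = DiffusionPipeline.from_pretrained(
    MODEL_ID,
    custom_pipeline="ahmedfaiyaz/OkkhorDiffusion",
)
pipeline.to("cuda")  # move the pipeline to CUDA; on ZeroGPU the GPU is only attached while a decorated call runs

@spaces.GPU  # a GPU is allocated for the duration of each call to this function
def generate(label: int, batch_size: int, num_inference_steps: int):
    # Condition the custom pipeline on the character label, then sample a batch of images.
    pipeline.embedding = torch.tensor([label], device="cuda")
    return pipeline(batch_size=batch_size, num_inference_steps=num_inference_steps).images

In the commit itself, pipeline.to('cuda') lives in switch_pipeline rather than in the decorated generate, which presumably keeps model loading and weight transfer out of the GPU allocation that @spaces.GPU requests for each generation call.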