ahmedfaiyaz committed
Commit dfed960
Parent(s): 396d9d8
reverting :(
app.py CHANGED
@@ -2,7 +2,6 @@ from diffusers import DiffusionPipeline
 from typing import List, Optional, Tuple, Union
 import torch
 import gradio as gr
-import spaces
 css="""
 #input-panel{
     align-items:center;
@@ -444,13 +443,12 @@ character_mappings_model_wise={
 }
 
 
-@spaces.GPU
 def generate(modelname:str,input_text:str,batch_size:int,inference_steps:int):
     batch_size=int(batch_size)
     inference_steps=int(inference_steps)
     print(f"Generating image with label:{character_mappings_model_wise[current_model][input_text]} batch size:{batch_size}")
     label=int(character_mappings_model_wise[current_model][input_text])
-    pipeline.embedding=torch.tensor([label],device="
+    pipeline.embedding=torch.tensor([label],device="cpu") #testing zero gpu
     generate_image=pipeline(batch_size=batch_size,num_inference_steps=inference_steps).images
     return generate_image
 
@@ -458,7 +456,7 @@ def generate(modelname:str,input_text:str,batch_size:int,inference_steps:int):
 def switch_pipeline(modelname:str):
     global pipeline
     pipeline = DiffusionPipeline.from_pretrained(modelname,custom_pipeline="ahmedfaiyaz/OkkhorDiffusion",embedding=torch.int16)
-
+
 
     global current_model
     current_model=modelname