twodgirl committed on
Commit
c05f545
1 Parent(s): 32177db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -74,7 +74,7 @@ def generate_image(prompt,
74
  sft_format=processor.sft_format,
75
  system_prompt='')
76
  text = text + processor.image_start_tag
77
- input_ids = torch.LongTensor(processor.tokenizer.encode(prompt))
78
  output, patches = generate(input_ids,
79
  width // 16 * 16,
80
  height // 16 * 16,
@@ -89,8 +89,8 @@ with gr.Blocks() as demo:
89
  with gr.Row():
90
  with gr.Column():
91
  prompt = gr.Textbox(label='Prompt', value='portrait, color, cinematic')
92
- width = gr.Slider(128, 1536, 128, step=16, label='Width')
93
- height = gr.Slider(128, 1536, 128, step=16, label='Height')
94
  guidance = gr.Slider(1.0, 10.0, 5, step=0.1, label='Guidance')
95
  seed = gr.Number(-1, precision=0, label='Seed (-1 for random)')
96
 
@@ -119,8 +119,11 @@ if __name__ == '__main__':
119
  tokenizer = processor.tokenizer
120
  # model: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
121
  config = AutoConfig.from_pretrained(model_path)
122
- config.language_config._attn_implementation = 'eager'
123
- model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
 
 
 
124
  if torch.cuda.is_available():
125
  model = model.to(torch.bfloat16).cuda()
126
  else:
 
74
  sft_format=processor.sft_format,
75
  system_prompt='')
76
  text = text + processor.image_start_tag
77
+ input_ids = torch.LongTensor(processor.tokenizer.encode(text))
78
  output, patches = generate(input_ids,
79
  width // 16 * 16,
80
  height // 16 * 16,
 
89
  with gr.Row():
90
  with gr.Column():
91
  prompt = gr.Textbox(label='Prompt', value='portrait, color, cinematic')
92
+ width = gr.Slider(64, 1536, 384, step=16, label='Width')
93
+ height = gr.Slider(64, 1536, 384, step=16, label='Height')
94
  guidance = gr.Slider(1.0, 10.0, 5, step=0.1, label='Guidance')
95
  seed = gr.Number(-1, precision=0, label='Seed (-1 for random)')
96
 
 
119
  tokenizer = processor.tokenizer
120
  # model: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
121
  config = AutoConfig.from_pretrained(model_path)
122
+ language_config = config.language_config
123
+ language_config._attn_implementation = 'eager'
124
+ model = AutoModelForCausalLM.from_pretrained(model_path,
125
+ language_config=language_config,
126
+ trust_remote_code=True)
127
  if torch.cuda.is_available():
128
  model = model.to(torch.bfloat16).cuda()
129
  else: