import base64
import io

import gradio as gr
import spaces
import torch
from torch import Tensor, nn
from transformers import (CLIPTextModel, CLIPTokenizer, T5EncoderModel,
                          T5Tokenizer)

class HFEmbedder(nn.Module):
    """Thin wrapper around a Hugging Face text encoder (CLIP or T5) that returns one tensor."""

    def __init__(self, version: str, max_length: int, **hf_kwargs):
        super().__init__()
        self.is_clip = version.startswith("openai")
        self.max_length = max_length
        # CLIP supplies the pooled "vec" embedding; T5 supplies the per-token
        # "txt" sequence used for cross-attention
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"

        if self.is_clip:
            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
        else:
            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)

        # Inference only: put the encoder in eval mode and freeze its weights
        self.hf_module = self.hf_module.eval().requires_grad_(False)

    def forward(self, text: list[str]) -> Tensor:
        # Pad/truncate every prompt to exactly max_length tokens so the
        # encoder output always has a fixed sequence length
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )

        outputs = self.hf_module(
            input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )
        return outputs[self.output_key]



def load_t5(device: str | torch.device = "cuda", max_length: int = 512) -> HFEmbedder:
    # max_length of 64, 128, 256, or 512 should all work, as long as your
    # prompts are short enough to fit
    return HFEmbedder("lnyan/t5-v1_1-xxl-encoder", max_length=max_length, torch_dtype=torch.bfloat16).to(device)


def load_clip(device: str | torch.device = "cuda") -> HFEmbedder:
    return HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)

# @spaces.GPU(duration=20)
def load_encoders():
    # flux-schnell uses a 256-token T5 context; flux-dev uses 512
    is_schnell = True
    t5 = load_t5("cuda", max_length=256 if is_schnell else 512)
    clip = load_clip("cuda")
    return t5, clip

def b64(txt, vec):
    # Serialize the (T5, CLIP) embedding pair with torch.save and base64-encode
    # it so it can travel through Gradio's text output as a plain string
    buffer = io.BytesIO()
    torch.save((txt, vec), buffer)
    buffer.seek(0)
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return encoded
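
# A minimal sketch of the inverse of b64 above, for downstream use. The app
# itself never calls this; it only shows how a client could turn the returned
# string back into the (txt, vec) tensor pair.
def decode_b64(encoded: str) -> tuple[Tensor, Tensor]:
    # base64-decode back to bytes, then let torch.load rebuild the tuple
    buffer = io.BytesIO(base64.b64decode(encoded))
    txt, vec = torch.load(buffer, map_location="cpu")
    return txt, vec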

# Load both encoders once at startup so each request only pays for inference
t5, clip = load_encoders()

@spaces.GPU(duration=10)
def convert(prompt):
    # Accept a single prompt or a batch of prompts
    if isinstance(prompt, str):
        prompt = [prompt]
    txt = t5(prompt)    # per-token T5 features, [B, max_length, 4096]
    vec = clip(prompt)  # pooled CLIP embedding, [B, 768]
    return b64(txt, vec)

with gr.Blocks() as demo:
    gr.Markdown("""A workaround for flux-flax to fit into 40G VRAM: this Space runs the T5 and CLIP text encoders and returns the prompt embeddings as a base64 string.""")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="prompt")
            convert_btn = gr.Button(value="Convert")
        with gr.Column():
            output = gr.Textbox(label="output")

    convert_btn.click(convert, inputs=prompt, outputs=output, api_name="convert")


demo.launch()
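
# A minimal usage sketch (kept as comments, since it belongs in a separate
# client process) of calling the /convert endpoint registered above. The
# Space id "user/flux-text-encoders" is a placeholder for wherever this app
# is actually hosted:
#
#     from gradio_client import Client
#
#     client = Client("user/flux-text-encoders")  # hypothetical Space id
#     encoded = client.predict("a photo of a forest", api_name="/convert")
#     txt, vec = decode_b64(encoded)  # see decode_b64 above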