RamAnanth1 committed b783390 (parent: d2690a6)
Update ldm/modules/encoders/modules.py
ldm/modules/encoders/modules.py
CHANGED
@@ -57,7 +57,7 @@ def disabled_train(self, mode=True):
 
 class FrozenT5Embedder(AbstractEncoder):
     """Uses the T5 transformer encoder for text"""
-    def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True):  # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
+    def __init__(self, version="google/t5-v1_1-large", device="cpu", max_length=77, freeze=True):  # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
         super().__init__()
         self.tokenizer = T5Tokenizer.from_pretrained(version)
         self.transformer = T5EncoderModel.from_pretrained(version)
@@ -92,7 +92,7 @@ class FrozenCLIPEmbedder(AbstractEncoder):
         "pooled",
         "hidden"
     ]
-    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77,
+    def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
                  freeze=True, layer="last", layer_idx=None):  # clip-vit-base-patch32
         super().__init__()
         assert layer in self.LAYERS
@@ -140,7 +140,7 @@ class FrozenOpenCLIPEmbedder(AbstractEncoder):
         "last",
         "penultimate"
     ]
-    def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
+    def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cpu", max_length=77,
                  freeze=True, layer="last"):
         super().__init__()
         assert layer in self.LAYERS
@@ -194,7 +194,7 @@ class FrozenOpenCLIPEmbedder(AbstractEncoder):
 
 
 class FrozenCLIPT5Encoder(AbstractEncoder):
-    def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda",
+    def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cpu",
                  clip_max_length=77, t5_max_length=77):
         super().__init__()
         self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length)
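
Hard-coding "cpu" keeps the Space from crashing on CPU hardware, but GPU users must now pass device="cuda" themselves. A device-agnostic default is a common alternative; a sketch of that pattern, with a hypothetical pick_device helper that is not part of this commit:

    import torch

    def pick_device() -> str:
        # Hypothetical helper: prefer CUDA when available, otherwise fall back
        # to CPU, so the same call works on GPU and CPU-only Spaces hardware.
        return "cuda" if torch.cuda.is_available() else "cpu"

    # Usage: pass the detected device instead of relying on a hard-coded default.
    # embedder = FrozenT5Embedder(version="google/t5-v1_1-large", device=pick_device())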