Commit e4bfc4a committed by freQuensy23
1 Parent(s): d4fbf47

Fix torch

Files changed:
- README.md (+2 -2)
- generators.py (+2 -2)
README.md CHANGED

@@ -4,8 +4,8 @@
 colorFrom: yellow
 colorTo: pink
 sdk: gradio
-sdk_version: 4.
-python_version: 3.
+sdk_version: 4.41.0
+python_version: 3.10.13
 app_file: app.py
 pinned: false
 license: mit
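The two pinned keys are standard Spaces front-matter settings: sdk_version selects the Gradio release and python_version selects the Python runtime the Space is built with. For reference, the front matter after this commit reads roughly as below; this is a sketch reconstructed from the hunk alone (the title and emoji lines above line 4 are not shown in the diff, and the previous sdk_version/python_version values are truncated in the commit view).

colorFrom: yellow
colorTo: pink
sdk: gradio
sdk_version: 4.41.0
python_version: 3.10.13
app_file: app.py
pinned: false
license: mit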
generators.py CHANGED

@@ -60,7 +60,7 @@ async def generate_llama2(system_input, user_input):
     yield message.choices[0].delta.content


-@spaces.GPU
+@spaces.GPU(duration=120)
 async def generate_openllama(system_input, user_input):
     model_path = 'openlm-research/open_llama_3b_v2'
     tokenizer = LlamaTokenizer.from_pretrained(model_path)
@@ -74,7 +74,7 @@ async def generate_openllama(system_input, user_input):
     return tokenizer.decode(output[0], skip_special_tokens=True)


-@spaces.GPU
+@spaces.GPU(duration=120)
 async def generate_bloom(system_input, user_input):
     model_path = 'bigscience/bloom-7b1'
     tokenizer = AutoTokenizer.from_pretrained(model_path)
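The only code change is the decorator argument. @spaces.GPU is the ZeroGPU decorator from Hugging Face's spaces package; passing duration=120 asks for the GPU to stay attached for up to 120 seconds per call instead of the shorter default, giving the 3B/7B models time to load and generate before the allocation is reclaimed. Below is a minimal sketch of how the decorated function plausibly looks after this commit; only the decorator, the signature, model_path, the tokenizer line, and the final decode appear in the diff, so the model loading, prompt construction, and generate() call are illustrative assumptions.

import spaces
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer


@spaces.GPU(duration=120)  # request a ZeroGPU slot for up to 120 s per call
async def generate_openllama(system_input, user_input):
    model_path = 'openlm-research/open_llama_3b_v2'
    tokenizer = LlamaTokenizer.from_pretrained(model_path)
    # Assumed: load the model onto the GPU the decorator allocates
    model = LlamaForCausalLM.from_pretrained(
        model_path, torch_dtype=torch.float16, device_map='cuda'
    )
    # Assumed prompt format; the real template is not visible in the diff
    prompt = f"{system_input}\n{user_input}"
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to('cuda')
    output = model.generate(input_ids, max_new_tokens=256)
    return tokenizer.decode(output[0], skip_special_tokens=True)

The same duration=120 argument is applied to generate_bloom, which loads the larger bigscience/bloom-7b1 checkpoint and therefore benefits even more from the extended window.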