tricktreat committed
Commit a948da3 · 1 Parent(s): 2da68ac

add hint

Files changed:
- app.py +1 -0
- awesome_chat.py +1 -1
- models_server.py +4 -4
app.py
CHANGED
@@ -128,6 +128,7 @@ with gr.Blocks(css=css) as demo:
     gr.Markdown("<p align='center'><img src='https://i.ibb.co/qNH3Jym/logo.png' height='25' width='95'></p>")
     gr.Markdown("<p align='center' style='font-size: 20px;'>A system to connect LLMs with ML community. See our <a href='https://github.com/microsoft/JARVIS'>Project</a> and <a href='http://arxiv.org/abs/2303.17580'>Paper</a>.</p>")
     gr.HTML('''<center><a href="https://huggingface.co/spaces/microsoft/HuggingGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key and Hugging Face Token</center>''')
+    gr.HTML('''<center>Note: Only a few models are deployed in the local inference endpoint due to hardware limitations. In addition, online HuggingFace inference endpoints may sometimes not be available. Thus the capability of HuggingGPT is limited.</center>''')
     if not OPENAI_KEY:
         with gr.Row().style():
             with gr.Column(scale=0.85):
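The added line is a static gr.HTML notice placed inside the existing gr.Blocks layout, just before the API-key prompt. A minimal standalone sketch of the same pattern follows (assuming only that gradio is installed; the demo name and surrounding markup are illustrative, not taken from app.py):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("<p align='center'>HuggingGPT</p>")
    # The commit inserts a plain HTML notice like this one so visitors see the
    # capability caveat before they are asked for an OpenAI key.
    gr.HTML('''<center>Note: Only a few models are deployed in the local inference endpoint due to hardware limitations.</center>''')

if __name__ == "__main__":
    demo.launch()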
awesome_chat.py
CHANGED
@@ -49,7 +49,7 @@ if LOG_HF_TOKEN:
 )
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.
+logger.setLevel(logging.CRITICAL)
 
 handler = logging.StreamHandler()
 formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
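The only change here raises the module logger's threshold to CRITICAL, which silences the chat module's own debug/info output on the public Space. A minimal sketch of the same standard-library logging pattern (the handler and formatter lines mirror the surrounding diff context; the example messages are illustrative):

import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.CRITICAL)  # only CRITICAL records pass the logger's filter

handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

logger.debug("hidden on the Space")  # filtered out at CRITICAL level
logger.critical("still printed")     # passes the threshold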
models_server.py
CHANGED
@@ -250,10 +250,10 @@ def load_pipes(local_deployment):
             # "model": pipeline(task="translation", model=f"t5-base"),
             # "device": "cuda:0"
             # },
-            "impira/layoutlm-document-qa": {
-                "model": pipeline(task="document-question-answering", model=f"{local_models}impira/layoutlm-document-qa"),
-                "device": "cuda:0"
-            },
+            # "impira/layoutlm-document-qa": {
+            # "model": pipeline(task="document-question-answering", model=f"{local_models}impira/layoutlm-document-qa"),
+            # "device": "cuda:0"
+            # },
             "ydshieh/vit-gpt2-coco-en": {
                 "model": pipeline(task="image-to-text", model=f"{local_models}ydshieh/vit-gpt2-coco-en"),
                 "device": "cuda:0"
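load_pipes builds a dictionary that maps model IDs to a ready transformers pipeline plus a target device; commenting an entry out, as done here for impira/layoutlm-document-qa, simply drops that model from local deployment. A minimal sketch of the pattern (assuming transformers is installed; local_models is a path prefix for locally cached checkpoints, and only the vit-gpt2 entry from the diff is shown):

from transformers import pipeline

local_models = ""  # prefix for locally downloaded checkpoints (assumption)

def load_pipes(local_deployment):
    pipes = {}
    if local_deployment:
        pipes = {
            # Entries can be disabled by commenting them out, as the commit
            # does for impira/layoutlm-document-qa to save GPU memory.
            "ydshieh/vit-gpt2-coco-en": {
                "model": pipeline(task="image-to-text", model=f"{local_models}ydshieh/vit-gpt2-coco-en"),
                "device": "cuda:0",
            },
        }
    return pipes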