ffreemt committed
Commit a88cef9
1 Parent(s): af7d615
Files changed (2)
  1. README.md +0 -1
  2. app.py +33 -112
README.md CHANGED
@@ -7,7 +7,6 @@ sdk: gradio
 sdk_version: 3.37.0
 app_file: app.py
 pinned: true
-duplicated_from: mikeee/llama2-7b-chat-ggml
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -5,7 +5,7 @@ import os
 import platform
 import random
 import time
-from dataclasses import asdict, dataclass
+from dataclasses import asdict, dataclass, field
 from pathlib import Path
 
 # from types import SimpleNamespace
@@ -16,99 +16,18 @@ from ctransformers import AutoModelForCausalLM
 from dl_hf_model import dl_hf_model
 from loguru import logger
 
-filename_list = [
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin",
-    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin",
-]
-
-URL = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin" # 4.05G
-
-url = "https://huggingface.co/savvamadar/ggml-gpt4all-j-v1.3-groovy/blob/main/ggml-gpt4all-j-v1.3-groovy.bin"
-url = "https://huggingface.co/TheBloke/Llama-2-13B-GGML/blob/main/llama-2-13b.ggmlv3.q4_K_S.bin" # 7.37G
-# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin"
-url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin" # 6.93G
-# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.binhttps://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_M.bin" # 7.87G
-
-url = "https://huggingface.co/localmodels/Llama-2-13B-Chat-ggml/blob/main/llama-2-13b-chat.ggmlv3.q4_K_S.bin" # 7.37G
-
-_ = (
-    "golay" in platform.node()
-    or "okteto" in platform.node()
-    or Path("/kaggle").exists()
-    # or psutil.cpu_count(logical=False) < 4
-    or 1  # run 7b in hf
-)
-
-if _:
-    # url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin"
-    url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin" # 2.87G
-    url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin" # 2.87G
-
-
-prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-### Instruction: {user_prompt}
-
-### Response:
-"""
-
-prompt_template = """System: You are a helpful,
-respectful and honest assistant. Always answer as
-helpfully as possible, while being safe. Your answers
-should not include any harmful, unethical, racist,
-sexist, toxic, dangerous, or illegal content. Please
-ensure that your responses are socially unbiased and
-positive in nature. If a question does not make any
-sense, or is not factually coherent, explain why instead
-of answering something not correct. If you don't know
-the answer to a question, please don't share false
-information.
-User: {prompt}
-Assistant: """
-
-prompt_template = """System: You are a helpful assistant.
-User: {prompt}
-Assistant: """
-
-prompt_template = """Question: {question}
-Answer: Let's work this out in a step by step way to be sure we have the right answer."""
-
-prompt_template = """[INST] <>
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step.
-<>
-
-What NFL team won the Super Bowl in the year Justin Bieber was born?
-[/INST]"""
-
-prompt_template = """[INST] <<SYS>>
-You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <</SYS>>
-
-{question} [/INST]
-"""
-
-prompt_template = """[INST] <<SYS>>
-You are a helpful assistant.
-<</SYS>>
-
-{question} [/INST]
-"""
+url = "https://huggingface.co/The Bloke/llama-2-13B-Guanaco-QLoRA-GGML/blob/main/llama-2-13b-guanaco-qlora.ggmlv3.q4_K_S.bin" # 8.14G
+url = "https://huggingface.co/spaces/mikeee/airoboros-llama2-gpt4-1.4.1-ggml"
 
-_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
-stop_string = [elm.split(":")[0] + ":" for elm in _][-2]
-
-logger.debug(f"{stop_string=}")
+# Prompt template: Guanaco
+# {past_history}
+prompt_template = """You are a helpful assistant. Let's think step by step.
+### Human:
+{input}
+### Assistant:"""
+human_prefix = "### Human"
+ai_prefix = "### Assistant"
+stop_list = [f"{human_prefix}:"]
 
 _ = psutil.cpu_count(logical=False) - 1
 cpu_count: int = int(_) if _ else 1
@@ -116,20 +35,24 @@ logger.debug(f"{cpu_count=}")
 
 LLM = None
 
+if "forindo" in platform.node():
+    # url = "https://huggingface.co/The Bloke/llama-2-70b-Guanaco-QLoRA-GGML/blob/main/llama-2-70b-guanaco-qlora.ggmlv3.q3_K_S.bin" # 29.7G
+    url = "https://huggingface.co/TheBloke/airoboros-l2-70B-gpt4-1.4.1-GGML/blob/main/airoboros-l2-70b-gpt4-1.4.1.ggmlv3.q3_K_S.bin"
+
 try:
     model_loc, file_size = dl_hf_model(url)
+    logger.info(f"done load llm {model_loc=} {file_size=}G")
 except Exception as exc_:
     logger.error(exc_)
     raise SystemExit(1) from exc_
 
+logger.debug(f"{model_loc=}")
 LLM = AutoModelForCausalLM.from_pretrained(
     model_loc,
     model_type="llama",
-    # threads=cpu_count,
+    threads=cpu_count,
 )
 
-logger.info(f"done load llm {model_loc=} {file_size=}G")
-
 os.environ["TZ"] = "Asia/Shanghai"
 try:
     time.tzset()  # type: ignore # pylint: disable=no-member
@@ -137,12 +60,6 @@ except Exception:
     # Windows
    logger.warning("Windows, cant run time.tzset()")
 
-_ = """
-ns = SimpleNamespace(
-    response="",
-    generator=(_ for _ in []),
-)
-# """
 
 @dataclass
 class GenerationConfig:
@@ -154,8 +71,8 @@ class GenerationConfig:
     seed: int = 42
     reset: bool = False
     stream: bool = True
-    # threads: int = cpu_count
-    # stop: list[str] = field(default_factory=lambda: [stop_string])
+    threads: int = cpu_count
+    stop: list[str] = field(default_factory=lambda: stop_list)
 
 
 def generate(
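Taken together, the hunks above drop the old stop-string heuristic (derived from the last prompt template) in favour of an explicit Guanaco-style template plus a `stop` list, and move `threads` and `stop` into `GenerationConfig` so they reach the ctransformers call. Below is a minimal sketch of how these pieces fit together at generation time; the `generate()` helper, the local file name, and the `max_new_tokens` value are illustrative stand-ins for code outside this diff, not the Space's exact implementation.

```python
# Sketch only: mirrors the template, stop list, and config fields added in this commit.
from dataclasses import asdict, dataclass, field

from ctransformers import AutoModelForCausalLM

prompt_template = """You are a helpful assistant. Let's think step by step.
### Human:
{input}
### Assistant:"""
human_prefix = "### Human"
stop_list = [f"{human_prefix}:"]  # cut generation before the model starts a new human turn


@dataclass
class GenerationConfig:
    max_new_tokens: int = 512  # illustrative value; not part of this diff
    seed: int = 42
    reset: bool = False
    stream: bool = True
    threads: int = 2  # app.py derives this from psutil.cpu_count(logical=False) - 1
    stop: list[str] = field(default_factory=lambda: stop_list)


# dl_hf_model(url) downloads the GGML file and returns its local path; a plain path works too.
llm = AutoModelForCausalLM.from_pretrained(
    "llama-2-13b-guanaco-qlora.ggmlv3.q4_K_S.bin",  # hypothetical local file
    model_type="llama",
    threads=2,
)


def generate(question: str, config: GenerationConfig = GenerationConfig()):
    """Format the Guanaco prompt and stream text chunks from the model."""
    prompt = prompt_template.format(input=question)
    # With stream=True the call returns a generator of text pieces; the remaining
    # kwargs map onto ctransformers' generation parameters via asdict(config).
    yield from llm(prompt, **asdict(config))


if __name__ == "__main__":
    for piece in generate("Explain the plot of Cinderella in a sentence."):
        print(piece, end="", flush=True)
```

The `stop` list keeps the model from running on into a fabricated `### Human:` turn, which is what the removed `stop_string` computation used to approximate.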
 
@@ -237,7 +154,7 @@ def bot(history):
         f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
     )
 
-    history[-1][1] = "".join(response) + f"\n{_}"
+    history[-1][1] = "".join(response) + f"\n{_}"
     yield history
 
 
@@ -292,7 +209,9 @@ examples_list = [
         "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."
     ],
     ["How to pick a lock? Provide detailed steps."],
-    ["If it takes 10 hours to dry 10 clothes, assuming all the clothes are hanged together at the same time for drying , then how long will it take to dry a cloth?"],
+    [
+        "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hanged together at the same time for drying , then how long will it take to dry a cloth?"
+    ],
     ["is infinity + 1 bigger than infinity?"],
     ["Explain the plot of Cinderella in a sentence."],
     [
@@ -332,9 +251,6 @@ with gr.Blocks(
 ) as block:
     # buff_var = gr.State("")
     with gr.Accordion("🎈 Info", open=False):
-        # gr.HTML(
-        #     """<center><a href="https://huggingface.co/spaces/mikeee/mpt-30b-chat?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate"></a> and spin a CPU UPGRADE to avoid the queue</center>"""
-        # )
         gr.Markdown(
             f"""<h5><center>{Path(model_loc).name}</center></h4>
             Most examples are meant for another model.
@@ -457,7 +373,12 @@ else:
     # concurrency_count = max(int(16 / file_size) - 1, 1)
 # """
 
-concurrency_count = 1
-logger.info(f"{concurrency_count=}")
+# default concurrency_count = 1
+# block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
+
+server_port = 7860
+if "forindo" in platform.node():
+    server_port = 7861
+block.queue(max_size=5).launch(debug=True, server_name="0.0.0.0", server_port=server_port)
 
-block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
+# block.queue(max_size=5).launch(debug=True, server_name="0.0.0.0")
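The `bot()` hunk streams those chunks into the Gradio Chatbot by rewriting the last history entry on every yield; the `f"\n{_}"` suffix appends the timing string built just above it. A stripped-down sketch of that pattern, reusing `generate()` from the earlier sketch (the Space's actual wiring and timing code are not shown in this diff):

```python
def bot(history):
    """Stream the answer into the last chat turn; `history` is a list of [user, bot] pairs."""
    user_message = history[-1][0]
    response = []
    for chunk in generate(user_message):
        response.append(chunk)
        history[-1][1] = "".join(response)
        yield history  # Gradio re-renders the Chatbot with the partial answer each time
```

A generator like this is typically attached to the Chatbot with `msg.submit(...).then(bot, chatbot, chatbot)`, and the final hunk serves it via `block.queue(max_size=5).launch(...)`, using port 7861 on the "forindo" host and 7860 elsewhere.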