Update to Swahili model
app.py
CHANGED
@@ -14,7 +14,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 DESCRIPTION = """\
 # Tamil Llama 2
 
-This Space demonstrates the
+This Space demonstrates the [Swahili (Jacaranda) model](https://huggingface.co/Jacaranda/UlizaLlama), fine-tuned from Llama-2 7B and used as a daily-life AI assistant.
 """
 
 LICENSE = """
@@ -25,17 +25,18 @@ As a derivate work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-
 this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
 """
 
-SYSTEM_PROMPT = "
+SYSTEM_PROMPT = ""
 
-PROMPT_TEMPLATE = """
+PROMPT_TEMPLATE = """Below is a chat history between a user and an AI assistant (you). The user gives instructions. Write a response that appropriately completes the latest request.\n\n{% for message in messages %}{% if message['role'] == 'user' %}{{ '\n### Instruction:\n' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{ '\n### Response:\n' + message['content'] + '\n'}}{% endif %}{% endfor %}\n\n### Response:\n"""
 
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 if torch.cuda.is_available():
-    model_id = "
+    model_id = "Jacaranda/UlizaLlama"
     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
     tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
     tokenizer.chat_template = PROMPT_TEMPLATE
     tokenizer.use_default_system_prompt = False
 
@@ -125,11 +126,11 @@ chat_interface = gr.ChatInterface(
     ],
     stop_btn=None,
     examples=[
-        ["
-        ["
-        ["
-        ["
-        ["
+        ["Ninawezaje kupata usingizi haraka?"],
+        ["Bosi wangu anadhibiti sana, nifanye nini?"],
+        ["Je, ni vipindi gani muhimu katika historia vya kujua kuvihusu?"],
+        ["Ni kazi gani nzuri ikiwa ninataka kupata pesa lakini pia kufurahiya?"],
+        ["Nivae nini kwenye harusi?"],
     ],
 )
 
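For reference, the Jinja string assigned to `tokenizer.chat_template` is what `tokenizer.apply_chat_template` uses to turn a message list into the prompt the model sees. Below is a minimal sketch of how the new template renders a conversation, assuming the standard `transformers` chat-template API; the example message is the first demo prompt from the diff, and rendering with `tokenize=False` is purely illustrative:

```python
from transformers import AutoTokenizer

# Template string exactly as added in the diff above.
PROMPT_TEMPLATE = """Below is a chat history between a user and an AI assistant (you). The user gives instructions. Write a response that appropriately completes the latest request.\n\n{% for message in messages %}{% if message['role'] == 'user' %}{{ '\n### Instruction:\n' + message['content'] + '\n'}}{% elif message['role'] == 'assistant' %}{{ '\n### Response:\n' + message['content'] + '\n'}}{% endif %}{% endfor %}\n\n### Response:\n"""

tokenizer = AutoTokenizer.from_pretrained("Jacaranda/UlizaLlama")
tokenizer.chat_template = PROMPT_TEMPLATE

messages = [
    # "How can I fall asleep quickly?" -- first example prompt in the diff
    {"role": "user", "content": "Ninawezaje kupata usingizi haraka?"},
]

# tokenize=False returns the rendered string rather than token ids,
# which makes the Instruction/Response framing easy to inspect.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```

Because the template unconditionally ends with a bare `### Response:` block, the assistant's turn is already cued and no `add_generation_prompt=True` flag is needed.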
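One caveat on the new pad-token line: `tokenizer.add_special_tokens({'pad_token': '[PAD]'})` grows the tokenizer's vocabulary if `[PAD]` is not already in the checkpoint, but it does not grow the model's embedding table, so any input that actually contains the new id would index out of range. A hedged sketch of the usual companion step, which this diff does not include (`resize_token_embeddings` is the standard `transformers` remedy):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Jacaranda/UlizaLlama"
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)

# add_special_tokens returns the number of genuinely new tokens.
num_added = tokenizer.add_special_tokens({"pad_token": "[PAD]"})
if num_added > 0:
    # Grow the input/output embeddings to cover the new token id.
    model.resize_token_embeddings(len(tokenizer))

# A common alternative that avoids resizing altogether is to reuse
# an existing token as the pad token:
#   tokenizer.pad_token = tokenizer.eos_token
```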