israel committed
Commit c76fb34
1 Parent(s): 43d4cb6

Update app.py

Files changed (1)
  1. app.py +11 -11
app.py CHANGED
@@ -1,27 +1,27 @@
  import streamlit as st


- # import torch
- # from transformers import pipeline
- # from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ import torch
+ from transformers import pipeline
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig


- # quantization_config = BitsAndBytesConfig(load_in_4bit=True)
+ quantization_config = BitsAndBytesConfig(load_in_4bit=True)


- # model_name = "masakhane/zephyr-7b-gemma-sft-african-alpaca"
+ model_name = "masakhane/zephyr-7b-gemma-sft-african-alpaca"

- # tokenizer = AutoTokenizer.from_pretrained(model_name)
- # model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)


- # pipe = pipeline("text-generation", model=model,tokenizer=tokenizer, torch_dtype=torch.bfloat16, device_map="auto")
+ pipe = pipeline("text-generation", model=model,tokenizer=tokenizer, torch_dtype=torch.bfloat16, device_map="auto")


- import torch
- from transformers import pipeline
+ # import torch
+ # from transformers import pipeline

- pipe = pipeline("text-generation", model="masakhane/zephyr-7b-gemma-sft-african-alpaca", torch_dtype=torch.bfloat16, device_map="auto")
+ # pipe = pipeline("text-generation", model="masakhane/zephyr-7b-gemma-sft-african-alpaca", torch_dtype=torch.bfloat16, device_map="auto")

  # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
  # messages = [
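
For readers following the change: the sketch below is not part of the commit. It shows how the 4-bit loading path that app.py now uses could be exercised end to end with the tokenizer's chat template (which the trailing comment above references). The example message, the sampling settings, and the assumption of a CUDA GPU with the bitsandbytes and accelerate packages installed are all illustrative.

# Sketch only (not from the commit): load the model in 4-bit as app.py now does,
# then run a single chat-templated generation.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, pipeline

# 4-bit quantization keeps the 7B model within a single consumer GPU's memory.
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model_name = "masakhane/zephyr-7b-gemma-sft-african-alpaca"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer,
                torch_dtype=torch.bfloat16, device_map="auto")

# The tokenizer's chat template turns the message list into one prompt string.
messages = [
    {"role": "user", "content": "Translate 'Good morning' into Swahili."},  # illustrative prompt
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

outputs = pipe(prompt, max_new_tokens=128, do_sample=True, temperature=0.7)
print(outputs[0]["generated_text"])

Passing a pre-quantized model object into pipeline(), as the commit does, means device placement is already handled by the 4-bit load; the torch_dtype and device_map arguments then mainly affect any remaining non-quantized tensors.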