davidberenstein1957 HF staff committed on
Commit
77bf3e6
1 Parent(s): b30641c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -15,7 +15,7 @@ DEFAULT_MAX_NEW_TOKENS = 1024
15
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
16
 
17
  if torch.cuda.is_available():
18
- model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
19
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
20
  tokenizer = AutoTokenizer.from_pretrained(model_id)
21
  style = "<style>.user-message,.system-message{display:flex;margin:10px}.user-message .message-content{background-color:#c2e3f7;color:#000}.system-message .message-content{background-color:#f5f5f5;color:#000}.message-content{padding:10px;border-radius:10px;max-width:70%;word-wrap:break-word}.container{display:flex;justify-content:space-between}.column{width:48%}</style>"
@@ -70,7 +70,7 @@ chat_interface = ChatInterface(
70
  max_turns=10,
71
  repo_id="llm-human-feedback-collector-chat-interface-kto",
72
  chatbot=gr.Chatbot(
73
- height=450, label="Meta-Llama-3-8B-Instruct", show_share_button=True
74
  ),
75
  css=style,
76
  cache_examples=False,
 
15
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
16
 
17
  if torch.cuda.is_available():
18
+ model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
19
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
20
  tokenizer = AutoTokenizer.from_pretrained(model_id)
21
  style = "<style>.user-message,.system-message{display:flex;margin:10px}.user-message .message-content{background-color:#c2e3f7;color:#000}.system-message .message-content{background-color:#f5f5f5;color:#000}.message-content{padding:10px;border-radius:10px;max-width:70%;word-wrap:break-word}.container{display:flex;justify-content:space-between}.column{width:48%}</style>"
 
70
  max_turns=10,
71
  repo_id="llm-human-feedback-collector-chat-interface-kto",
72
  chatbot=gr.Chatbot(
73
+ height=450, label="Meta-Llama-3.1-8B-Instruct", show_share_button=True
74
  ),
75
  css=style,
76
  cache_examples=False,