AdityaShingote committed
Commit • 601256e
1 Parent(s): a7cd460
Update app.py
app.py
CHANGED
@@ -1,24 +1,27 @@
-import
+import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 
-#
+# Set up the Hugging Face API token
+HF_token = "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"
+
+# Load the tokenizer and model
 model_name = "Qwen/Qwen1.5-7B"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=HF_token)
 
+# Function to generate article
 def generate_article(topic):
     inputs = tokenizer(f"Generate article for the NY times tweet {topic}", return_tensors="pt")
     outputs = model.generate(inputs['input_ids'], max_new_tokens=512, temperature=0.5)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-#
-
-
-
-
-
-
-
-
-# Launch the Gradio app
-iface.launch()
+# Streamlit app interface
+st.title("Article Generator")
+topic = st.text_input("Enter a topic:")
+if st.button("Generate"):
+    if topic:
+        article = generate_article(topic)
+        st.write(article)
+    else:
+        st.write("Please enter a topic.")
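For reference, a minimal standalone sketch of the new generation logic is below; it is not part of the commit. The model name and prompt mirror app.py above, but the full tokenizer output is passed to generate (so the attention mask is included alongside input_ids) and do_sample=True is added so the temperature setting actually takes effect; the example topic is arbitrary. Locally, the Streamlit interface itself would be started with "streamlit run app.py".

# Hypothetical smoke test for generate_article; assumes the Qwen/Qwen1.5-7B
# weights are accessible and fit in available memory (a GPU is advisable).
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Qwen/Qwen1.5-7B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate_article(topic):
    prompt = f"Generate article for the NY times tweet {topic}"
    inputs = tokenizer(prompt, return_tensors="pt")
    # **inputs supplies attention_mask along with input_ids; sampling enables temperature.
    outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.5)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

if __name__ == "__main__":
    print(generate_article("city budget vote"))  # example topic, chosen arbitrarily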