fiona committed
Commit
24db546
1 Parent(s): 2ce5005

Update app.py

Files changed (1)
  1. app.py +18 -15
app.py CHANGED
@@ -5,30 +5,33 @@ from langchain.chains.question_answering import load_qa_chain
 from langchain.llms import OpenAI
 import os
 import streamlit as st
+import torch
+from peft import PeftModel, PeftConfig
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-with open("guide1.txt") as f:
-    hitchhikersguide = f.read()
+peft_model_id = "fiona/to_onion_news_converter"
+config = PeftConfig.from_pretrained(peft_model_id)
+model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=False)
+tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
 
-text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0, separator = "\n")
-texts = text_splitter.split_text(hitchhikersguide)
+# Load the Lora model
+model = PeftModel.from_pretrained(model, peft_model_id)
 
-embeddings = OpenAIEmbeddings()
+def make_inference(news_headline):
+    batch = tokenizer(f"### INSTRUCTION\nBelow is a standard news headline, please rewrite it in a satirical style .\n\n### Standard:\n{news_headline}\n\n### new news:\n", return_tensors='pt')
 
-docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{"source": str(i)} for i in range(len(texts))]).as_retriever()
-
-chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
-
-def make_inference(query):
-    docs = docsearch.get_relevant_documents(query)
-    return(chain.run(input_documents=docs, question=query))
+    with torch.cuda.amp.autocast():
+        output_tokens = model.generate(**batch, max_new_tokens=200)
+
+    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
 
 if __name__ == "__main__":
     # Title of the web application
-    st.title('🗣️TalkToMyDoc📄')
+    st.title('Onion news converter')
 
     # Text input widget
-    user_input = st.text_input('Enter a question about Hitchhiker\'s Galaxy Guide book:', '', help='🗣️TalkToMyDoc📄 is a tool that allows you to ask questions about a document. In this case - Hitch Hitchhiker\'s Guide to the Galaxy..')
+    user_input = st.text_input('Enter a news headline', '')
 
     # Displaying output directly below the input field
     if user_input:
-        st.write('Answer:', make_inference(user_input))
+        st.write('The onion style:', make_inference(user_input))
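
A note on the new inference path: the committed code tokenizes on the CPU and never moves the model or the batch to a GPU, so the torch.cuda.amp.autocast() context has no effect on a CPU-only Space. Below is a minimal standalone sketch of the same load-and-generate flow with explicit device handling; the repo id and prompt template are taken verbatim from the commit (the template has to match the one the adapter was fine-tuned with), while the device fallback, torch.no_grad(), and the sample headline are illustrative assumptions, not part of the committed app.

import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "fiona/to_onion_news_converter"  # adapter repo from the commit
config = PeftConfig.from_pretrained(peft_model_id)
base_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the LoRA adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, peft_model_id)

# Assumption: fall back to CPU when no GPU is available (e.g. a free Space).
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()

def make_inference(news_headline):
    # Prompt template copied verbatim from the commit; it must match the
    # template used during fine-tuning.
    prompt = (
        "### INSTRUCTION\nBelow is a standard news headline, "
        "please rewrite it in a satirical style .\n\n"
        f"### Standard:\n{news_headline}\n\n### new news:\n"
    )
    batch = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        output_tokens = model.generate(**batch, max_new_tokens=200)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

# Hypothetical input, purely for illustration:
print(make_inference("City opens new public library downtown"))

Using torch.no_grad() keeps the sketch valid on CPU; on a GPU one could additionally wrap the generate() call in torch.autocast("cuda"), which is what the committed code's autocast context does when CUDA is present.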