import streamlit as st
from ctransformers import AutoModelForCausalLM

# Load the quantized GGUF model. The model_file/model_type/gpu_layers arguments
# are part of the ctransformers API (not Hugging Face transformers), so the model
# is loaded with ctransformers and called directly with text; no separate tokenizer is needed.
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/dolphin-2.2.1-mistral-7B-GGUF",
    model_file="dolphin-2.2.1-mistral-7b.Q4_K_M.gguf",
    model_type="mistral",
    gpu_layers=50,  # layers offloaded to the GPU; use 0 for CPU-only
)
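# Optional: Streamlit re-runs the whole script on every interaction, so the
# module-level load above repeats each time. A sketch of caching the load with
# st.cache_resource (the load_model helper below is illustrative, not part of
# the original script):
#
# @st.cache_resource
# def load_model():
#     return AutoModelForCausalLM.from_pretrained(
#         "TheBloke/dolphin-2.2.1-mistral-7B-GGUF",
#         model_file="dolphin-2.2.1-mistral-7b.Q4_K_M.gguf",
#         model_type="mistral",
#         gpu_layers=50,
#     )
#
# model = load_model()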
def generate_mcqs(paragraph):
    """Generate one multiple-choice question per sentence of the paragraph."""
    sentences = paragraph.split('. ')
    mcqs = []
    for sentence in sentences:
        if sentence:
            prompt = f"Generate a multiple-choice question based on the following sentence: {sentence}"
            # ctransformers models are called directly with the prompt text
            question = model(prompt, max_new_tokens=50)
            # Generate options (this is a simplified example with fixed placeholders)
            options = ["Option A", "Option B", "Option C", "Option D"]
            correct_answer = options[0]
            mcqs.append({
                "mcq": question,
                "options": {
                    "a": options[0],
                    "b": options[1],
                    "c": options[2],
                    "d": options[3]
                },
                "correct": "a"
            })
    return mcqs
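# A possible extension (not part of the original script): ask the model for
# distractor options instead of the fixed placeholders above. A rough sketch,
# with a hypothetical generate_options helper:
#
# def generate_options(sentence, question):
#     prompt = (
#         f"Sentence: {sentence}\n"
#         f"Question: {question}\n"
#         "Write one correct answer followed by three plausible but incorrect "
#         "answers, one per line."
#     )
#     lines = [line.strip() for line in model(prompt, max_new_tokens=80).split("\n") if line.strip()]
#     # Pad with placeholders in case the model returns fewer than four lines
#     return (lines + ["Option A", "Option B", "Option C", "Option D"])[:4]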
# Streamlit UI
st.title("MCQ Generator")
st.write("Enter a paragraph to generate multiple-choice questions.")
paragraph = st.text_area("Paragraph", height=200)
if st.button("Generate MCQs"):
    if paragraph:
        mcqs = generate_mcqs(paragraph)
        for i, mcq in enumerate(mcqs):
            st.write(f"**Question {i+1}:** {mcq['mcq']}")
            st.write(f"a. {mcq['options']['a']}")
            st.write(f"b. {mcq['options']['b']}")
            st.write(f"c. {mcq['options']['c']}")
            st.write(f"d. {mcq['options']['d']}")
            st.write(f"**Correct Answer:** {mcq['correct']}")
    else:
        st.write("Please enter a paragraph.")