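# MCQ Generator: a small Streamlit app that splits a paragraph into sentences
# and asks a local GGUF Mistral model to draft one multiple-choice question
# per sentence. Launch with `streamlit run app.py` (filename assumed).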
import streamlit as st

# The GGUF checkpoint must be loaded with ctransformers, not transformers:
# transformers' from_pretrained() does not accept the model_file/model_type/
# gpu_layers arguments used below, so the original import would fail.
from ctransformers import AutoModelForCausalLM, AutoTokenizer


@st.cache_resource
def load_model():
    """Load the model once and reuse it across Streamlit reruns."""
    # hf=True wraps the GGUF model in a transformers-compatible interface so
    # the tokenizer/encode/generate/decode workflow below keeps working.
    model = AutoModelForCausalLM.from_pretrained(
        "TheBloke/dolphin-2.2.1-mistral-7B-GGUF",
        model_file="dolphin-2.2.1-mistral-7b.Q4_K_M.gguf",
        model_type="mistral",
        gpu_layers=50,
        hf=True,
    )
    # ctransformers builds the tokenizer from the loaded model itself.
    tokenizer = AutoTokenizer.from_pretrained(model)
    return model, tokenizer


model, tokenizer = load_model()

def generate_mcqs(paragraph):
    # Naive sentence splitting: abbreviations and punctuation other than
    # ". " are not handled.
    sentences = paragraph.split('. ')
    mcqs = []

    for sentence in sentences:
        if sentence:
            prompt = f"Generate a multiple-choice question based on the following sentence: {sentence}"
            inputs = tokenizer.encode(prompt, return_tensors="pt")
            # max_new_tokens bounds only the completion; the original
            # max_length=50 also counted the prompt tokens, which can leave
            # no room for the generated question.
            outputs = model.generate(inputs, max_new_tokens=50, num_return_sequences=1)
            # Decode only the newly generated tokens, not the echoed prompt.
            question = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

            # Placeholder options: the model is asked only for the question
            # text, so these choices are static stand-ins and "a" is
            # hardcoded as the correct answer.
            options = ["Option A", "Option B", "Option C", "Option D"]

            mcqs.append({
                "mcq": question,
                "options": {
                    "a": options[0],
                    "b": options[1],
                    "c": options[2],
                    "d": options[3],
                },
                "correct": "a",
            })

    return mcqs
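

# A hedged sketch, not part of the original app: rather than static
# placeholder options, the model could be asked for the full question plus
# four options in a single completion. generate_full_mcq is a hypothetical
# helper; its raw text output would still need parsing into the options
# dict used by generate_mcqs above.
def generate_full_mcq(sentence):
    prompt = (
        "Write a multiple-choice question with four answer options labeled "
        f"a) to d) based on the following sentence: {sentence}"
    )
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(inputs, max_new_tokens=150, num_return_sequences=1)
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)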


st.title("MCQ Generator")
st.write("Enter a paragraph to generate multiple-choice questions.")

paragraph = st.text_area("Paragraph", height=200)
if st.button("Generate MCQs"):
    if paragraph:
        mcqs = generate_mcqs(paragraph)
        for i, mcq in enumerate(mcqs):
            st.write(f"**Question {i+1}:** {mcq['mcq']}")
            st.write(f"a. {mcq['options']['a']}")
            st.write(f"b. {mcq['options']['b']}")
            st.write(f"c. {mcq['options']['c']}")
            st.write(f"d. {mcq['options']['d']}")
            st.write(f"**Correct Answer:** {mcq['correct']}")
    else:
        st.write("Please enter a paragraph.")