Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| import os | |
| import time | |
| from langchain_community.document_loaders import PyPDFLoader | |
| from langchain_text_splitters import RecursiveCharacterTextSplitter | |
| from langchain_community.embeddings.fastembed import FastEmbedEmbeddings | |
| from langchain_community.vectorstores import FAISS | |
| from langchain_huggingface import HuggingFaceEndpoint | |
| from langchain_core.prompts import ChatPromptTemplate | |
# --- 1. Model Setup ---
# Hugging Face API token is injected via Space secrets; without it we keep
# ``llm`` as None so the UI can report a configuration error instead of
# crashing at import time.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Llama 3 8B Instruct served through the Hugging Face Inference API.
llm = (
    HuggingFaceEndpoint(
        repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
        temperature=0.5,
        max_new_tokens=4096,
        huggingfacehub_api_token=HF_TOKEN,
    )
    if HF_TOKEN
    else None
)
| # --- 2. Logic --- | |
# --- 2. Logic ---
def generate_question_paper(pdf_file, difficulty, num_questions):
    """Build a formatted exam paper from an uploaded PDF via a RAG pipeline.

    Pipeline: load the PDF, split it into chunks, embed the chunks into a
    FAISS index, retrieve the most relevant passages, then prompt the LLM to
    write a question paper grounded in that context.

    This is a *generator*: Gradio streams every yielded pair into the two
    bound outputs, which is how the progress messages appear live.

    Args:
        pdf_file: Value from ``gr.File`` — a tempfile-like object exposing
            ``.name`` on older Gradio versions, or a plain path string on
            newer ones.
        difficulty: "Easy" | "Medium" | "Hard" (from the Radio input).
        num_questions: Number of Part A multiple-choice questions (Slider).

    Yields:
        ``(markdown_text, status_label)`` tuples; the final yield is either
        the finished paper with status "Complete" or an error message.
    """
    # NOTE(review): the emoji in the status strings below look
    # mojibake-garbled in this copy of the file; they are kept byte-identical
    # here — confirm against the original encoding.

    # BUG FIX: the original used ``return msg, status`` on the error paths.
    # Inside a generator, ``return value`` does NOT deliver the value to
    # Gradio — it only ends iteration — so users never saw these messages.
    # Yield the message first, then stop.
    if not HF_TOKEN:
        yield "β οΈ Error: HF_TOKEN is missing. Please add it in Space Settings > Secrets.", "Error"
        return
    if not pdf_file:
        yield "β οΈ Please upload a PDF file first.", "Input Error"
        return
    try:
        # Progress updates (simulated for UI feedback)
        yield "π Reading PDF...", "Processing"
        # Robustness: newer Gradio versions pass a plain path string while
        # older ones pass a tempfile wrapper exposing ``.name``.
        pdf_path = pdf_file.name if hasattr(pdf_file, "name") else pdf_file
        loader = PyPDFLoader(pdf_path)
        pages = loader.load()
        if not pages:
            # Same generator bug fixed here: yield, don't return a value.
            yield "β Error: The PDF appears to be empty or unreadable.", "Error"
            return
        yield f"βοΈ Splitting {len(pages)} pages...", "Processing"
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=100,
        )
        chunks = text_splitter.split_documents(pages)
        yield "π§ Analyzing content...", "Embedding"
        embeddings = FastEmbedEmbeddings()
        vector_store = FAISS.from_documents(chunks, embeddings)
        yield "π Retrieving key concepts...", "Retrieving"
        # Top-5 chunks against a generic "key topics" query stand in for the
        # whole document as LLM context.
        retriever = vector_store.as_retriever(search_kwargs={"k": 5})
        context_docs = retriever.invoke("Summary of key topics, definitions, and important details")
        context_text = "\n\n".join(doc.page_content for doc in context_docs)
        # Prompt
        template = """
You are an expert academic examiner. Create a rigorous Question Paper based ONLY on the provided context.
CONTEXT:
{context}
INSTRUCTIONS:
- Difficulty: {difficulty}
- Total Questions: {num_questions}
- Structure:
* Part A: Multiple Choice ({num_questions} questions)
* Part B: Short Answer (2 questions)
* Part C: Essay/Long Answer (1 question)
- Include an "Answer Key" section at the very bottom.
OUTPUT FORMAT:
Return valid Markdown. Use bold headers. Do not output conversational filler.
"""
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | llm
        yield "β¨ Generating final paper...", "Generating"
        response = chain.invoke({
            "context": context_text,
            "difficulty": difficulty,
            "num_questions": num_questions,
        })
        yield response, "Complete"
    except Exception as e:
        # Broad catch is deliberate: this is the top-level UI boundary and
        # any failure should surface as a status message, not a traceback.
        yield f"β System Error: {str(e)}", "Failed"
# --- 3. Custom UI ---
# Custom CSS for a professional look: a centered, width-capped container,
# muted header typography, a gradient submit button, and a subtle bordered
# status bar. Class names are attached below via ``elem_classes``.
custom_css = """
.container { max-width: 1200px; margin: auto; padding-top: 20px; }
.header-text { text-align: center; font-family: 'Helvetica', sans-serif; }
.header-text h1 { color: #2D3748; font-size: 3em; margin-bottom: 0px; }
.header-text h3 { color: #718096; font-weight: 300; }
.submit-btn { background: linear-gradient(90deg, #4F46E5 0%, #7C3AED 100%) !important; color: white !important; border: none !important; }
.status-bar { border: 1px solid #e2e8f0; background: #f7fafc; padding: 10px; border-radius: 8px; color: #4a5568; }
"""
# Built-in Soft theme tuned to an indigo/blue palette with larger base text.
theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    text_size="lg"
)
# Top-level app layout: configuration inputs (PDF upload, difficulty, MCQ
# count) in a narrow left column, the generated Markdown exam in a wider
# right column. Indentation reconstructed from the flattened source —
# nesting of the context managers inferred; confirm against the live Space.
with gr.Blocks(theme=theme, css=custom_css, title="AI Exam Gen") as demo:
    with gr.Column(elem_classes="container"):
        # Header
        gr.HTML("""
        <div class="header-text">
        <h1>π AI Question Paper Generator</h1>
        <h3>Upload study material, get a formatted exam in seconds.</h3>
        </div>
        """)
        with gr.Row(variant="panel", equal_height=True):
            # Left Column: Inputs
            with gr.Column(scale=1):
                gr.Markdown("### π οΈ Configuration")
                pdf_input = gr.File(
                    label="Upload PDF (Study Notes/Book)",
                    file_types=[".pdf"],
                    file_count="single",
                    height=100
                )
                with gr.Group():
                    difficulty = gr.Radio(
                        ["Easy", "Medium", "Hard"],
                        label="Difficulty Level",
                        value="Medium",
                        info="Adjusts complexity of questions."
                    )
                    num_questions = gr.Slider(
                        minimum=5,
                        maximum=20,
                        value=10,
                        step=1,
                        label="Number of MCQs",
                        info="How many objective questions?"
                    )
                btn = gr.Button("β¨ Generate Question Paper", elem_classes="submit-btn", variant="primary")
                # Read-only status line updated by each yield of the generator.
                status = gr.Textbox(label="Status", placeholder="Ready", interactive=False, max_lines=1)
            # Right Column: Output
            with gr.Column(scale=2):
                gr.Markdown("### π Generated Exam")
                output = gr.Markdown(
                    label="Exam Paper",
                    value="_Your generated question paper will appear here..._",
                    show_copy_button=True,
                    line_breaks=True
                )
        # Logic: generate_question_paper is a generator, so Gradio streams
        # each yielded (markdown, status) pair into [output, status].
        btn.click(
            fn=generate_question_paper,
            inputs=[pdf_input, difficulty, num_questions],
            outputs=[output, status]
        )
        # Footer
        gr.Markdown(
            """
            <div style="text-align: center; color: #a0aec0; margin-top: 40px;">
            Powered by Llama 3 β’ LangChain β’ Hugging Face
            </div>
            """
        )

# Launch only when executed directly (Spaces runs this file as a script).
if __name__ == "__main__":
    demo.launch()