| import gradio as gr |
| from transformers import pipeline |
| import PyPDF2 |
| import re |
| import os |
| import io |
| import random |
| import time |
| from groq import Groq |
|
|
| |
# ---------------------------------------------------------------------------
# Startup diagnostics: announce the launch and list credential-looking
# environment variables with their values masked.
# ---------------------------------------------------------------------------
banner = "=" * 50
print(banner)
print("STARTING APPLICATION - ENVIRONMENT CHECK")
print(banner)

print("\nAll Environment Variables:")
for key, value in os.environ.items():
    # Only show variables that look like secrets, never their full value.
    if not ("KEY" in key or "TOKEN" in key or "SECRET" in key):
        continue
    masked = f"{value[:4]}...{value[-4:]}" if len(value) > 8 else "SET"
    print(f" {key}: {masked}")
|
|
| |
# Import the new Google GenAI SDK if available; on failure fall back to None
# sentinels so later setup code can detect the absence instead of crashing.
try:
    from google import genai
    from google.genai import types
    print("\n✅ Imported google.genai successfully")
except ImportError as e:
    print(f"\n❌ Failed to import google.genai: {e}")
    genai = None
    types = None
|
|
| |
| |
# Read API credentials once. NOTE: os.environ.get and os.getenv are the same
# function, so the original `os.environ.get(X) or os.getenv(X)` pattern was a
# redundant double read of the same variable.
hf_token = os.getenv("HF_TOKEN")
gemini_key = os.getenv("GEMINI_API_KEY")
groq_key = os.getenv("GROQ_API_KEY")


print("\nToken Check:")
print(f" HF_TOKEN: {'SET (' + hf_token[:4] + '...)' if hf_token else 'NOT SET'}")
print(f" GEMINI_API_KEY: {'SET (' + gemini_key[:4] + '...)' if gemini_key else 'NOT SET'}")
print(f" GROQ_API_KEY: {'SET (' + groq_key[:4] + '...)' if groq_key else 'NOT SET'}")
|
|
| |
# ---------------------------------------------------------------------------
# Gemini client setup: try the new google-genai SDK constructor, then an
# alternate constructor path, then the legacy google.generativeai module.
# Every failure path leaves gemini_client as None so the app degrades to the
# local summarizer / template fallbacks instead of crashing.
# ---------------------------------------------------------------------------
gemini_client = None
if gemini_key and genai:
    try:
        print("\nAttempting Gemini setup...")
        try:
            gemini_client = genai.Client(api_key=gemini_key)
            print(" ✅ Gemini client initialized with new SDK")
        except Exception as e1:
            print(f" ⚠️ New SDK failed: {e1}")
            try:
                gemini_client = genai.client.Client(api_key=gemini_key)
                print(" ✅ Gemini client initialized with alternate method")
            except Exception as e2:
                print(f" ⚠️ Alternate failed: {e2}")
                try:
                    # Last resort: the pre-2024 SDK, configured module-wide.
                    import google.generativeai as old_genai
                    old_genai.configure(api_key=gemini_key)
                    gemini_client = old_genai
                    print(" ✅ Using legacy google.generativeai")
                except Exception as e3:
                    print(f" ❌ Legacy also failed: {e3}")
    except Exception as e:
        print(f" ❌ Gemini setup error: {e}")
else:
    if not gemini_key:
        print("\n⚠️ No GEMINI_API_KEY found")
    if not genai:
        print("⚠️ Google GenAI SDK not imported")
|
|
| |
# ---------------------------------------------------------------------------
# Groq client setup. NOTE(review): this performs a real chat-completion
# request at import time to validate the key — a network round trip on every
# startup; consider making the probe lazy.
# ---------------------------------------------------------------------------
groq_client = None
if groq_key:
    try:
        print("\nAttempting Groq setup...")
        groq_client = Groq(api_key=groq_key)
        # Smoke-test the credentials with a minimal request.
        test_response = groq_client.chat.completions.create(
            messages=[{"role": "user", "content": "Hi"}],
            model="llama-3.3-70b-versatile",
            max_completion_tokens=5
        )
        print(" ✅ Groq client initialized and tested successfully")
    except Exception as e:
        print(f" ❌ Groq setup error: {e}")
        groq_client = None
else:
    print("\n⚠️ No GROQ_API_KEY found")


print("\n" + "=" * 50)
print("SETUP COMPLETE")
print("=" * 50)
|
|
| |
# Lazily-instantiated HF summarization pipeline; None until first use.
summarizer = None


def load_summarizer():
    """Create the DistilBART summarization pipeline on first call and cache it.

    Runs on CPU (device=-1). Returns the cached pipeline, or None if loading
    failed — callers must handle the None fallback.
    """
    global summarizer
    if summarizer is not None:
        return summarizer
    try:
        print("Loading summarizer...")
        summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=-1)
        print("✅ Summarizer ready")
    except Exception as e:
        print(f"❌ Summarizer failed: {e}")
    return summarizer
|
|
| |
def check_status():
    """Return a one-line ' | '-joined readiness summary for Gemini and Groq.

    Distinguishes three states per provider: client ready, key present but
    client construction failed, and key missing entirely.
    """
    statuses = []

    if gemini_client and gemini_key:
        statuses.append("✅ Gemini Ready")
    elif gemini_key:
        statuses.append("⚠️ Gemini key present but client failed")
    else:
        statuses.append("❌ Gemini: Add GEMINI_API_KEY")

    if groq_client and groq_key:
        statuses.append("✅ Groq Ready")
    elif groq_key:
        statuses.append("⚠️ Groq key present but client failed")
    else:
        statuses.append("❌ Groq: Add GROQ_API_KEY")

    return " | ".join(statuses)
|
|
| |
|
|
def _pages_text(pdf_reader):
    """Concatenate the extractable text of every page, newline-separated."""
    text = ""
    for page in pdf_reader.pages:
        page_text = page.extract_text()
        if page_text:
            text += page_text + "\n"
    return text


def extract_text_from_pdf(pdf_file):
    """Extract plain text from a PDF given as a path, bytes, or file-like object.

    Returns a (text, error) pair: (normalized_text, None) on success, or
    (None, human_readable_message) on failure. The page-extraction loop that
    was duplicated for both input kinds now lives in _pages_text.
    """
    if pdf_file is None:
        return None, "Please upload a PDF file."

    try:
        if isinstance(pdf_file, str):
            # Gradio handed us a temp-file path.
            with open(pdf_file, 'rb') as f:
                text = _pages_text(PyPDF2.PdfReader(f))
        else:
            # Raw bytes or a file-like object (e.g. gr.File(type="binary")).
            if hasattr(pdf_file, 'read'):
                pdf_bytes = pdf_file.read()
                if hasattr(pdf_file, 'seek'):
                    pdf_file.seek(0)  # leave the stream reusable for callers
            else:
                pdf_bytes = pdf_file

            if isinstance(pdf_bytes, str):
                pdf_bytes = pdf_bytes.encode()
            text = _pages_text(PyPDF2.PdfReader(io.BytesIO(pdf_bytes)))

        # Collapse all whitespace runs so the summarizers see clean prose.
        text = re.sub(r'\s+', ' ', text).strip()

        if len(text) < 50:
            return None, "Could not extract text. PDF may be image-based or scanned."

        return text, None

    except Exception as e:
        return None, f"Error reading PDF: {str(e)}"
|
|
def summarize_with_gemini(text, max_length, min_length):
    """Summarize `text` via Gemini; return the summary string or None.

    Tries the new-SDK `client.models` API (plain, then with an explicit
    config), then the legacy `GenerativeModel` API. A None return tells
    callers to fall back to the local summarizer. Input is truncated to
    ~15k characters to stay within request limits.
    """
    if not gemini_client or not gemini_key:
        return None

    try:
        if hasattr(gemini_client, 'models'):
            prompt = f"Summarize the following text in {min_length}-{max_length} words. Be concise and clear:\n\n{text[:15000]}"

            try:
                response = gemini_client.models.generate_content(
                    model="gemini-2.5-flash",
                    contents=prompt
                )
                return response.text
            # Was a bare `except:`; narrowed so Ctrl-C/SystemExit still propagate.
            except Exception:
                try:
                    response = gemini_client.models.generate_content(
                        model="gemini-2.5-flash",
                        contents=prompt,
                        config=types.GenerateContentConfig(
                            max_output_tokens=500
                        ) if types else None
                    )
                    return response.text
                except Exception:
                    pass

        if hasattr(gemini_client, 'GenerativeModel'):
            # Legacy google.generativeai path.
            model = gemini_client.GenerativeModel('gemini-2.5-flash')
            prompt = f"Summarize the following text in {min_length}-{max_length} words:\n\n{text[:15000]}"
            response = model.generate_content(prompt)
            return response.text

    except Exception as e:
        print(f"Gemini summary error: {e}")

    return None
|
|
def summarize_pdf(pdf_file, max_length, min_length):
    """Summarize an uploaded PDF: Gemini first, local DistilBART as fallback."""
    text, error = extract_text_from_pdf(pdf_file)
    if error:
        return error

    # Preferred path: Gemini (handles long inputs, better quality).
    summary = summarize_with_gemini(text, max_length, min_length)
    if summary:
        return summary

    # Fallback: local model, limited to the first ~3500 characters.
    model = load_summarizer()
    if not model:
        return "Error: No summarization available"
    try:
        output = model(text[:3500], max_length=max_length, min_length=min_length, do_sample=False)
        return output[0]['summary_text']
    except Exception as e:
        return f"Summarization error: {str(e)}"
|
|
def generate_essay_with_gemini(prompt, essay_type, word_count, tone):
    """Generate an essay via Gemini; return formatted markdown or None.

    Tries the new-SDK `client.models` API first, falling back to the legacy
    `GenerativeModel` API. None signals the caller to use its template
    fallback.
    """
    if not gemini_client or not gemini_key:
        return None

    try:
        full_prompt = f"""You are an expert academic writer. Write a {essay_type} essay in {tone} tone.
Target length: approximately {word_count} words.
Topic: {prompt}
Requirements:
- Engaging introduction with clear thesis statement
- Well-structured body paragraphs with supporting arguments and evidence
- Strong conclusion that summarizes main points
- Use academic vocabulary and formal writing style
Write the essay now:"""

        if hasattr(gemini_client, 'models'):
            try:
                response = gemini_client.models.generate_content(
                    model="gemini-2.5-flash",
                    contents=full_prompt
                )
                essay = response.text.strip()
            # Was a bare `except:`; narrowed so Ctrl-C/SystemExit still propagate.
            except Exception:
                if hasattr(gemini_client, 'GenerativeModel'):
                    model = gemini_client.GenerativeModel('gemini-2.5-flash')
                    response = model.generate_content(full_prompt)
                    essay = response.text.strip()
                else:
                    return None
        else:
            model = gemini_client.GenerativeModel('gemini-2.5-flash')
            response = model.generate_content(full_prompt)
            essay = response.text.strip()

        # Strip a leading markdown heading so our own header is the only one.
        essay = re.sub(r'^#+\s*', '', essay)
        word_count_actual = len(essay.split())

        return f"""# {essay_type} Essay: {prompt[:50]}{'...' if len(prompt) > 50 else ''}
{essay}
---
*~{word_count_actual} words | {tone} | Gemini*"""

    except Exception as e:
        print(f"Essay generation error: {e}")
        return None
|
|
def generate_essay(prompt, essay_type, word_count, tone):
    """Generate an essay via Gemini, or return a static template fallback.

    Rejects prompts shorter than 10 characters with an instructional message.
    """
    if not prompt or len(prompt.strip()) < 10:
        return "Please provide a detailed prompt (at least 10 characters)."

    if gemini_client and gemini_key:
        gemini_result = generate_essay_with_gemini(prompt, essay_type, word_count, tone)
        if gemini_result:
            return gemini_result

    # Template fallback shown when the Gemini SDK/key is unavailable.
    return f"""❌ AI generation not available.
Template Essay: {prompt}
Introduction:
{prompt} is an important topic that requires careful consideration. This essay explores its key aspects.
Body:
The significance of {prompt} cannot be overstated. Various perspectives exist on this matter, with experts debating the best approaches. Research continues to reveal new insights.
Conclusion:
In conclusion, {prompt} represents a complex issue that demands attention.
---
*Template fallback - Gemini SDK issue*
Check: 1) GEMINI_API_KEY is set 2) google-genai package is installed"""
|
|
def summarize_text(text, max_length, min_length):
    """Summarize pasted text: Gemini first, local DistilBART as fallback."""
    if len(text.strip()) < 100:
        return "Please provide at least 100 characters to summarize."

    summary = summarize_with_gemini(text, max_length, min_length)
    if summary:
        return summary

    model = load_summarizer()
    if not model:
        return "Error: No summarization available"
    try:
        output = model(text[:3500], max_length=max_length, min_length=min_length, do_sample=False)
        return output[0]['summary_text']
    except Exception as e:
        return f"Error: {str(e)}"
|
|
| |
|
|
def extract_sentences(text):
    """Split text on . ! ? and keep only sentences longer than six words."""
    sentences = re.split(r'[.!?]', text)
    return [s.strip() for s in sentences if len(s.split()) > 6]


def create_quiz(text, num_questions):
    """Build fill-in-the-blank questions from the study text.

    Each question blanks out one keyword of a randomly chosen sentence and
    offers the keyword plus up to three distinct wrong words drawn from the
    text. Returns a list of {"question", "options", "answer"} dicts.

    Fixes over the original: the wrong options can no longer accidentally
    include the answer (which produced duplicate correct choices), and tiny
    vocabularies no longer make random.sample raise ValueError.
    """
    sentences = extract_sentences(text)

    # Can't ask more questions than there are usable sentences.
    num_questions = min(num_questions, len(sentences))

    quiz_data = []
    for sentence in random.sample(sentences, num_questions):
        words = sentence.split()
        # Sentences have > 6 words, so words[2:-2] is never empty.
        keyword = random.choice(words[2:-2])

        question = sentence.replace(keyword, "_____")

        # Distractors: distinct words from the text, excluding the answer.
        candidates = [w for w in set(text.split()) if w != keyword]
        wrong = random.sample(candidates, min(3, len(candidates)))

        options = wrong + [keyword]
        random.shuffle(options)

        quiz_data.append({
            "question": question,
            "options": options,
            "answer": keyword
        })

    return quiz_data
|
|
def start_quiz(text, num_questions, timer_minutes):
    """Build a quiz from the study text and render its first question.

    Returns the 8-tuple wired to the Gradio outputs: (question markdown,
    options update, score text, quiz state, index, score, end time,
    timer text). Empty input yields a warning and cleared states.
    """
    if not text.strip():
        return "⚠️ Please enter study material.", None, None, None, None, None, None, ""

    quiz = create_quiz(text, num_questions)

    # Absolute wall-clock deadline for the whole quiz.
    end_time = time.time() + (timer_minutes * 60)

    return show_question(quiz, 0, 0, end_time)
|
|
def show_question(quiz, index, score, end_time):
    """Render question `index`, or finish when time is up / questions run out.

    Returns the same 8-tuple shape as start_quiz/finish_quiz so all quiz
    callbacks can share one set of Gradio outputs.
    """
    # Time limit reached — grade what we have.
    if time.time() > end_time:
        return finish_quiz(score, len(quiz))

    # All questions answered.
    if index >= len(quiz):
        return finish_quiz(score, len(quiz))

    q = quiz[index]

    remaining = int(end_time - time.time())
    timer_display = f"⏳ Time Left: {remaining} seconds"

    return (
        f"### Question {index+1}:\n\n{q['question']}",
        gr.update(choices=q["options"], value=None),  # reset the radio selection
        f"Score: {score}",
        quiz,
        index,
        score,
        end_time,
        timer_display
    )
|
|
def submit_answer(selected, quiz, index, score, end_time):
    """Grade the selected option and advance to the next question.

    Guards against clicks after the quiz has finished: finish_quiz resets the
    state outputs to None, so the original `quiz[index]` then raised
    TypeError on the next Submit click.
    """
    if not quiz or index is None:
        return finish_quiz(score or 0, 0)

    if selected == quiz[index]["answer"]:
        score += 1

    index += 1
    return show_question(quiz, index, score, end_time)
|
|
def finish_quiz(score, total):
    """Return the end-of-quiz 8-tuple: summary, cleared radio, reset states."""
    return (
        f"## ✅ Quiz Finished!\n\nFinal Score: **{score}/{total}**",
        gr.update(choices=[]),  # empty the answer choices
        "",
        None,  # quiz state cleared
        None,  # index state cleared
        None,  # score state cleared
        None,  # end-time state cleared
        ""
    )
|
|
| |
|
|
def translate_to_urdu(text):
    """Translate English text to Urdu using Groq's Llama 3.3 70B.

    Returns the Urdu translation, or a human-readable message when the input
    is empty, the Groq client is unconfigured, or the API call fails.
    """
    if not text or not text.strip():
        return "Please enter some text to translate."

    if not groq_client:
        return "❌ Groq API not configured. Please add GROQ_API_KEY."

    try:
        chat_completion = groq_client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": "You are a professional English to Urdu translator. Translate the given text accurately to Urdu (اردو) using natural, conversational language. Respond ONLY with the translation, no explanations."
                },
                {
                    "role": "user",
                    "content": f"Translate to Urdu:\n\n{text}"
                }
            ],
            model="llama-3.3-70b-versatile",
            temperature=0.3,  # low temperature for faithful translation
            max_completion_tokens=2048,
        )

        return chat_completion.choices[0].message.content

    except Exception as e:
        return f"Error: {str(e)}"
|
|
| |
|
|
# App stylesheet. NOTE: the @import for the Urdu web font MUST be the first
# rule — per the CSS spec, @import declarations that follow other rules are
# ignored, so its original placement (after the status classes) silently
# disabled the Noto Nastaliq Urdu font.
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Noto+Nastaliq+Urdu&display=swap');
.header {
    text-align: center;
    margin-bottom: 2rem;
    padding: 2rem;
    background: linear-gradient(135deg, #059669, #6b7280);
    border-radius: 12px;
    color: white;
}
.header h1 {
    font-size: 2.5rem;
    margin-bottom: 0.5rem;
}
.status-ok {
    background: #d1fae5;
    border: 2px solid #059669;
    padding: 1rem;
    border-radius: 8px;
    text-align: center;
    color: #065f46;
    font-weight: bold;
    margin-bottom: 1rem;
}
.status-warn {
    background: #fef3c7;
    border: 2px solid #f59e0b;
    padding: 1rem;
    border-radius: 8px;
    text-align: center;
    color: #92400e;
    margin-bottom: 1rem;
}
.status-error {
    background: #fee2e2;
    border: 2px solid #ef4444;
    padding: 1rem;
    border-radius: 8px;
    text-align: center;
    color: #991b1b;
    margin-bottom: 1rem;
}
.urdu-text {
    font-family: 'Noto Nastaliq Urdu', serif;
    font-size: 1.5em;
    line-height: 2;
    direction: rtl;
    text-align: right;
}
"""
|
|
| |
|
|
# ---------------------------------------------------------------------------
# Gradio UI. `custom_css` is passed to gr.Blocks(css=...) — the supported way
# to inject custom CSS; it was previously defined but never applied, because
# launch() has no `css` parameter.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Student AI Suite", css=custom_css) as demo:

    # Status banner, color-coded by overall provider readiness.
    status = check_status()
    if "✅" in status and "❌" not in status:
        gr.HTML(f'<div class="status-ok">{status}</div>')
    elif "❌" in status:
        gr.HTML(f'<div class="status-error">{status}</div>')
    else:
        gr.HTML(f'<div class="status-warn">{status}</div>')

    gr.HTML('<div class="header"><h1>📚 Student AI Suite</h1><p>Essay Generator • PDF Summarizer • Quiz Generator • Urdu Translator</p></div>')

    with gr.Tabs():

        with gr.TabItem("📝 Essay & PDF Tools"):
            with gr.Tabs():
                with gr.TabItem("📄 PDF Summarizer"):
                    # type="binary" hands bytes to extract_text_from_pdf.
                    pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"], type="binary")
                    with gr.Row():
                        max_len = gr.Slider(50, 500, 200, step=10, label="Max Length")
                        min_len = gr.Slider(20, 200, 50, step=10, label="Min Length")
                    summarize_btn = gr.Button("📄 Summarize PDF", variant="primary")
                    pdf_output = gr.Textbox(label="Summary", lines=12)

                    gr.Markdown("---")
                    text_input = gr.Textbox(label="Or paste text", lines=5)
                    text_btn = gr.Button("Summarize Text")
                    text_output = gr.Textbox(label="Summary", lines=8)

                with gr.TabItem("✏️ Essay Generator"):
                    prompt_input = gr.Textbox(label="Essay Topic", placeholder="Example: 'The impact of AI on education'", lines=3)
                    essay_type = gr.Dropdown(["Argumentative", "Expository", "Descriptive", "Persuasive"], value="Argumentative")
                    tone = gr.Dropdown(["Academic", "Formal", "Neutral"], value="Academic")
                    words = gr.Slider(200, 1000, 500, step=50, label="Word Count")
                    gen_btn = gr.Button("✨ Generate Essay", variant="primary")
                    essay_output = gr.Textbox(label="Generated Essay", lines=25)

        with gr.TabItem("🎯 Smart Quiz Generator"):
            gr.Markdown("### Paste your study text → generate quiz → test yourself!")

            quiz_text_input = gr.Textbox(lines=6, label="Study Material")

            with gr.Row():
                num_questions = gr.Slider(1, 10, value=5, step=1, label="Number of Questions")
                timer_minutes = gr.Slider(1, 10, value=2, step=1, label="Timer (Minutes)")

            start_btn = gr.Button("Start Quiz", variant="primary")

            question_box = gr.Markdown()
            options_radio = gr.Radio(label="Select Answer")
            submit_btn = gr.Button("Submit Answer", variant="secondary")
            score_display = gr.Markdown()
            timer_display = gr.Markdown()

            # Session state carried between question renders.
            quiz_state = gr.State()
            index_state = gr.State()
            score_state = gr.State()
            endtime_state = gr.State()

        with gr.TabItem("🌐 English to Urdu"):
            gr.Markdown("### Powered by Groq AI")

            with gr.Row():
                with gr.Column():
                    input_text = gr.Textbox(
                        label="English Text",
                        placeholder="Enter text to translate...",
                        lines=4
                    )
                    translate_btn = gr.Button("Translate", variant="primary")

                with gr.Column():
                    output_text = gr.Textbox(
                        label="Urdu Translation (اردو)",
                        lines=4,
                        elem_classes=["urdu-text"],
                        interactive=False
                    )

            examples = [
                "Hello, how are you?",
                "I love Pakistan.",
                "The weather is beautiful today."
            ]
            gr.Examples(examples=examples, inputs=input_text)

    # --- Event wiring ------------------------------------------------------
    summarize_btn.click(summarize_pdf, [pdf_input, max_len, min_len], pdf_output)
    text_btn.click(summarize_text, [text_input, max_len, min_len], text_output)
    gen_btn.click(generate_essay, [prompt_input, essay_type, words, tone], essay_output)

    start_btn.click(
        start_quiz,
        inputs=[quiz_text_input, num_questions, timer_minutes],
        outputs=[
            question_box,
            options_radio,
            score_display,
            quiz_state,
            index_state,
            score_state,
            endtime_state,
            timer_display
        ]
    )

    submit_btn.click(
        submit_answer,
        inputs=[options_radio, quiz_state, index_state, score_state, endtime_state],
        outputs=[
            question_box,
            options_radio,
            score_display,
            quiz_state,
            index_state,
            score_state,
            endtime_state,
            timer_display
        ]
    )

    translate_btn.click(fn=translate_to_urdu, inputs=input_text, outputs=output_text)
    input_text.submit(fn=translate_to_urdu, inputs=input_text, outputs=output_text)
|
|
if __name__ == "__main__":
    # gradio's launch() has no `css` parameter — passing one raises TypeError
    # at startup. Custom CSS must be supplied via gr.Blocks(css=...) instead.
    demo.launch(server_name="0.0.0.0", server_port=7860)