File size: 1,593 Bytes
4f346e3
 
 
 
 
 
 
 
 
 
 
7c60401
4f346e3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import gradio as gr
import os
from langchain_google_genai import ChatGoogleGenerativeAI

llm = None  # Global Gemini chat model; populated by setup_gemini(), read by solve_step_by_step()

# Set up Gemini model
def setup_gemini(api_key):
    """Initialize the global Gemini chat model with the given API key.

    Stores the key in the GOOGLE_API_KEY environment variable (which the
    langchain client reads) and constructs the model.

    Args:
        api_key: Google AI Studio API key for Gemini.

    Returns:
        A human-readable status string for the UI: success on OK,
        an error message otherwise.
    """
    global llm
    # Validate up front: an empty key would only fail lazily (and confusingly)
    # inside the client on the first request.
    if not api_key or not api_key.strip():
        return "❌ Error: API key is empty."
    try:
        os.environ["GOOGLE_API_KEY"] = api_key
        llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest", temperature=0.2)
        return "βœ… Gemini model is ready!"
    except Exception as e:
        return f"❌ Error: {str(e)}"

# Ask question using Chain of Thought prompting
def solve_step_by_step(question):
    """Answer *question* using chain-of-thought prompting on the global Gemini model.

    Appends "Let's think step by step." to the prompt (unless the user already
    ended with it) and returns the model's reasoning as plain text.

    Args:
        question: The user's question from the UI textbox.

    Returns:
        The model's answer text, or a warning/error string when the model is
        not initialized or the request fails.
    """
    if not llm:
        return "⚠️ Please initialize Gemini first."

    prompt = question.strip()
    if not prompt.endswith("Let's think step by step."):
        prompt += " Let's think step by step."

    try:
        response = llm.invoke(prompt)
        # BUG FIX: invoke() returns an AIMessage, not a string; the Gradio
        # Textbox output needs the text, so extract .content (fall back to the
        # raw value if a plain string is ever returned).
        return getattr(response, "content", response)
    except Exception as e:
        return f"❌ Error: {str(e)}"

# Gradio UI: wires the two handlers above into a simple Blocks layout.
with gr.Blocks(title="Chain of Thought with Gemini") as demo:
    gr.Markdown("## πŸ€– Chain of Thought Prompting (Gemini)")

    api_key = gr.Textbox(label="πŸ”‘ Gemini API Key", type="password", placeholder="Paste your Gemini key")
    status = gr.Textbox(label="Status", interactive=False)

    setup_btn = gr.Button("βš™οΈ Initialize Gemini")

    question = gr.Textbox(label="❓ Your Question", placeholder="e.g., What is 15 * 4?")
    answer = gr.Textbox(label="🧠 Gemini's Reasoning", lines=10)

    ask_btn = gr.Button("πŸ” Think Step by Step")

    # Button wiring: initialize the model, then answer questions with it.
    setup_btn.click(fn=setup_gemini, inputs=[api_key], outputs=[status])
    ask_btn.click(fn=solve_step_by_step, inputs=[question], outputs=[answer])

# Guard the launch so importing this module (e.g. from tests or tooling)
# does not start a web server as a side effect; running the script directly
# behaves exactly as before.
if __name__ == "__main__":
    demo.launch()