Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,15 +1,20 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import cohere
|
| 3 |
from dotenv import load_dotenv
|
|
|
|
| 4 |
|
| 5 |
# Load environment variables
|
| 6 |
load_dotenv()
|
| 7 |
|
| 8 |
-
# Initialize Cohere API client
|
| 9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
MODEL_NAME = 'command-r-plus' # Define model name as a constant
|
| 11 |
|
| 12 |
-
# Adaptive learning functions
|
| 13 |
def assess_knowledge(name, experience, goals):
|
| 14 |
try:
|
| 15 |
level_prompt = f"User Name: {name}, Experience: {experience}, Goals: {goals}. Classify as beginner, intermediate, or advanced."
|
|
@@ -17,7 +22,7 @@ def assess_knowledge(name, experience, goals):
|
|
| 17 |
level = response.generations[0].text.strip()
|
| 18 |
return level
|
| 19 |
except Exception as e:
|
| 20 |
-
return "Error in knowledge assessment:
|
| 21 |
|
| 22 |
def generate_explanation(topic, level):
|
| 23 |
try:
|
|
@@ -26,7 +31,7 @@ def generate_explanation(topic, level):
|
|
| 26 |
explanation = response.generations[0].text.strip()
|
| 27 |
return explanation
|
| 28 |
except Exception as e:
|
| 29 |
-
return "Error in generating explanation:
|
| 30 |
|
| 31 |
def generate_challenge(topic, level):
|
| 32 |
try:
|
|
@@ -35,15 +40,14 @@ def generate_challenge(topic, level):
|
|
| 35 |
challenge = response.generations[0].text.strip()
|
| 36 |
return challenge
|
| 37 |
except Exception as e:
|
| 38 |
-
return "Error in generating challenge:
|
| 39 |
|
| 40 |
-
#
|
| 41 |
def tutor_interface(name, experience, goals, topic, request_challenge=False):
|
| 42 |
-
# Validate inputs
|
| 43 |
if not all([name, experience, goals, topic]):
|
| 44 |
return "", "Please fill in all required fields."
|
| 45 |
|
| 46 |
-
# Generate adaptive content
|
| 47 |
level = assess_knowledge(name, experience, goals)
|
| 48 |
explanation = generate_explanation(topic, level)
|
| 49 |
|
|
@@ -52,10 +56,10 @@ def tutor_interface(name, experience, goals, topic, request_challenge=False):
|
|
| 52 |
response = f"**Level:** {level}\n\n**Explanation:**\n{explanation}\n\n**Challenge:**\n{challenge}"
|
| 53 |
else:
|
| 54 |
response = f"**Level:** {level}\n\n**Explanation:**\n{explanation}"
|
| 55 |
-
|
| 56 |
return "Generation complete!", response
|
| 57 |
|
| 58 |
-
# Gradio UI setup with
|
| 59 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 60 |
gr.Markdown("""
|
| 61 |
# Adaptive Computer Science Tutor
|
|
@@ -91,7 +95,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
| 91 |
|
| 92 |
# Handle submission with loading state
|
| 93 |
submit_button.click(
|
| 94 |
-
fn=lambda: ("Generating response...", ""), #
|
| 95 |
outputs=[status, output],
|
| 96 |
).then(
|
| 97 |
fn=tutor_interface,
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import cohere
|
| 3 |
from dotenv import load_dotenv
|
| 4 |
+
import os
|
| 5 |
|
| 6 |
# Pull configuration from the environment (a local .env file is honored).
load_dotenv()

# The Cohere key is mandatory — fail fast at import time with a clear
# message rather than erroring later on the first API call.
COHERE_API_KEY = os.environ.get("COHERE_API_KEY")
if COHERE_API_KEY is None:
    raise ValueError(
        "Cohere API key not found. Please set COHERE_API_KEY in environment variables."
    )

# Shared Cohere client used by the generation helpers below.
co = cohere.Client(COHERE_API_KEY)

MODEL_NAME = 'command-r-plus'  # Define model name as a constant
|
| 16 |
|
| 17 |
+
# Adaptive learning functions
|
| 18 |
def assess_knowledge(name, experience, goals):
|
| 19 |
try:
|
| 20 |
level_prompt = f"User Name: {name}, Experience: {experience}, Goals: {goals}. Classify as beginner, intermediate, or advanced."
|
|
|
|
| 22 |
level = response.generations[0].text.strip()
|
| 23 |
return level
|
| 24 |
except Exception as e:
|
| 25 |
+
return f"Error in knowledge assessment: {str(e)}"
|
| 26 |
|
| 27 |
def generate_explanation(topic, level):
|
| 28 |
try:
|
|
|
|
| 31 |
explanation = response.generations[0].text.strip()
|
| 32 |
return explanation
|
| 33 |
except Exception as e:
|
| 34 |
+
return f"Error in generating explanation: {str(e)}"
|
| 35 |
|
| 36 |
def generate_challenge(topic, level):
|
| 37 |
try:
|
|
|
|
| 40 |
challenge = response.generations[0].text.strip()
|
| 41 |
return challenge
|
| 42 |
except Exception as e:
|
| 43 |
+
return f"Error in generating challenge: {str(e)}"
|
| 44 |
|
| 45 |
+
# Tutor interface function with error handling and formatted output
|
| 46 |
def tutor_interface(name, experience, goals, topic, request_challenge=False):
|
|
|
|
| 47 |
if not all([name, experience, goals, topic]):
|
| 48 |
return "", "Please fill in all required fields."
|
| 49 |
|
| 50 |
+
# Generate adaptive content with structured response formatting
|
| 51 |
level = assess_knowledge(name, experience, goals)
|
| 52 |
explanation = generate_explanation(topic, level)
|
| 53 |
|
|
|
|
| 56 |
response = f"**Level:** {level}\n\n**Explanation:**\n{explanation}\n\n**Challenge:**\n{challenge}"
|
| 57 |
else:
|
| 58 |
response = f"**Level:** {level}\n\n**Explanation:**\n{explanation}"
|
| 59 |
+
|
| 60 |
return "Generation complete!", response
|
| 61 |
|
| 62 |
+
# Gradio UI setup with a loading status and structured layout
|
| 63 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
| 64 |
gr.Markdown("""
|
| 65 |
# Adaptive Computer Science Tutor
|
|
|
|
| 95 |
|
| 96 |
# Handle submission with loading state
|
| 97 |
submit_button.click(
|
| 98 |
+
fn=lambda: ("Generating response...", ""), # Show loading message
|
| 99 |
outputs=[status, output],
|
| 100 |
).then(
|
| 101 |
fn=tutor_interface,
|