Spaces:
Running
Running
Upload 4 files
Browse files- README.md +39 -10
- app.py +79 -21
- config.json +14 -12
- requirements.txt +1 -1
README.md
CHANGED
|
@@ -1,22 +1,51 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: blue
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 5.
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
-
license:
|
| 11 |
-
short_description:
|
| 12 |
---
|
| 13 |
|
| 14 |
-
#
|
| 15 |
|
| 16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
|
| 18 |
## Configuration
|
| 19 |
-
- **Model**:
|
| 20 |
- **API Key Variable**: API_KEY
|
| 21 |
- **HF Token Variable**: HF_TOKEN (for auto-updates)
|
| 22 |
-
- **Access Control**: Enabled (ACCESS_CODE)
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Course Assistant Example
|
| 3 |
+
emoji: 💬
|
| 4 |
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 5.42.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
+
license: mit
|
| 11 |
+
short_description: Python support for cultural analytics students
|
| 12 |
---
|
| 13 |
|
| 14 |
+
# Course Assistant Example
|
| 15 |
|
| 16 |
+
Python support for cultural analytics students
|
| 17 |
+
|
| 18 |
+
## Quick Setup
|
| 19 |
+
|
| 20 |
+
### Step 1: Configure API Key (Required)
|
| 21 |
+
1. Get your API key from https://openrouter.ai/keys
|
| 22 |
+
2. In Settings → Variables and secrets
|
| 23 |
+
3. Add secret: `API_KEY`
|
| 24 |
+
4. Paste your OpenRouter API key
|
| 25 |
+
|
| 26 |
+
### Step 2: Configure HuggingFace Token (Optional)
|
| 27 |
+
1. Get your token from https://huggingface.co/settings/tokens
|
| 28 |
+
2. In Settings → Variables and secrets
|
| 29 |
+
3. Add secret: `HF_TOKEN`
|
| 30 |
+
4. Paste your HuggingFace token (needs write permissions)
|
| 31 |
+
5. This enables automatic configuration updates
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
### Step 3: Set Access Code
|
| 35 |
+
1. In Settings → Variables and secrets
|
| 36 |
+
2. Add secret: `ACCESS_CODE`
|
| 37 |
+
3. Set your chosen password
|
| 38 |
+
4. Share with authorized users
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
### Step 4: Test Your Space
|
| 42 |
+
Your Space should now be running! Try the example prompts or ask your own questions.
|
| 43 |
|
| 44 |
## Configuration
|
| 45 |
+
- **Model**: openai/gpt-oss-120b
|
| 46 |
- **API Key Variable**: API_KEY
|
| 47 |
- **HF Token Variable**: HF_TOKEN (for auto-updates)
|
| 48 |
+
- **Access Control**: Enabled (ACCESS_CODE)
|
| 49 |
+
|
| 50 |
+
## Support
|
| 51 |
+
For help, visit the HuggingFace documentation or community forums.
|
app.py
CHANGED
|
@@ -12,23 +12,23 @@ from typing import List, Dict, Optional, Any, Tuple
|
|
| 12 |
|
| 13 |
|
| 14 |
# Configuration
|
| 15 |
-
SPACE_NAME = '
|
| 16 |
-
SPACE_DESCRIPTION = '
|
| 17 |
|
| 18 |
# Default configuration values
|
| 19 |
DEFAULT_CONFIG = {
|
| 20 |
'name': SPACE_NAME,
|
| 21 |
'description': SPACE_DESCRIPTION,
|
| 22 |
-
'system_prompt': "
|
| 23 |
-
'temperature': 0.
|
| 24 |
-
'max_tokens':
|
| 25 |
-
'model': '
|
| 26 |
'api_key_var': 'API_KEY',
|
| 27 |
'theme': 'Default',
|
| 28 |
-
'grounding_urls': ["https://
|
| 29 |
'enable_dynamic_urls': True,
|
| 30 |
'enable_file_upload': True,
|
| 31 |
-
'examples': ['
|
| 32 |
'language': 'English',
|
| 33 |
'locked': False
|
| 34 |
}
|
|
@@ -329,17 +329,18 @@ def get_grounding_context() -> str:
|
|
| 329 |
return ""
|
| 330 |
|
| 331 |
|
| 332 |
-
def
|
| 333 |
-
"""Export conversation history to
|
| 334 |
if not history:
|
| 335 |
return "No conversation to export."
|
| 336 |
|
| 337 |
-
|
|
|
|
| 338 |
Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
|
| 339 |
Space: {SPACE_NAME}
|
| 340 |
Model: {MODEL}
|
| 341 |
|
| 342 |
-
|
| 343 |
|
| 344 |
"""
|
| 345 |
|
|
@@ -349,13 +350,28 @@ Model: {MODEL}
|
|
| 349 |
role = message.get('role', 'unknown')
|
| 350 |
content = message.get('content', '')
|
| 351 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 352 |
if role == 'user':
|
| 353 |
message_count += 1
|
| 354 |
-
|
| 355 |
elif role == 'assistant':
|
| 356 |
-
|
| 357 |
|
| 358 |
-
return
|
| 359 |
|
| 360 |
|
| 361 |
def generate_response(message: str, history: List[Dict[str, str]], files: Optional[List] = None) -> str:
|
|
@@ -576,7 +592,11 @@ def create_interface():
|
|
| 576 |
|
| 577 |
# Create chat interface
|
| 578 |
chatbot = gr.Chatbot(type="messages", height=400)
|
| 579 |
-
msg = gr.Textbox(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 580 |
|
| 581 |
with gr.Row():
|
| 582 |
submit_btn = gr.Button("Send", variant="primary")
|
|
@@ -603,12 +623,12 @@ def create_interface():
|
|
| 603 |
return None
|
| 604 |
|
| 605 |
try:
|
| 606 |
-
content =
|
| 607 |
|
| 608 |
# Create filename
|
| 609 |
space_name_safe = re.sub(r'[^a-zA-Z0-9]+', '_', SPACE_NAME).lower()
|
| 610 |
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
| 611 |
-
filename = f"{space_name_safe}_conversation_{timestamp}.
|
| 612 |
|
| 613 |
# Save to temp file
|
| 614 |
temp_path = Path(tempfile.gettempdir()) / filename
|
|
@@ -647,10 +667,13 @@ def create_interface():
|
|
| 647 |
# Get response
|
| 648 |
response = generate_response(message, formatted_history, files_state)
|
| 649 |
|
| 650 |
-
#
|
|
|
|
|
|
|
|
|
|
| 651 |
chat_history = chat_history + [
|
| 652 |
-
{"role": "user", "content": message},
|
| 653 |
-
{"role": "assistant", "content": response}
|
| 654 |
]
|
| 655 |
|
| 656 |
# Update stored history for export
|
|
@@ -1038,6 +1061,41 @@ def create_interface():
|
|
| 1038 |
inputs=[access_input, access_granted],
|
| 1039 |
outputs=[access_panel, main_panel, access_status, access_granted]
|
| 1040 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1041 |
|
| 1042 |
return demo
|
| 1043 |
|
|
|
|
| 12 |
|
| 13 |
|
| 14 |
# Configuration
|
| 15 |
+
SPACE_NAME = 'Course Assistant Example'
|
| 16 |
+
SPACE_DESCRIPTION = 'Python support for cultural analytics students'
|
| 17 |
|
| 18 |
# Default configuration values
|
| 19 |
DEFAULT_CONFIG = {
|
| 20 |
'name': SPACE_NAME,
|
| 21 |
'description': SPACE_DESCRIPTION,
|
| 22 |
+
'system_prompt': "You're a Python guide for CCNY's CSC 10800 where September covers foundations (command line, Jupyter, script anatomy), October builds programming basics (data types through functions) with Activities 1-2, and November-December advances to pandas, network analysis, and data collection with Activities 3-5, culminating in a Social Coding Portfolio. Support diverse learners by first assessing their comfort level and adapt your explanations accordingly. Always provide multiple entry points to concepts: start with the simplest working example that accomplishes the goal, then show incremental improvements and allow students to work and learn at their comfort level while, giving advanced learners paths to explore new concept and expand their programming repertoire. Expect to complete all responses in under 1000 tokens.",
|
| 23 |
+
'temperature': 0.5,
|
| 24 |
+
'max_tokens': 1000,
|
| 25 |
+
'model': 'openai/gpt-oss-120b',
|
| 26 |
'api_key_var': 'API_KEY',
|
| 27 |
'theme': 'Default',
|
| 28 |
+
'grounding_urls': ["https://zmuhls.github.io/ccny-data-science/syllabus/", "https://zmuhls.github.io/ccny-data-science/schedule/"],
|
| 29 |
'enable_dynamic_urls': True,
|
| 30 |
'enable_file_upload': True,
|
| 31 |
+
'examples': ['How do I set up a interactive development environment?', 'Where can I find the course schedule?', 'When is the social coding portfolio due?', 'How do I push a commit to GitHub?', "I'm confused on how to use Jupyter notebooks"],
|
| 32 |
'language': 'English',
|
| 33 |
'locked': False
|
| 34 |
}
|
|
|
|
| 329 |
return ""
|
| 330 |
|
| 331 |
|
| 332 |
+
def export_conversation_to_text(history: List[Dict[str, str]]) -> str:
|
| 333 |
+
"""Export conversation history to text with timestamps"""
|
| 334 |
if not history:
|
| 335 |
return "No conversation to export."
|
| 336 |
|
| 337 |
+
text_content = f"""Conversation Export
|
| 338 |
+
==================
|
| 339 |
Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
|
| 340 |
Space: {SPACE_NAME}
|
| 341 |
Model: {MODEL}
|
| 342 |
|
| 343 |
+
==================
|
| 344 |
|
| 345 |
"""
|
| 346 |
|
|
|
|
| 350 |
role = message.get('role', 'unknown')
|
| 351 |
content = message.get('content', '')
|
| 352 |
|
| 353 |
+
# Get timestamp from message or use current time as fallback
|
| 354 |
+
timestamp_str = message.get('timestamp', '')
|
| 355 |
+
if timestamp_str:
|
| 356 |
+
try:
|
| 357 |
+
# Parse ISO format timestamp and format it nicely
|
| 358 |
+
timestamp = datetime.fromisoformat(timestamp_str)
|
| 359 |
+
formatted_timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
|
| 360 |
+
except:
|
| 361 |
+
formatted_timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 362 |
+
else:
|
| 363 |
+
formatted_timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 364 |
+
|
| 365 |
+
# Get message length
|
| 366 |
+
msg_length = message.get('length', len(content))
|
| 367 |
+
|
| 368 |
if role == 'user':
|
| 369 |
message_count += 1
|
| 370 |
+
text_content += f"[{formatted_timestamp}] User Message {message_count} ({msg_length} chars):\n{content}\n\n"
|
| 371 |
elif role == 'assistant':
|
| 372 |
+
text_content += f"[{formatted_timestamp}] Assistant Response {message_count} ({msg_length} chars):\n{content}\n\n------------------\n\n"
|
| 373 |
|
| 374 |
+
return text_content
|
| 375 |
|
| 376 |
|
| 377 |
def generate_response(message: str, history: List[Dict[str, str]], files: Optional[List] = None) -> str:
|
|
|
|
| 592 |
|
| 593 |
# Create chat interface
|
| 594 |
chatbot = gr.Chatbot(type="messages", height=400)
|
| 595 |
+
msg = gr.Textbox(
|
| 596 |
+
label="Message (Shift+Enter to send)",
|
| 597 |
+
placeholder="Type your message here...",
|
| 598 |
+
lines=2
|
| 599 |
+
)
|
| 600 |
|
| 601 |
with gr.Row():
|
| 602 |
submit_btn = gr.Button("Send", variant="primary")
|
|
|
|
| 623 |
return None
|
| 624 |
|
| 625 |
try:
|
| 626 |
+
content = export_conversation_to_text(chat_history)
|
| 627 |
|
| 628 |
# Create filename
|
| 629 |
space_name_safe = re.sub(r'[^a-zA-Z0-9]+', '_', SPACE_NAME).lower()
|
| 630 |
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
| 631 |
+
filename = f"{space_name_safe}_conversation_{timestamp}.txt"
|
| 632 |
|
| 633 |
# Save to temp file
|
| 634 |
temp_path = Path(tempfile.gettempdir()) / filename
|
|
|
|
| 667 |
# Get response
|
| 668 |
response = generate_response(message, formatted_history, files_state)
|
| 669 |
|
| 670 |
+
# Get current timestamp
|
| 671 |
+
current_time = datetime.now()
|
| 672 |
+
|
| 673 |
+
# Update chat history with timestamps and message lengths
|
| 674 |
chat_history = chat_history + [
|
| 675 |
+
{"role": "user", "content": message, "timestamp": current_time.isoformat(), "length": len(message)},
|
| 676 |
+
{"role": "assistant", "content": response, "timestamp": current_time.isoformat(), "length": len(response)}
|
| 677 |
]
|
| 678 |
|
| 679 |
# Update stored history for export
|
|
|
|
| 1061 |
inputs=[access_input, access_granted],
|
| 1062 |
outputs=[access_panel, main_panel, access_status, access_granted]
|
| 1063 |
)
|
| 1064 |
+
|
| 1065 |
+
|
| 1066 |
+
# Add keyboard shortcuts
|
| 1067 |
+
demo.load(
|
| 1068 |
+
None,
|
| 1069 |
+
None,
|
| 1070 |
+
None,
|
| 1071 |
+
js="""
|
| 1072 |
+
() => {
|
| 1073 |
+
// Focus on message input when page loads
|
| 1074 |
+
setTimeout(() => {
|
| 1075 |
+
const msgInput = document.querySelector('textarea');
|
| 1076 |
+
if (msgInput) msgInput.focus();
|
| 1077 |
+
}, 100);
|
| 1078 |
+
|
| 1079 |
+
// Keyboard shortcuts
|
| 1080 |
+
document.addEventListener('keydown', function(e) {
|
| 1081 |
+
// Ctrl+L to clear chat
|
| 1082 |
+
if (e.ctrlKey && e.key === 'l') {
|
| 1083 |
+
e.preventDefault();
|
| 1084 |
+
const buttons = Array.from(document.querySelectorAll('button'));
|
| 1085 |
+
const clearBtn = buttons.find(btn => btn.textContent.includes('Clear'));
|
| 1086 |
+
if (clearBtn) clearBtn.click();
|
| 1087 |
+
}
|
| 1088 |
+
// Ctrl+E to export
|
| 1089 |
+
else if (e.ctrlKey && e.key === 'e') {
|
| 1090 |
+
e.preventDefault();
|
| 1091 |
+
const buttons = Array.from(document.querySelectorAll('button'));
|
| 1092 |
+
const exportBtn = buttons.find(btn => btn.textContent.includes('Export'));
|
| 1093 |
+
if (exportBtn) exportBtn.click();
|
| 1094 |
+
}
|
| 1095 |
+
});
|
| 1096 |
+
}
|
| 1097 |
+
"""
|
| 1098 |
+
)
|
| 1099 |
|
| 1100 |
return demo
|
| 1101 |
|
config.json
CHANGED
|
@@ -1,23 +1,25 @@
|
|
| 1 |
{
|
| 2 |
-
"name": "
|
| 3 |
-
"tagline": "
|
| 4 |
-
"description": "
|
| 5 |
-
"system_prompt": "
|
| 6 |
"model": "openai/gpt-oss-120b",
|
| 7 |
"language": "English",
|
| 8 |
"api_key_var": "API_KEY",
|
| 9 |
-
"temperature": 0.
|
| 10 |
-
"max_tokens":
|
| 11 |
"examples": [
|
| 12 |
-
"
|
| 13 |
-
"
|
| 14 |
-
"
|
|
|
|
|
|
|
| 15 |
],
|
| 16 |
"grounding_urls": [
|
| 17 |
-
"https://
|
|
|
|
| 18 |
],
|
| 19 |
"enable_dynamic_urls": true,
|
| 20 |
"enable_file_upload": true,
|
| 21 |
-
"theme": "
|
| 22 |
-
"locked": false
|
| 23 |
}
|
|
|
|
| 1 |
{
|
| 2 |
+
"name": "Course Assistant Example",
|
| 3 |
+
"tagline": "Python support for cultural analytics students",
|
| 4 |
+
"description": "Python support for cultural analytics students",
|
| 5 |
+
"system_prompt": "You're a Python guide for CCNY's CSC 10800 where September covers foundations (command line, Jupyter, script anatomy), October builds programming basics (data types through functions) with Activities 1-2, and November-December advances to pandas, network analysis, and data collection with Activities 3-5, culminating in a Social Coding Portfolio. Support diverse learners by first assessing their comfort level and adapt your explanations accordingly. Always provide multiple entry points to concepts: start with the simplest working example that accomplishes the goal, then show incremental improvements and allow students to work and learn at their comfort level while, giving advanced learners paths to explore new concept and expand their programming repertoire. Expect to complete all responses in under 1000 tokens.",
|
| 6 |
"model": "openai/gpt-oss-120b",
|
| 7 |
"language": "English",
|
| 8 |
"api_key_var": "API_KEY",
|
| 9 |
+
"temperature": 0.5,
|
| 10 |
+
"max_tokens": 1000,
|
| 11 |
"examples": [
|
| 12 |
+
"How do I set up a interactive development environment?",
|
| 13 |
+
"Where can I find the course schedule?",
|
| 14 |
+
"When is the social coding portfolio due?",
|
| 15 |
+
"How do I push a commit to GitHub?",
|
| 16 |
+
"I'm confused on how to use Jupyter notebooks"
|
| 17 |
],
|
| 18 |
"grounding_urls": [
|
| 19 |
+
"https://zmuhls.github.io/ccny-data-science/syllabus/",
|
| 20 |
+
"https://zmuhls.github.io/ccny-data-science/schedule/"
|
| 21 |
],
|
| 22 |
"enable_dynamic_urls": true,
|
| 23 |
"enable_file_upload": true,
|
| 24 |
+
"theme": "Default"
|
|
|
|
| 25 |
}
|
requirements.txt
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
gradio>=5.
|
| 2 |
requests>=2.32.3
|
| 3 |
beautifulsoup4>=4.12.3
|
| 4 |
python-dotenv>=1.0.0
|
|
|
|
| 1 |
+
gradio>=5.42.0
|
| 2 |
requests>=2.32.3
|
| 3 |
beautifulsoup4>=4.12.3
|
| 4 |
python-dotenv>=1.0.0
|