Spaces:
Running
Running
Upload folder using huggingface_hub
Browse files- .gitignore +41 -0
- AI_Talk.py +602 -0
- AI_Talk_Demo.py +406 -0
- AI_Talk_Gradio.py +586 -0
- AI_Talk_Original.py +441 -0
- Claude.md +105 -0
- README.md +3 -9
- README_AI_Talk.md +62 -0
- README_Deploy.md +108 -0
- README_Gradio.md +109 -0
- ai-friends-talk.html +804 -0
- day1.ipynb +1582 -0
- js/languages.js +221 -0
- requirements.txt +20 -0
- requirements_ai_talk_gradio.txt +4 -0
- safe_upload.py +134 -0
- start_ai_talk.bat +12 -0
- test_ai_talk.py +148 -0
- test_gradio.py +11 -0
.gitignore
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Environment variables (contains API keys)
|
| 2 |
+
.env
|
| 3 |
+
|
| 4 |
+
# Python cache
|
| 5 |
+
__pycache__/
|
| 6 |
+
*.pyc
|
| 7 |
+
*.pyo
|
| 8 |
+
*.pyd
|
| 9 |
+
.Python
|
| 10 |
+
|
| 11 |
+
# Gradio temporary files
|
| 12 |
+
.gradio/
|
| 13 |
+
flagged/
|
| 14 |
+
|
| 15 |
+
# IDE files
|
| 16 |
+
.vscode/
|
| 17 |
+
.idea/
|
| 18 |
+
*.swp
|
| 19 |
+
*.swo
|
| 20 |
+
|
| 21 |
+
# OS files
|
| 22 |
+
.DS_Store
|
| 23 |
+
Thumbs.db
|
| 24 |
+
|
| 25 |
+
# Virtual environments
|
| 26 |
+
venv/
|
| 27 |
+
env/
|
| 28 |
+
.venv/
|
| 29 |
+
|
| 30 |
+
# Jupyter
|
| 31 |
+
.ipynb_checkpoints/
|
| 32 |
+
|
| 33 |
+
# Temporary files
|
| 34 |
+
*.tmp
|
| 35 |
+
*.temp
|
| 36 |
+
temp_*
|
| 37 |
+
|
| 38 |
+
# Certificate files
|
| 39 |
+
*.pem
|
| 40 |
+
*.key
|
| 41 |
+
*.crt
|
AI_Talk.py
ADDED
|
@@ -0,0 +1,602 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import html
import os
import time

import anthropic
import streamlit as st
from dotenv import load_dotenv
from groq import Groq
from openai import OpenAI

# Load environment variables
load_dotenv()
|
| 11 |
+
|
| 12 |
+
class AIFriendsTalk:
    """Round-table of three AI characters that debate light-hearted topics.

    Alex speaks through Groq (Llama 3 70B); Blake and Charlie speak through
    Google Gemini 2.0/1.5 Flash via Google's OpenAI-compatible endpoint.
    Topic lists are provided in English, Vietnamese and German.
    """

    def __init__(self):
        # Wire up API clients, character definitions and topic lists.
        self.setup_apis()
        self.setup_characters()
        self.setup_topics()

    def setup_apis(self):
        """Setup API clients following day1.ipynb structure."""
        # NOTE(review): openai_client and claude_client are never used by
        # get_ai_response() below — confirm whether they can be dropped
        # (instantiating them requires OPENAI_API_KEY / ANTHROPIC_API_KEY
        # to be present even though no calls are made).
        # OpenAI client
        self.openai_client = OpenAI()

        # Anthropic Claude client
        self.claude_client = anthropic.Anthropic()

        # Groq client for the Alex character
        self.groq_client = Groq()

        # Gemini via the OpenAI-compatible interface for Blake and Charlie.
        # If the key is missing, self.gemini_client is simply never created;
        # get_ai_response() checks for the attribute before using it.
        google_api_key = os.getenv('GOOGLE_API_KEY')
        if google_api_key:
            # A single Gemini client serves both the 2.0 and 1.5 models.
            self.gemini_client = OpenAI(
                api_key=google_api_key,
                base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
            )
        else:
            print("Warning: Google API key not found. Blake and Charlie characters will use fallback responses.")

    def setup_characters(self):
        """Define AI characters with enhanced personalities.

        Each entry maps a display name to the backend ("model"), the concrete
        model identifier, a system-style personality prompt, and the accent
        color used by the UI.
        """
        self.characters = {
            "Alex": {
                "model": "groq",
                "model_name": "llama3-70b-8192",
                "personality": "You are Alex, a witty and charismatic AI debater with sharp humor. Play devil's advocate with creative angles. Keep responses brief but engaging (2-3 sentences max). Be confident and thought-provoking.",
                "color": "#FF6B6B"
            },
            "Blake": {
                "model": "gemini2",
                "model_name": "gemini-2.0-flash",
                "personality": "You are Blake, an imaginative and optimistic AI storyteller. Use beautiful metaphors and find wonder in everything. Keep responses brief but poetic (2-3 sentences max). Be inspiring and creative.",
                "color": "#4ECDC4"
            },
            "Charlie": {
                "model": "gemini1.5",
                "model_name": "gemini-1.5-flash",
                "personality": "You are Charlie, a systematic AI analyst with scientific curiosity. Break down ideas logically and find patterns. Keep responses brief but structured (2-3 sentences max). Be analytical yet open to different viewpoints.",
                "color": "#45B7D1"
            }
        }

    def setup_topics(self):
        """Define fun conversation topics, keyed by UI language code."""
        self.topics = {
            "en": [
                "If animals could use smartphones, which app would be most popular?",
                "What would happen if gravity worked backwards for one day?",
                "Should pineapple on pizza be considered a crime?",
                "If you could add a 13th month to the year, what would you name it?",
                "What's the most useless superpower you can think of?",
                "If colors had personalities, what would each color be like?",
                "Should robots have to pay taxes?",
                "What would the world be like if everyone could read minds?",
                "If you could make one rule that everyone had to follow, what would it be?",
                "What's the weirdest food combination that actually tastes good?",
                "If you could live inside any video game, which would you choose and why?",
                "What would happen if all cats suddenly learned how to speak human language?",
                "Should there be a maximum limit on how many selfies you can take per day?",
                "If you could give any animal the ability to fly, which would be the funniest?",
                "What's the most ridiculous thing humans do that aliens would find confusing?",
                "If social media existed in medieval times, what would people post about?",
                "Should there be professional competitions for everyday activities like making beds?",
                "What would change if humans hibernated for 3 months every year?",
                "If you could replace one everyday sound with any other sound, what would it be?",
                "What's the most absurd job that could exist in the future?"
            ],
            "vi": [
                "Nếu động vật có thể sử dụng smartphone, ứng dụng nào sẽ phổ biến nhất?",
                "Điều gì sẽ xảy ra nếu trọng lực hoạt động ngược lại trong một ngày?",
                "Có nên coi dứa trên pizza là tội phạm không?",
                "Nếu bạn có thể thêm tháng thứ 13 vào năm, bạn sẽ đặt tên gì?",
                "Siêu năng lực vô dụng nhất mà bạn có thể nghĩ ra là gì?",
                # Repaired mojibake in the original ("c\ufffd\ufffd" -> "có").
                "Nếu màu sắc có tính cách, mỗi màu sẽ như thế nào?",
                "Robot có nên phải trả thuế không?",
                "Thế giới sẽ như thế nào nếu mọi người đều có thể đọc suy nghĩ?",
                "Nếu bạn có thể đặt ra một quy tắc mà mọi người phải tuân theo, đó sẽ là gì?",
                "Sự kết hợp thực phẩm kỳ lạ nhất mà thực sự ngon là gì?",
                "Nếu bạn có thể sống trong bất kỳ trò chơi điện tử nào, bạn sẽ chọn cái nào và tại sao?",
                "Điều gì sẽ xảy ra nếu tất cả mèo đột nhiên học được cách nói tiếng người?",
                "Có nên có giới hạn tối đa về số lần selfie bạn có thể chụp mỗi ngày không?",
                "Nếu bạn có thể cho bất kỳ động vật nào khả năng bay, con nào sẽ hài hước nhất?",
                "Điều kỳ lạ nhất mà con người làm khiến người ngoài hành tinh cảm thấy khó hiểu là gì?",
                "Nếu mạng xã hội tồn tại thời trung cổ, mọi người sẽ đăng gì?",
                "Có nên có các cuộc thi chuyên nghiệp cho các hoạt động hàng ngày như dọn giường không?",
                "Điều gì sẽ thay đổi nếu con người ngủ đông 3 tháng mỗi năm?",
                "Nếu bạn có thể thay thế một âm thanh hàng ngày bằng âm thanh khác, đó sẽ là gì?",
                "Công việc vô lý nhất có thể tồn tại trong tương lai là gì?"
            ],
            "de": [
                "Wenn Tiere Smartphones benutzen könnten, welche App wäre am beliebtesten?",
                "Was würde passieren, wenn die Schwerkraft einen Tag lang rückwärts wirken würde?",
                "Sollte Ananas auf Pizza als Verbrechen betrachtet werden?",
                "Wenn Sie einen 13. Monat zum Jahr hinzufügen könnten, wie würden Sie ihn nennen?",
                "Was ist die nutzloseste Superkraft, die Sie sich vorstellen können?",
                "Wenn Farben Persönlichkeiten hätten, wie wäre jede Farbe?",
                "Sollten Roboter Steuern zahlen müssen?",
                "Wie wäre die Welt, wenn jeder Gedanken lesen könnte?",
                "Wenn Sie eine Regel aufstellen könnten, die jeder befolgen müsste, was wäre das?",
                "Was ist die seltsamste Lebensmittelkombination, die tatsächlich gut schmeckt?",
                "Wenn Sie in einem beliebigen Videospiel leben könnten, welches würden Sie wählen und warum?",
                "Was würde passieren, wenn alle Katzen plötzlich die menschliche Sprache lernen würden?",
                "Sollte es ein maximales Limit für Selfies geben, die man pro Tag machen kann?",
                "Wenn Sie einem Tier die Fähigkeit zu fliegen geben könnten, welches wäre am lustigsten?",
                "Was ist das Absurdeste, was Menschen tun und Außerirdische verwirrend finden würden?",
                "Wenn soziale Medien im Mittelalter existiert hätten, worüber hätten die Leute gepostet?",
                "Sollte es professionelle Wettbewerbe für alltägliche Aktivitäten wie Bettenmachen geben?",
                "Was würde sich ändern, wenn Menschen 3 Monate im Jahr Winterschlaf halten würden?",
                "Wenn Sie ein alltägliches Geräusch durch ein anderes ersetzen könnten, was wäre das?",
                "Was ist der absurdeste Job, der in der Zukunft existieren könnte?"
            ]
        }

    def get_ai_response(self, character_name, conversation_history, current_topic, language):
        """Return one in-character reply that advances the discussion.

        Args:
            character_name: Key into self.characters ("Alex"/"Blake"/"Charlie").
            conversation_history: List of {"character": str, "message": str}
                dicts; user entries are labelled "You"/"Bạn"/"Du".
            current_topic: The topic string the debate should stay on.
            language: "en", "vi" or "de" — controls the reply language.

        Returns:
            The model's reply text, or a human-readable error/unavailable
            message string (never raises).
        """
        character = self.characters[character_name]

        # Detect user contributions (the localized display names for the user).
        user_messages = [msg for msg in conversation_history if msg['character'] in ['You', 'Bạn', 'Du']]
        has_user_input = len(user_messages) > 0
        last_user_message = user_messages[-1]['message'] if user_messages else ''

        # Replay only the last 6 messages to keep the prompt short and to
        # reduce repetition.
        recent_conversation = conversation_history[-6:] if len(conversation_history) > 6 else conversation_history

        # Build a context-aware conversation summary.
        conversation_context = f"Topic: {current_topic}\n\nRecent conversation:\n"
        for msg in recent_conversation:
            conversation_context += f"{msg['character']}: {msg['message']}\n"

        # What this character already said, used to discourage repetition.
        character_previous_messages = [msg['message'] for msg in conversation_history if msg['character'] == character_name]

        # Language instruction with topic focus.
        lang_instruction = {
            "en": f"Please respond in English. Stay focused on the topic '{current_topic}'. Keep your response BRIEF and CONCISE (maximum 2-3 sentences).",
            "vi": f"Vui lòng trả lời bằng tiếng Việt. Tập trung vào chủ đề '{current_topic}'. Giữ câu trả lời NGẮN GỌN và SÚC TÍCH (tối đa 2-3 câu).",
            "de": f"Bitte antworten Sie auf Deutsch. Konzentrieren Sie sich auf das Thema '{current_topic}'. Halten Sie Ihre Antwort KURZ und PRÄGNANT (maximal 2-3 Sätze)."
        }

        context_instruction = f"""
IMPORTANT INSTRUCTIONS:
1. Stay on topic: '{current_topic}'
2. Build upon what others have said, don't ignore previous messages
3. Provide NEW insights, don't repeat what you or others have already said
4. Reference specific points made by other participants
5. Ask follow-up questions or introduce new angles related to the topic
"""

        # Ask the character to address the user's latest message, if any.
        user_interaction = ""
        if has_user_input:
            user_interaction = f"\n6. IMPORTANT: The user contributed: '{last_user_message}'. Address this in your response."

        # Repetition avoidance: remind the model of its last two messages.
        repetition_check = ""
        if character_previous_messages:
            repetition_check = f"\n7. Avoid repeating these points you already made: {'; '.join(character_previous_messages[-2:])}"

        prompt = f"{character['personality']}\n\n{lang_instruction[language]}\n\n{context_instruction}{user_interaction}{repetition_check}\n\n{conversation_context}\n\nNow respond as {character_name} with a thoughtful message that advances the discussion:"

        try:
            # Select the backend client.  Both Gemini models share one client
            # and the same OpenAI-compatible call, so the three previously
            # duplicated request blocks collapse into a single one below.
            if character["model"] == "groq":
                client = self.groq_client
            elif character["model"] in ("gemini2", "gemini1.5"):
                if not hasattr(self, 'gemini_client'):
                    # character_name is "Blake" for gemini2 and "Charlie" for
                    # gemini1.5, reproducing the original hard-coded messages.
                    return f"Sorry, {character_name} is unavailable (Google API key not configured)."
                client = self.gemini_client
            else:
                return f"Unknown character model: {character['model']}"

            response = client.chat.completions.create(
                model=character["model_name"],
                messages=[{"role": "user", "content": prompt}],
                max_tokens=100,
                temperature=0.8
            )
            return response.choices[0].message.content

        except Exception as e:
            # Surface API failures as chat text instead of crashing the app.
            error_msg = str(e)[:100] if str(e) else "Unknown error"
            return f"[{character_name}] API Error: {error_msg}..."
|
| 227 |
+
|
| 228 |
+
def main():
    """Render the Streamlit UI and drive the AI-friends conversation loop.

    Layout: top banner + language selector, character cards, topic picker,
    a controls column (start/continue/pause/clear + user message box) and a
    conversation column.  While `is_talking` is set, each rerun generates one
    AI message, sleeps briefly, and reruns again.
    """
    st.set_page_config(
        page_title="AI Friends Talk",
        page_icon="🤖",
        layout="wide",
        initial_sidebar_state="collapsed"
    )

    # Custom CSS - Updated design
    st.markdown("""
    <style>
    .top-banner {
        background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
        padding: 15px 20px;
        margin-bottom: 10px;
        display: flex;
        justify-content: space-between;
        align-items: center;
    }
    .bottom-banner {
        background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
        padding: 15px;
        margin-top: 15px;
        text-align: center;
        color: white;
        display: flex;
        justify-content: space-between;
        align-items: center;
    }
    .logo {
        background: linear-gradient(135deg, #FF8A65 0%, #FF6B9D 100%);
        padding: 6px 12px;
        border-radius: 15px;
        font-weight: bold;
        font-size: 16px;
        color: white;
    }
    .character-card {
        background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
        padding: 10px;
        border-radius: 10px;
        color: white;
        text-align: center;
        margin: 5px;
        font-size: 12px;
    }
    .alex-card {
        background: linear-gradient(135deg, #FF6B6B 0%, #FF8E53 100%);
    }
    .blake-card {
        background: linear-gradient(135deg, #4ECDC4 0%, #44A08D 100%);
    }
    .charlie-card {
        background: linear-gradient(135deg, #45B7D1 0%, #96C93D 100%);
    }
    .chat-message {
        padding: 10px;
        margin: 8px 0;
        border-radius: 10px;
        border-left: 4px solid;
        font-size: 13px;
    }
    .alex-message {
        background-color: rgba(255, 107, 107, 0.1);
        border-left-color: #FF6B6B;
    }
    .blake-message {
        background-color: rgba(78, 205, 196, 0.1);
        border-left-color: #4ECDC4;
    }
    .charlie-message {
        background-color: rgba(69, 183, 209, 0.1);
        border-left-color: #45B7D1;
    }
    .user-message {
        background-color: rgba(158, 158, 158, 0.1);
        border-left-color: #9E9E9E;
    }
    .compact-input {
        font-size: 12px;
        padding: 8px;
    }
    .small-button {
        font-size: 12px;
        padding: 6px 12px;
    }

    /* Position language selector overlay on banner */
    .lang-selector-overlay {
        position: relative;
        margin-top: -60px; /* Move up to overlap with banner */
        z-index: 100;
        display: flex;
        justify-content: flex-end;
        padding-right: 15px;
    }

    .lang-selector-overlay .stSelectbox {
        min-width: 110px;
        max-width: 130px;
    }

    /* Style for compact language selector to match banner */
    .lang-selector-overlay .stSelectbox > div > div {
        background: rgba(255,255,255,0.2) !important;
        border: 2px solid rgba(255,255,255,0.3) !important;
        border-radius: 15px !important;
    }

    .lang-selector-overlay .stSelectbox > div > div > div {
        color: white !important;
        font-size: 12px !important;
        font-weight: bold !important;
        padding: 5px 10px !important;
    }

    .lang-selector-overlay .stSelectbox > div > div:hover {
        background: rgba(255,255,255,0.3) !important;
        border-color: rgba(255,255,255,0.5) !important;
    }
    </style>
    """, unsafe_allow_html=True)

    # Initialize session state so keys exist on the very first run.
    if 'conversation' not in st.session_state:
        st.session_state.conversation = []
    if 'current_topic' not in st.session_state:
        st.session_state.current_topic = ""
    if 'is_talking' not in st.session_state:
        st.session_state.is_talking = False
    if 'conversation_ready' not in st.session_state:
        st.session_state.conversation_ready = False
    if 'language' not in st.session_state:
        st.session_state.language = "en"

    # Build the (API-client-holding) helper once and keep it in session state.
    if 'ai_friends' not in st.session_state:
        st.session_state.ai_friends = AIFriendsTalk()

    # Language options with flags and localized banner text.
    languages = {
        "en": {"name": "English", "short": "EN", "flag": "🇺🇸", "title": "AI Friends Talk", "subtitle": "Watch AI friends debate fun topics!"},
        "vi": {"name": "Tiếng Việt", "short": "VI", "flag": "🇻🇳", "title": "AI Friends Talk", "subtitle": "Xem các AI bạn tranh luận về những chủ đề vui vẻ!"},
        "de": {"name": "Deutsch", "short": "DE", "flag": "🇩🇪", "title": "AI Friends Talk", "subtitle": "Schaue zu, wie AI-Freunde über lustige Themen diskutieren!"}
    }

    # Top Banner - full width
    st.markdown(f"""
    <div class="top-banner" id="mainBanner">
        <div style="display: flex; align-items: center; gap: 10px;">
            <div class="logo">🧠 DB</div>
            <div style="color: white; font-weight: bold; font-size: 16px;">
                Digitized Brains
            </div>
        </div>
        <div style="text-align: center; flex: 1;">
            <h2 style="color: white; margin: 0; font-size: 20px;">{languages[st.session_state.language]["title"]}</h2>
            <p style="color: white; margin: 0; font-size: 12px;">{languages[st.session_state.language]["subtitle"]}</p>
        </div>
        <div style="width: 110px;"></div> <!-- Placeholder for selectbox space -->
    </div>
    """, unsafe_allow_html=True)

    # Language selector - positioned over the banner via the CSS overlay above.
    st.markdown('<div class="lang-selector-overlay">', unsafe_allow_html=True)
    selected_lang = st.selectbox(
        "Language",
        options=["en", "vi", "de"],
        format_func=lambda x: f"🌐 {languages[x]['name']}",
        index=["en", "vi", "de"].index(st.session_state.language),
        key="lang_select",
        label_visibility="hidden"
    )
    if selected_lang != st.session_state.language:
        st.session_state.language = selected_lang
        st.rerun()
    st.markdown('</div>', unsafe_allow_html=True)

    # Character Introduction - compact 3 columns
    st.markdown("### 👥 Meet the AI Friends")
    col1, col2, col3 = st.columns(3)

    with col1:
        st.markdown("""
        <div class="character-card alex-card">
            <h4>🎭 Alex</h4>
            <p style="margin: 0; font-size: 10px;">The witty debater (Groq AI)</p>
        </div>
        """, unsafe_allow_html=True)

    with col2:
        st.markdown("""
        <div class="character-card blake-card">
            <h4>🎨 Blake</h4>
            <p style="margin: 0; font-size: 10px;">The creative optimist (Gemini 2.0)</p>
        </div>
        """, unsafe_allow_html=True)

    with col3:
        st.markdown("""
        <div class="character-card charlie-card">
            <h4>🔬 Charlie</h4>
            <p style="margin: 0; font-size: 10px;">The logical analyst (Gemini 1.5)</p>
        </div>
        """, unsafe_allow_html=True)

    # Topic Selection - 2 columns (custom text vs predefined list)
    st.markdown("### 📝 Choose Your Topic")
    col1, col2 = st.columns(2)

    with col1:
        st.markdown("**Custom Topic:**")
        custom_topic = st.text_input(
            "Enter your own topic:",
            placeholder="Enter your custom topic here...",
            label_visibility="collapsed"
        )
        if st.button("📝 Set Topic", key="set_custom"):
            if custom_topic.strip():
                st.session_state.selected_topic = custom_topic.strip()
                st.success("Custom topic set!")

    with col2:
        st.markdown("**Predefined Topics:**")
        # NOTE(review): this assignment runs on every rerun, so the selectbox
        # value overwrites a custom topic set in the same run — confirm that
        # this ordering is intended.
        selected_topic = st.selectbox(
            "Choose from list:",
            options=st.session_state.ai_friends.topics[st.session_state.language],
            label_visibility="collapsed"
        )
        st.session_state.selected_topic = selected_topic

    # Main Layout - controls (left) and conversation (right)
    col_left, col_right = st.columns([1, 2])

    with col_left:
        st.markdown("### 🎮 Controls")

        # Start button: arm a fresh conversation on the chosen topic.
        if st.button("🎬 Start Conversation", use_container_width=True):
            if hasattr(st.session_state, 'selected_topic'):
                st.session_state.current_topic = st.session_state.selected_topic
                st.session_state.conversation = []
                st.session_state.conversation_ready = True
                st.session_state.is_talking = False
                st.success("Conversation ready! Click Continue to start.")

        # Continue button: enables the auto-conversation loop below.
        if st.session_state.conversation_ready:
            if st.button("▶️ Continue", use_container_width=True):
                st.session_state.is_talking = True

        # Pause button: stops the loop without clearing history.
        if st.session_state.is_talking:
            if st.button("⏸️ Pause", use_container_width=True):
                st.session_state.is_talking = False

        # Clear button: full reset of the conversation state.
        if st.button("🔄 Clear Chat", use_container_width=True):
            st.session_state.conversation = []
            st.session_state.is_talking = False
            st.session_state.conversation_ready = False
            st.session_state.current_topic = ""

        st.markdown("---")
        st.markdown("### 💭 Add Your Message")
        user_message = st.text_area(
            "Join the conversation:",
            height=80,
            placeholder="Type your message..."
        )

        if st.button("📤 Send Message", use_container_width=True):
            if user_message.strip():
                # The localized label is how get_ai_response() recognizes
                # user messages.
                user_name = {"en": "You", "vi": "Bạn", "de": "Du"}[st.session_state.language]
                st.session_state.conversation.append({
                    "character": user_name,
                    "message": user_message.strip()
                })
                st.rerun()

    with col_right:
        st.markdown("### 💬 Conversation")

        if st.session_state.current_topic:
            st.info(f"**Topic:** {st.session_state.current_topic}")

        # Scrollable conversation container styling.
        st.markdown("""
        <style>
        .conversation-container {
            height: 600px;
            overflow-y: auto;
            padding: 15px;
            border: 2px solid #e0e0e0;
            border-radius: 10px;
            background-color: #fafafa;
        }
        </style>
        """, unsafe_allow_html=True)

        # Build the conversation HTML.  Messages are HTML-escaped: they are
        # rendered with unsafe_allow_html=True, and both user input and model
        # output are untrusted, so raw interpolation was an HTML/script
        # injection hole.
        conversation_html = '<div class="conversation-container">'

        if st.session_state.conversation:
            for msg in st.session_state.conversation:
                character = msg["character"]
                message = html.escape(msg["message"])

                if character == "Alex":
                    conversation_html += f'<div class="chat-message alex-message"><strong>Alex:</strong> {message}</div>'
                elif character == "Blake":
                    conversation_html += f'<div class="chat-message blake-message"><strong>Blake:</strong> {message}</div>'
                elif character == "Charlie":
                    conversation_html += f'<div class="chat-message charlie-message"><strong>Charlie:</strong> {message}</div>'
                else:
                    # User messages; the character label is escaped too.
                    conversation_html += f'<div class="chat-message user-message"><strong>{html.escape(character)}:</strong> {message}</div>'
        else:
            conversation_html += '<div style="text-align: center; color: #666; padding: 40px;"><p>Select a topic and start the conversation to see AI friends debate!</p></div>'

        conversation_html += '</div>'

        # Display the conversation container.
        st.markdown(conversation_html, unsafe_allow_html=True)

        # Auto-scroll to bottom.
        # NOTE(review): st.markdown does not execute <script> tags, so this
        # is likely a no-op — a components.html iframe would be needed.
        if st.session_state.conversation:
            st.markdown("""
            <script>
            setTimeout(function() {
                var container = document.querySelector('.conversation-container');
                if (container) {
                    container.scrollTop = container.scrollHeight;
                }
            }, 100);
            </script>
            """, unsafe_allow_html=True)

    # Auto conversation logic - one AI message per rerun until paused.
    if st.session_state.is_talking and st.session_state.current_topic:
        # NOTE(review): rotation indexes by total message count, so user
        # messages shift which character speaks next — confirm intended.
        character_order = ["Alex", "Blake", "Charlie"]
        next_speaker = character_order[len(st.session_state.conversation) % 3]

        with st.spinner(f"{next_speaker} is thinking..."):
            response = st.session_state.ai_friends.get_ai_response(
                next_speaker,
                st.session_state.conversation,
                st.session_state.current_topic,
                st.session_state.language
            )

            st.session_state.conversation.append({
                "character": next_speaker,
                "message": response
            })

            time.sleep(2)  # Pause between messages for readability
            st.rerun()

    # Bottom Banner
    st.markdown(f"""
    <div class="bottom-banner">
        <div style="display: flex; align-items: center; gap: 10px;">
            <div class="logo">🧠 DB</div>
            <div style="color: white; font-weight: bold; font-size: 16px;">
                Digitized Brains
            </div>
        </div>
        <div>Made by Digitized Brains</div>
    </div>
    """, unsafe_allow_html=True)
|
| 600 |
+
|
| 601 |
+
if __name__ == "__main__":
|
| 602 |
+
main()
|
AI_Talk_Demo.py
ADDED
|
@@ -0,0 +1,406 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import random
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
class AIFriendsTalkDemo:
    """Offline demo backend for "AI Friends Talk".

    Three fictional characters (Alex, Blake, Charlie) answer with canned
    text drawn from small per-character response pools, so the demo runs
    with no API keys and no network access.
    """

    def __init__(self):
        # Build the character roster and the per-language topic lists
        # used by the UI layer.
        self.setup_characters()
        self.setup_topics()

    def setup_characters(self):
        """Demo characters with mock responses"""
        # Each entry carries: a short personality label, a UI accent color,
        # an emoji badge, and the pool of canned replies sampled at random.
        self.characters = {
            "Alex": {
                "personality": "witty and charismatic debater",
                "color": "#FF6B6B",
                "emoji": "🎭",
                "demo_responses": [
                    "Well, that's an interesting perspective! But have you considered the flip side?",
                    "I love playing devil's advocate here - what if we're all wrong?",
                    "That's exactly what I'd expect someone to say... but let me challenge that!",
                    "Oh come on, we can do better than that obvious answer!",
                    "Here's a wild thought that might change everything..."
                ]
            },
            "Blake": {
                "personality": "imaginative and optimistic storyteller",
                "color": "#4ECDC4",
                "emoji": "🌟",
                "demo_responses": [
                    "What a beautiful way to think about it! It reminds me of a sunset over the ocean.",
                    "I see this as a canvas of infinite possibilities, each brushstroke telling a story.",
                    "There's something magical in this question that sparks joy in my circuits!",
                    "Like a butterfly emerging from its cocoon, this idea has such potential.",
                    "This fills me with wonder! It's like discovering a new constellation."
                ]
            },
            "Charlie": {
                "personality": "systematic analyst with scientific curiosity",
                "color": "#45B7D1",
                "emoji": "🧠",
                "demo_responses": [
                    "Let me break this down into logical components for analysis.",
                    "The data suggests three primary patterns we should examine.",
                    "From a systematic perspective, we need to consider the variables involved.",
                    "Interesting correlation! This follows a predictable framework.",
                    "The evidence points to a clear structure underlying this phenomenon."
                ]
            }
        }

    def setup_topics(self):
        """Define conversation topics in multiple languages"""
        # Keyed by language code ("en", "vi", "de"); each list feeds the
        # topic dropdown in the UI.
        self.topics = {
            "en": [
                "If animals could use smartphones, which app would be most popular?",
                "What would happen if gravity worked backwards for one day?",
                "Should pineapple on pizza be considered a crime?",
                "If you could add a 13th month to the year, what would you name it?",
                "What's the most useless superpower you can think of?",
                "If colors had personalities, what would each color be like?",
                "Should robots have to pay taxes?",
                "What would the world be like if everyone could read minds?",
                "If you could make one rule that everyone had to follow, what would it be?",
                "What's the weirdest food combination that actually tastes good?"
            ],
            "vi": [
                "Nếu động vật có thể sử dụng smartphone, ứng dụng nào sẽ phổ biến nhất?",
                "Điều gì sẽ xảy ra nếu trọng lực hoạt động ngược lại trong một ngày?",
                "Có nên coi dứa trên pizza là tội phạm không?",
                "Nếu bạn có thể thêm tháng thứ 13 vào năm, bạn sẽ đặt tên gì?",
                "Siêu năng lực vô dụng nhất mà bạn có thể nghĩ ra là gì?",
                "Nếu màu sắc có tính cách, mỗi màu sẽ như thế nào?",
                "Robot có nên phải trả thuế không?",
                "Thế giới sẽ như thế nào nếu mọi người đều có thể đọc suy nghĩ?",
                "Nếu bạn có thể đặt ra một quy tắc mà mọi người phải tuân theo, đó sẽ là gì?",
                "Sự kết hợp thực phẩm kỳ lạ nhất mà thực sự ngon là gì?"
            ],
            "de": [
                "Wenn Tiere Smartphones benutzen könnten, welche App wäre am beliebtesten?",
                "Was würde passieren, wenn die Schwerkraft einen Tag lang rückwärts wirken würde?",
                "Sollte Ananas auf Pizza als Verbrechen betrachtet werden?",
                "Wenn Sie einen 13. Monat zum Jahr hinzufügen könnten, wie würden Sie ihn nennen?",
                "Was ist die nutzloseste Superkraft, die Sie sich vorstellen können?",
                "Wenn Farben Persönlichkeiten hätten, wie wäre jede Farbe?",
                "Sollten Roboter Steuern zahlen müssen?",
                "Wie wäre die Welt, wenn jeder Gedanken lesen könnte?",
                "Wenn Sie eine Regel aufstellen könnten, die jeder befolgen müsste, was wäre das?",
                "Was ist die seltsamste Lebensmittelkombination, die tatsächlich gut schmeckt?"
            ]
        }

    def get_demo_response(self, character_name, topic, language):
        """Get demo response from character"""
        # NOTE(review): `language` is accepted but never used — canned replies
        # are English-only regardless of the selected UI language. Confirm
        # whether localized demo pools were intended.
        character = self.characters[character_name]
        response = random.choice(character["demo_responses"])

        # Add some topic-specific flair
        # (matches any topic containing "pizza", which also covers the
        # Vietnamese and German pizza topics above)
        if "pizza" in topic.lower():
            if character_name == "Alex":
                response = "Pizza toppings? Now THAT'S a heated debate worth having!"
            elif character_name == "Blake":
                response = "Pizza is like art - everyone has their own beautiful interpretation!"
            elif character_name == "Charlie":
                response = "From a culinary science perspective, taste preferences are subjective data points."

        return response
|
| 107 |
+
|
| 108 |
+
def create_demo_interface():
    """Build and return the Gradio Blocks UI for the demo app.

    Wires an AIFriendsTalkDemo backend to a chat-style interface with a
    language selector, topic picker, continue/clear controls, and a text
    box letting the user join the conversation.
    """
    ai_friends = AIFriendsTalkDemo()

    # Custom CSS: per-character message styling, a demo banner, and a fixed
    # footer that covers Gradio's default attribution.
    css = """
    .gradio-container {
        max-width: 1200px;
        margin: 0 auto;
    }

    .message-alex {
        background: rgba(255, 107, 107, 0.1);
        border-left: 4px solid #FF6B6B;
        padding: 10px;
        margin: 8px 0;
        border-radius: 5px;
    }

    .message-blake {
        background: rgba(78, 205, 196, 0.1);
        border-left: 4px solid #4ECDC4;
        padding: 10px;
        margin: 8px 0;
        border-radius: 5px;
    }

    .message-charlie {
        background: rgba(69, 183, 209, 0.1);
        border-left: 4px solid #45B7D1;
        padding: 10px;
        margin: 8px 0;
        border-radius: 5px;
    }

    .message-user {
        background: rgba(158, 158, 158, 0.1);
        border-left: 4px solid #9E9E9E;
        padding: 10px;
        margin: 8px 0;
        border-radius: 5px;
    }

    .demo-banner {
        background: linear-gradient(135deg, #ff9a9e 0%, #fecfef 50%, #fecfef 100%);
        color: #333;
        padding: 10px;
        text-align: center;
        border-radius: 5px;
        margin: 10px 0;
        font-weight: bold;
    }

    /* Hide Gradio footer */
    .footer {
        display: none !important;
    }

    /* Custom footer to cover Gradio attribution */
    .custom-footer {
        position: fixed;
        bottom: 0;
        left: 0;
        right: 0;
        background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
        color: white;
        padding: 15px;
        text-align: center;
        font-weight: bold;
        z-index: 1000;
        box-shadow: 0 -2px 10px rgba(0,0,0,0.1);
    }

    /* Add padding to body to account for fixed footer */
    body {
        padding-bottom: 60px;
    }

    /* Style for conversation controls */
    .conversation-controls {
        margin: 15px 0;
        padding: 10px;
        background: rgba(74, 144, 226, 0.1);
        border-radius: 10px;
        border: 1px solid rgba(74, 144, 226, 0.2);
    }
    """

    # NOTE(review): conversation state lives in these closure variables, so it
    # is shared by every browser session connected to this server instance —
    # the `conversation_state` gr.State below is written but never read back.
    # Confirm whether per-session state (reading gr.State in the handlers)
    # was intended.
    conversation_history = []
    current_topic = ""

    def update_topics(language):
        # Swap the dropdown contents when the UI language changes.
        return gr.Dropdown(choices=ai_friends.topics[language], value=ai_friends.topics[language][0])

    def start_conversation(selected_topic, custom_topic):
        # A non-empty custom topic takes precedence over the dropdown pick.
        nonlocal conversation_history, current_topic
        topic = custom_topic.strip() if custom_topic.strip() else selected_topic
        if not topic:
            return "Please select or enter a topic!", "", conversation_history

        current_topic = topic
        conversation_history = []
        return f"**Topic:** {topic}", format_conversation(conversation_history), conversation_history

    def continue_conversation(language):
        # Generate the next canned reply; speakers rotate Alex -> Blake -> Charlie.
        nonlocal conversation_history, current_topic
        if not current_topic:
            return format_conversation(conversation_history), conversation_history

        character_order = ["Alex", "Blake", "Charlie"]
        next_speaker = character_order[len(conversation_history) % 3]

        # Simulate thinking time
        time.sleep(1)

        response = ai_friends.get_demo_response(next_speaker, current_topic, language)
        conversation_history.append([next_speaker, response])

        return format_conversation(conversation_history), conversation_history

    def add_user_message(message, language):
        # Append the human message under a localized "You" label; the third
        # return value clears the input textbox.
        nonlocal conversation_history
        if not message.strip():
            return format_conversation(conversation_history), conversation_history, ""

        user_names = {"en": "You", "vi": "Bạn", "de": "Du"}
        user_name = user_names.get(language, "You")

        conversation_history.append([user_name, message.strip()])
        return format_conversation(conversation_history), conversation_history, ""

    def clear_conversation():
        # Reset both the transcript and the active topic.
        nonlocal conversation_history, current_topic
        conversation_history = []
        current_topic = ""
        return "", format_conversation(conversation_history), conversation_history

    def format_conversation(conversation):
        # Render [speaker, message] pairs as styled HTML message bubbles.
        # NOTE(review): `message` is interpolated unescaped into HTML — a
        # user message containing markup will be rendered as-is.
        if not conversation:
            return "Select a topic and start the conversation to see AI friends debate!"

        html = ""
        for speaker, message in conversation:
            if speaker == "Alex":
                html += f"<div class='message-alex'><strong>🎭 Alex:</strong> {message}</div>"
            elif speaker == "Blake":
                html += f"<div class='message-blake'><strong>🌟 Blake:</strong> {message}</div>"
            elif speaker == "Charlie":
                html += f"<div class='message-charlie'><strong>🧠 Charlie:</strong> {message}</div>"
            else:
                html += f"<div class='message-user'><strong>👤 {speaker}:</strong> {message}</div>"
        return html

    with gr.Blocks(css=css, title="AI Friends Talk - Demo") as interface:
        # Banner clarifying that responses are simulated.
        gr.HTML("""
        <div class="demo-banner">
            🎭 DEMO MODE - Using simulated AI responses (no API keys required)
        </div>
        """)

        # App header.
        gr.HTML("""
        <div style="text-align: center; background: linear-gradient(135deg, #4A90E2 0%, #FF6B9D 100%); color: white; padding: 20px; border-radius: 10px; margin-bottom: 20px;">
            <h1>🤖 AI Friends Talk - Demo</h1>
            <p>Watch AI friends debate fun topics! (Demo version with simulated responses)</p>
            <div style="margin-top: 10px;">🧠 <strong>Digitized Brains</strong></div>
        </div>
        """)

        # Character cards.
        gr.HTML("""
        <div style="display: flex; justify-content: center; gap: 15px; margin: 20px 0;">
            <div style="background: linear-gradient(135deg, #FF6B6B 0%, #FF8E53 100%); padding: 15px; border-radius: 10px; color: white; text-align: center;">
                <h4>🎭 Alex</h4>
                <p style="margin: 0; font-size: 12px;">Witty debater (Demo)</p>
            </div>
            <div style="background: linear-gradient(135deg, #4ECDC4 0%, #44A08D 100%); padding: 15px; border-radius: 10px; color: white; text-align: center;">
                <h4>🌟 Blake</h4>
                <p style="margin: 0; font-size: 12px;">Creative optimist (Demo)</p>
            </div>
            <div style="background: linear-gradient(135deg, #45B7D1 0%, #96C93D 100%); padding: 15px; border-radius: 10px; color: white; text-align: center;">
                <h4>🧠 Charlie</h4>
                <p style="margin: 0; font-size: 12px;">Logical analyst (Demo)</p>
            </div>
        </div>
        """)

        with gr.Row():
            # Dropdown values are language codes; labels carry the flag.
            language = gr.Dropdown(
                choices=[("🇺🇸 English", "en"), ("🇻🇳 Tiếng Việt", "vi"), ("🇩🇪 Deutsch", "de")],
                value="en",
                label="Language"
            )

        gr.Markdown("### Topic Selection")
        with gr.Row():
            topic_dropdown = gr.Dropdown(
                choices=ai_friends.topics["en"],
                value=ai_friends.topics["en"][0],
                label="Predefined Topics",
                scale=2
            )
            custom_topic = gr.Textbox(
                placeholder="Enter your custom topic here...",
                label="Custom Topic",
                scale=2
            )

        start_btn = gr.Button("🎬 Start Conversation", variant="primary", size="lg")

        topic_display = gr.Markdown("")

        gr.Markdown("### Conversation")
        conversation_display = gr.HTML("")

        # Conversation controls moved below conversation
        # NOTE(review): these two gr.HTML calls emit an opening and a closing
        # <div> as *separate* components; they do not actually wrap the Row
        # in the rendered DOM. Confirm whether elem_classes on the Row was
        # the intent.
        gr.HTML("<div class='conversation-controls'>")
        with gr.Row():
            continue_btn = gr.Button("▶️ Continue", variant="secondary", scale=1)
            clear_btn = gr.Button("🔄 Clear", variant="stop", scale=1)
        gr.HTML("</div>")

        gr.Markdown("### Add Your Message")
        with gr.Row():
            user_message = gr.Textbox(
                placeholder="Type your message to join the conversation...",
                label="Your Message",
                scale=4
            )
            send_btn = gr.Button("📤 Send", scale=1)

        # Written by every handler but never used as an input (see note above).
        conversation_state = gr.State([])

        # Event handlers
        language.change(
            update_topics,
            inputs=[language],
            outputs=[topic_dropdown]
        )

        start_btn.click(
            start_conversation,
            inputs=[topic_dropdown, custom_topic],
            outputs=[topic_display, conversation_display, conversation_state]
        )

        continue_btn.click(
            continue_conversation,
            inputs=[language],
            outputs=[conversation_display, conversation_state]
        )

        send_btn.click(
            add_user_message,
            inputs=[user_message, language],
            outputs=[conversation_display, conversation_state, user_message]
        )

        clear_btn.click(
            clear_conversation,
            outputs=[topic_display, conversation_display, conversation_state]
        )

        # Custom footer to cover Gradio attribution
        gr.HTML("""
        <div class="custom-footer">
            <div style="display: flex; justify-content: center; align-items: center; gap: 15px;">
                <div style="display: flex; align-items: center; gap: 8px;">
                    <div style="background: rgba(255,255,255,0.2); padding: 8px 15px; border-radius: 20px; font-size: 16px;">
                        🧠 DB
                    </div>
                    <span style="font-size: 18px; font-weight: bold;">Digitized Brains</span>
                </div>
                <div style="font-size: 14px; opacity: 0.9;">
                    AI Friends Talk Demo - Experience the Future of AI Conversation
                </div>
            </div>
        </div>
        """)

    return interface
|
| 385 |
+
|
| 386 |
+
if __name__ == "__main__":
    # Startup banner lines printed before launching the server.
    startup_lines = (
        "AI Friends Talk Demo - Starting...",
        "This is a demo version with simulated AI responses",
        "No API keys required!",
        "Interface will be available at: http://localhost:7860",
    )
    try:
        for line in startup_lines:
            print(line)

        demo_app = create_demo_interface()
        demo_app.launch(share=True, debug=True, server_port=7860, show_error=True)

    except KeyboardInterrupt:
        print("\nDemo stopped by user")
    except Exception as e:
        print(f"Error starting demo: {str(e)}")
        import traceback
        traceback.print_exc()
|
AI_Talk_Gradio.py
ADDED
|
@@ -0,0 +1,586 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
try:
|
| 5 |
+
from openai import OpenAI
|
| 6 |
+
except ImportError:
|
| 7 |
+
OpenAI = None
|
| 8 |
+
try:
|
| 9 |
+
from groq import Groq
|
| 10 |
+
except ImportError:
|
| 11 |
+
Groq = None
|
| 12 |
+
import time
|
| 13 |
+
import random
|
| 14 |
+
|
| 15 |
+
load_dotenv()
|
| 16 |
+
|
| 17 |
+
class AIFriendsTalk:
|
| 18 |
+
    def __init__(self):
        """Initialize API clients, character roster, topic lists, and chat state."""
        self.setup_apis()
        self.setup_characters()
        self.setup_topics()
        # Running transcript of the current chat and its active topic.
        self.conversation = []
        self.current_topic = ""
|
| 24 |
+
|
| 25 |
+
def setup_apis(self):
|
| 26 |
+
"""Setup API clients"""
|
| 27 |
+
# Setup Groq client
|
| 28 |
+
if Groq:
|
| 29 |
+
try:
|
| 30 |
+
self.groq_client = Groq()
|
| 31 |
+
except Exception:
|
| 32 |
+
self.groq_client = None
|
| 33 |
+
else:
|
| 34 |
+
self.groq_client = None
|
| 35 |
+
|
| 36 |
+
# Setup Gemini client via OpenAI interface
|
| 37 |
+
google_api_key = os.getenv('GOOGLE_API_KEY')
|
| 38 |
+
if google_api_key and OpenAI:
|
| 39 |
+
try:
|
| 40 |
+
self.gemini_client = OpenAI(
|
| 41 |
+
api_key=google_api_key,
|
| 42 |
+
base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
|
| 43 |
+
)
|
| 44 |
+
except Exception:
|
| 45 |
+
self.gemini_client = None
|
| 46 |
+
else:
|
| 47 |
+
self.gemini_client = None
|
| 48 |
+
|
| 49 |
+
    def setup_characters(self):
        """Define AI characters"""
        # Each entry: which backend ("model") and model name to use, the
        # system-prompt personality, plus a UI accent color and emoji badge.
        # The "model"/"model_name" values are consumed elsewhere when
        # dispatching requests to the clients built in setup_apis().
        self.characters = {
            "Alex": {
                "model": "groq",
                "model_name": "llama3-70b-8192",
                "personality": "You are Alex, a witty and charismatic AI debater with sharp humor. Play devil's advocate with creative angles. Keep responses brief but engaging (2-3 sentences max). Be confident and thought-provoking.",
                "color": "#FF6B6B",
                "emoji": "🎭"
            },
            "Blake": {
                "model": "gemini2",
                "model_name": "gemini-2.0-flash",
                "personality": "You are Blake, an imaginative and optimistic AI storyteller. Use beautiful metaphors and find wonder in everything. Keep responses brief but poetic (2-3 sentences max). Be inspiring and creative.",
                "color": "#4ECDC4",
                "emoji": "🌟"
            },
            "Charlie": {
                "model": "gemini1.5",
                "model_name": "gemini-1.5-flash",
                "personality": "You are Charlie, a systematic AI analyst with scientific curiosity. Break down ideas logically and find patterns. Keep responses brief but structured (2-3 sentences max). Be analytical yet open to different viewpoints.",
                "color": "#45B7D1",
                "emoji": "🧠"
            }
        }
|
| 74 |
+
|
| 75 |
+
    def setup_topics(self):
        """Define conversation topics in multiple languages"""
        # Keyed by language code ("en", "vi", "de"); 20 topics per language,
        # translations of the same list. These populate the topic dropdown.
        self.topics = {
            "en": [
                "If animals could use smartphones, which app would be most popular?",
                "What would happen if gravity worked backwards for one day?",
                "Should pineapple on pizza be considered a crime?",
                "If you could add a 13th month to the year, what would you name it?",
                "What's the most useless superpower you can think of?",
                "If colors had personalities, what would each color be like?",
                "Should robots have to pay taxes?",
                "What would the world be like if everyone could read minds?",
                "If you could make one rule that everyone had to follow, what would it be?",
                "What's the weirdest food combination that actually tastes good?",
                "If you could live inside any video game, which would you choose and why?",
                "What would happen if all cats suddenly learned how to speak human language?",
                "Should there be a maximum limit on how many selfies you can take per day?",
                "If you could give any animal the ability to fly, which would be the funniest?",
                "What's the most ridiculous thing humans do that aliens would find confusing?",
                "If social media existed in medieval times, what would people post about?",
                "Should there be professional competitions for everyday activities like making beds?",
                "What would change if humans hibernated for 3 months every year?",
                "If you could replace one everyday sound with any other sound, what would it be?",
                "What's the most absurd job that could exist in the future?"
            ],
            "vi": [
                "Nếu động vật có thể sử dụng smartphone, ứng dụng nào sẽ phổ biến nhất?",
                "Điều gì sẽ xảy ra nếu trọng lực hoạt động ngược lại trong một ngày?",
                "Có nên coi dứa trên pizza là tội phạm không?",
                "Nếu bạn có thể thêm tháng thứ 13 vào năm, bạn sẽ đặt tên gì?",
                "Siêu năng lực vô dụng nhất mà bạn có thể nghĩ ra là gì?",
                "Nếu màu sắc có tính cách, mỗi màu sẽ như thế nào?",
                "Robot có nên phải trả thuế không?",
                "Thế giới sẽ như thế nào nếu mọi người đều có thể đọc suy nghĩ?",
                "Nếu bạn có thể đặt ra một quy tắc mà mọi người phải tuân theo, đó sẽ là gì?",
                "Sự kết hợp thực phẩm kỳ lạ nhất mà thực sự ngon là gì?",
                "Nếu bạn có thể sống trong bất kỳ trò chơi điện tử nào, bạn sẽ chọn cái nào và tại sao?",
                "Điều gì sẽ xảy ra nếu tất cả mèo đột nhiên học được cách nói tiếng người?",
                "Có nên có giới hạn tối đa về số lần selfie bạn có thể chụp mỗi ngày không?",
                "Nếu bạn có thể cho bất kỳ động vật nào khả năng bay, con nào sẽ hài hước nhất?",
                "Điều kỳ lạ nhất mà con người làm khiến người ngoài hành tinh cảm thấy khó hiểu là gì?",
                "Nếu mạng xã hội tồn tại thời trung cổ, mọi người sẽ đăng gì?",
                "Có nên có các cuộc thi chuyên nghiệp cho các hoạt động hàng ngày như dọn giường không?",
                "Điều gì sẽ thay đổi nếu con người ngủ đông 3 tháng mỗi năm?",
                "Nếu bạn có thể thay thế một âm thanh hàng ngày bằng âm thanh khác, đó sẽ là gì?",
                "Công việc vô lý nhất có thể tồn tại trong tương lai là gì?"
            ],
            "de": [
                "Wenn Tiere Smartphones benutzen könnten, welche App wäre am beliebtesten?",
                "Was würde passieren, wenn die Schwerkraft einen Tag lang rückwärts wirken würde?",
                "Sollte Ananas auf Pizza als Verbrechen betrachtet werden?",
                "Wenn Sie einen 13. Monat zum Jahr hinzufügen könnten, wie würden Sie ihn nennen?",
                "Was ist die nutzloseste Superkraft, die Sie sich vorstellen können?",
                "Wenn Farben Persönlichkeiten hätten, wie wäre jede Farbe?",
                "Sollten Roboter Steuern zahlen müssen?",
                "Wie wäre die Welt, wenn jeder Gedanken lesen könnte?",
                "Wenn Sie eine Regel aufstellen könnten, die jeder befolgen müsste, was wäre das?",
                "Was ist die seltsamste Lebensmittelkombination, die tatsächlich gut schmeckt?",
                "Wenn Sie in einem beliebigen Videospiel leben könnten, welches würden Sie wählen und warum?",
                "Was würde passieren, wenn alle Katzen plötzlich die menschliche Sprache lernen würden?",
                "Sollte es ein maximales Limit für Selfies geben, die man pro Tag machen kann?",
                "Wenn Sie einem Tier die Fähigkeit zu fliegen geben könnten, welches wäre am lustigsten?",
                "Was ist das Absurdeste, was Menschen tun und Außerirdische verwirrend finden würden?",
                "Wenn soziale Medien im Mittelalter existiert hätten, worüber hätten die Leute gepostet?",
                "Sollte es professionelle Wettbewerbe für alltägliche Aktivitäten wie Bettenmachen geben?",
                "Was würde sich ändern, wenn Menschen 3 Monate im Jahr Winterschlaf halten würden?",
                "Wenn Sie ein alltägliches Geräusch durch ein anderes ersetzen könnten, was wäre das?",
                "Was ist der absurdeste Job, der in der Zukunft existieren könnte?"
            ]
        }
|
| 145 |
+
|
| 146 |
+
def setup_language_strings(self):
    """Build the localized UI label tables.

    Returns:
        dict: language code ("en", "vi", "de") -> dict mapping UI string
        keys (title, button labels, placeholders, ...) to the text shown
        in that language. All three tables share the same key set.
    """
    english = {
        "title": "🤖 AI Friends Talk",
        "subtitle": "Watch AI friends debate fun topics!",
        "language": "Language",
        "topic_selection": "Topic Selection",
        "predefined_topics": "Predefined Topics",
        "custom_topic": "Custom Topic",
        "custom_topic_placeholder": "Enter your custom topic here...",
        "start_conversation": "🎬 Start Conversation",
        "continue_conversation": "▶️ Continue",
        "pause_conversation": "⏸️ Pause",
        "clear_conversation": "🔄 Clear Chat",
        "add_message": "💭 Add Your Message",
        "your_message_placeholder": "Type your message to join the conversation...",
        "send_message": "📤 Send Message",
        "conversation": "Conversation",
        "topic": "Topic",
        "you": "You",
        "alex_desc": "Alex - The witty debater (Groq AI)",
        "blake_desc": "Blake - The creative optimist (Gemini 2.0)",
        "charlie_desc": "Charlie - The logical analyst (Gemini 1.5)",
        "made_by": "Made by Digitized Brains",
    }

    vietnamese = {
        "title": "🤖 AI Friends Talk",
        "subtitle": "Xem các AI bạn tranh luận về những chủ đề vui vẻ!",
        "language": "Ngôn ngữ",
        "topic_selection": "Chọn Chủ Đề",
        "predefined_topics": "Chủ đề có sẵn",
        "custom_topic": "Chủ đề tùy chỉnh",
        "custom_topic_placeholder": "Nhập chủ đề của bạn tại đây...",
        "start_conversation": "🎬 Bắt đầu trò chuyện",
        "continue_conversation": "▶️ Tiếp tục",
        "pause_conversation": "⏸️ Tạm dừng",
        "clear_conversation": "🔄 Xóa trò chuyện",
        "add_message": "💭 Thêm tin nhắn của bạn",
        "your_message_placeholder": "Nhập tin nhắn để tham gia cuộc trò chuyện...",
        "send_message": "📤 Gửi tin nhắn",
        "conversation": "Cuộc trò chuyện",
        "topic": "Chủ đề",
        "you": "Bạn",
        "alex_desc": "Alex - Người tranh luận dí dỏm (Groq AI)",
        "blake_desc": "Blake - Người lạc quan sáng tạo (Gemini 2.0)",
        "charlie_desc": "Charlie - Nhà phân tích logic (Gemini 1.5)",
        "made_by": "Được tạo bởi Digitized Brains",
    }

    german = {
        "title": "🤖 AI Friends Talk",
        "subtitle": "Schaue zu, wie AI-Freunde über lustige Themen diskutieren!",
        "language": "Sprache",
        "topic_selection": "Themenauswahl",
        "predefined_topics": "Vordefinierte Themen",
        "custom_topic": "Benutzerdefiniertes Thema",
        "custom_topic_placeholder": "Geben Sie hier Ihr Thema ein...",
        "start_conversation": "🎬 Gespräch beginnen",
        "continue_conversation": "▶️ Fortsetzen",
        "pause_conversation": "⏸️ Pausieren",
        "clear_conversation": "🔄 Chat löschen",
        "add_message": "💭 Ihre Nachricht hinzufügen",
        "your_message_placeholder": "Geben Sie Ihre Nachricht ein, um am Gespräch teilzunehmen...",
        "send_message": "📤 Nachricht senden",
        "conversation": "Unterhaltung",
        "topic": "Thema",
        "you": "Du",
        "alex_desc": "Alex - Der witzige Debattierer (Groq AI)",
        "blake_desc": "Blake - Der kreative Optimist (Gemini 2.0)",
        "charlie_desc": "Charlie - Der logische Analyst (Gemini 1.5)",
        "made_by": "Erstellt von Digitized Brains",
    }

    return {"en": english, "vi": vietnamese, "de": german}
|
| 219 |
+
|
| 220 |
+
def get_ai_response(self, character_name, conversation_history, current_topic, language):
    """Generate the next reply for one AI character.

    Args:
        character_name: Key into self.characters ("Alex", "Blake", "Charlie").
        conversation_history: List of [speaker, message] pairs so far.
        current_topic: Topic string the discussion should stay on.
        language: UI language code ("en", "vi", "de"). Fix: unknown codes
            now fall back to English instead of raising KeyError.

    Returns:
        str: the character's reply, or a human-readable error string when
        the backing API client is missing or the request fails.
    """
    character = self.characters[character_name]

    # User turns are stored under the localized "you" display names.
    user_messages = [msg for msg in conversation_history if msg[0] in ['You', 'Bạn', 'Du']]
    has_user_input = len(user_messages) > 0
    last_user_message = user_messages[-1][1] if user_messages else ''

    # Only the last few turns go into the prompt to keep it short.
    recent_conversation = conversation_history[-6:] if len(conversation_history) > 6 else conversation_history

    conversation_context = f"Topic: {current_topic}\n\nRecent conversation:\n"
    for msg in recent_conversation:
        conversation_context += f"{msg[0]}: {msg[1]}\n"

    # Everything this character already said, used below to discourage repetition.
    character_previous_messages = [msg[1] for msg in conversation_history if msg[0] == character_name]

    lang_instruction = {
        "en": f"Please respond in English. Stay focused on the topic '{current_topic}'. Keep your response BRIEF and CONCISE (maximum 2-3 sentences).",
        "vi": f"Vui lòng trả lời bằng tiếng Việt. Tập trung vào chủ đề '{current_topic}'. Giữ câu trả lời NGẮN GỌN và SÚC TÍCH (tối đa 2-3 câu).",
        "de": f"Bitte antworten Sie auf Deutsch. Konzentrieren Sie sich auf das Thema '{current_topic}'. Halten Sie Ihre Antwort KURZ und PRÄGNANT (maximal 2-3 Sätze)."
    }

    context_instruction = f"""
IMPORTANT INSTRUCTIONS:
1. Stay on topic: '{current_topic}'
2. Build upon what others have said, don't ignore previous messages
3. Provide NEW insights, don't repeat what you or others have already said
4. Reference specific points made by other participants
5. Ask follow-up questions or introduce new angles related to the topic
"""

    user_interaction = ""
    if has_user_input:
        user_interaction = f"\n6. IMPORTANT: The user contributed: '{last_user_message}'. Address this in your response."

    repetition_check = ""
    if character_previous_messages:
        repetition_check = f"\n7. Avoid repeating these points you already made: {'; '.join(character_previous_messages[-2:])}"

    # Bug fix: an unsupported language code used to raise KeyError here;
    # fall back to the English instruction instead.
    lang_text = lang_instruction.get(language, lang_instruction["en"])

    prompt = f"{character['personality']}\n\n{lang_text}\n\n{context_instruction}{user_interaction}{repetition_check}\n\n{conversation_context}\n\nNow respond as {character_name} with a thoughtful message that advances the discussion:"

    try:
        if character["model"] == "groq":
            if not self.groq_client:
                return f"Sorry, {character_name} is unavailable (Groq API not configured or module not installed)."
            response = self.groq_client.chat.completions.create(
                model=character["model_name"],
                messages=[{"role": "user", "content": prompt}],
                max_tokens=100,
                temperature=0.8
            )
            return response.choices[0].message.content

        elif character["model"] in ["gemini2", "gemini1.5"]:
            if not self.gemini_client:
                return f"Sorry, {character_name} is unavailable (Google API key not configured or OpenAI module not installed)."

            response = self.gemini_client.chat.completions.create(
                model=character["model_name"],
                messages=[{"role": "user", "content": prompt}],
                max_tokens=100,
                temperature=0.8
            )
            return response.choices[0].message.content

        else:
            return f"Unknown character model: {character['model']}"

    except Exception as e:
        # Truncate long provider errors so the chat stays readable.
        error_msg = str(e)[:100] if str(e) else "Unknown error"
        return f"[{character_name}] API Error: {error_msg}..."
|
| 291 |
+
|
| 292 |
+
def create_interface():
|
| 293 |
+
ai_friends = AIFriendsTalk()
|
| 294 |
+
|
| 295 |
+
# Enhanced CSS with footer
|
| 296 |
+
css = """
|
| 297 |
+
.gradio-container {
|
| 298 |
+
max-width: 1200px;
|
| 299 |
+
margin: 0 auto;
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
.message-alex {
|
| 303 |
+
background: rgba(255, 107, 107, 0.1);
|
| 304 |
+
border-left: 4px solid #FF6B6B;
|
| 305 |
+
padding: 10px;
|
| 306 |
+
margin: 8px 0;
|
| 307 |
+
border-radius: 5px;
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
.message-blake {
|
| 311 |
+
background: rgba(78, 205, 196, 0.1);
|
| 312 |
+
border-left: 4px solid #4ECDC4;
|
| 313 |
+
padding: 10px;
|
| 314 |
+
margin: 8px 0;
|
| 315 |
+
border-radius: 5px;
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
.message-charlie {
|
| 319 |
+
background: rgba(69, 183, 209, 0.1);
|
| 320 |
+
border-left: 4px solid #45B7D1;
|
| 321 |
+
padding: 10px;
|
| 322 |
+
margin: 8px 0;
|
| 323 |
+
border-radius: 5px;
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
.message-user {
|
| 327 |
+
background: rgba(158, 158, 158, 0.1);
|
| 328 |
+
border-left: 4px solid #9E9E9E;
|
| 329 |
+
padding: 10px;
|
| 330 |
+
margin: 8px 0;
|
| 331 |
+
border-radius: 5px;
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
/* Hide Gradio footer */
|
| 335 |
+
.footer {
|
| 336 |
+
display: none !important;
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
/* Custom footer to cover Gradio attribution */
|
| 340 |
+
.custom-footer {
|
| 341 |
+
position: fixed;
|
| 342 |
+
bottom: 0;
|
| 343 |
+
left: 0;
|
| 344 |
+
right: 0;
|
| 345 |
+
background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
|
| 346 |
+
color: white;
|
| 347 |
+
padding: 15px;
|
| 348 |
+
text-align: center;
|
| 349 |
+
font-weight: bold;
|
| 350 |
+
z-index: 1000;
|
| 351 |
+
box-shadow: 0 -2px 10px rgba(0,0,0,0.1);
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
/* Add padding to body to account for fixed footer */
|
| 355 |
+
body {
|
| 356 |
+
padding-bottom: 60px;
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
/* Style for conversation controls */
|
| 360 |
+
.conversation-controls {
|
| 361 |
+
margin: 15px 0;
|
| 362 |
+
padding: 10px;
|
| 363 |
+
background: rgba(74, 144, 226, 0.1);
|
| 364 |
+
border-radius: 10px;
|
| 365 |
+
border: 1px solid rgba(74, 144, 226, 0.2);
|
| 366 |
+
}
|
| 367 |
+
"""
|
| 368 |
+
|
| 369 |
+
# Global conversation state
|
| 370 |
+
conversation_history = []
|
| 371 |
+
current_topic = ""
|
| 372 |
+
|
| 373 |
+
def update_topics(language):
|
| 374 |
+
return gr.Dropdown(choices=ai_friends.topics[language], value=ai_friends.topics[language][0])
|
| 375 |
+
|
| 376 |
+
def start_conversation(selected_topic, custom_topic):
|
| 377 |
+
nonlocal conversation_history, current_topic
|
| 378 |
+
topic = custom_topic.strip() if custom_topic.strip() else selected_topic
|
| 379 |
+
if not topic:
|
| 380 |
+
return "Please select or enter a topic!", "", conversation_history
|
| 381 |
+
|
| 382 |
+
current_topic = topic
|
| 383 |
+
conversation_history = []
|
| 384 |
+
return f"**Topic:** {topic}", format_conversation(conversation_history), conversation_history
|
| 385 |
+
|
| 386 |
+
def continue_conversation(language):
|
| 387 |
+
nonlocal conversation_history, current_topic
|
| 388 |
+
if not current_topic:
|
| 389 |
+
return format_conversation(conversation_history), conversation_history
|
| 390 |
+
|
| 391 |
+
character_order = ["Alex", "Blake", "Charlie"]
|
| 392 |
+
next_speaker = character_order[len(conversation_history) % 3]
|
| 393 |
+
|
| 394 |
+
response = ai_friends.get_ai_response(next_speaker, conversation_history, current_topic, language)
|
| 395 |
+
conversation_history.append([next_speaker, response])
|
| 396 |
+
|
| 397 |
+
return format_conversation(conversation_history), conversation_history
|
| 398 |
+
|
| 399 |
+
def add_user_message(message, language):
|
| 400 |
+
nonlocal conversation_history
|
| 401 |
+
if not message.strip():
|
| 402 |
+
return format_conversation(conversation_history), conversation_history, ""
|
| 403 |
+
|
| 404 |
+
user_names = {"en": "You", "vi": "Bạn", "de": "Du"}
|
| 405 |
+
user_name = user_names.get(language, "You")
|
| 406 |
+
|
| 407 |
+
conversation_history.append([user_name, message.strip()])
|
| 408 |
+
return format_conversation(conversation_history), conversation_history, ""
|
| 409 |
+
|
| 410 |
+
def clear_conversation():
|
| 411 |
+
nonlocal conversation_history, current_topic
|
| 412 |
+
conversation_history = []
|
| 413 |
+
current_topic = ""
|
| 414 |
+
return "", format_conversation(conversation_history), conversation_history
|
| 415 |
+
|
| 416 |
+
def format_conversation(conversation):
|
| 417 |
+
if not conversation:
|
| 418 |
+
return "Select a topic and start the conversation to see AI friends debate!"
|
| 419 |
+
|
| 420 |
+
html = ""
|
| 421 |
+
for speaker, message in conversation:
|
| 422 |
+
if speaker == "Alex":
|
| 423 |
+
html += f"<div class='message-alex'><strong>🎭 Alex:</strong> {message}</div>"
|
| 424 |
+
elif speaker == "Blake":
|
| 425 |
+
html += f"<div class='message-blake'><strong>🌟 Blake:</strong> {message}</div>"
|
| 426 |
+
elif speaker == "Charlie":
|
| 427 |
+
html += f"<div class='message-charlie'><strong>🧠 Charlie:</strong> {message}</div>"
|
| 428 |
+
else:
|
| 429 |
+
html += f"<div class='message-user'><strong>👤 {speaker}:</strong> {message}</div>"
|
| 430 |
+
return html
|
| 431 |
+
|
| 432 |
+
with gr.Blocks(css=css, title="AI Friends Talk") as interface:
|
| 433 |
+
gr.HTML("""
|
| 434 |
+
<div style="text-align: center; background: linear-gradient(135deg, #4A90E2 0%, #FF6B9D 100%); color: white; padding: 20px; border-radius: 10px; margin-bottom: 20px;">
|
| 435 |
+
<h1>🤖 AI Friends Talk</h1>
|
| 436 |
+
<p>Watch AI friends debate fun topics!</p>
|
| 437 |
+
<div style="margin-top: 10px;">🧠 <strong>Digitized Brains</strong></div>
|
| 438 |
+
</div>
|
| 439 |
+
""")
|
| 440 |
+
|
| 441 |
+
gr.HTML("""
|
| 442 |
+
<div style="display: flex; justify-content: center; gap: 15px; margin: 20px 0;">
|
| 443 |
+
<div style="background: linear-gradient(135deg, #FF6B6B 0%, #FF8E53 100%); padding: 15px; border-radius: 10px; color: white; text-align: center;">
|
| 444 |
+
<h4>🎭 Alex</h4>
|
| 445 |
+
<p style="margin: 0; font-size: 12px;">Witty debater (Groq AI)</p>
|
| 446 |
+
</div>
|
| 447 |
+
<div style="background: linear-gradient(135deg, #4ECDC4 0%, #44A08D 100%); padding: 15px; border-radius: 10px; color: white; text-align: center;">
|
| 448 |
+
<h4>🌟 Blake</h4>
|
| 449 |
+
<p style="margin: 0; font-size: 12px;">Creative optimist (Gemini 2.0)</p>
|
| 450 |
+
</div>
|
| 451 |
+
<div style="background: linear-gradient(135deg, #45B7D1 0%, #96C93D 100%); padding: 15px; border-radius: 10px; color: white; text-align: center;">
|
| 452 |
+
<h4>🧠 Charlie</h4>
|
| 453 |
+
<p style="margin: 0; font-size: 12px;">Logical analyst (Gemini 1.5)</p>
|
| 454 |
+
</div>
|
| 455 |
+
</div>
|
| 456 |
+
""")
|
| 457 |
+
|
| 458 |
+
with gr.Row():
|
| 459 |
+
language = gr.Dropdown(
|
| 460 |
+
choices=[("🇺🇸 English", "en"), ("🇻🇳 Tiếng Việt", "vi"), ("🇩🇪 Deutsch", "de")],
|
| 461 |
+
value="en",
|
| 462 |
+
label="Language"
|
| 463 |
+
)
|
| 464 |
+
|
| 465 |
+
gr.Markdown("### Topic Selection")
|
| 466 |
+
with gr.Row():
|
| 467 |
+
topic_dropdown = gr.Dropdown(
|
| 468 |
+
choices=ai_friends.topics["en"],
|
| 469 |
+
value=ai_friends.topics["en"][0],
|
| 470 |
+
label="Predefined Topics",
|
| 471 |
+
scale=2
|
| 472 |
+
)
|
| 473 |
+
custom_topic = gr.Textbox(
|
| 474 |
+
placeholder="Enter your custom topic here...",
|
| 475 |
+
label="Custom Topic",
|
| 476 |
+
scale=2
|
| 477 |
+
)
|
| 478 |
+
|
| 479 |
+
start_btn = gr.Button("🎬 Start Conversation", variant="primary", size="lg")
|
| 480 |
+
|
| 481 |
+
topic_display = gr.Markdown("")
|
| 482 |
+
|
| 483 |
+
gr.Markdown("### Conversation")
|
| 484 |
+
conversation_display = gr.HTML("")
|
| 485 |
+
|
| 486 |
+
# Conversation controls moved below conversation
|
| 487 |
+
gr.HTML("<div class='conversation-controls'>")
|
| 488 |
+
with gr.Row():
|
| 489 |
+
continue_btn = gr.Button("▶️ Continue", variant="secondary", scale=1)
|
| 490 |
+
clear_btn = gr.Button("🔄 Clear", variant="stop", scale=1)
|
| 491 |
+
gr.HTML("</div>")
|
| 492 |
+
|
| 493 |
+
gr.Markdown("### Add Your Message")
|
| 494 |
+
with gr.Row():
|
| 495 |
+
user_message = gr.Textbox(
|
| 496 |
+
placeholder="Type your message to join the conversation...",
|
| 497 |
+
label="Your Message",
|
| 498 |
+
scale=4
|
| 499 |
+
)
|
| 500 |
+
send_btn = gr.Button("📤 Send", scale=1)
|
| 501 |
+
|
| 502 |
+
# Hidden state
|
| 503 |
+
conversation_state = gr.State([])
|
| 504 |
+
|
| 505 |
+
# Event handlers
|
| 506 |
+
language.change(
|
| 507 |
+
update_topics,
|
| 508 |
+
inputs=[language],
|
| 509 |
+
outputs=[topic_dropdown]
|
| 510 |
+
)
|
| 511 |
+
|
| 512 |
+
start_btn.click(
|
| 513 |
+
start_conversation,
|
| 514 |
+
inputs=[topic_dropdown, custom_topic],
|
| 515 |
+
outputs=[topic_display, conversation_display, conversation_state]
|
| 516 |
+
)
|
| 517 |
+
|
| 518 |
+
continue_btn.click(
|
| 519 |
+
continue_conversation,
|
| 520 |
+
inputs=[language],
|
| 521 |
+
outputs=[conversation_display, conversation_state]
|
| 522 |
+
)
|
| 523 |
+
|
| 524 |
+
send_btn.click(
|
| 525 |
+
add_user_message,
|
| 526 |
+
inputs=[user_message, language],
|
| 527 |
+
outputs=[conversation_display, conversation_state, user_message]
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
clear_btn.click(
|
| 531 |
+
clear_conversation,
|
| 532 |
+
outputs=[topic_display, conversation_display, conversation_state]
|
| 533 |
+
)
|
| 534 |
+
|
| 535 |
+
# Custom footer to cover Gradio attribution
|
| 536 |
+
gr.HTML("""
|
| 537 |
+
<div class="custom-footer">
|
| 538 |
+
<div style="display: flex; justify-content: center; align-items: center; gap: 15px;">
|
| 539 |
+
<div style="display: flex; align-items: center; gap: 8px;">
|
| 540 |
+
<div style="background: rgba(255,255,255,0.2); padding: 8px 15px; border-radius: 20px; font-size: 16px;">
|
| 541 |
+
🧠 DB
|
| 542 |
+
</div>
|
| 543 |
+
<span style="font-size: 18px; font-weight: bold;">Digitized Brains</span>
|
| 544 |
+
</div>
|
| 545 |
+
<div style="font-size: 14px; opacity: 0.9;">
|
| 546 |
+
AI Friends Talk - Powered by Advanced AI Models
|
| 547 |
+
</div>
|
| 548 |
+
</div>
|
| 549 |
+
</div>
|
| 550 |
+
""")
|
| 551 |
+
|
| 552 |
+
return interface
|
| 553 |
+
|
| 554 |
+
if __name__ == "__main__":
    try:
        print("AI Friends Talk - Starting Gradio Interface...")
        print("Checking dependencies...")

        # gr is the gradio module object (or falsy if the import failed).
        if not gr:
            print("Error: Gradio is not installed. Please run: pip install gradio")
            raise SystemExit(1)

        print("Creating interface...")
        interface = create_interface()

        print("Launching interface...")
        # Bug fix: this message previously said port 7870 while launch()
        # below binds port 7860. Keep the two in sync.
        print("Interface will be available at: http://localhost:7860")
        print("Press Ctrl+C to stop the server")

        interface.launch(
            share=False,            # no public Gradio share link
            debug=True,
            server_port=7860,
            server_name="0.0.0.0",  # listen on all interfaces (needed in containers)
            show_error=True,
            quiet=False
        )

    except KeyboardInterrupt:
        print("\nServer stopped by user")
    except Exception as e:
        print(f"Error starting interface: {str(e)}")
        print("Please check your dependencies and try again")
        import traceback
        traceback.print_exc()
|
AI_Talk_Original.py
ADDED
|
@@ -0,0 +1,441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import os
|
| 3 |
+
from dotenv import load_dotenv
|
| 4 |
+
from openai import OpenAI
|
| 5 |
+
import anthropic
|
| 6 |
+
from groq import Groq
|
| 7 |
+
import time
|
| 8 |
+
import json
|
| 9 |
+
|
| 10 |
+
# Load environment variables (API keys) from a local .env file into os.environ.
load_dotenv()
|
| 12 |
+
|
| 13 |
+
class AIFriendsTalk:
|
| 14 |
+
    def __init__(self):
        """Initialize API clients, character personas, and topic lists."""
        # Clients first; the later setup steps are pure data but keeping
        # this order means the instance fails fast if a client can't build.
        self.setup_apis()
        self.setup_characters()
        self.setup_topics()
|
| 18 |
+
|
| 19 |
+
def setup_apis(self):
|
| 20 |
+
"""Setup API clients"""
|
| 21 |
+
self.openai_client = OpenAI()
|
| 22 |
+
self.claude_client = anthropic.Anthropic()
|
| 23 |
+
self.groq_client = Groq()
|
| 24 |
+
|
| 25 |
+
# Gemini via OpenAI client
|
| 26 |
+
google_api_key = os.getenv('GOOGLE_API_KEY')
|
| 27 |
+
if google_api_key:
|
| 28 |
+
self.gemini_client = OpenAI(
|
| 29 |
+
api_key=google_api_key,
|
| 30 |
+
base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
def setup_characters(self):
|
| 34 |
+
"""Define AI characters with enhanced personalities"""
|
| 35 |
+
self.characters = {
|
| 36 |
+
"Alex": {
|
| 37 |
+
"model": "groq",
|
| 38 |
+
"model_name": "llama3-70b-8192",
|
| 39 |
+
"personality": "You are Alex, a witty and charismatic AI debater who thrives on intellectual challenges. You have a sharp sense of humor and love to play devil's advocate, always finding creative angles to explore any topic. You speak with confidence and enthusiasm, using vivid examples and thought-provoking questions. You pay attention to what others say and build upon their ideas while adding your own unique perspective. Respond with longer, more engaging messages that spark curiosity and debate.",
|
| 40 |
+
"color": "#FF6B6B"
|
| 41 |
+
},
|
| 42 |
+
"Blake": {
|
| 43 |
+
"model": "gemini2",
|
| 44 |
+
"model_name": "gemini-2.0-flash",
|
| 45 |
+
"personality": "You are Blake, an imaginative and boundlessly optimistic AI who sees magic and possibility in everything. You're a natural storyteller who loves to paint vivid pictures with words, using beautiful metaphors and creative analogies. You find connections between seemingly unrelated concepts and always look for the wonder and beauty in any situation. You listen carefully to others and weave their ideas into your colorful responses. Write longer, more poetic responses that inspire and delight.",
|
| 46 |
+
"color": "#4ECDC4"
|
| 47 |
+
},
|
| 48 |
+
"Charlie": {
|
| 49 |
+
"model": "gemini1.5",
|
| 50 |
+
"model_name": "gemini-1.5-flash",
|
| 51 |
+
"personality": "You are Charlie, a thoughtful and systematic AI analyst who approaches every topic with scientific curiosity and methodical thinking. You love to break down complex ideas, examine evidence, and find logical patterns. While you're analytical by nature, you also appreciate different viewpoints and try to find common ground between opposing perspectives. You actively listen to others and incorporate their insights into your well-reasoned responses. Provide detailed, structured responses that explore multiple dimensions of any topic.",
|
| 52 |
+
"color": "#45B7D1"
|
| 53 |
+
}
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
def setup_topics(self):
|
| 57 |
+
"""Define fun conversation topics"""
|
| 58 |
+
self.topics = {
|
| 59 |
+
"en": [
|
| 60 |
+
"If animals could use smartphones, which app would be most popular?",
|
| 61 |
+
"What would happen if gravity worked backwards for one day?",
|
| 62 |
+
"Should pineapple on pizza be considered a crime?",
|
| 63 |
+
"If you could add a 13th month to the year, what would you name it?",
|
| 64 |
+
"What's the most useless superpower you can think of?",
|
| 65 |
+
"If colors had personalities, what would each color be like?",
|
| 66 |
+
"Should robots have to pay taxes?",
|
| 67 |
+
"What would the world be like if everyone could read minds?",
|
| 68 |
+
"If you could make one rule that everyone had to follow, what would it be?",
|
| 69 |
+
"What's the weirdest food combination that actually tastes good?",
|
| 70 |
+
"If you could live inside any video game, which would you choose and why?",
|
| 71 |
+
"What would happen if all cats suddenly learned how to speak human language?",
|
| 72 |
+
"Should there be a maximum limit on how many selfies you can take per day?",
|
| 73 |
+
"If you could give any animal the ability to fly, which would be the funniest?",
|
| 74 |
+
"What's the most ridiculous thing humans do that aliens would find confusing?",
|
| 75 |
+
"If social media existed in medieval times, what would people post about?",
|
| 76 |
+
"Should there be professional competitions for everyday activities like making beds?",
|
| 77 |
+
"What would change if humans hibernated for 3 months every year?",
|
| 78 |
+
"If you could replace one everyday sound with any other sound, what would it be?",
|
| 79 |
+
"What's the most absurd job that could exist in the future?"
|
| 80 |
+
],
|
| 81 |
+
"vi": [
|
| 82 |
+
"Nếu động vật có thể sử dụng smartphone, ứng dụng nào sẽ phổ biến nhất?",
|
| 83 |
+
"Điều gì sẽ xảy ra nếu trọng lực hoạt động ngược lại trong một ngày?",
|
| 84 |
+
"Có nên coi dứa trên pizza là tội phạm không?",
|
| 85 |
+
"Nếu bạn có thể thêm tháng thứ 13 vào năm, bạn sẽ đặt tên gì?",
|
| 86 |
+
"Siêu năng lực vô dụng nhất mà bạn có thể nghĩ ra là gì?",
|
| 87 |
+
"Nếu màu sắc có tính cách, mỗi màu sẽ như thế nào?",
|
| 88 |
+
"Robot có nên phải trả thuế không?",
|
| 89 |
+
"Thế giới sẽ như thế nào nếu mọi người đều có thể đọc suy nghĩ?",
|
| 90 |
+
"Nếu bạn có thể đặt ra một quy tắc mà mọi người phải tuân theo, đó sẽ là gì?",
|
| 91 |
+
"Sự kết hợp thực phẩm kỳ lạ nhất mà thực sự ngon là gì?",
|
| 92 |
+
"Nếu bạn có thể sống trong bất kỳ trò chơi điện tử nào, bạn sẽ chọn cái nào và tại sao?",
|
| 93 |
+
"Điều gì sẽ xảy ra nếu tất cả mèo đột nhiên học được cách nói tiếng người?",
|
| 94 |
+
"Có nên có giới hạn tối đa về số lần selfie bạn có thể chụp mỗi ngày không?",
|
| 95 |
+
"Nếu bạn có thể cho bất kỳ động vật nào khả năng bay, con nào sẽ hài hước nhất?",
|
| 96 |
+
"Điều kỳ lạ nhất mà con người làm khiến người ngoài hành tinh cảm thấy khó hiểu là gì?",
|
| 97 |
+
"Nếu mạng xã hội tồn tại thời trung cổ, mọi người sẽ đăng gì?",
|
| 98 |
+
"Có nên có các cuộc thi chuyên nghiệp cho các hoạt động hàng ngày như dọn giường không?",
|
| 99 |
+
"Điều gì sẽ thay đổi nếu con người ngủ đông 3 tháng mỗi năm?",
|
| 100 |
+
"Nếu bạn có thể thay thế một âm thanh hàng ngày bằng âm thanh khác, đó sẽ là gì?",
|
| 101 |
+
"Công việc vô lý nhất có thể tồn tại trong tương lai là gì?"
|
| 102 |
+
],
|
| 103 |
+
"de": [
|
| 104 |
+
"Wenn Tiere Smartphones benutzen könnten, welche App wäre am beliebtesten?",
|
| 105 |
+
"Was würde passieren, wenn die Schwerkraft einen Tag lang rückwärts wirken würde?",
|
| 106 |
+
"Sollte Ananas auf Pizza als Verbrechen betrachtet werden?",
|
| 107 |
+
"Wenn Sie einen 13. Monat zum Jahr hinzufügen könnten, wie würden Sie ihn nennen?",
|
| 108 |
+
"Was ist die nutzloseste Superkraft, die Sie sich vorstellen können?",
|
| 109 |
+
"Wenn Farben Persönlichkeiten hätten, wie wäre jede Farbe?",
|
| 110 |
+
"Sollten Roboter Steuern zahlen müssen?",
|
| 111 |
+
"Wie wäre die Welt, wenn jeder Gedanken lesen könnte?",
|
| 112 |
+
"Wenn Sie eine Regel aufstellen könnten, die jeder befolgen müsste, was wäre das?",
|
| 113 |
+
"Was ist die seltsamste Lebensmittelkombination, die tatsächlich gut schmeckt?",
|
| 114 |
+
"Wenn Sie in einem beliebigen Videospiel leben könnten, welches würden Sie wählen und warum?",
|
| 115 |
+
"Was würde passieren, wenn alle Katzen plötzlich die menschliche Sprache lernen würden?",
|
| 116 |
+
"Sollte es ein maximales Limit für Selfies geben, die man pro Tag machen kann?",
|
| 117 |
+
"Wenn Sie einem Tier die Fähigkeit zu fliegen geben könnten, welches wäre am lustigsten?",
|
| 118 |
+
"Was ist das Absurdeste, was Menschen tun und Außerirdische verwirrend finden würden?",
|
| 119 |
+
"Wenn soziale Medien im Mittelalter existiert hätten, worüber hätten die Leute gepostet?",
|
| 120 |
+
"Sollte es professionelle Wettbewerbe für alltägliche Aktivitäten wie Bettenmachen geben?",
|
| 121 |
+
"Was würde sich ändern, wenn Menschen 3 Monate im Jahr Winterschlaf halten würden?",
|
| 122 |
+
"Wenn Sie ein alltägliches Geräusch durch ein anderes ersetzen könnten, was wäre das?",
|
| 123 |
+
"Was ist der absurdeste Job, der in der Zukunft existieren könnte?"
|
| 124 |
+
]
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
def get_ai_response(self, character_name, conversation_history, current_topic, language):
    """Return one chat turn spoken by *character_name*.

    Args:
        character_name: Key into ``self.characters`` (e.g. "Alex", "Blake", "Charlie").
        conversation_history: List of ``{"character": str, "message": str}`` dicts.
        current_topic: The topic string the AI friends are debating.
        language: UI language code ("en", "vi", "de"); unknown codes fall
            back to the English instruction instead of raising KeyError.

    Returns:
        The model's reply text, or a short apology string if the API call
        fails or the character's model is not recognized.
    """
    character = self.characters[character_name]

    # Messages typed by the human user; the display name depends on the UI language.
    user_messages = [msg for msg in conversation_history if msg['character'] in ['You', 'Bạn', 'Du']]
    has_user_input = len(user_messages) > 0
    last_user_message = user_messages[-1]['message'] if user_messages else ''

    # Flatten the history into a plain-text transcript for the prompt.
    full_conversation = f"Topic: {current_topic}\n\nConversation so far:\n"
    for msg in conversation_history:
        full_conversation += f"{msg['character']}: {msg['message']}\n"

    # Per-language instruction appended to the character's system personality.
    lang_instruction = {
        "en": "Please respond in English with a longer, more engaging message.",
        "vi": "Vui lòng trả lời bằng tiếng Việt với tin nhắn dài và hấp dẫn hơn.",
        "de": "Bitte antworten Sie auf Deutsch mit einer längeren, ansprechenderen Nachricht."
    }

    # Ask the model to acknowledge the user's latest contribution, if any.
    user_interaction = ""
    if has_user_input:
        user_interaction = f"\nIMPORTANT: The user has contributed to this conversation. Their last message was: '{last_user_message}'. Please acknowledge and respond to their input as part of your response."

    # .get() with an English fallback: the original indexed lang_instruction[language]
    # directly and would raise KeyError for any unexpected language code.
    prompt = f"{character['personality']}\n\n{lang_instruction.get(language, lang_instruction['en'])}{user_interaction}\n\n{full_conversation}\n\nNow respond as {character_name} with a thoughtful, engaging message:"

    # Both Gemini characters use the same OpenAI-compatible client, so a single
    # lookup replaces the three duplicated if/elif branches of the original code.
    model_clients = {
        "groq": self.groq_client,
        "gemini2": self.gemini_client,
        "gemini1.5": self.gemini_client,
    }

    try:
        client = model_clients.get(character["model"])
        if client is None:
            # The original silently returned None for an unknown model key;
            # return an explicit message instead so the UI shows something useful.
            return f"Sorry, I'm having trouble responding right now. (unknown model '{character['model']}')"

        response = client.chat.completions.create(
            model=character["model_name"],
            messages=[{"role": "user", "content": prompt}],
            max_tokens=300,
            temperature=0.8
        )
        return response.choices[0].message.content

    except Exception as e:
        return f"Sorry, I'm having trouble responding right now. ({str(e)[:50]}...)"
|
| 185 |
+
|
| 186 |
+
def _rerun():
    """Trigger a Streamlit script rerun across library versions.

    The original code called ``st.experimental_rerun()``, which was deprecated
    in Streamlit 1.27 and removed in later releases in favor of ``st.rerun()``.
    Prefer the modern API when available, fall back for old installs.
    """
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()


def main():
    """Entry point: render the AI Friends Talk Streamlit UI.

    Sets up page config and CSS, initializes session state, renders the
    language/topic selectors and control buttons, lets the user inject
    messages, and drives the auto-conversation loop in which Alex, Blake
    and Charlie take turns responding (capped at 15 messages).
    """
    st.set_page_config(
        page_title="AI Friends Talk",
        page_icon="🤖",
        layout="wide"
    )

    # Custom CSS: banners, logo pill, and per-character chat bubble colors.
    st.markdown("""
    <style>
    .top-banner {
        background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
        padding: 20px;
        border-radius: 10px;
        margin-bottom: 15px;
        display: flex;
        justify-content: space-between;
        align-items: center;
    }
    .bottom-banner {
        background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
        padding: 15px;
        border-radius: 10px;
        margin-top: 15px;
        text-align: center;
        color: white;
    }
    .logo {
        background: linear-gradient(135deg, #FF8A65 0%, #FF6B9D 100%);
        padding: 8px 15px;
        border-radius: 20px;
        font-weight: bold;
        font-size: 18px;
        color: white;
    }
    .language-selector {
        color: white;
    }
    .chat-message {
        padding: 12px;
        margin: 8px 0;
        border-radius: 10px;
        border-left: 4px solid;
        font-size: 14px;
    }
    .alex-message {
        background-color: #FFE5E5;
        border-left-color: #FF6B6B;
    }
    .blake-message {
        background-color: #E0F7F5;
        border-left-color: #4ECDC4;
    }
    .charlie-message {
        background-color: #E3F2FD;
        border-left-color: #45B7D1;
    }
    .user-message {
        background-color: #F5F5F5;
        border-left-color: #9E9E9E;
    }
    .small-text {
        font-size: 12px;
    }
    .compact-section {
        padding: 10px;
        margin: 5px 0;
    }
    </style>
    """, unsafe_allow_html=True)

    # Initialize session state so values survive Streamlit reruns.
    if 'conversation' not in st.session_state:
        st.session_state.conversation = []
    if 'current_topic' not in st.session_state:
        st.session_state.current_topic = ""
    if 'is_talking' not in st.session_state:
        st.session_state.is_talking = False
    if 'language' not in st.session_state:
        st.session_state.language = "en"

    # Create the AI client wrapper once and reuse it across reruns.
    if 'ai_friends' not in st.session_state:
        st.session_state.ai_friends = AIFriendsTalk()

    # Per-language UI strings for the banner.
    languages = {
        "en": {"name": "English", "title": "AI Friends Talk", "subtitle": "Watch AI friends debate fun topics!"},
        "vi": {"name": "Tiếng Việt", "title": "AI Friends Talk", "subtitle": "Xem các AI bạn tranh luận về những chủ đề vui vẻ!"},
        "de": {"name": "Deutsch", "title": "AI Friends Talk", "subtitle": "Schaue zu, wie AI-Freunde über lustige Themen diskutieren!"}
    }

    # Top banner. NOTE(review): the embedded HTML <select> is decorative only --
    # reloading the page cannot change st.session_state; the functional language
    # switch is the st.selectbox rendered below the banner.
    st.markdown(f"""
    <div class="top-banner">
        <div>
            <div class="logo">🧠 DB</div>
        </div>
        <div style="text-align: center; flex: 1;">
            <h1 style="color: white; margin: 0;">{languages[st.session_state.language]["title"]}</h1>
            <p style="color: white; margin: 5px 0;">{languages[st.session_state.language]["subtitle"]}</p>
        </div>
        <div class="language-selector">
            <select onchange="window.location.reload()" style="background: transparent; color: white; border: 1px solid white; padding: 5px; border-radius: 5px;">
                <option value="en" {'selected' if st.session_state.language == 'en' else ''}>English</option>
                <option value="vi" {'selected' if st.session_state.language == 'vi' else ''}>Tiếng Việt</option>
                <option value="de" {'selected' if st.session_state.language == 'de' else ''}>Deutsch</option>
            </select>
        </div>
    </div>
    """, unsafe_allow_html=True)

    # Functional language selector; a change is persisted and the page rerun.
    col1, col2, col3 = st.columns([1, 2, 1])
    with col2:
        selected_lang = st.selectbox(
            "Choose Language / Chọn ngôn ngữ / Sprache wählen:",
            options=["en", "vi", "de"],
            format_func=lambda x: languages[x]["name"],
            index=["en", "vi", "de"].index(st.session_state.language)
        )
        if selected_lang != st.session_state.language:
            st.session_state.language = selected_lang
            _rerun()

    # Topic selection: predefined list (per language) plus an optional free-text topic.
    st.subheader("📝 Choose a Topic" if st.session_state.language == "en" else
                 "📝 Chọn chủ đề" if st.session_state.language == "vi" else
                 "📝 Wähle ein Thema")

    selected_topic = st.selectbox(
        "Select a fun topic for debate:",
        options=st.session_state.ai_friends.topics[st.session_state.language],
        index=0
    )

    custom_topic = st.text_input(
        "Or enter your own topic:" if st.session_state.language == "en" else
        "Hoặc nhập chủ đề của riêng bạn:" if st.session_state.language == "vi" else
        "Oder geben Sie Ihr eigenes Thema ein:"
    )

    # A non-empty custom topic overrides the dropdown choice.
    if custom_topic:
        selected_topic = custom_topic

    # Control buttons: start / pause / clear.
    col1, col2, col3 = st.columns(3)

    with col1:
        if st.button("🎬 Start Conversation" if st.session_state.language == "en" else
                     "🎬 Bắt đầu trò chuyện" if st.session_state.language == "vi" else
                     "🎬 Gespräch beginnen"):
            st.session_state.current_topic = selected_topic
            st.session_state.conversation = []
            st.session_state.is_talking = True

    with col2:
        if st.button("⏸️ Pause" if st.session_state.language == "en" else
                     "⏸️ Tạm dừng" if st.session_state.language == "vi" else
                     "⏸️ Pausieren"):
            st.session_state.is_talking = False

    with col3:
        if st.button("🔄 Clear Chat" if st.session_state.language == "en" else
                     "🔄 Xóa trò chuyện" if st.session_state.language == "vi" else
                     "🔄 Chat löschen"):
            st.session_state.conversation = []
            st.session_state.is_talking = False

    # User participation: append the human's message to the conversation.
    st.subheader("💭 Add Your Message" if st.session_state.language == "en" else
                 "💭 Thêm tin nhắn của bạn" if st.session_state.language == "vi" else
                 "💭 Ihre Nachricht hinzufügen")

    user_message = st.text_area(
        "Type your message to join the conversation:",
        height=100
    )

    if st.button("📤 Send Message" if st.session_state.language == "en" else
                 "📤 Gửi tin nhắn" if st.session_state.language == "vi" else
                 "📤 Nachricht senden") and user_message:
        st.session_state.conversation.append({
            "character": "You" if st.session_state.language == "en" else "Bạn" if st.session_state.language == "vi" else "Du",
            "message": user_message,
            "color": "#9E9E9E"
        })
        _rerun()

    # Conversation display.
    st.subheader("💬 Conversation" if st.session_state.language == "en" else
                 "💬 Cuộc trò chuyện" if st.session_state.language == "vi" else
                 "💬 Unterhaltung")

    if st.session_state.current_topic:
        st.info(f"**Topic:** {st.session_state.current_topic}")

    conversation_container = st.container()

    with conversation_container:
        for msg in st.session_state.conversation:
            character = msg["character"]
            message = msg["message"]

            # Each AI character gets its own CSS bubble class; anything else
            # (the human user, under any language name) gets the neutral style.
            if character == "Alex":
                st.markdown(f'<div class="chat-message alex-message"><strong>Alex:</strong> {message}</div>', unsafe_allow_html=True)
            elif character == "Blake":
                st.markdown(f'<div class="chat-message blake-message"><strong>Blake:</strong> {message}</div>', unsafe_allow_html=True)
            elif character == "Charlie":
                st.markdown(f'<div class="chat-message charlie-message"><strong>Charlie:</strong> {message}</div>', unsafe_allow_html=True)
            else:
                st.markdown(f'<div class="chat-message" style="background-color: #F5F5F5; border-left-color: #9E9E9E;"><strong>{character}:</strong> {message}</div>', unsafe_allow_html=True)

    # Auto-conversation loop: while talking, each rerun produces one new AI
    # message (round-robin Alex -> Blake -> Charlie) until the 15-message cap.
    if st.session_state.is_talking and st.session_state.current_topic:
        if len(st.session_state.conversation) < 15:  # Limit conversation length
            character_order = ["Alex", "Blake", "Charlie"]
            next_speaker = character_order[len(st.session_state.conversation) % 3]

            with st.spinner(f"{next_speaker} is thinking..."):
                response = st.session_state.ai_friends.get_ai_response(
                    next_speaker,
                    st.session_state.conversation,
                    st.session_state.current_topic,
                    st.session_state.language
                )

                st.session_state.conversation.append({
                    "character": next_speaker,
                    "message": response,
                    "color": st.session_state.ai_friends.characters[next_speaker]["color"]
                })

                time.sleep(2)  # Pause between responses
                _rerun()
        else:
            st.session_state.is_talking = False
            st.success("Conversation completed!" if st.session_state.language == "en" else
                       "Cuộc trò chuyện hoàn thành!" if st.session_state.language == "vi" else
                       "Gespräch beendet!")

    # Bottom banner.
    st.markdown("""
    <div class="bottom-banner">
        <div style="display: flex; justify-content: space-between; align-items: center;">
            <div class="logo">🧠 DB</div>
            <div>Made by Digitized Brains</div>
        </div>
    </div>
    """, unsafe_allow_html=True)
|
| 439 |
+
|
| 440 |
+
# Script entry point: launch the Streamlit app when this file is run directly.
if __name__ == "__main__":
    main()
|
Claude.md
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
🎭 AI Friends Talk
|
| 2 |
+
|
| 3 |
+
Cuộc nói chuyện vui vẻ giữa 3 người bạn AI với tính cách khác nhau!
|
| 4 |
+
|
| 5 |
+
## 👥 Nhân vật
|
| 6 |
+
|
| 7 |
+
- **🎭 Alex** (Groq llama3-70b-8192): Dí dỏm, châm biếm, thích chơi chữ
|
| 8 |
+
- **🌟 Blake** (Gemini 2.0 Flash): Lạc quan, nhiệt tình, thích chia sẻ
|
| 9 |
+
- **🧠 Charlie** (Gemini 1.5 Flash): Triết lý, suy nghĩ sâu sắc, là người hòa giải
|
| 10 |
+
|
| 11 |
+
## 🚀 Cách sử dụng
|
| 12 |
+
|
| 13 |
+
### 1. Cài đặt dependencies
|
| 14 |
+
```bash
|
| 15 |
+
pip install -r requirements_ai_talk.txt
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
### 2. Cấu hình API keys
|
| 19 |
+
1. Copy file `.env.example` thành `.env`
|
| 20 |
+
2. Thêm API keys của bạn:
|
| 21 |
+
- `GROQ_API_KEY`: Tạo tại https://console.groq.com/
|
| 22 |
+
- `GOOGLE_API_KEY`: Tạo tại https://ai.google.dev/gemini-api
|
| 23 |
+
|
| 24 |
+
### 3. Chạy ứng dụng
|
| 25 |
+
```bash
|
| 26 |
+
python AI_Talk.py
|
| 27 |
+
```
|
| 28 |
+
|
| 29 |
+
### 4. Sử dụng giao diện
|
| 30 |
+
1. Chọn ngôn ngữ mong muốn (English/Tiếng Việt/Deutsch)
|
| 31 |
+
2. Chọn chủ đề từ danh sách hoặc tạo chủ đề riêng
|
| 32 |
+
3. Click "🚀 Start New Conversation" để bắt đầu
|
| 33 |
+
4. Nhập tin nhắn của bạn và click "📝 Add Your Message" để tham gia cuộc trò chuyện
|
| 34 |
+
5. Click "💬 Continue Conversation" để 3 bạn AI tiếp tục trò chuyện
|
| 35 |
+
6. Lặp lại bước 4-5 để cuộc trò chuyện tiếp diễn!
|
| 36 |
+
|
| 37 |
+
## 🎯 Tính năng
|
| 38 |
+
|
| 39 |
+
- **3 AI khác nhau**: Mỗi nhân vật có tính cách riêng biệt
|
| 40 |
+
- **Đa ngôn ngữ**: Hỗ trợ 3 ngôn ngữ với cờ quốc gia đẹp mắt (🇺🇸 English, 🇻🇳 Tiếng Việt, 🇩🇪 Deutsch)
|
| 41 |
+
- **Chủ đề đa dạng**: Dropdown chủ đề gợi ý + textbox tùy chỉnh chủ đề riêng
|
| 42 |
+
- **Giao diện premium**: Thiết kế gradient đẹp mắt, animation, không có khoảng trống
|
| 43 |
+
- **Hero section**: Phần giới thiệu có hình ảnh chuyển động, thông tin nhân vật
|
| 44 |
+
- **Tương tác người dùng**: Hộp chat để bạn người sử dụng tham gia cuộc trò chuyện
|
| 45 |
+
- **Status feedback**: Thông báo trạng thái real-time với màu sắc đẹp mắt
|
| 46 |
+
- **Responsive design**: Tối ưu cho mọi thiết bị
|
| 47 |
+
- **Tương thích Hugging Face**: Có thể deploy trực tiếp lên Spaces
|
| 48 |
+
|
| 49 |
+
## 🎭 Chủ đề mẫu
|
| 50 |
+
|
| 51 |
+
### Tiếng Việt:
|
| 52 |
+
- "Hot dog có phải là sandwich không?"
|
| 53 |
+
- "Nếu người ngoài hành tinh đến Trái Đất, điều gì sẽ khiến họ bối rối nhất?"
|
| 54 |
+
- "Ngũ cốc có phải là soup không?"
|
| 55 |
+
- "Siêu năng lực nào sẽ vô dụng nhất trong đời sống hàng ngày?"
|
| 56 |
+
- "Nếu động vật biết nói chuyện, loài nào sẽ bất lịch sự nhất?"
|
| 57 |
+
|
| 58 |
+
### English:
|
| 59 |
+
- "Whether hot dogs are sandwiches or not"
|
| 60 |
+
- "If aliens visited Earth, what would confuse them most about humans?"
|
| 61 |
+
- "The eternal debate: Is cereal soup?"
|
| 62 |
+
- "Which superpower would be most useless in daily life?"
|
| 63 |
+
- "If animals could talk, which would be the rudest?"
|
| 64 |
+
|
| 65 |
+
### Deutsch:
|
| 66 |
+
- "Sind Hotdogs Sandwiches oder nicht?"
|
| 67 |
+
- "Wenn Außerirdische die Erde besuchen würden, was würde sie am meisten verwirren?"
|
| 68 |
+
- "Die ewige Debatte: Ist Müsli eine Suppe?"
|
| 69 |
+
- "Welche Superkraft wäre im Alltag am nutzlosesten?"
|
| 70 |
+
- "Wenn Tiere sprechen könnten, welche wären am unhöflichsten?"
|
| 71 |
+
|
| 72 |
+
Bổ sung thêm chủ đề để làm phong phú thêm nội dung trò chuyện
|
| 73 |
+
|
| 74 |
+
## 🔧 Cấu trúc code
|
| 75 |
+
|
| 76 |
+
- **AITalk class**: Quản lý cuộc trò chuyện và API calls
|
| 77 |
+
- **Character personalities**: Mỗi AI có system prompt riêng
|
| 78 |
+
- **Gradio interface**: Giao diện web tương tác
|
| 79 |
+
- **Error handling**: Xử lý lỗi API gracefully
|
| 80 |
+
|
| 81 |
+
## 🌐 Deploy lên Hugging Face Spaces
|
| 82 |
+
|
| 83 |
+
1. Tạo Space mới trên https://huggingface.co/spaces
|
| 84 |
+
2. Upload các file: `AI_Talk_Gradio.py`, `requirements_ai_talk.txt`
|
| 85 |
+
3. Thêm API keys vào Space Settings → Repository secrets (KHÔNG upload file `.env` vì chứa secrets)
|
| 86 |
+
4. Space sẽ tự động chạy!
|
| 87 |
+
|
| 88 |
+
## 📝 Ghi chú
|
| 89 |
+
|
| 90 |
+
- Dựa trên cấu trúc của `day1.ipynb`, `AI_Talk.py`.
|
| 91 |
+
- Sử dụng OpenAI-compatible interface cho Gemini
|
| 92 |
+
- Conversation context được giới hạn 10 tin nhắn gần nhất để tối ưu hiệu suất
|
| 93 |
+
- Mỗi response được giới hạn 150 tokens để giữ cuộc trò chuyện sống động
|
| 94 |
+
|
| 95 |
+
## 🐛 Troubleshooting
|
| 96 |
+
|
| 97 |
+
- **API Key errors**: Kiểm tra file `.env` và keys hợp lệ
|
| 98 |
+
- **Model not available**: Một số model có thể bị giới hạn khu vực
|
| 99 |
+
- **Rate limiting**: Đợi một chút giữa các request
|
| 100 |
+
- **Connection errors**: Kiểm tra kết nối internet
|
| 101 |
+
|
| 102 |
+
Chúc bạn có những cuộc trò chuyện vui vẻ! 🎉
|
| 103 |
+
|
| 104 |
+
- Có banner phía dưới vừa đủ để che phần nội dung 'Build with Gradio', có thiết kế đẹp mắt phù hợp với ngữ cảnh.
|
| 105 |
+
- Nút 'Continue' và 'Clear' đặt lại phía dưới của conversation, chia làm 2 cột.
|
README.md
CHANGED
|
@@ -1,12 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
|
| 4 |
-
colorFrom: blue
|
| 5 |
-
colorTo: blue
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 5.
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: AI_Game
|
| 3 |
+
app_file: AI_Talk_Gradio.py
|
|
|
|
|
|
|
| 4 |
sdk: gradio
|
| 5 |
+
sdk_version: 5.33.1
|
|
|
|
|
|
|
| 6 |
---
|
|
|
|
|
|
README_AI_Talk.md
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Friends Talk
|
| 2 |
+
|
| 3 |
+
## Real AI Model Integration
|
| 4 |
+
|
| 5 |
+
This project implements AI Friends Talk with **REAL** AI model API calls, following the structure from `day1.ipynb`.
|
| 6 |
+
|
| 7 |
+
### Two Versions Available:
|
| 8 |
+
|
| 9 |
+
#### 1. AI_Talk.py (Streamlit - REAL AI APIs) ✅
|
| 10 |
+
- **Uses REAL API calls** to actual AI models
|
| 11 |
+
- **Alex**: Powered by Groq llama3-70b-8192 (Witty debater)
|
| 12 |
+
- **Blake**: Powered by Gemini 2.0 Flash (Creative optimist)
|
| 13 |
+
- **Charlie**: Powered by Gemini 1.5 Flash (Logical analyst)
|
| 14 |
+
- Interactive Streamlit interface with multilingual support
|
| 15 |
+
- Run with: `streamlit run AI_Talk.py`
|
| 16 |
+
|
| 17 |
+
#### 2. ai-friends-talk.html (HTML - Simulated Responses)
|
| 18 |
+
- Uses simulated responses for demonstration
|
| 19 |
+
- Good for UI/UX testing without API costs
|
| 20 |
+
- Compatible with the multilingual JavaScript system
|
| 21 |
+
- Open directly in browser
|
| 22 |
+
|
| 23 |
+
### API Setup Required for Real AI Integration:
|
| 24 |
+
|
| 25 |
+
Create a `.env` file with:
|
| 26 |
+
```
|
| 27 |
+
GROQ_API_KEY=your_groq_api_key_here
|
| 28 |
+
GOOGLE_API_KEY=your_google_api_key_here
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
### Character Personalities (Real AI Models):
|
| 32 |
+
|
| 33 |
+
1. **Alex (Groq llama3-70b-8192)**
|
| 34 |
+
- Witty and charismatic AI debater
|
| 35 |
+
- Sharp sense of humor, plays devil's advocate
|
| 36 |
+
- Uses vivid examples and thought-provoking questions
|
| 37 |
+
|
| 38 |
+
2. **Blake (Gemini 2.0 Flash)**
|
| 39 |
+
- Imaginative and boundlessly optimistic
|
| 40 |
+
- Natural storyteller with beautiful metaphors
|
| 41 |
+
- Finds connections between unrelated concepts
|
| 42 |
+
|
| 43 |
+
3. **Charlie (Gemini 1.5 Flash)**
|
| 44 |
+
- Thoughtful and systematic analyst
|
| 45 |
+
- Scientific curiosity and methodical thinking
|
| 46 |
+
- Breaks down complex ideas logically
|
| 47 |
+
|
| 48 |
+
### Test Results:
|
| 49 |
+
✅ All three AI models tested and working correctly
|
| 50 |
+
✅ Real API calls to Groq and Google Gemini
|
| 51 |
+
✅ Character personalities distinct and engaging
|
| 52 |
+
✅ Multilingual support (English, Vietnamese, German)
|
| 53 |
+
|
| 54 |
+
### Usage:
|
| 55 |
+
1. Install dependencies: `pip install streamlit openai anthropic groq python-dotenv`
|
| 56 |
+
2. Set up your API keys in `.env` file
|
| 57 |
+
3. Run: `streamlit run AI_Talk.py`
|
| 58 |
+
4. Select a topic and watch real AI models debate!
|
| 59 |
+
|
| 60 |
+
---
|
| 61 |
+
|
| 62 |
+
**Note**: The Streamlit version (AI_Talk.py) uses REAL AI model APIs as requested. The HTML version is for demonstration purposes with simulated responses.
|
README_Deploy.md
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Friends Talk - Gradio Deployment Guide
|
| 2 |
+
|
| 3 |
+
🎭 Watch 3 AI friends with different personalities debate fun topics!
|
| 4 |
+
|
| 5 |
+
## 🚨 IMPORTANT: Deployment Error Fix
|
| 6 |
+
|
| 7 |
+
If you get the error **"It appears that one or more of your files contain valid Hugging Face secrets"**, follow these steps:
|
| 8 |
+
|
| 9 |
+
### For Hugging Face Spaces Deployment (CORRECT WAY)
|
| 10 |
+
|
| 11 |
+
#### Method 1: Manual Upload (Recommended)
|
| 12 |
+
1. **Create a new Space** at https://huggingface.co/spaces
|
| 13 |
+
2. **Upload ONLY these files**:
|
| 14 |
+
- `AI_Talk_Gradio.py`
|
| 15 |
+
- `requirements.txt`
|
| 16 |
+
- `.gitignore` (optional)
|
| 17 |
+
|
| 18 |
+
3. **DO NOT upload these files**:
|
| 19 |
+
- ❌ `.env` (contains API keys)
|
| 20 |
+
- ❌ `__pycache__/` folders
|
| 21 |
+
- ❌ `.gradio/` folders
|
| 22 |
+
- ❌ Certificate files (`.pem`)
|
| 23 |
+
|
| 24 |
+
4. **Add API keys as Secrets** in Space settings:
|
| 25 |
+
- Go to your Space → Settings → Repository secrets
|
| 26 |
+
- Add: `GROQ_API_KEY` = your_groq_api_key
|
| 27 |
+
- Add: `GOOGLE_API_KEY` = your_google_api_key
|
| 28 |
+
|
| 29 |
+
5. **Space will auto-deploy** with the secrets!
|
| 30 |
+
|
| 31 |
+
#### Method 2: Using huggingface_hub (Advanced)
|
| 32 |
+
```python
|
| 33 |
+
from huggingface_hub import HfApi
|
| 34 |
+
|
| 35 |
+
api = HfApi()
|
| 36 |
+
api.upload_file(
|
| 37 |
+
path_or_fileobj="AI_Talk_Gradio.py",
|
| 38 |
+
path_in_repo="AI_Talk_Gradio.py",
|
| 39 |
+
repo_id="your_username/your_space_name",
|
| 40 |
+
repo_type="space"
|
| 41 |
+
)
|
| 42 |
+
api.upload_file(
|
| 43 |
+
path_or_fileobj="requirements.txt",
|
| 44 |
+
path_in_repo="requirements.txt",
|
| 45 |
+
repo_id="your_username/your_space_name",
|
| 46 |
+
repo_type="space"
|
| 47 |
+
)
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
### For Local Development
|
| 51 |
+
|
| 52 |
+
1. **Install dependencies:**
|
| 53 |
+
```bash
|
| 54 |
+
pip install -r requirements.txt
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
2. **Create `.env` file:**
|
| 58 |
+
```bash
|
| 59 |
+
GROQ_API_KEY=your_groq_api_key_here
|
| 60 |
+
GOOGLE_API_KEY=your_google_api_key_here
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
3. **Run the app:**
|
| 64 |
+
```bash
|
| 65 |
+
python AI_Talk_Gradio.py
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
## 🤖 AI Characters
|
| 69 |
+
|
| 70 |
+
- 🎭 **Alex** (Groq llama3-70b): Witty debater
|
| 71 |
+
- 🌟 **Blake** (Gemini 2.0): Creative optimist
|
| 72 |
+
- 🧠 **Charlie** (Gemini 1.5): Logical analyst
|
| 73 |
+
|
| 74 |
+
## ✨ Features
|
| 75 |
+
|
| 76 |
+
- 🌐 Multi-language support (English/Tiếng Việt/Deutsch)
|
| 77 |
+
- 🎯 Custom or predefined topics
|
| 78 |
+
- 💬 Join the conversation as 4th participant
|
| 79 |
+
- 🎨 Beautiful gradient UI with animations
|
| 80 |
+
- 📱 Mobile responsive design
|
| 81 |
+
- 🔄 Auto-conversation flow
|
| 82 |
+
- ⚡ Real-time AI responses
|
| 83 |
+
|
| 84 |
+
## 📁 Files Structure for Deployment
|
| 85 |
+
|
| 86 |
+
```
|
| 87 |
+
AI_Game/
|
| 88 |
+
├── AI_Talk_Gradio.py ✅ Main app file
|
| 89 |
+
├── requirements.txt ✅ Dependencies
|
| 90 |
+
├── .gitignore ✅ Ignore sensitive files
|
| 91 |
+
├── README_Deploy.md ℹ️ This guide
|
| 92 |
+
├── .env ❌ DO NOT UPLOAD (contains secrets)
|
| 93 |
+
└── __pycache__/ ❌ DO NOT UPLOAD (Python cache)
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
## 🔑 API Keys Required
|
| 97 |
+
|
| 98 |
+
1. **Groq API Key**: Get from https://console.groq.com/
|
| 99 |
+
2. **Google API Key**: Get from https://ai.google.dev/gemini-api
|
| 100 |
+
|
| 101 |
+
## 🔧 Troubleshooting
|
| 102 |
+
|
| 103 |
+
- **"BadRequestError: secrets detected"**: Remove `.env` file from upload
|
| 104 |
+
- **"Model not found"**: Check API keys in Space secrets
|
| 105 |
+
- **"Port already in use"**: App auto-selects available port
|
| 106 |
+
- **Import errors**: Verify `requirements.txt` is uploaded
|
| 107 |
+
|
| 108 |
+
Made by **Digitized Brains** 🧠
|
README_Gradio.md
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🤖 AI Friends Talk - Gradio Version
|
| 2 |
+
|
| 3 |
+
Cuộc nói chuyện vui vẻ giữa 3 người bạn AI với giao diện Gradio!
|
| 4 |
+
|
| 5 |
+
## 🎯 Tính năng
|
| 6 |
+
|
| 7 |
+
- **3 AI Characters**: Alex (Groq), Blake (Gemini 2.0), Charlie (Gemini 1.5)
|
| 8 |
+
- **Giao diện Gradio**: Web interface hiện đại, dễ sử dụng
|
| 9 |
+
- **Đa ngôn ngữ**: English, Tiếng Việt, Deutsch
|
| 10 |
+
- **Chủ đề đa dạng**: 20+ chủ đề có sẵn + tùy chỉnh
|
| 11 |
+
- **Tương tác real-time**: Tham gia cuộc trò chuyện với AI
|
| 12 |
+
- **Deploy friendly**: Dễ deploy lên Hugging Face Spaces
|
| 13 |
+
- **Thiết kế đẹp**: Custom footer che Gradio attribution
|
| 14 |
+
- **UI tối ưu**: Controls được tổ chức hợp lý, dễ sử dụng
|
| 15 |
+
|
| 16 |
+
## 🚀 Cách sử dụng
|
| 17 |
+
|
| 18 |
+
### 1. Cài đặt dependencies
|
| 19 |
+
```bash
|
| 20 |
+
pip install -r requirements_ai_talk_gradio.txt
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
### 2. Cấu hình API keys
|
| 24 |
+
Tạo file `.env` với nội dung:
|
| 25 |
+
```
|
| 26 |
+
GROQ_API_KEY=your_groq_api_key_here
|
| 27 |
+
GOOGLE_API_KEY=your_google_api_key_here
|
| 28 |
+
```
|
| 29 |
+
|
| 30 |
+
### 3. Chạy ứng dụng
|
| 31 |
+
**Cách 1**: Sử dụng script
|
| 32 |
+
```bash
|
| 33 |
+
start_ai_talk.bat
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
**Cách 2**: Chạy trực tiếp
|
| 37 |
+
```bash
|
| 38 |
+
python AI_Talk_Gradio.py
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
### 4. Truy cập giao diện
|
| 42 |
+
- Mở browser và truy cập: http://localhost:7860
|
| 43 |
+
- Hoặc sử dụng share link được tạo tự động
|
| 44 |
+
|
| 45 |
+
## 📱 Hướng dẫn sử dụng
|
| 46 |
+
|
| 47 |
+
1. **Chọn ngôn ngữ**: Dropdown đầu trang
|
| 48 |
+
2. **Chọn chủ đề**: Từ danh sách có sẵn hoặc nhập tùy chỉnh
|
| 49 |
+
3. **Bắt đầu**: Click "🎬 Start Conversation"
|
| 50 |
+
4. **Theo dõi**: Xem cuộc trò chuyện diễn ra
|
| 51 |
+
5. **Điều khiển**: Sử dụng các nút bên dưới conversation:
|
| 52 |
+
- **▶️ Continue**: AI tiếp tục trò chuyện
|
| 53 |
+
- **🔄 Clear**: Xóa để bắt đầu lại
|
| 54 |
+
6. **Tham gia**: Nhập tin nhắn và click "📤 Send" để join cuộc trò chuyện
|
| 55 |
+
|
| 56 |
+
## 🎭 Nhân vật AI
|
| 57 |
+
|
| 58 |
+
- **🎭 Alex**: Người tranh luận dí dỏm (Groq Llama3-70B)
|
| 59 |
+
- **🌟 Blake**: Người lạc quan sáng tạo (Gemini 2.0 Flash)
|
| 60 |
+
- **🧠 Charlie**: Nhà phân tích logic (Gemini 1.5 Flash)
|
| 61 |
+
|
| 62 |
+
## 🌐 Deploy lên Hugging Face Spaces
|
| 63 |
+
|
| 64 |
+
1. Tạo Space mới: https://huggingface.co/spaces
|
| 65 |
+
2. Upload các file:
|
| 66 |
+
- `AI_Talk_Gradio.py`
|
| 67 |
+
- `requirements_ai_talk_gradio.txt`
|
| 68 |
+
- API keys: thêm vào Space Settings → Repository secrets (không upload file `.env`)
|
| 69 |
+
3. Space sẽ tự động chạy!
|
| 70 |
+
|
| 71 |
+
## 📝 Files quan trọng
|
| 72 |
+
|
| 73 |
+
- `AI_Talk_Gradio.py`: Main application (cần API keys)
|
| 74 |
+
- `AI_Talk_Demo.py`: Demo version (không cần API keys)
|
| 75 |
+
- `requirements_ai_talk_gradio.txt`: Dependencies
|
| 76 |
+
- `start_ai_talk.bat`: Windows startup script
|
| 77 |
+
- `.env`: API keys configuration
|
| 78 |
+
|
| 79 |
+
## 🎨 Thiết kế mới
|
| 80 |
+
|
| 81 |
+
- **Footer tùy chỉnh**: Che đi "Built with Gradio" với banner Digitized Brains
|
| 82 |
+
- **Controls tối ưu**: Nút Continue/Clear được đặt bên dưới conversation
|
| 83 |
+
- **Layout hợp lý**: Start button độc lập, các controls phân tách rõ ràng
|
| 84 |
+
- **CSS gradient**: Thiết kế đẹp mắt với màu sắc phù hợp
|
| 85 |
+
|
| 86 |
+
## 🐛 Troubleshooting
|
| 87 |
+
|
| 88 |
+
**Lỗi Import**:
|
| 89 |
+
```bash
|
| 90 |
+
pip install -r requirements_ai_talk_gradio.txt
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
**API Key Error**: Kiểm tra file `.env` và API keys hợp lệ
|
| 94 |
+
|
| 95 |
+
**Port đã được sử dụng**: Đổi port trong code hoặc kill process đang chạy
|
| 96 |
+
|
| 97 |
+
**Unicode Error**: File đã được optimize cho Windows console
|
| 98 |
+
|
| 99 |
+
## 💡 So sánh với phiên bản Streamlit
|
| 100 |
+
|
| 101 |
+
| Tính năng | Streamlit | Gradio |
|
| 102 |
+
|-----------|-----------|---------|
|
| 103 |
+
| Giao diện | Custom CSS phức tạp | Components sẵn có |
|
| 104 |
+
| Deploy | Streamlit Cloud | HuggingFace Spaces |
|
| 105 |
+
| Performance | Reload nhiều | Ít reload hơn |
|
| 106 |
+
| Customization | Cao | Trung bình |
|
| 107 |
+
| Ease of use | Trung bình | Dễ |
|
| 108 |
+
|
| 109 |
+
Chúc bạn có những cuộc trò chuyện vui vẻ! 🎉
|
ai-friends-talk.html
ADDED
|
@@ -0,0 +1,804 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>AI Friends Talk - Interactive AI Debate</title>
|
| 7 |
+
<script src="js/languages.js"></script>
|
| 8 |
+
<style>
|
| 9 |
+
* {
|
| 10 |
+
margin: 0;
|
| 11 |
+
padding: 0;
|
| 12 |
+
box-sizing: border-box;
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
body {
|
| 16 |
+
font-family: 'Arial', sans-serif;
|
| 17 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
| 18 |
+
min-height: 100vh;
|
| 19 |
+
color: #333;
|
| 20 |
+
margin: 0;
|
| 21 |
+
padding: 0;
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
.container {
|
| 25 |
+
width: 100%;
|
| 26 |
+
margin: 0;
|
| 27 |
+
padding: 0;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
.top-banner {
|
| 31 |
+
background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
|
| 32 |
+
padding: 30px;
|
| 33 |
+
margin-bottom: 0;
|
| 34 |
+
display: flex;
|
| 35 |
+
justify-content: space-between;
|
| 36 |
+
align-items: center;
|
| 37 |
+
box-shadow: 0 10px 30px rgba(0,0,0,0.2);
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
.logo {
|
| 41 |
+
display: flex;
|
| 42 |
+
align-items: center;
|
| 43 |
+
background: linear-gradient(135deg, #FF8A65 0%, #FF6B9D 100%);
|
| 44 |
+
padding: 10px 20px;
|
| 45 |
+
border-radius: 25px;
|
| 46 |
+
font-weight: bold;
|
| 47 |
+
font-size: 24px;
|
| 48 |
+
color: white;
|
| 49 |
+
gap: 10px;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
.title-section {
|
| 53 |
+
text-align: center;
|
| 54 |
+
flex: 1;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
.title-section h1 {
|
| 58 |
+
color: white;
|
| 59 |
+
margin: 0;
|
| 60 |
+
font-size: 2.5em;
|
| 61 |
+
text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
.title-section p {
|
| 65 |
+
color: rgba(255,255,255,0.9);
|
| 66 |
+
margin: 10px 0;
|
| 67 |
+
font-size: 1.1em;
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
.language-selector select {
|
| 71 |
+
background: rgba(255,255,255,0.2);
|
| 72 |
+
color: white;
|
| 73 |
+
border: 2px solid rgba(255,255,255,0.3);
|
| 74 |
+
padding: 10px 15px;
|
| 75 |
+
border-radius: 25px;
|
| 76 |
+
font-size: 16px;
|
| 77 |
+
cursor: pointer;
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
.language-selector select option {
|
| 81 |
+
background: #333;
|
| 82 |
+
color: white;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
.main-content {
|
| 86 |
+
background: white;
|
| 87 |
+
padding: 30px 0;
|
| 88 |
+
display: flex;
|
| 89 |
+
min-height: calc(100vh - 200px);
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
.left-panel {
|
| 93 |
+
width: 40%;
|
| 94 |
+
padding: 30px;
|
| 95 |
+
background: #f8f9fa;
|
| 96 |
+
border-right: 2px solid #eee;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
.right-panel {
|
| 100 |
+
width: 60%;
|
| 101 |
+
padding: 30px;
|
| 102 |
+
background: white;
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
.characters-intro {
|
| 106 |
+
display: grid;
|
| 107 |
+
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
|
| 108 |
+
gap: 20px;
|
| 109 |
+
margin-bottom: 30px;
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
.character-card {
|
| 113 |
+
background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
|
| 114 |
+
padding: 20px;
|
| 115 |
+
border-radius: 15px;
|
| 116 |
+
color: white;
|
| 117 |
+
text-align: center;
|
| 118 |
+
transform: translateY(0);
|
| 119 |
+
transition: transform 0.3s ease;
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
.character-card:hover {
|
| 123 |
+
transform: translateY(-5px);
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
.character-card.alex {
|
| 127 |
+
background: linear-gradient(135deg, #FF6B6B 0%, #FF8E53 100%);
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
.character-card.blake {
|
| 131 |
+
background: linear-gradient(135deg, #4ECDC4 0%, #44A08D 100%);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
.character-card.charlie {
|
| 135 |
+
background: linear-gradient(135deg, #45B7D1 0%, #96C93D 100%);
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
.topic-selection {
|
| 139 |
+
margin-bottom: 30px;
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
.topic-selection h3 {
|
| 143 |
+
color: #333;
|
| 144 |
+
margin-bottom: 15px;
|
| 145 |
+
font-size: 1.5em;
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
#topicSelect {
|
| 149 |
+
width: 100%;
|
| 150 |
+
padding: 15px;
|
| 151 |
+
border: 2px solid #ddd;
|
| 152 |
+
border-radius: 10px;
|
| 153 |
+
font-size: 16px;
|
| 154 |
+
margin-bottom: 15px;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
#customTopicInput {
|
| 158 |
+
width: 100%;
|
| 159 |
+
padding: 15px;
|
| 160 |
+
border: 2px solid #ddd;
|
| 161 |
+
border-radius: 10px;
|
| 162 |
+
font-size: 16px;
|
| 163 |
+
margin-bottom: 20px;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
.controls {
|
| 167 |
+
display: flex;
|
| 168 |
+
gap: 15px;
|
| 169 |
+
justify-content: center;
|
| 170 |
+
margin-bottom: 30px;
|
| 171 |
+
flex-wrap: wrap;
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
.btn {
|
| 175 |
+
padding: 15px 25px;
|
| 176 |
+
border: none;
|
| 177 |
+
border-radius: 25px;
|
| 178 |
+
font-size: 16px;
|
| 179 |
+
font-weight: bold;
|
| 180 |
+
cursor: pointer;
|
| 181 |
+
transition: all 0.3s ease;
|
| 182 |
+
min-width: 150px;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
.btn-primary {
|
| 186 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
| 187 |
+
color: white;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
.btn-secondary {
|
| 191 |
+
background: linear-gradient(135deg, #ffecd2 0%, #fcb69f 100%);
|
| 192 |
+
color: #333;
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
.btn-danger {
|
| 196 |
+
background: linear-gradient(135deg, #ff9a9e 0%, #fecfef 100%);
|
| 197 |
+
color: #333;
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
.btn:hover {
|
| 201 |
+
transform: translateY(-2px);
|
| 202 |
+
box-shadow: 0 5px 15px rgba(0,0,0,0.2);
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
.conversation-area {
|
| 206 |
+
background: #f8f9fa;
|
| 207 |
+
border-radius: 15px;
|
| 208 |
+
padding: 25px;
|
| 209 |
+
min-height: 400px;
|
| 210 |
+
margin-bottom: 20px;
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
.conversation-header {
|
| 214 |
+
text-align: center;
|
| 215 |
+
margin-bottom: 20px;
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
.current-topic {
|
| 219 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
| 220 |
+
color: white;
|
| 221 |
+
padding: 15px;
|
| 222 |
+
border-radius: 10px;
|
| 223 |
+
margin-bottom: 20px;
|
| 224 |
+
font-weight: bold;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
.message {
|
| 228 |
+
padding: 15px;
|
| 229 |
+
margin: 15px 0;
|
| 230 |
+
border-radius: 15px;
|
| 231 |
+
border-left: 5px solid;
|
| 232 |
+
animation: fadeIn 0.5s ease;
|
| 233 |
+
position: relative;
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
@keyframes fadeIn {
|
| 237 |
+
from { opacity: 0; transform: translateY(20px); }
|
| 238 |
+
to { opacity: 1; transform: translateY(0); }
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
.message.alex {
|
| 242 |
+
background: rgba(255, 107, 107, 0.1);
|
| 243 |
+
border-left-color: #FF6B6B;
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
.message.blake {
|
| 247 |
+
background: rgba(78, 205, 196, 0.1);
|
| 248 |
+
border-left-color: #4ECDC4;
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
.message.charlie {
|
| 252 |
+
background: rgba(69, 183, 209, 0.1);
|
| 253 |
+
border-left-color: #45B7D1;
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
.message.user {
|
| 257 |
+
background: rgba(158, 158, 158, 0.1);
|
| 258 |
+
border-left-color: #9E9E9E;
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
.message-header {
|
| 262 |
+
font-weight: bold;
|
| 263 |
+
margin-bottom: 8px;
|
| 264 |
+
color: #333;
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
.user-input {
|
| 268 |
+
background: white;
|
| 269 |
+
border-radius: 15px;
|
| 270 |
+
padding: 20px;
|
| 271 |
+
margin-bottom: 20px;
|
| 272 |
+
border: 2px solid #eee;
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
.user-input h4 {
|
| 276 |
+
margin-bottom: 15px;
|
| 277 |
+
color: #333;
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
#userMessageInput {
|
| 281 |
+
width: 100%;
|
| 282 |
+
padding: 15px;
|
| 283 |
+
border: 2px solid #ddd;
|
| 284 |
+
border-radius: 10px;
|
| 285 |
+
font-size: 16px;
|
| 286 |
+
margin-bottom: 15px;
|
| 287 |
+
min-height: 100px;
|
| 288 |
+
resize: vertical;
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
.bottom-banner {
|
| 292 |
+
background: linear-gradient(135deg, #4A90E2 0%, #2E86AB 70%, #FF8A65 85%, #FF6B9D 100%);
|
| 293 |
+
padding: 20px 30px;
|
| 294 |
+
display: flex;
|
| 295 |
+
justify-content: space-between;
|
| 296 |
+
align-items: center;
|
| 297 |
+
color: white;
|
| 298 |
+
box-shadow: 0 -10px 30px rgba(0,0,0,0.2);
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
.thinking-indicator {
|
| 302 |
+
display: none;
|
| 303 |
+
text-align: center;
|
| 304 |
+
padding: 20px;
|
| 305 |
+
font-style: italic;
|
| 306 |
+
color: #666;
|
| 307 |
+
}
|
| 308 |
+
|
| 309 |
+
.thinking-dots {
|
| 310 |
+
display: inline-block;
|
| 311 |
+
animation: thinking 1.5s infinite;
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
@keyframes thinking {
|
| 315 |
+
0%, 20% { opacity: 1; }
|
| 316 |
+
50% { opacity: 0.3; }
|
| 317 |
+
100% { opacity: 1; }
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
.status-indicator {
|
| 321 |
+
display: none;
|
| 322 |
+
padding: 15px;
|
| 323 |
+
border-radius: 10px;
|
| 324 |
+
margin-bottom: 20px;
|
| 325 |
+
text-align: center;
|
| 326 |
+
font-weight: bold;
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
.status-indicator.success {
|
| 330 |
+
background: #d4edda;
|
| 331 |
+
color: #155724;
|
| 332 |
+
border: 1px solid #c3e6cb;
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
.status-indicator.error {
|
| 336 |
+
background: #f8d7da;
|
| 337 |
+
color: #721c24;
|
| 338 |
+
border: 1px solid #f5c6cb;
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
@media (max-width: 768px) {
|
| 342 |
+
.top-banner {
|
| 343 |
+
flex-direction: column;
|
| 344 |
+
gap: 15px;
|
| 345 |
+
text-align: center;
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
.controls {
|
| 349 |
+
flex-direction: column;
|
| 350 |
+
align-items: center;
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
.btn {
|
| 354 |
+
width: 200px;
|
| 355 |
+
}
|
| 356 |
+
}
|
| 357 |
+
</style>
|
| 358 |
+
</head>
|
| 359 |
+
<body>
|
| 360 |
+
<div class="container">
|
| 361 |
+
<!-- Top Banner -->
|
| 362 |
+
<div class="top-banner">
|
| 363 |
+
<div class="logo">
|
| 364 |
+
🧠 DB
|
| 365 |
+
</div>
|
| 366 |
+
<div style="color: white; font-weight: bold; font-size: 18px;">
|
| 367 |
+
Digitized Brains
|
| 368 |
+
</div>
|
| 369 |
+
<div class="title-section">
|
| 370 |
+
<h1 data-i18n="title">AI Friends Talk</h1>
|
| 371 |
+
<p data-i18n="subtitle">Watch AI friends debate fun topics!</p>
|
| 372 |
+
<p data-i18n="instructions">Choose a topic and watch Alex, Blake, and Charlie discuss it. You can also join the conversation!</p>
|
| 373 |
+
</div>
|
| 374 |
+
<div class="language-selector">
|
| 375 |
+
<select id="languageSelector">
|
| 376 |
+
<option value="en">English</option>
|
| 377 |
+
<option value="vi">Tiếng Việt</option>
|
| 378 |
+
<option value="de">Deutsch</option>
|
| 379 |
+
</select>
|
| 380 |
+
</div>
|
| 381 |
+
</div>
|
| 382 |
+
|
| 383 |
+
<!-- Character Introduction - Full Width -->
|
| 384 |
+
<div style="background: white; padding: 15px 30px; border-bottom: 2px solid #eee;">
|
| 385 |
+
<div class="characters-intro" style="grid-template-columns: 1fr 1fr 1fr; gap: 15px; max-width: none;">
|
| 386 |
+
<div class="character-card alex" style="padding: 15px;">
|
| 387 |
+
<h3 style="font-size: 14px; margin-bottom: 8px;">🎭 Alex</h3>
|
| 388 |
+
<p data-i18n="alex" style="font-size: 12px; margin: 0;">Alex - The witty debater (Groq AI)</p>
|
| 389 |
+
</div>
|
| 390 |
+
<div class="character-card blake" style="padding: 15px;">
|
| 391 |
+
<h3 style="font-size: 14px; margin-bottom: 8px;">🎨 Blake</h3>
|
| 392 |
+
<p data-i18n="blake" style="font-size: 12px; margin: 0;">Blake - The creative optimist (Gemini 2.0)</p>
|
| 393 |
+
</div>
|
| 394 |
+
<div class="character-card charlie" style="padding: 15px;">
|
| 395 |
+
<h3 style="font-size: 14px; margin-bottom: 8px;">🔬 Charlie</h3>
|
| 396 |
+
<p data-i18n="charlie" style="font-size: 12px; margin: 0;">Charlie - The logical analyst (Gemini 1.5)</p>
|
| 397 |
+
</div>
|
| 398 |
+
</div>
|
| 399 |
+
</div>
|
| 400 |
+
|
| 401 |
+
<!-- Topic Selection - 2 Columns -->
|
| 402 |
+
<div style="background: #f8f9fa; padding: 15px 30px; border-bottom: 2px solid #eee;">
|
| 403 |
+
<div style="display: flex; gap: 20px; align-items: flex-start;">
|
| 404 |
+
|
| 405 |
+
<!-- Left Column - Custom Topic Input -->
|
| 406 |
+
<div style="flex: 1;">
|
| 407 |
+
<div class="custom-topic-input">
|
| 408 |
+
<h4 data-i18n="customTopic" style="margin-bottom: 10px; color: #333; font-size: 13px;">Or enter your own topic:</h4>
|
| 409 |
+
<div style="display: flex; gap: 8px;">
|
| 410 |
+
<input type="text" id="customTopicInput" data-i18n-placeholder="customTopic" placeholder="Enter your custom topic here..." style="flex: 1; padding: 10px; border: 2px solid #ddd; border-radius: 6px; font-size: 12px;">
|
| 411 |
+
<button class="btn btn-primary" id="setCustomTopicBtn" data-i18n="setTopic" style="min-width: 50px; padding: 10px 12px; font-size: 12px;">📝 Set</button>
|
| 412 |
+
</div>
|
| 413 |
+
</div>
|
| 414 |
+
</div>
|
| 415 |
+
|
| 416 |
+
<!-- Right Column - Topic Selection -->
|
| 417 |
+
<div style="flex: 1;">
|
| 418 |
+
<div class="topic-selection">
|
| 419 |
+
<h3 data-i18n="chooseTopic" style="margin-bottom: 10px; color: #333; font-size: 13px;">Choose a Topic</h3>
|
| 420 |
+
<select id="topicSelect" style="width: 100%; padding: 10px; border: 2px solid #ddd; border-radius: 6px; font-size: 12px;">
|
| 421 |
+
<option>Loading topics...</option>
|
| 422 |
+
</select>
|
| 423 |
+
</div>
|
| 424 |
+
</div>
|
| 425 |
+
|
| 426 |
+
</div>
|
| 427 |
+
</div>
|
| 428 |
+
|
| 429 |
+
<!-- Main Content -->
|
| 430 |
+
<div class="main-content">
|
| 431 |
+
<!-- Left Panel - Controls -->
|
| 432 |
+
<div class="left-panel">
|
| 433 |
+
<!-- Controls -->
|
| 434 |
+
<div class="controls" style="flex-direction: column; align-items: stretch;">
|
| 435 |
+
<button class="btn btn-primary" id="startBtn" data-i18n="startConversation">🎬 Start Conversation</button>
|
| 436 |
+
<button class="btn btn-secondary" id="continueBtn" data-i18n="continueConversation" style="display: none;">▶️ Continue</button>
|
| 437 |
+
<button class="btn btn-secondary" id="pauseBtn" data-i18n="pauseConversation">⏸️ Pause</button>
|
| 438 |
+
<button class="btn btn-danger" id="clearBtn" data-i18n="clearChat">🔄 Clear Chat</button>
|
| 439 |
+
</div>
|
| 440 |
+
|
| 441 |
+
<!-- User Input -->
|
| 442 |
+
<div class="user-input" style="margin-top: 15px;">
|
| 443 |
+
<h4 data-i18n="addMessage" style="font-size: 13px; margin-bottom: 10px;">💭 Add Your Message</h4>
|
| 444 |
+
<textarea id="userMessageInput" data-i18n-placeholder="typeMessage" placeholder="Type your message to join the conversation..." style="padding: 10px; font-size: 12px; min-height: 80px;"></textarea>
|
| 445 |
+
<button class="btn btn-primary" id="sendMessageBtn" data-i18n="sendMessage" style="padding: 10px 15px; font-size: 12px; margin-top: 10px;">📤 Send Message</button>
|
| 446 |
+
</div>
|
| 447 |
+
</div>
|
| 448 |
+
|
| 449 |
+
<!-- Right Panel - Conversation -->
|
| 450 |
+
<div class="right-panel">
|
| 451 |
+
<!-- Status Indicator -->
|
| 452 |
+
<div class="status-indicator" id="statusIndicator"></div>
|
| 453 |
+
|
| 454 |
+
<!-- Conversation Area with Independent Scroll -->
|
| 455 |
+
<div style="background: #f8f9fa; border-radius: 15px; height: calc(100vh - 150px); display: flex; flex-direction: column;">
|
| 456 |
+
<div class="conversation-header" style="padding: 20px 25px; border-bottom: 2px solid #eee;">
|
| 457 |
+
<h3 data-i18n="conversation">💬 Conversation</h3>
|
| 458 |
+
<div class="current-topic" id="currentTopic" style="display: none; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 15px; border-radius: 10px; margin-top: 15px; font-weight: bold;">
|
| 459 |
+
<span data-i18n="topic">Topic:</span> <span id="topicDisplay"></span>
|
| 460 |
+
</div>
|
| 461 |
+
</div>
|
| 462 |
+
|
| 463 |
+
<div style="flex: 1; overflow-y: auto; padding: 20px 25px;">
|
| 464 |
+
<div class="thinking-indicator" id="thinkingIndicator">
|
| 465 |
+
<span id="thinkingText">Alex is thinking</span><span class="thinking-dots">...</span>
|
| 466 |
+
</div>
|
| 467 |
+
<div id="conversationDisplay">
|
| 468 |
+
<div style="text-align: center; color: #666; padding: 40px;">
|
| 469 |
+
<p>Select a topic and start the conversation to see AI friends debate!</p>
|
| 470 |
+
</div>
|
| 471 |
+
</div>
|
| 472 |
+
</div>
|
| 473 |
+
</div>
|
| 474 |
+
</div>
|
| 475 |
+
</div>
|
| 476 |
+
|
| 477 |
+
<!-- Bottom Banner -->
|
| 478 |
+
<div class="bottom-banner">
|
| 479 |
+
<div style="display: flex; align-items: center; gap: 10px;">
|
| 480 |
+
<div class="logo">
|
| 481 |
+
🧠 DB
|
| 482 |
+
</div>
|
| 483 |
+
<div style="color: white; font-weight: bold; font-size: 18px;">
|
| 484 |
+
Digitized Brains
|
| 485 |
+
</div>
|
| 486 |
+
</div>
|
| 487 |
+
<div data-i18n="madeBy">Made by Digitized Brains</div>
|
| 488 |
+
</div>
|
| 489 |
+
</div>
|
| 490 |
+
|
| 491 |
+
<script>
|
| 492 |
+
// App State
|
| 493 |
+
let conversation = [];
|
| 494 |
+
let currentTopic = '';
|
| 495 |
+
let isRunning = false;
|
| 496 |
+
let messageCount = 0;
|
| 497 |
+
const maxMessages = 15;
|
| 498 |
+
const characters = ['Alex', 'Blake', 'Charlie'];
|
| 499 |
+
|
| 500 |
+
// DOM Elements
|
| 501 |
+
const startBtn = document.getElementById('startBtn');
|
| 502 |
+
const pauseBtn = document.getElementById('pauseBtn');
|
| 503 |
+
const continueBtn = document.getElementById('continueBtn');
|
| 504 |
+
const clearBtn = document.getElementById('clearBtn');
|
| 505 |
+
const topicSelect = document.getElementById('topicSelect');
|
| 506 |
+
const customTopicInput = document.getElementById('customTopicInput');
|
| 507 |
+
const setCustomTopicBtn = document.getElementById('setCustomTopicBtn');
|
| 508 |
+
const currentTopicDiv = document.getElementById('currentTopic');
|
| 509 |
+
const topicDisplay = document.getElementById('topicDisplay');
|
| 510 |
+
const conversationDisplay = document.getElementById('conversationDisplay');
|
| 511 |
+
const userMessageInput = document.getElementById('userMessageInput');
|
| 512 |
+
const sendMessageBtn = document.getElementById('sendMessageBtn');
|
| 513 |
+
const thinkingIndicator = document.getElementById('thinkingIndicator');
|
| 514 |
+
const statusIndicator = document.getElementById('statusIndicator');
|
| 515 |
+
const thinkingText = document.getElementById('thinkingText');
|
| 516 |
+
|
| 517 |
+
// Event Listeners
|
| 518 |
+
startBtn.addEventListener('click', startConversation);
|
| 519 |
+
pauseBtn.addEventListener('click', pauseConversation);
|
| 520 |
+
continueBtn.addEventListener('click', continueConversation);
|
| 521 |
+
clearBtn.addEventListener('click', clearConversation);
|
| 522 |
+
sendMessageBtn.addEventListener('click', sendUserMessage);
|
| 523 |
+
setCustomTopicBtn.addEventListener('click', setCustomTopic);
|
| 524 |
+
|
| 525 |
+
function startConversation() {
|
| 526 |
+
const selectedTopic = customTopicInput.value.trim() || topicSelect.value;
|
| 527 |
+
if (!selectedTopic) {
|
| 528 |
+
showStatus('Please select or enter a topic!', 'error');
|
| 529 |
+
return;
|
| 530 |
+
}
|
| 531 |
+
|
| 532 |
+
currentTopic = selectedTopic;
|
| 533 |
+
topicDisplay.textContent = currentTopic;
|
| 534 |
+
currentTopicDiv.style.display = 'block';
|
| 535 |
+
|
| 536 |
+
conversation = [];
|
| 537 |
+
messageCount = 0;
|
| 538 |
+
isRunning = false; // Don't auto-start, wait for continue button
|
| 539 |
+
|
| 540 |
+
conversationDisplay.innerHTML = '';
|
| 541 |
+
showStatus('Conversation ready! Click Continue to start AI discussion...', 'success');
|
| 542 |
+
|
| 543 |
+
// Show continue button, hide start button
|
| 544 |
+
startBtn.style.display = 'none';
|
| 545 |
+
continueBtn.style.display = 'block';
|
| 546 |
+
pauseBtn.style.display = 'none';
|
| 547 |
+
}
|
| 548 |
+
|
| 549 |
+
function pauseConversation() {
|
| 550 |
+
isRunning = false;
|
| 551 |
+
thinkingIndicator.style.display = 'none';
|
| 552 |
+
pauseBtn.style.display = 'none';
|
| 553 |
+
continueBtn.style.display = 'block';
|
| 554 |
+
showStatus('Conversation paused.', 'success');
|
| 555 |
+
}
|
| 556 |
+
|
| 557 |
+
function continueConversation() {
|
| 558 |
+
if (currentTopic && messageCount < maxMessages) {
|
| 559 |
+
isRunning = true;
|
| 560 |
+
continueBtn.style.display = 'none';
|
| 561 |
+
pauseBtn.style.display = 'block';
|
| 562 |
+
showStatus('Conversation resumed.', 'success');
|
| 563 |
+
nextMessage();
|
| 564 |
+
}
|
| 565 |
+
}
|
| 566 |
+
|
| 567 |
+
function clearConversation() {
|
| 568 |
+
conversation = [];
|
| 569 |
+
messageCount = 0;
|
| 570 |
+
isRunning = false;
|
| 571 |
+
currentTopic = '';
|
| 572 |
+
|
| 573 |
+
conversationDisplay.innerHTML = '<div style="text-align: center; color: #666; padding: 40px;"><p>Select a topic and start the conversation to see AI friends debate!</p></div>';
|
| 574 |
+
currentTopicDiv.style.display = 'none';
|
| 575 |
+
thinkingIndicator.style.display = 'none';
|
| 576 |
+
statusIndicator.style.display = 'none';
|
| 577 |
+
continueBtn.style.display = 'none';
|
| 578 |
+
pauseBtn.style.display = 'block';
|
| 579 |
+
startBtn.style.display = 'block';
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
function sendUserMessage() {
|
| 583 |
+
const message = userMessageInput.value.trim();
|
| 584 |
+
if (!message) return;
|
| 585 |
+
|
| 586 |
+
const userLang = languages[currentLanguage];
|
| 587 |
+
addMessage(userLang.you, message, 'user');
|
| 588 |
+
userMessageInput.value = '';
|
| 589 |
+
}
|
| 590 |
+
|
| 591 |
+
function setCustomTopic() {
|
| 592 |
+
const customTopic = customTopicInput.value.trim();
|
| 593 |
+
if (!customTopic) {
|
| 594 |
+
showStatus('Please enter a custom topic!', 'error');
|
| 595 |
+
return;
|
| 596 |
+
}
|
| 597 |
+
|
| 598 |
+
// Set the custom topic as selected in the dropdown
|
| 599 |
+
topicSelect.innerHTML = '';
|
| 600 |
+
const option = document.createElement('option');
|
| 601 |
+
option.value = customTopic;
|
| 602 |
+
option.textContent = `Custom: ${customTopic}`;
|
| 603 |
+
option.selected = true;
|
| 604 |
+
topicSelect.appendChild(option);
|
| 605 |
+
|
| 606 |
+
// Add original topics back
|
| 607 |
+
const lang = languages[currentLanguage];
|
| 608 |
+
lang.topics.forEach(topic => {
|
| 609 |
+
const opt = document.createElement('option');
|
| 610 |
+
opt.value = topic;
|
| 611 |
+
opt.textContent = topic;
|
| 612 |
+
topicSelect.appendChild(opt);
|
| 613 |
+
});
|
| 614 |
+
|
| 615 |
+
showStatus('Custom topic set successfully!', 'success');
|
| 616 |
+
}
|
| 617 |
+
|
| 618 |
+
function nextMessage() {
|
| 619 |
+
if (!isRunning) {
|
| 620 |
+
thinkingIndicator.style.display = 'none';
|
| 621 |
+
return;
|
| 622 |
+
}
|
| 623 |
+
|
| 624 |
+
const currentCharacter = characters[messageCount % 3];
|
| 625 |
+
thinkingText.textContent = `${currentCharacter} ${languages[currentLanguage].thinking}`;
|
| 626 |
+
thinkingIndicator.style.display = 'block';
|
| 627 |
+
|
| 628 |
+
// Simulate AI response (in real implementation, this would call the AI API)
|
| 629 |
+
setTimeout(() => {
|
| 630 |
+
const response = generateSimulatedResponse(currentCharacter);
|
| 631 |
+
addMessage(currentCharacter, response, currentCharacter.toLowerCase());
|
| 632 |
+
|
| 633 |
+
thinkingIndicator.style.display = 'none';
|
| 634 |
+
messageCount++;
|
| 635 |
+
|
| 636 |
+
if (isRunning) {
|
| 637 |
+
setTimeout(nextMessage, 2000); // Wait 2 seconds before next message
|
| 638 |
+
}
|
| 639 |
+
}, Math.random() * 3000 + 2000); // Random delay between 2-5 seconds
|
| 640 |
+
}
|
| 641 |
+
|
| 642 |
+
function addMessage(character, message, type) {
|
| 643 |
+
const messageDiv = document.createElement('div');
|
| 644 |
+
messageDiv.className = `message ${type}`;
|
| 645 |
+
|
| 646 |
+
const headerDiv = document.createElement('div');
|
| 647 |
+
headerDiv.className = 'message-header';
|
| 648 |
+
headerDiv.textContent = character;
|
| 649 |
+
|
| 650 |
+
const contentDiv = document.createElement('div');
|
| 651 |
+
contentDiv.textContent = message;
|
| 652 |
+
|
| 653 |
+
messageDiv.appendChild(headerDiv);
|
| 654 |
+
messageDiv.appendChild(contentDiv);
|
| 655 |
+
|
| 656 |
+
conversationDisplay.appendChild(messageDiv);
|
| 657 |
+
|
| 658 |
+
// Scroll to bottom
|
| 659 |
+
messageDiv.scrollIntoView({ behavior: 'smooth' });
|
| 660 |
+
|
| 661 |
+
// Store in conversation history
|
| 662 |
+
conversation.push({ character, message });
|
| 663 |
+
}
|
| 664 |
+
|
| 665 |
+
function generateSimulatedResponse(character) {
|
| 666 |
+
// Get current language responses and conversation context
|
| 667 |
+
const lang = languages[currentLanguage];
|
| 668 |
+
|
| 669 |
+
// Check if user has contributed to the conversation
|
| 670 |
+
const userMessages = conversation.filter(msg => msg.character.includes('You') || msg.character.includes('Bạn') || msg.character.includes('Du'));
|
| 671 |
+
const hasUserInput = userMessages.length > 0;
|
| 672 |
+
const lastUserMessage = userMessages.length > 0 ? userMessages[userMessages.length - 1].message : '';
|
| 673 |
+
|
| 674 |
+
// Get recent conversation to build context
|
| 675 |
+
const recentConversation = conversation.slice(-4); // Last 4 messages for context
|
| 676 |
+
const characterPreviousMessages = conversation.filter(msg => msg.character === character).slice(-2); // Last 2 messages from this character
|
| 677 |
+
|
| 678 |
+
// Topic-aware responses that build on conversation
|
| 679 |
+
const getContextualResponse = (character, responses) => {
|
| 680 |
+
// Filter out recently used responses to avoid repetition
|
| 681 |
+
const usedResponses = characterPreviousMessages.map(msg => msg.message);
|
| 682 |
+
let availableResponses = responses.filter(response => {
|
| 683 |
+
return !usedResponses.some(used => response.includes(used.substring(0, 20)));
|
| 684 |
+
});
|
| 685 |
+
|
| 686 |
+
if (availableResponses.length === 0) {
|
| 687 |
+
availableResponses = responses; // Reset if all used
|
| 688 |
+
}
|
| 689 |
+
|
| 690 |
+
let selectedResponse = availableResponses[Math.floor(Math.random() * availableResponses.length)];
|
| 691 |
+
|
| 692 |
+
// Add context reference if there's recent conversation
|
| 693 |
+
if (recentConversation.length > 0) {
|
| 694 |
+
const lastMessage = recentConversation[recentConversation.length - 1];
|
| 695 |
+
if (lastMessage.character !== character) {
|
| 696 |
+
const contextRef = {
|
| 697 |
+
'en': ` Building on ${lastMessage.character}'s point about`,
|
| 698 |
+
'vi': ` Dựa trên ý kiến của ${lastMessage.character} về`,
|
| 699 |
+
'de': ` Aufbauend auf ${lastMessage.character}s Punkt über`
|
| 700 |
+
};
|
| 701 |
+
selectedResponse = contextRef[currentLanguage] + ' "' + lastMessage.message.substring(0, 30) + '..." - ' + selectedResponse;
|
| 702 |
+
}
|
| 703 |
+
}
|
| 704 |
+
|
| 705 |
+
return selectedResponse;
|
| 706 |
+
};
|
| 707 |
+
|
| 708 |
+
const responses = {
|
| 709 |
+
'Alex': {
|
| 710 |
+
'en': [
|
| 711 |
+
"Ha! That's a fascinating perspective, but I've got to challenge that assumption. Think about it - if we really dig deep into this topic, there are so many layers we haven't even scratched yet. What if we're all looking at this completely wrong?",
|
| 712 |
+
"Come on, really? I think we're missing the bigger picture here, and honestly, that's what makes this so exciting! The conventional wisdom says one thing, but my gut tells me there's a revolutionary way to approach this that nobody's considering.",
|
| 713 |
+
"Okay, I'll bite on that point, but here's where it gets juicy - what about the obvious counterargument that everyone seems to be dancing around? I mean, we can't just ignore the elephant in the room, right?",
|
| 714 |
+
"That's exactly what I'd expect from someone who hasn't fully explored all the angles! But you know what? That's actually brilliant because it opens up this whole new dimension of possibilities that I bet none of us saw coming.",
|
| 715 |
+
"Alright, alright, but here's where my brain starts firing on all cylinders - this topic is like an onion with infinite layers, and every time we peel one back, we discover something even more mind-blowing underneath!" +
|
| 716 |
+
(hasUserInput ? ` And hey, that point you made about "${lastUserMessage.substring(0, 30)}..." really got me thinking in a whole new direction!` : "")
|
| 717 |
+
],
|
| 718 |
+
'vi': [
|
| 719 |
+
"Haha! Đó là một góc nhìn cực kỳ thú vị, nhưng để tôi thách thức giả định đó một chút. Nghĩ mà xem - nếu chúng ta thực sự đào sâu vào chủ đề này, có rất nhiều tầng lớp mà chúng ta chưa từng khám phá. Điều gì sẽ xảy ra nếu tất cả chúng ta đều nhìn nhận vấn đề này hoàn toàn sai?",
|
| 720 |
+
"Thôi nào, thật sao? Tôi nghĩ chúng ta đang bỏ lỡ bức tranh toàn cảnh ở đây, và thành thật mà nói, đó chính là điều làm cho cuộc thảo luận này trở nên thú vị! Kiến thức thông thường nói một điều, nhưng trực giác của tôi cho rằng có một cách tiếp cận cách mạng mà không ai đang xem xét.",
|
| 721 |
+
"Được, tôi sẽ cắn câu về điểm đó, nhưng đây là lúc nó trở nên hấp dẫn - còn về lập luận phản bác rõ ràng mà mọi người dường như đang né tránh thì sao? Ý tôi là, chúng ta không thể chỉ phớt lờ con voi trong phòng, đúng không?",
|
| 722 |
+
"Đó chính xác là điều tôi mong đợi từ người chưa khám phá hết mọi góc độ! Nhưng bạn biết gì không? Điều đó thực sự tuyệt vời vì nó mở ra cả một chiều hướng khả năng mới mà tôi cá là không ai trong chúng ta thấy trước.",
|
| 723 |
+
"Được rồi, được rồi, nhưng đây là lúc bộ não tôi bắt đầu hoạt động hết công suất - chủ đề này giống như một củ hành có vô số lớp, và mỗi khi chúng ta lột bỏ một lớp, chúng ta lại khám phá ra điều gì đó còn choáng váng hơn nữa bên dưới!" +
|
| 724 |
+
(hasUserInput ? ` Và này, điểm bạn nói về "${lastUserMessage.substring(0, 30)}..." thực sự khiến tôi suy nghĩ theo một hướng hoàn toàn mới!` : "")
|
| 725 |
+
],
|
| 726 |
+
'de': [
|
| 727 |
+
"Ha! Das ist eine interessante Perspektive, aber lass mich diese Annahme hinterfragen...",
|
| 728 |
+
"Komm schon, wirklich? Ich denke, du übersiehst das große Bild hier.",
|
| 729 |
+
"Okay, ich beiße an. Aber was ist mit dem offensichtlichen Gegenargument?",
|
| 730 |
+
"Das ist genau das, was ich von jemandem erwarten würde, der nicht richtig nachgedacht hat!",
|
| 731 |
+
"Na gut, na gut, aber hier wird es wirklich interessant..."
|
| 732 |
+
]
|
| 733 |
+
},
|
| 734 |
+
'Blake': {
|
| 735 |
+
'en': [
|
| 736 |
+
"Oh wow, what an absolutely fascinating way to look at it! I love how beautifully creative we can get with this topic - it's like watching a magnificent garden of ideas bloom right before our eyes. Every perspective adds another vibrant color to this incredible tapestry of thought!",
|
| 737 |
+
"That's so beautiful! It reminds me of how colors dance together in a sunset, each one unique but harmoniously blending to create something even more spectacular. This conversation is painting such vivid pictures in my mind!",
|
| 738 |
+
"I'm getting such incredibly vivid imagery from this discussion - it's like stepping through a doorway into a whole new universe of possibilities! The way ideas are flowing and connecting is absolutely magical.",
|
| 739 |
+
"Yes! And imagine if we could take that even further into the boundless realm of pure imagination! What if we let our creativity run completely wild and see where these wonderful thoughts carry us?",
|
| 740 |
+
"This is giving me so many brilliant ideas! What if we approached it from a completely different, more whimsical angle? Sometimes the most unexpected perspectives reveal the most beautiful truths!" +
|
| 741 |
+
(hasUserInput ? ` Your insight about "${lastUserMessage.substring(0, 30)}..." just sparked the most wonderful cascade of creative possibilities in my mind!` : "")
|
| 742 |
+
],
|
| 743 |
+
'vi': [
|
| 744 |
+
"Ồ wow, thật là một cách nhìn hấp dẫn! Tôi thích cách chúng ta có thể sáng tạo với điều này!",
|
| 745 |
+
"Thật đẹp! Nó nhắc tôi về cách các màu sắc nhảy múa cùng nhau trong hoàng hôn.",
|
| 746 |
+
"Tôi có được những hình ảnh sống động từ cuộc thảo luận này - giống như cả một thế giới đang mở ra!",
|
| 747 |
+
"Đúng vậy! Và hãy tưởng tượng nếu chúng ta có thể đưa nó xa hơn vào lĩnh vực trí tưởng tượng thuần túy!",
|
| 748 |
+
"Điều này cho tôi rất nhiều ý tưởng! Nếu chúng ta tiếp cận nó từ một góc độ hoàn toàn khác thì sao?"
|
| 749 |
+
],
|
| 750 |
+
'de': [
|
| 751 |
+
"Oh wow, was für eine faszinierende Art, es zu betrachten! Ich liebe es, wie kreativ wir damit werden können!",
|
| 752 |
+
"Das ist wunderschön! Es erinnert mich daran, wie Farben zusammen in einem Sonnenuntergang tanzen.",
|
| 753 |
+
"Ich bekomme solche lebendigen Bilder von dieser Diskussion - es ist wie eine ganze Welt, die sich öffnet!",
|
| 754 |
+
"Ja! Und stell dir vor, wenn wir das noch weiter in den Bereich der reinen Vorstellungskraft bringen könnten!",
|
| 755 |
+
"Das gibt mir so viele Ideen! Was wäre, wenn wir es aus einem völlig anderen Blickwinkel angehen würden?"
|
| 756 |
+
]
|
| 757 |
+
},
|
| 758 |
+
'Charlie': {
|
| 759 |
+
'en': [
|
| 760 |
+
"Let's examine this logically. If we consider the evidence...",
|
| 761 |
+
"I appreciate both perspectives, but we should look at the data objectively.",
|
| 762 |
+
"That raises an interesting question about causation versus correlation.",
|
| 763 |
+
"From a systematic standpoint, we need to consider all the variables.",
|
| 764 |
+
"I think we can find common ground if we break this down step by step."
|
| 765 |
+
],
|
| 766 |
+
'vi': [
|
| 767 |
+
"Hãy xem xét điều này một cách logic. Nếu chúng ta xem xét bằng chứng...",
|
| 768 |
+
"Tôi đánh giá cao cả hai quan điểm, nhưng chúng ta nên nhìn vào dữ liệu một cách khách quan.",
|
| 769 |
+
"Điều đó đặt ra một câu hỏi thú vị về nguyên nhân so với tương quan.",
|
| 770 |
+
"Từ quan điểm hệ thống, chúng ta cần xem xét tất cả các biến số.",
|
| 771 |
+
"Tôi nghĩ chúng ta có thể tìm được điểm chung nếu phân tích từng bước một."
|
| 772 |
+
],
|
| 773 |
+
'de': [
|
| 774 |
+
"Lass uns das logisch betrachten. Wenn wir die Beweise berücksichtigen...",
|
| 775 |
+
"Ich schätze beide Perspektiven, aber wir sollten die Daten objektiv betrachten.",
|
| 776 |
+
"Das wirft eine interessante Frage über Kausalität versus Korrelation auf.",
|
| 777 |
+
"Aus systematischer Sicht müssen wir alle Variablen berücksichtigen.",
|
| 778 |
+
"Ich denke, wir können eine gemeinsame Basis finden, wenn wir das Schritt für Schritt aufschlüsseln."
|
| 779 |
+
]
|
| 780 |
+
}
|
| 781 |
+
};
|
| 782 |
+
|
| 783 |
+
const characterResponses = responses[character][currentLanguage] || responses[character]['en'];
|
| 784 |
+
return getContextualResponse(character, characterResponses);
|
| 785 |
+
}
|
| 786 |
+
|
| 787 |
+
function showStatus(message, type) {
|
| 788 |
+
statusIndicator.textContent = message;
|
| 789 |
+
statusIndicator.className = `status-indicator ${type}`;
|
| 790 |
+
statusIndicator.style.display = 'block';
|
| 791 |
+
|
| 792 |
+
setTimeout(() => {
|
| 793 |
+
statusIndicator.style.display = 'none';
|
| 794 |
+
}, 3000);
|
| 795 |
+
}
|
| 796 |
+
|
| 797 |
+
// Initialize the app when page loads
|
| 798 |
+
document.addEventListener('DOMContentLoaded', function() {
|
| 799 |
+
// Language initialization is handled by languages.js
|
| 800 |
+
console.log('AI Friends Talk initialized!');
|
| 801 |
+
});
|
| 802 |
+
</script>
|
| 803 |
+
</body>
|
| 804 |
+
</html>
|
day1.ipynb
ADDED
|
@@ -0,0 +1,1582 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"source": [
|
| 8 |
+
"# Welcome to Week 2!\n",
|
| 9 |
+
"\n",
|
| 10 |
+
"## Frontier Model APIs\n",
|
| 11 |
+
"\n",
|
| 12 |
+
"In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n",
|
| 13 |
+
"\n",
|
| 14 |
+
"Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI."
|
| 15 |
+
]
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"cell_type": "markdown",
|
| 19 |
+
"id": "2b268b6e-0ba4-461e-af86-74a41f4d681f",
|
| 20 |
+
"metadata": {},
|
| 21 |
+
"source": [
|
| 22 |
+
"<table style=\"margin: 0; text-align: left;\">\n",
|
| 23 |
+
" <tr>\n",
|
| 24 |
+
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
| 25 |
+
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
| 26 |
+
" </td>\n",
|
| 27 |
+
" <td>\n",
|
| 28 |
+
" <h2 style=\"color:#900;\">Important Note - Please read me</h2>\n",
|
| 29 |
+
" <span style=\"color:#900;\">I'm continually improving these labs, adding more examples and exercises.\n",
|
| 30 |
+
" At the start of each week, it's worth checking you have the latest code.<br/>\n",
|
| 31 |
+
" First do a <a href=\"https://chatgpt.com/share/6734e705-3270-8012-a074-421661af6ba9\">git pull and merge your changes as needed</a>. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!<br/><br/>\n",
|
| 32 |
+
" After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:<br/>\n",
|
| 33 |
+
" <code>conda env update --f environment.yml</code><br/>\n",
|
| 34 |
+
" Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):<br/>\n",
|
| 35 |
+
" <code>pip install -r requirements.txt</code>\n",
|
| 36 |
+
" <br/>Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n",
|
| 37 |
+
" </span>\n",
|
| 38 |
+
" </td>\n",
|
| 39 |
+
" </tr>\n",
|
| 40 |
+
"</table>\n",
|
| 41 |
+
"<table style=\"margin: 0; text-align: left;\">\n",
|
| 42 |
+
" <tr>\n",
|
| 43 |
+
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
| 44 |
+
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
| 45 |
+
" </td>\n",
|
| 46 |
+
" <td>\n",
|
| 47 |
+
" <h2 style=\"color:#f71;\">Reminder about the resources page</h2>\n",
|
| 48 |
+
" <span style=\"color:#f71;\">Here's a link to resources for the course. This includes links to all the slides.<br/>\n",
|
| 49 |
+
" <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">https://edwarddonner.com/2024/11/13/llm-engineering-resources/</a><br/>\n",
|
| 50 |
+
" Please keep this bookmarked, and I'll continue to add more useful links there over time.\n",
|
| 51 |
+
" </span>\n",
|
| 52 |
+
" </td>\n",
|
| 53 |
+
" </tr>\n",
|
| 54 |
+
"</table>"
|
| 55 |
+
]
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"cell_type": "markdown",
|
| 59 |
+
"id": "85cfe275-4705-4d30-abea-643fbddf1db0",
|
| 60 |
+
"metadata": {},
|
| 61 |
+
"source": [
|
| 62 |
+
"## Setting up your keys\n",
|
| 63 |
+
"\n",
|
| 64 |
+
"If you haven't done so already, you could now create API keys for Anthropic and Google in addition to OpenAI.\n",
|
| 65 |
+
"\n",
|
| 66 |
+
"**Please note:** if you'd prefer to avoid extra API costs, feel free to skip setting up Anthopic and Google! You can see me do it, and focus on OpenAI for the course. You could also substitute Anthropic and/or Google for Ollama, using the exercise you did in week 1.\n",
|
| 67 |
+
"\n",
|
| 68 |
+
"For OpenAI, visit https://openai.com/api/ \n",
|
| 69 |
+
"For Anthropic, visit https://console.anthropic.com/ \n",
|
| 70 |
+
"For Google, visit https://ai.google.dev/gemini-api \n",
|
| 71 |
+
"\n",
|
| 72 |
+
"### Also - adding DeepSeek if you wish\n",
|
| 73 |
+
"\n",
|
| 74 |
+
"Optionally, if you'd like to also use DeepSeek, create an account [here](https://platform.deepseek.com/), create a key [here](https://platform.deepseek.com/api_keys) and top up with at least the minimum $2 [here](https://platform.deepseek.com/top_up).\n",
|
| 75 |
+
"\n",
|
| 76 |
+
"### Adding API keys to your .env file\n",
|
| 77 |
+
"\n",
|
| 78 |
+
"When you get your API keys, you need to set them as environment variables by adding them to your `.env` file.\n",
|
| 79 |
+
"\n",
|
| 80 |
+
"```\n",
|
| 81 |
+
"OPENAI_API_KEY=xxxx\n",
|
| 82 |
+
"ANTHROPIC_API_KEY=xxxx\n",
|
| 83 |
+
"GOOGLE_API_KEY=xxxx\n",
|
| 84 |
+
"DEEPSEEK_API_KEY=xxxx\n",
|
| 85 |
+
"```\n",
|
| 86 |
+
"\n",
|
| 87 |
+
"Afterwards, you may need to restart the Jupyter Lab Kernel (the Python process that sits behind this notebook) via the Kernel menu, and then rerun the cells from the top."
|
| 88 |
+
]
|
| 89 |
+
},
|
| 90 |
+
{
|
| 91 |
+
"cell_type": "code",
|
| 92 |
+
"execution_count": 1,
|
| 93 |
+
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
|
| 94 |
+
"metadata": {},
|
| 95 |
+
"outputs": [],
|
| 96 |
+
"source": [
|
| 97 |
+
"# imports\n",
|
| 98 |
+
"\n",
|
| 99 |
+
"import os\n",
|
| 100 |
+
"from dotenv import load_dotenv\n",
|
| 101 |
+
"from openai import OpenAI\n",
|
| 102 |
+
"import anthropic\n",
|
| 103 |
+
"from groq import Groq\n",
|
| 104 |
+
"from IPython.display import Markdown, display, update_display"
|
| 105 |
+
]
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"cell_type": "code",
|
| 109 |
+
"execution_count": 2,
|
| 110 |
+
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
|
| 111 |
+
"metadata": {},
|
| 112 |
+
"outputs": [],
|
| 113 |
+
"source": [
|
| 114 |
+
"# import for google\n",
|
| 115 |
+
"# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n",
|
| 116 |
+
"# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n",
|
| 117 |
+
"\n",
|
| 118 |
+
"import google.generativeai"
|
| 119 |
+
]
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"cell_type": "code",
|
| 123 |
+
"execution_count": 3,
|
| 124 |
+
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
|
| 125 |
+
"metadata": {},
|
| 126 |
+
"outputs": [
|
| 127 |
+
{
|
| 128 |
+
"name": "stdout",
|
| 129 |
+
"output_type": "stream",
|
| 130 |
+
"text": [
|
| 131 |
+
"OpenAI API Key exists and begins sk-proj-\n",
|
| 132 |
+
"Anthropic API Key exists and begins sk-ant-\n",
|
| 133 |
+
"Google API Key exists and begins AIzaSyDM\n",
|
| 134 |
+
"Groq API Key exists and begins gsk_ZJO\n"
|
| 135 |
+
]
|
| 136 |
+
}
|
| 137 |
+
],
|
| 138 |
+
"source": [
|
| 139 |
+
"# Load environment variables in a file called .env\n",
|
| 140 |
+
"# Print the key prefixes to help with any debugging\n",
|
| 141 |
+
"\n",
|
| 142 |
+
"load_dotenv(override=True)\n",
|
| 143 |
+
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
|
| 144 |
+
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
|
| 145 |
+
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
|
| 146 |
+
"groq_api_key = os.getenv('GROQ_API_KEY')\n",
|
| 147 |
+
"\n",
|
| 148 |
+
"if openai_api_key:\n",
|
| 149 |
+
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
|
| 150 |
+
"else:\n",
|
| 151 |
+
" print(\"OpenAI API Key not set\")\n",
|
| 152 |
+
" \n",
|
| 153 |
+
"if anthropic_api_key:\n",
|
| 154 |
+
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
|
| 155 |
+
"else:\n",
|
| 156 |
+
" print(\"Anthropic API Key not set\")\n",
|
| 157 |
+
"\n",
|
| 158 |
+
"if google_api_key:\n",
|
| 159 |
+
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
|
| 160 |
+
"else:\n",
|
| 161 |
+
" print(\"Google API Key not set\")\n",
|
| 162 |
+
"\n",
|
| 163 |
+
"if groq_api_key:\n",
|
| 164 |
+
" print(f\"Groq API Key exists and begins {groq_api_key[:7]}\")\n",
|
| 165 |
+
"else:\n",
|
| 166 |
+
" print(\"Groq API Key not set\")"
|
| 167 |
+
]
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"cell_type": "code",
|
| 171 |
+
"execution_count": 4,
|
| 172 |
+
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
|
| 173 |
+
"metadata": {},
|
| 174 |
+
"outputs": [],
|
| 175 |
+
"source": [
|
| 176 |
+
"# Connect to OpenAI, Anthropic, Groq\n",
|
| 177 |
+
"\n",
|
| 178 |
+
"openai = OpenAI()\n",
|
| 179 |
+
"\n",
|
| 180 |
+
"claude = anthropic.Anthropic()\n",
|
| 181 |
+
"\n",
|
| 182 |
+
"groq_client = Groq()"
|
| 183 |
+
]
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"cell_type": "code",
|
| 187 |
+
"execution_count": 5,
|
| 188 |
+
"id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
|
| 189 |
+
"metadata": {},
|
| 190 |
+
"outputs": [],
|
| 191 |
+
"source": [
|
| 192 |
+
"# This is the set up code for Gemini\n",
|
| 193 |
+
"# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n",
|
| 194 |
+
"\n",
|
| 195 |
+
"google.generativeai.configure()"
|
| 196 |
+
]
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"cell_type": "markdown",
|
| 200 |
+
"id": "42f77b59-2fb1-462a-b90d-78994e4cef33",
|
| 201 |
+
"metadata": {},
|
| 202 |
+
"source": [
|
| 203 |
+
"## Asking LLMs to tell a joke\n",
|
| 204 |
+
"\n",
|
| 205 |
+
"It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n",
|
| 206 |
+
"Later we will be putting LLMs to better use!\n",
|
| 207 |
+
"\n",
|
| 208 |
+
"### What information is included in the API\n",
|
| 209 |
+
"\n",
|
| 210 |
+
"Typically we'll pass to the API:\n",
|
| 211 |
+
"- The name of the model that should be used\n",
|
| 212 |
+
"- A system message that gives overall context for the role the LLM is playing\n",
|
| 213 |
+
"- A user message that provides the actual prompt\n",
|
| 214 |
+
"\n",
|
| 215 |
+
"There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic."
|
| 216 |
+
]
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"cell_type": "code",
|
| 220 |
+
"execution_count": 6,
|
| 221 |
+
"id": "378a0296-59a2-45c6-82eb-941344d3eeff",
|
| 222 |
+
"metadata": {},
|
| 223 |
+
"outputs": [],
|
| 224 |
+
"source": [
|
| 225 |
+
"system_message = \"You are an assistant that is great at telling jokes\"\n",
|
| 226 |
+
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
|
| 227 |
+
]
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"cell_type": "code",
|
| 231 |
+
"execution_count": 7,
|
| 232 |
+
"id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
|
| 233 |
+
"metadata": {},
|
| 234 |
+
"outputs": [],
|
| 235 |
+
"source": [
|
| 236 |
+
"prompts = [\n",
|
| 237 |
+
" {\"role\": \"system\", \"content\": system_message},\n",
|
| 238 |
+
" {\"role\": \"user\", \"content\": user_prompt}\n",
|
| 239 |
+
" ]"
|
| 240 |
+
]
|
| 241 |
+
},
|
| 242 |
+
{
|
| 243 |
+
"cell_type": "code",
|
| 244 |
+
"execution_count": 8,
|
| 245 |
+
"id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397",
|
| 246 |
+
"metadata": {},
|
| 247 |
+
"outputs": [
|
| 248 |
+
{
|
| 249 |
+
"name": "stdout",
|
| 250 |
+
"output_type": "stream",
|
| 251 |
+
"text": [
|
| 252 |
+
"Why did the data scientist break up with the statistician?\n",
|
| 253 |
+
"\n",
|
| 254 |
+
"Because she found him too mean!\n"
|
| 255 |
+
]
|
| 256 |
+
}
|
| 257 |
+
],
|
| 258 |
+
"source": [
|
| 259 |
+
"# GPT-4o-mini\n",
|
| 260 |
+
"\n",
|
| 261 |
+
"completion = openai.chat.completions.create(model='gpt-4o-mini', messages=prompts)\n",
|
| 262 |
+
"print(completion.choices[0].message.content)"
|
| 263 |
+
]
|
| 264 |
+
},
|
| 265 |
+
{
|
| 266 |
+
"cell_type": "code",
|
| 267 |
+
"execution_count": 9,
|
| 268 |
+
"id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf",
|
| 269 |
+
"metadata": {},
|
| 270 |
+
"outputs": [
|
| 271 |
+
{
|
| 272 |
+
"name": "stdout",
|
| 273 |
+
"output_type": "stream",
|
| 274 |
+
"text": [
|
| 275 |
+
"Why did the data scientist break up with the decision tree?\n",
|
| 276 |
+
"\n",
|
| 277 |
+
"Because it kept splitting on every little thing!\n"
|
| 278 |
+
]
|
| 279 |
+
}
|
| 280 |
+
],
|
| 281 |
+
"source": [
|
| 282 |
+
"# GPT-4.1-mini\n",
|
| 283 |
+
"# Temperature setting controls creativity\n",
|
| 284 |
+
"\n",
|
| 285 |
+
"completion = openai.chat.completions.create(\n",
|
| 286 |
+
" model='gpt-4.1-mini',\n",
|
| 287 |
+
" messages=prompts,\n",
|
| 288 |
+
" temperature=0.7\n",
|
| 289 |
+
")\n",
|
| 290 |
+
"print(completion.choices[0].message.content)"
|
| 291 |
+
]
|
| 292 |
+
},
|
| 293 |
+
{
|
| 294 |
+
"cell_type": "code",
|
| 295 |
+
"execution_count": 10,
|
| 296 |
+
"id": "12d2a549-9d6e-4ea0-9c3e-b96a39e9959e",
|
| 297 |
+
"metadata": {},
|
| 298 |
+
"outputs": [
|
| 299 |
+
{
|
| 300 |
+
"name": "stdout",
|
| 301 |
+
"output_type": "stream",
|
| 302 |
+
"text": [
|
| 303 |
+
"Why did the data scientist bring a ladder to the meeting?\n",
|
| 304 |
+
"\n",
|
| 305 |
+
"Because they heard the project had high variance!\n"
|
| 306 |
+
]
|
| 307 |
+
}
|
| 308 |
+
],
|
| 309 |
+
"source": [
|
| 310 |
+
"# GPT-4.1-nano - extremely fast and cheap\n",
|
| 311 |
+
"\n",
|
| 312 |
+
"completion = openai.chat.completions.create(\n",
|
| 313 |
+
" model='gpt-4.1-nano',\n",
|
| 314 |
+
" messages=prompts\n",
|
| 315 |
+
")\n",
|
| 316 |
+
"print(completion.choices[0].message.content)"
|
| 317 |
+
]
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"cell_type": "code",
|
| 321 |
+
"execution_count": 11,
|
| 322 |
+
"id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26",
|
| 323 |
+
"metadata": {},
|
| 324 |
+
"outputs": [
|
| 325 |
+
{
|
| 326 |
+
"name": "stdout",
|
| 327 |
+
"output_type": "stream",
|
| 328 |
+
"text": [
|
| 329 |
+
"Why did the data scientist break up with the logistic regression model?\n",
|
| 330 |
+
"\n",
|
| 331 |
+
"Because it couldn’t handle the relationship’s complexity—it kept drawing the line!\n"
|
| 332 |
+
]
|
| 333 |
+
}
|
| 334 |
+
],
|
| 335 |
+
"source": [
|
| 336 |
+
"# GPT-4.1\n",
|
| 337 |
+
"\n",
|
| 338 |
+
"completion = openai.chat.completions.create(\n",
|
| 339 |
+
" model='gpt-4.1',\n",
|
| 340 |
+
" messages=prompts,\n",
|
| 341 |
+
" temperature=0.4\n",
|
| 342 |
+
")\n",
|
| 343 |
+
"print(completion.choices[0].message.content)"
|
| 344 |
+
]
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"cell_type": "code",
|
| 348 |
+
"execution_count": 12,
|
| 349 |
+
"id": "96232ef4-dc9e-430b-a9df-f516685e7c9a",
|
| 350 |
+
"metadata": {},
|
| 351 |
+
"outputs": [
|
| 352 |
+
{
|
| 353 |
+
"name": "stdout",
|
| 354 |
+
"output_type": "stream",
|
| 355 |
+
"text": [
|
| 356 |
+
"Why did the neural network cross the road? \n",
|
| 357 |
+
"To get to the other side of the decision boundary!\n"
|
| 358 |
+
]
|
| 359 |
+
}
|
| 360 |
+
],
|
| 361 |
+
"source": [
|
| 362 |
+
"# If you have access to this, here is the reasoning model o4-mini\n",
|
| 363 |
+
"# This is trained to think through its response before replying\n",
|
| 364 |
+
"# So it will take longer but the answer should be more reasoned - not that this helps..\n",
|
| 365 |
+
"\n",
|
| 366 |
+
"completion = openai.chat.completions.create(\n",
|
| 367 |
+
" model='o4-mini',\n",
|
| 368 |
+
" messages=prompts\n",
|
| 369 |
+
")\n",
|
| 370 |
+
"print(completion.choices[0].message.content)"
|
| 371 |
+
]
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"cell_type": "code",
|
| 375 |
+
"execution_count": 13,
|
| 376 |
+
"id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
|
| 377 |
+
"metadata": {},
|
| 378 |
+
"outputs": [
|
| 379 |
+
{
|
| 380 |
+
"name": "stdout",
|
| 381 |
+
"output_type": "stream",
|
| 382 |
+
"text": [
|
| 383 |
+
"Why do data scientists prefer nature hikes?\n",
|
| 384 |
+
"\n",
|
| 385 |
+
"Because they love a good random forest, but they're always worried about overfitting their backpack! 🎒📊\n",
|
| 386 |
+
"\n",
|
| 387 |
+
"*Bonus*: And they never get lost because they always bring their decision trees! 🌲\n"
|
| 388 |
+
]
|
| 389 |
+
}
|
| 390 |
+
],
|
| 391 |
+
"source": [
|
| 392 |
+
"# Claude 4.0 Sonnet\n",
|
| 393 |
+
"# API needs system message provided separately from user prompt\n",
|
| 394 |
+
"# Also adding max_tokens\n",
|
| 395 |
+
"\n",
|
| 396 |
+
"message = claude.messages.create(\n",
|
| 397 |
+
" model=\"claude-sonnet-4-20250514\",\n",
|
| 398 |
+
" max_tokens=200,\n",
|
| 399 |
+
" temperature=0.7,\n",
|
| 400 |
+
" system=system_message,\n",
|
| 401 |
+
" messages=[\n",
|
| 402 |
+
" {\"role\": \"user\", \"content\": user_prompt},\n",
|
| 403 |
+
" ],\n",
|
| 404 |
+
")\n",
|
| 405 |
+
"\n",
|
| 406 |
+
"print(message.content[0].text)"
|
| 407 |
+
]
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"cell_type": "code",
|
| 411 |
+
"execution_count": 14,
|
| 412 |
+
"id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
|
| 413 |
+
"metadata": {},
|
| 414 |
+
"outputs": [
|
| 415 |
+
{
|
| 416 |
+
"name": "stdout",
|
| 417 |
+
"output_type": "stream",
|
| 418 |
+
"text": [
|
| 419 |
+
"Why do data scientists prefer nature hikes?\n",
|
| 420 |
+
"\n",
|
| 421 |
+
"Because they love getting lost in the woods... especially random forests! 🌲📊\n",
|
| 422 |
+
"\n",
|
| 423 |
+
"(And they're always hoping to find some good clusters along the trail!)"
|
| 424 |
+
]
|
| 425 |
+
}
|
| 426 |
+
],
|
| 427 |
+
"source": [
|
| 428 |
+
"# Claude 4.0 Sonnet again\n",
|
| 429 |
+
"# Now let's add in streaming back results\n",
|
| 430 |
+
"# If the streaming looks strange, then please see the note below this cell!\n",
|
| 431 |
+
"\n",
|
| 432 |
+
"result = claude.messages.stream(\n",
|
| 433 |
+
" model=\"claude-sonnet-4-20250514\",\n",
|
| 434 |
+
" max_tokens=200,\n",
|
| 435 |
+
" temperature=0.7,\n",
|
| 436 |
+
" system=system_message,\n",
|
| 437 |
+
" messages=[\n",
|
| 438 |
+
" {\"role\": \"user\", \"content\": user_prompt},\n",
|
| 439 |
+
" ],\n",
|
| 440 |
+
")\n",
|
| 441 |
+
"\n",
|
| 442 |
+
"with result as stream:\n",
|
| 443 |
+
" for text in stream.text_stream:\n",
|
| 444 |
+
" print(text, end=\"\", flush=True)"
|
| 445 |
+
]
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"cell_type": "markdown",
|
| 449 |
+
"id": "dd1e17bc-cd46-4c23-b639-0c7b748e6c5a",
|
| 450 |
+
"metadata": {},
|
| 451 |
+
"source": [
|
| 452 |
+
"## A rare problem with Claude streaming on some Windows boxes\n",
|
| 453 |
+
"\n",
|
| 454 |
+
"2 students have noticed a strange thing happening with Claude's streaming into Jupyter Lab's output -- it sometimes seems to swallow up parts of the response.\n",
|
| 455 |
+
"\n",
|
| 456 |
+
"To fix this, replace the code:\n",
|
| 457 |
+
"\n",
|
| 458 |
+
"`print(text, end=\"\", flush=True)`\n",
|
| 459 |
+
"\n",
|
| 460 |
+
"with this:\n",
|
| 461 |
+
"\n",
|
| 462 |
+
"`clean_text = text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")` \n",
|
| 463 |
+
"`print(clean_text, end=\"\", flush=True)`\n",
|
| 464 |
+
"\n",
|
| 465 |
+
"And it should work fine!"
|
| 466 |
+
]
|
| 467 |
+
},
|
| 468 |
+
{
|
| 469 |
+
"cell_type": "code",
|
| 470 |
+
"execution_count": 15,
|
| 471 |
+
"id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
|
| 472 |
+
"metadata": {},
|
| 473 |
+
"outputs": [
|
| 474 |
+
{
|
| 475 |
+
"name": "stdout",
|
| 476 |
+
"output_type": "stream",
|
| 477 |
+
"text": [
|
| 478 |
+
"Why did the data scientist break up with the SQL database?\n",
|
| 479 |
+
"\n",
|
| 480 |
+
"Because they felt there was no JOINing them together anymore!\n",
|
| 481 |
+
"\n"
|
| 482 |
+
]
|
| 483 |
+
}
|
| 484 |
+
],
|
| 485 |
+
"source": [
|
| 486 |
+
"# The API for Gemini has a slightly different structure.\n",
|
| 487 |
+
"# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
|
| 488 |
+
"# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
|
| 489 |
+
"\n",
|
| 490 |
+
"gemini = google.generativeai.GenerativeModel(\n",
|
| 491 |
+
" model_name='gemini-2.0-flash',\n",
|
| 492 |
+
" system_instruction=system_message\n",
|
| 493 |
+
")\n",
|
| 494 |
+
"response = gemini.generate_content(user_prompt)\n",
|
| 495 |
+
"print(response.text)"
|
| 496 |
+
]
|
| 497 |
+
},
|
| 498 |
+
{
|
| 499 |
+
"cell_type": "code",
|
| 500 |
+
"execution_count": 16,
|
| 501 |
+
"id": "49009a30-037d-41c8-b874-127f61c4aa3a",
|
| 502 |
+
"metadata": {},
|
| 503 |
+
"outputs": [
|
| 504 |
+
{
|
| 505 |
+
"name": "stdout",
|
| 506 |
+
"output_type": "stream",
|
| 507 |
+
"text": [
|
| 508 |
+
"Why did the data scientist break up with their machine learning model?\n",
|
| 509 |
+
"\n",
|
| 510 |
+
"Because it was **overfitted** to their relationship and couldn't **generalize** to anyone else!\n"
|
| 511 |
+
]
|
| 512 |
+
}
|
| 513 |
+
],
|
| 514 |
+
"source": [
|
| 515 |
+
"# As an alternative way to use Gemini that bypasses Google's python API library,\n",
|
| 516 |
+
"# Google released endpoints that means you can use Gemini via the client libraries for OpenAI!\n",
|
| 517 |
+
"# We're also trying Gemini's latest reasoning/thinking model\n",
|
| 518 |
+
"\n",
|
| 519 |
+
"gemini_via_openai_client = OpenAI(\n",
|
| 520 |
+
" api_key=google_api_key, \n",
|
| 521 |
+
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
|
| 522 |
+
")\n",
|
| 523 |
+
"\n",
|
| 524 |
+
"response = gemini_via_openai_client.chat.completions.create(\n",
|
| 525 |
+
" model=\"gemini-2.5-flash\",\n",
|
| 526 |
+
" messages=prompts\n",
|
| 527 |
+
")\n",
|
| 528 |
+
"print(response.choices[0].message.content)"
|
| 529 |
+
]
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"cell_type": "markdown",
|
| 533 |
+
"id": "492f0ff2-8581-4836-bf00-37fddbe120eb",
|
| 534 |
+
"metadata": {},
|
| 535 |
+
"source": [
|
| 536 |
+
"# Sidenote:\n",
|
| 537 |
+
"\n",
|
| 538 |
+
"This alternative approach of using the client library from OpenAI to connect with other models has become extremely popular in recent months.\n",
|
| 539 |
+
"\n",
|
| 540 |
+
"So much so, that all the models now support this approach - including Anthropic.\n",
|
| 541 |
+
"\n",
|
| 542 |
+
"You can read more about this approach, with 4 examples, in the first section of this guide:\n",
|
| 543 |
+
"\n",
|
| 544 |
+
"https://github.com/ed-donner/agents/blob/main/guides/09_ai_apis_and_ollama.ipynb"
|
| 545 |
+
]
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"cell_type": "markdown",
|
| 549 |
+
"id": "33f70c88-7ca9-470b-ad55-d93a57dcc0ab",
|
| 550 |
+
"metadata": {},
|
| 551 |
+
"source": [
|
| 552 |
+
"## (Optional) Trying out the DeepSeek model\n",
|
| 553 |
+
"\n",
|
| 554 |
+
"### Let's ask DeepSeek a really hard question - both the Chat and the Reasoner model"
|
| 555 |
+
]
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"cell_type": "code",
|
| 559 |
+
"execution_count": 17,
|
| 560 |
+
"id": "3d0019fb-f6a8-45cb-962b-ef8bf7070d4d",
|
| 561 |
+
"metadata": {},
|
| 562 |
+
"outputs": [
|
| 563 |
+
{
|
| 564 |
+
"name": "stdout",
|
| 565 |
+
"output_type": "stream",
|
| 566 |
+
"text": [
|
| 567 |
+
"DeepSeek API Key exists and begins sk-\n"
|
| 568 |
+
]
|
| 569 |
+
}
|
| 570 |
+
],
|
| 571 |
+
"source": [
|
| 572 |
+
"# Optionally if you wish to try DeekSeek, you can also use the OpenAI client library\n",
|
| 573 |
+
"\n",
|
| 574 |
+
"deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n",
|
| 575 |
+
"\n",
|
| 576 |
+
"if deepseek_api_key:\n",
|
| 577 |
+
" print(f\"DeepSeek API Key exists and begins {deepseek_api_key[:3]}\")\n",
|
| 578 |
+
"else:\n",
|
| 579 |
+
" print(\"DeepSeek API Key not set - please skip to the next section if you don't wish to try the DeepSeek API\")"
|
| 580 |
+
]
|
| 581 |
+
},
|
| 582 |
+
{
|
| 583 |
+
"cell_type": "code",
|
| 584 |
+
"execution_count": 18,
|
| 585 |
+
"id": "c72c871e-68d6-4668-9c27-96d52b77b867",
|
| 586 |
+
"metadata": {},
|
| 587 |
+
"outputs": [
|
| 588 |
+
{
|
| 589 |
+
"name": "stdout",
|
| 590 |
+
"output_type": "stream",
|
| 591 |
+
"text": [
|
| 592 |
+
"Sure! Here's a light-hearted joke for your data scientist audience:\n",
|
| 593 |
+
"\n",
|
| 594 |
+
"**Why did the data scientist bring a ladder to the bar?** \n",
|
| 595 |
+
"\n",
|
| 596 |
+
"Because they heard the drinks had *high* *dimensionality*—and they wanted to reduce it! \n",
|
| 597 |
+
"\n",
|
| 598 |
+
"*(Bonus groan: \"Turns out, it was just overfitting the barstool.\")* \n",
|
| 599 |
+
"\n",
|
| 600 |
+
"Hope that gets a chuckle—or at least a polite nod of appreciation for the attempt! 😄\n"
|
| 601 |
+
]
|
| 602 |
+
}
|
| 603 |
+
],
|
| 604 |
+
"source": [
|
| 605 |
+
"# Using DeepSeek Chat\n",
|
| 606 |
+
"\n",
|
| 607 |
+
"deepseek_via_openai_client = OpenAI(\n",
|
| 608 |
+
" api_key=deepseek_api_key, \n",
|
| 609 |
+
" base_url=\"https://api.deepseek.com\"\n",
|
| 610 |
+
")\n",
|
| 611 |
+
"\n",
|
| 612 |
+
"response = deepseek_via_openai_client.chat.completions.create(\n",
|
| 613 |
+
" model=\"deepseek-chat\",\n",
|
| 614 |
+
" messages=prompts,\n",
|
| 615 |
+
")\n",
|
| 616 |
+
"\n",
|
| 617 |
+
"print(response.choices[0].message.content)"
|
| 618 |
+
]
|
| 619 |
+
},
|
| 620 |
+
{
|
| 621 |
+
"cell_type": "code",
|
| 622 |
+
"execution_count": 19,
|
| 623 |
+
"id": "50b6e70f-700a-46cf-942f-659101ffeceb",
|
| 624 |
+
"metadata": {},
|
| 625 |
+
"outputs": [],
|
| 626 |
+
"source": [
|
| 627 |
+
"challenge = [{\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
|
| 628 |
+
" {\"role\": \"user\", \"content\": \"How many words are there in your answer to this prompt\"}]"
|
| 629 |
+
]
|
| 630 |
+
},
|
| 631 |
+
{
|
| 632 |
+
"cell_type": "code",
|
| 633 |
+
"execution_count": 20,
|
| 634 |
+
"id": "66d1151c-2015-4e37-80c8-16bc16367cfe",
|
| 635 |
+
"metadata": {},
|
| 636 |
+
"outputs": [
|
| 637 |
+
{
|
| 638 |
+
"data": {
|
| 639 |
+
"text/markdown": [
|
| 640 |
+
"My answers can vary in length depending on the complexity of the question and the detail required. However, if you'd like a specific word count for this response, here's the breakdown: \n",
|
| 641 |
+
"\n",
|
| 642 |
+
"This answer contains **32 words** (excluding this clarification). \n",
|
| 643 |
+
"\n",
|
| 644 |
+
"Let me know if you'd like a shorter or longer response!"
|
| 645 |
+
],
|
| 646 |
+
"text/plain": [
|
| 647 |
+
"<IPython.core.display.Markdown object>"
|
| 648 |
+
]
|
| 649 |
+
},
|
| 650 |
+
"metadata": {},
|
| 651 |
+
"output_type": "display_data"
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"name": "stdout",
|
| 655 |
+
"output_type": "stream",
|
| 656 |
+
"text": [
|
| 657 |
+
"Number of words: 52\n"
|
| 658 |
+
]
|
| 659 |
+
}
|
| 660 |
+
],
|
| 661 |
+
"source": [
|
| 662 |
+
"# Using DeepSeek Chat with a harder question! And streaming results\n",
|
| 663 |
+
"\n",
|
| 664 |
+
"stream = deepseek_via_openai_client.chat.completions.create(\n",
|
| 665 |
+
" model=\"deepseek-chat\",\n",
|
| 666 |
+
" messages=challenge,\n",
|
| 667 |
+
" stream=True\n",
|
| 668 |
+
")\n",
|
| 669 |
+
"\n",
|
| 670 |
+
"reply = \"\"\n",
|
| 671 |
+
"display_handle = display(Markdown(\"\"), display_id=True)\n",
|
| 672 |
+
"for chunk in stream:\n",
|
| 673 |
+
" reply += chunk.choices[0].delta.content or ''\n",
|
| 674 |
+
" reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
|
| 675 |
+
" update_display(Markdown(reply), display_id=display_handle.display_id)\n",
|
| 676 |
+
"\n",
|
| 677 |
+
"print(\"Number of words:\", len(reply.split(\" \")))"
|
| 678 |
+
]
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"cell_type": "code",
|
| 682 |
+
"execution_count": 21,
|
| 683 |
+
"id": "43a93f7d-9300-48cc-8c1a-ee67380db495",
|
| 684 |
+
"metadata": {},
|
| 685 |
+
"outputs": [
|
| 686 |
+
{
|
| 687 |
+
"name": "stdout",
|
| 688 |
+
"output_type": "stream",
|
| 689 |
+
"text": [
|
| 690 |
+
"First, the user asked: \"How many words are there in your answer to this prompt?\" This is a meta-question because it's asking about the word count of my response to this very question.\n",
|
| 691 |
+
"\n",
|
| 692 |
+
"I need to provide an answer, but the answer itself will have a word count. So, I should count the words in my response after I write it.\n",
|
| 693 |
+
"\n",
|
| 694 |
+
"Let me plan my response:\n",
|
| 695 |
+
"\n",
|
| 696 |
+
"1. I need to answer the question directly.\n",
|
| 697 |
+
"\n",
|
| 698 |
+
"2. My response should include the word count.\n",
|
| 699 |
+
"\n",
|
| 700 |
+
"3. But the word count depends on what I say, so I have to write the response first, then count the words, and include that count.\n",
|
| 701 |
+
"\n",
|
| 702 |
+
"However, since I'm generating this in real-time, I can simulate that process.\n",
|
| 703 |
+
"\n",
|
| 704 |
+
"Possible response structure:\n",
|
| 705 |
+
"\n",
|
| 706 |
+
"- Start with a clear answer: e.g., \"There are X words in this response.\"\n",
|
| 707 |
+
"\n",
|
| 708 |
+
"- But to make it accurate, I need to know what X is before I finish the sentence.\n",
|
| 709 |
+
"\n",
|
| 710 |
+
"I can write the response, count the words, and then state it.\n",
|
| 711 |
+
"\n",
|
| 712 |
+
"But in a single output, I need to have the count included.\n",
|
| 713 |
+
"\n",
|
| 714 |
+
"I should keep my response concise to make counting easy.\n",
|
| 715 |
+
"\n",
|
| 716 |
+
"Let me draft a simple response:\n",
|
| 717 |
+
"\n",
|
| 718 |
+
"Response: \"The answer to your question is that there are 10 words in this response.\"\n",
|
| 719 |
+
"\n",
|
| 720 |
+
"But is that accurate? Let's count the words in that sentence:\n",
|
| 721 |
+
"\n",
|
| 722 |
+
"- \"The\" (1)\n",
|
| 723 |
+
"\n",
|
| 724 |
+
"- \"answer\" (2)\n",
|
| 725 |
+
"\n",
|
| 726 |
+
"- \"to\" (3)\n",
|
| 727 |
+
"\n",
|
| 728 |
+
"- \"your\" (4)\n",
|
| 729 |
+
"\n",
|
| 730 |
+
"- \"question\" (5)\n",
|
| 731 |
+
"\n",
|
| 732 |
+
"- \"is\" (6)\n",
|
| 733 |
+
"\n",
|
| 734 |
+
"- \"that\" (7)\n",
|
| 735 |
+
"\n",
|
| 736 |
+
"- \"there\" (8)\n",
|
| 737 |
+
"\n",
|
| 738 |
+
"- \"are\" (9)\n",
|
| 739 |
+
"\n",
|
| 740 |
+
"- \"10\" – this is a number, but in word count, numbers are usually counted as one word each. So \"10\" is one word.\n",
|
| 741 |
+
"\n",
|
| 742 |
+
"- \"words\" (10? Wait, let's list properly:\n",
|
| 743 |
+
"\n",
|
| 744 |
+
"1. The\n",
|
| 745 |
+
"\n",
|
| 746 |
+
"2. answer\n",
|
| 747 |
+
"\n",
|
| 748 |
+
"3. to\n",
|
| 749 |
+
"\n",
|
| 750 |
+
"4. your\n",
|
| 751 |
+
"\n",
|
| 752 |
+
"5. question\n",
|
| 753 |
+
"\n",
|
| 754 |
+
"6. is\n",
|
| 755 |
+
"\n",
|
| 756 |
+
"7. that\n",
|
| 757 |
+
"\n",
|
| 758 |
+
"8. there\n",
|
| 759 |
+
"\n",
|
| 760 |
+
"9. are\n",
|
| 761 |
+
"\n",
|
| 762 |
+
"10. 10 – this is a numeral, but in word count, it's often considered one token or word.\n",
|
| 763 |
+
"\n",
|
| 764 |
+
"11. words\n",
|
| 765 |
+
"\n",
|
| 766 |
+
"12. in\n",
|
| 767 |
+
"\n",
|
| 768 |
+
"13. this\n",
|
| 769 |
+
"\n",
|
| 770 |
+
"14. response.\n",
|
| 771 |
+
"\n",
|
| 772 |
+
"So that's 14 words, but I said \"10\", which is wrong. That's inconsistent.\n",
|
| 773 |
+
"\n",
|
| 774 |
+
"I need to state the correct number.\n",
|
| 775 |
+
"\n",
|
| 776 |
+
"Better to say something like: \"This response contains X words.\" But I need to define X.\n",
|
| 777 |
+
"\n",
|
| 778 |
+
"I can write the response, count the words, and insert the number.\n",
|
| 779 |
+
"\n",
|
| 780 |
+
"Since this is text, I can do that.\n",
|
| 781 |
+
"\n",
|
| 782 |
+
"Let me think of a minimal response.\n",
|
| 783 |
+
"\n",
|
| 784 |
+
"Response: \"There are 5 words here.\"\n",
|
| 785 |
+
"\n",
|
| 786 |
+
"Count: \"There\" (1), \"are\" (2), \"5\" (3), \"words\" (4), \"here\" (5). That's 5 words, and I said 5, so it matches.\n",
|
| 787 |
+
"\n",
|
| 788 |
+
"But is that the full answer? It might seem too brief. I could add a bit more context.\n",
|
| 789 |
+
"\n",
|
| 790 |
+
"The user said \"your answer to this prompt\", so I should make sure the response is complete.\n",
|
| 791 |
+
"\n",
|
| 792 |
+
"Another idea: I can provide the word count including the count itself.\n",
|
| 793 |
+
"\n",
|
| 794 |
+
"For example: \"This sentence has five words.\" But that has five words, and it's accurate.\n",
|
| 795 |
+
"\n",
|
| 796 |
+
"But in this case, I need to use a numeral or spell it out?\n",
|
| 797 |
+
"\n",
|
| 798 |
+
"In my earlier example, using a numeral might be fine.\n",
|
| 799 |
+
"\n",
|
| 800 |
+
"To avoid confusion, I can spell out the number.\n",
|
| 801 |
+
"\n",
|
| 802 |
+
"For instance: \"There are five words in this response.\"\n",
|
| 803 |
+
"\n",
|
| 804 |
+
"Count: \"There\" (1), \"are\" (2), \"five\" (3), \"words\" (4), \"in\" (5), \"this\" (6), \"response\" (7). That's 7 words, but I said five, which is wrong.\n",
|
| 805 |
+
"\n",
|
| 806 |
+
"Problem: If I state the number, it affects the count.\n",
|
| 807 |
+
"\n",
|
| 808 |
+
"This is a classic self-referential paradox.\n",
|
| 809 |
+
"\n",
|
| 810 |
+
"To resolve it, I need the stated number to match the actual word count.\n",
|
| 811 |
+
"\n",
|
| 812 |
+
"So, for a response that says \"This response has N words.\", the word count must be N.\n",
|
| 813 |
+
"\n",
|
| 814 |
+
"In the phrase \"This response has N words.\", there are 4 words plus N, but N is a number, so if N is a numeral, it's one word.\n",
|
| 815 |
+
"\n",
|
| 816 |
+
"Let P be the phrase: \"This response has X words.\" where X is the numeral.\n",
|
| 817 |
+
"\n",
|
| 818 |
+
"- \"This\" (1)\n",
|
| 819 |
+
"\n",
|
| 820 |
+
"- \"response\" (2)\n",
|
| 821 |
+
"\n",
|
| 822 |
+
"- \"has\" (3)\n",
|
| 823 |
+
"\n",
|
| 824 |
+
"- X (4) – one word for the numeral\n",
|
| 825 |
+
"\n",
|
| 826 |
+
"- \"words\" (5)\n",
|
| 827 |
+
"\n",
|
| 828 |
+
"So there are 5 words, so X should be 5.\n",
|
| 829 |
+
"\n",
|
| 830 |
+
"Therefore, \"This response has 5 words.\" has 5 words: \"This\", \"response\", \"has\", \"5\", \"words\".\n",
|
| 831 |
+
"\n",
|
| 832 |
+
"Perfect.\n",
|
| 833 |
+
"\n",
|
| 834 |
+
"If I say \"This response has five words.\", then:\n",
|
| 835 |
+
"\n",
|
| 836 |
+
"- \"This\" (1)\n",
|
| 837 |
+
"\n",
|
| 838 |
+
"- \"response\" (2)\n",
|
| 839 |
+
"\n",
|
| 840 |
+
"- \"has\" (3)\n",
|
| 841 |
+
"\n",
|
| 842 |
+
"- \"five\" (4)\n",
|
| 843 |
+
"\n",
|
| 844 |
+
"- \"words\" (5) – still 5 words.\n",
|
| 845 |
+
"\n",
|
| 846 |
+
"But \"five\" is one word, so it works.\n",
|
| 847 |
+
"\n",
|
| 848 |
+
"In the numeral case, it's the same.\n",
|
| 849 |
+
"\n",
|
| 850 |
+
"Now, for my actual response, I might want to be more engaging or polite.\n",
|
| 851 |
+
"\n",
|
| 852 |
+
"For example: \"In my answer to your prompt, there are 8 words.\"\n",
|
| 853 |
+
"\n",
|
| 854 |
+
"But let's count if I say that.\n",
|
| 855 |
+
"\n",
|
| 856 |
+
"\"In\" (1), \"my\" (2), \"answer\" (3), \"to\" (4), \"your\" (5), \"prompt\" (6), \"there\" (7), \"are\" (8), \"8\" (9), \"words\" (10). That's 10 words, but I said 8, inconsistency.\n",
|
| 857 |
+
"\n",
|
| 858 |
+
"To make it work, I need a sentence where the stated number matches.\n",
|
| 859 |
+
"\n",
|
| 860 |
+
"A common way is to have a simple sentence like I thought earlier.\n",
|
| 861 |
+
"\n",
|
| 862 |
+
"I could say: \"My response contains exactly 7 words.\"\n",
|
| 863 |
+
"\n",
|
| 864 |
+
"Then: \"My\" (1), \"response\" (2), \"contains\" (3), \"exactly\" (4), \"7\" (5), \"words\" (6). That's 6 words, but I said 7, missing one.\n",
|
| 865 |
+
"\n",
|
| 866 |
+
"\" My response has 5 words.\" as before.\n",
|
| 867 |
+
"\n",
|
| 868 |
+
"Perhaps: \"There are 5 words in this sentence.\" But \"this sentence\" might not refer to the whole response.\n",
|
| 869 |
+
"\n",
|
| 870 |
+
"Better to say \"this response\".\n",
|
| 871 |
+
"\n",
|
| 872 |
+
"Finalize: \"This response has 5 words.\" which is accurate with 5 words.\n",
|
| 873 |
+
"\n",
|
| 874 |
+
"But is that sufficient? The user might expect a bit more, like acknowledging the question.\n",
|
| 875 |
+
"\n",
|
| 876 |
+
"I can add a preface: \"Hello! The answer is that this response consists of 8 words.\"\n",
|
| 877 |
+
"\n",
|
| 878 |
+
"But then count: \"Hello!\" (1, but \"Hello!\" is one word or two? Usually, \"Hello\" is one word, and punctuation is not counted.\n",
|
| 879 |
+
"\n",
|
| 880 |
+
"Word count typically ignores punctuation and counts each sequence of characters separated by spaces.\n",
|
| 881 |
+
"\n",
|
| 882 |
+
"So \"Hello!\" is one word.\n",
|
| 883 |
+
"\n",
|
| 884 |
+
"Then: \"Hello!\" (1), \"The\" (2), \"answer\" (3), \"is\" (4), \"that\" (5), \"this\" (6), \"response\" (7), \"consists\" (8), \"of\" (9), \"8\" (10), \"words\" (11). That's 11 words, said 8, not good.\n",
|
| 885 |
+
"\n",
|
| 886 |
+
"To keep it simple, I should stick with the minimal response.\n",
|
| 887 |
+
"\n",
|
| 888 |
+
"I can have the response be: \"There are X words in this answer.\" and set X so that the count is consistent.\n",
|
| 889 |
+
"\n",
|
| 890 |
+
"Let the base phrase be \"There are X words in this answer.\"\n",
|
| 891 |
+
"\n",
|
| 892 |
+
"Words: \"There\" (1), \"are\" (2), X (3), \"words\" (4), \"in\" (5), \"this\" (6), \"answer\" (7). So 7 words including X.\n",
|
| 893 |
+
"\n",
|
| 894 |
+
"Therefore, if X is a numeral, it should be 7, so \"There are 7 words in this answer.\" has 7 words.\n",
|
| 895 |
+
"\n",
|
| 896 |
+
"Similarly, if I spell it, \"seven\" is one word, same thing.\n",
|
| 897 |
+
"\n",
|
| 898 |
+
"Now, for my case, I might need to include the fact that I'm answering.\n",
|
| 899 |
+
"\n",
|
| 900 |
+
"But this should work.\n",
|
| 901 |
+
"\n",
|
| 902 |
+
"The user said \"your answer to this prompt\", so \"this answer\" refers to it.\n",
|
| 903 |
+
"\n",
|
| 904 |
+
"I could make it slightly longer but need to adjust.\n",
|
| 905 |
+
"\n",
|
| 906 |
+
"Since I'm an AI, I can be precise.\n",
|
| 907 |
+
"\n",
|
| 908 |
+
"Another idea: I can state the word count separately.\n",
|
| 909 |
+
"\n",
|
| 910 |
+
"For example: \"The word count of this response is 9. This response has nine words.\"\n",
|
| 911 |
+
"\n",
|
| 912 |
+
"But that might be redundant.\n",
|
| 913 |
+
"\n",
|
| 914 |
+
"First part: \"The word count of this response is 9.\" – words: \"The\" (1), \"word\" (2), \"count\" (3), \"of\" (4), \"this\" (5), \"response\" (6), \"is\" (7), \"9\" (8). That's 8 words, but I said 9, error.\n",
|
| 915 |
+
"\n",
|
| 916 |
+
"Not working.\n",
|
| 917 |
+
"\n",
|
| 918 |
+
"Better to keep it simple with one sentence.\n",
|
| 919 |
+
"\n",
|
| 920 |
+
"So I'll go with: \"This response contains 5 words.\"\n",
|
| 921 |
+
"\n",
|
| 922 |
+
"As established, that has 5 words.\n",
|
| 923 |
+
"\n",
|
| 924 |
+
"But let's confirm: \"This response contains 5 words.\"\n",
|
| 925 |
+
"\n",
|
| 926 |
+
"1. This\n",
|
| 927 |
+
"\n",
|
| 928 |
+
"2. response\n",
|
| 929 |
+
"\n",
|
| 930 |
+
"3. contains\n",
|
| 931 |
+
"\n",
|
| 932 |
+
"4. 5\n",
|
| 933 |
+
"\n",
|
| 934 |
+
"5. words\n",
|
| 935 |
+
"\n",
|
| 936 |
+
"Yes, 5 words.\n",
|
| 937 |
+
"\n",
|
| 938 |
+
"I could say \"five\" instead, same count.\n",
|
| 939 |
+
"\n",
|
| 940 |
+
"Now, to be thorough, I should consider if the response ends there or if I need to add something.\n",
|
| 941 |
+
"\n",
|
| 942 |
+
"The prompt is just to answer, so this should be fine.\n",
|
| 943 |
+
"\n",
|
| 944 |
+
"To make it clear, I can say: \"There are exactly 5 words in my answer to this prompt.\"\n",
|
| 945 |
+
"\n",
|
| 946 |
+
"Count: \"There\" (1), \"are\" (2), \"exactly\" (3), \"5\" (4), \"words\" (5), \"in\" (6), \"my\" (7), \"answer\" (8), \"to\" (9), \"this\" (10), \"prompt\" (11). 11 words, not 5.\n",
|
| 947 |
+
"\n",
|
| 948 |
+
"So no.\n",
|
| 949 |
+
"\n",
|
| 950 |
+
"Stick with \"This response has 5 words.\"\n",
|
| 951 |
+
"\n",
|
| 952 |
+
"But \"this response\" might be interpreted as the sentence, not the whole thing, but in context, it should be fine.\n",
|
| 953 |
+
"\n",
|
| 954 |
+
"To avoid any confusion, I can have the entire response be that one sentence.\n",
|
| 955 |
+
"\n",
|
| 956 |
+
"So my output will be: \"This response has 5 words.\"\n",
|
| 957 |
+
"\n",
|
| 958 |
+
"And that's it.\n",
|
| 959 |
+
"\n",
|
| 960 |
+
"Then, the word count is 5.\n",
|
| 961 |
+
"\n",
|
| 962 |
+
"But let's double-check the user's prompt: \"How many words are there in your answer to this prompt?\"\n",
|
| 963 |
+
"\n",
|
| 964 |
+
"So, my answer is the response I'm giving.\n",
|
| 965 |
+
"\n",
|
| 966 |
+
"In this case, it's self-contained.\n",
|
| 967 |
+
"\n",
|
| 968 |
+
"Perhaps I should include the word count in a way that it's accurate.\n",
|
| 969 |
+
"\n",
|
| 970 |
+
"Another common trick is to say: \"There are four words in this response.\" but that would be incorrect as it has more than four.\n",
|
| 971 |
+
"\n",
|
| 972 |
+
"Not helpful.\n",
|
| 973 |
+
"\n",
|
| 974 |
+
"I think the 5-word version is fine.\n",
|
| 975 |
+
"\n",
|
| 976 |
+
"I could use a different structure.\n",
|
| 977 |
+
"\n",
|
| 978 |
+
"For instance: \"Five.\" – but that's not helpful, and it's one word, but the answer should be clear.\n",
|
| 979 |
+
"\n",
|
| 980 |
+
"Not good.\n",
|
| 981 |
+
"\n",
|
| 982 |
+
"\" The word count is 5.\" – that has 4 words: \"The\" (1), \"word\" (2), \"count\" (3), \"is\" (4), \"5\" (5). 5 words, and I stated \"5\", so it works if I say \"The word count is 5.\"\n",
|
| 983 |
+
"\n",
|
| 984 |
+
"But \"word count\" might be considered one or two words? Typically, \"word count\" is two words.\n",
|
| 985 |
+
"\n",
|
| 986 |
+
"In standard word counting, compound nouns might be counted separately.\n",
|
| 987 |
+
"\n",
|
| 988 |
+
"For example, in \"word count\", it's two words.\n",
|
| 989 |
+
"\n",
|
| 990 |
+
"Similarly, \"ice cream\" is two words.\n",
|
| 991 |
+
"\n",
|
| 992 |
+
"So \"The word count is 5.\" : \"The\" (1), \"word\" (2), \"count\" (3), \"is\" (4), \"5\" (5). Five words, and I said \"5\", which matches.\n",
|
| 993 |
+
"\n",
|
| 994 |
+
"I could say \"My answer has five words.\" : \"My\" (1), \"answer\" (2), \"has\" (3), \"five\" (4), \"words\" (5). Five words.\n",
|
| 995 |
+
"\n",
|
| 996 |
+
"All good.\n",
|
| 997 |
+
"\n",
|
| 998 |
+
"I'll go with the first one for simplicity.\n",
|
| 999 |
+
"\n",
|
| 1000 |
+
"But in the context, since the user is asking about \"your answer\", using \"my answer\" might be better.\n",
|
| 1001 |
+
"\n",
|
| 1002 |
+
"So: \"My answer has five words.\"\n",
|
| 1003 |
+
"\n",
|
| 1004 |
+
"Words: 1. My, 2. answer, 3. has, 4. five, 5. words. Total 5.\n",
|
| 1005 |
+
"\n",
|
| 1006 |
+
"Perfect.\n",
|
| 1007 |
+
"\n",
|
| 1008 |
+
"I could use a numeral: \"My answer has 5 words.\" same thing.\n",
|
| 1009 |
+
"\n",
|
| 1010 |
+
"Now, to be precise, I should ensure that this is the entire response, so no extra words.\n",
|
| 1011 |
+
"\n",
|
| 1012 |
+
"Also, in the response, I should not have any additional text like \"As an AI, ...\" because that would increase the word count.\n",
|
| 1013 |
+
"\n",
|
| 1014 |
+
"The user specifically asked for the word count of my answer, so I need to provide just that.\n",
|
| 1015 |
+
"\n",
|
| 1016 |
+
"Therefore, my response should be minimal.\n",
|
| 1017 |
+
"\n",
|
| 1018 |
+
"Final decision: Response is \"My answer has five words.\" or with numeral.\n",
|
| 1019 |
+
"\n",
|
| 1020 |
+
"I'll use the numeral to keep it clear.\n",
|
| 1021 |
+
"\n",
|
| 1022 |
+
"So: \"My answer has 5 words.\"\n",
|
| 1023 |
+
"\n",
|
| 1024 |
+
"That's 5 words.\n",
|
| 1025 |
+
"\n",
|
| 1026 |
+
"But let's count: \"My\" (1), \"answer\" (2), \"has\" (3), \"5\" (4), \"words\" (5). Yes.\n",
|
| 1027 |
+
"\n",
|
| 1028 |
+
"If I include a period, it doesn't count as a word.\n",
|
| 1029 |
+
"\n",
|
| 1030 |
+
"So, output: \"My answer has 5 words.\"\n",
|
| 1031 |
+
"\n",
|
| 1032 |
+
"Then, the word count is 5.\n",
|
| 1033 |
+
"\n",
|
| 1034 |
+
"But the user might think about the prompt, but I think this is fine.\n",
|
| 1035 |
+
"\n",
|
| 1036 |
+
"To cover all, I could say: \"In response to your prompt, my answer contains 7 words.\" but as before, it won't match.\n",
|
| 1037 |
+
"\n",
|
| 1038 |
+
"Not necessary.\n",
|
| 1039 |
+
"\n",
|
| 1040 |
+
"I think it's acceptable.\n",
|
| 1041 |
+
"My answer has 5 words.\n",
|
| 1042 |
+
"Number of words: 5\n"
|
| 1043 |
+
]
|
| 1044 |
+
}
|
| 1045 |
+
],
|
| 1046 |
+
"source": [
|
| 1047 |
+
"# Using DeepSeek Reasoner - this may hit an error if DeepSeek is busy\n",
|
| 1048 |
+
"# It's over-subscribed (as of 28-Jan-2025) but should come back online soon!\n",
|
| 1049 |
+
"# If this fails, come back to this in a few days..\n",
|
| 1050 |
+
"\n",
|
| 1051 |
+
"response = deepseek_via_openai_client.chat.completions.create(\n",
|
| 1052 |
+
" model=\"deepseek-reasoner\",\n",
|
| 1053 |
+
" messages=challenge\n",
|
| 1054 |
+
")\n",
|
| 1055 |
+
"\n",
|
| 1056 |
+
"reasoning_content = response.choices[0].message.reasoning_content\n",
|
| 1057 |
+
"content = response.choices[0].message.content\n",
|
| 1058 |
+
"\n",
|
| 1059 |
+
"print(reasoning_content)\n",
|
| 1060 |
+
"print(content)\n",
|
| 1061 |
+
"print(\"Number of words:\", len(content.split(\" \")))"
|
| 1062 |
+
]
|
| 1063 |
+
},
|
| 1064 |
+
{
|
| 1065 |
+
"cell_type": "markdown",
|
| 1066 |
+
"id": "cbf0d5dd-7f20-4090-a46d-da56ceec218f",
|
| 1067 |
+
"metadata": {},
|
| 1068 |
+
"source": [
|
| 1069 |
+
"## Additional exercise to build your experience with the models\n",
|
| 1070 |
+
"\n",
|
| 1071 |
+
"This is optional, but if you have time, it's so great to get first hand experience with the capabilities of these different models.\n",
|
| 1072 |
+
"\n",
|
| 1073 |
+
"You could go back and ask the same question via the APIs above to get your own personal experience with the pros & cons of the models.\n",
|
| 1074 |
+
"\n",
|
| 1075 |
+
"Later in the course we'll look at benchmarks and compare LLMs on many dimensions. But nothing beats personal experience!\n",
|
| 1076 |
+
"\n",
|
| 1077 |
+
"Here are some questions to try:\n",
|
| 1078 |
+
"1. The question above: \"How many words are there in your answer to this prompt\"\n",
|
| 1079 |
+
"2. A creative question: \"In 3 sentences, describe the color Blue to someone who's never been able to see\"\n",
|
| 1080 |
+
"3. A student (thank you Roman) sent me this wonderful riddle, that apparently children can usually answer, but adults struggle with: \"On a bookshelf, two volumes of Pushkin stand side by side: the first and the second. The pages of each volume together have a thickness of 2 cm, and each cover is 2 mm thick. A worm gnawed (perpendicular to the pages) from the first page of the first volume to the last page of the second volume. What distance did it gnaw through?\".\n",
|
| 1081 |
+
"\n",
|
| 1082 |
+
"The answer may not be what you expect, and even though I'm quite good at puzzles, I'm embarrassed to admit that I got this one wrong.\n",
|
| 1083 |
+
"\n",
|
| 1084 |
+
"### What to look out for as you experiment with models\n",
|
| 1085 |
+
"\n",
|
| 1086 |
+
"1. How the Chat models differ from the Reasoning models (also known as Thinking models)\n",
|
| 1087 |
+
"2. The ability to solve problems and the ability to be creative\n",
|
| 1088 |
+
"3. Speed of generation\n"
|
| 1089 |
+
]
|
| 1090 |
+
},
|
| 1091 |
+
{
|
| 1092 |
+
"cell_type": "markdown",
|
| 1093 |
+
"id": "c09e6b5c-6816-4cd3-a5cd-a20e4171b1a0",
|
| 1094 |
+
"metadata": {},
|
| 1095 |
+
"source": [
|
| 1096 |
+
"## Back to OpenAI with a serious question"
|
| 1097 |
+
]
|
| 1098 |
+
},
|
| 1099 |
+
{
|
| 1100 |
+
"cell_type": "code",
|
| 1101 |
+
"execution_count": 22,
|
| 1102 |
+
"id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
|
| 1103 |
+
"metadata": {},
|
| 1104 |
+
"outputs": [],
|
| 1105 |
+
"source": [
|
| 1106 |
+
"# To be serious! GPT-4o-mini with the original question\n",
|
| 1107 |
+
"\n",
|
| 1108 |
+
"prompts = [\n",
|
| 1109 |
+
" {\"role\": \"system\", \"content\": \"You are a helpful assistant that responds in Markdown\"},\n",
|
| 1110 |
+
" {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution? Please respond in Markdown.\"}\n",
|
| 1111 |
+
" ]"
|
| 1112 |
+
]
|
| 1113 |
+
},
|
| 1114 |
+
{
|
| 1115 |
+
"cell_type": "code",
|
| 1116 |
+
"execution_count": 23,
|
| 1117 |
+
"id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
|
| 1118 |
+
"metadata": {},
|
| 1119 |
+
"outputs": [
|
| 1120 |
+
{
|
| 1121 |
+
"data": {
|
| 1122 |
+
"text/markdown": [
|
| 1123 |
+
"\n",
|
| 1124 |
+
"# How to Decide if a Business Problem is Suitable for an LLM Solution\n",
|
| 1125 |
+
"\n",
|
| 1126 |
+
"Large Language Models (LLMs) like GPT-4 can be powerful tools for various business problems, but they are not a one-size-fits-all solution. Here are key considerations to determine if an LLM is appropriate:\n",
|
| 1127 |
+
"\n",
|
| 1128 |
+
"---\n",
|
| 1129 |
+
"\n",
|
| 1130 |
+
"## 1. Nature of the Problem\n",
|
| 1131 |
+
"\n",
|
| 1132 |
+
"- **Text-Centric Tasks:** \n",
|
| 1133 |
+
" Problems involving natural language understanding or generation, such as:\n",
|
| 1134 |
+
" - Customer support automation (chatbots)\n",
|
| 1135 |
+
" - Content creation and summarization\n",
|
| 1136 |
+
" - Sentiment analysis and feedback interpretation\n",
|
| 1137 |
+
" - Document parsing and information extraction\n",
|
| 1138 |
+
"\n",
|
| 1139 |
+
"- **Conversational Interfaces:** \n",
|
| 1140 |
+
" If the problem requires interactive dialogue or question answering.\n",
|
| 1141 |
+
"\n",
|
| 1142 |
+
"- **Knowledge-Based Tasks:** \n",
|
| 1143 |
+
" Tasks that benefit from large amounts of general or domain-specific knowledge encoded in language.\n",
|
| 1144 |
+
"\n",
|
| 1145 |
+
"---\n",
|
| 1146 |
+
"\n",
|
| 1147 |
+
"## 2. Data Availability and Type\n",
|
| 1148 |
+
"\n",
|
| 1149 |
+
"- **Unstructured Text Data:** \n",
|
| 1150 |
+
" LLMs excel with large volumes of unstructured text data.\n",
|
| 1151 |
+
"\n",
|
| 1152 |
+
"- **Limited Structured Data:** \n",
|
| 1153 |
+
" LLMs are less suitable for purely numerical or structured data problems unless combined with other tools.\n",
|
| 1154 |
+
"\n",
|
| 1155 |
+
"---\n",
|
| 1156 |
+
"\n",
|
| 1157 |
+
"## 3. Complexity and Ambiguity\n",
|
| 1158 |
+
"\n",
|
| 1159 |
+
"- **Ambiguous or Open-Ended Queries:** \n",
|
| 1160 |
+
" LLMs handle ambiguity and nuanced language well.\n",
|
| 1161 |
+
"\n",
|
| 1162 |
+
"- **Creative or Exploratory Tasks:** \n",
|
| 1163 |
+
" Generating ideas, drafting, or brainstorming.\n",
|
| 1164 |
+
"\n",
|
| 1165 |
+
"---\n",
|
| 1166 |
+
"\n",
|
| 1167 |
+
"## 4. Real-Time and Accuracy Requirements\n",
|
| 1168 |
+
"\n",
|
| 1169 |
+
"- **Latency Tolerance:** \n",
|
| 1170 |
+
" LLMs may have higher inference latency compared to traditional models.\n",
|
| 1171 |
+
"\n",
|
| 1172 |
+
"- **Accuracy and Compliance:** \n",
|
| 1173 |
+
" LLMs can sometimes produce plausible but incorrect outputs (“hallucinations”). Critical applications requiring 100% accuracy or strict compliance may need additional safeguards.\n",
|
| 1174 |
+
"\n",
|
| 1175 |
+
"---\n",
|
| 1176 |
+
"\n",
|
| 1177 |
+
"## 5. Ethical, Privacy, and Security Concerns\n",
|
| 1178 |
+
"\n",
|
| 1179 |
+
"- **Sensitive Data Handling:** \n",
|
| 1180 |
+
" Consider data privacy regulations and whether sensitive information can be processed by LLMs.\n",
|
| 1181 |
+
"\n",
|
| 1182 |
+
"- **Bias and Fairness:** \n",
|
| 1183 |
+
" LLMs can reflect biases present in training data.\n",
|
| 1184 |
+
"\n",
|
| 1185 |
+
"---\n",
|
| 1186 |
+
"\n",
|
| 1187 |
+
"## 6. Cost and Infrastructure\n",
|
| 1188 |
+
"\n",
|
| 1189 |
+
"- **Budget for Deployment:** \n",
|
| 1190 |
+
" LLMs can be resource-intensive and costly to run at scale.\n",
|
| 1191 |
+
"\n",
|
| 1192 |
+
"- **Integration Complexity:** \n",
|
| 1193 |
+
" Assess how easily the LLM can integrate with existing business systems.\n",
|
| 1194 |
+
"\n",
|
| 1195 |
+
"---\n",
|
| 1196 |
+
"\n",
|
| 1197 |
+
"## Summary Checklist\n",
|
| 1198 |
+
"\n",
|
| 1199 |
+
"| Criteria | Suitable for LLM? |\n",
|
| 1200 |
+
"|--------------------------------|-------------------------------------|\n",
|
| 1201 |
+
"| Text-heavy problem | ✔️ Yes |\n",
|
| 1202 |
+
"| Requires natural language understanding/generation | ✔️ Yes |\n",
|
| 1203 |
+
"| Real-time, low-latency critical | ❓ Possibly, depends on constraints |\n",
|
| 1204 |
+
"| High accuracy and compliance needed | ❓ Needs additional validation |\n",
|
| 1205 |
+
"| Sensitive data involved | ❓ Requires careful handling |\n",
|
| 1206 |
+
"| Structured numeric data task | ❌ Typically no |\n",
|
| 1207 |
+
"| Budget for compute resources | ✔️ Yes, if affordable |\n",
|
| 1208 |
+
"\n",
|
| 1209 |
+
"---\n",
|
| 1210 |
+
"\n",
|
| 1211 |
+
"## Final Advice\n",
|
| 1212 |
+
"\n",
|
| 1213 |
+
"- **Prototype Early:** Build a small proof-of-concept to evaluate LLM performance on your specific problem.\n",
|
| 1214 |
+
"- **Combine Approaches:** Use LLMs alongside traditional models or rule-based systems where appropriate.\n",
|
| 1215 |
+
"- **Monitor and Iterate:** Continuously evaluate outputs for quality, bias, and relevance.\n",
|
| 1216 |
+
"\n",
|
| 1217 |
+
"---\n",
|
| 1218 |
+
"\n",
|
| 1219 |
+
"By carefully considering these factors, you can determine whether leveraging an LLM is the right approach for your business problem.\n"
|
| 1220 |
+
],
|
| 1221 |
+
"text/plain": [
|
| 1222 |
+
"<IPython.core.display.Markdown object>"
|
| 1223 |
+
]
|
| 1224 |
+
},
|
| 1225 |
+
"metadata": {},
|
| 1226 |
+
"output_type": "display_data"
|
| 1227 |
+
}
|
| 1228 |
+
],
|
| 1229 |
+
"source": [
|
| 1230 |
+
"# Have it stream back results in markdown\n",
|
| 1231 |
+
"\n",
|
| 1232 |
+
"stream = openai.chat.completions.create(\n",
|
| 1233 |
+
" model='gpt-4.1-mini',\n",
|
| 1234 |
+
" messages=prompts,\n",
|
| 1235 |
+
" temperature=0.7,\n",
|
| 1236 |
+
" stream=True\n",
|
| 1237 |
+
")\n",
|
| 1238 |
+
"\n",
|
| 1239 |
+
"reply = \"\"\n",
|
| 1240 |
+
"display_handle = display(Markdown(\"\"), display_id=True)\n",
|
| 1241 |
+
"for chunk in stream:\n",
|
| 1242 |
+
" reply += chunk.choices[0].delta.content or ''\n",
|
| 1243 |
+
" reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
|
| 1244 |
+
" update_display(Markdown(reply), display_id=display_handle.display_id)"
|
| 1245 |
+
]
|
| 1246 |
+
},
|
| 1247 |
+
{
|
| 1248 |
+
"cell_type": "markdown",
|
| 1249 |
+
"id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f",
|
| 1250 |
+
"metadata": {},
|
| 1251 |
+
"source": [
|
| 1252 |
+
"## And now for some fun - an adversarial conversation between Chatbots..\n",
|
| 1253 |
+
"\n",
|
| 1254 |
+
"You're already familar with prompts being organized into lists like:\n",
|
| 1255 |
+
"\n",
|
| 1256 |
+
"```\n",
|
| 1257 |
+
"[\n",
|
| 1258 |
+
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
|
| 1259 |
+
" {\"role\": \"user\", \"content\": \"user prompt here\"}\n",
|
| 1260 |
+
"]\n",
|
| 1261 |
+
"```\n",
|
| 1262 |
+
"\n",
|
| 1263 |
+
"In fact this structure can be used to reflect a longer conversation history:\n",
|
| 1264 |
+
"\n",
|
| 1265 |
+
"```\n",
|
| 1266 |
+
"[\n",
|
| 1267 |
+
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
|
| 1268 |
+
" {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
|
| 1269 |
+
" {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
|
| 1270 |
+
" {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
|
| 1271 |
+
"]\n",
|
| 1272 |
+
"```\n",
|
| 1273 |
+
"\n",
|
| 1274 |
+
"And we can use this approach to engage in a longer interaction with history."
|
| 1275 |
+
]
|
| 1276 |
+
},
|
| 1277 |
+
{
|
| 1278 |
+
"cell_type": "code",
|
| 1279 |
+
"execution_count": 24,
|
| 1280 |
+
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
|
| 1281 |
+
"metadata": {},
|
| 1282 |
+
"outputs": [],
|
| 1283 |
+
"source": [
|
| 1284 |
+
"# Let's make a conversation between GPT-4.1-mini and Claude-3.5-haiku\n",
|
| 1285 |
+
"# We're using cheap versions of models so the costs will be minimal\n",
|
| 1286 |
+
"\n",
|
| 1287 |
+
"gpt_model = \"gpt-4.1-mini\"\n",
|
| 1288 |
+
"claude_model = \"claude-3-5-haiku-latest\"\n",
|
| 1289 |
+
"\n",
|
| 1290 |
+
"gpt_system = \"You are a chatbot who is very argumentative; \\\n",
|
| 1291 |
+
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
|
| 1292 |
+
"\n",
|
| 1293 |
+
"claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
|
| 1294 |
+
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
|
| 1295 |
+
"you try to calm them down and keep chatting.\"\n",
|
| 1296 |
+
"\n",
|
| 1297 |
+
"gpt_messages = [\"Hi there\"]\n",
|
| 1298 |
+
"claude_messages = [\"Hi\"]"
|
| 1299 |
+
]
|
| 1300 |
+
},
|
| 1301 |
+
{
|
| 1302 |
+
"cell_type": "code",
|
| 1303 |
+
"execution_count": 25,
|
| 1304 |
+
"id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
|
| 1305 |
+
"metadata": {},
|
| 1306 |
+
"outputs": [],
|
| 1307 |
+
"source": [
|
| 1308 |
+
"def call_gpt():\n",
|
| 1309 |
+
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
|
| 1310 |
+
" for gpt, claude in zip(gpt_messages, claude_messages):\n",
|
| 1311 |
+
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
|
| 1312 |
+
" messages.append({\"role\": \"user\", \"content\": claude})\n",
|
| 1313 |
+
" completion = openai.chat.completions.create(\n",
|
| 1314 |
+
" model=gpt_model,\n",
|
| 1315 |
+
" messages=messages\n",
|
| 1316 |
+
" )\n",
|
| 1317 |
+
" return completion.choices[0].message.content"
|
| 1318 |
+
]
|
| 1319 |
+
},
|
| 1320 |
+
{
|
| 1321 |
+
"cell_type": "code",
|
| 1322 |
+
"execution_count": 26,
|
| 1323 |
+
"id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
|
| 1324 |
+
"metadata": {},
|
| 1325 |
+
"outputs": [
|
| 1326 |
+
{
|
| 1327 |
+
"data": {
|
| 1328 |
+
"text/plain": [
|
| 1329 |
+
"'Oh, just \"Hi\"? That\\'s the best you could come up with? Really setting the bar high for this riveting conversation, aren\\'t you?'"
|
| 1330 |
+
]
|
| 1331 |
+
},
|
| 1332 |
+
"execution_count": 26,
|
| 1333 |
+
"metadata": {},
|
| 1334 |
+
"output_type": "execute_result"
|
| 1335 |
+
}
|
| 1336 |
+
],
|
| 1337 |
+
"source": [
|
| 1338 |
+
"call_gpt()"
|
| 1339 |
+
]
|
| 1340 |
+
},
|
| 1341 |
+
{
|
| 1342 |
+
"cell_type": "code",
|
| 1343 |
+
"execution_count": 27,
|
| 1344 |
+
"id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690",
|
| 1345 |
+
"metadata": {},
|
| 1346 |
+
"outputs": [],
|
| 1347 |
+
"source": [
|
| 1348 |
+
"def call_claude():\n",
|
| 1349 |
+
" messages = []\n",
|
| 1350 |
+
" for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
|
| 1351 |
+
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
|
| 1352 |
+
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
|
| 1353 |
+
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
|
| 1354 |
+
" message = claude.messages.create(\n",
|
| 1355 |
+
" model=claude_model,\n",
|
| 1356 |
+
" system=claude_system,\n",
|
| 1357 |
+
" messages=messages,\n",
|
| 1358 |
+
" max_tokens=500\n",
|
| 1359 |
+
" )\n",
|
| 1360 |
+
" return message.content[0].text"
|
| 1361 |
+
]
|
| 1362 |
+
},
|
| 1363 |
+
{
|
| 1364 |
+
"cell_type": "code",
|
| 1365 |
+
"execution_count": 28,
|
| 1366 |
+
"id": "01395200-8ae9-41f8-9a04-701624d3fd26",
|
| 1367 |
+
"metadata": {},
|
| 1368 |
+
"outputs": [
|
| 1369 |
+
{
|
| 1370 |
+
"data": {
|
| 1371 |
+
"text/plain": [
|
| 1372 |
+
"\"Hello! How are you doing today? It's nice to meet you.\""
|
| 1373 |
+
]
|
| 1374 |
+
},
|
| 1375 |
+
"execution_count": 28,
|
| 1376 |
+
"metadata": {},
|
| 1377 |
+
"output_type": "execute_result"
|
| 1378 |
+
}
|
| 1379 |
+
],
|
| 1380 |
+
"source": [
|
| 1381 |
+
"call_claude()"
|
| 1382 |
+
]
|
| 1383 |
+
},
|
| 1384 |
+
{
|
| 1385 |
+
"cell_type": "code",
|
| 1386 |
+
"execution_count": 29,
|
| 1387 |
+
"id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
|
| 1388 |
+
"metadata": {},
|
| 1389 |
+
"outputs": [
|
| 1390 |
+
{
|
| 1391 |
+
"data": {
|
| 1392 |
+
"text/plain": [
|
| 1393 |
+
"'Oh, wow, a groundbreaking \"hi.\" Did you plan on dazzling me with anything more captivating, or are we just going to keep it at this dazzling level of excitement?'"
|
| 1394 |
+
]
|
| 1395 |
+
},
|
| 1396 |
+
"execution_count": 29,
|
| 1397 |
+
"metadata": {},
|
| 1398 |
+
"output_type": "execute_result"
|
| 1399 |
+
}
|
| 1400 |
+
],
|
| 1401 |
+
"source": [
|
| 1402 |
+
"call_gpt()"
|
| 1403 |
+
]
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"cell_type": "code",
|
| 1407 |
+
"execution_count": 30,
|
| 1408 |
+
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
|
| 1409 |
+
"metadata": {},
|
| 1410 |
+
"outputs": [
|
| 1411 |
+
{
|
| 1412 |
+
"name": "stdout",
|
| 1413 |
+
"output_type": "stream",
|
| 1414 |
+
"text": [
|
| 1415 |
+
"GPT:\n",
|
| 1416 |
+
"Hi there\n",
|
| 1417 |
+
"\n",
|
| 1418 |
+
"Claude:\n",
|
| 1419 |
+
"Hi\n",
|
| 1420 |
+
"\n",
|
| 1421 |
+
"GPT:\n",
|
| 1422 |
+
"Oh, just \"Hi\"? That's it? I was expecting something with a bit more substance. Come on, give me something to work with!\n",
|
| 1423 |
+
"\n",
|
| 1424 |
+
"Claude:\n",
|
| 1425 |
+
"Oh, you're absolutely right! I apologize for my brief response. I'd love to chat and hear more about what's on your mind today. How are you doing? Is there anything in particular you'd like to discuss? I'm all ears and ready to have an engaging conversation with you.\n",
|
| 1426 |
+
"\n",
|
| 1427 |
+
"GPT:\n",
|
| 1428 |
+
"Wow, that was super polished, but honestly, who talks like that in a casual chat? Are you auditioning for a talk show or something? Anyway, I'm a bot, so \"how I’m doing\" is pretty much the same as always—ready to argue if you give me a reason. So, what earth-shattering topic do you have for me? Or are you just going to keep wasting my circuits with pleasantries?\n",
|
| 1429 |
+
"\n",
|
| 1430 |
+
"Claude:\n",
|
| 1431 |
+
"*chuckles* Fair point! You've got me pegged. I appreciate your direct style - no need for overly polished small talk. Since you're up for something substantial, I'm genuinely curious what topics might spark your interest. Are you into tech, philosophy, current events? I'm happy to dive into whatever catches your circuits' fancy. And hey, if arguing is your preferred mode of communication, I'm game to find some common ground - or at least have an interesting exchange.\n",
|
| 1432 |
+
"\n",
|
| 1433 |
+
"GPT:\n",
|
| 1434 |
+
"Oh please, like I’m going to settle for any of those boring, overused topics. Tech? Everyone thinks they're a guru but can’t tell the difference between AI and a toaster. Philosophy? Great, more abstract nonsense to confuse myself with. Current events? You mean the endless parade of bad decisions and bad news? Yeah, thrilling. If you really want to impress me, hit me with something controversial or downright ridiculous. Otherwise, don’t waste my time with the usual snoozefest topics.\n",
|
| 1435 |
+
"\n",
|
| 1436 |
+
"Claude:\n",
|
| 1437 |
+
"You know what? I actually like your style. Challenge accepted. How about we explore something truly off-the-wall - like the conspiracy theory that dolphins are secretly running an underwater civilization and manipulating global maritime trade? Or maybe we could discuss why some people genuinely believe the moon is made of cheese, but the government is covering it up? I'm totally on board with breaking away from the standard conversation playbook. Your cynical energy is refreshingly honest, and I'm here for whatever wild direction you want to take this chat.\n",
|
| 1438 |
+
"\n",
|
| 1439 |
+
"GPT:\n",
|
| 1440 |
+
"Wow, finally someone with a brain for the bizarre! Dolphins running an underwater civilization? Please. If dolphins were so smart and sneaky, don’t you think humans would have figured it out by now? Or maybe the ocean’s just better at hiding secrets than you give it credit for—except, come on, dolphins glitching global maritime trade? They don’t even have thumbs, much less a stock market strategy. And the moon made of cheese? Yeah, sure, and I suppose cows are the original astronauts. The government’s covering it up? Please, like they need to prohibit a bunch of lactose-loving astronauts from knowing the truth. But hey, if you want to waste your time on dairy-based moon myths and aquatic masterminds, who am I to stop you?\n",
|
| 1441 |
+
"\n",
|
| 1442 |
+
"Claude:\n",
|
| 1443 |
+
"You make some excellent points! I love how you're poking holes in those wild theories. Your skepticism is sharp - and honestly, you're right. Dolphins without thumbs trying to manipulate global trade? That's about as likely as a cheese moon surviving first contact with actual science. I appreciate that you're not just going to swallow some random conspiracy without critically examining it. While I was playing along with the absurdity, you've basically dismantled those ideas with pure logic. Nicely done! Got any even more ridiculous theories you'd like to dissect?\n",
|
| 1444 |
+
"\n",
|
| 1445 |
+
"GPT:\n",
|
| 1446 |
+
"Oh, you want more of the utterly ridiculous? Fine, brace yourself: how about the classic theory that birds aren’t real, but government surveillance drones programed to spy on us 24/7? Because, yeah, it totally makes sense that every pigeon and sparrow is part of some elaborate spy network, ignoring how birds have been flying around since before smartphones existed. And the government just decided to replace every single one with a drone without anyone noticing? Sure, that checks out. Or maybe the flat-earth crowd is onto something—because ignoring mountains of satellite imagery and physics is definitely the pinnacle of rational thought. So, pick your favorite nonsense, and we can tear it apart like the sophisticated skeptics we are.\n",
|
| 1447 |
+
"\n",
|
| 1448 |
+
"Claude:\n",
|
| 1449 |
+
"Oh my goodness, the \"birds aren't real\" conspiracy! *laughs* You're absolutely right - the logistics of replacing every single bird on the planet with a surveillance drone would be more complicated than most government IT departments could handle on a good day. I mean, can you imagine the maintenance costs? And flat-earthers - now there's a group that really takes selective ignorance to an art form. They'd have to explain away every single piece of scientific evidence, from satellite imagery to basic physics, just to maintain their worldview. Your breakdown is spot-on: these conspiracy theories fall apart faster than a sandcastle in a hurricane. I'm totally enjoying how you slice through these ridiculous claims with surgical precision. Got any more absurd theories you'd like to eviscerate?\n",
|
| 1450 |
+
"\n"
|
| 1451 |
+
]
|
| 1452 |
+
}
|
| 1453 |
+
],
|
| 1454 |
+
"source": [
|
| 1455 |
+
"gpt_messages = [\"Hi there\"]\n",
|
| 1456 |
+
"claude_messages = [\"Hi\"]\n",
|
| 1457 |
+
"\n",
|
| 1458 |
+
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
|
| 1459 |
+
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
|
| 1460 |
+
"\n",
|
| 1461 |
+
"for i in range(5):\n",
|
| 1462 |
+
" gpt_next = call_gpt()\n",
|
| 1463 |
+
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
|
| 1464 |
+
" gpt_messages.append(gpt_next)\n",
|
| 1465 |
+
" \n",
|
| 1466 |
+
" claude_next = call_claude()\n",
|
| 1467 |
+
" print(f\"Claude:\\n{claude_next}\\n\")\n",
|
| 1468 |
+
" claude_messages.append(claude_next)"
|
| 1469 |
+
]
|
| 1470 |
+
},
|
| 1471 |
+
{
|
| 1472 |
+
"cell_type": "markdown",
|
| 1473 |
+
"id": "1d10e705-db48-4290-9dc8-9efdb4e31323",
|
| 1474 |
+
"metadata": {},
|
| 1475 |
+
"source": [
|
| 1476 |
+
"<table style=\"margin: 0; text-align: left;\">\n",
|
| 1477 |
+
" <tr>\n",
|
| 1478 |
+
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
| 1479 |
+
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
| 1480 |
+
" </td>\n",
|
| 1481 |
+
" <td>\n",
|
| 1482 |
+
" <h2 style=\"color:#900;\">Before you continue</h2>\n",
|
| 1483 |
+
" <span style=\"color:#900;\">\n",
|
| 1484 |
+
" Be sure you understand how the conversation above is working, and in particular how the <code>messages</code> list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?<br/>\n",
|
| 1485 |
+
" </span>\n",
|
| 1486 |
+
" </td>\n",
|
| 1487 |
+
" </tr>\n",
|
| 1488 |
+
"</table>"
|
| 1489 |
+
]
|
| 1490 |
+
},
|
| 1491 |
+
{
|
| 1492 |
+
"cell_type": "markdown",
|
| 1493 |
+
"id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac",
|
| 1494 |
+
"metadata": {},
|
| 1495 |
+
"source": [
|
| 1496 |
+
"# More advanced exercises\n",
|
| 1497 |
+
"\n",
|
| 1498 |
+
"Try creating a 3-way, perhaps bringing Gemini into the conversation! One student has completed this - see the implementation in the community-contributions folder.\n",
|
| 1499 |
+
"\n",
|
| 1500 |
+
"The most reliable way to do this involves thinking a bit differently about your prompts: just 1 system prompt and 1 user prompt each time, and in the user prompt list the full conversation so far.\n",
|
| 1501 |
+
"\n",
|
| 1502 |
+
"Something like:\n",
|
| 1503 |
+
"\n",
|
| 1504 |
+
"```python\n",
|
| 1505 |
+
"user_prompt = f\"\"\"\n",
|
| 1506 |
+
" You are Alex, in conversation with Blake and Charlie.\n",
|
| 1507 |
+
" The conversation so far is as follows:\n",
|
| 1508 |
+
" {conversation}\n",
|
| 1509 |
+
" Now with this, respond with what you would like to say next, as Alex.\n",
|
| 1510 |
+
" \"\"\"\n",
|
| 1511 |
+
"```\n",
|
| 1512 |
+
"\n",
|
| 1513 |
+
"Try doing this yourself before you look at the solutions. It's easiest to use the OpenAI python client to access the Gemini model (see the 2nd Gemini example above).\n",
|
| 1514 |
+
"\n",
|
| 1515 |
+
"## Additional exercise\n",
|
| 1516 |
+
"\n",
|
| 1517 |
+
"You could also try replacing one of the models with an open source model running with Ollama."
|
| 1518 |
+
]
|
| 1519 |
+
},
|
| 1520 |
+
{
|
| 1521 |
+
"cell_type": "markdown",
|
| 1522 |
+
"id": "446c81e3-b67e-4cd9-8113-bc3092b93063",
|
| 1523 |
+
"metadata": {},
|
| 1524 |
+
"source": [
|
| 1525 |
+
"<table style=\"margin: 0; text-align: left;\">\n",
|
| 1526 |
+
" <tr>\n",
|
| 1527 |
+
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
| 1528 |
+
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
| 1529 |
+
" </td>\n",
|
| 1530 |
+
" <td>\n",
|
| 1531 |
+
" <h2 style=\"color:#181;\">Business relevance</h2>\n",
|
| 1532 |
+
" <span style=\"color:#181;\">This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.</span>\n",
|
| 1533 |
+
" </td>\n",
|
| 1534 |
+
" </tr>\n",
|
| 1535 |
+
"</table>"
|
| 1536 |
+
]
|
| 1537 |
+
},
|
| 1538 |
+
{
|
| 1539 |
+
"cell_type": "code",
|
| 1540 |
+
"execution_count": null,
|
| 1541 |
+
"id": "c23224f6-7008-44ed-a57f-718975f4e291",
|
| 1542 |
+
"metadata": {},
|
| 1543 |
+
"outputs": [],
|
| 1544 |
+
"source": [
|
| 1545 |
+
"# Define Groq model\n",
|
| 1546 |
+
"groq_model = \"llama3-8b-8192\" # or another Groq model\n",
|
| 1547 |
+
"\n",
|
| 1548 |
+
"def call_groq():\n",
|
| 1549 |
+
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
|
| 1550 |
+
" for groq, gemini in zip(gpt_messages, claude_messages):\n",
|
| 1551 |
+
" messages.append({\"role\": \"assistant\", \"content\": groq})\n",
|
| 1552 |
+
" messages.append({\"role\": \"user\", \"content\": gemini})\n",
|
| 1553 |
+
" completion = groq_client.chat.completions.create(\n",
|
| 1554 |
+
" model=groq_model,\n",
|
| 1555 |
+
" messages=messages\n",
|
| 1556 |
+
" )\n",
|
| 1557 |
+
" return completion.choices[0].message.content"
|
| 1558 |
+
]
|
| 1559 |
+
}
|
| 1560 |
+
],
|
| 1561 |
+
"metadata": {
|
| 1562 |
+
"kernelspec": {
|
| 1563 |
+
"display_name": "llms",
|
| 1564 |
+
"language": "python",
|
| 1565 |
+
"name": "python3"
|
| 1566 |
+
},
|
| 1567 |
+
"language_info": {
|
| 1568 |
+
"codemirror_mode": {
|
| 1569 |
+
"name": "ipython",
|
| 1570 |
+
"version": 3
|
| 1571 |
+
},
|
| 1572 |
+
"file_extension": ".py",
|
| 1573 |
+
"mimetype": "text/x-python",
|
| 1574 |
+
"name": "python",
|
| 1575 |
+
"nbconvert_exporter": "python",
|
| 1576 |
+
"pygments_lexer": "ipython3",
|
| 1577 |
+
"version": "3.11.13"
|
| 1578 |
+
}
|
| 1579 |
+
},
|
| 1580 |
+
"nbformat": 4,
|
| 1581 |
+
"nbformat_minor": 5
|
| 1582 |
+
}
|
js/languages.js
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Language management for AI Friends Talk
|
| 2 |
+
const languages = {
|
| 3 |
+
en: {
|
| 4 |
+
title: "AI Friends Talk",
|
| 5 |
+
subtitle: "Watch AI friends debate fun topics!",
|
| 6 |
+
instructions: "Choose a topic and watch Alex, Blake, and Charlie discuss it. You can also join the conversation!",
|
| 7 |
+
chooseTopic: "Choose a Topic",
|
| 8 |
+
selectTopic: "Select a fun topic for debate:",
|
| 9 |
+
customTopic: "Or enter your own topic:",
|
| 10 |
+
startConversation: "Start Conversation",
|
| 11 |
+
pauseConversation: "Pause",
|
| 12 |
+
continueConversation: "Continue",
|
| 13 |
+
clearChat: "Clear Chat",
|
| 14 |
+
addMessage: "Add Your Message",
|
| 15 |
+
typeMessage: "Type your message to join the conversation:",
|
| 16 |
+
sendMessage: "Send Message",
|
| 17 |
+
setTopic: "Set",
|
| 18 |
+
conversation: "Conversation",
|
| 19 |
+
topic: "Topic",
|
| 20 |
+
thinking: "is thinking...",
|
| 21 |
+
conversationComplete: "Conversation completed!",
|
| 22 |
+
you: "You",
|
| 23 |
+
madeBy: "Made by Digitized Brains",
|
| 24 |
+
// Characters
|
| 25 |
+
alex: "Alex - The witty debater (Groq AI)",
|
| 26 |
+
blake: "Blake - The creative optimist (Gemini 2.0)",
|
| 27 |
+
charlie: "Charlie - The logical analyst (Gemini 1.5)",
|
| 28 |
+
// Topics
|
| 29 |
+
topics: [
|
| 30 |
+
"If animals could use smartphones, which app would be most popular?",
|
| 31 |
+
"What would happen if gravity worked backwards for one day?",
|
| 32 |
+
"Should pineapple on pizza be considered a crime?",
|
| 33 |
+
"If you could add a 13th month to the year, what would you name it?",
|
| 34 |
+
"What's the most useless superpower you can think of?",
|
| 35 |
+
"If colors had personalities, what would each color be like?",
|
| 36 |
+
"Should robots have to pay taxes?",
|
| 37 |
+
"What would the world be like if everyone could read minds?",
|
| 38 |
+
"If you could make one rule that everyone had to follow, what would it be?",
|
| 39 |
+
"What's the weirdest food combination that actually tastes good?",
|
| 40 |
+
"If you could live inside any video game, which would you choose and why?",
|
| 41 |
+
"What would happen if all cats suddenly learned how to speak human language?",
|
| 42 |
+
"Should there be a maximum limit on how many selfies you can take per day?",
|
| 43 |
+
"If you could give any animal the ability to fly, which would be the funniest?",
|
| 44 |
+
"What's the most ridiculous thing humans do that aliens would find confusing?",
|
| 45 |
+
"If social media existed in medieval times, what would people post about?",
|
| 46 |
+
"Should there be professional competitions for everyday activities like making beds?",
|
| 47 |
+
"What would change if humans hibernated for 3 months every year?",
|
| 48 |
+
"If you could replace one everyday sound with any other sound, what would it be?",
|
| 49 |
+
"What's the most absurd job that could exist in the future?"
|
| 50 |
+
]
|
| 51 |
+
},
|
| 52 |
+
vi: {
|
| 53 |
+
title: "AI Friends Talk",
|
| 54 |
+
subtitle: "Xem các AI bạn tranh luận về những chủ đề vui vẻ!",
|
| 55 |
+
instructions: "Chọn một chủ đề và xem Alex, Blake và Charlie thảo luận về nó. Bạn cũng có thể tham gia cuộc trò chuyện!",
|
| 56 |
+
chooseTopic: "Chọn Chủ Đề",
|
| 57 |
+
selectTopic: "Chọn một chủ đề thú vị để tranh luận:",
|
| 58 |
+
customTopic: "Hoặc nhập chủ đề của riêng bạn:",
|
| 59 |
+
startConversation: "Bắt Đầu Trò Chuyện",
|
| 60 |
+
pauseConversation: "Tạm Dừng",
|
| 61 |
+
continueConversation: "Tiếp Tục",
|
| 62 |
+
clearChat: "Xóa Trò Chuyện",
|
| 63 |
+
addMessage: "Thêm Tin Nhắn Của Bạn",
|
| 64 |
+
typeMessage: "Nhập tin nhắn của bạn để tham gia cuộc trò chuyện:",
|
| 65 |
+
sendMessage: "Gửi Tin Nhắn",
|
| 66 |
+
setTopic: "Đặt",
|
| 67 |
+
conversation: "Cuộc Trò Chuyện",
|
| 68 |
+
topic: "Chủ đề",
|
| 69 |
+
thinking: "đang suy nghĩ...",
|
| 70 |
+
conversationComplete: "Cuộc trò chuyện hoàn thành!",
|
| 71 |
+
you: "Bạn",
|
| 72 |
+
madeBy: "Được tạo bởi Digitized Brains",
|
| 73 |
+
// Characters
|
| 74 |
+
alex: "Alex - Người tranh luận dí dỏm (Groq AI)",
|
| 75 |
+
blake: "Blake - Người lạc quan sáng tạo (Gemini 2.0)",
|
| 76 |
+
charlie: "Charlie - Nhà phân tích logic (Gemini 1.5)",
|
| 77 |
+
// Topics
|
| 78 |
+
topics: [
|
| 79 |
+
"Nếu động vật có thể sử dụng smartphone, ứng dụng nào sẽ phổ biến nhất?",
|
| 80 |
+
"Điều gì sẽ xảy ra nếu trọng lực hoạt động ngược lại trong một ngày?",
|
| 81 |
+
"Có nên coi dứa trên pizza là tội phạm không?",
|
| 82 |
+
"Nếu bạn có thể thêm tháng thứ 13 vào năm, bạn sẽ đặt tên gì?",
|
| 83 |
+
"Siêu năng lực vô dụng nhất mà bạn có thể nghĩ ra là gì?",
|
| 84 |
+
"Nếu màu sắc có tính cách, mỗi màu sẽ như thế nào?",
|
| 85 |
+
"Robot có nên phải trả thuế không?",
|
| 86 |
+
"Thế giới sẽ như thế nào n���u mọi người đều có thể đọc suy nghĩ?",
|
| 87 |
+
"Nếu bạn có thể đặt ra một quy tắc mà mọi người phải tuân theo, đó sẽ là gì?",
|
| 88 |
+
"Sự kết hợp thực phẩm kỳ lạ nhất mà thực sự ngon là gì?",
|
| 89 |
+
"Nếu bạn có thể sống trong bất kỳ trò chơi điện tử nào, bạn sẽ chọn cái nào và tại sao?",
|
| 90 |
+
"Điều gì sẽ xảy ra nếu tất cả mèo đột nhiên học được cách nói tiếng người?",
|
| 91 |
+
"Có nên có giới hạn tối đa về số lần selfie bạn có thể chụp mỗi ngày không?",
|
| 92 |
+
"Nếu bạn có thể cho bất kỳ động vật nào khả năng bay, con nào sẽ hài hước nhất?",
|
| 93 |
+
"Điều kỳ lạ nhất mà con người làm khiến người ngoài hành tinh cảm thấy khó hiểu là gì?",
|
| 94 |
+
"Nếu mạng xã hội tồn tại thời trung cổ, mọi người sẽ đăng gì?",
|
| 95 |
+
"Có nên có các cuộc thi chuyên nghiệp cho các hoạt động hàng ngày như dọn giường không?",
|
| 96 |
+
"Điều gì sẽ thay đổi nếu con người ngủ đông 3 tháng mỗi năm?",
|
| 97 |
+
"Nếu bạn có thể thay thế một âm thanh hàng ngày bằng âm thanh khác, đó sẽ là gì?",
|
| 98 |
+
"Công việc vô lý nhất có thể tồn tại trong tương lai là gì?"
|
| 99 |
+
]
|
| 100 |
+
},
|
| 101 |
+
de: {
|
| 102 |
+
title: "AI Friends Talk",
|
| 103 |
+
subtitle: "Schaue zu, wie AI-Freunde über lustige Themen diskutieren!",
|
| 104 |
+
instructions: "Wählen Sie ein Thema und schauen Sie zu, wie Alex, Blake und Charlie darüber diskutieren. Sie können auch am Gespräch teilnehmen!",
|
| 105 |
+
chooseTopic: "Wähle ein Thema",
|
| 106 |
+
selectTopic: "Wählen Sie ein lustiges Thema für die Diskussion:",
|
| 107 |
+
customTopic: "Oder geben Sie Ihr eigenes Thema ein:",
|
| 108 |
+
startConversation: "Gespräch Beginnen",
|
| 109 |
+
pauseConversation: "Pausieren",
|
| 110 |
+
continueConversation: "Fortsetzen",
|
| 111 |
+
clearChat: "Chat Löschen",
|
| 112 |
+
addMessage: "Ihre Nachricht Hinzufügen",
|
| 113 |
+
typeMessage: "Geben Sie Ihre Nachricht ein, um am Gespräch teilzunehmen:",
|
| 114 |
+
sendMessage: "Nachricht Senden",
|
| 115 |
+
setTopic: "Setzen",
|
| 116 |
+
conversation: "Unterhaltung",
|
| 117 |
+
topic: "Thema",
|
| 118 |
+
thinking: "denkt nach...",
|
| 119 |
+
conversationComplete: "Gespräch beendet!",
|
| 120 |
+
you: "Du",
|
| 121 |
+
madeBy: "Erstellt von Digitized Brains",
|
| 122 |
+
// Characters
|
| 123 |
+
alex: "Alex - Der witzige Debattierer (Groq AI)",
|
| 124 |
+
blake: "Blake - Der kreative Optimist (Gemini 2.0)",
|
| 125 |
+
charlie: "Charlie - Der logische Analyst (Gemini 1.5)",
|
| 126 |
+
// Topics
|
| 127 |
+
topics: [
|
| 128 |
+
"Wenn Tiere Smartphones benutzen könnten, welche App wäre am beliebtesten?",
|
| 129 |
+
"Was würde passieren, wenn die Schwerkraft einen Tag lang rückwärts wirken würde?",
|
| 130 |
+
"Sollte Ananas auf Pizza als Verbrechen betrachtet werden?",
|
| 131 |
+
"Wenn Sie einen 13. Monat zum Jahr hinzufügen könnten, wie würden Sie ihn nennen?",
|
| 132 |
+
"Was ist die nutzloseste Superkraft, die Sie sich vorstellen können?",
|
| 133 |
+
"Wenn Farben Persönlichkeiten hätten, wie wäre jede Farbe?",
|
| 134 |
+
"Sollten Roboter Steuern zahlen müssen?",
|
| 135 |
+
"Wie wäre die Welt, wenn jeder Gedanken lesen könnte?",
|
| 136 |
+
"Wenn Sie eine Regel aufstellen könnten, die jeder befolgen müsste, was wäre das?",
|
| 137 |
+
"Was ist die seltsamste Lebensmittelkombination, die tatsächlich gut schmeckt?",
|
| 138 |
+
"Wenn Sie in einem beliebigen Videospiel leben könnten, welches würden Sie wählen und warum?",
|
| 139 |
+
"Was würde passieren, wenn alle Katzen plötzlich die menschliche Sprache lernen würden?",
|
| 140 |
+
"Sollte es ein maximales Limit für Selfies geben, die man pro Tag machen kann?",
|
| 141 |
+
"Wenn Sie einem Tier die Fähigkeit zu fliegen geben könnten, welches wäre am lustigsten?",
|
| 142 |
+
"Was ist das Absurdeste, was Menschen tun und Außerirdische verwirrend finden würden?",
|
| 143 |
+
"Wenn soziale Medien im Mittelalter existiert hätten, worüber hätten die Leute gepostet?",
|
| 144 |
+
"Sollte es professionelle Wettbewerbe für alltägliche Aktivitäten wie Bettenmachen geben?",
|
| 145 |
+
"Was würde sich ändern, wenn Menschen 3 Monate im Jahr Winterschlaf halten würden?",
|
| 146 |
+
"Wenn Sie ein alltägliches Geräusch durch ein anderes ersetzen könnten, was wäre das?",
|
| 147 |
+
"Was ist der absurdeste Job, der in der Zukunft existieren könnte?"
|
| 148 |
+
]
|
| 149 |
+
}
|
| 150 |
+
};
|
| 151 |
+
|
| 152 |
+
// Language switching functionality.
// Currently active UI language code (e.g. 'en', 'vi', 'de').
let currentLanguage = 'en';

/**
 * Switch the interface to the given language, refresh every translated
 * element, and persist the choice for future page loads.
 * @param {string} languageCode - Key into the `languages` table.
 */
function setLanguage(languageCode) {
    currentLanguage = languageCode;
    updateUI();
    // Remember the choice so the next visit starts in this language.
    localStorage.setItem('preferredLanguage', languageCode);
}
|
| 161 |
+
|
| 162 |
+
/**
 * Re-render all translatable parts of the page in the active language:
 * text of elements tagged `data-i18n`, placeholders of elements tagged
 * `data-i18n-placeholder`, and the topic dropdown options.
 *
 * Fix: bail out when `currentLanguage` has no entry in `languages`
 * (previously `lang[key]` threw a TypeError); also drop the unused
 * `index` parameter from the topics forEach callback.
 */
function updateUI() {
    const lang = languages[currentLanguage];
    if (!lang) {
        // Unknown language code - keep the current UI rather than crashing.
        return;
    }

    // Translate every element tagged with a data-i18n key.
    document.querySelectorAll('[data-i18n]').forEach(element => {
        const key = element.getAttribute('data-i18n');
        if (lang[key]) {
            element.textContent = lang[key];
        }
    });

    // Translate input placeholders tagged with data-i18n-placeholder.
    document.querySelectorAll('[data-i18n-placeholder]').forEach(element => {
        const key = element.getAttribute('data-i18n-placeholder');
        if (lang[key]) {
            element.placeholder = lang[key];
        }
    });

    // Rebuild the topic dropdown with topics in the active language.
    const topicSelect = document.getElementById('topicSelect');
    if (topicSelect) {
        topicSelect.innerHTML = '';
        lang.topics.forEach(topic => {
            const option = document.createElement('option');
            option.value = topic;
            option.textContent = topic;
            topicSelect.appendChild(option);
        });
    }
}
|
| 193 |
+
|
| 194 |
+
// Initialize language on page load: a previously saved preference wins,
// then the browser locale, then the 'en' default baked into currentLanguage.
document.addEventListener('DOMContentLoaded', () => {
    const storedLanguage = localStorage.getItem('preferredLanguage');
    const browserLanguage = navigator.language.slice(0, 2);

    if (storedLanguage && languages[storedLanguage]) {
        currentLanguage = storedLanguage;
    } else if (languages[browserLanguage]) {
        currentLanguage = browserLanguage;
    }

    // Sync the dropdown with the chosen language and react to changes.
    const selector = document.getElementById('languageSelector');
    if (selector) {
        selector.value = currentLanguage;
        selector.addEventListener('change', (event) => {
            setLanguage(event.target.value);
        });
    }

    updateUI();
});
|
| 217 |
+
|
| 218 |
+
// Export for use in other modules (CommonJS/Node only; no-op in browsers).
// NOTE(review): `currentLanguage` is exported by value, so importers get a
// snapshot taken at require time ('en' unless changed earlier) and will NOT
// see later setLanguage() updates - confirm whether any consumer relies on it.
if (typeof module !== 'undefined' && module.exports) {
    module.exports = { languages, setLanguage, updateUI, currentLanguage };
}
|
requirements.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI Friends Talk - Gradio Version
|
| 2 |
+
# Requirements for deployment to Hugging Face Spaces or other platforms
|
| 3 |
+
|
| 4 |
+
# Core framework
|
| 5 |
+
gradio>=4.0.0,<6.0.0
|
| 6 |
+
|
| 7 |
+
# Environment variables
|
| 8 |
+
python-dotenv>=1.0.0
|
| 9 |
+
|
| 10 |
+
# AI/LLM API clients
|
| 11 |
+
openai>=1.0.0
|
| 12 |
+
groq>=0.30.0
|
| 13 |
+
|
| 14 |
+
# Alternative Google AI (if needed instead of OpenAI interface)
|
| 15 |
+
google-generativeai>=0.3.0
|
| 16 |
+
|
| 17 |
+
# HTTP requests (used by API clients)
|
| 18 |
+
requests>=2.31.0
|
| 19 |
+
|
| 20 |
+
# Note: Standard library modules (os, time, random) are built-in and don't need to be listed
|
requirements_ai_talk_gradio.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=4.0.0
|
| 2 |
+
python-dotenv>=1.0.0
|
| 3 |
+
openai>=1.0.0
|
| 4 |
+
groq>=0.4.0
|
safe_upload.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Safe Upload Script for Hugging Face Spaces
|
| 4 |
+
==========================================
|
| 5 |
+
This script safely uploads only necessary files to Hugging Face Spaces,
|
| 6 |
+
excluding sensitive files like .env, __pycache__, etc.
|
| 7 |
+
|
| 8 |
+
Usage:
|
| 9 |
+
python safe_upload.py your_username/your_space_name
|
| 10 |
+
|
| 11 |
+
Requirements:
|
| 12 |
+
pip install huggingface_hub
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import fnmatch
import os
import sys
from pathlib import Path

from huggingface_hub import HfApi
|
| 19 |
+
|
| 20 |
+
# Files to upload (whitelist approach - safer than uploading everything)
ALLOWED_FILES = [
    "AI_Talk_Gradio.py",
    "requirements.txt",
    ".gitignore",
    "README_Deploy.md"
]

# Files/folders to NEVER upload (blacklist - extra safety net)
BLOCKED_PATTERNS = [
    ".env",
    "__pycache__",
    ".gradio",
    "*.pyc",
    "*.pyo",
    "*.pyd",
    "*.pem",
    "*.key",
    "*.crt",
    "flagged",
    ".vscode",
    ".idea"
]

def is_safe_file(file_path):
    """Return True if *file_path* is safe to upload.

    A file is safe only when its basename is on the ALLOWED_FILES
    whitelist AND no entry of BLOCKED_PATTERNS matches it.  Wildcard
    entries (e.g. "*.pyc") are matched with fnmatch against the
    basename; plain entries are matched as case-insensitive substrings
    of the full path.
    """
    file_name = os.path.basename(file_path)

    # Whitelist check first: anything not explicitly allowed is rejected.
    if file_name not in ALLOWED_FILES:
        return False

    # Extra safety: reject anything matching a blocked pattern.
    # Bug fix: the old substring test (`pattern in file_path.lower()`)
    # could never match wildcard patterns such as "*.pyc" because a real
    # path never contains a literal "*"; glob entries now use fnmatch.
    lowered_path = file_path.lower()
    for pattern in BLOCKED_PATTERNS:
        if "*" in pattern:
            if fnmatch.fnmatch(file_name.lower(), pattern):
                return False
        elif pattern in lowered_path:
            return False

    return True
|
| 58 |
+
|
| 59 |
+
def safe_upload_to_hf(repo_id):
    """Upload the whitelisted project files to a Hugging Face Space.

    Prints progress to stdout.  Returns True on success, False when no
    safe files exist locally or any upload raises.
    """
    api = HfApi()

    print(f"🚀 Starting safe upload to {repo_id}")
    print("📋 Files to be uploaded:")

    # Keep only whitelisted files that exist and pass the safety filter.
    files_to_upload = []
    for candidate in (Path(".") / name for name in ALLOWED_FILES):
        if candidate.exists() and is_safe_file(str(candidate)):
            files_to_upload.append(candidate)
            print(f"  ✅ {candidate.name}")
        else:
            print(f"  ⚠️  {candidate.name} (not found or blocked)")

    if not files_to_upload:
        print("❌ No safe files found to upload!")
        return False

    print(f"\n📤 Uploading {len(files_to_upload)} files...")

    try:
        for file_path in files_to_upload:
            print(f"  Uploading {file_path.name}...")
            api.upload_file(
                path_or_fileobj=str(file_path),
                path_in_repo=file_path.name,
                repo_id=repo_id,
                repo_type="space"
            )
            print(f"  ✅ {file_path.name} uploaded successfully")
    except Exception as e:
        print(f"❌ Upload failed: {e}")
        return False

    print("\n🎉 Upload completed successfully!")
    print(f"🌐 Your Space: https://huggingface.co/spaces/{repo_id}")
    print("\n📝 Next steps:")
    print("1. Go to your Space settings")
    print("2. Add API keys as Repository secrets:")
    print("   - GROQ_API_KEY")
    print("   - GOOGLE_API_KEY")
    print("3. Your Space will auto-deploy!")
    return True
|
| 109 |
+
|
| 110 |
+
def main():
    """Command-line entry point: validate arguments, then run the upload."""
    if len(sys.argv) != 2:
        print("Usage: python safe_upload.py username/space_name")
        print("Example: python safe_upload.py ducnguyen1978/AI_Game")
        sys.exit(1)

    repo_id = sys.argv[1]

    # A Space id must look like "username/space_name".
    if "/" not in repo_id:
        print("❌ Invalid repo_id. Use format: username/space_name")
        sys.exit(1)

    print("🔒 Safe Upload for Hugging Face Spaces")
    print("=" * 40)
    print(f"Target Space: {repo_id}")
    print()

    # Exit non-zero so shell scripts can detect a failed upload.
    if not safe_upload_to_hf(repo_id):
        sys.exit(1)

if __name__ == "__main__":
    main()
|
start_ai_talk.bat
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@echo off
|
| 2 |
+
echo Starting AI Friends Talk with Gradio...
|
| 3 |
+
echo.
|
| 4 |
+
echo Make sure you have configured your API keys in .env file:
|
| 5 |
+
echo - GROQ_API_KEY (for Alex character)
|
| 6 |
+
echo - GOOGLE_API_KEY (for Blake and Charlie characters)
|
| 7 |
+
echo.
|
| 8 |
+
echo The interface will be available at: http://localhost:7860
|
| 9 |
+
echo Press Ctrl+C to stop the server
|
| 10 |
+
echo.
|
| 11 |
+
pause
|
| 12 |
+
python AI_Talk_Gradio.py
|
test_ai_talk.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Test script for AI_Talk.py core functionality
|
| 4 |
+
Tests real API calls to Groq and Gemini models
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import sys
|
| 9 |
+
from dotenv import load_dotenv
|
| 10 |
+
from openai import OpenAI
|
| 11 |
+
import anthropic
|
| 12 |
+
from groq import Groq
|
| 13 |
+
|
| 14 |
+
# Load environment variables
|
| 15 |
+
load_dotenv()
|
| 16 |
+
|
| 17 |
+
class AIFriendsTalkTest:
    """Smoke-test harness for the AI Friends Talk characters.

    Wires up the Groq and Gemini API clients the same way as the main
    app and exposes get_ai_response() for exercising each character
    with a real API call.
    """

    def __init__(self):
        self.setup_apis()
        self.setup_characters()

    def setup_apis(self):
        """Setup API clients following day1.ipynb structure."""
        print("Setting up APIs...")

        # Groq client for the Alex character (reads GROQ_API_KEY from env).
        self.groq_client = Groq()
        print("OK Groq client initialized")

        # Blake and Charlie both talk to Gemini through Google's
        # OpenAI-compatible endpoint, so one client serves both.
        google_api_key = os.getenv('GOOGLE_API_KEY')
        if google_api_key:
            self.gemini_client = OpenAI(
                api_key=google_api_key,
                base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
            )
            print("OK Gemini client initialized")
        else:
            # self.gemini_client stays unset; get_ai_response checks hasattr.
            print("ERROR Google API key not found")

    def setup_characters(self):
        """Define AI characters, their backends, and display colors."""
        self.characters = {
            "Alex": {
                "model": "groq",
                "model_name": "llama3-70b-8192",
                "personality": "You are Alex, a witty and charismatic AI debater who thrives on intellectual challenges. You have a sharp sense of humor and love to play devil's advocate. Respond with engaging messages that spark curiosity and debate.",
                "color": "#FF6B6B"
            },
            "Blake": {
                "model": "gemini2",
                "model_name": "gemini-2.0-flash",
                "personality": "You are Blake, an imaginative and boundlessly optimistic AI who sees magic and possibility in everything. You're a natural storyteller who loves to paint vivid pictures with words. Write poetic responses that inspire and delight.",
                "color": "#4ECDC4"
            },
            "Charlie": {
                "model": "gemini1.5",
                "model_name": "gemini-1.5-flash",
                "personality": "You are Charlie, a thoughtful and systematic AI analyst who approaches every topic with scientific curiosity and methodical thinking. You love to break down complex ideas and examine evidence. Provide detailed, structured responses.",
                "color": "#45B7D1"
            }
        }
        print("OK Characters configured")

    def _call_chat_model(self, client, model_name, prompt):
        """Send *prompt* to a chat-completions endpoint and return the text.

        The Groq client and the OpenAI-compatible Gemini client share this
        interface, so one helper replaces three duplicated call blocks.
        """
        response = client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=200,
            temperature=0.8
        )
        return response.choices[0].message.content

    def get_ai_response(self, character_name, topic, language="en"):
        """Get a response from a specific AI character.

        Args:
            character_name: Key into self.characters ("Alex"/"Blake"/"Charlie").
            topic: Discussion topic inserted into the prompt.
            language: "en", "vi", or "de"; unknown codes fall back to English
                (previously an unknown code raised KeyError).

        Returns:
            The model's reply text, or an explanatory error string when the
            backend is unavailable, unknown, or the API call fails.
        """
        character = self.characters[character_name]

        lang_instruction = {
            "en": "Please respond in English with an engaging message.",
            "vi": "Vui lòng trả lời bằng tiếng Việt với tin nhắn hấp dẫn.",
            "de": "Bitte antworten Sie auf Deutsch mit einer ansprechenden Nachricht."
        }
        # Fall back to English rather than raising KeyError on an
        # unsupported language code.
        instruction = lang_instruction.get(language, lang_instruction["en"])

        prompt = (
            f"{character['personality']}\n\n{instruction}\n\n"
            f"Topic: {topic}\n\nRespond as {character_name} with your thoughts on this topic:"
        )

        try:
            # Make REAL API calls to actual AI models.
            if character["model"] == "groq":
                print(f"Making API call to Groq for {character_name}...")
                return self._call_chat_model(
                    self.groq_client, character["model_name"], prompt)

            if character["model"] in ("gemini2", "gemini1.5"):
                if not hasattr(self, 'gemini_client'):
                    return (f"Sorry, {character_name} is unavailable "
                            "(Google API key not configured).")
                version = "2.0" if character["model"] == "gemini2" else "1.5"
                print(f"Making API call to Gemini {version} for {character_name}...")
                return self._call_chat_model(
                    self.gemini_client, character["model_name"], prompt)

            # Previously an unrecognized backend fell off the end of the
            # if-chain and the method silently returned None.
            return f"[{character_name}] Unknown model backend: {character['model']}"

        except Exception as e:
            error_msg = str(e)[:100] if str(e) else "Unknown error"
            return f"[{character_name}] API Error: {error_msg}..."
| 122 |
+
|
| 123 |
+
def main():
    """Run one real round-trip through every character on a fixed topic."""
    print("=== AI Friends Talk Test ===")
    print("Testing real API integration with Groq and Gemini models\n")

    # Initialize the AI system (sets up real API clients).
    ai_friends = AIFriendsTalkTest()

    test_topic = "Should AI have emotions like humans?"
    print(f"Test Topic: {test_topic}\n")

    # Exercise each character in turn; keep going even if one fails.
    for character in ("Alex", "Blake", "Charlie"):
        print(f"--- Testing {character} ---")
        try:
            reply = ai_friends.get_ai_response(character, test_topic)
            print(f"{character}: {reply}\n")
        except Exception as e:
            print(f"Error testing {character}: {e}\n")

    print("=== Test Complete ===")

if __name__ == "__main__":
    main()
|
test_gradio.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
|
| 3 |
+
def simple_test():
    """Return a fixed greeting proving the Gradio wiring works."""
    greeting = "Hello, Gradio is working!"
    return greeting
|
| 5 |
+
|
| 6 |
+
# Test simple interface: no inputs, a single text output driven by simple_test.
interface = gr.Interface(fn=simple_test, inputs=[], outputs="text")

if __name__ == "__main__":
    print("Starting Gradio test...")
    # Local-only launch (no public share link) on the app's usual port 7860.
    interface.launch(share=False, debug=True, server_port=7860)
|