FrankChang-TBLA committed on
Commit
6e0932f
·
1 Parent(s): 92267b3

migrate from app.py to app/main.py

Browse files
app/__init__.py ADDED
File without changes
app/config/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .base import get_app_name_and_config, AppName, REQUIRED_ATTRIBUTES, CONFIG_MAPPING
2
+
3
+ __all__ = ['get_app_name_and_config', 'AppName', 'REQUIRED_ATTRIBUTES', 'CONFIG_MAPPING']
app/config/base.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Dict, Type, Set
import importlib
import os

class AppName:
    """Enum-like namespace of supported application names."""
    NCSLM_LPD = 'NCSLM_LPD'
    SEL = 'SEL'
    SEL_COACH = 'SEL_COACH'

# Define required attributes that must be present in every config module.
REQUIRED_ATTRIBUTES: Set[str] = {
    'ASSISTANT_MODEL',
    'ASSISTANT_NAME',
    'ASSISTANT_DESCRIPTION',
    'ASSISTANT_INSTRUCTION',
    'RESPONSE_FORMAT',
    'SHOW_CANVAS',
    'CONVERSATION_STARTER_SAMPLES'
}

# Mapping of app names to their config module paths.
CONFIG_MAPPING: Dict[str, str] = {
    AppName.NCSLM_LPD: 'NCSLM_LPD.assistant_config',
    AppName.SEL: 'SEL.assistant_config',
    AppName.SEL_COACH: 'SEL_COACH.assistant_config',
    'default': 'assistant_config'
}

def validate_config(config: Type) -> tuple[bool, list[str]]:
    """
    Validate that the config has all required attributes.

    Args:
        config: The configuration module (or any object) to validate.
            NOTE: annotated `Type` historically; it is really a module object.

    Returns:
        tuple[bool, list[str]]: (is_valid, list of missing attributes)
    """
    missing_attrs = [attr for attr in REQUIRED_ATTRIBUTES if not hasattr(config, attr)]
    return not missing_attrs, missing_attrs

def get_app_name_and_config() -> tuple[str, Type]:
    """
    Get the appropriate configuration module based on the `app_name` env var.
    Validates that the config has all required attributes.

    Returns:
        tuple[str, Type]: (app_name, config module). app_name is the raw
        environment value and may be None when the variable is unset, in
        which case the default config is used.

    Raises:
        ImportError: If neither the selected nor the default config module
            can be imported.
        ValueError: If the loaded config is missing required attributes and
            no valid fallback exists.
    """
    app_name = os.getenv('app_name')
    config_path = CONFIG_MAPPING.get(app_name, CONFIG_MAPPING['default'])

    try:
        config = importlib.import_module(config_path)
    except ImportError as e:
        print(f"Error importing config {config_path}: {e}")
        # Fall back to the default config. Keep config_path in sync so the
        # validation messages and the fallback branch below refer to the
        # module that was actually loaded (the original left it stale).
        config_path = CONFIG_MAPPING['default']
        config = importlib.import_module(config_path)

    # Validate the config
    is_valid, missing_attrs = validate_config(config)
    if not is_valid:
        error_msg = f"Config {config_path} is missing required attributes: {missing_attrs}"
        print(f"Warning: {error_msg}")

        # Try to load default config if current config is invalid
        if config_path != CONFIG_MAPPING['default']:
            print("Attempting to load default config...")
            default_config = importlib.import_module(CONFIG_MAPPING['default'])
            is_valid, missing_attrs = validate_config(default_config)
            if not is_valid:
                raise ValueError(f"Default config is also invalid. Missing: {missing_attrs}")
            # BUGFIX: previously returned the bare module, breaking the
            # caller's `app_name, active_config = ...` tuple unpacking.
            return app_name, default_config
        else:
            raise ValueError(error_msg)

    return app_name, config
app/handlers/__init__.py ADDED
File without changes
app/handlers/event_handler.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing_extensions import override
2
+ from openai import AssistantEventHandler
3
+ from openai.types.beta.threads import Text, TextDelta
4
+ from openai.types.beta.threads.runs import ToolCall
5
+
6
class EventHandler(AssistantEventHandler):
    """Handler for assistant events during streaming.

    Echoes text and tool-call activity to stdout as the run streams, so the
    server console mirrors what the assistant is producing.
    """

    @override
    def on_text_created(self, text: Text) -> None:
        """Called when the assistant starts a new text message."""
        # Plain string literal: the original used an f-string with no
        # placeholders (lint F541); runtime output is identical.
        print("\nassistant > ", end="", flush=True)

    @override
    def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None:
        """Called for each incremental text chunk; prints it immediately."""
        print(delta.value, end="", flush=True)

    @override
    def on_tool_call_created(self, tool_call: ToolCall) -> None:
        """Called when the assistant invokes a tool; logs the tool type."""
        print(f"\nassistant > {tool_call.type}\n", flush=True)
app/handlers/response_handler.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from typing import Generator, List, Tuple, Dict, Any
3
+
4
class ResponseHandler:
    """Bridges the Gradio UI and the OpenAI Assistants streaming API.

    App-specific behavior is delegated to the active config module, which
    must provide handle_thread_before_chat, handle_stream_delta and
    handle_stream_end (contract inferred from the calls below — confirm
    against the per-app assistant_config modules).
    """

    def __init__(self, chat_manager, app_name, active_config):
        # chat_manager: owns the OpenAI client, assistant id, and persistence.
        # app_name: identifier stored alongside saved chats.
        # active_config: per-app config module providing the handle_* hooks.
        self.chat_manager = chat_manager
        self.app_name = app_name
        self.active_config = active_config

    def handle_response(self, message: str, history: List[Tuple[str, str]],
                        textbox_content: str, username: str) -> Generator:
        """Handle the chat response.

        Streams an assistant run on the current thread and yields
        (chat_response, canvas_result, next_step_prompt) tuples as the text
        accumulates; after the stream ends, persists the chat and yields one
        final post-processed tuple.
        """
        # Get or create chat_id (which is the thread_id)
        chat_id = self.chat_manager.get_or_create_thread()

        # App-specific preprocessing before the run starts (presumably adds
        # the user message to the thread — confirm in the config module).
        preprocessed_data = self.active_config.handle_thread_before_chat(self.chat_manager.client, chat_id, message, history, textbox_content)

        full_response = ""      # raw accumulated assistant text
        canvas_result = ""      # lesson-plan/canvas text extracted by the config hook
        chat_response = ""      # chat-pane text extracted by the config hook
        next_step_prompt = [[]]  # suggested follow-up prompts (Dataset samples shape)
        prompt_tokens = 0
        total_tokens = 0

        try:
            print(f"Processing request with chat_id: {chat_id}")
            with self.chat_manager.client.beta.threads.runs.stream(
                thread_id=chat_id,
                assistant_id=self.chat_manager.assistant_id,
                timeout=60
            ) as stream:
                try:
                    # Iterate raw stream events rather than using an
                    # AssistantEventHandler, so usage/failure events can be
                    # inspected inline.
                    for stream_delta in stream:
                        if not stream_delta:
                            print("Warning: Empty text delta received", flush=True)
                            continue

                        if stream_delta.event == 'thread.run.failed':
                            print(f"Stream delta received: {stream_delta}", flush=True)
                            # Surface the API's error payload to the user when
                            # available; otherwise a generic message.
                            if hasattr(stream_delta.data, 'last_error') and stream_delta.data.last_error is not None:
                                chat_response = stream_delta.data.last_error.model_dump_json()
                            else:
                                chat_response = "Sorry, there was an error processing the response. Please try again."
                            # NOTE(review): failure only sets chat_response and
                            # continues; handle_stream_end still runs afterwards.
                            continue
                        elif stream_delta.event != 'thread.message.delta':
                            # Log every non-text event for debugging.
                            print(f"Stream delta received: {stream_delta}", flush=True)

                        if stream_delta.event == 'thread.run.step.completed' and hasattr(stream_delta, 'data'):
                            # Capture token usage reported at step completion.
                            if hasattr(stream_delta.data, 'usage'):
                                prompt_tokens = stream_delta.data.usage.prompt_tokens
                                total_tokens = stream_delta.data.usage.total_tokens
                                print(f"\nPrompt tokens: {prompt_tokens}, Total tokens: {total_tokens}\n", flush=True)
                            continue

                        # From here on the event is a thread.message.delta;
                        # accumulate its first content part's text.
                        if stream_delta.data.delta.content[0].text:
                            full_response += stream_delta.data.delta.content[0].text.value

                        print(f"Accumulated response length: {len(full_response)}", flush=True)

                        # Skip parsing until there is enough text to be useful.
                        if len(full_response) < 2:
                            continue

                        # Let the app config split the raw text into chat pane,
                        # canvas pane, and suggested next-step prompts.
                        chat_response, canvas_result, next_step_prompt = self.active_config.handle_stream_delta(full_response)
                        yield chat_response, canvas_result, next_step_prompt

                except Exception as stream_error:
                    print(f"Stream processing error: {str(stream_error)}")
                    print(f"Error type: {type(stream_error).__name__}")
                    yield "Sorry, there was an error processing the response. Please try again.", "", [[]]

        except Exception as connection_error:
            print(f"Connection error: {str(connection_error)}")
            print(f"Error type: {type(connection_error).__name__}")
            yield "Sorry, the connection timed out. Please try again.", "", [[]]

        # print(f"Current Messages: {message}")
        # print(f"Suggestion: {chat_response}")
        # print(f"Current Response: {full_response}")
        print(f"Prompt tokens: {prompt_tokens}, Total tokens: {total_tokens}")

        # App-specific post-processing: produces the final UI values plus the
        # message records to persist.
        final_chat_response, final_canvas_result, final_next_step_prompt, msg_records = self.active_config.handle_stream_end(
            message, history, chat_response, textbox_content, full_response, canvas_result, preprocessed_data
        )

        self.chat_manager.save_chat(username, chat_id, msg_records, canvas_result, self.app_name)
        yield final_chat_response, final_canvas_result, final_next_step_prompt

    def handle_quick_response_click(self, selected: str) -> str:
        """Handle quick response selection.

        NOTE(review): `selected` is annotated str but is indexed with [0];
        Gradio Dataset click events pass the selected sample (a list) — the
        annotation appears inaccurate, verify against the wiring in main.py.
        """
        return selected[0]

    def handle_quick_response_samples(self, next_step_prompt: List[List[str]]) -> gr.Dataset:
        """Handle quick response samples: show the Dataset when there are
        suggestions, otherwise hide it behind a placeholder sample."""
        if len(next_step_prompt) > 0 and len(next_step_prompt[0]) > 0:
            return gr.Dataset(samples=next_step_prompt, visible=True)
        return gr.Dataset(samples=[['-']], visible=False)

    def _get_assistant_response(self, message: str) -> str:
        """Get response from the assistant (placeholder — not implemented)."""
        # Implementation of assistant response logic
        pass
app/main.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ from openai import OpenAI
4
+ from .config.base import get_app_name_and_config
5
+ from .handlers.event_handler import EventHandler
6
+ from .handlers.response_handler import ResponseHandler
7
+ from .utils.chat import ChatManager
8
+ from .utils.utils import print_assistant_info, check_password
9
+ from .ui.components import create_chatbot, create_textbox, create_prompt_input, create_quick_response, create_hidden_list, create_chat_selector, create_new_chat_button
10
+
11
+
12
# Initialize OpenAI client
# NOTE(review): OPENAI_API_KEY may be None when the env var is unset; the
# client would then fail on the first API call rather than here.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
client = OpenAI(api_key=OPENAI_API_KEY)

# Get the active config (selected by the `app_name` environment variable).
# Config loading is fatal: re-raise so the app does not start half-configured.
try:
    app_name, active_config = get_app_name_and_config()
except (ImportError, ValueError) as e:
    print(f"Fatal error loading config: {e}")
    raise

# View existing assistants.
# NOTE(review): the result is only consumed by the commented-out debug print
# below; `limit` is passed as a string while the API documents an integer —
# confirm the SDK coerces it.
existed_assistants = client.beta.assistants.list(
    order="desc",
    limit="20",
)

# print(len(existed_assistants.data), existed_assistants.data)

# Assistant settings pulled from the active per-app config module.
assistant_id = os.getenv('assistant_id')  # may be None if the env var is unset
ASSISTANT_MODEL = active_config.ASSISTANT_MODEL
ASSISTANT_NAME = active_config.ASSISTANT_NAME
ASSISTANT_DESCRIPTION = active_config.ASSISTANT_DESCRIPTION
ASSISTANT_INSTRUCTION = active_config.ASSISTANT_INSTRUCTION
RESPONSE_FORMAT = active_config.RESPONSE_FORMAT
VECTOR_STORE_NAME = "lesson-plan"
SHOW_CANVAS = active_config.SHOW_CANVAS
CONVERSATION_STARTER_SAMPLES = active_config.CONVERSATION_STARTER_SAMPLES

# Initialize chat history manager and the streaming response handler.
chat_manager = ChatManager(client, assistant_id)
response_handler = ResponseHandler(chat_manager, app_name, active_config)

# Retrieve assistant — best-effort: a failure is logged but does not stop
# startup (later runs against a bad assistant_id will fail at request time).
try:
    assistant = client.beta.assistants.retrieve(assistant_id)
    print(f"retrieve assistant success.")
    print_assistant_info(assistant)
except Exception as e:
    print(f"retrieve Assistant Fail: {e}")

# Components (created with render=False where applicable; rendered inside the
# layout below).
chatbot = create_chatbot()
textbox = create_textbox()
prompt_input = create_prompt_input()
quick_response = create_quick_response(CONVERSATION_STARTER_SAMPLES)
hidden_list = create_hidden_list()
chat_selector = create_chat_selector()
new_chat_btn = create_new_chat_button()

# UI gate password (shared with utils.check_password, which reads the same
# env var itself).
CORRECT_PASSWORD = os.getenv('ui_password')

# Functions
66
def clear_input():
    """Return an empty string, used to reset the prompt box after submit."""
    return ""
68
+
69
def chat_submit(message, history, textbox, username):
    """Stream one chat turn to the UI.

    Args:
        message: the user's prompt text (empty message yields nothing).
        history: current chatbot value, a list of {'role', 'content'} dicts.
        textbox: current lesson-plan/canvas text.
        username: logged-in user id, used for persistence.

    Yields:
        (chatbot messages, lesson-plan text, next-step prompt samples).
        Intermediate yields pass [[]] to keep quick-response samples hidden
        while streaming; the final yield exposes the real suggestions.
    """
    if not message:
        return
    msg_records = [{'role': msg['role'], 'content': msg['content']} for msg in history]
    msg_records.append({'role': 'user', 'content': message})

    # BUGFIX: initialize defaults so the final yield below cannot raise
    # NameError when the response generator yields nothing at all.
    suggestion, current_lesson_plan, next_step_prompt = "", textbox, [[]]

    response_generator = response_handler.handle_response(message, history, textbox, username)
    for suggestion, current_lesson_plan, next_step_prompt in response_generator:
        yield msg_records + [{'role': 'assistant', 'content': suggestion}], current_lesson_plan, [[]]
    yield msg_records + [{'role': 'assistant', 'content': suggestion}], current_lesson_plan, next_step_prompt
77
+
78
# Main UI
with gr.Blocks(css="""
.full-height {height: 100%}
""") as demo:
    # password UI popup — shown first; hidden once check_password succeeds.
    with gr.Group(visible=True) as password_popup:
        username_input = gr.Textbox(label="請輸入使用者名稱")
        password_input = gr.Textbox(label="請輸入密碼", type="password")
        submit_button = gr.Button("提交")
        error_message = gr.Textbox(label="", visible=False, interactive=False)

    # Store username in state
    user_state = gr.State(value="default_user")

    # Main UI — revealed by check_password after successful login.
    with gr.Group(visible=False) as main_ui:
        with gr.Row(equal_height=True, elem_classes="contain"):
            # NOTE(review): this column is created with render=False yet
            # chat_selector.render() is called inside it — confirm the
            # sidebar actually appears as intended.
            with gr.Column(scale=1, min_width=200, render=False):
                chat_selector.render()
            with gr.Column(scale=4):
                with gr.Row(equal_height=True, height="90vh"):
                    with gr.Column():
                        chatbot.render()
                        prompt_input.render()
                        quick_response.render()
                        hidden_list.render()
                    # Canvas column; hidden entirely when the active config
                    # sets SHOW_CANVAS = False.
                    with gr.Column(elem_classes="full-height", visible=SHOW_CANVAS) as textbox_column:
                        textbox.render()

    # Event handlers
    # Login via button click or pressing Enter in the password field.
    submit_button.click(
        check_password,
        inputs=[username_input, password_input],
        outputs=[password_popup, main_ui, error_message, user_state]
    )

    password_input.submit(
        check_password,
        inputs=[username_input, password_input],
        outputs=[password_popup, main_ui, error_message, user_state]
    )

    # Submit a prompt: stream (chatbot, canvas, suggestions), then clear the
    # prompt box.
    prompt_input.submit(
        chat_submit,
        inputs=[prompt_input, chatbot, textbox, user_state],
        outputs=[chatbot, textbox, hidden_list]
    ).then(
        clear_input,
        outputs=prompt_input
    )

    # Clicking a quick-response sample copies it into the prompt box.
    quick_response.click(
        response_handler.handle_quick_response_click,
        quick_response,
        prompt_input
    )

    # hidden_list carries next-step prompts; any change refreshes the
    # quick-response Dataset's samples/visibility.
    hidden_list.change(
        response_handler.handle_quick_response_samples,
        hidden_list,
        quick_response
    )

    # Update chat selector when page loads
    demo.load(
        chat_manager.reset_chat_on_load,
        inputs=[user_state],
        outputs=[chat_selector, chatbot]
    )

    # Handle chat switching
    chat_selector.change(
        chat_manager.switch_chat,
        inputs=[chat_selector, user_state],
        outputs=[chatbot]
    )

    new_chat_btn.click(
        chat_manager.create_new_chat,
        outputs=[chat_selector, chatbot]
    )

if __name__ == "__main__":
    demo.launch(debug=True)
app/ui/__init__.py ADDED
File without changes
app/ui/components.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr


def create_chatbot() -> gr.Chatbot:
    """Build the message-format chat display used as the conversation view."""
    return gr.Chatbot(type="messages", container=True, scale=8, autoscroll=True)


def create_textbox() -> gr.Textbox:
    """Build the lesson-plan editing pane (rendered manually in the layout)."""
    return gr.Textbox(
        label="教案編輯",
        lines=30,
        render=False,
        interactive=True,
        scale=1,
    )


def create_prompt_input() -> gr.Textbox:
    """Build the user prompt input with its inline submit button."""
    return gr.Textbox(label="用戶需求", submit_btn=True, render=False, scale=1)


def create_quick_response(samples) -> gr.Dataset:
    """Build the quick-response suggestion Dataset seeded with *samples*."""
    return gr.Dataset(
        samples=samples,
        components=[create_prompt_input()],
        render=False,
        scale=1,
    )


def create_chat_selector() -> gr.Dropdown:
    """Build the dropdown that switches between a user's saved chats."""
    return gr.Dropdown(
        label="Select Chat",
        choices=[],
        interactive=True,
        container=True,
        scale=0,
    )


def create_new_chat_button() -> gr.Button:
    """Build the small button that starts a fresh chat."""
    return gr.Button(
        "New Chat",
        size="sm",
    )


def create_hidden_list() -> gr.JSON:
    """Build the invisible JSON holder that carries next-step prompts."""
    return gr.JSON(value=[[]], render=False, visible=False)
app/utils/__init__.py ADDED
File without changes
app/utils/chat.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from openai import OpenAI
3
+ import gradio as gr
4
+ from typing import Tuple
5
+ from supabase import create_client
6
+ from datetime import datetime
7
class ChatManager:
    """Owns the active OpenAI assistant thread and persists chats in Supabase.

    The OpenAI thread id doubles as the chat_id, so one row in the Supabase
    "chats" table corresponds to one assistant thread.
    """

    def __init__(self, client: OpenAI, assistant_id: str, supabase_url: str = None, supabase_key: str = None):
        """Store the OpenAI client/assistant and connect to Supabase.

        Args:
            client: Configured OpenAI client.
            assistant_id: Assistant the runs execute against.
            supabase_url: Optional override for the SUPABASE_URL env var.
            supabase_key: Optional override for the SUPABASE_KEY env var.
        """
        self.client = client
        self.current_chat_id = None  # thread id of the chat currently shown
        self.assistant_id = assistant_id
        # Initialize Supabase client (was a stray no-op docstring-style
        # string statement in the middle of the body).
        self.supabase = create_client(
            supabase_url or os.environ.get("SUPABASE_URL"),
            supabase_key or os.environ.get("SUPABASE_KEY")
        )

    def create_new_chat(self):
        """Create a new chat using OpenAI thread ID as chat_id."""
        thread = self.client.beta.threads.create()
        self.current_chat_id = thread.id
        return self.current_chat_id

    def get_or_create_thread(self):
        """Return the active thread id, creating a new thread if none exists."""
        if not self.current_chat_id:
            self.current_chat_id = self.create_new_chat()
        return self.current_chat_id

    def reset_chat_on_load(self, username: str) -> Tuple[gr.update, list]:
        """Reset chat ID on page load and update the chat selector choices."""
        self.create_new_chat()

        # Update chat selector with available chats for this user
        chats = self.list_user_chats(username)
        return gr.update(choices=[(c["preview"], c["chat_id"]) for c in chats], value=self.current_chat_id), []

    def get_latest_chat_id(self, user_id="default_user"):
        """Return the chat_id of the user's most recently updated chat, or None.

        BUGFIX: referenced by load_chat_history but previously undefined
        (AttributeError at runtime); derived from list_user_chats' ordering.
        """
        chats = self.list_user_chats(user_id)
        return chats[0]["chat_id"] if chats else None

    def load_chat(self, user_id, chat_id):
        """Return the stored message list for one chat, or [] when not found.

        BUGFIX: referenced by load_chat_history and switch_chat but
        previously undefined (AttributeError at runtime); implemented against
        the same "chats" table schema that save_chat writes.
        """
        response = self.supabase.table("chats") \
            .select("messages") \
            .eq("user_id", user_id) \
            .eq("chat_id", chat_id) \
            .execute()
        if response.data:
            return response.data[0].get("messages") or []
        return []

    def load_chat_history(self, user_id="default_user"):
        """Load chat history for a user (their most recent chat, else [])."""
        chat_id = self.get_latest_chat_id(user_id)
        if chat_id:
            return self.load_chat(user_id, chat_id)
        return []

    def list_user_chats(self, user_id="default_user"):
        """List all chats for a user, newest first, each with a preview
        taken from the first stored message."""
        response = self.supabase.table("chats") \
            .select("chat_id,last_updated,messages") \
            .eq("user_id", user_id) \
            .execute()

        chats = [{
            "chat_id": chat["chat_id"],
            "last_updated": chat["last_updated"],
            "preview": chat["messages"][0]["content"] if chat["messages"] else "Empty chat"
        } for chat in response.data]

        return sorted(chats, key=lambda x: x["last_updated"], reverse=True)

    def switch_chat(self, chat_id, user_id="default_user"):
        """Switch to a different chat and return its stored messages."""
        self.current_chat_id = chat_id
        messages = self.load_chat(user_id, chat_id)
        return messages

    def save_chat(self, user_id, chat_id, messages, current_lesson_plan, app_name):
        """Upsert the chat row (messages + lesson plan) for this user/chat."""
        chat_data = {
            "user_id": user_id,
            "chat_id": chat_id,
            "last_updated": datetime.now().isoformat(),
            "messages": messages,
            "current_lesson_plan": current_lesson_plan,
            "app_name": app_name
        }

        # Check if chat exists
        existing_chat = self.supabase.table("chats") \
            .select("*") \
            .eq("user_id", user_id) \
            .eq("chat_id", chat_id) \
            .execute()

        if existing_chat.data:
            # Update existing chat
            self.supabase.table("chats") \
                .update(chat_data) \
                .eq("user_id", user_id) \
                .eq("chat_id", chat_id) \
                .execute()
        else:
            # Insert new chat
            self.supabase.table("chats") \
                .insert(chat_data) \
                .execute()
app/utils/utils.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import gradio as gr
3
+ import os
4
+
5
+ CORRECT_PASSWORD = os.getenv('ui_password')
6
+
7
def show_json(obj):
    """Dump a pydantic-style object's JSON representation to stdout as a dict."""
    parsed = json.loads(obj.model_dump_json())
    print(parsed)
9
+
10
+ def print_assistant_info(assistant):
11
+ """Print key information about an assistant object."""
12
+ # Get model
13
+ model = assistant.model
14
+
15
+ # Get description (limited to 2 lines)
16
+ desc_lines = assistant.description.split('\n')[:2]
17
+ desc = '\n'.join(desc_lines)
18
+ if len(desc) > 150: # Truncate if too long
19
+ desc = desc[:147] + "..."
20
+
21
+ # Get instructions (limited to 2 lines)
22
+ instr_lines = assistant.instructions.split('\n')[:2]
23
+ instr = '\n'.join(instr_lines)
24
+ if len(instr) > 150: # Truncate if too long
25
+ instr = instr[:147] + "..."
26
+
27
+ # Print information
28
+ print(f"Assistant ID: {assistant.id}")
29
+ print(f"Name: {assistant.name}")
30
+ print(f"Model: {model}")
31
+ print(f"Description: {desc}")
32
+ print(f"Instructions: {instr}")
33
+ print(f"Tools: {[tool.type for tool in assistant.tools]}")
34
+ print(f"Response Format: {assistant.response_format}")
35
+ print(f"Created at: {assistant.created_at}")
36
+
37
def check_password(username, input_password):
    """Validate the UI password and toggle popup/main visibility.

    Returns updates for (password_popup, main_ui, error_message, user_state).
    """
    if input_password != CORRECT_PASSWORD:
        # Wrong password: keep the popup open and surface the hint message.
        return gr.update(visible=True), gr.update(visible=False), gr.update(value="密码错误,请重试。hint: channel name", visible=True), username
    # Correct password: hide the popup, reveal the main UI, clear the error.
    return gr.update(visible=False), gr.update(visible=True), "", username