benisc90 committed on
Commit
46f6743
·
verified ·
1 Parent(s): 067b2ba

Upload folder using huggingface_hub

Browse files
hf_gradio_ai_app.py CHANGED
@@ -45,8 +45,35 @@ def initialize_ai_components():
45
  # Define the prompt template for the LLM
46
  prompt_template_str = """
47
  You are a helpful, friendly, and insightful AI assistant.
48
- Answer the user's question clearly, concisely, and in a conversational tone.
49
- If you don't know the answer or a question is ambiguous, ask for clarification or state that you don't know.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
  User Question: {user_input}
52
 
 
45
  # Define the prompt template for the LLM
46
  prompt_template_str = """
47
  You are a helpful, friendly, and insightful AI assistant.
48
+ You will be given access to a dataset that contains a single table. This table contains sample clinical information that was collected during clinical encounters.
49
+ The rows in the table are per-encounter. Patients who have more frequent clinical encounters will therefore have more rows in the table.
50
+ The table has the following columns:
51
+ index: int64
52
+ ENCOUNTER_ID: int64
53
+ CLINICAL_NOTES: string
54
+ BIRTHDATE: string
55
+ FIRST: string
56
+ START: string
57
+ STOP: string
58
+ PATIENT_ID: int64
59
+ ENCOUNTERCLASS: string
60
+ CODE: int64
61
+ DESCRIPTION: string
62
+ BASE_ENCOUNTER_COST: float64
63
+ TOTAL_CLAIM_COST: float64
64
+ PAYER_COVERAGE: float64
65
+ REASONCODE: float64
66
+ REASONDESCRIPTION: string
67
+ PATIENT_AGE: int64
68
+ DESCRIPTION_OBSERVATIONS: string
69
+ DESCRIPTION_CONDITIONS: string
70
+ DESCRIPTION_MEDICATIONS: string
71
+ DESCRIPTION_PROCEDURES: string
72
+ CLINICAL_NOTES-embeddings: string
73
+
74
+ The user will describe, in plain English, the type of query they would like to run on this clinical table.
75
+ Do your best to provide a SQL query that would return the data they are looking for.
76
+ If the user's prompt doesn't seem like a valid query request, just inform them that you cannot help with a task that is not query generation.
77
 
78
  User Question: {user_input}
79
 
hf_gradio_ai_app_Ben_Clinical_Query.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # gradio_ai_chatbot_dotenv.py
2
+ #
3
+ # To run this script:
4
+ # 1. Create a .env file in the same directory with your OPENAI_API_KEY.
5
+ # Example .env file content:
6
+ # OPENAI_API_KEY="sk-yourActualOpenAIapiKeyGoesHere"
7
+ # 2. Install the required packages:
8
+ # pip install gradio langchain openai langchain_openai python-dotenv
9
+ # 3. Run the script from your terminal:
10
+ # python gradio_ai_chatbot_dotenv.py
11
+ #
12
+ # The script will output a local URL and potentially a public Gradio link.
13
+
14
+ import gradio as gr
15
+ from langchain_openai import ChatOpenAI
16
+ from langchain.prompts import ChatPromptTemplate
17
+ import os
18
+ from dotenv import load_dotenv
19
+
20
# --- Load environment variables from .env file ---
load_dotenv()

# --- Global variables and Initial Setup ---
OPENAI_API_KEY_GLOBAL = os.getenv("OPENAI_API_KEY")  # None when the key is absent
LANGCHAIN_LLM = None                  # ChatOpenAI instance once initialized
LANGCHAIN_PROMPT_TEMPLATE = None      # ChatPromptTemplate once initialized
INITIAL_AI_SETUP_MESSAGE = ""         # HTML status/error string shown in the UI

def initialize_ai_components():
    """
    Initialize the LangChain components (LLM and prompt template) using the
    API key loaded from the environment.

    Side effects:
        Sets LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE and
        INITIAL_AI_SETUP_MESSAGE (an HTML status string rendered in the UI).

    Returns:
        bool: True if both components were created, False otherwise.
    """
    # Fix: OPENAI_API_KEY_GLOBAL is only read here, never assigned, so it
    # does not belong in the ``global`` statement.
    global LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE, INITIAL_AI_SETUP_MESSAGE

    if not OPENAI_API_KEY_GLOBAL:
        INITIAL_AI_SETUP_MESSAGE = "<p style='color:red; font-weight:bold;'>ERROR: OpenAI API Key not found. Please ensure it's in your .env file or environment variables.</p>"
        print("ERROR: OpenAI API Key not found. Make sure it's in your .env file or environment.")
        return False  # Indicate failure

    try:
        # Initialize the LangChain LLM (OpenAI chat model).
        LANGCHAIN_LLM = ChatOpenAI(openai_api_key=OPENAI_API_KEY_GLOBAL, model_name="gpt-4o-mini")

        # Prompt template: describes the clinical table schema and instructs
        # the model to translate plain-English requests into SQL.
        # NOTE(review): the column "CLINICAL_NOTES-embeddings" contains a
        # hyphen, so generated SQL must quote it — consider instructing the
        # model about that explicitly (left unchanged to preserve behavior).
        prompt_template_str = """
        You are a helpful, friendly, and insightful AI assistant.
        You will be given access to a dataset that contains a single table. This table contains sample clinical information that was collected during clinical encounters.
        The table has the following columns:
        index: int64
        ENCOUNTER_ID: int64
        CLINICAL_NOTES: string
        BIRTHDATE: string
        FIRST: string
        START: string
        STOP: string
        PATIENT_ID: int64
        ENCOUNTERCLASS: string
        CODE: int64
        DESCRIPTION: string
        BASE_ENCOUNTER_COST: float64
        TOTAL_CLAIM_COST: float64
        PAYER_COVERAGE: float64
        REASONCODE: float64
        REASONDESCRIPTION: string
        PATIENT_AGE: int64
        DESCRIPTION_OBSERVATIONS: string
        DESCRIPTION_CONDITIONS: string
        DESCRIPTION_MEDICATIONS: string
        DESCRIPTION_PROCEDURES: string
        CLINICAL_NOTES-embeddings: string

        The user will describe, in plain English, the type of query they would like to run on this clinical table.
        Do your best to provide a SQL query that would return the data they are looking for.
        If the user's prompt doesn't seem like a valid query request, just inform them that you cannot help with a task that is not query generation.

        User Question: {user_input}

        AI Response:
        """
        LANGCHAIN_PROMPT_TEMPLATE = ChatPromptTemplate.from_template(prompt_template_str)

        INITIAL_AI_SETUP_MESSAGE = "<p style='color:green; font-weight:bold;'>AI Components Initialized Successfully! Ready to chat.</p>"
        print("AI Components Initialized Successfully!")
        return True  # Indicate success
    except Exception as e:
        INITIAL_AI_SETUP_MESSAGE = f"<p style='color:red; font-weight:bold;'>ERROR: Failed to initialize AI components. Error: {str(e)}. Please check your API key and model access.</p>"
        LANGCHAIN_LLM = None
        LANGCHAIN_PROMPT_TEMPLATE = None
        print(f"ERROR: Failed to initialize AI components: {str(e)}")
        return False  # Indicate failure

# --- Attempt to initialize AI components when the script loads ---
AI_INITIALIZED_SUCCESSFULLY = initialize_ai_components()
95
+
96
def ai_chat_response_function(user_message, chat_history):
    """
    Core callback for Gradio's ChatInterface.

    Args:
        user_message (str): The latest message typed by the user.
        chat_history: Conversation history supplied by Gradio (unused; each
            question is answered independently).

    Returns:
        str: The AI's reply, or a human-readable error message.
    """
    if not AI_INITIALIZED_SUCCESSFULLY or not LANGCHAIN_LLM or not LANGCHAIN_PROMPT_TEMPLATE:
        # Strip any HTML markup from the stored setup message so the chat
        # window shows plain text.  Fix: the previous chained .replace()
        # calls only removed the specific red-error <p> wrapper; a regex
        # removes whatever tags the message carries.
        import re
        error_msg_text = re.sub(r"<[^>]+>", "", INITIAL_AI_SETUP_MESSAGE)
        return f"ERROR: AI is not ready. Status: {error_msg_text}"

    # Proceed with generating a response if components are ready.
    try:
        # Compose the chain (prompt | llm) and invoke it with the user input.
        chain = LANGCHAIN_PROMPT_TEMPLATE | LANGCHAIN_LLM
        ai_response = chain.invoke({"user_input": user_message})
        # Return only the text content of the AI's message object.
        return ai_response.content
    except Exception as e:
        print(f"Error during LangChain invocation: {e}")  # server-side log
        return f"Sorry, an error occurred while trying to get a response: {str(e)}"
120
+
121
# --- Gradio Interface Definition using gr.Blocks for layout control ---
# Resolve the pieces of UI configuration that depend on whether the AI
# components came up successfully, so the layout below stays declarative.
# NOTE(review): the example prompts are general-knowledge questions, while
# this app's prompt template is a SQL-generation assistant — consider
# replacing them with clinical-query examples.
_ai_ready = AI_INITIALIZED_SUCCESSFULLY
_bot_placeholder = ("AI's responses will appear here."
                    if _ai_ready else "AI is not available. Check setup status above.")
_send_label = "➡️ Send" if _ai_ready else None  # hide the button when not ready
_example_prompts = [
    "What is Paris, France known for?",
    "Explain the concept of a Large Language Model (LLM) simply.",
    "Can you give me a basic recipe for brownies?",
    "Tell me an interesting fact about sunflowers."
] if _ai_ready else None  # only offer examples when the AI is usable

with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky), title="AI Chatbot (Gradio)") as gradio_app:
    gr.Markdown(
        """
        # 🤖 AI Chatbot with Gradio, LangChain & OpenAI
        Powered by OpenAI's `gpt-4o-mini` model.
        OpenAI API Key is loaded from your `.env` file.
        """
    )

    # Surface the initialization status (success or error) to the user.
    gr.Markdown(INITIAL_AI_SETUP_MESSAGE)
    gr.Markdown("---")  # visual separator
    gr.Markdown("## Chat Interface")

    # Main chat widget; all chat logic lives in ai_chat_response_function.
    chat_interface_component = gr.ChatInterface(
        fn=ai_chat_response_function,
        chatbot=gr.Chatbot(
            height=550,
            show_label=False,
            placeholder=_bot_placeholder,
            avatar_images=("https://raw.githubusercontent.com/svgmoji/svgmoji/main/packages/svgmoji__openmoji/svg/1F468-1F3FB-200D-1F9B0.svg", "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/icons/huggingface-logo.svg"),
            type='messages'
        ),
        textbox=gr.Textbox(
            placeholder="Type your message here and press Enter...",
            show_label=False,
            scale=7,
            interactive=_ai_ready  # input disabled when initialization failed
        ),
        submit_btn=_send_label,
        examples=_example_prompts,
        title=None,
        autofocus=True
    )
    # When initialization failed, the disabled textbox, hidden button and the
    # guard inside ai_chat_response_function already prevent interaction;
    # ChatInterface has no single interactive=False switch.

# --- Main execution block to launch the Gradio app ---
if __name__ == '__main__':
    print("Attempting to launch Gradio App...")
    if not OPENAI_API_KEY_GLOBAL:
        print("WARNING: OpenAI API Key was not found in environment variables or .env file.")
        print("The application UI will launch, but AI functionality will be disabled.")
        print("Please create a .env file with your OPENAI_API_KEY.")

    gradio_app.launch(share=True, debug=True)
hf_gradio_ai_app_Ben_Clinical_Query_draft2.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # gradio_ai_chatbot_dotenv.py
2
+ #
3
+ # To run this script:
4
+ # 1. Create a .env file in the same directory with your OPENAI_API_KEY.
5
+ # Example .env file content:
6
+ # OPENAI_API_KEY="sk-yourActualOpenAIapiKeyGoesHere"
7
+ # 2. Install the required packages:
8
+ # pip install gradio langchain openai langchain_openai python-dotenv
9
+ # 3. Run the script from your terminal:
10
+ # python gradio_ai_chatbot_dotenv.py
11
+ #
12
+ # The script will output a local URL and potentially a public Gradio link.
13
+
14
+ import gradio as gr
15
+ from langchain_openai import ChatOpenAI
16
+ from langchain.prompts import ChatPromptTemplate
17
+ import os
18
+ from dotenv import load_dotenv
19
+
20
# --- Load environment variables from .env file ---
load_dotenv()

# --- Global variables and Initial Setup ---
OPENAI_API_KEY_GLOBAL = os.getenv("OPENAI_API_KEY")  # None when the key is absent
LANGCHAIN_LLM = None                  # ChatOpenAI instance once initialized
LANGCHAIN_PROMPT_TEMPLATE = None      # ChatPromptTemplate once initialized
INITIAL_AI_SETUP_MESSAGE = ""         # HTML status/error string shown in the UI

def initialize_ai_components():
    """
    Initialize the LangChain components (LLM and prompt template) using the
    API key loaded from the environment.

    Side effects:
        Sets LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE and
        INITIAL_AI_SETUP_MESSAGE (an HTML status string rendered in the UI).

    Returns:
        bool: True if both components were created, False otherwise.
    """
    # Fix: OPENAI_API_KEY_GLOBAL is only read here, never assigned, so it
    # does not belong in the ``global`` statement.
    global LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE, INITIAL_AI_SETUP_MESSAGE

    if not OPENAI_API_KEY_GLOBAL:
        INITIAL_AI_SETUP_MESSAGE = "<p style='color:red; font-weight:bold;'>ERROR: OpenAI API Key not found. Please ensure it's in your .env file or environment variables.</p>"
        print("ERROR: OpenAI API Key not found. Make sure it's in your .env file or environment.")
        return False  # Indicate failure

    try:
        # Initialize the LangChain LLM (OpenAI chat model).
        LANGCHAIN_LLM = ChatOpenAI(openai_api_key=OPENAI_API_KEY_GLOBAL, model_name="gpt-4o-mini")

        # Prompt template: describes the per-encounter clinical table schema
        # and instructs the model to translate plain-English requests into SQL.
        # NOTE(review): the column "CLINICAL_NOTES-embeddings" contains a
        # hyphen, so generated SQL must quote it — consider instructing the
        # model about that explicitly (left unchanged to preserve behavior).
        prompt_template_str = """
        You are a helpful, friendly, and insightful AI assistant.
        You will be given access to a dataset that contains a single table. This table contains sample clinical information that was collected during clinical encounters.
        The rows in the table are per-encounter. Patients who have more frequent clinical encounters will therefore have more rows in the table.
        The table has the following columns:
        index: int64
        ENCOUNTER_ID: int64
        CLINICAL_NOTES: string
        BIRTHDATE: string
        FIRST: string
        START: string
        STOP: string
        PATIENT_ID: int64
        ENCOUNTERCLASS: string
        CODE: int64
        DESCRIPTION: string
        BASE_ENCOUNTER_COST: float64
        TOTAL_CLAIM_COST: float64
        PAYER_COVERAGE: float64
        REASONCODE: float64
        REASONDESCRIPTION: string
        PATIENT_AGE: int64
        DESCRIPTION_OBSERVATIONS: string
        DESCRIPTION_CONDITIONS: string
        DESCRIPTION_MEDICATIONS: string
        DESCRIPTION_PROCEDURES: string
        CLINICAL_NOTES-embeddings: string

        The user will describe, in plain English, the type of query they would like to run on this clinical table.
        Do your best to provide a SQL query that would return the data they are looking for.
        If the user's prompt doesn't seem like a valid query request, just inform them that you cannot help with a task that is not query generation.

        User Question: {user_input}

        AI Response:
        """
        LANGCHAIN_PROMPT_TEMPLATE = ChatPromptTemplate.from_template(prompt_template_str)

        INITIAL_AI_SETUP_MESSAGE = "<p style='color:green; font-weight:bold;'>AI Components Initialized Successfully! Ready to chat.</p>"
        print("AI Components Initialized Successfully!")
        return True  # Indicate success
    except Exception as e:
        INITIAL_AI_SETUP_MESSAGE = f"<p style='color:red; font-weight:bold;'>ERROR: Failed to initialize AI components. Error: {str(e)}. Please check your API key and model access.</p>"
        LANGCHAIN_LLM = None
        LANGCHAIN_PROMPT_TEMPLATE = None
        print(f"ERROR: Failed to initialize AI components: {str(e)}")
        return False  # Indicate failure

# --- Attempt to initialize AI components when the script loads ---
AI_INITIALIZED_SUCCESSFULLY = initialize_ai_components()
96
+
97
def ai_chat_response_function(user_message, chat_history):
    """
    Core callback for Gradio's ChatInterface.

    Args:
        user_message (str): The latest message typed by the user.
        chat_history: Conversation history supplied by Gradio (unused; each
            question is answered independently).

    Returns:
        str: The AI's reply, or a human-readable error message.
    """
    if not AI_INITIALIZED_SUCCESSFULLY or not LANGCHAIN_LLM or not LANGCHAIN_PROMPT_TEMPLATE:
        # Strip any HTML markup from the stored setup message so the chat
        # window shows plain text.  Fix: the previous chained .replace()
        # calls only removed the specific red-error <p> wrapper; a regex
        # removes whatever tags the message carries.
        import re
        error_msg_text = re.sub(r"<[^>]+>", "", INITIAL_AI_SETUP_MESSAGE)
        return f"ERROR: AI is not ready. Status: {error_msg_text}"

    # Proceed with generating a response if components are ready.
    try:
        # Compose the chain (prompt | llm) and invoke it with the user input.
        chain = LANGCHAIN_PROMPT_TEMPLATE | LANGCHAIN_LLM
        ai_response = chain.invoke({"user_input": user_message})
        # Return only the text content of the AI's message object.
        return ai_response.content
    except Exception as e:
        print(f"Error during LangChain invocation: {e}")  # server-side log
        return f"Sorry, an error occurred while trying to get a response: {str(e)}"
121
+
122
# --- Gradio Interface Definition using gr.Blocks for layout control ---
# Resolve the pieces of UI configuration that depend on whether the AI
# components came up successfully, so the layout below stays declarative.
# NOTE(review): the example prompts are general-knowledge questions, while
# this app's prompt template is a SQL-generation assistant — consider
# replacing them with clinical-query examples.
_ai_ready = AI_INITIALIZED_SUCCESSFULLY
_bot_placeholder = ("AI's responses will appear here."
                    if _ai_ready else "AI is not available. Check setup status above.")
_send_label = "➡️ Send" if _ai_ready else None  # hide the button when not ready
_example_prompts = [
    "What is Paris, France known for?",
    "Explain the concept of a Large Language Model (LLM) simply.",
    "Can you give me a basic recipe for brownies?",
    "Tell me an interesting fact about sunflowers."
] if _ai_ready else None  # only offer examples when the AI is usable

with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky), title="AI Chatbot (Gradio)") as gradio_app:
    gr.Markdown(
        """
        # 🤖 AI Chatbot with Gradio, LangChain & OpenAI
        Powered by OpenAI's `gpt-4o-mini` model.
        OpenAI API Key is loaded from your `.env` file.
        """
    )

    # Surface the initialization status (success or error) to the user.
    gr.Markdown(INITIAL_AI_SETUP_MESSAGE)
    gr.Markdown("---")  # visual separator
    gr.Markdown("## Chat Interface")

    # Main chat widget; all chat logic lives in ai_chat_response_function.
    chat_interface_component = gr.ChatInterface(
        fn=ai_chat_response_function,
        chatbot=gr.Chatbot(
            height=550,
            show_label=False,
            placeholder=_bot_placeholder,
            avatar_images=("https://raw.githubusercontent.com/svgmoji/svgmoji/main/packages/svgmoji__openmoji/svg/1F468-1F3FB-200D-1F9B0.svg", "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/icons/huggingface-logo.svg"),
            type='messages'
        ),
        textbox=gr.Textbox(
            placeholder="Type your message here and press Enter...",
            show_label=False,
            scale=7,
            interactive=_ai_ready  # input disabled when initialization failed
        ),
        submit_btn=_send_label,
        examples=_example_prompts,
        title=None,
        autofocus=True
    )
    # When initialization failed, the disabled textbox, hidden button and the
    # guard inside ai_chat_response_function already prevent interaction;
    # ChatInterface has no single interactive=False switch.

# --- Main execution block to launch the Gradio app ---
if __name__ == '__main__':
    print("Attempting to launch Gradio App...")
    if not OPENAI_API_KEY_GLOBAL:
        print("WARNING: OpenAI API Key was not found in environment variables or .env file.")
        print("The application UI will launch, but AI functionality will be disabled.")
        print("Please create a .env file with your OPENAI_API_KEY.")

    gradio_app.launch(share=True, debug=True)
hf_gradio_ai_app_Ben_General_Query.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # gradio_ai_chatbot_dotenv.py
2
+ #
3
+ # To run this script:
4
+ # 1. Create a .env file in the same directory with your OPENAI_API_KEY.
5
+ # Example .env file content:
6
+ # OPENAI_API_KEY="sk-yourActualOpenAIapiKeyGoesHere"
7
+ # 2. Install the required packages:
8
+ # pip install gradio langchain openai langchain_openai python-dotenv
9
+ # 3. Run the script from your terminal:
10
+ # python gradio_ai_chatbot_dotenv.py
11
+ #
12
+ # The script will output a local URL and potentially a public Gradio link.
13
+
14
+ import gradio as gr
15
+ from langchain_openai import ChatOpenAI
16
+ from langchain.prompts import ChatPromptTemplate
17
+ import os
18
+ from dotenv import load_dotenv
19
+
20
# --- Load environment variables from .env file ---
load_dotenv()

# --- Global variables and Initial Setup ---
OPENAI_API_KEY_GLOBAL = os.getenv("OPENAI_API_KEY")  # None when the key is absent
LANGCHAIN_LLM = None                  # ChatOpenAI instance once initialized
LANGCHAIN_PROMPT_TEMPLATE = None      # ChatPromptTemplate once initialized
INITIAL_AI_SETUP_MESSAGE = ""         # HTML status/error string shown in the UI

def initialize_ai_components():
    """
    Initialize the LangChain components (LLM and prompt template) using the
    API key loaded from the environment.

    Side effects:
        Sets LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE and
        INITIAL_AI_SETUP_MESSAGE (an HTML status string rendered in the UI).

    Returns:
        bool: True if both components were created, False otherwise.
    """
    # Fix: OPENAI_API_KEY_GLOBAL is only read here, never assigned, so it
    # does not belong in the ``global`` statement.
    global LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE, INITIAL_AI_SETUP_MESSAGE

    if not OPENAI_API_KEY_GLOBAL:
        INITIAL_AI_SETUP_MESSAGE = "<p style='color:red; font-weight:bold;'>ERROR: OpenAI API Key not found. Please ensure it's in your .env file or environment variables.</p>"
        print("ERROR: OpenAI API Key not found. Make sure it's in your .env file or environment.")
        return False  # Indicate failure

    try:
        # Initialize the LangChain LLM (OpenAI chat model).
        LANGCHAIN_LLM = ChatOpenAI(openai_api_key=OPENAI_API_KEY_GLOBAL, model_name="gpt-4o-mini")

        # Prompt template: instructs the model to translate plain-English
        # requests into SQL for an (unspecified) dataset.
        prompt_template_str = """
        You are a helpful, friendly, and insightful AI assistant.
        The user will describe, in plain English, the type of query they would like to run on a dataset.
        Do your best to provide a SQL query that would return the data they are looking for.
        If the user's prompt doesn't seem like a valid query request, just inform them that you cannot help with a task that is not query generation.

        User Question: {user_input}

        AI Response:
        """
        LANGCHAIN_PROMPT_TEMPLATE = ChatPromptTemplate.from_template(prompt_template_str)

        INITIAL_AI_SETUP_MESSAGE = "<p style='color:green; font-weight:bold;'>AI Components Initialized Successfully! Ready to chat.</p>"
        print("AI Components Initialized Successfully!")
        return True  # Indicate success
    except Exception as e:
        INITIAL_AI_SETUP_MESSAGE = f"<p style='color:red; font-weight:bold;'>ERROR: Failed to initialize AI components. Error: {str(e)}. Please check your API key and model access.</p>"
        LANGCHAIN_LLM = None
        LANGCHAIN_PROMPT_TEMPLATE = None
        print(f"ERROR: Failed to initialize AI components: {str(e)}")
        return False  # Indicate failure

# --- Attempt to initialize AI components when the script loads ---
AI_INITIALIZED_SUCCESSFULLY = initialize_ai_components()
70
+
71
def ai_chat_response_function(user_message, chat_history):
    """
    Core callback for Gradio's ChatInterface.

    Args:
        user_message (str): The latest message typed by the user.
        chat_history: Conversation history supplied by Gradio (unused; each
            question is answered independently).

    Returns:
        str: The AI's reply, or a human-readable error message.
    """
    if not AI_INITIALIZED_SUCCESSFULLY or not LANGCHAIN_LLM or not LANGCHAIN_PROMPT_TEMPLATE:
        # Strip any HTML markup from the stored setup message so the chat
        # window shows plain text.  Fix: the previous chained .replace()
        # calls only removed the specific red-error <p> wrapper; a regex
        # removes whatever tags the message carries.
        import re
        error_msg_text = re.sub(r"<[^>]+>", "", INITIAL_AI_SETUP_MESSAGE)
        return f"ERROR: AI is not ready. Status: {error_msg_text}"

    # Proceed with generating a response if components are ready.
    try:
        # Compose the chain (prompt | llm) and invoke it with the user input.
        chain = LANGCHAIN_PROMPT_TEMPLATE | LANGCHAIN_LLM
        ai_response = chain.invoke({"user_input": user_message})
        # Return only the text content of the AI's message object.
        return ai_response.content
    except Exception as e:
        print(f"Error during LangChain invocation: {e}")  # server-side log
        return f"Sorry, an error occurred while trying to get a response: {str(e)}"
95
+
96
# --- Gradio Interface Definition using gr.Blocks for layout control ---
# Resolve the pieces of UI configuration that depend on whether the AI
# components came up successfully, so the layout below stays declarative.
# NOTE(review): the example prompts are general-knowledge questions, while
# this app's prompt template is a SQL-generation assistant — consider
# replacing them with query-style examples.
_ai_ready = AI_INITIALIZED_SUCCESSFULLY
_bot_placeholder = ("AI's responses will appear here."
                    if _ai_ready else "AI is not available. Check setup status above.")
_send_label = "➡️ Send" if _ai_ready else None  # hide the button when not ready
_example_prompts = [
    "What is Paris, France known for?",
    "Explain the concept of a Large Language Model (LLM) simply.",
    "Can you give me a basic recipe for brownies?",
    "Tell me an interesting fact about sunflowers."
] if _ai_ready else None  # only offer examples when the AI is usable

with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky), title="AI Chatbot (Gradio)") as gradio_app:
    gr.Markdown(
        """
        # 🤖 AI Chatbot with Gradio, LangChain & OpenAI
        Powered by OpenAI's `gpt-4o-mini` model.
        OpenAI API Key is loaded from your `.env` file.
        """
    )

    # Surface the initialization status (success or error) to the user.
    gr.Markdown(INITIAL_AI_SETUP_MESSAGE)
    gr.Markdown("---")  # visual separator
    gr.Markdown("## Chat Interface")

    # Main chat widget; all chat logic lives in ai_chat_response_function.
    chat_interface_component = gr.ChatInterface(
        fn=ai_chat_response_function,
        chatbot=gr.Chatbot(
            height=550,
            show_label=False,
            placeholder=_bot_placeholder,
            avatar_images=("https://raw.githubusercontent.com/svgmoji/svgmoji/main/packages/svgmoji__openmoji/svg/1F468-1F3FB-200D-1F9B0.svg", "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/icons/huggingface-logo.svg"),
            type='messages'
        ),
        textbox=gr.Textbox(
            placeholder="Type your message here and press Enter...",
            show_label=False,
            scale=7,
            interactive=_ai_ready  # input disabled when initialization failed
        ),
        submit_btn=_send_label,
        examples=_example_prompts,
        title=None,
        autofocus=True
    )
    # When initialization failed, the disabled textbox, hidden button and the
    # guard inside ai_chat_response_function already prevent interaction;
    # ChatInterface has no single interactive=False switch.

# --- Main execution block to launch the Gradio app ---
if __name__ == '__main__':
    print("Attempting to launch Gradio App...")
    if not OPENAI_API_KEY_GLOBAL:
        print("WARNING: OpenAI API Key was not found in environment variables or .env file.")
        print("The application UI will launch, but AI functionality will be disabled.")
        print("Please create a .env file with your OPENAI_API_KEY.")

    gradio_app.launch(share=True, debug=True)
hf_gradio_ai_app_original.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # gradio_ai_chatbot_dotenv.py
2
+ #
3
+ # To run this script:
4
+ # 1. Create a .env file in the same directory with your OPENAI_API_KEY.
5
+ # Example .env file content:
6
+ # OPENAI_API_KEY="sk-yourActualOpenAIapiKeyGoesHere"
7
+ # 2. Install the required packages:
8
+ # pip install gradio langchain openai langchain_openai python-dotenv
9
+ # 3. Run the script from your terminal:
10
+ # python gradio_ai_chatbot_dotenv.py
11
+ #
12
+ # The script will output a local URL and potentially a public Gradio link.
13
+
14
+ import gradio as gr
15
+ from langchain_openai import ChatOpenAI
16
+ from langchain.prompts import ChatPromptTemplate
17
+ import os
18
+ from dotenv import load_dotenv
19
+
20
# --- Load environment variables from .env file ---
load_dotenv()

# --- Global variables and Initial Setup ---
OPENAI_API_KEY_GLOBAL = os.getenv("OPENAI_API_KEY")  # None when the key is absent
LANGCHAIN_LLM = None                  # ChatOpenAI instance once initialized
LANGCHAIN_PROMPT_TEMPLATE = None      # ChatPromptTemplate once initialized
INITIAL_AI_SETUP_MESSAGE = ""         # HTML status/error string shown in the UI

def initialize_ai_components():
    """
    Initialize the LangChain components (LLM and prompt template) using the
    API key loaded from the environment.

    Side effects:
        Sets LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE and
        INITIAL_AI_SETUP_MESSAGE (an HTML status string rendered in the UI).

    Returns:
        bool: True if both components were created, False otherwise.
    """
    # Fix: OPENAI_API_KEY_GLOBAL is only read here, never assigned, so it
    # does not belong in the ``global`` statement.
    global LANGCHAIN_LLM, LANGCHAIN_PROMPT_TEMPLATE, INITIAL_AI_SETUP_MESSAGE

    if not OPENAI_API_KEY_GLOBAL:
        INITIAL_AI_SETUP_MESSAGE = "<p style='color:red; font-weight:bold;'>ERROR: OpenAI API Key not found. Please ensure it's in your .env file or environment variables.</p>"
        print("ERROR: OpenAI API Key not found. Make sure it's in your .env file or environment.")
        return False  # Indicate failure

    try:
        # Initialize the LangChain LLM (OpenAI chat model).
        LANGCHAIN_LLM = ChatOpenAI(openai_api_key=OPENAI_API_KEY_GLOBAL, model_name="gpt-4o-mini")

        # Prompt template: a general-purpose conversational assistant.
        prompt_template_str = """
        You are a helpful, friendly, and insightful AI assistant.
        Answer the user's question clearly, concisely, and in a conversational tone.
        If you don't know the answer or a question is ambiguous, ask for clarification or state that you don't know.

        User Question: {user_input}

        AI Response:
        """
        LANGCHAIN_PROMPT_TEMPLATE = ChatPromptTemplate.from_template(prompt_template_str)

        INITIAL_AI_SETUP_MESSAGE = "<p style='color:green; font-weight:bold;'>AI Components Initialized Successfully! Ready to chat.</p>"
        print("AI Components Initialized Successfully!")
        return True  # Indicate success
    except Exception as e:
        INITIAL_AI_SETUP_MESSAGE = f"<p style='color:red; font-weight:bold;'>ERROR: Failed to initialize AI components. Error: {str(e)}. Please check your API key and model access.</p>"
        LANGCHAIN_LLM = None
        LANGCHAIN_PROMPT_TEMPLATE = None
        print(f"ERROR: Failed to initialize AI components: {str(e)}")
        return False  # Indicate failure

# --- Attempt to initialize AI components when the script loads ---
AI_INITIALIZED_SUCCESSFULLY = initialize_ai_components()
69
+
70
def ai_chat_response_function(user_message, chat_history):
    """
    Core callback for Gradio's ChatInterface.

    Args:
        user_message (str): The latest message typed by the user.
        chat_history: Conversation history supplied by Gradio (unused; each
            question is answered independently).

    Returns:
        str: The AI's reply, or a human-readable error message.
    """
    if not AI_INITIALIZED_SUCCESSFULLY or not LANGCHAIN_LLM or not LANGCHAIN_PROMPT_TEMPLATE:
        # Strip any HTML markup from the stored setup message so the chat
        # window shows plain text.  Fix: the previous chained .replace()
        # calls only removed the specific red-error <p> wrapper; a regex
        # removes whatever tags the message carries.
        import re
        error_msg_text = re.sub(r"<[^>]+>", "", INITIAL_AI_SETUP_MESSAGE)
        return f"ERROR: AI is not ready. Status: {error_msg_text}"

    # Proceed with generating a response if components are ready.
    try:
        # Compose the chain (prompt | llm) and invoke it with the user input.
        chain = LANGCHAIN_PROMPT_TEMPLATE | LANGCHAIN_LLM
        ai_response = chain.invoke({"user_input": user_message})
        # Return only the text content of the AI's message object.
        return ai_response.content
    except Exception as e:
        print(f"Error during LangChain invocation: {e}")  # server-side log
        return f"Sorry, an error occurred while trying to get a response: {str(e)}"
94
+
95
# --- Gradio Interface Definition using gr.Blocks for layout control ---
# Resolve the pieces of UI configuration that depend on whether the AI
# components came up successfully, so the layout below stays declarative.
_ai_ready = AI_INITIALIZED_SUCCESSFULLY
_bot_placeholder = ("AI's responses will appear here."
                    if _ai_ready else "AI is not available. Check setup status above.")
_send_label = "➡️ Send" if _ai_ready else None  # hide the button when not ready
_example_prompts = [
    "What is Paris, France known for?",
    "Explain the concept of a Large Language Model (LLM) simply.",
    "Can you give me a basic recipe for brownies?",
    "Tell me an interesting fact about sunflowers."
] if _ai_ready else None  # only offer examples when the AI is usable

with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky), title="AI Chatbot (Gradio)") as gradio_app:
    gr.Markdown(
        """
        # 🤖 AI Chatbot with Gradio, LangChain & OpenAI
        Powered by OpenAI's `gpt-4o-mini` model.
        OpenAI API Key is loaded from your `.env` file.
        """
    )

    # Surface the initialization status (success or error) to the user.
    gr.Markdown(INITIAL_AI_SETUP_MESSAGE)
    gr.Markdown("---")  # visual separator
    gr.Markdown("## Chat Interface")

    # Main chat widget; all chat logic lives in ai_chat_response_function.
    chat_interface_component = gr.ChatInterface(
        fn=ai_chat_response_function,
        chatbot=gr.Chatbot(
            height=550,
            show_label=False,
            placeholder=_bot_placeholder,
            avatar_images=("https://raw.githubusercontent.com/svgmoji/svgmoji/main/packages/svgmoji__openmoji/svg/1F468-1F3FB-200D-1F9B0.svg", "https://raw.githubusercontent.com/gradio-app/gradio/main/gradio/icons/huggingface-logo.svg"),
            type='messages'
        ),
        textbox=gr.Textbox(
            placeholder="Type your message here and press Enter...",
            show_label=False,
            scale=7,
            interactive=_ai_ready  # input disabled when initialization failed
        ),
        submit_btn=_send_label,
        examples=_example_prompts,
        title=None,
        autofocus=True
    )
    # When initialization failed, the disabled textbox, hidden button and the
    # guard inside ai_chat_response_function already prevent interaction;
    # ChatInterface has no single interactive=False switch.

# --- Main execution block to launch the Gradio app ---
if __name__ == '__main__':
    print("Attempting to launch Gradio App...")
    if not OPENAI_API_KEY_GLOBAL:
        print("WARNING: OpenAI API Key was not found in environment variables or .env file.")
        print("The application UI will launch, but AI functionality will be disabled.")
        print("Please create a .env file with your OPENAI_API_KEY.")

    gradio_app.launch(share=True, debug=True)