Ari committed: Update app.py
app.py CHANGED

@@ -1,75 +1,69 @@
 import os
 import streamlit as st
 import pandas as pd
-import numpy as np
 import sqlite3
 from langchain import OpenAI, LLMChain, PromptTemplate
 import sqlparse
 import logging
 from sklearn.linear_model import LinearRegression
 from sklearn.model_selection import train_test_split
 from sklearn.metrics import mean_squared_error, r2_score
-import statsmodels.api as sm
-
-
-
-# Configure logging
-logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')
-
-# Step 1: Load the dataset
-def load_data():
-    st.header("Select or Upload a Dataset")
-
-    dataset_options = {
-        "Default Dataset": "default_data.csv",
-        # Add more datasets as needed
-        "Upload Your Own Dataset": None
-    }
-
-    selected_option = st.selectbox("Choose a dataset:", list(dataset_options.keys()))
-
-    if selected_option == "Upload Your Own Dataset":
-        uploaded_file = st.file_uploader("Upload your dataset (CSV file)", type=["csv"])
-        if uploaded_file is not None:
-            data = pd.read_csv(uploaded_file)
-            st.success("Data successfully loaded!")
-            return data
-        else:
-            st.info("Please upload a CSV file to proceed.")
-            return None
-    else:
-        file_path = dataset_options[selected_option]
-        if os.path.exists(file_path):
-            data = pd.read_csv(file_path)
-            st.success(f"'{selected_option}' successfully loaded!")
-            return data
-        else:
-            st.error(f"File '{file_path}' not found.")
-            return None

-

-
-
-
 else:
-
-
-
-
-
-
 template = """
-You are an expert data scientist

 Instructions:

 - If the question involves data retrieval or simple aggregations, generate a SQL query.
-- If the question requires statistical analysis
-- If the question involves predictions, modeling, or recommendations, generate a Python code snippet using scikit-learn or pandas.
 - Ensure that you only use the columns provided.
 - Do not include any import statements in the code.
-

 Question: {question}

@@ -79,125 +73,77 @@ Valid columns: {columns}

 Response:
 """
-
-
-
-
 prompt = PromptTemplate(template=template, input_variables=['question', 'table_name', 'columns'])
[... old lines 87-139, also removed, are not rendered in this view ...]
-        # Append user message to history
-        st.session_state.history.append({"role": "user", "content": user_prompt})
-
-        if "columns" in user_prompt.lower():
-            assistant_response = f"The columns are: {', '.join(valid_columns)}"
-            st.session_state.history.append({"role": "assistant", "content": assistant_response})
         else:
[... old lines 147-156, also removed, are not rendered in this view ...]
-            st.write(f"**Generated Code:**\n```python\n{code}\n```")
-            try:
-                result = execute_code(code)
-                assistant_response = "Result:"
-                st.session_state.history.append({"role": "assistant", "content": assistant_response})
-                st.session_state.history.append({"role": "assistant", "content": result})
-            except Exception as e:
-                logging.error(f"An error occurred during code execution: {e}")
-                assistant_response = f"Error executing code: {e}"
-                st.session_state.history.append({"role": "assistant", "content": assistant_response})
     else:
[... old lines 168-175, also removed, are not rendered in this view ...]
-    # Reset the user_input in session state
-    st.session_state['user_input'] = ''
-
-# Initialize session state variables
-if 'history' not in st.session_state:
-    st.session_state.history = []
-
-if 'user_input' not in st.session_state:
-    st.session_state['user_input'] = ''
-
-
-# Display the conversation history
-for message in st.session_state.history:
-    if message['role'] == 'user':
-        st.markdown(f"**User:** {message['content']}")
-    elif message['role'] == 'assistant':
-        content = message['content']
-        if isinstance(content, pd.DataFrame):
-            st.markdown("**Assistant:** Here are the results:")
-            st.dataframe(content)
-        elif isinstance(content, (int, float, str, list, dict)):
-            st.markdown(f"**Assistant:** {content}")
-        else:
-            st.markdown(f"**Assistant:** {content}")
-
-# Place the text input after displaying the conversation
-st.text_input("Enter your question:", key='user_input', on_change=process_input)
-

 import os
 import streamlit as st
 import pandas as pd
 import sqlite3
 from langchain import OpenAI, LLMChain, PromptTemplate
+from langchain_community.utilities import SQLDatabase
 import sqlparse
 import logging
+from sql_metadata import Parser  # Added import
 from sklearn.linear_model import LinearRegression
 from sklearn.model_selection import train_test_split
 from sklearn.metrics import mean_squared_error, r2_score
+import statsmodels.api as sm
+import numpy as np

+# OpenAI API key (ensure it is securely stored)
+openai_api_key = os.getenv("OPENAI_API_KEY")

+# Step 1: Upload CSV data file (or use default)
+csv_file = st.file_uploader("Upload your CSV file", type=["csv"])
+if csv_file is None:
+    data = pd.read_csv("default_data.csv")  # Use default CSV if no file is uploaded
+    st.write("Using default_data.csv file.")
 else:
+    data = pd.read_csv(csv_file)
+    st.write(f"Data Preview ({csv_file.name}):")
+    st.dataframe(data.head())
+
+# Step 2: Load CSV data into a persistent SQLite database
+db_file = 'my_database.db'
+conn = sqlite3.connect(db_file)
+table_name = csv_file.name.split('.')[0] if csv_file else "default_table"
+data.to_sql(table_name, conn, index=False, if_exists='replace')
+
+# SQL table metadata (for validation and schema)
+valid_columns = list(data.columns)
+st.write(f"Valid columns: {valid_columns}")
+
+# Step 3: Define SQL validation helpers
+def validate_sql(query, valid_columns):
+    """Validates the SQL query by ensuring it references only valid columns."""
+    parser = Parser(query)
+    columns_in_query = parser.columns
+    for column in columns_in_query:
+        if column not in valid_columns:
+            st.write(f"Invalid column detected: {column}")
+            return False
+    return True
+
+def validate_sql_with_sqlparse(query):
+    """Validates SQL syntax using sqlparse."""
+    parsed_query = sqlparse.parse(query)
+    return len(parsed_query) > 0
+
+# Step 4: Set up the LLM Chain to generate SQL queries or Python code
 template = """
+You are an expert data scientist. Given a natural language question, the name of the table, and a list of valid columns, decide whether to generate a SQL query to retrieve data or a Python code snippet to perform statistical analysis, time series analysis, or build a simple machine learning model.

 Instructions:

 - If the question involves data retrieval or simple aggregations, generate a SQL query.
+- If the question requires statistical analysis, time series analysis, or machine learning, generate a Python code snippet using pandas, numpy, statsmodels, or scikit-learn.
 - Ensure that you only use the columns provided.
 - Do not include any import statements in the code.
+- For SQL queries, provide the query directly.
+- For Python code, provide the code between <CODE> and </CODE> tags.

 Question: {question}


 Response:
 """
 prompt = PromptTemplate(template=template, input_variables=['question', 'table_name', 'columns'])
+sql_generation_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
+
+# Step 5: Generate SQL query or Python code based on user input
+user_prompt = st.text_input("Enter your natural language prompt:")
+if user_prompt:
+    try:
+        # Step 6: Adjust the logic to handle "what are the columns" query
+        if "columns" in user_prompt.lower():
+            # Custom logic to return columns
+            st.write(f"The columns are: {', '.join(valid_columns)}")
+        else:
+            columns = ', '.join(valid_columns)
+            response = sql_generation_chain.run({'question': user_prompt, 'table_name': table_name, 'columns': columns})
+
+            # Check if the response contains Python code
+            if '<CODE>' in response and '</CODE>' in response:
+                # Extract code between <CODE> and </CODE>
+                start_index = response.find('<CODE>') + len('<CODE>')
+                end_index = response.find('</CODE>')
+                code = response[start_index:end_index].strip()
+
+                # Optionally display the code
+                # st.write("Generated Python Code:")
+                # st.code(code, language='python')
+
+                # Execute the code safely
+                try:
+                    # Prepare the local namespace
+                    local_vars = {
+                        'pd': pd,
+                        'np': np,
+                        'data': data.copy(),
+                        'result': None,
+                        'LinearRegression': LinearRegression,
+                        'train_test_split': train_test_split,
+                        'mean_squared_error': mean_squared_error,
+                        'r2_score': r2_score,
+                        'sm': sm
+                    }
+                    exec(code, {}, local_vars)
+                    result = local_vars.get('result')
+
+                    if result is not None:
+                        st.write("Result:")
+                        if isinstance(result, pd.DataFrame):
+                            st.dataframe(result)
+                        else:
+                            st.write(result)
+                    else:
+                        st.write("Code executed successfully.")
+                except Exception as e:
+                    logging.error(f"An error occurred during code execution: {e}")
+                    st.write(f"Error executing code: {e}")
             else:
+                # Assume it's a SQL query
+                generated_sql = response.strip()
+                # Optionally display the generated SQL query
+                # st.write(f"Generated SQL Query:\n{generated_sql}")
+
+                # Step 7: Validate SQL query
+                if not validate_sql_with_sqlparse(generated_sql):
+                    st.write("Generated SQL is not valid.")
+                elif not validate_sql(generated_sql, valid_columns):
+                    st.write("Generated SQL references invalid columns.")
                 else:
+                    # Step 8: Execute SQL query
+                    result = pd.read_sql_query(generated_sql, conn)
+                    st.write("Query Results:")
+                    st.dataframe(result)
+
+    except Exception as e:
+        logging.error(f"An error occurred: {e}")
+        st.write(f"Error: {e}")
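
A few notes on the updated code follow, each with a small illustrative sketch. The sample data, table contents, and query strings in these sketches are made up for illustration and are not part of the commit.

Step 2 and Step 8 round-trip the uploaded CSV through SQLite: data.to_sql(...) materialises the DataFrame as a table, and pd.read_sql_query(...) later runs the generated SQL against the same connection. A minimal sketch of that flow, using an in-memory database in place of my_database.db:

    import sqlite3
    import pandas as pd

    conn = sqlite3.connect(":memory:")  # in-memory stand-in for my_database.db
    df = pd.DataFrame({"city": ["Oslo", "Lima", "Oslo"], "sales": [10, 20, 5]})
    df.to_sql("default_table", conn, index=False, if_exists="replace")

    # The generated SQL is later executed against the same connection.
    result = pd.read_sql_query(
        "SELECT city, SUM(sales) AS total_sales FROM default_table GROUP BY city", conn
    )
    print(result)  # one row per city with the summed sales

Because if_exists='replace' is used, re-running the app with a new upload overwrites the table rather than appending to it.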
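The new validate_sql helper leans on sql_metadata's Parser to pull column names out of the generated SQL before it is executed. Roughly how that extraction behaves, assuming the sql-metadata 2.x API (the query below is a made-up example):

    from sql_metadata import Parser

    query = "SELECT name, salary FROM employees WHERE department = 'R&D'"
    parser = Parser(query)
    print(parser.columns)  # expected: ['name', 'salary', 'department']
    print(parser.tables)   # expected: ['employees']

One caveat: Parser.columns can also surface table-qualified names or aliases, so a strict "column not in valid_columns" membership check may reject queries that are actually fine.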
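validate_sql_with_sqlparse only checks that sqlparse returns at least one parsed statement. sqlparse is a non-validating parser, so this is best read as a non-empty check rather than real syntax validation; a quick illustration:

    import sqlparse

    print(len(sqlparse.parse("SELECT * FROM default_table")))  # 1
    print(len(sqlparse.parse("this is not sql at all")))       # also 1 -- still "parses"

If stricter validation is ever needed, letting SQLite itself reject the statement (for example by catching the error raised from pd.read_sql_query, as the code already does) is the more reliable check.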
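Step 6 executes the model-generated Python by calling exec with an empty globals dict and a curated local_vars dict, then reads the snippet's result variable back out. A stripped-down version of that pattern; the code string here is a hypothetical stand-in for what the LLM would return between <CODE> tags:

    import pandas as pd
    import numpy as np

    data = pd.DataFrame({"value": [1, 2, 3, 4]})
    code = "result = data['value'].mean()"  # hypothetical LLM output

    # Only the names placed in local_vars are handed to the snippet.
    local_vars = {"pd": pd, "np": np, "data": data.copy(), "result": None}
    exec(code, {}, local_vars)   # names assigned by the snippet land in local_vars
    print(local_vars["result"])  # 2.5

Note that passing an empty globals dict limits which convenience names the snippet can see, but it is not a sandbox: builtins are still reachable, so this pattern controls ergonomics rather than providing security.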