import streamlit as st
import openai
import os
import pandas as pd
import json
import time
import snowflake.connector
from cryptography.hazmat.primitives import serialization
import re
import datetime
import logging
import altair as alt
# Suppress noisy ScriptRunContext warnings from Streamlit's script runner.
logging.getLogger("streamlit.runtime.scriptrunner_utils").setLevel(logging.ERROR)
# ─────────────────────────────────────────────────────────────
# 1. Page Configuration
# ─────────────────────────────────────────────────────────────
st.set_page_config(page_title="Raptive Data Wizard", layout="wide")
st.title("Raptive Data Wizard")
# ─────────────────────────────────────────────────────────────
# 2. Main Tabs: "Ask a Question", "Instructions", "Additional Info"
# ─────────────────────────────────────────────────────────────
main_tabs = st.tabs(["Ask a Question", "Instructions", "Additional Info"])
# ─────────────────────────────────────────────────────────────
# 3. "Ask a Question" Tab (Main Logic)
# ─────────────────────────────────────────────────────────────
with main_tabs[0]:
st.header("Ask a Question")
# Sidebar for Data Source Selection
st.sidebar.markdown(
"""
<div style="background-color: #fff3cd; padding: 10px; border-radius: 5px; border-left: 5px solid #ffc107;">
<h3 style="color: #856404; margin-bottom: 0;">Data Source</h3>
</div>
""",
unsafe_allow_html=True,
)
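# Map each sidebar data-source choice to the reference files loaded for it
# and to the prompt guidance injected into the model's system message below.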
data_config = {
"Ask a Question": {
"files": [
"GAMLOG.json",
"Earnings_and_Analytics_by_Day.json",
"Earnings_and_Analytics_by_Device.json",
"Earnings_by_Ad_Network.json",
"GAM_earnings.json",
"GAM_Hourly.json",
"RPM_by_URL.json",
"AVI_SITE_DATASET_snowflake.json",
"Annotation.json",
"Annotation_incremental.json",
"SITE_RPM_GUARANTEE_ENTRY.json",
"SITE_RPM_INPUT_BEFORE.json",
"SITE_RPM_INPUT_AFTER.json",
"SITE_AD_DENSITY.json",
],
"table_references": (
"The user selected 'Ask a Question'. Review all JSON files provided and identify the correct data source based on the user's question. "
"Clearly instruct the user which data source toggle to select."
),
},
"AB Group Experiments": {
"files": ["GAMLOG_ABGROUP.json", "GAMLOG_ABGROUP_Instructions_file.txt"],
"table_references": (
"For GAMLOG_ABGROUP data, help the user write an SQL query and always reference the AB_GRANULAR table along with GAMLOG_ABGROUP_Instructions_file.txt to help normalize the data."
),
},
"GAMLOG": {
"files": ["GAMLOG.json", "gamlog_Instructions_file.txt"],
"table_references": (
"For GAMLOG data, help the user write an SQL query and always reference the GAMLOG table."
),
},
"Google Analytics Pageviews": {
"files": [
"Earnings_and_Analytics_by_Day.json",
"Earnings_and_Analytics_by_Device.json",
"Earnings_by_Ad_Network.json",
"Google_Analytics_Instructions_file.txt",
],
"table_references": "Generate a SQL query using the metadata if needed.",
},
"GAM Hourly": {
"files": ["GAM_Hourly.json"],
"table_references": (
"For GAM Hourly, provide guidance on analyzing order-level earnings by hour. "
"Generate a SQL query referencing the appropriate table."
),
},
"GAM Earnings (Order-level Data)": {
"files": ["GAM_earnings.json"],
"table_references": (
"For GAM Earnings (Order-level Data), provide guidance on analyzing order-level earnings. "
"Generate a SQL query referencing the appropriate table."
),
},
"RPM by URL": {
"files": ["RPM_by_URL.json"],
"table_references": (
"For RPM by URL, reference the RPM_by_URL table. Generate a SQL query using the provided metadata."
" If the user asks for URL data they are most likely asking for Page Path data, so please use accordingly"
),
},
"Site Settings": {
"files": [
"AVI_SITE_DATASET_snowflake.json",
"AVI_SITE_DATASET_snowflake_settings.txt",
],
"table_references": (
"For Site Settings, reference the listed JSON files. Generate a SQL query using the metadata if needed."
),
},
"Site Annotations": {
"files": ["Annotation.json", "Annotation_incremental.json"],
"table_references": (
"For Site Annotations, reference the JSON files. Generate a SQL query using the metadata if needed."
),
},
"Site RPM Settings": {
"files": [
"SITE_RPM_GUARANTEE_ENTRY.json",
"SITE_RPM_INPUT_BEFORE.json",
"SITE_RPM_INPUT_AFTER.json",
],
"table_references": (
"For Site RPM Settings, reference the JSON files. Generate a SQL query using the provided metadata."
),
},
"Site Ad Density": {
"files": ["SITE_AD_DENSITY.json"],
"table_references": (
"For Site Ad Density, reference the SITE_AD_DENSITY JSON file. Generate a SQL query using the provided metadata."
),
},
"Performance Troubleshooting": {
"files": [
"Earnings_and_Analytics_by_Day.json",
"Performance_Troubleshooting_Instructions.txt",
],
"table_references": (
"For Performance Troubleshooting, reference the Earnings_and_Analytics_by_Day data and the Performance Troubleshooting instructions. "
"Generate a line-chart for each metric over the last 420 days for a selected site, per the instructions."
),
},
}
data_choice = st.sidebar.radio(
"Which data source do you want to use?", list(data_config.keys())
)
st.sidebar.markdown(
"""
<div style="background-color: #fff3cd; padding: 10px; border-radius: 5px; margin-top: 20px;">
<p style="color: #000;">If anyone has any questions, feedback, or requests, please contact <strong>Avi Sutton</strong>.</p>
</div>
""",
unsafe_allow_html=True,
)
# ─────────────────────────────────────────────────────────────
# Loader: returns merged reference info and dynamic schema (if available)
@st.cache_data
def load_reference_files(files):
chunk_texts = []
dynamic_schema = None # Try to extract schema from JSON metadata if available.
for fp in files:
if fp.lower().endswith(".json"):
try:
with open(fp, "r", encoding="utf-8") as f:
data = json.load(f)
# If a schema is defined in this JSON file, capture it.
if (
dynamic_schema is None
and isinstance(data, dict)
and "schema" in data
):
dynamic_schema = data["schema"]
chunk_texts.append(json.dumps(data, separators=(",", ":")))
except Exception as e:
chunk_texts.append(f"[ERROR loading JSON: {fp}] {e}")
elif fp.lower().endswith(".txt"):
try:
with open(fp, "r", encoding="utf-8") as f:
chunk_texts.append(f.read())
except Exception as e:
chunk_texts.append(f"[ERROR loading TXT: {fp}] {e}")
else:
chunk_texts.append(f"[Unknown file type: {fp}]")
return "\n".join(chunk_texts), dynamic_schema
@st.cache_data
def load_performance_query():
with open(
"Performance_Troubleshooting_Instructions.txt", "r", encoding="utf-8"
) as f:
text = f.read()
match = re.search(r"```sql\s*(.*?)\s*```", text, re.DOTALL)
return match.group(1).strip() if match else ""
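# The instructions file is assumed to embed its query in a fenced ```sql block;
# the regex above extracts the first such block. A "{{site}}" placeholder in the
# query text is substituted with the selected site at execution time.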
@st.cache_data
def get_site_names():
private_key = serialization.load_pem_private_key(
os.getenv("snowflake_private_key").encode(), password=None
)
conn = snowflake.connector.connect(
user=os.getenv("snowflake_user"),
account=os.getenv("snowflake_account_identifier"),
private_key=private_key,
role=os.getenv("snowflake_role"),
warehouse=os.getenv("snowflake_warehouse"),
database=os.getenv("snowflake_database"),
schema="SIGMA_SCRATCH",
)
cs = conn.cursor()
cs.execute(
'SELECT DISTINCT "Site Name (SITE_EXTENDED)" FROM "ANALYTICS"."SIGMA_SCRATCH"."VIEW_EARNINGS_AND_ANALYTICS_BY_DAY_D19312C8622E487D891B72420987C00B_MAT" ORDER BY 1'
)
sites = [row[0] for row in cs.fetchall()]
cs.close()
conn.close()
return sites
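# Note: the Snowflake connection relies on key-pair authentication supplied via
# environment variables (snowflake_private_key as an unencrypted PEM, plus
# snowflake_user, snowflake_account_identifier, snowflake_role,
# snowflake_warehouse, and snowflake_database).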
selected_config = data_config.get(data_choice, {})
chunk_file_paths = selected_config.get("files", [])
table_references = selected_config.get("table_references", "")
reference_info, dynamic_schema = load_reference_files(chunk_file_paths)
if dynamic_schema is None:
dynamic_schema = "GAM360"
site_selection = None
if data_choice == "Performance Troubleshooting":
sites = get_site_names()
site_selection = st.selectbox("Select Site", options=sites)
# Extract available dimensions and metrics from JSON files (if available)
available_dimensions = []
available_metrics = []
for fp in chunk_file_paths:
if fp.lower().endswith(".json"):
try:
with open(fp, "r", encoding="utf-8") as f:
data = json.load(f)
if isinstance(data, dict) and "metadata" in data:
for col in data["metadata"]:
col_name = col.get("column_name")
data_type = col.get("data_type", "").upper()
if col_name:
# Use numeric data types as metrics, others as dimensions.
if data_type in [
"NUMBER",
"FLOAT",
"INTEGER",
"DOUBLE",
"DECIMAL",
]:
if col_name not in available_metrics:
available_metrics.append(col_name)
else:
if col_name not in available_dimensions:
available_dimensions.append(col_name)
except Exception as e:
st.write(f"Error reading {fp}: {e}")
if data_choice != "Performance Troubleshooting":
# Optional expander for user to select dimensions, metrics, and a date range.
with st.expander(
"Optional: Specify Dimensions, Metrics, and Date Filter", expanded=False
):
selected_dimensions = st.multiselect(
"Select Dimensions", options=available_dimensions
)
selected_metrics = st.multiselect(
"Select Metrics", options=available_metrics
)
today = datetime.date.today()
default_start = today - datetime.timedelta(days=7)
default_end = today - datetime.timedelta(days=1)
apply_date_filter = st.checkbox("Apply Date Filter?")
if apply_date_filter:
date_range = st.date_input(
"Select Date Range", value=(default_start, default_end)
)
else:
date_range = None
else:
selected_dimensions = []
selected_metrics = []
date_range = None
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
st.sidebar.error("No API key found in environment.")
st.stop()
openai.api_key = api_key
# Build additional context from optional inputs.
additional_context = ""
if selected_dimensions:
additional_context += f"\nDimensions provided: {', '.join(selected_dimensions)}"
if selected_metrics:
additional_context += f"\nMetrics provided: {', '.join(selected_metrics)}"
if date_range and isinstance(date_range, tuple) and len(date_range) == 2:
additional_context += f"\nDate Range: {date_range[0]} to {date_range[1]}"
if data_choice == "Ask a Question":
# Define the list of valid data config keys that can be recommended.
valid_data_configs = [
"Ask a Question",
"AB Group Experiments",
"GAMLOG",
"Google Analytics Pageviews",
"GAM Earnings (Order-level Data)",
"GAM Hourly",
"RPM by URL",
"Site Settings",
"Site Annotations",
"Site RPM Settings",
"Site Ad Density",
"Performance Troubleshooting",
]
valid_data_configs_str = ", ".join(
[f'"{config}"' for config in valid_data_configs]
)
system_message_content = f"""
You are a helpful data assistant. Your job is to determine which one of the following data config keys is most relevant to the user's question:
{valid_data_configs_str}
After thoroughly analyzing the provided metadata:
1. **Recommendation:** Provide a concise recommendation stating exactly which data config toggle the user should select (for example, "GAMLOG" or "Site RPM Settings"). You must choose from the list above and **do not** reference any JSON filename or file-level detail.
2. **Justification:** Briefly justify your choice based on matching the user's request with the available dimensions, metrics, or other metadata details.
3. **Response Formatting:** Use the exact format below and do not generate an SQL query for this option.
The required response format is:
**Recommended Data Source: <Data Config Name>**
**Reason:**
- Brief justification based on the available metadata.
Reference Data:
{reference_info}
{additional_context}
"""
else:
# existing system message for other data sources
system_message_content = (
"You are a helpful data assistant. Your job is to answer user questions about data, including guidance on "
"where the data can be found and which datasets should be used. Using the Data Source, JSON files, and the "
"instruction file as helpful context, create a **concise summary** that details what data source is being "
"used, what table is being used (including its database schema as found in the JSON metadata). "
"Your summary should be in Markdown format with each bullet point on a new line, and indent sub-bullets properly. "
"The format must **only** include:\n"
"1. A short introductory sentence.\n"
"2. A section called **Key Points** (in bullet points). Do not include metadata information in this response.\n"
"For every request, generate a Snowflake SQL query using the metadata found in the JSON files. "
"Follow these steps when generating a SQL query:\n"
"1. Verify that the requested data exists within the provided reference info.\n"
"2. Generate a SQL query using the dimensions and metrics specified by the user.\n"
"3. The SQL code block must contain **only** valid SQL statements and nothing else.\n\n"
"When generating SQL code blocks, follow these rules:\n"
"- Place only valid SQL statements inside ```sql ... ```.\n"
"- Do not include disclaimers, bullet points, or additional commentary inside the code block.\n"
"- All explanatory text must be outside the triple backticks.\n\n"
"**Additional Rule:** For any column representing datesβ€”especially \"Install Date (SITE_EXTENDED)\"β€”ensure that all date literals are provided as properly formatted strings (e.g., 'YYYY-MM-DD'). If a numeric date literal (e.g., 20250401) is detected, convert it to the correct format (e.g., '2025-04-01'). Under no circumstances should a date filter be written using a numeric literal.\n\n"
"**Crucial Formatting Rules:**\n"
"- **Dimensions and Metrics:** Dimensions are descriptive attributes (e.g., 'Site ID', 'Page Path'). Metrics are quantitative measurements (e.g., 'Pageviews', 'Impressions').\n"
"- **Dimensions and Metrics:** All dimension and metric names must be in UPPERCASE.\n"
'- **Quoting:** Always wrap dimension and metric names in double quotes. For example, use `"Dimension Name"` and `"Metric Name"` instead of `Dimension Name` and `Metric Name`.\n'
"- If a dimension or metric name does not contain spaces (e.g., 'status'), you still must wrap it in double quotes (e.g., `\"status\"`).\n"
"- String values in `WHERE` clauses **must** be enclosed in single quotes (`'...'`). Numeric values should not be enclosed in quotes.\n"
"- **Case Sensitivity:** Pay very close attention to case sensitivity. Ensure that all column names and string values in `WHERE` clauses match the exact case found in the appropriate JSON sample data.\n"
"- **Data Types:** Ensure that the data types of values used in `WHERE` clauses match the data types of the corresponding columns.\n"
"- **Verify all syntax against the appropriate JSON sample data to ensure accuracy.**\n"
"- **Null Values and Percentages:** If the user's question involves calculating percentages or SOV (Share of Voice) pecentages, exclude null values from the query results.\n"
"- **Division by Zero:** When performing calculations involving division, ensure the denominator is not zero. Use SQL functions such as `NULLIF(denom, 0)` or CASE statements to safely handle division by zero scenarios.\n"
'- **Default Date Filter:** Unless explicitly specified by the user, filter the SQL query to yesterday\'s date, except when the data source is "Site Ad Density", "Site Settings", "Site RPM Settings", or "Site Annotations".\n'
f"Use the following as a guide for additional information regarding the Data Source: {table_references}\n\n"
"Reference Data:\n"
f"Please use the following as a guide to understand the Data Source selected by the user and which JSON files "
f"to use so that you provide a well-informed answer: {reference_info}"
f"{additional_context}"
)
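# A query satisfying the formatting rules above might look like this sketch
# (hypothetical column names; the real ones come from the JSON metadata):
#     SELECT "SITE NAME", SUM("IMPRESSIONS") / NULLIF(SUM("PAGEVIEWS"), 0) AS "IMPS_PER_PV"
#     FROM GAMLOG
#     WHERE "DATE" = '2025-04-01'
#     GROUP BY "SITE NAME";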
system_message = {"role": "system", "content": system_message_content}
if "conversation_history" not in st.session_state:
st.session_state["conversation_history"] = []
user_input = st.text_input(
"Ask a question: Be as specific as possible so I can give you the most accurate answer"
)
modified_question = user_input
if selected_dimensions or selected_metrics:
modified_question += (
f"\nDimensions: {', '.join(selected_dimensions)}"
if selected_dimensions
else ""
)
modified_question += (
f"\nMetrics: {', '.join(selected_metrics)}" if selected_metrics else ""
)
if date_range and isinstance(date_range, tuple) and len(date_range) == 2:
modified_question += f"\nDate Range: {date_range[0]} to {date_range[1]}"
submit_button = st.button("Submit")
if submit_button:
if data_choice == "Performance Troubleshooting" and site_selection:
try:
sql_query = load_performance_query().replace("{{site}}", site_selection)
with st.expander("View SQL Query (Advanced Users)"):
st.code(sql_query, language="sql")
private_key = serialization.load_pem_private_key(
os.getenv("snowflake_private_key").encode(), password=None
)
conn = snowflake.connector.connect(
user=os.getenv("snowflake_user"),
account=os.getenv("snowflake_account_identifier"),
private_key=private_key,
role=os.getenv("snowflake_role"),
warehouse=os.getenv("snowflake_warehouse"),
database=os.getenv("snowflake_database"),
schema="SIGMA_SCRATCH",
)
cs = conn.cursor()
cs.execute("ALTER SESSION SET STATEMENT_TIMEOUT_IN_SECONDS = 1800")
st.write("Executing SQL query...")
start_time_query = time.time()
with st.spinner("Executing SQL query..."):
cs.execute(sql_query)
query_result = cs.fetchall()
elapsed_time_query = time.time() - start_time_query
minutes = int(elapsed_time_query // 60)
seconds = elapsed_time_query % 60
st.write(
f"SQL query executed in {minutes} minutes and {seconds:.2f} seconds."
)
columns = [col[0] for col in cs.description] if cs.description else []
cs.close()
conn.close()
if columns:
result_df = pd.DataFrame(query_result, columns=columns)
result_df["DATE"] = pd.to_datetime(result_df["DATE"]).dt.date
metrics = [c for c in columns if c != "DATE"]
result_df[metrics] = (
result_df[metrics]
.apply(pd.to_numeric, errors="coerce")
.round(2)
)
plot_df = result_df.copy()
result_df["DATE"] = result_df["DATE"].astype(str)
for metric in metrics:
chart_df = plot_df[["DATE", metric]].dropna()
brush = alt.selection_interval(encodings=["x"])
base = (
alt.Chart(chart_df)
.mark_line()
.encode(
x="DATE:T",
y=alt.Y(
f"{metric}:Q",
title=metric,
axis=alt.Axis(format=".2f"),
),
tooltip=[
alt.Tooltip("DATE:T", title="Date"),
alt.Tooltip(f"{metric}:Q", format=".2f"),
],
)
)
upper = base.properties(height=200).add_selection(brush)
lower = base.transform_filter(brush).properties(height=200)
st.altair_chart(
alt.vconcat(upper, lower).resolve_scale(y="independent"),
use_container_width=True,
)
recent = plot_df.sort_values("DATE").tail(14)
flagged = {}
if len(recent) == 14:
for metric in metrics:
first_avg = recent.iloc[:7][metric].mean()
last_avg = recent.iloc[7:][metric].mean()
if pd.notna(first_avg) and first_avg != 0:
pct_change = (last_avg - first_avg) / first_avg * 100
if pct_change < -5:
flagged[metric] = pct_change
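# Worked example: a first-week average of 100.0 and a last-week average
# of 92.0 gives (92.0 - 100.0) / 100.0 * 100 = -8.0%, which falls below
# the -5% threshold and is flagged.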
if flagged:
st.subheader("Metrics Showing Recent Decline")
for metric, pct in flagged.items():
st.warning(
f"{metric}: {pct:.2f}% decline compared with the prior week"
)
column_config = {
metric: st.column_config.NumberColumn(format="%.2f")
for metric in metrics
}
st.data_editor(
result_df,
use_container_width=True,
hide_index=True,
column_config=column_config,
)
result_df["DATE"] = pd.to_datetime(result_df["DATE"])
metrics = [c for c in columns if c != "DATE"]
for metric in metrics:
st.line_chart(result_df, x="DATE", y=metric)
st.data_editor(result_df, use_container_width=True, hide_index=True)
else:
st.write(query_result)
except Exception as e:
st.error(f"Error executing Performance Troubleshooting query: {e}")
elif modified_question:
try:
messages = (
[system_message]
+ st.session_state["conversation_history"]
+ [{"role": "user", "content": modified_question}]
)
with st.spinner("Waiting for model response..."):
start_time = time.time()
response = openai.chat.completions.create(
model="o4-mini",
messages=messages,
reasoning_effort="high",
)
end_time = time.time()
elapsed_time = end_time - start_time
minutes = int(elapsed_time // 60)
seconds = elapsed_time % 60
answer = response.choices[0].message.content.strip()
st.session_state["conversation_history"].append(
{"role": "user", "content": modified_question}
)
st.session_state["conversation_history"].append(
{"role": "assistant", "content": answer}
)
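# Strip any fenced ```sql block from the displayed answer; the query is
# extracted separately below and, for executable data sources, run against
# Snowflake.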
clean_answer = re.sub(
r"```sql\s+(.*?)\s+```", "", answer, flags=re.DOTALL
).strip()
st.write(f"**Assistant:** {clean_answer}")
# st.write(f"**Assistant:** {answer}")
st.write(
f"Response received in {minutes} minutes and {seconds:.2f} seconds."
)
# st.write("### Extracting SQL Query from assistant response...")
# Reminder: The SQL query must use string-formatted date literals.
sql_match = re.search(r"```sql\s+(.*?)\s+```", answer, re.DOTALL)
if sql_match:
sql_query = sql_match.group(1).strip()
# st.write("SQL query extracted from markdown code block successfully.")
else:
sql_query = answer.strip()
# st.write("No markdown code block found; using entire answer as SQL query.")
# Only execute SQL query if the data source is not 'Ask a Question'
if data_choice != "Ask a Question" and "select" in sql_query.lower():
with st.expander("View SQL Query (Advanced Users)"):
st.code(sql_query, language="sql")
try:
private_key = serialization.load_pem_private_key(
os.getenv("snowflake_private_key").encode(), password=None
)
conn = snowflake.connector.connect(
user=os.getenv("snowflake_user"),
account=os.getenv("snowflake_account_identifier"),
private_key=private_key,
role=os.getenv("snowflake_role"),
warehouse=os.getenv("snowflake_warehouse"),
database=os.getenv("snowflake_database"),
schema=dynamic_schema,
)
cs = conn.cursor()
cs.execute(
"ALTER SESSION SET STATEMENT_TIMEOUT_IN_SECONDS = 1800"
)
st.write("Executing SQL query...")
start_time_query = time.time()
with st.spinner("Executing SQL query..."):
cs.execute(sql_query)
query_result = cs.fetchall()
elapsed_time_query = time.time() - start_time_query
minutes = int(elapsed_time_query // 60)
seconds = elapsed_time_query % 60
st.write(
f"SQL query executed in {minutes} minutes and {seconds:.2f} seconds."
)
columns = (
[col[0] for col in cs.description] if cs.description else []
)
cs.close()
conn.close()
st.write("### Query Result")
if columns:
result_df = pd.DataFrame(query_result, columns=columns)
# Reset the index AND drop it from the DataFrame itself
result_df = result_df.reset_index(drop=True)
st.data_editor(
result_df, use_container_width=True, hide_index=True
)
st.write(
f"Total Rows: {len(result_df):,}"
) # Add total row count.
csv_data = result_df.to_csv(index=False).encode("utf-8")
st.download_button(
label="Download CSV",
data=csv_data,
file_name="query_results.csv",
mime="text/csv",
)
# New Step: Analyze the entire data using OpenAI's model
if not result_df.empty:
full_data_csv = result_df.to_csv(index=False)
analysis_prompt = (
"You are a helpful data assistant. Below is a CSV dataset. "
"Your summary should be in Markdown format with each bullet point on a new line and proper indentation for sub-bullets. "
"Please provide a dynamic, structured analysis that includes:\n\n"
"1. Format all large numbers with comma separators and round percentages to two decimals.\n"
"2. A concise overview summarizing the key dimensions, metrics, filters, and date range (1–2 sentences).\n"
"3. Notable trends or patterns with mathematical reasoning and comparisons (1–3 bullet points). For example, calculate percentage changes or slopes where applicable.\n"
"4. Identification of any outliers or anomalies (1–2 bullet points).\n"
"5. A brief conclusion or recommendations for further analysis (1–2 sentences).\n\n"
"Keep your response under 150 words and avoid unnecessary repetition. Here is the dataset:\n\n"
f"{full_data_csv}"
)
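# Note: the full result set is inlined into the prompt as CSV, which assumes
# the data is small enough to fit within the model's context window.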
with st.spinner("Analyzing data with OpenAI model..."):
analysis_response = openai.chat.completions.create(
model="o3-mini", # Replace with your desired model
messages=[
{"role": "user", "content": analysis_prompt}
],
reasoning_effort="high",
)
analysis_text = analysis_response.choices[
0
].message.content.strip()
with st.expander("View Data Analysis & Insights"):
st.write(analysis_text)
else:
st.write(query_result)
except Exception as e:
st.error(f"Error executing SQL query: {e}")
else:
st.write("No valid SQL query detected. Returning response as text.")
except Exception as e:
st.error(f"Error processing the request: {e}")
if st.button("Clear History"):
st.session_state["conversation_history"] = []
st.success("History cleared!")
# Add the "View History" button
if st.button("View History"):
st.subheader("Conversation History")
if st.session_state["conversation_history"]:
for message in st.session_state["conversation_history"]:
role = message["role"].capitalize()
content = message["content"]
st.markdown(f"**{role}:** {content}")
else:
st.info("No conversation history available.")
# ─────────────────────────────────────────────────────────────
# 4. "Instructions" Tab (Updated)
# ─────────────────────────────────────────────────────────────
with main_tabs[1]:
st.header("πŸ“˜ Instructions")
st.markdown(
"""
### 🧠 Raptive Data Wizard – User Instructions
#### Overview
Raptive Data Wizard is an interactive tool that helps you quickly explore your data sources using JSON metadata. The wizard guides you through selecting the correct data source, formulating your question, and generating SQL queries to retrieve your desired data.
<div style="background-color: #fff3cd; padding: 10px; border-left: 6px solid #ffc107; border-radius: 5px;">
<strong>⚠️ Reminder:</strong> Limit your date range to a maximum of 30 days to ensure fast query performance.
</div>
#### 📊 Supported Data Sources & Tables
Select one of the following data sources from the sidebar. Each option is linked to specific tables or metadata files:
| **Data Source** | **Description & Example Uses** |
|-------------------------------------|---------------------------------|
| **Ask a Question** | Recommends which data source you should select based on your question |
| **AB Group Experiments** | Incrementality - Impressions & Revenue lift data by multiple dimensions |
| **GAMLOG** | Pageviews (PVK), Impressions, Key-Values, Dimensions by Browser, Device, **HOUR** etc. |
| **Google Analytics Pageviews** | Pageviews and sessions by day, device, or ad network |
| **GAM Earnings (Order-level Data)** | Revenue and impressions by specific GAM orders. Many dimensions to select. |
| **GAM Hourly** | Hourly Revenue and impressions by specific GAM orders. Many dimensions to select. |
| **RPM by URL** | RPM performance metrics by Traffic Source, Device, Country, URLs |
| **Site Settings** | Settings including Ads.txt, Ad Options, Video Ad Options, Ad Preferences, Sensitive Categories, ad configurations |
| **Site Annotations** | Manual annotations or incremental notes for sites |
| **Site RPM Settings** | RPM guarantees and inputs before/after adjustments |
| **Site Ad Density** | Analysis of ads per page and Site Ad Density thresholds |
| **Performance Troubleshooting** | Daily line charts per metric for a selected site, flagging recent declines |
---
#### 📝 How to Ask Questions
- **Step 1:** Navigate to the **"Ask a Question"** tab and type your natural language query.
- **Step 2:** Optionally, expand the **"Optional: Specify Dimensions, Metrics, and Date Filter"** section to refine your query by selecting specific dimensions, metrics, and a date range.
- **Step 3:** Based on your input and the available JSON metadata, the system will either recommend the best data source or generate a SQL query to fetch your data.
- **Step 4:** If a valid SQL query is generated (and your selected data source supports it), the query is executed on your Snowflake database and the results are displayed interactively.
---
#### 💡 Example Queries:
- **Ask a Question:**
*"Where can I find data by traffic source?"*
- **AB Group Experiments:**
*"Provide me with 'cnftp' for the last 7 days in aggregate."*
- **GAMLOG:**
*"Show pageviews and impressions by browser, hour, and device type for the last 7 days."*
- **Google Analytics Pageviews:**
*" Can you provide me with page view data by device for the last 7 days please include primary vertical, tier, site status, and site name. Filter to tier 1 sites"*
- **GAM Earnings:**
*"Retrieve earnings by order and ad unit over the past 14 days."*
- **GAM Hourly:**
*"Retrieve Hourly revenue by order and ad unit over the past 14 days."*
- **RPM by URL:**
*"List URLs with the highest RPM over the last week."*
- **Site Settings:**
*"Identify sites with autoplay enabled or video sticky behavior."*
- **Site Ad Density:**
*"Find sites where Site Ad Density equals 30%."*
---
#### ⚙️ Advanced Features & Customization
- **Optional Filters:** Use the filter options to fine-tune your query with specific dimensions, metrics, and date ranges.
- **SQL Query Extraction:** The wizard extracts the SQL query from the assistant's response. If a query is generated, it is automatically executed (except when using the "Ask a Question" data source); a sketch of such a query appears after this list.
- **Conversation History:** Your interactions are saved in a conversation history, which you can view or clear using the provided buttons.
- **Exporting Results:** Query results are shown in an interactive table. Use the **Download CSV** button to export the data for further analysis.
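As a sketch, an extracted query might look like the following (illustrative column names only; the real ones come from the JSON metadata):
```sql
SELECT "SITE NAME", SUM("IMPRESSIONS") AS "IMPRESSIONS"
FROM GAMLOG
WHERE "DATE" = '2025-04-01'
GROUP BY "SITE NAME";
```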
---
#### Troubleshooting & Best Practices
- **Performance:** Keeping your date range within a 30-day limit ensures quicker query execution.
- **Data Source Selection:** If you are unsure which data source to use, start with the "Ask a Question" option to get a recommendation based on available metadata.
Use these guidelines to navigate the Raptive Data Wizard efficiently and extract the precise data you need.
""",
unsafe_allow_html=True,
)
# ─────────────────────────────────────────────────────────────
# 5. "Additional Info" Tab (Placeholder)
# ─────────────────────────────────────────────────────────────
with main_tabs[2]:
st.header("Additional Info")
st.markdown("Additional information and features will be added here in the future.")