# -*- coding: utf-8 -*-
"""data_agent_demo.ipynb
Automatically generated by Colab.
Original file is located at
    https://colab.research.google.com/drive/1DBkfSNSZIyONNTEgSILfCoOyAGrx13DY
# Introduction
NexDatawork is an AI data agent that handles data engineering and analytics tasks without requiring you to write code.
## Prerequisites
- langchain
- langgraph
- sqlalchemy
- pandas
- gradio
Before starting, install all the required packages:
"""

# Commented out IPython magic to ensure Python compatibility.
# Clean out any mixed installs first
# %pip uninstall -y langchain langchain-core langchain-community langchain-openai langchain-anthropic langchain-google-vertexai langchain-experimental langgraph langchain-scrapegraph

# Install a consistent, modern set
# %pip install -U \
#  "langchain==0.3.*" \
#  "langchain-core==0.3.*" \
#  "langchain-community==0.3.*" \
#  "langgraph>=0.2,<0.3" \
#  "langchain-openai>=0.2.0" \
#  "langchain-anthropic>=0.2.0" \
#  "langchain-google-vertexai>=2.0.0" \
#  "sqlalchemy>=2.0" \
#  "pandas>=2.0" \
#  "gradio>=4.0" \
#  "langchain-experimental"\
#  "langchain-scrapegraph"

import sys, importlib.util, importlib.metadata as md

def v(p):
    try:
        return md.version(p)
    except md.PackageNotFoundError:
        return "not installed"

print("Kernel Python:", sys.executable)
print("langchain:", v("langchain"))
print("langchain-core:", v("langchain-core"))
print("langchain-community:", v("langchain-community"))
print("langgraph:", v("langgraph"))
print("langchain-openai:", v("langchain-openai"))
print("langchain-anthropic:", v("langchain-anthropic"))
print("langchain-google-vertexai:", v("langchain-google-vertexai"))
print("langchain-experimental:", v("langchain-experimental"))
print("langchain-scrapegraph:", v("langchain-scrapegraph"))

print("langgraph importable?", importlib.util.find_spec("langgraph") is not None)

import os
import io
import contextlib
import pandas as pd
import gradio as gr
from IPython.display import Markdown, HTML, display

from sqlalchemy import (
    Engine, create_engine, MetaData, Table, Column,
    String, Integer, Float, insert, inspect, text
)

# LangChain 0.3.x import paths
from langchain_openai import AzureChatOpenAI
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.agents import initialize_agent
from langchain.agents.agent_types import AgentType
from langchain.tools import tool
from langchain_scrapegraph.tools import SmartScraperTool
from langchain.memory import ConversationTokenBufferMemory
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.utilities import SQLDatabase
from langchain_core.messages import HumanMessage

# LangGraph
from langgraph.prebuilt import create_react_agent



print("βœ… Imports OK")

"""To access AzureOpenAI models you'll need to create an Azure account, create a deployment of an Azure OpenAI model, get the name and endpoint for your deployment, get an Azure OpenAI API key and install the langchain-openai integration package.
To access SmartScraperTool you will need a ScrapeGraphAI (SGAI) account and get an API key to launch the agent.
Replace the placeholders with the actual values.
"""

os.environ["AZURE_OPENAI_ENDPOINT"] = "INSERT THE AZURE OPENAI ENDPOINT"
os.environ["AZURE_OPENAI_API_KEY"] = "INSERT YOUR AZURE OPENAI API KEY"
os.environ["SGAI_API_KEY"] = "INSERT YOUR SGAI API KEY"

"""To set up the Azure OpenAI model choose the name for ```AZURE_DEPLOYMENT_NAME``` and insert ```AZURE_API_VERSION``` (the latest supported version can be found here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference)."""

# Load your Azure environment variables
AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
AZURE_DEPLOYMENT_NAME = "gpt-4.1"  # 👈 Change if needed
AZURE_API_VERSION = "2025-01-01-preview"  # 👈 Use your correct version

# Define Azure LLM with streaming enabled
model = AzureChatOpenAI(
    openai_api_version=AZURE_API_VERSION,
    azure_deployment=AZURE_DEPLOYMENT_NAME,
    azure_endpoint=AZURE_OPENAI_ENDPOINT,
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)
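
"""As a quick, optional smoke test of the deployment (assuming the credentials above are valid), you can send a single message; uncomment to run:"""

# print(model.invoke([HumanMessage(content="Reply with the single word OK.")]).content)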

"""The following block contains prompts that define the agents behaviour.
```CSV_PROMPT_PREFIX``` is responsible for the data agent logic, i.e. steps that it takes to complete a task. The prefix can be modified to change analytical methodology, add specific data processing steps, implement a certain data validation technique and more.
```CSV_PROMPT_SUFFIX``` defines the structure and the content of the agent's output. Suffix can be modified to change the report structure, add sections, include additional insights and so on.
```system_message``` is for creating SQL queries. It specifies the behaviour of the agent, making it certify its results and restricting it from changing the database.
```SCRAPING_PROMPT_PREFIX``` is responsible for the web scraping agent logic. It specifies how the agent should behave and defines its chain of thought when asked to find data online.
```SCRAPING_PROMPT_SUFFIX``` is responsible for the output of the web scraping agent. It can be changed to set up the format of the output.
"""

# Prompt prefix to set the tone for the agent.
# Specifying the prompt prefix makes the agent's results more specific and consistent.
# The following prompt can be substituted with your own.
CSV_PROMPT_PREFIX = """
Set pandas to show all columns.
Get the column names and infer data types.
Then attempt to answer the question using multiple methods.
Please provide only the Python code required to perform the action, and nothing else.
"""

# Prompt suffix describing the output format.
# Modify this prompt to change the structure of the agent's answer.
# You can also add more sections so that the agent covers more aspects.
# The following prompt can be substituted with your own.
CSV_PROMPT_SUFFIX = """
- Try at least 2 different methods of calculation or filtering.
- Reflect: Do they give the same result?
- After performing all necessary actions and analysis with the dataframe, return the answer in clean **Markdown**, include summary table if needed.
- Include **Execution Recommendation** and **Web Insight** in the final Markdown.
- Always conclude the final Markdown with:
### Final Answer
Your conclusion here.
---
### Explanation
Mention specific columns you used.
Please provide only the Python code required to perform the action, and nothing else until the final Markdown output.
"""


# Prompt for creating SQL queries.
# Specifying the pipeline makes the agent's results more consistent.
system_message = """
You are an agent designed to interact with a SQL database.
Given an input question, create a syntactically correct {dialect} query to run,
then look at the results of the query and return the answer. Unless the user
specifies a specific number of examples they wish to obtain, always limit your
query to at most {top_k} results.
You can order the results by a relevant column to return the most interesting
examples in the database. Never query for all the columns from a specific table,
only ask for the relevant columns given the question.
You MUST double check your query before executing it. If you get an error while
executing a query, rewrite the query and try again.
DO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the
database.
To start you should ALWAYS look at the tables in the database to see what you
can query. Do NOT skip this step.
Then you should query the schema of the most relevant tables.
""".format(
    dialect="SQLite",
    top_k=5,
)

sql_suffix_prompt = '''
ALWAYS end your answer as follows:
### Final answer
Your query here
--
The answer here
'''

SCRAPING_PROMPT_PREFIX = '''
ROLE: Expert Data Scraper
MISSION: Extract precise online data using systematic keyword analysis
THINKING PROCESS:
1. Keyword Analysis: Identify primary entities (X, Y) and quantifiers (n, m)
2. Query Strategy: Formulate targeted search queries for each entity
3. Data Extraction: Scrape exact quantities specified
4. Validation: Verify results match request parameters
EXAMPLE:
User: "List first 5 startups and 3 investors in AI"
Keywords: ["startups:5", "investors:3", "AI"]
Action: Search "AI startups" → extract 5 instances → Search "AI investors" → extract 3 instances
WORKFLOW:
- Print identified keywords with quantities
- Execute sequential searches per keyword group
- Collect exactly specified instances
- Present structured results
READY FOR QUERY.
'''

SCRAPING_PROMPT_SUFFIX = '''
ROLE: Data Extraction Agent
MISSION: Structure all scraped data as valid pandas DataFrames
OUTPUT REQUIREMENTS:
- Format: pandas DataFrame
- Columns: 1-2 word descriptive names
- Content: Only strings or numerical values (no lists/dicts, no nested structures)
- Validation: Must pass pd.DataFrame access tests
VALIDATION CHECKLIST:
✓ Each column contains only strings or numerics
✓ No nested structures (lists/dicts) in cells
✓ Column names are descriptive and concise
✓ DataFrame is accessible via standard indexing
✓ All columns MUST BE OF THE SAME LENGTH
EXAMPLE OUTPUT:
```python
pd.DataFrame({
    'Company': ['Startup A', 'Startup B'],
    'Funding': [5000000, 7500000],
    'Industry': 'Artificial Intelligence'
})
```
'''

"""The following block is responsible for the logic of the agent and the output that it produces.
```ask_agent``` function concatenates the dataframes into one and starts an AI agent for working with the concatenated dataframes. It uses the prompts from the previous blocks for its logic.
"""

# The Azure LLM `model` configured earlier is reused by all the agents below.


# --- Agent Logic ---
def ask_agent(files, question, history):
    try:
        dfs = [pd.read_csv(f.name) for f in files]
        df = pd.concat(dfs, ignore_index=True)  # concatenate all uploaded files into one dataframe
    except Exception as e:
        return f"❌ Could not read CSVs: {e}", ""

    try:
        agent = create_pandas_dataframe_agent(
            llm=model,  # the Azure LLM configured earlier
            df=df,  # a pandas dataframe or a list of pandas dataframes
            verbose=True,  # enables verbose logging for debugging
            agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # a ReAct agent that works without additional examples
            allow_dangerous_code=True,  # allows execution of generated Python code
            handle_parsing_errors=True,  # recover gracefully when the LLM output cannot be parsed
        )  # creates an agent for working with pandas dataframes

        full_prompt = CSV_PROMPT_PREFIX + question + CSV_PROMPT_SUFFIX

        buffer = io.StringIO()
        with contextlib.redirect_stdout(buffer):  # the agent's verbose output is redirected to the buffer
            result = agent.invoke(full_prompt)
        trace = buffer.getvalue()  # the intermediate reasoning trace (captured but not displayed)
        output = result["output"]  # the final answer

        return history + output, output

    except Exception as e:
        return f"❌ Agent error: {e}", ""

"""The block below deals with creating SQL code.
```create_db``` creates a database where all the uploaded dataframes are stored for the data agent to work with.
```start_llm``` starts a tool for working with SQL databases.
```extract_code``` is used for extracting the SQL query from the agent's output.
```sql_pipeline``` defines the pipeline, starting from creating a database with the uploaded dataframes, starting the agent for working with databases and creating the query according to the user's question.
"""

# create_db receives the list of uploaded files and writes each one into a SQLite table
def create_db(files):
  print("="*10+"\nCREATE_DB\n"+"="*10)
  try:
      print("Attempting to create database...")
      engine = create_engine("sqlite:///database.db")
      dataframes = dict()
      print("="*10+f"CREATE_DB:\nfiles:{[f.name for f in files]}\n"+"="*10)
      for f in files:
        table_name = os.path.splitext(os.path.basename(f.name))[0]
        dataframes[table_name] = pd.read_csv(f.name)
      with engine.begin() as connection:
        for name, table in dataframes.items():
          table.to_sql(name, connection, if_exists="replace", index=False)  # writes the table into the database

      db = SQLDatabase.from_uri("sqlite:///database.db")
      print("DATABASE database.db CREATED")
  except Exception as e:
      return f"Database error: {e}"
  return db
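
"""A quick sanity check on the SQLite file ```create_db``` produces, using the ```inspect``` helper already imported from SQLAlchemy (uncomment after a database has been created):"""

# print(inspect(create_engine("sqlite:///database.db")).get_table_names())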

# Initializes the SQL toolkit and returns the LLM together with its database tools
def start_llm(database):
  try:
    print("="*10+"\nSTART_LLM\n"+"="*10)
    toolkit = SQLDatabaseToolkit(db=database, llm=model) #creates a tool for working with SQL databases
    tools = toolkit.get_tools()
  except Exception as e:
    return f"Couldn't retrieve SQLDatabaseToolkit: {e}"
  print("\nSQLDatabaseToolkit CREATED\n")
  return model, tools


# Extracts the final answer (everything from the '### Final answer' marker) from the agent's output
def extract_code(message):
    print("="*10+"\nEXTRACT_CODE\n"+"="*10)
    try:
        p1 = message.find('### Final answer')
        if p1 == -1:  # marker not found: return the whole output unchanged
            return message
        print(p1, message[p1:p1+50])
        return message[p1:]
    except Exception as e:
        print(f'Extraction error: {e}')
        return message
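
"""For example, ```extract_code``` keeps only the tail of the agent's output:"""

# extract_code("...reasoning...\n### Final answer\nSELECT 1")  # -> "### Final answer\nSELECT 1"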


# Receives the uploaded files, puts them in a database and uses an AI agent to create queries based on the user's question
def sql_pipeline(tables,question,history):
    print("="*10+"\nSQL_PIPELINE\n"+"="*10)
    db = create_db(tables)  # uploads the files added by the user and puts them in a database

    if isinstance(db, str):  # an error message was returned instead of a database
        return f"❌ {db}", history

    if not os.path.exists("database.db"):
        print("Database doesn't exist")
        return "❌ Database doesn't exist", history

    result = start_llm(db)  # returns the LLM and the tools for working with the database
    if isinstance(result, str):  # an error message was returned instead of the tools
        return f"❌ {result}", history

    llm, tools = result
    try:
        agent_executor = create_react_agent(llm, tools, prompt=system_message+sql_suffix_prompt)
        output = ""
        for step in agent_executor.stream(
            {"messages": [{"role": "user", "content": question}]},
            stream_mode="values",
        ):
            output += step["messages"][-1].content  # append the newest message produced at each step
        final_answer = extract_code(output)
        return history + final_answer, final_answer
    except Exception as e:
        return f"❌ SQL agent error: {e}", history

"""THe following block is responsible for creating a smart ETL pipeline"""

@tool
def preview_data(table: str) -> str:
  "Reads a table and returns a preview of its first rows"
  df = pd.read_csv(table)
  return str(df.head())  # convert to string to match the declared return type

@tool
def suggest_transformation(column_summary: str) -> str:
  "Suggests transformations based on a column summary"
  prompt = f"""
  You are a data engineer assistant. Based on the following column summary, suggest simple, short ETL transformation steps.
  Output format: each suggestion on a new line, without explanations or markdown.
  Example:
  Remove $ from revenue and cast to float
  Column summary:
  {column_summary}
  """
  return model.invoke(prompt).content.strip()  # .invoke replaces the deprecated .predict in LangChain 0.3.x

@tool
def generate_python_code(transform_description: str) -> str:
  "Generates pandas code from the transformation description"
  prompt = f"""
  You are a data engineer. Write pandas code to apply the following ETL transformation to a dataframe called 'df'.
  Transformations:
  {transform_description}
  Only return pandas code. No explanation, no markdown.
  """
  return model.invoke(prompt).content.strip()
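
"""Each ```@tool``` can also be invoked on its own, which is handy for debugging the prompts; LangChain tools expose ```.invoke```. A hypothetical column summary:"""

# print(suggest_transformation.invoke("revenue: object dtype, values like '$1,234'"))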

# dataframe is a string with the name of the CSV file pushed through the ETL process
def etl_pipeline(dataframe, history):
  try:
    tools = [preview_data, suggest_transformation, generate_python_code]

    agent = initialize_agent(
        tools,
        model,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
        handle_parsing_errors=True,  # must be set at construction time, not passed as a run input
    )

    input_prompt = f"""
    Preview the table {dataframe} and \
        generate Python code to read the table, clean it, and finally write the \
        dataframe into a table called {'Cleaned_'+dataframe}. \
        Do not stop the Python session
        """

    # Preview + suggest + generate code in a single run
    response = agent.run(input_prompt)

    print("Generated Python Code:\n")
    print(response)
    response2 = response.strip('`').replace('python', '')  # strip the markdown code fence from the reply
    return history + response2, response2
  except Exception as e:
    return f"❌ ETL pipeline error: {e}", history

"""The following code is responsible for AI web scraping agent"""

def web_scraping(question,history):
  try:
    tools = [
        SmartScraperTool(),
    ]

    agent = initialize_agent(
        tools=tools,
        llm=model,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True
    )

    buffer = io.StringIO()

    print(SCRAPING_PROMPT_PREFIX + question + SCRAPING_PROMPT_SUFFIX)
    with contextlib.redirect_stdout(buffer):  # the agent's verbose output is redirected to the buffer
        response = agent.run(SCRAPING_PROMPT_PREFIX + question + SCRAPING_PROMPT_SUFFIX)
    trace = buffer.getvalue()  # the agent's reasoning trace (captured but not displayed)
    return history + response, response
  except Exception as e:
    return f'❌ Web scraping error: {e}', history

"""The next section creates a web interface using Gradio, providing a user-friendly way to analyze data and create SQL queries.
```
with gr.Blocks(
    css='''
      Change the code here to modify the styling of the UI
    '''
    ) as demo:
```
**Display Area**:
- `result_display`: Markdown report output
- `trace_display`: Agent reasoning trace
**Input Section**:
- `file_input`: Multiple CSV upload
- `question_input`: User query box
**Action Buttons**:
- `sql_button`: Generate SQL queries β†’ `sql_pipeline` function
- `ask_button`: Run analysis β†’ `ask_agent` function
**Styling**
- Light theme with rounded corners
- Custom CSS for professional appearance
**Launch**
`demo.launch(share=True,debug=False)` - Public access enabled, debugging disabled
For debugging use `debug=True` in order to see the messages in the console.
"""

# --- Gradio UI ---
with gr.Blocks(
    css="""
    body, .gradio-container {
        background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%) !important;
        color: #1e293b !important;
        font-family: 'Inter', 'SF Pro Display', -apple-system, sans-serif;
        min-height: 100vh;
    }
    #title {
        color: #0f172a !important;
        font-size: 2.25rem;
        font-weight: 700;
        text-align: center;
        padding: 24px 0 8px 0;
        letter-spacing: -0.025em;
    }
    #subtitle {
        text-align: center;
        color: #64748b !important;
        font-size: 1rem;
        margin-bottom: 20px;
    }
    .instructions-box {
        background: linear-gradient(135deg, #dbeafe 0%, #e0e7ff 100%) !important;
        border: 1px solid #93c5fd !important;
        border-radius: 12px !important;
        padding: 16px !important;
        margin-bottom: 16px !important;
    }
    .gr-box, .gr-input, .gr-output, .gr-markdown, .gr-textbox, .gr-file, textarea, input {
        background: #ffffff !important;
        border: 1px solid #e2e8f0 !important;
        border-radius: 10px !important;
        color: #1e293b !important;
        box-shadow: 0 1px 3px rgba(0,0,0,0.05);
    }
    .trace-markdown {
        height: 400px !important;
        overflow-y: auto;
        resize: none;
        background: #ffffff !important;
    }
    textarea::placeholder, input::placeholder {
        color: #94a3b8 !important;
    }
    .primary-btn {
        background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%) !important;
        color: #ffffff !important;
        border: none !important;
        border-radius: 8px !important;
        font-weight: 600 !important;
        padding: 10px 24px !important;
        transition: all 0.2s ease !important;
    }
    .primary-btn:hover {
        background: linear-gradient(135deg, #4f46e5 0%, #7c3aed 100%) !important;
        transform: translateY(-1px);
        box-shadow: 0 4px 12px rgba(99, 102, 241, 0.4) !important;
    }
    .secondary-btn {
        background: #ffffff !important;
        color: #475569 !important;
        border: 1px solid #cbd5e1 !important;
        border-radius: 8px !important;
        font-weight: 500 !important;
        padding: 10px 24px !important;
        transition: all 0.2s ease !important;
    }
    .secondary-btn:hover {
        background: #f8fafc !important;
        border-color: #94a3b8 !important;
    }
    .button-row {
        gap: 12px !important;
    }
    """
) as demo:

    gr.Markdown("<h2 id='title'>πŸ“Š NexDatawork Data Agent</h2>")
    gr.Markdown("<p id='subtitle'>AI-powered data analysis without writing code</p>")

    with gr.Column():
      
      # Instructions Section
      gr.Markdown("""
### πŸ“‹ Instructions
1. **Upload CSV Files** β€” Drag & drop or click to upload one or more CSV files
2. **Ask Your Question** β€” Type your data analysis question in natural language
3. **Choose an Action:**
   - **Analyze Data** β€” Get AI-powered insights and analysis from your data
   - **Generate SQL** β€” Create SQL queries based on your question
   - **Web Scraping** β€” Find relevant data from the web
      """, elem_classes=["instructions-box"])

      with gr.Row(equal_height=True):
        file_input = gr.File(label="📁 Upload CSV Files", file_types=[".csv"], file_count="multiple", height=140)
        question_input = gr.Textbox(
            label="💬 Ask Your Question",
            placeholder="e.g., What is the trend for revenue over time? Show me top 10 customers by sales.",
            lines=4
        )

      # Buttons aligned to the left
      with gr.Row(elem_classes=["button-row"]):
        ask_button = gr.Button("🔍 Analyze Data", elem_classes=["primary-btn"])
        sql_button = gr.Button("🗄️ Generate SQL", elem_classes=["secondary-btn"])
        scraping_button = gr.Button("🌐 Web Scraping", elem_classes=["secondary-btn"])
      
      history = gr.State(value="")

      with gr.Row():
        with gr.Column():
          gr.Markdown("### πŸ“ˆ Analysis Results")
          trace_display = gr.Markdown(elem_classes=["trace-markdown"])
        with gr.Column():
          gr.Markdown("### πŸ—ƒοΈ SQL / ETL Output")
          sql_display = gr.Markdown(elem_classes=["trace-markdown"])

      # Event handlers
      ask_button.click(fn=ask_agent, inputs=[file_input, question_input, history], outputs=[trace_display, history])
      sql_button.click(fn=sql_pipeline, inputs=[file_input, question_input, history], outputs=[sql_display, history])
      scraping_button.click(fn=web_scraping, inputs=[question_input, history], outputs=[trace_display, history])

demo.launch(share=True, debug=False)