VcRlAgent committed on
Commit
4335673
·
1 Parent(s): 2088725

Testing Natural Lang SQL

Browse files
Files changed (2) hide show
  1. app.py +64 -39
  2. app.py.bak +43 -0
app.py CHANGED
@@ -1,43 +1,68 @@
1
import os

import gradio as gr
from huggingface_hub import InferenceClient

# HF Inference client; requires the HF_TOKEN environment variable to be set.
client = InferenceClient(api_key=os.environ["HF_TOKEN"])


def ask_llm(prompt):
    """Send *prompt* to the hosted Llama-3.1-8B-Instruct model and return its reply.

    Parameters:
        prompt: User text to send as a single chat message.

    Returns:
        The model's text response, or an ``"Error: ..."`` string if the
        inference call fails — keeps the Gradio UI responsive instead of
        surfacing a traceback.
    """
    try:
        completion = client.chat.completions.create(
            model="meta-llama/Llama-3.1-8B-Instruct",
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=200,
            temperature=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:  # broad by design: show any API failure in the UI
        return f"Error: {str(e)}"


# Build Gradio UI
demo = gr.Interface(
    fn=ask_llm,
    inputs=gr.Textbox(lines=3, label="Ask the AI"),
    outputs=gr.Textbox(label="Response"),
    title="HF Inference Client LLM Demo",
    description="Powered by HuggingFace InferenceClient SDK.",
)

if __name__ == "__main__":
    demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Natural-language querying of Jira ticket data via a LangChain SQL agent.

Builds a small SQLite database of sample Jira tickets, then lets an
OpenAI-backed agent translate English questions into SQL against it.
NOTE(review): ChatOpenAI requires the OPENAI_API_KEY environment variable.
"""
from langchain_community.utilities import SQLDatabase
from langchain_openai import ChatOpenAI
from langchain_community.agent_toolkits import create_sql_agent
import pandas as pd
from sqlalchemy import create_engine
from datetime import datetime, timedelta

# Sample Jira data structure
jira_data = {
    'ticket_id': ['PROJ-1', 'PROJ-2', 'PROJ-3', 'PROJ-4'],
    'summary': ['Bug in login', 'Feature request', 'Performance issue', 'Security bug'],
    'status': ['Closed', 'Open', 'In Progress', 'Closed'],
    'priority': ['P1', 'P3', 'P2', 'P1'],
    'severity': ['Critical', 'Low', 'Medium', 'Critical'],
    'created_date': ['2024-01-01', '2024-01-05', '2024-01-10', '2024-01-15'],
    'closed_date': ['2024-01-03', None, None, '2024-01-16'],
    'resolution_time_hours': [48, None, None, 24],
    'assignee': ['john', 'jane', 'bob', 'alice'],
}

df = pd.DataFrame(jira_data)
df['created_date'] = pd.to_datetime(df['created_date'])
df['closed_date'] = pd.to_datetime(df['closed_date'])

# Persist to SQLite so the agent can run real SQL against the data.
engine = create_engine("sqlite:///jira.db")
df.to_sql("tickets", engine, if_exists="replace", index=False)

db = SQLDatabase(engine)

# Table description supplied to the agent for better context.
table_info = """
The 'tickets' table contains Jira ticket data with columns:
- ticket_id: Unique ticket identifier (e.g., PROJ-123)
- summary: Brief description of the ticket
- status: Current status (Open, In Progress, Closed)
- priority: Priority level (P1=Highest, P2=High, P3=Medium, P4=Low)
- severity: Severity level (Critical, High, Medium, Low)
- created_date: When ticket was created
- closed_date: When ticket was closed (NULL if still open)
- resolution_time_hours: Time taken to resolve in hours
- assignee: Person assigned to ticket
"""

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
agent = create_sql_agent(
    llm,
    db=db,
    verbose=True,
    agent_type="openai-tools",
)

if __name__ == "__main__":
    # Query examples
    questions = [
        "What is the average resolution time?",
        "How many tickets are open vs closed?",
        "Show distribution of tickets by severity",
        "Which assignee has the most P1 tickets?",
        "How many critical tickets were resolved in less than 48 hours?",
    ]

    for q in questions:
        print(f"\n{'='*60}")
        print(f"Q: {q}")
        print(f"{'='*60}")
        # Fix: the original defined table_info but never used it — the agent
        # saw only the bare question. Prepend the schema description so the
        # model actually gets the promised context.
        result = agent.invoke(f"{table_info}\nQuestion: {q}")
        print(f"A: {result['output']}\n")
app.py.bak ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from openai import OpenAI
from huggingface_hub import InferenceClient

# Initialize HF Router client using OpenAI SDK
'''
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.environ["HF_TOKEN"],  # ensure HF_TOKEN is set
)
'''

client = InferenceClient(api_key=os.environ["HF_TOKEN"])


def ask_llm(prompt):
    """Forward a single user prompt to Llama-3.1-8B-Instruct and return the reply.

    On any failure (auth, network, API error) the exception text is returned
    as an "Error: ..." string so the UI keeps working.
    """
    request_messages = [{"role": "user", "content": prompt}]
    try:
        response = client.chat.completions.create(
            model="meta-llama/Llama-3.1-8B-Instruct",
            messages=request_messages,
            max_tokens=200,
            temperature=0.7,
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"


# Gradio front-end: one input box, one output box.
prompt_box = gr.Textbox(lines=3, label="Ask the AI")
answer_box = gr.Textbox(label="Response")
demo = gr.Interface(
    fn=ask_llm,
    inputs=prompt_box,
    outputs=answer_box,
    title="HF Inference Client LLM Demo",
    description="Powered by HuggingFace InferenceClient SDK.",
)

demo.launch()