SheikhIrtiza committed on
Commit
12a88e5
·
1 Parent(s): 17fda43

updated app.py

Browse files
Files changed (5) hide show
  1. .gitignore +4 -0
  2. .gradio/flagged/dataset1.csv +13 -0
  3. __init__.py +0 -0
  4. app.py +20 -0
  5. llm.py +134 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ .env
2
+ __pycache__
3
+ /env
4
+ env/
.gradio/flagged/dataset1.csv ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Ask your finance-related question here:,FinanceWise AI Response,timestamp
2
+ I received a text message from unknown he is asking for my Aadhaar and name details,"Be cautious with such messages. Never share your Aadhaar number or any personal details with unknown individuals, as this can lead to identity theft and financial fraud.
3
+
4
+ It's possible that this is a phishing attempt, where the scammer may use your Aadhaar number to access your bank accounts, credit cards, or other financial information.
5
+
6
+ Here are some steps you can take:
7
+ 1. Do not respond to the message or provide any information.
8
+ 2. Report the incident to the authorities, such as the Aadhaar helpline or your local cybercrime cell.
9
+ 3. Monitor your bank and credit card statements for any suspicious transactions.
10
+ 4. Consider filing a complaint with the Unique Identification Authority of India (UIDAI) or your local police station.
11
+
12
+ Remember, it's always better to err on the side of caution when it comes to sharing personal and financial information. If you're unsure about the authenticity of a message or request, it's best to verify it through official channels before taking any action.",2025-04-30 12:08:10.100123
13
+ ,"Sorry, I can only help with finance-related questions.",2025-05-02 11:27:57.490997
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from llm import financial_chat, get_groq_client  # Make sure llm.py is importable

# Initialize the Groq client once at startup so every request reuses it.
# If the API key is missing, fall back to None instead of crashing the app
# at import time; financial_chat() builds its own client (and reports the
# error per-request) when it receives None.
try:
    groq_client = get_groq_client()
except Exception as e:
    print(f"Warning: Groq client unavailable at startup: {e}")
    groq_client = None


def chat_with_user(user_input):
    """Gradio callback: forward the user's question to the finance LLM."""
    return financial_chat(user_input, groq_client=groq_client)


# Create the Gradio Interface
iface = gr.Interface(
    fn=chat_with_user,
    inputs=gr.Textbox(label="Ask your finance-related question here:"),
    outputs=gr.Textbox(label="FinanceWise AI Response"),
    title="FinanceWise AI Assistant",
    description="This assistant only answers finance-related questions."
)

if __name__ == "__main__":
    iface.launch()
llm.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # llm.py
2
+ import os
3
+ import torch
4
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
5
+ from groq import Groq
6
+ from dotenv import load_dotenv
7
+ load_dotenv()
8
+
9
+
10
def load_llm_components(device=torch.device("cpu")):
    """
    Load the FinBERT tokenizer and sequence-classification model.

    Args:
        device: Torch device the model should run on. The previous version
            accepted this argument but never used it, so inputs moved to
            CUDA in generate_explanation() would hit a CPU-resident model
            and crash; the model is now moved to `device` explicitly.

    Returns:
        (tokenizer, model) on success, or (None, None) when loading fails
        (e.g. no network access or transformers unavailable).
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained("ProsusAI/finbert")
        model = AutoModelForSequenceClassification.from_pretrained("ProsusAI/finbert")
        model.to(device)  # keep the model on the same device as the inputs
        model.eval()      # inference only; disables dropout
        print("LLM components loaded successfully!")
    except Exception as e:
        print(f"Failed to load LLM components: {e}")
        tokenizer, model = None, None
    return tokenizer, model
28
+
29
def get_groq_client():
    """
    Build a Groq API client from the GROQ_API_KEY environment variable.

    Raises:
        ValueError: If GROQ_API_KEY is not set.

    Returns:
        A configured Groq client instance.
    """
    key = os.environ.get("GROQ_API_KEY")
    if not key:
        raise ValueError("GROQ_API_KEY not set in environment variables.")
    return Groq(api_key=key)
41
+
42
def generate_explanation(transaction, tokenizer, model, device=torch.device("cpu")):
    """
    Produce a short risk-assessment string for a transaction using the LLM.

    Args:
        transaction (dict): Must contain 'amount', 'credit_limit' and
            'amount_ratio' (numeric); 'use_chip' is optional.
        tokenizer: Tokenizer matching the classification model.
        model: Sequence-classification model producing logits.
        device: Torch device the encoded inputs are sent to.

    Returns:
        str: Risk summary, or an error/unavailability message.
    """
    if tokenizer is None or model is None:
        return "Explanation service unavailable"

    try:
        summary = (
            f"Amount: ${transaction['amount']:.2f}, "
            f"Credit Limit: ${transaction['credit_limit']:.2f}, "
            f"Ratio: {transaction['amount_ratio']:.2f}, "
            f"Chip Usage: {transaction.get('use_chip', 'N/A')}"
        )
        encoded = tokenizer(
            [summary], return_tensors="pt", truncation=True, padding=True
        ).to(device)
        with torch.no_grad():
            logits = model(**encoded).logits
        scores = torch.nn.functional.softmax(logits, dim=-1)
        # Class index 1 is treated as the "suspicious" class probability.
        return f"Risk assessment: {scores[0][1]*100:.1f}% suspicious activity likelihood"
    except Exception as e:
        return f"Error generating explanation: {e}"
73
+
74
def financial_chat(user_input, groq_client=None):
    """
    Answer a finance question through the Groq chat-completions API.

    Args:
        user_input (str): The user's query.
        groq_client: Optional pre-built Groq client; a fresh one is created
            from the environment when omitted.

    Returns:
        str: The assistant's reply, or an error message on failure.
    """
    if groq_client is None:
        try:
            groq_client = get_groq_client()
        except Exception as e:
            return f"Error initializing chat service: {e}"

    system_prompt = (
        "You are FinanceWise AI, a strict financial advisor. Only respond to finance-related questions. "
        "If the user asks about anything unrelated to finance (e.g. languages, science, entertainment), reply: "
        "'Sorry, I can only help with finance-related questions.'"
    )

    try:
        completion = groq_client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input},
            ],
            model="llama-3.3-70b-versatile",
            temperature=0.3,
            top_p=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error during chat: {e}"
115
+
116
if __name__ == "__main__":
    # Local smoke test for the LLM components.
    run_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer, model = load_llm_components(device=run_device)

    sample_transaction = {
        "amount": 120.50,
        "credit_limit": 500.00,
        "amount_ratio": 120.50 / 500.00,
        "use_chip": True,
    }

    print("Explanation:", generate_explanation(sample_transaction, tokenizer, model, device=run_device))

    # Exercise the Groq-backed chat path (expects an off-topic refusal).
    print("Chat response:", financial_chat("what is chinar quantum ai in kashmir"))