# llm.py
import os
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from groq import Groq
from dotenv import load_dotenv
load_dotenv()

def load_llm_components(device=torch.device("cpu")):
    """
    Loads the FinBERT tokenizer and sequence-classification model used for
    generating risk explanations.

    Args:
        device: Torch device to place the model on.

    Returns:
        tokenizer: The Hugging Face tokenizer.
        model: The pre-trained model for sequence classification.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained("ProsusAI/finbert")
        model = AutoModelForSequenceClassification.from_pretrained("ProsusAI/finbert")
        model.to(device)  # keep the model on the same device as the tokenized inputs
        model.eval()      # inference only
        print("LLM components loaded successfully!")
    except Exception as e:
        print(f"Failed to load LLM components: {e}")
        tokenizer, model = None, None
    return tokenizer, model
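
# Usage note (mirrors the self-test at the bottom of this file): load once at
# startup, preferring GPU when available, and reuse the pair for every request.
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   tokenizer, model = load_llm_components(device=device)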

def get_groq_client():
    """
    Initializes and returns a Groq client using the GROQ_API_KEY from
    environment variables.

    Raises:
        ValueError: If the GROQ_API_KEY is not set.
    """
    api_key = os.environ.get("GROQ_API_KEY")
    if not api_key:
        raise ValueError("GROQ_API_KEY not set in environment variables.")
    return Groq(api_key=api_key)
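
# The key is read via load_dotenv() above, so a local .env file is one option
# (assumed layout; exporting the variable in the shell works just as well):
#   GROQ_API_KEY=your_key_here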

def generate_explanation(transaction, tokenizer, model, device=torch.device("cpu")):
    """
    Generates a fraud explanation based on transaction details using the loaded model.

    Args:
        transaction (dict): Transaction details (e.g. amount, credit_limit,
            amount_ratio, use_chip).
        tokenizer: The tokenizer corresponding to the model.
        model: The pre-trained model used for generating explanations.
        device: Torch device to run the model on.

    Returns:
        A string explanation of the risk assessment.
    """
    if tokenizer is None or model is None:
        return "Explanation service unavailable"
    try:
        # Serialize the transaction fields into one sentence for the classifier.
        text = (
            f"Amount: ${transaction['amount']:.2f}, "
            f"Credit Limit: ${transaction['credit_limit']:.2f}, "
            f"Ratio: {transaction['amount_ratio']:.2f}, "
            f"Chip Usage: {transaction.get('use_chip', 'N/A')}"
        )
        inputs = tokenizer([text], return_tensors="pt", truncation=True, padding=True).to(device)
        with torch.no_grad():
            outputs = model(**inputs)
        # FinBERT emits sentiment logits; the probability of class index 1
        # (negative sentiment) is taken here as a proxy for suspiciousness.
        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
        explanation = f"Risk assessment: {probs[0][1] * 100:.1f}% suspicious activity likelihood"
    except Exception as e:
        explanation = f"Error generating explanation: {e}"
    return explanation
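
# Sanity check for the class ordering assumed above (index 1 is expected to be
# the negative-sentiment class in ProsusAI/finbert's config):
#   print(model.config.id2label)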

def financial_chat(user_input, groq_client=None):
    """
    Provides a financial advice chat response using the Groq API.

    Args:
        user_input (str): The user's query.
        groq_client: An instance of the Groq client. If not provided, it will
            be initialized using the API key.

    Returns:
        A string response from the financial advice chat.
    """
    if groq_client is None:
        try:
            groq_client = get_groq_client()
        except Exception as e:
            return f"Error initializing chat service: {e}"
    try:
        response = groq_client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are FinanceWise AI, a strict financial advisor. Only respond to finance-related questions. "
                        "If the user asks about anything unrelated to finance (e.g. languages, science, entertainment), reply: "
                        "'Sorry, I can only help with finance-related questions.'"
                    ),
                },
                {"role": "user", "content": user_input},
            ],
            model="llama-3.3-70b-versatile",
            temperature=0.3,
            top_p=0.7,
        )
        chat_response = response.choices[0].message.content
    except Exception as e:
        chat_response = f"Error during chat: {e}"
    return chat_response

if __name__ == "__main__":
    # Test the explanation components locally.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer, model = load_llm_components(device=device)
    sample_transaction = {
        "amount": 120.50,
        "credit_limit": 500.00,
        "amount_ratio": 120.50 / 500.00,
        "use_chip": True,
    }
    explanation = generate_explanation(sample_transaction, tokenizer, model, device=device)
    print("Explanation:", explanation)

    # Test the financial chat; the query is deliberately off-topic to exercise
    # the finance-only guardrail in the system prompt.
    user_query = "what is chinar quantum ai in kashmir"
    chat_response = financial_chat(user_query)
    print("Chat response:", chat_response)