File size: 4,799 Bytes
12a88e5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 |
# llm.py
import os
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from groq import Groq
from dotenv import load_dotenv
load_dotenv()
def load_llm_components(device=torch.device("cpu")):
    """
    Load the FinBERT tokenizer and model used for generating explanations.

    Args:
        device: Torch device to place the model on (CPU by default).

    Returns:
        tuple: ``(tokenizer, model)`` on success, ``(None, None)`` if loading
        fails for any reason.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained("ProsusAI/finbert")
        model = AutoModelForSequenceClassification.from_pretrained("ProsusAI/finbert")
        # Bug fix: the original accepted `device` but never used it, so the
        # model always stayed on CPU even when a GPU device was requested.
        model.to(device)
        model.eval()  # inference-only: disable dropout / training behavior
        print("LLM components loaded successfully!")
    except Exception as e:
        # Best-effort loading: callers check for None and degrade gracefully.
        print(f"Failed to load LLM components: {e}")
        tokenizer, model = None, None
    return tokenizer, model
def get_groq_client():
    """
    Build a Groq API client from the ``GROQ_API_KEY`` environment variable.

    Returns:
        Groq: A client authenticated with the configured API key.

    Raises:
        ValueError: If ``GROQ_API_KEY`` is missing or empty.
    """
    key = os.environ.get("GROQ_API_KEY")
    if key:
        return Groq(api_key=key)
    raise ValueError("GROQ_API_KEY not set in environment variables.")
def generate_explanation(transaction, tokenizer, model, device=torch.device("cpu")):
    """
    Produce a short fraud-risk explanation for a transaction via the model.

    Args:
        transaction (dict): Transaction fields; requires ``amount``,
            ``credit_limit`` and ``amount_ratio``; ``use_chip`` is optional.
        tokenizer: Tokenizer matching ``model``.
        model: Sequence-classification model producing risk logits.
        device: Torch device the encoded inputs are moved to before inference.

    Returns:
        str: Human-readable risk summary, or an error / unavailable message.
    """
    if model is None or tokenizer is None:
        return "Explanation service unavailable"
    try:
        summary = ", ".join([
            f"Amount: ${transaction['amount']:.2f}",
            f"Credit Limit: ${transaction['credit_limit']:.2f}",
            f"Ratio: {transaction['amount_ratio']:.2f}",
            f"Chip Usage: {transaction.get('use_chip', 'N/A')}",
        ])
        encoded = tokenizer([summary], return_tensors="pt", truncation=True, padding=True).to(device)
        with torch.no_grad():
            logits = model(**encoded).logits
        scores = torch.nn.functional.softmax(logits, dim=-1)
        # NOTE(review): class index 1 is assumed to be the "suspicious" label —
        # confirm against the model's id2label mapping.
        return f"Risk assessment: {scores[0][1]*100:.1f}% suspicious activity likelihood"
    except Exception as e:
        # Any failure (missing keys, tokenizer/model errors) is reported inline.
        return f"Error generating explanation: {e}"
def financial_chat(user_input, groq_client=None):
    """
    Answer a finance question through the Groq chat-completion API.

    Args:
        user_input (str): The user's question.
        groq_client: Optional pre-built Groq client; one is created from the
            environment when omitted.

    Returns:
        str: The assistant's reply, or an error message on failure.
    """
    if groq_client is None:
        try:
            groq_client = get_groq_client()
        except Exception as e:
            return f"Error initializing chat service: {e}"

    system_prompt = (
        "You are FinanceWise AI, a strict financial advisor. Only respond to finance-related questions. "
        "If the user asks about anything unrelated to finance (e.g. languages, science, entertainment), reply: "
        "'Sorry, I can only help with finance-related questions.'"
    )
    try:
        completion = groq_client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input},
            ],
            model="llama-3.3-70b-versatile",
            temperature=0.3,
            top_p=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:
        # API/network failures surface as an inline error string, not a raise.
        return f"Error during chat: {e}"
if __name__ == "__main__":
    # Smoke-test the LLM components locally, preferring GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer, model = load_llm_components(device=device)

    demo_txn = {
        "amount": 120.50,
        "credit_limit": 500.00,
        "amount_ratio": 120.50 / 500.00,
        "use_chip": True,
    }
    print("Explanation:", generate_explanation(demo_txn, tokenizer, model, device=device))

    # Exercise the Groq-backed chat path as well.
    print("Chat response:", financial_chat("what is chinar quantum ai in kashmir"))
|