SnehaAkula committed on
Commit 0aee03f · verified · 1 Parent(s): 39fb8ad

Upload app.py

Files changed (1)
  1. app.py +446 -0
app.py ADDED
@@ -0,0 +1,446 @@
+ from flask import Flask, jsonify, request, render_template
+ import pandas as pd
+ from flask_cors import CORS
+ import os
+ import json
+ import torch
+ from sentence_transformers import SentenceTransformer
+ import logging
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+ from datetime import datetime
+ from transformers import BertTokenizer, BertForSequenceClassification
+ import random
+ import re
+
+ app = Flask(__name__)
+ app.json.sort_keys = False
+ CORS(app)
+
+ # Configure logging
+ logging.basicConfig(level=logging.DEBUG)
+
+
+ # Load the sentence-embedding model used for note similarity
+ model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(device)
+ print("---" * 30)
+ print(device)
+
+ # Load the fine-tuned BERT classifier and its tokenizer
+ loaded_model = BertForSequenceClassification.from_pretrained('saved_model')
+ loaded_tokenizer = BertTokenizer.from_pretrained('saved_model')
+
+ # Load the fine-tuned GPT-2 model used for note autocompletion
+ tokenizer = GPT2Tokenizer.from_pretrained("checkpoint-15000")
+ model_gpt = GPT2LMHeadModel.from_pretrained("checkpoint-15000")
+ model_gpt.to(device)
+ print("====" * 20)
+
+ # df_case = pd.read_csv('case_clustering24.csv', on_bad_lines="skip")
+ grouped = pd.read_csv('grouped_22_23_24.csv', on_bad_lines="skip")
+
+
+ from openai import OpenAI
+ # Read the OpenAI key from the environment rather than hard-coding it in source control
+ api_key = os.environ.get("OPENAI_API_KEY")
+ client = OpenAI(api_key=api_key)
+
+ def ask_gpt(question):
+     # Query the chat completions endpoint with a minimal system prompt
+     response = client.chat.completions.create(
+         model="gpt-4o-mini",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": question}
+         ]
+     )
+     return response.choices[0].message.content
+
+
+ class DataFrameManager:
+     def __init__(self, file_path):
+         self.file_path = file_path
+         self.df = pd.DataFrame()  # Initialize an empty DataFrame
+         self.load_dataframe()
+
+     def load_dataframe(self):
+         if os.path.exists(self.file_path):
+             constant_date = "2024-01-01"
+             constant_policy_number = "POL123456"
+             constant_status = "Active"
+             self.df = pd.read_csv(self.file_path)
+             self.df['date'] = constant_date
+             self.df['policy_number'] = constant_policy_number
+             self.df['status'] = constant_status
+             self.df = self.df.rename(columns={
+                 'note_id': 'id',
+                 'cleaned_comments': 'summary',
+                 'summarized_text': 'suggested_summary'
+             })
+         else:
+             print(f"File not found: {self.file_path}")
+
+     def get_dataframe(self):
+         # Return a copy of the first 200 rows so callers cannot mutate the cached frame
+         return self.df.copy().head(200)
+
+ df_manager = DataFrameManager('client_notes_Sneha.csv')
+
+ # Generate autocomplete candidates for a prompt with the fine-tuned GPT-2 model
+ def generate_text(prompt_text, max_length=100, num_return_sequences=10):
+     # Tokenize the prompt text and convert to tensors
+     input_ids = tokenizer(prompt_text, return_tensors="pt").input_ids.to(device)
+     attention_mask = tokenizer(
+         prompt_text, return_tensors="pt").attention_mask.to(device)
+     print("........")
+     try:
+         # Move input_ids and attention_mask tensors to the GPU if one is available
+         input_ids = input_ids.to(device)
+         attention_mask = attention_mask.to(device)
+
+         outputs = model_gpt.generate(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             pad_token_id=tokenizer.pad_token_id,
+             # max_length=10,
+             max_new_tokens=3,
+             num_beams=50,
+             temperature=0.7,
+             top_k=50,
+             top_p=0.9,
+             do_sample=True,
+             num_return_sequences=num_return_sequences
+         )
+         print(outputs)
+         print(",,,")
+         # Decode the generated text
+         generated_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
+         print(generated_texts)
+         unique_texts = list(set(generated_texts))
+         return unique_texts[:5]
+     except Exception as e:
+         print(str(e))
+         return []
+
+
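+ # Illustrative usage (a sketch; the prompt text below is made up, and it assumes the
+ # checkpoint-15000 tokenizer/model loaded above are available):
+ #   suggestions = generate_text("Claimant called regarding")
+ #   # -> up to 5 unique short continuations, or [] if generation fails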
+ @app.route('/get-csv')
+ def get_csv():
+     df = df_manager.get_dataframe()
+     data = df.to_dict(orient='records')
+     return jsonify(data)
+
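+ # Illustrative request (assumes the app is running locally on port 5000):
+ #   curl http://localhost:5000/get-csv
+ # Returns the first 200 note records as a JSON array.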
+ @app.route('/search_notes', methods=['POST'])
+ def search_notes():
+     request_data = request.get_json()
+     claim_id_to_search = request_data.get('name', '')
+
+     full_df = df_manager.get_dataframe()
+     # print("DataFrame columns:", full_df.columns)  # Debug output
+     # print("DataFrame first few rows:", full_df.head())  # Debug output
+
+     if claim_id_to_search:
+         try:
+             claim_id_to_search = int(claim_id_to_search)  # Convert the claim ID from string to integer
+             # print("Searching for ID:", claim_id_to_search)  # Debug output
+
+             filtered_df = full_df[full_df['id'] == claim_id_to_search]
+             # print("Filtered DataFrame:", filtered_df)  # Debug output
+
+             if filtered_df.empty:
+                 print("No matching records found, returning an empty result.")
+                 return jsonify({})
+             else:
+                 return jsonify(filtered_df.to_dict(orient='records'))
+         except ValueError as e:
+             print(e)
+             return jsonify({"error": "Invalid claim ID format"}), 400
+     else:
+         print("No claim ID provided, returning an empty result.")
+         return jsonify({})
+
+
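+ # Illustrative request (assumes the app is running locally on port 5000; the id value is hypothetical):
+ #   curl -X POST http://localhost:5000/search_notes \
+ #        -H "Content-Type: application/json" -d '{"name": "101"}'
+ # Returns the matching note records, or {} when the id is missing or not found.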
+ @app.route('/get-similarity', methods=['POST'])
+ def get_similarity():
+     data = request.json
+     logging.debug(f"Received payload: {data}")
+
+     if not data or 'id' not in data:
+         return jsonify({'error': 'No valid data provided'}), 400
+
+     note_id = data['id']
+     logging.debug(f"Note ID: {note_id}")
+     df = df_manager.get_dataframe()
+     print(df.columns)
+     filtered_df = df[df['id'] == note_id]
+     if filtered_df.empty:
+         return jsonify({'error': 'No matching record found'}), 404
+
+     row = filtered_df.iloc[0]
+     summarized_text = row['suggested_summary']
+     logging.debug(f"Summarized Text: {summarized_text}")
+
+     # Encode the target summarized_text
+     target_embedding = model.encode(summarized_text, convert_to_tensor=True, device=device).unsqueeze(0)
+
+     # Calculate similarities with all entries in the suggested_summary column
+     similarities = []
+     for index, row in df.iterrows():
+         text = row['suggested_summary']
+         embedding = model.encode(text, convert_to_tensor=True, device=device).unsqueeze(0)
+         similarity = torch.nn.functional.cosine_similarity(target_embedding, embedding).item()
+         similarities.append({
+             'id': row['id'],
+             'status': row['status'],
+             'policy_number': row['policy_number'],
+             'date': row['date_created'].split(' ')[0],
+             'summary': row['summary'],
+             'suggested_summary': text,
+             'similarity': similarity
+         })
+
+     # Convert the results to a dataframe
+     similarity_df = pd.DataFrame(similarities)
+
+     # Sort the dataframe by similarity in descending order
+     similarity_df = similarity_df.sort_values(by='similarity', ascending=False)
+     print(similarity_df.head())
+     print(similarity_df.columns)
+     # Convert the dataframe to a dictionary
+     result = similarity_df.to_dict(orient='records')
+
+     return jsonify(result), 200
+
+
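+ # Illustrative request (assumes the app is running locally on port 5000; the id value is hypothetical):
+ #   curl -X POST http://localhost:5000/get-similarity \
+ #        -H "Content-Type: application/json" -d '{"id": 101}'
+ # Returns every note ranked by cosine similarity to the selected note's suggested summary.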
+ @app.route('/autocomplete', methods=['GET'])
+ def auto_complete():
+     data = request.args
+     print("-----------------------")
+     print(data)
+     prompt = data.get('prompt', '')
+     print(prompt)
+     if not prompt:
+         return jsonify({"error": "No prompt provided"}), 400
+     try:
+         print("====")
+         generated_texts = generate_text(prompt)
+         print(generated_texts)
+         return jsonify({'generated_text': generated_texts})
+
+     except Exception as e:
+         return jsonify({"error": str(e)}), 500
+
+
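+ # Illustrative request (assumes the app is running locally on port 5000):
+ #   curl "http://localhost:5000/autocomplete?prompt=Claimant%20called%20regarding"
+ # Returns {"generated_text": [...]} with the GPT-2 continuations.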
+ @app.route('/assign-case-id', methods=['POST'])
+ def classify_claim():
+     data = request.json
+     case_id = int(data['case_id'])
+     # claim_line_id = data['claim_line_id']
+     diagnosis = data['diagnosis']
+     claim_line_note = data['claim_line_notes']
+     print(data['service_date'])
+     service_date = datetime.strptime(data['service_date'], "%Y-%m-%d")
+     print("++++++" * 30)
+
+     # Convert all `case_id` values in `grouped` to integers
+     grouped['case_id'] = grouped['case_id'].astype(int)
+
+     record = grouped[grouped['case_id'] == case_id]
+
+     # If the case_id is not present in the history, open a new case
+     # (an empty record and a missing case_id are the same condition)
+     if record.empty:
+         new_case_id = random.randint(100000, 999999)
+         return jsonify({"message": f"New Case: Customer id {case_id} not found. \n"
+                                    f"A new case has been created with Case ID: {new_case_id}."})
+
+
+     print("--" * 2)
+     print(record['service_date'])
+     # Compare service_date
+     existing_service_date = datetime.strptime(eval(record['service_date'].values[0])[-1], "%Y-%m-%d")
+     # existing_service_date = datetime.strptime(eval(record['service_date'].values[0])[-1], '%d-%m-%Y')
+     print("-")
+     print(existing_service_date)
+     print("-")
+     print(service_date)
+     is_recent = (service_date - existing_service_date).days < 90
+     print(is_recent)
+
+     if case_id in grouped['case_id'].values and not is_recent:
+         return jsonify({
+             "message": (
+                 f"New case (Customer id {case_id} found, however the last service date is more than 90 days old). "
+                 f"Last service date: {existing_service_date.strftime('%Y-%m-%d')}"
+             )
+         })
+
+
+     # Build the claim-history sequence fed to the BERT classifier
+     past_claims_data = {}
+     for _, row in record.iterrows():
+         case_id = row['case_id']  # Extract the case_id for reference
+         num_claims = len(row['service_date'])
+
+         # Create sequences of claims within the same case
+         for i in range(1, num_claims):
+             row['claim_line_note'] = str([note for note in eval(row['claim_line_note'], {'nan': 'nan'}) if note != 'nan'])
+             input_sequence = (
+                 f"Diagnosis History: {', '.join(map(str, eval(row['diagnosis'])))}, "
+                 f"Claim Line Notes History: {', '.join(map(str, eval(row['claim_line_note'])))}, "
+                 f"Service Dates History: {', '.join(map(str, eval(row['service_date'])))}")
+
+             past_claims_data["input_sequence"] = input_sequence
+
+     # Build the claim history fed to the LLM prompt
+     past_claims_data_llm = {}
+     for _, row in record.iterrows():
+         case_id = row['case_id']  # Extract the case_id for reference
+         num_claims = len(row['service_date'])
+
+         # Create sequences of claims within the same case
+         for i in range(1, num_claims):
+             row['claim_line_note'] = str([note for note in eval(row['claim_line_note'], {'nan': 'nan'}) if note != 'nan'])
+             past_claims_data_llm["Diagnosis History"] = ', '.join(map(str, eval(row['diagnosis'])))
+             past_claims_data_llm["Claim Line Notes"] = ', '.join(map(str, eval(row['claim_line_note'])))
+
+     print("***********************Past claim History***********************")
+     print(past_claims_data['input_sequence'])
+     print()
+
+     # New claim info
+     new_claim = (
+         f"New Diagnosis: {', '.join(map(str, [diagnosis]))}, "
+         f"New Claim Line Note: {', '.join(map(str, [claim_line_note]))}, "
+         f"New Service Date: {', '.join(map(str, [service_date]))}"
+     )
+
+     print("***********************New claim Data***********************")
+     print(new_claim)
+     print("***********************")
+
+     # Tokenize the claim-history / new-claim pair for the BERT classifier
+     inputs = loaded_tokenizer(past_claims_data['input_sequence'], new_claim, padding=True, truncation=True, return_tensors="pt")
+
+     # Get model predictions
+     with torch.no_grad():
+         outputs = loaded_model(**inputs)
+         predictions = torch.argmax(outputs.logits, dim=-1)
+         bert_probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+
+     pred_label = predictions.tolist()[0]
+     new_case_id = random.randint(100000, 999999)
+     new_claim_id = random.randint(100000, 999999)
+     # Generate final output based on prediction
+     if pred_label == 1:
+         # New Case: use the 6-digit random case ID generated above
+         final_output = (
+             f"New Claim ID: {new_claim_id}. "
+             f"New Case: A new case has been created with Case ID: {new_case_id}. "
+             f"The diagnosis and claim notes indicate it's a New Case. "
+             f"Diagnosis: {diagnosis}, Claim Line Note: {claim_line_note}, Service Date: {service_date.strftime('%Y-%m-%d')}."
+         )
+     else:
+         # Follow-up Case: add reasoning
+         final_output = (
+             f"New Claim ID: {new_claim_id}. "
+             f"Follow-up Case: The claim has been classified as a follow-up case for Case ID: {case_id}. "
+             f"The diagnosis and claim notes indicate a follow-up claim, and the service date is within 90 days of the last service date."
+         )
+
+     ## LLM
+     system_prompt = """Respond to the human as helpfully and accurately as possible. You are an expert in analyzing medical claims. Your task is to compare the new claim with past claims and determine if the New Claim is a "Follow-up Claim" (related to an existing issue) or a "Different Claim" (a separate, unrelated issue).
+ To make this determination, carefully analyze both the below diagnosis and the claim line notes for patterns, similarities, or differences.
+
+ **Existing Claims:**
+ - Diagnosis: "{past_claims_data_diagnosis}"
+ - Claim Line Note: "{past_claims_data_claim_line_note}"
+
+ **New Claim:**
+ - Diagnosis: "{diagnosis}"
+ - Claim Line Note: "{claim_line_note}"
+
+ Use a json blob to output a confidence score along with a reasoning.
+ Valid "category" values: Follow-up Case, New Case
+ Valid "confidence_score" values: 1-100
+ Provide only ONE action per $JSON_BLOB, as shown:
+ ```
+ {{
+   "action": $CATEGORY_NAME,
+   "confidence_score": "$CONFIDENCE_SCORE",
+   "reasoning": $REASONING
+ }}
+ ```
+ Begin! Reminder to ALWAYS respond with a valid json blob of a single action.
+ Respond directly if appropriate. Format is Action:```$JSON_BLOB```"""
+
+     system_prompt = system_prompt.format(diagnosis=diagnosis, claim_line_note=claim_line_note,
+                                          past_claims_data_diagnosis=past_claims_data_llm["Diagnosis History"],
+                                          past_claims_data_claim_line_note=past_claims_data_llm["Claim Line Notes"])
+
+     # Get the LLM's response
+     llm_response = ask_gpt(system_prompt)
+     print(llm_response)
+
+     # Function to extract the JSON blobs from the LLM response
+     def extract_json_blobs(response):
+         try:
+             # Use regex to find all JSON blobs within the backticks
+             matches = re.findall(r'\{.*?\}', response, re.DOTALL)
+             # matches = re.findall(r'\{(?:[^{}]|(?R))*\}', response)
+             json_blobs = [json.loads(match) for match in matches]
+             for blob in json_blobs:
+                 if 'confidence_score' in blob:
+                     # Normalize the 1-100 score to a 0-1 value
+                     blob['confidence_score'] = float(blob['confidence_score']) / 100
+             return json_blobs
+         except json.JSONDecodeError as e:
+             print(f"Error in parsing JSON: {e}")
+             return []
+
+     final_output_llm = extract_json_blobs(llm_response)[0]
+     print(final_output_llm)
+
+     # Assign weights to BERT and LLM responses (adjust as per requirement)
+     json_confidence_follow_up = final_output_llm['confidence_score'] if final_output_llm['action'] == 'Follow-up Case' else 1 - final_output_llm['confidence_score']
+     json_confidence_new_case = final_output_llm['confidence_score'] if final_output_llm['action'] == 'New Case' else 1 - final_output_llm['confidence_score']
+
+     # Extract BERT probabilities
+     bert_confidence_follow_up = bert_probabilities[0][0].item()
+     bert_confidence_new_case = bert_probabilities[0][1].item()
+
+     # Weighted average of the two model confidences
+     combined_confidence_follow_up = 0.35 * bert_confidence_follow_up + 0.65 * json_confidence_follow_up
+     combined_confidence_new_case = 0.35 * bert_confidence_new_case + 0.65 * json_confidence_new_case
+
+     final_prediction = "New Case" if combined_confidence_new_case > combined_confidence_follow_up else "Follow-Up Case"
+
+     if final_prediction == "Follow-Up Case":
+         # Construct the response with Markdown-style formatting
+         return jsonify({
+             # "BERT Prediction": "New Case" if pred_label == 1 else "Follow-Up Case",
+             # "LLM Prediction": final_output_llm["action"],
+             "Ensembled Model Prediction": final_prediction,
+             "New Claim ID": new_claim_id,
+             "Weighted Confidence Score": round(max(combined_confidence_follow_up, combined_confidence_new_case), 2),
+             "Reasoning": final_output_llm['reasoning']
+         })
+     else:
+         return jsonify({
+             # "BERT Prediction": "New Case" if pred_label == 1 else "Follow-Up Case",
+             # "LLM Prediction": final_output_llm["action"],
+             "Ensembled Model Prediction": final_prediction,
+             "New Claim ID": new_claim_id,
+             "Weighted Confidence Score": round(max(combined_confidence_follow_up, combined_confidence_new_case), 2),
+             "New Case ID": new_case_id,
+             "Reasoning": final_output_llm['reasoning']
+         })
+
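+ # Illustrative request (assumes the app is running locally on port 5000; all field values are hypothetical):
+ #   curl -X POST http://localhost:5000/assign-case-id \
+ #        -H "Content-Type: application/json" \
+ #        -d '{"case_id": "123456", "diagnosis": "J06.9", "claim_line_notes": "follow-up visit", "service_date": "2024-02-15"}'
+ # Returns the ensembled BERT + LLM classification (New Case vs Follow-Up Case) with a weighted confidence score.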
+
+
+ if __name__ == '__main__':
+     app.run(host='0.0.0.0', port=5000, debug=True)