Update app.py
app.py CHANGED
@@ -29,31 +29,48 @@ llm = AutoModelForCausalLM.from_pretrained(llama_model_name, token=API_TOKEN)
 # Generate advice using RAG
 def generate_advice(extracted_data):
     try:
+        # Ensure extracted_data is valid
+        if not isinstance(extracted_data, list):
+            raise ValueError("Input data must be a list of dictionaries.")
+        if not all(isinstance(item, dict) for item in extracted_data):
+            raise ValueError("Each item in input data must be a dictionary.")
+
         recommendations = []

         for item in extracted_data:
+            # Validate required keys
+            if not all(k in item for k in ["Component", "Status"]):
+                raise ValueError("Each input item must have 'Component' and 'Status' keys.")
+
             # Prepare the query string
             query = f"{item['Component']} {item['Status']}"
             print(f"Processing query: {query}")  # Debug print

-            # Generate query embedding and
+            # Generate query embedding and reshape
             query_embedding = embedding_model.encode([query])
             query_embedding = np.array(query_embedding, dtype="float32").reshape(1, -1)

+            # Debugging embedding dimensions
+            print(f"Query Embedding Shape: {query_embedding.shape}, FAISS Index Dim: {index.d}")
+
+            # Validate embedding dimensions
+            if query_embedding.shape[1] != index.d:
+                raise ValueError(
+                    f"Embedding dimension mismatch: Query ({query_embedding.shape[1]}), Index ({index.d})"
+                )
+
             # Search for the closest match in FAISS
             _, idx = index.search(query_embedding, 1)
-            print(f"FAISS Index: {idx}, Best Match Raw: {kb[idx[0][0]]}")
+            print(f"FAISS Index: {idx}, Best Match Raw: {kb[idx[0][0]]}")

-            # Retrieve the closest match
+            # Retrieve the closest match
             best_match = kb[idx[0][0]]
-            if not isinstance(best_match, dict):
-                raise ValueError(f"Best match retrieved is not a dictionary: {best_match}")

             # Prepare the LLM prompt
             role = "Medical expert providing advice based on lab results."
             prompt = f"""
             Lab Test: {item['Component']}
-            Value: {item
+            Value: {item.get('Value', 'Unknown')} {item.get('Units', '')}
             Status: {item['Status']}

             Medical Guidelines: {best_match['Advice']}
@@ -61,6 +78,7 @@ def generate_advice(extracted_data):
             Provide additional insights or recommendations.
             """

+            # Generate advice using LLaMA model
             message_yours = [
                 {"role": "system", "content": role},
                 {"role": "user", "content": prompt},
@@ -73,7 +91,6 @@ def generate_advice(extracted_data):
                 return_tensors="pt",
             )

-            # Generate response using LLaMA
             output = llm.generate(
                 input_ids=input_text_with_your_role,
                 max_length=150,
@@ -81,7 +98,7 @@ def generate_advice(extracted_data):
             )
             advice = tokenizer.decode(output[0], skip_special_tokens=True).strip()

-            # Append result
+            # Append the result
             recommendations.append({
                 "Component": item["Component"],
                 "Advice": advice
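
For context, a minimal sketch (not part of the commit) of the setup the diff assumes: embedding_model, kb, and the FAISS index must agree on embedding width, which is exactly what the new index.d check guards. The model name, the kb entries, and the "Condition" field below are illustrative assumptions, not taken from app.py.

import numpy as np
import faiss
from sentence_transformers import SentenceTransformer

# Assumed embedding model; any SentenceTransformer works as long as the
# same model encodes both the knowledge base and the queries.
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")

# Hypothetical knowledge base: each entry needs an 'Advice' key, because
# the prompt interpolates best_match['Advice'].
kb = [
    {"Condition": "Hemoglobin Low", "Advice": "Consider iron studies and a dietary review."},
    {"Condition": "Glucose High", "Advice": "Recommend a fasting glucose retest and HbA1c."},
]

# Embed the knowledge base and build a flat L2 index. index.d then equals
# the embedding width, which the new validation compares against
# query_embedding.shape[1] before every search.
kb_embeddings = np.array(
    embedding_model.encode([entry["Condition"] for entry in kb]), dtype="float32"
)
index = faiss.IndexFlatL2(kb_embeddings.shape[1])
index.add(kb_embeddings)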