# NOTE(review): the three lines below were paste/extraction residue
# ("Spaces:" / "Runtime error") — preserved here as comments so the
# module remains syntactically valid Python.
# Spaces:
# Runtime error
# Runtime error
import asyncio
from datetime import datetime, timezone

from config import agent, logger, analysis_collection
async def analyze_patient_report(patient_id, report_content, file_type, file_content):
    """Analyze a patient report for suicide risk and persist the result.

    Builds a chat conversation around the report text, runs the local
    language model to generate an assessment, attaches (currently
    placeholder) risk level/score values, inserts the resulting document
    into ``analysis_collection``, and returns it.

    Args:
        patient_id: Identifier of the patient the report belongs to.
        report_content: Extracted text of the report to analyze.
        file_type: Original file type of the uploaded report.
        file_content: Raw file content — unused here; kept for caller
            compatibility. TODO confirm whether it should feed the prompt.

    Returns:
        dict: The analysis document inserted into the collection.

    Raises:
        Exception: Any failure is logged (with traceback) and re-raised.
    """
    try:
        # Build the conversation: system prompt plus the report to assess.
        conversation = [{"role": "system", "content": agent.chat_prompt}]
        conversation.append({"role": "user", "content": f"Analyze this report for suicide risk: {report_content}"})
        input_ids = agent.tokenizer.apply_chat_template(
            conversation, add_generation_prompt=True, return_tensors="pt"
        ).to(agent.device)
        output = agent.model.generate(
            input_ids,
            do_sample=True,
            temperature=0.5,
            max_new_tokens=1024,
            pad_token_id=agent.tokenizer.eos_token_id,
            return_dict_in_generate=True,
        )
        # Decode only the newly generated tokens (slice off the prompt prefix).
        text = agent.tokenizer.decode(
            output["sequences"][0][input_ids.shape[1]:], skip_special_tokens=True
        )
        # Parse the text to extract risk level and score (simplified example)
        risk_level = "moderate"  # Replace with actual parsing logic
        risk_score = 0.7  # Replace with actual parsing logic
        analysis = {
            "patient_id": patient_id,
            "report_content": report_content,
            "file_type": file_type,
            # Timezone-aware UTC timestamp: datetime.utcnow() is deprecated
            # (Python 3.12) and returns a naive datetime.
            "timestamp": datetime.now(timezone.utc),
            "suicide_risk": {
                "level": risk_level,
                "score": risk_score,
                "factors": ["depression", "isolation"],  # Example factors
            },
            "summary": {
                "summary": "Patient shows signs of moderate risk.",
                "recommendations": "Monitor closely and schedule follow-up.",
            },
        }
        await analysis_collection.insert_one(analysis)
        # Lazy %-style args avoid formatting cost when the level is disabled.
        logger.info(
            "Analysis completed for patient %s at %s",
            patient_id,
            datetime.now(timezone.utc).isoformat(),
        )
        return analysis
    except Exception as e:
        # logger.exception records the full traceback alongside the message,
        # which logger.error(f"...") did not.
        logger.exception(
            "Error analyzing patient report: %s at %s",
            e,
            datetime.now(timezone.utc).isoformat(),
        )
        raise