DeepRat committed on
Commit
9d6f15c
·
verified ·
1 Parent(s): 583f49b

Update MEDEX_FINAL.py

Browse files
Files changed (1) hide show
  1. MEDEX_FINAL.py +162 -0
MEDEX_FINAL.py CHANGED
@@ -577,6 +577,168 @@ FORMATO ESTRICTO PARA CASOS CLÍNICOS:
577
  print(f"❌ {error_msg}")
578
  return error_msg
579
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
580
  async def _generate_streaming(
581
  self,
582
  messages: List[Dict],
 
577
  print(f"❌ {error_msg}")
578
  return error_msg
579
 
580
async def generate_response_stream(self, query: str):
    """Generate a medical response with real streaming for Streamlit.

    Yields chunks progressively instead of returning the full text at
    the end, so the UI can render the answer as it arrives.

    Args:
        query: The user's medical question.

    Yields:
        str: Successive chunks of the model's response, plus a status
        notice while web-search tool calls are being processed, or an
        error message if streaming fails.
    """
    # Classify the query so the prompt and stats match the audience.
    user_type = self.detect_user_type(query)
    is_emergency = self.detect_emergency(query)

    # Update session statistics.
    self.session_stats["queries"] += 1
    if is_emergency:
        self.session_stats["emergencies"] += 1
    if user_type == "Professional":
        self.session_stats["professional_queries"] += 1
    else:
        self.session_stats["educational_queries"] += 1

    system_prompt = self.create_system_prompt(user_type, is_emergency)

    # Enable the built-in web-search tool only for non-emergencies
    # (emergencies are answered immediately, without lookups).
    tools = None
    if not is_emergency:
        tools = [{"type": "builtin_function", "function": {"name": "$web_search"}}]

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]

    # Splice up to the last 3 interactions in front of the new user turn;
    # insert at index -1 keeps the current query as the final message.
    # (No emptiness guard needed: a slice of an empty list iterates zero times.)
    for interaction in self.conversation_history[-3:]:
        if "user_query" in interaction:
            messages.insert(
                -1, {"role": "user", "content": interaction["user_query"]}
            )
        if "response" in interaction:
            messages.insert(
                -1,
                {"role": "assistant", "content": interaction["response"][:500]},
            )

    print(
        f"\n🩺 MedeX - Usuario: {user_type.upper()} | Emergencia: {'SÍ' if is_emergency else 'NO'}"
    )

    # Single token budget for both audiences (the original if/else
    # assigned 5120 on both branches).
    max_tokens = 5120

    try:
        finish_reason = None
        full_response = ""

        # Keep requesting completions while the model asks for tool calls.
        while finish_reason is None or finish_reason == "tool_calls":
            stream = self.client.chat.completions.create(
                model="kimi-k2-0711-preview",
                messages=messages,
                temperature=0.6,
                max_tokens=max_tokens,
                stream=True,
                tools=tools,
            )

            tool_calls = []
            current_message = {"role": "assistant", "content": ""}

            for chunk in stream:
                if not chunk.choices:
                    continue
                choice = chunk.choices[0]
                finish_reason = choice.finish_reason

                if choice.delta:
                    # Normal content — yield straight to the UI.
                    if choice.delta.content:
                        content_chunk = choice.delta.content
                        full_response += content_chunk
                        current_message["content"] += content_chunk
                        yield content_chunk

                    # Tool-call fragments arrive incrementally; accumulate
                    # them into `tool_calls` keyed by their index.
                    if choice.delta.tool_calls:
                        for tool_call in choice.delta.tool_calls:
                            if len(tool_calls) <= tool_call.index:
                                tool_calls.extend(
                                    [None] * (tool_call.index + 1 - len(tool_calls))
                                )

                            if tool_calls[tool_call.index] is None:
                                tool_calls[tool_call.index] = {
                                    "id": tool_call.id,
                                    "type": tool_call.type,
                                    "function": {
                                        "name": tool_call.function.name,
                                        "arguments": "",
                                    },
                                }

                            if tool_call.function.arguments:
                                tool_calls[tool_call.index]["function"][
                                    "arguments"
                                ] += tool_call.function.arguments

            # If the model requested tool calls, answer them and loop again.
            if finish_reason == "tool_calls" and tool_calls:
                current_message["tool_calls"] = [
                    tc for tc in tool_calls if tc is not None
                ]
                messages.append(current_message)

                yield "\n\n🔍 _Buscando información médica actualizada..._\n\n"

                for tool_call in current_message["tool_calls"]:
                    if tool_call["function"]["name"] != "$web_search":
                        continue
                    # The builtin $web_search runs server-side; the tool
                    # result simply echoes the parsed arguments back.
                    try:
                        arguments = json.loads(tool_call["function"]["arguments"])
                    except (json.JSONDecodeError, TypeError):
                        # Malformed or missing arguments: fall back to the
                        # raw user query. (Was a bare `except:`, which also
                        # swallowed KeyboardInterrupt/SystemExit.)
                        arguments = {"query": query}
                    messages.append(
                        {
                            "role": "tool",
                            "tool_call_id": tool_call["id"],
                            "name": "$web_search",
                            "content": json.dumps(arguments),
                        }
                    )

        # Persist the exchange for short-term conversational context.
        self.conversation_history.append(
            {
                "timestamp": datetime.now().isoformat(),
                "user_query": query,
                "response": full_response,
                "user_type": user_type,
                "is_emergency": is_emergency,
            }
        )

    except Exception as e:
        error_msg = f"❌ Error en streaming: {e}"
        print(error_msg)
        yield error_msg
741
+
742
  async def _generate_streaming(
743
  self,
744
  messages: List[Dict],