JairoCesar committed on
Commit
088ccd5
verified
1 Parent(s): 82049f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +160 -46
app.py CHANGED
@@ -1,60 +1,113 @@
1
  import streamlit as st
2
- from huggingface_hub import InferenceClient
3
  from docx import Document
4
  import tempfile
5
  import os
6
  import re
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  # Initialize the client with a publicly available model that doesn't require API token
9
- # Models like facebook/opt-350m, EleutherAI/gpt-neo-125m, or bigscience/bloom-560m are good options
10
- client = InferenceClient(model="facebook/opt-1.3b")
 
 
 
 
 
 
 
11
 
12
  # Function to format the prompt for Rorschach interpretation
13
  def format_prompt(message, history):
14
- # For OPT model, we use a simpler prompt format
15
- # Start with context about the task
16
- prompt = "Esto es una interpretaci贸n del Test de Rorschach. "
17
-
18
- # Add history if any
19
- for user_prompt, bot_response in history:
20
- prompt += f"Persona: {user_prompt} Respuesta: {bot_response} "
21
-
22
- # Add current message
23
- prompt += f"Persona describe lo que ve en la mancha de Rorschach: {message} Interpretaci贸n psicol贸gica detallada: "
24
-
25
  return prompt
26
 
27
  # Function to generate response
28
- def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
29
  try:
30
- temperature = max(float(temperature), 1e-2)
31
- top_p = float(top_p)
32
-
33
- generate_kwargs = dict(
34
- temperature=temperature,
35
- max_new_tokens=max_new_tokens,
36
- top_p=top_p,
37
- repetition_penalty=repetition_penalty,
38
- do_sample=True,
39
- seed=42,
40
- )
41
-
42
  formatted_prompt = format_prompt(prompt, history)
43
 
44
- # Simplified call to text_generation for OPT model
45
- response = client.text_generation(
46
- formatted_prompt,
47
- max_new_tokens=max_new_tokens,
48
- temperature=temperature,
49
- top_p=top_p,
50
- repetition_penalty=repetition_penalty,
51
- do_sample=True
52
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
- return response
55
  except Exception as e:
56
- st.error(f"Error generando respuesta: {str(e)}")
57
- return "Lo siento, hubo un error al generar la interpretaci贸n. Por favor, intenta de nuevo."
 
 
58
 
59
  # Function to replace variables in a Word template
60
  def replace_variables_word(doc, variables):
@@ -95,21 +148,75 @@ def generate_word_document(interpretation):
95
 
96
  # Streamlit interface
97
  st.title("Interpretaci贸n del Test de Rorschach")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
  # Chat history
100
  if 'history' not in st.session_state:
101
  st.session_state.history = []
102
 
103
- # User input
104
- entrada_usuario = st.text_input("Qu茅 ves en las im谩genes:", key="entrada_usuario")
 
 
 
 
 
105
 
106
  # Generate response and create Word document
107
- if st.button("Enviar"):
108
  if entrada_usuario:
109
- with st.spinner("Generando interpretaci贸n..."):
110
- bot_response = generate(entrada_usuario, st.session_state.history)
 
 
 
 
 
 
 
 
 
 
 
 
111
  st.session_state.history.append((entrada_usuario, bot_response))
112
-
 
 
 
 
113
  # Generate Word document
114
  document_path = generate_word_document(bot_response)
115
 
@@ -117,7 +224,7 @@ if st.button("Enviar"):
117
  # Provide download link
118
  with open(document_path, "rb") as file:
119
  st.download_button(
120
- label="Descargar Interpretaci贸n",
121
  data=file,
122
  file_name="Interpretacion_Rorschach.docx",
123
  mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
@@ -127,6 +234,13 @@ if st.button("Enviar"):
127
  os.unlink(document_path)
128
  except:
129
  pass
 
 
 
 
 
 
 
130
 
131
  # Display conversation
132
  if st.session_state.history:
 
1
  import streamlit as st
 
2
  from docx import Document
3
  import tempfile
4
  import os
5
  import re
6
+ import requests
7
+ import json
8
+ import traceback
9
+
10
# --- Page configuration -------------------------------------------------
st.set_page_config(page_title="Interpretador de Rorschach", page_icon="馃", layout="wide")

# --- Custom CSS ---------------------------------------------------------
# NOTE(review): inner CSS whitespace reconstructed from the diff view; the
# original indentation is not recoverable, but it is cosmetically
# irrelevant to the browser.
_CUSTOM_CSS = """
<style>
.main {
    background-color: #f5f5f5;
}
.stApp {
    max-width: 1200px;
    margin: 0 auto;
}
h1 {
    color: #2c3e50;
}
.debug-info {
    background-color: #f0f0f0;
    padding: 10px;
    border-radius: 5px;
    font-family: monospace;
    margin-top: 20px;
}
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)

# --- Session state ------------------------------------------------------
# Accumulates diagnostic text shown when the "Modo debug" checkbox is on.
if "debug_info" not in st.session_state:
    st.session_state["debug_info"] = ""
 
40
# Initialize the client with a publicly available model that doesn't require API token.
# Maps the display label (shown in the sidebar selectbox) to the Hugging Face
# model repo id queried through the Inference API.
AVAILABLE_MODELS = {
    "GPT-2 (ligero)": "gpt2",
    "Distil-GPT2 (m谩s r谩pido)": "distilgpt2",
    "GPT-2 Medium (mejor calidad)": "gpt2-medium",
    "BLOOM (multiling眉e)": "bigscience/bloom-560m",
}

# Default model repo id.
DEFAULT_MODEL = "gpt2"
 
51
# Function to format the prompt for Rorschach interpretation
def format_prompt(message, history):
    """Build the plain-text completion prompt sent to the model.

    `message` is the patient's inkblot description. `history` is accepted
    for interface compatibility but is not included in the prompt (simple
    single-turn prompt aimed at GPT-2-class models).
    """
    segments = [
        "A continuaci贸n una interpretaci贸n psicol贸gica del Test de Rorschach basada en lo que la persona describe ver: \n\n",
        f"Descripci贸n del paciente: {message}\n\n",
        "Interpretaci贸n psicol贸gica: ",
    ]
    return "".join(segments)
58
 
59
# Function to generate response
def generate(prompt, history, temperature=0.7, max_new_tokens=256, top_p=0.9, repetition_penalty=1.0):
    """Generate a Rorschach interpretation via the Hugging Face Inference API.

    Parameters
    ----------
    prompt : str
        The patient's description of the inkblot.
    history : list[tuple[str, str]]
        Prior (user, bot) exchanges; forwarded to ``format_prompt``.
    temperature, max_new_tokens, top_p, repetition_penalty
        Sampling controls forwarded to the API.

    Returns
    -------
    str
        The generated interpretation, or a Spanish error message on failure
        (this function never raises).

    NOTE(review): reads the module-level globals ``selected_model`` and
    ``debug_mode`` defined by the sidebar UI further down the script —
    consider passing them in as parameters instead.
    """
    try:
        formatted_prompt = format_prompt(prompt, history)

        # Minimal payload for the hosted Inference API.
        payload = {
            "inputs": formatted_prompt,
            "parameters": {
                "max_new_tokens": max_new_tokens,
                "temperature": temperature,
                "top_p": top_p,
                # fix: repetition_penalty was accepted but never sent to the API
                "repetition_penalty": repetition_penalty,
                "do_sample": True,
            },
        }

        # Resolve the sidebar selection to a model repo id.
        model_id = AVAILABLE_MODELS[selected_model]

        # For debugging
        if debug_mode:
            st.session_state["debug_info"] = f"Model: {model_id}\nPrompt: {formatted_prompt}"

        # Direct API call using the inference endpoint.
        api_url = f"https://api-inference.huggingface.co/models/{model_id}"
        headers = {"Content-Type": "application/json"}
        # fix: add a timeout so a stalled API call cannot hang the app forever
        response = requests.post(api_url, headers=headers, json=payload, timeout=60)

        if response.status_code != 200:
            error_msg = f"API Error: Status code {response.status_code} - {response.text}"
            if debug_mode:
                st.session_state["debug_info"] += f"\n\nError: {error_msg}"
            return "Error en la API de Hugging Face. Por favor intenta con otro modelo o m谩s tarde."

        # fix: the API can return a dict payload (e.g. {"error": ...}) even with
        # HTTP 200; validate the shape before indexing instead of raising
        # IndexError/KeyError into the generic handler below.
        data = response.json()
        if not (isinstance(data, list) and data and "generated_text" in data[0]):
            if debug_mode:
                st.session_state["debug_info"] += f"\n\nUnexpected payload: {data!r}"
            return "Error en la API de Hugging Face. Por favor intenta con otro modelo o m谩s tarde."
        result = data[0]["generated_text"]

        # The API echoes the prompt; keep only the newly generated suffix.
        new_content = result[len(formatted_prompt):]

        if debug_mode:
            st.session_state["debug_info"] += f"\n\nResponse: {new_content[:100]}..."

        return new_content if new_content else "No se pudo generar una interpretaci贸n. Por favor intenta con otra descripci贸n."

    except Exception as e:
        # Never propagate: surface a user-facing message, full traceback to debug.
        error_traceback = traceback.format_exc()
        if debug_mode:
            st.session_state["debug_info"] += f"\n\nException: {str(e)}\n{error_traceback}"
        return f"Lo siento, ocurri贸 un error: {str(e)}. Por favor, intenta de nuevo con otra descripci贸n o modelo."
111
 
112
  # Function to replace variables in a Word template
113
  def replace_variables_word(doc, variables):
 
148
 
149
# Streamlit interface
st.title("Interpretaci贸n del Test de Rorschach")
st.markdown("#### An谩lisis psicol贸gico basado en las descripciones de las manchas de Rorschach")

# Sidebar: model choice and sampling controls.
with st.sidebar:
    st.header("Configuraci贸n")

    # Which Inference API model to query.
    selected_model = st.selectbox(
        "Seleccionar modelo:",
        list(AVAILABLE_MODELS.keys()),
        index=0,
    )

    # Sampling temperature — higher values give more creative output.
    temperature = st.slider("Temperatura (creatividad):", 0.1, 1.0, 0.7, 0.1)

    # Upper bound on the number of generated tokens.
    max_tokens = st.slider("Longitud m谩xima:", 50, 500, 200, 50)

    # When on, prompt/API diagnostics are accumulated in session state.
    debug_mode = st.checkbox("Modo debug", value=False)
184
 
185
# Chat history
if "history" not in st.session_state:
    st.session_state.history = []

# Free-text description of what the patient sees in the inkblot.
entrada_usuario = st.text_area(
    "驴Qu茅 ves en la imagen? Describe con detalle:",
    height=100,
    key="entrada_usuario",
    placeholder="Por ejemplo: Veo dos personas enfrentadas, sosteniendo algo entre ellas. Tambi茅n hay formas que parecen alas en la parte superior...",
)
196
 
197
  # Generate response and create Word document
198
+ if st.button("Analizar Respuesta", type="primary"):
199
  if entrada_usuario:
200
+ with st.spinner("Generando interpretaci贸n psicol贸gica..."):
201
+ # Get the selected model ID
202
+ model_name = selected_model
203
+ model_id = AVAILABLE_MODELS[selected_model]
204
+
205
+ # Generate the response
206
+ bot_response = generate(
207
+ entrada_usuario,
208
+ st.session_state.history,
209
+ temperature=temperature,
210
+ max_new_tokens=max_tokens
211
+ )
212
+
213
+ # Add to history
214
  st.session_state.history.append((entrada_usuario, bot_response))
215
+
216
+ # Show the generated response
217
+ st.subheader("Interpretaci贸n Psicol贸gica:")
218
+ st.write(bot_response)
219
+
220
  # Generate Word document
221
  document_path = generate_word_document(bot_response)
222
 
 
224
  # Provide download link
225
  with open(document_path, "rb") as file:
226
  st.download_button(
227
+ label="馃搫 Descargar Interpretaci贸n como Documento",
228
  data=file,
229
  file_name="Interpretacion_Rorschach.docx",
230
  mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
 
234
  os.unlink(document_path)
235
  except:
236
  pass
237
+ else:
238
+ st.warning("Por favor ingresa tu descripci贸n de lo que ves en la imagen primero.")
239
+
240
+ # Display debug information if enabled
241
+ if debug_mode and st.session_state["debug_info"]:
242
+ st.markdown("### Informaci贸n de Depuraci贸n")
243
+ st.markdown(f"<div class='debug-info'>{st.session_state['debug_info']}</div>", unsafe_allow_html=True)
244
 
245
  # Display conversation
246
  if st.session_state.history: