Spaces:
Sleeping
Sleeping
Commit
·
5c10f04
1
Parent(s):
0e2ecb3
- app.py +11 -7
- interactions.log +4 -0
app.py
CHANGED
|
@@ -12,7 +12,7 @@ app = FastAPI()
|
|
| 12 |
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
|
| 13 |
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct"
|
| 14 |
|
| 15 |
-
headers = {"Authorization": f"Bearer
|
| 16 |
|
| 17 |
# Configurar el logger
|
| 18 |
logging.basicConfig(filename='interactions.log', level=logging.INFO,
|
|
@@ -33,7 +33,7 @@ def query(api_url, headers, payload):
|
|
| 33 |
def preparar_texto(contexto, borrador):
|
| 34 |
return f"{contexto}\n\n{borrador}"
|
| 35 |
|
| 36 |
-
def mejorar_resolucion(context, borrador, api_url, headers, max_length=
|
| 37 |
prompt = preparar_texto(context, borrador)
|
| 38 |
payload = {
|
| 39 |
"inputs": prompt,
|
|
@@ -50,15 +50,19 @@ def mejorar_resolucion(context, borrador, api_url, headers, max_length=1000):
|
|
| 50 |
|
| 51 |
result = query(api_url, headers, payload)
|
| 52 |
|
| 53 |
-
if
|
| 54 |
-
texto_generado = result['generated_text']
|
| 55 |
context += f"\n\n{texto_generado}"
|
| 56 |
logging.info(f"Prompt: {prompt}\nGenerated: {texto_generado.strip()}")
|
| 57 |
return texto_generado.strip(), context
|
| 58 |
-
|
| 59 |
error_message = result.get("error", "No se pudo obtener la respuesta del modelo.")
|
| 60 |
logging.error(f"Prompt: {prompt}\nError: {error_message}")
|
| 61 |
return None, context
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
# Función de predicción para la interfaz de Gradio
|
| 64 |
def predict(message, history):
|
|
@@ -76,7 +80,7 @@ def predict(message, history):
|
|
| 76 |
for item in history_transformer_format:
|
| 77 |
context += f"\n\n{item[0]}"
|
| 78 |
|
| 79 |
-
resolucion_mejorada, context = mejorar_resolucion(context, message, API_URL, headers, max_length=
|
| 80 |
if resolucion_mejorada:
|
| 81 |
history.append([message, resolucion_mejorada])
|
| 82 |
return resolucion_mejorada
|
|
@@ -93,7 +97,7 @@ async def api_predict(request: Request):
|
|
| 93 |
|
| 94 |
# Crear y lanzar la interfaz de Gradio
|
| 95 |
iface = gr.ChatInterface(predict)
|
| 96 |
-
iface.launch(
|
| 97 |
|
| 98 |
# Ejecutar la aplicación FastAPI
|
| 99 |
if __name__ == "__main__":
|
|
|
|
| 12 |
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
|
| 13 |
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct"
|
| 14 |
|
| 15 |
+
headers = {"Authorization": f"Bearer " + HUGGING_FACE_TOKEN}
|
| 16 |
|
| 17 |
# Configurar el logger
|
| 18 |
logging.basicConfig(filename='interactions.log', level=logging.INFO,
|
|
|
|
| 33 |
def preparar_texto(contexto, borrador):
|
| 34 |
return f"{contexto}\n\n{borrador}"
|
| 35 |
|
| 36 |
+
def mejorar_resolucion(context, borrador, api_url, headers, max_length=2000):
|
| 37 |
prompt = preparar_texto(context, borrador)
|
| 38 |
payload = {
|
| 39 |
"inputs": prompt,
|
|
|
|
| 50 |
|
| 51 |
result = query(api_url, headers, payload)
|
| 52 |
|
| 53 |
+
if isinstance(result, list) and 'generated_text' in result[0]:
|
| 54 |
+
texto_generado = result[0]['generated_text']
|
| 55 |
context += f"\n\n{texto_generado}"
|
| 56 |
logging.info(f"Prompt: {prompt}\nGenerated: {texto_generado.strip()}")
|
| 57 |
return texto_generado.strip(), context
|
| 58 |
+
elif isinstance(result, dict) and "error" in result:
|
| 59 |
error_message = result.get("error", "No se pudo obtener la respuesta del modelo.")
|
| 60 |
logging.error(f"Prompt: {prompt}\nError: {error_message}")
|
| 61 |
return None, context
|
| 62 |
+
else:
|
| 63 |
+
error_message = "Formato de respuesta desconocido."
|
| 64 |
+
logging.error(f"Prompt: {prompt}\nError: {error_message}")
|
| 65 |
+
return None, context
|
| 66 |
|
| 67 |
# Función de predicción para la interfaz de Gradio
|
| 68 |
def predict(message, history):
|
|
|
|
| 80 |
for item in history_transformer_format:
|
| 81 |
context += f"\n\n{item[0]}"
|
| 82 |
|
| 83 |
+
resolucion_mejorada, context = mejorar_resolucion(context, message, API_URL, headers, max_length=2000)
|
| 84 |
if resolucion_mejorada:
|
| 85 |
history.append([message, resolucion_mejorada])
|
| 86 |
return resolucion_mejorada
|
|
|
|
| 97 |
|
| 98 |
# Crear y lanzar la interfaz de Gradio
|
| 99 |
iface = gr.ChatInterface(predict)
|
| 100 |
+
iface.launch()
|
| 101 |
|
| 102 |
# Ejecutar la aplicación FastAPI
|
| 103 |
if __name__ == "__main__":
|
interactions.log
CHANGED
|
@@ -112,3 +112,7 @@ Soy un asistente legal especializado en derecho chileno, capacitado para optimiz
|
|
| 112 |
2024-07-06 17:55:00,524 - INFO - HTTP Request: GET http://localhost:7860/startup-events "HTTP/1.1 200 OK"
|
| 113 |
2024-07-06 17:55:00,534 - INFO - HTTP Request: HEAD http://localhost:7860/ "HTTP/1.1 200 OK"
|
| 114 |
2024-07-06 17:55:00,677 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
2024-07-06 17:55:00,524 - INFO - HTTP Request: GET http://localhost:7860/startup-events "HTTP/1.1 200 OK"
|
| 113 |
2024-07-06 17:55:00,534 - INFO - HTTP Request: HEAD http://localhost:7860/ "HTTP/1.1 200 OK"
|
| 114 |
2024-07-06 17:55:00,677 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|
| 115 |
+
2024-07-06 17:55:33,070 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
|
| 116 |
+
2024-07-06 17:55:33,279 - INFO - HTTP Request: GET http://localhost:7860/startup-events "HTTP/1.1 200 OK"
|
| 117 |
+
2024-07-06 17:55:33,291 - INFO - HTTP Request: HEAD http://localhost:7860/ "HTTP/1.1 200 OK"
|
| 118 |
+
2024-07-06 17:55:33,420 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
|