ressay1973 committed on
Commit
25c8f8e
·
verified ·
1 Parent(s): 1d414cc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -72
app.py CHANGED
@@ -1,85 +1,93 @@
1
- from smolagents import CodeAgent, HfApiModel, tool
 
 
 
2
  import yaml
3
- import gradio as gr
4
-
5
def classify_incident(service_criticity: str, disruption: str, affectation_time: str, magnitude: str, workaround: str) -> str:
    """Map incident attributes to a priority label.

    Args:
        service_criticity: "High" or "Low".
        disruption: "Full", "Degraded", or "None".
        affectation_time: "Up 15 mins" or "Less 15 mins".
        magnitude: "High", "Low", or "None".
        workaround: "Yes" or "No".

    Returns:
        "P1" (critical), "P2", "P4", or the default "P3".
    """
    high_criticity = service_criticity == "High"
    if high_criticity and disruption == "Full":
        # Full outage of a critical service is the worst case.
        return "P1"
    if high_criticity and (disruption == "Degraded" or affectation_time == "Up 15 mins"):
        return "P2"
    if service_criticity == "Low" and magnitude == "Low" and workaround == "Yes":
        return "P4"
    # Everything else gets the default medium priority.
    return "P3"
 
 
 
 
15
 
 
16
@tool
def diagnose_incident(
    service_criticity: str,
    disruption: str,
    affectation_time: str,
    magnitude: str,
    workaround: str
) -> str:
    """Determine the severity priority of an incident.

    Parameters:
        service_criticity (str): High or Low.
        disruption (str): Full, Degraded, or None.
        affectation_time (str): Up 15 mins, Less 15 mins.
        magnitude (str): High, Low, or None.
        workaround (str): Yes or No.

    Returns:
        str: The classified priority of the incident.
    """
    # Delegate the rule evaluation to the plain helper function.
    label = classify_incident(
        service_criticity, disruption, affectation_time, magnitude, workaround
    )
    return f"Incident classified as priority: {label}"
38
-
39
# Inference model, tuned for quick and stable replies.
model = HfApiModel(
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    max_tokens=512,   # cap output length to improve response time
    temperature=0.3,  # lower temperature for more deterministic results
)
45
 
46
# Read the agent's prompt templates from disk.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Build the incident-diagnosis agent around the single diagnosis tool.
agent = CodeAgent(
    model=model,
    tools=[diagnose_incident],
    max_steps=3,        # small step budget to minimize latency
    verbosity_level=0,  # quiet logging for efficiency
    name="Incident Diagnosis Agent",
    description="An agent that classifies incidents based on severity criteria.",
    prompt_templates=prompt_templates
)
60
 
61
def chat_diagnose_incident(service_criticity, disruption, affectation_time, magnitude, workaround):
    """Diagnose an incident from the UI inputs and return a chat-style reply.

    Bug fix: the previous version interpolated the full English sentence
    returned by diagnose_incident(...) after "prioridad", producing e.g.
    "...prioridad Incident classified as priority: P1.". Use the bare
    priority label from classify_incident(...) instead.
    """
    priority = classify_incident(service_criticity, disruption, affectation_time, magnitude, workaround)
    return f"Agent: El incidente se clasifica como prioridad {priority}."
65
-
66
# Gradio front-end: five dropdowns feed the classifier, one textbox shows it.
iface = gr.Interface(
    fn=chat_diagnose_incident,
    inputs=[
        gr.Dropdown(["High", "Low"], label="Criticidad del servicio"),
        gr.Dropdown(["Full", "Degraded", "None"], label="Interrupción"),
        gr.Dropdown(["Up 15 mins", "Less 15 mins"], label="Tiempo de afectación"),
        gr.Dropdown(["High", "Low", "None"], label="Magnitud"),
        gr.Dropdown(["Yes", "No"], label="¿Hay workaround?"),
    ],
    outputs=gr.Textbox(),
    title="Asistente de Diagnóstico de Incidentes",
    description="Selecciona las opciones para diagnosticar el incidente.",
    theme="default"
)

# Launch the UI only when executed as a script.
if __name__ == "__main__":
    iface.launch()
85
-
 
1
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
2
+ import datetime
3
+ import requests
4
+ import pytz
5
  import yaml
6
+
7
+ from tools.final_answer import FinalAnswerTool
8
+ from Gradio_UI import GradioUI
9
+
10
# Tool that classifies P1/P2/P3 alerts per the criteria table.
@tool
def classify_alert(service_criticity: str, disruption: str, affectation_time: str, magnitude: str, business_workaround: str) -> str:
    """Classify an alert as P1, P2 or P3 according to the specified criteria.
    Args:
        service_criticity: 'High' or 'Low'
        disruption: 'Full', 'Degraded', 'None'
        affectation_time: 'Up 15 mins' or 'Less 15 mins'
        magnitude: 'High', 'Low', 'None'
        business_workaround: 'Yes' or 'No'
    """
    is_high = service_criticity == "High"
    severe_disruption = disruption in ["Full", "Degraded"]
    if is_high and severe_disruption and magnitude == "High" and business_workaround == "No":
        return "P1 - Crítico"
    if is_high and (severe_disruption or affectation_time == "Up 15 mins"):
        return "P2 - Importante"
    return "P3 - Menor"
30
+
31
+
32
+
33
+
34
 
35
# Tool that summarizes infrastructure reports via the HF Inference API.
@tool
def summarize_report(report_text: str) -> str:
    """Generate a summary of a report about the platform infrastructure.
    Args:
        report_text: Text of the report.
    """
    import os  # local import keeps the file's top-level import block untouched

    api_url = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    # Fix: read the token from the environment instead of shipping the
    # hard-coded "YOUR_HF_API_KEY" placeholder, which can never authenticate.
    headers = {"Authorization": f"Bearer {os.environ.get('HF_API_KEY', '')}"}
    payload = {"inputs": report_text}

    try:
        # Fix: add a timeout so a stuck inference endpoint cannot hang the agent.
        response = requests.post(api_url, headers=headers, json=payload, timeout=30)
    except requests.RequestException:
        # Network failure is reported the same way as an API error.
        return "Error al generar el resumen."

    if response.status_code == 200:
        return response.json()[0]['summary_text']
    return "Error al generar el resumen."
51
+
52
+
53
+
54
+
55
+
56
+
57
+
58
+
59
+
60
+
61
+
62
+
63
+
64
+
65
+
66
+
67
+
68
+
69
+
70
+
71
final_answer = FinalAnswerTool()

# Fix: this commit dropped the opening of the model construction, leaving a
# dangling "custom_role_conversions=None," and ")" (a syntax error) and no
# `model` for the CodeAgent below. Reconstruct the HfApiModel call with the
# settings used previously in this file.
model = HfApiModel(
    max_tokens=512,
    temperature=0.3,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)
75
 
76
# Text-to-image tool from the Hub (loaded but not registered with the agent yet).
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load the prompt templates used by the agent.
with open("prompts.yaml", 'r') as fh:
    prompt_templates = yaml.safe_load(fh)
80
+
81
# Wire the agent together and expose it through the shared Gradio UI.
agent = CodeAgent(
    model=model,
    tools=[final_answer, classify_alert, summarize_report],
    max_steps=6,
    verbosity_level=1,
    grammar=None,            # no constrained decoding
    planning_interval=None,  # no periodic re-planning step
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

GradioUI(agent).launch()