petter2025 committed on
Commit
c3a20dc
·
verified ·
1 Parent(s): 4fb793e

Update hf_demo.py

Browse files
Files changed (1) hide show
  1. hf_demo.py +80 -31
hf_demo.py CHANGED
@@ -22,6 +22,7 @@ try:
22
  from fastapi.middleware.cors import CORSMiddleware
23
  from pydantic import BaseModel, Field
24
  from gradio import mount_gradio_app
 
25
  FASTAPI_AVAILABLE = True
26
  print("✅ FastAPI available")
27
  except ImportError as e:
@@ -35,6 +36,7 @@ except ImportError as e:
35
  class HTTPException(Exception): pass
36
  class CORSMiddleware: pass
37
  def Field(*args, **kwargs): return None
 
38
 
39
  if FASTAPI_AVAILABLE:
40
  # Create FastAPI app
@@ -49,6 +51,19 @@ if FASTAPI_AVAILABLE:
49
  allow_headers=["*"],
50
  )
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  # Define Pydantic models
53
  class ActionEvaluationRequest(BaseModel):
54
  proposed_action: str = Field(..., description="The command the AI wants to execute")
@@ -79,47 +94,70 @@ if FASTAPI_AVAILABLE:
79
  maxAutonomousRisk: str
80
  riskScoreThresholds: dict
81
 
82
- # Add API endpoints
 
 
 
 
 
83
  @api_app.get("/api/v1/config", response_model=ConfigResponse)
84
  async def get_config():
85
  """Get current ARF configuration"""
86
- return {
87
- "confidenceThreshold": 0.9,
88
- "maxAutonomousRisk": "MEDIUM",
89
- "riskScoreThresholds": {
90
- "LOW": 0.7,
91
- "MEDIUM": 0.5,
92
- "HIGH": 0.3,
93
- "CRITICAL": 0.1
94
- }
95
- }
96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  @api_app.post("/api/v1/evaluate", response_model=ActionEvaluationResponse)
98
  async def evaluate_action(request: ActionEvaluationRequest):
99
  """Evaluate an action using mathematical Bayesian assessment"""
100
  try:
101
- # Simulate gate evaluations
 
 
102
  gates = []
103
 
104
- # Confidence gate
105
- confidence_passed = request.confidence_score >= 0.9
106
  gates.append(GateEvaluationResponse(
107
  gate="confidence_threshold",
108
- reason=f"Confidence {request.confidence_score:.2f} meets threshold 0.9" if confidence_passed
109
- else f"Confidence {request.confidence_score:.2f} below threshold 0.9",
110
  passed=confidence_passed,
111
- threshold=0.9,
112
  actual=request.confidence_score
113
  ))
114
 
115
- # Risk gate
116
- risk_passed = request.risk_level in ["LOW", "MEDIUM"]
 
 
 
 
117
  gates.append(GateEvaluationResponse(
118
  gate="risk_assessment",
119
- reason=f"Risk level {request.risk_level} within autonomous range" if risk_passed
120
- else f"Risk level {request.risk_level} exceeds autonomous threshold",
121
  passed=risk_passed,
122
- metadata={"maxAutonomousRisk": "MEDIUM", "actionRisk": request.risk_level}
123
  ))
124
 
125
  # Rollback gate
@@ -154,7 +192,7 @@ if FASTAPI_AVAILABLE:
154
 
155
  all_passed = all(g.passed for g in gates)
156
 
157
- # Determine required level
158
  if all_passed:
159
  if request.risk_level == "LOW":
160
  required_level = "AUTONOMOUS_LOW"
@@ -165,7 +203,7 @@ if FASTAPI_AVAILABLE:
165
  else:
166
  required_level = "OPERATOR_REVIEW"
167
 
168
- return ActionEvaluationResponse(
169
  allowed=all_passed,
170
  required_level=required_level,
171
  gates_triggered=gates,
@@ -173,12 +211,17 @@ if FASTAPI_AVAILABLE:
173
  escalation_reason=None if all_passed else "Failed critical gates"
174
  )
175
 
 
 
 
176
  except Exception as e:
 
177
  raise HTTPException(status_code=500, detail=str(e))
178
 
179
  @api_app.post("/api/v1/process")
180
  async def process_action(request: ActionEvaluationRequest):
181
  """Process action through full ARF pipeline"""
 
182
  evaluation = await evaluate_action(request)
183
 
184
  # Determine final status based on evaluation
@@ -189,24 +232,30 @@ if FASTAPI_AVAILABLE:
189
  else:
190
  final_status = "blocked"
191
 
192
- return {
193
  "action": request.dict(),
194
  "evaluation": evaluation.dict(),
195
  "finalStatus": final_status
196
  }
 
 
197
 
198
  @api_app.post("/api/v1/simulate")
199
  async def simulate_action(request: ActionEvaluationRequest, config: dict = None):
200
  """Simulate action with temporary configuration"""
 
201
  evaluation = await evaluate_action(request)
202
  return evaluation
203
 
204
- print("✅ FastAPI endpoints configured")
205
- print(" Endpoints will be available at:")
206
- print(" - /api/v1/config")
207
- print(" - /api/v1/evaluate")
208
- print(" - /api/v1/process")
209
- print(" - /api/v1/simulate")
 
 
 
210
 
211
  # ============== HUGGING FACE SPACES DETECTION ==============
212
  def is_huggingface_spaces():
 
22
  from fastapi.middleware.cors import CORSMiddleware
23
  from pydantic import BaseModel, Field
24
  from gradio import mount_gradio_app
25
+ from typing import Optional
26
  FASTAPI_AVAILABLE = True
27
  print("✅ FastAPI available")
28
  except ImportError as e:
 
36
  class HTTPException(Exception): pass
37
  class CORSMiddleware: pass
38
  def Field(*args, **kwargs): return None
39
+ Optional = lambda x: x
40
 
41
  if FASTAPI_AVAILABLE:
42
  # Create FastAPI app
 
51
  allow_headers=["*"],
52
  )
53
 
54
+ # ============== IN-MEMORY CONFIGURATION STORAGE ==============
55
+ # This is the key fix - persistent storage for config
56
+ current_config = {
57
+ "confidenceThreshold": 0.9,
58
+ "maxAutonomousRisk": "MEDIUM",
59
+ "riskScoreThresholds": {
60
+ "LOW": 0.7,
61
+ "MEDIUM": 0.5,
62
+ "HIGH": 0.3,
63
+ "CRITICAL": 0.1
64
+ }
65
+ }
66
+
67
  # Define Pydantic models
68
  class ActionEvaluationRequest(BaseModel):
69
  proposed_action: str = Field(..., description="The command the AI wants to execute")
 
94
  maxAutonomousRisk: str
95
  riskScoreThresholds: dict
96
 
97
+ class ConfigUpdateRequest(BaseModel):
98
+ confidenceThreshold: Optional[float] = Field(None, ge=0.5, le=1.0)
99
+ maxAutonomousRisk: Optional[str] = Field(None, regex="^(LOW|MEDIUM|HIGH|CRITICAL)$")
100
+
101
+ # ============== CONFIGURATION ENDPOINTS ==============
102
+
103
  @api_app.get("/api/v1/config", response_model=ConfigResponse)
104
  async def get_config():
105
  """Get current ARF configuration"""
106
+ print(f"📊 GET /api/v1/config -> {current_config}")
107
+ return current_config
 
 
 
 
 
 
 
 
108
 
109
+ @api_app.post("/api/v1/config", response_model=ConfigResponse)
110
+ async def update_config(config_update: ConfigUpdateRequest):
111
+ """Update ARF configuration"""
112
+ global current_config
113
+ print(f"🔄 POST /api/v1/config with: {config_update}")
114
+
115
+ # Update only the fields that were sent
116
+ if config_update.confidenceThreshold is not None:
117
+ current_config["confidenceThreshold"] = config_update.confidenceThreshold
118
+ print(f" Updated confidenceThreshold to {config_update.confidenceThreshold}")
119
+
120
+ if config_update.maxAutonomousRisk is not None:
121
+ current_config["maxAutonomousRisk"] = config_update.maxAutonomousRisk
122
+ print(f" Updated maxAutonomousRisk to {config_update.maxAutonomousRisk}")
123
+
124
+ print(f"✅ New config: {current_config}")
125
+ return current_config
126
+
127
+ # ============== EVALUATION ENDPOINTS ==============
128
+
129
  @api_app.post("/api/v1/evaluate", response_model=ActionEvaluationResponse)
130
  async def evaluate_action(request: ActionEvaluationRequest):
131
  """Evaluate an action using mathematical Bayesian assessment"""
132
  try:
133
+ print(f"🔍 Evaluating action: {request.proposed_action}")
134
+ print(f" Using config: confidence={current_config['confidenceThreshold']}, maxRisk={current_config['maxAutonomousRisk']}")
135
+
136
  gates = []
137
 
138
+ # Confidence gate - using current_config
139
+ confidence_passed = request.confidence_score >= current_config["confidenceThreshold"]
140
  gates.append(GateEvaluationResponse(
141
  gate="confidence_threshold",
142
+ reason=f"Confidence {request.confidence_score:.2f} meets threshold {current_config['confidenceThreshold']}" if confidence_passed
143
+ else f"Confidence {request.confidence_score:.2f} below threshold {current_config['confidenceThreshold']}",
144
  passed=confidence_passed,
145
+ threshold=current_config["confidenceThreshold"],
146
  actual=request.confidence_score
147
  ))
148
 
149
+ # Risk gate - using current_config
150
+ risk_levels = ["LOW", "MEDIUM", "HIGH", "CRITICAL"]
151
+ max_risk_index = risk_levels.index(current_config["maxAutonomousRisk"])
152
+ action_risk_index = risk_levels.index(request.risk_level)
153
+ risk_passed = action_risk_index <= max_risk_index
154
+
155
  gates.append(GateEvaluationResponse(
156
  gate="risk_assessment",
157
+ reason=f"Risk level {request.risk_level} within autonomous range (≤ {current_config['maxAutonomousRisk']})" if risk_passed
158
+ else f"Risk level {request.risk_level} exceeds autonomous threshold (max: {current_config['maxAutonomousRisk']})",
159
  passed=risk_passed,
160
+ metadata={"maxAutonomousRisk": current_config["maxAutonomousRisk"], "actionRisk": request.risk_level}
161
  ))
162
 
163
  # Rollback gate
 
192
 
193
  all_passed = all(g.passed for g in gates)
194
 
195
+ # Determine required level based on gates and config
196
  if all_passed:
197
  if request.risk_level == "LOW":
198
  required_level = "AUTONOMOUS_LOW"
 
203
  else:
204
  required_level = "OPERATOR_REVIEW"
205
 
206
+ result = ActionEvaluationResponse(
207
  allowed=all_passed,
208
  required_level=required_level,
209
  gates_triggered=gates,
 
211
  escalation_reason=None if all_passed else "Failed critical gates"
212
  )
213
 
214
+ print(f"✅ Evaluation complete: allowed={result.allowed}, required_level={result.required_level}")
215
+ return result
216
+
217
  except Exception as e:
218
+ print(f"โŒ Evaluation error: {e}")
219
  raise HTTPException(status_code=500, detail=str(e))
220
 
221
  @api_app.post("/api/v1/process")
222
  async def process_action(request: ActionEvaluationRequest):
223
  """Process action through full ARF pipeline"""
224
+ print(f"⚙️ Processing action: {request.proposed_action}")
225
  evaluation = await evaluate_action(request)
226
 
227
  # Determine final status based on evaluation
 
232
  else:
233
  final_status = "blocked"
234
 
235
+ result = {
236
  "action": request.dict(),
237
  "evaluation": evaluation.dict(),
238
  "finalStatus": final_status
239
  }
240
+ print(f"✅ Process complete: {final_status}")
241
+ return result
242
 
243
  @api_app.post("/api/v1/simulate")
244
  async def simulate_action(request: ActionEvaluationRequest, config: dict = None):
245
  """Simulate action with temporary configuration"""
246
+ print(f"🎮 Simulating action with custom config: {config}")
247
  evaluation = await evaluate_action(request)
248
  return evaluation
249
 
250
+ print("\n" + "="*80)
251
+ print("✅ FASTAPI ENDPOINTS CONFIGURED")
252
+ print("="*80)
253
+ print(" ๐Ÿ“ GET /api/v1/config")
254
+ print(" ๐Ÿ“ POST /api/v1/config")
255
+ print(" ๐Ÿ“ POST /api/v1/evaluate")
256
+ print(" ๐Ÿ“ POST /api/v1/process")
257
+ print(" ๐Ÿ“ POST /api/v1/simulate")
258
+ print("="*80 + "\n")
259
 
260
  # ============== HUGGING FACE SPACES DETECTION ==============
261
  def is_huggingface_spaces():