riazmo committed on
Commit
1eb4280
·
verified ·
1 Parent(s): df8b9ef

Upload 2 files

Browse files
Files changed (2) hide show
  1. core/hf_inference.py +50 -39
  2. core/rule_engine.py +43 -16
core/hf_inference.py CHANGED
@@ -328,24 +328,19 @@ class HFInferenceClient:
328
 
329
  def __init__(self):
330
  self.settings = get_settings()
331
- self.token = self.settings.hf.hf_token
332
-
 
 
333
  if not self.token:
334
  raise ValueError("HF_TOKEN is required for inference")
335
-
336
- # New HuggingFace router URL (api-inference is deprecated)
337
- # The huggingface_hub library should handle this automatically with newer versions,
338
- # but we set it explicitly for compatibility
339
- router_url = "https://router.huggingface.co"
340
-
341
- # Create clients with explicit base_url for new router
342
- try:
343
- self.sync_client = InferenceClient(token=self.token, base_url=router_url)
344
- self.async_client = AsyncInferenceClient(token=self.token, base_url=router_url)
345
- except TypeError:
346
- # Fallback for older huggingface_hub versions that don't support base_url
347
- self.sync_client = InferenceClient(token=self.token)
348
- self.async_client = AsyncInferenceClient(token=self.token)
349
 
350
  def get_model_for_agent(self, agent_name: str) -> str:
351
  """Get the appropriate model for an agent."""
@@ -424,19 +419,24 @@ class HFInferenceClient:
424
  temperature=temperature,
425
  )
426
  return response.choices[0].message.content
427
-
428
  except Exception as e:
429
- # Try fallback model
 
430
  fallback = self.settings.models.fallback_model
431
- if fallback != model:
432
- print(f"Primary model {model} failed, trying fallback: {fallback}")
433
- response = self.sync_client.chat_completion(
434
- model=fallback,
435
- messages=messages,
436
- max_tokens=max_tokens,
437
- temperature=temperature,
438
- )
439
- return response.choices[0].message.content
 
 
 
 
440
  raise e
441
 
442
  async def complete_async(
@@ -471,18 +471,24 @@ class HFInferenceClient:
471
  temperature=temperature,
472
  )
473
  return response.choices[0].message.content
474
-
475
  except Exception as e:
 
 
476
  fallback = self.settings.models.fallback_model
477
- if fallback != model:
478
- print(f"Primary model {model} failed, trying fallback: {fallback}")
479
- response = await self.async_client.chat_completion(
480
- model=fallback,
481
- messages=messages,
482
- max_tokens=max_tokens,
483
- temperature=temperature,
484
- )
485
- return response.choices[0].message.content
 
 
 
 
486
  raise e
487
 
488
  async def stream_async(
@@ -523,9 +529,14 @@ _client: Optional[HFInferenceClient] = None
523
 
524
 
525
  def get_inference_client() -> HFInferenceClient:
526
- """Get or create the inference client singleton."""
 
 
 
 
527
  global _client
528
- if _client is None:
 
529
  _client = HFInferenceClient()
530
  return _client
531
 
 
328
 
329
  def __init__(self):
330
  self.settings = get_settings()
331
+ # Read token fresh from env — the Settings singleton may have been
332
+ # created before the user entered their token via the Gradio UI.
333
+ self.token = os.getenv("HF_TOKEN", "") or self.settings.hf.hf_token
334
+
335
  if not self.token:
336
  raise ValueError("HF_TOKEN is required for inference")
337
+
338
+ # Do NOT set base_url — the huggingface_hub library routes requests
339
+ # to the correct endpoint automatically based on the model parameter.
340
+ # Setting base_url overrides per-model routing and causes API errors
341
+ # when different models are used across agents.
342
+ self.sync_client = InferenceClient(token=self.token)
343
+ self.async_client = AsyncInferenceClient(token=self.token)
 
 
 
 
 
 
 
344
 
345
  def get_model_for_agent(self, agent_name: str) -> str:
346
  """Get the appropriate model for an agent."""
 
419
  temperature=temperature,
420
  )
421
  return response.choices[0].message.content
422
+
423
  except Exception as e:
424
+ error_msg = str(e)
425
+ print(f"[HF] Primary model {model} failed: {error_msg[:120]}")
426
  fallback = self.settings.models.fallback_model
427
+ if fallback and fallback != model:
428
+ print(f"[HF] Trying fallback: {fallback}")
429
+ try:
430
+ response = self.sync_client.chat_completion(
431
+ model=fallback,
432
+ messages=messages,
433
+ max_tokens=max_tokens,
434
+ temperature=temperature,
435
+ )
436
+ return response.choices[0].message.content
437
+ except Exception as fallback_err:
438
+ print(f"[HF] Fallback {fallback} also failed: {str(fallback_err)[:120]}")
439
+ raise fallback_err
440
  raise e
441
 
442
  async def complete_async(
 
471
  temperature=temperature,
472
  )
473
  return response.choices[0].message.content
474
+
475
  except Exception as e:
476
+ error_msg = str(e)
477
+ print(f"[HF] Primary model {model} failed: {error_msg[:120]}")
478
  fallback = self.settings.models.fallback_model
479
+ if fallback and fallback != model:
480
+ print(f"[HF] Trying fallback: {fallback}")
481
+ try:
482
+ response = await self.async_client.chat_completion(
483
+ model=fallback,
484
+ messages=messages,
485
+ max_tokens=max_tokens,
486
+ temperature=temperature,
487
+ )
488
+ return response.choices[0].message.content
489
+ except Exception as fallback_err:
490
+ print(f"[HF] Fallback {fallback} also failed: {str(fallback_err)[:120]}")
491
+ raise fallback_err
492
  raise e
493
 
494
  async def stream_async(
 
529
 
530
 
531
  def get_inference_client() -> HFInferenceClient:
532
+ """Get or create the inference client singleton.
533
+
534
+ Re-creates the client if the token has changed (e.g. user entered it
535
+ via the Gradio UI after initial startup).
536
+ """
537
  global _client
538
+ current_token = os.getenv("HF_TOKEN", "")
539
+ if _client is None or (_client.token != current_token and current_token):
540
  _client = HFInferenceClient()
541
  return _client
542
 
core/rule_engine.py CHANGED
@@ -280,37 +280,58 @@ def find_aa_compliant_color(hex_color: str, background: str = "#ffffff", target_
280
  a darkened/lightened version that passes.
281
  """
282
  current_contrast = get_contrast_ratio(hex_color, background)
283
-
284
  if current_contrast >= target_contrast:
285
  return hex_color
286
-
287
- # Determine if we need to darken or lighten
 
 
288
  bg_luminance = get_relative_luminance(background)
289
  color_luminance = get_relative_luminance(hex_color)
290
-
291
- # If background is light, darken the color; if dark, lighten it
292
- should_darken = bg_luminance > 0.5
293
-
294
  best_color = hex_color
295
  best_contrast = current_contrast
296
-
297
  for i in range(1, 101):
298
  factor = i / 100
299
-
300
  if should_darken:
301
  new_color = darken_color(hex_color, factor)
302
  else:
303
  new_color = lighten_color(hex_color, factor)
304
-
305
  new_contrast = get_contrast_ratio(new_color, background)
306
-
307
  if new_contrast >= target_contrast:
308
  return new_color
309
-
310
  if new_contrast > best_contrast:
311
  best_contrast = new_contrast
312
  best_color = new_color
313
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
314
  return best_color
315
 
316
 
@@ -536,20 +557,26 @@ def analyze_accessibility(color_tokens: dict, fg_bg_pairs: list[dict] = None) ->
536
  # --- Real foreground-background pair checks ---
537
  if fg_bg_pairs:
538
  for pair in fg_bg_pairs:
539
- fg = pair.get("foreground", "")
540
- bg = pair.get("background", "")
541
  element = pair.get("element", "")
542
  if not (fg.startswith("#") and bg.startswith("#")):
543
  continue
 
 
 
544
  try:
545
  ratio = get_contrast_ratio(fg, bg)
 
 
 
546
  if ratio < 4.5:
547
  # This pair fails AA — record it
548
  fix = find_aa_compliant_color(fg, bg, 4.5)
549
  fix_contrast = get_contrast_ratio(fix, bg)
550
  results.append(ColorAccessibility(
551
  hex_color=fg,
552
- name=f"fg:{fg} on bg:{bg} ({element})",
553
  contrast_on_white=get_contrast_ratio(fg, "#ffffff"),
554
  contrast_on_black=get_contrast_ratio(fg, "#000000"),
555
  passes_aa_normal=False,
 
280
  a darkened/lightened version that passes.
281
  """
282
  current_contrast = get_contrast_ratio(hex_color, background)
283
+
284
  if current_contrast >= target_contrast:
285
  return hex_color
286
+
287
+ # Determine direction: move fg *away* from bg in luminance.
288
+ # If fg is lighter than bg → darken fg to increase gap.
289
+ # If fg is darker than bg → lighten fg to increase gap.
290
  bg_luminance = get_relative_luminance(background)
291
  color_luminance = get_relative_luminance(hex_color)
292
+
293
+ should_darken = color_luminance >= bg_luminance
294
+
 
295
  best_color = hex_color
296
  best_contrast = current_contrast
297
+
298
  for i in range(1, 101):
299
  factor = i / 100
300
+
301
  if should_darken:
302
  new_color = darken_color(hex_color, factor)
303
  else:
304
  new_color = lighten_color(hex_color, factor)
305
+
306
  new_contrast = get_contrast_ratio(new_color, background)
307
+
308
  if new_contrast >= target_contrast:
309
  return new_color
310
+
311
  if new_contrast > best_contrast:
312
  best_contrast = new_contrast
313
  best_color = new_color
314
+
315
+ # If first direction didn't reach target, try the opposite direction
316
+ # (e.g., very similar luminances where either direction could work)
317
+ should_darken = not should_darken
318
+ for i in range(1, 101):
319
+ factor = i / 100
320
+
321
+ if should_darken:
322
+ new_color = darken_color(hex_color, factor)
323
+ else:
324
+ new_color = lighten_color(hex_color, factor)
325
+
326
+ new_contrast = get_contrast_ratio(new_color, background)
327
+
328
+ if new_contrast >= target_contrast:
329
+ return new_color
330
+
331
+ if new_contrast > best_contrast:
332
+ best_contrast = new_contrast
333
+ best_color = new_color
334
+
335
  return best_color
336
 
337
 
 
557
  # --- Real foreground-background pair checks ---
558
  if fg_bg_pairs:
559
  for pair in fg_bg_pairs:
560
+ fg = pair.get("foreground", "").lower()
561
+ bg = pair.get("background", "").lower()
562
  element = pair.get("element", "")
563
  if not (fg.startswith("#") and bg.startswith("#")):
564
  continue
565
+ # Skip same-color pairs (invisible/placeholder text — not real failures)
566
+ if fg == bg:
567
+ continue
568
  try:
569
  ratio = get_contrast_ratio(fg, bg)
570
+ # Skip near-identical pairs (ratio < 1.1) — likely decorative/hidden
571
+ if ratio < 1.1:
572
+ continue
573
  if ratio < 4.5:
574
  # This pair fails AA — record it
575
  fix = find_aa_compliant_color(fg, bg, 4.5)
576
  fix_contrast = get_contrast_ratio(fix, bg)
577
  results.append(ColorAccessibility(
578
  hex_color=fg,
579
+ name=f"fg:{fg} on bg:{bg} ({element}) [{ratio:.1f}:1]",
580
  contrast_on_white=get_contrast_ratio(fg, "#ffffff"),
581
  contrast_on_black=get_contrast_ratio(fg, "#000000"),
582
  passes_aa_normal=False,