""" Arabic Function Calling Leaderboard (AFCL) ========================================== Professional leaderboard for evaluating LLMs on Arabic function calling. """ import gradio as gr import pandas as pd import json import os import re import time import requests from typing import Dict, List, Optional from threading import Thread from datasets import load_dataset # Constants TITLE = "Arabic Function Calling Leaderboard" TITLE_AR = "لوحة تقييم استدعاء الدوال بالعربية" # All 28 Models to evaluate MODELS_TO_EVALUATE = [ # Arabic-Native LLMs {"model": "Jais-30B-Chat", "model_id": "inceptionai/jais-30b-chat-v3", "organization": "Inception AI", "params": "30B", "type": "Arabic-Native"}, {"model": "ALLaM-7B-Instruct", "model_id": "sdaia/allam-1-7b-instruct", "organization": "SDAIA", "params": "7B", "type": "Arabic-Native"}, {"model": "SILMA-9B-Instruct", "model_id": "silma-ai/SILMA-9B-Instruct-v1.0", "organization": "Silma AI", "params": "9B", "type": "Arabic-Native"}, {"model": "Fanar-Star-1.2B", "model_id": "QatarComputing/fanar-star-1.2b", "organization": "QCRI", "params": "1.2B", "type": "Arabic-Native"}, {"model": "AceGPT-13B-Chat", "model_id": "FreedomIntelligence/AceGPT-13B-chat", "organization": "FreedomIntelligence", "params": "13B", "type": "Arabic-Native"}, {"model": "AraGPT2-Mega", "model_id": "aubmindlab/aragpt2-mega", "organization": "AUB MIND Lab", "params": "1.5B", "type": "Arabic-Native"}, # Multilingual with strong Arabic {"model": "Qwen2.5-72B-Instruct", "model_id": "Qwen/Qwen2.5-72B-Instruct", "organization": "Alibaba", "params": "72B", "type": "Multilingual"}, {"model": "Qwen2.5-32B-Instruct", "model_id": "Qwen/Qwen2.5-32B-Instruct", "organization": "Alibaba", "params": "32B", "type": "Multilingual"}, {"model": "Qwen2.5-7B-Instruct", "model_id": "Qwen/Qwen2.5-7B-Instruct", "organization": "Alibaba", "params": "7B", "type": "Multilingual"}, {"model": "Llama-3.1-70B-Instruct", "model_id": "meta-llama/Llama-3.1-70B-Instruct", "organization": "Meta", "params": "70B", "type": "Multilingual"}, {"model": "Llama-3.1-8B-Instruct", "model_id": "meta-llama/Llama-3.1-8B-Instruct", "organization": "Meta", "params": "8B", "type": "Multilingual"}, {"model": "Gemma-2-27B-IT", "model_id": "google/gemma-2-27b-it", "organization": "Google", "params": "27B", "type": "Multilingual"}, {"model": "Gemma-2-9B-IT", "model_id": "google/gemma-2-9b-it", "organization": "Google", "params": "9B", "type": "Multilingual"}, # Cohere Arabic Models {"model": "Aya-Expanse-32B", "model_id": "CohereForAI/aya-expanse-32b", "organization": "Cohere", "params": "32B", "type": "Multilingual"}, {"model": "Aya-Expanse-8B", "model_id": "CohereForAI/aya-expanse-8b", "organization": "Cohere", "params": "8B", "type": "Multilingual"}, {"model": "Command-R7B-Arabic", "model_id": "CohereForAI/c4ai-command-r7b-arabic-02-2025", "organization": "Cohere", "params": "7B", "type": "Arabic-Tuned"}, # Falcon (UAE) {"model": "Falcon-180B-Chat", "model_id": "tiiuae/falcon-180B-chat", "organization": "TII UAE", "params": "180B", "type": "Multilingual"}, {"model": "Falcon-40B-Instruct", "model_id": "tiiuae/falcon-40b-instruct", "organization": "TII UAE", "params": "40B", "type": "Multilingual"}, # Mistral {"model": "Mistral-Large", "model_id": "mistralai/Mistral-Large-Instruct-2411", "organization": "Mistral AI", "params": "123B", "type": "Multilingual"}, {"model": "Mixtral-8x22B", "model_id": "mistralai/Mixtral-8x22B-Instruct-v0.1", "organization": "Mistral AI", "params": "141B", "type": "Multilingual"}, {"model": 
"Mistral-7B-Instruct", "model_id": "mistralai/Mistral-7B-Instruct-v0.3", "organization": "Mistral AI", "params": "7B", "type": "Multilingual"}, # Others {"model": "DeepSeek-V3", "model_id": "deepseek-ai/DeepSeek-V3", "organization": "DeepSeek", "params": "671B", "type": "Multilingual"}, {"model": "Phi-4", "model_id": "microsoft/phi-4", "organization": "Microsoft", "params": "14B", "type": "Multilingual"}, {"model": "Phi-3-Mini", "model_id": "microsoft/Phi-3-mini-4k-instruct", "organization": "Microsoft", "params": "3.8B", "type": "Multilingual"}, {"model": "BLOOM-176B", "model_id": "bigscience/bloom", "organization": "BigScience", "params": "176B", "type": "Multilingual"}, {"model": "BLOOMZ-7B1", "model_id": "bigscience/bloomz-7b1", "organization": "BigScience", "params": "7B", "type": "Multilingual"}, # Arabic Fine-tuned {"model": "Arabic-Llama-3.1-8B", "model_id": "Ammar-Arabi/Arabic-Llama-3.1-8B-Instruct", "organization": "Community", "params": "8B", "type": "Arabic-Tuned"}, {"model": "Llama3-8B-Arabic", "model_id": "MahmoudAshraf/Llama3-8B-Arabic-instruct", "organization": "Community", "params": "8B", "type": "Arabic-Tuned"}, ] # Global state LEADERBOARD_DATA = [] EVALUATION_STATUS = {"current": "Initializing...", "progress": 0, "total": len(MODELS_TO_EVALUATE)} RESULTS_DATASET_REPO = "HeshamHaroon/AFCL-Results" # HuggingFace dataset for persistent storage def load_cached_results() -> List[Dict]: """Load cached evaluation results from HuggingFace dataset.""" try: from huggingface_hub import hf_hub_download # Try to download the results file from HF file_path = hf_hub_download( repo_id=RESULTS_DATASET_REPO, filename="results.json", repo_type="dataset", token=os.getenv("HF_TOKEN") ) with open(file_path, 'r', encoding='utf-8') as f: cached = json.load(f) print(f"✅ Loaded {len(cached)} cached results from HuggingFace") return cached except Exception as e: print(f"No cached results found (will evaluate all models): {e}") return [] def save_cached_results(results: List[Dict]): """Save evaluation results to HuggingFace dataset for persistence.""" try: from huggingface_hub import HfApi, create_repo import tempfile api = HfApi() token = os.getenv("HF_TOKEN") # Create the dataset repo if it doesn't exist try: create_repo( repo_id=RESULTS_DATASET_REPO, repo_type="dataset", exist_ok=True, token=token ) except: pass # Save results to a temp file and upload with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False, encoding='utf-8') as f: json.dump(results, f, ensure_ascii=False, indent=2) temp_path = f.name api.upload_file( path_or_fileobj=temp_path, path_in_repo="results.json", repo_id=RESULTS_DATASET_REPO, repo_type="dataset", token=token, commit_message=f"Update results ({len(results)} models)" ) os.unlink(temp_path) print(f"✅ Saved {len(results)} results to HuggingFace dataset") except Exception as e: print(f"⚠️ Error saving to HuggingFace (results may not persist): {e}") # Custom CSS for professional look CUSTOM_CSS = """ /* Professional Dark Theme */ .gradio-container { background: linear-gradient(135deg, #0f0f1a 0%, #1a1a2e 100%) !important; font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important; } /* Header styling */ .header-container { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 16px; padding: 32px; margin-bottom: 24px; box-shadow: 0 20px 40px rgba(102, 126, 234, 0.3); } /* Stats cards */ .stat-card { background: rgba(255,255,255,0.05); backdrop-filter: blur(10px); border: 1px solid rgba(255,255,255,0.1); 

# Custom CSS for professional look
CUSTOM_CSS = """
/* Professional dark theme */
.gradio-container {
    background: linear-gradient(135deg, #0f0f1a 0%, #1a1a2e 100%) !important;
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
}

/* Header styling */
.header-container {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border-radius: 16px;
    padding: 32px;
    margin-bottom: 24px;
    box-shadow: 0 20px 40px rgba(102, 126, 234, 0.3);
}

/* Stats cards */
.stat-card {
    background: rgba(255,255,255,0.05);
    backdrop-filter: blur(10px);
    border: 1px solid rgba(255,255,255,0.1);
    border-radius: 12px;
    padding: 24px;
    text-align: center;
    transition: transform 0.3s ease;
}
.stat-card:hover { transform: translateY(-4px); }
.stat-value {
    font-size: 2.5rem;
    font-weight: 700;
    background: linear-gradient(135deg, #667eea, #764ba2);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
}
.stat-label { color: #a0a0a0; font-size: 0.9rem; margin-top: 8px; }

/* Table styling */
.leaderboard-table {
    background: rgba(255,255,255,0.02) !important;
    border-radius: 12px !important;
    border: 1px solid rgba(255,255,255,0.1) !important;
}

/* Rank badges */
.rank-1 { color: #ffd700 !important; font-weight: bold; }
.rank-2 { color: #c0c0c0 !important; font-weight: bold; }
.rank-3 { color: #cd7f32 !important; font-weight: bold; }

/* Progress bar */
.progress-container {
    background: rgba(255,255,255,0.1);
    border-radius: 8px;
    padding: 16px;
    margin: 16px 0;
}
.progress-bar {
    height: 8px;
    background: linear-gradient(90deg, #667eea, #764ba2);
    border-radius: 4px;
    transition: width 0.5s ease;
}

/* Tabs */
.tabs { border: none !important; }
.tab-nav {
    background: transparent !important;
    border-bottom: 2px solid rgba(255,255,255,0.1) !important;
}
.tab-nav button {
    color: #a0a0a0 !important;
    font-weight: 500 !important;
    padding: 12px 24px !important;
}
.tab-nav button.selected {
    color: #667eea !important;
    border-bottom: 2px solid #667eea !important;
}

/* Category pills */
.category-pill {
    display: inline-block;
    padding: 4px 12px;
    border-radius: 20px;
    font-size: 0.75rem;
    font-weight: 500;
}
.cat-arabic { background: #22c55e20; color: #22c55e; }
.cat-multilingual { background: #3b82f620; color: #3b82f6; }
.cat-tuned { background: #f59e0b20; color: #f59e0b; }
"""


def load_evaluation_dataset():
    """Load the full Arabic FC dataset from HuggingFace (train + test = 1,470 samples)."""
    try:
        # Load both train and test splits
        dataset = load_dataset("HeshamHaroon/Arabic_Function_Calling")
        samples = []

        # Process all splits (train + test)
        for split_name in dataset.keys():
            for item in dataset[split_name]:
                sample = {
                    'id': item['id'],
                    'query_ar': item['query_ar'],
                    'functions': json.loads(item['functions']) if item['functions'] else [],
                    'ground_truth': json.loads(item['ground_truth']) if item['ground_truth'] else None,
                    'category': item['category'],
                }
                samples.append(sample)

        print(f"Loaded {len(samples)} total samples from all splits")
        return samples
    except Exception as e:
        print(f"Error loading dataset: {e}")
        return []


def create_prompt(query: str, functions: List[Dict]) -> str:
    """Create the evaluation prompt in Arabic with full function details."""
    # Arabic system prompt
    prompt = """أنت مساعد ذكي متخصص في استدعاء الدوال البرمجية. مهمتك هي تحليل طلب المستخدم واختيار الدالة المناسبة مع تحديد المعاملات الصحيحة.

### الدوال المتاحة:
"""
    for f in functions:
        func_name = f.get('name', '')
        func_desc = f.get('description', 'لا يوجد وصف')
        prompt += f"**{func_name}**\n"
        prompt += f"الوصف: {func_desc}\n"

        if 'parameters' in f:
            params = f['parameters']
            if 'properties' in params:
                prompt += "المعاملات:\n"
                required_params = params.get('required', [])
                for param_name, param_info in params['properties'].items():
                    param_type = param_info.get('type', 'any')
                    param_desc = param_info.get('description', '')
                    is_required = param_name in required_params
                    req_str = " (مطلوب)" if is_required else " (اختياري)"
                    prompt += f"  • {param_name} ({param_type}){req_str}: {param_desc}\n"
        prompt += "\n"

    prompt += f"""### طلب المستخدم:
{query}

### التعليمات:
1. حلل طلب المستخدم بعناية
2. اختر الدالة المناسبة من القائمة أعلاه
3. استخرج قيم المعاملات من الطلب
4. أجب بصيغة JSON فقط

### صيغة الإجابة:
إذا كانت هناك دالة مناسبة:
{{"name": "اسم_الدالة", "arguments": {{"المعامل1": "القيمة1", "المعامل2": "القيمة2"}}}}

إذا لم تكن هناك دالة مناسبة للطلب:
{{"name": null, "arguments": {{}}}}

### الإجابة (JSON فقط):
"""
    return prompt
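
# Illustrative example of the schema create_prompt consumes. Only the field
# names (name / description / parameters / properties / required) come from
# the code above; the weather function itself is made up:
#
#     functions = [{
#         "name": "get_weather",
#         "description": "جلب حالة الطقس لمدينة معينة",
#         "parameters": {
#             "properties": {
#                 "city": {"type": "string", "description": "اسم المدينة"}
#             },
#             "required": ["city"],
#         },
#     }]
#     prompt = create_prompt("ما حالة الطقس في القاهرة؟", functions)
#     # Expected model answer, per the response format in the prompt:
#     # {"name": "get_weather", "arguments": {"city": "القاهرة"}}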


def call_model(model_id: str, prompt: str) -> str:
    """Call a model via the HuggingFace Inference API."""
    token = os.getenv("HF_TOKEN", "")
    headers = {"Authorization": f"Bearer {token}"}
    url = f"https://api-inference.huggingface.co/models/{model_id}"
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 200, "temperature": 0.1}}

    try:
        response = requests.post(url, headers=headers, json=payload, timeout=60)
        if response.status_code == 503:
            # Model is still loading; wait and retry once
            time.sleep(20)
            response = requests.post(url, headers=headers, json=payload, timeout=60)
        result = response.json()
        if isinstance(result, list) and result:
            return result[0].get("generated_text", "")
        return str(result)
    except Exception:
        return ""


def parse_response(response: str) -> Optional[Dict]:
    """Parse a function call from the model response with robust extraction."""
    if not response:
        return None

    # Clean up the response
    response = response.strip()

    # Try a direct JSON parse first
    try:
        data = json.loads(response)
        if isinstance(data, dict):
            return data
    except Exception:
        pass

    # Try to find a JSON block (handles markdown code blocks)
    json_patterns = [
        r'```json\s*([\s\S]*?)\s*```',  # ```json ... ```
        r'```\s*([\s\S]*?)\s*```',      # ``` ... ```
        r'(\{[\s\S]*\})',               # any JSON object
    ]
    for pattern in json_patterns:
        matches = re.findall(pattern, response)
        for match in matches:
            try:
                data = json.loads(match.strip())
                if isinstance(data, dict) and 'name' in data:
                    return data
            except Exception:
                continue

    # Try to extract JSON starting from the first {
    start_idx = response.find('{')
    if start_idx != -1:
        # Find the matching closing brace
        brace_count = 0
        for i, char in enumerate(response[start_idx:], start_idx):
            if char == '{':
                brace_count += 1
            elif char == '}':
                brace_count -= 1
                if brace_count == 0:
                    try:
                        json_str = response[start_idx:i + 1]
                        data = json.loads(json_str)
                        if isinstance(data, dict):
                            return data
                    except Exception:
                        pass
                    break

    # Check for explicit "no function" indicators
    no_call_patterns = [
        'no function', 'cannot', 'لا يمكن', 'لا توجد', 'null', 'none',
        'not applicable', 'غير متاح', 'لا يوجد', 'no matching', 'no relevant'
    ]
    response_lower = response.lower()
    if any(p in response_lower for p in no_call_patterns):
        return {"name": None, "arguments": {}}

    return None


def normalize_arabic(text: str) -> str:
    """Normalize Arabic text for comparison."""
    if not text:
        return ""
    text = str(text)
    # Remove diacritics (tashkeel)
    text = re.sub(r'[\u064B-\u065F\u0670]', '', text)
    # Normalize alef variants
    text = re.sub(r'[إأآا]', 'ا', text)
    # Normalize taa marbuta
    text = text.replace('ة', 'ه')
    # Normalize yaa
    text = text.replace('ى', 'ي')
    # Lowercase and strip
    return text.lower().strip()


def compare_values(pred_val, exp_val) -> bool:
    """Compare two values with Arabic normalization."""
    pred_str = normalize_arabic(str(pred_val))
    exp_str = normalize_arabic(str(exp_val))

    # Exact match after normalization
    if pred_str == exp_str:
        return True

    # Try a numeric comparison
    try:
        if float(pred_val) == float(exp_val):
            return True
    except (TypeError, ValueError):
        pass

    # Check if one contains the other (for partial matches)
    if pred_str in exp_str or exp_str in pred_str:
        return True

    return False


def evaluate_sample(model_id: str, sample: Dict) -> float:
    """Evaluate a single sample with robust comparison."""
    query = sample.get('query_ar', '')
    functions = sample.get('functions', [])
    category = sample.get('category', '')
    ground_truth = sample.get('ground_truth')

    prompt = create_prompt(query, functions)
    response = call_model(model_id, prompt)
    parsed = parse_response(response)

    # Handle the irrelevance category - the model should NOT call any function
    if category == 'irrelevance':
        if parsed is None:
            return 1.0  # Correct - no valid response
        if parsed.get('name') is None or parsed.get('name') == 'null':
            return 1.0  # Correct - explicitly said no function
        return 0.0  # Wrong - called a function when it shouldn't have

    # Other categories require a valid response
    if not parsed:
        return 0.0
    if not ground_truth:
        return 0.0

    # Get the expected function call
    expected = ground_truth
    if isinstance(ground_truth, dict) and 'calls' in ground_truth:
        calls = ground_truth.get('calls', [])
        if calls:
            expected = calls[0]

    # Compare function names
    pred_name = normalize_arabic(str(parsed.get('name', '')))
    exp_name = normalize_arabic(str(expected.get('name', '')))
    if not pred_name or not exp_name:
        return 0.0
    if pred_name != exp_name:
        # Try a partial match for function names
        if pred_name not in exp_name and exp_name not in pred_name:
            return 0.0

    # Function name matched - now check the arguments
    pred_args = parsed.get('arguments', {}) or {}
    exp_args = expected.get('arguments', {}) or {}

    if not exp_args:
        return 1.0  # No arguments expected, name matched = success
    if not pred_args:
        return 0.5  # Name matched but no arguments provided

    # Compare arguments
    matched = 0
    total = len(exp_args)
    for key, exp_val in exp_args.items():
        # Try an exact key match first
        if key in pred_args:
            if compare_values(pred_args[key], exp_val):
                matched += 1
            continue
        # Try a normalized key match
        norm_key = normalize_arabic(key)
        for pred_key, pred_val in pred_args.items():
            if normalize_arabic(pred_key) == norm_key:
                if compare_values(pred_val, exp_val):
                    matched += 1
                break

    return matched / total if total > 0 else 1.0
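
# Quick self-check for the matching rules above (the strings are illustrative
# examples, not taken from the dataset):
def _matching_examples() -> None:
    # Diacritics are stripped and taa marbuta folds to haa...
    assert normalize_arabic("مَكتبة") == normalize_arabic("مكتبه")
    # ...and alef/yaa variants unify, so spelling differences don't fail a match.
    assert normalize_arabic("أحمد") == normalize_arabic("احمد")
    assert normalize_arabic("على") == normalize_arabic("علي")
    # compare_values also accepts numeric equivalence across types.
    assert compare_values("25", 25.0)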


def run_evaluation():
    """Run evaluation only on new models (uses the cache for existing results)."""
    global LEADERBOARD_DATA, EVALUATION_STATUS

    # Step 1: Load cached results first
    EVALUATION_STATUS["current"] = "Loading cached results..."
    cached_results = load_cached_results()

    # Build the set of already evaluated model IDs
    evaluated_models = {r['model_id'] for r in cached_results}
    print(f"Already evaluated: {len(evaluated_models)} models")

    # Step 2: Check which models need evaluation
    models_to_run = [m for m in MODELS_TO_EVALUATE if m['model_id'] not in evaluated_models]

    if not models_to_run:
        # All models already evaluated - just use the cache
        EVALUATION_STATUS["current"] = "All models evaluated (from cache)"
        EVALUATION_STATUS["progress"] = len(MODELS_TO_EVALUATE)
        LEADERBOARD_DATA = sorted(cached_results, key=lambda x: x['overall'], reverse=True)
        for i, r in enumerate(LEADERBOARD_DATA, 1):
            r['rank'] = i
        print("All models loaded from cache - no new evaluation needed")
        return

    # Step 3: Load the dataset only if new models need evaluation
    EVALUATION_STATUS["current"] = f"Loading dataset ({len(models_to_run)} new models to evaluate)..."
    samples = load_evaluation_dataset()
    if not samples:
        EVALUATION_STATUS["current"] = "Failed to load dataset"
        # Still show cached results
        if cached_results:
            LEADERBOARD_DATA = sorted(cached_results, key=lambda x: x['overall'], reverse=True)
            for i, r in enumerate(LEADERBOARD_DATA, 1):
                r['rank'] = i
        return

    # Start with the cached results
    results = list(cached_results)
    total_models = len(MODELS_TO_EVALUATE)

    # Step 4: Evaluate only the new models
    for idx, model_config in enumerate(models_to_run):
        model_name = model_config['model']
        model_id = model_config['model_id']
        evaluated_count = len(evaluated_models) + idx + 1

        EVALUATION_STATUS["current"] = f"Evaluating {model_name}... ({evaluated_count}/{total_models})"
        EVALUATION_STATUS["progress"] = evaluated_count

        category_scores = {}
        category_counts = {}
        for sample in samples:
            cat = sample.get('category', 'simple')
            if cat not in category_scores:
                category_scores[cat] = 0.0
                category_counts[cat] = 0
            try:
                score = evaluate_sample(model_id, sample)
                category_scores[cat] += score
            except Exception:
                pass  # a failed sample counts as a score of 0
            category_counts[cat] += 1
            time.sleep(0.5)

        # Calculate per-category scores
        scores = {cat: round((category_scores[cat] / category_counts[cat]) * 100, 1)
                  for cat in category_scores if category_counts[cat] > 0}

        # Weighted overall score
        weights = {"simple": 0.15, "multiple": 0.10, "parallel": 0.10,
                   "parallel_multiple": 0.10, "irrelevance": 0.15, "dialect_handling": 0.15}
        overall = sum(scores.get(c, 0) * w for c, w in weights.items()) / sum(weights.values())

        new_result = {
            "model": model_name,
            "model_id": model_id,
            "organization": model_config['organization'],
            "params": model_config['params'],
            "type": model_config['type'],
            "overall": round(overall, 1),
            "simple": scores.get('simple', 0),
            "multiple": scores.get('multiple', 0),
            "parallel": scores.get('parallel', 0),
            "parallel_multiple": scores.get('parallel_multiple', 0),
            "irrelevance": scores.get('irrelevance', 0),
            "dialect_handling": scores.get('dialect_handling', 0),
            "status": "completed",
        }
        results.append(new_result)

        # Save the cache after each model (in case of a crash)
        save_cached_results(results)

        # Update the global data after each model
        temp_results = sorted(results, key=lambda x: x['overall'], reverse=True)
        for i, r in enumerate(temp_results, 1):
            r['rank'] = i
        LEADERBOARD_DATA = temp_results

    EVALUATION_STATUS["current"] = "Evaluation Complete"
    EVALUATION_STATUS["progress"] = total_models


def get_leaderboard_df():
    """Get the leaderboard as a DataFrame."""
    if not LEADERBOARD_DATA:
        # No results yet - show the model roster with placeholder scores
        data = []
        for i, m in enumerate(MODELS_TO_EVALUATE, 1):
            data.append({
                "Rank": i,
                "Model": m["model"],
                "Org": m["organization"],
                "Size": m["params"],
                "Type": m["type"],
                "Overall": "—", "Simple": "—", "Multiple": "—",
                "Parallel": "—", "Irrelevance": "—", "Dialect": "—",
            })
        return pd.DataFrame(data)

    data = []
    for r in LEADERBOARD_DATA:
        # Medal emoji for the top three ranks
        rank = (f"🥇 {r['rank']}" if r['rank'] == 1
                else f"🥈 {r['rank']}" if r['rank'] == 2
                else f"🥉 {r['rank']}" if r['rank'] == 3
                else r['rank'])
        data.append({
            "Rank": rank,
            "Model": r['model'],
            "Org": r['organization'],
            "Size": r['params'],
            "Type": r['type'],
            "Overall": f"{r['overall']}%",
            "Simple": f"{r['simple']}%",
            "Multiple": f"{r['multiple']}%",
            "Parallel": f"{r['parallel']}%",
            "Irrelevance": f"{r['irrelevance']}%",
            "Dialect": f"{r['dialect_handling']}%",
        })
    return pd.DataFrame(data)


def get_status_html():
    """Get the evaluation status as HTML."""
    progress = EVALUATION_STATUS["progress"]
    total = EVALUATION_STATUS["total"]
    current = EVALUATION_STATUS["current"]
    pct = (progress / total) * 100 if total > 0 else 0
    # Minimal status markup (assumes the .progress-container/.progress-bar
    # styles defined in CUSTOM_CSS above)
    return f"""
    <div class="progress-container">
        <div style="color:#a0a0a0; margin-bottom:8px;">{current}</div>
        <div style="background:rgba(255,255,255,0.1); border-radius:4px;">
            <div class="progress-bar" style="width:{pct:.0f}%;"></div>
        </div>
        <div style="color:#a0a0a0; margin-top:8px; font-size:0.8rem;">{progress}/{total} models</div>
    </div>
    """
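
# Worked example of the weighted overall score computed in run_evaluation
# (the weights sum to 0.75, so the division renormalizes; the per-category
# percentages below are made up for illustration):
#
#     scores  = {"simple": 80, "multiple": 70, "parallel": 60,
#                "parallel_multiple": 50, "irrelevance": 90,
#                "dialect_handling": 75}
#     overall = (80*0.15 + 70*0.10 + 60*0.10 + 50*0.10 + 90*0.15 + 75*0.15) / 0.75
#             = 54.75 / 0.75 = 73.0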

# ---------------------------------------------------------------------------
# Gradio UI (the layout is reconstructed around the page copy; the CSS class
# names come from CUSTOM_CSS above)
# ---------------------------------------------------------------------------

CATEGORY_CARDS = [
    ("Simple", "Single function, single call scenarios"),
    ("Multiple", "Select the correct function from 2-4 options"),
    ("Parallel", "Multiple calls of the same function"),
    ("Parallel Multiple", "Multiple functions, multiple calls"),
    ("Irrelevance", "Correctly reject when no function applies"),
    ("Dialects", "Egyptian 🇪🇬 / Gulf 🇸🇦 / Levantine 🇱🇧"),
]

CITATION_BIBTEX = """@misc{afcl2024,
  title={Arabic Function Calling Leaderboard},
  author={Hesham Haroon},
  year={2024},
  url={https://huggingface.co/spaces/HeshamHaroon/Arabic-Function-Calling-Leaderboard}
}"""

with gr.Blocks(css=CUSTOM_CSS, title=TITLE) as demo:
    # Header
    gr.HTML(f"""
    <div class="header-container">
        <h1>{TITLE}</h1>
        <h2 dir="rtl">{TITLE_AR}</h2>
        <p>Comprehensive benchmark evaluating LLMs on Arabic function calling
        across 10 categories, including dialects.</p>
    </div>
    """)

    # Evaluation category cards
    cards = "".join(
        f'<div class="stat-card"><div class="stat-value">{name}</div>'
        f'<div class="stat-label">{desc}</div></div>'
        for name, desc in CATEGORY_CARDS
    )
    gr.HTML(f'<div style="display:grid; grid-template-columns:repeat(3,1fr); gap:16px;">{cards}</div>')

    with gr.Tabs():
        with gr.Tab("🏆 Leaderboard"):
            status_html = gr.HTML(get_status_html())
            leaderboard = gr.Dataframe(get_leaderboard_df(), elem_classes="leaderboard-table")
            refresh_btn = gr.Button("🔄 Refresh")

            def _refresh():
                # Re-read the module-level state updated by run_evaluation
                return get_status_html(), get_leaderboard_df()

            refresh_btn.click(_refresh, outputs=[status_html, leaderboard])

        with gr.Tab("ℹ️ About"):
            gr.Markdown(f"""
The Arabic Function Calling Leaderboard (AFCL) is the first comprehensive benchmark
for evaluating LLMs on function calling capabilities in Arabic. It tests models across
Modern Standard Arabic (MSA) and three major dialects: Egyptian, Gulf, and Levantine.

**Dataset:** 📊 [HeshamHaroon/Arabic_Function_Calling](https://huggingface.co/datasets/HeshamHaroon/Arabic_Function_Calling)

**Methodology:** Models are scored using AST-based matching with Arabic text
normalization. The overall score is a weighted average across all categories,
with emphasis on irrelevance detection and dialect handling.

**Citation:**
```bibtex
{CITATION_BIBTEX}
```
""")

    # Footer
    gr.HTML('<p style="text-align:center; color:#a0a0a0;">Built for the Arabic NLP Community | بُني لمجتمع معالجة اللغة العربية</p>')
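
# Entry point (a minimal sketch, assuming the standard Gradio Space pattern):
# run the evaluation loop on a daemon thread so the UI loads immediately
# while results stream into LEADERBOARD_DATA in the background.
if __name__ == "__main__":
    Thread(target=run_evaluation, daemon=True).start()
    demo.launch()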