"""
JuaKazi Bias Evaluation Framework

A modular, maintainable framework for evaluating gender bias detection systems
in African languages.

Main Components:
    - models: Core data structures and types
    - data_loader: File I/O and data validation
    - bias_detector: Bias detection services
    - metrics_calculator: Evaluation metrics computation
    - evaluator: Main orchestration and coordination

Usage:
    from eval.evaluator import BiasEvaluationOrchestrator

    orchestrator = BiasEvaluationOrchestrator()
    results = orchestrator.run_evaluation()
"""

from .models import (
    Language,
    BiasCategory,
    GroundTruthSample,
    BiasDetectionResult,
    EvaluationMetrics,
    LanguageEvaluationResult,
    FailureCase,
)
from .evaluator import BiasEvaluationOrchestrator, EvaluationError
from .bias_detector import BiasDetector, BaselineDetector, BiasDetectionError
from .data_loader import GroundTruthLoader, RulesLoader, ResultsWriter, DataLoadError
from .metrics_calculator import MetricsCalculator, MetricsFormatter

__version__ = "1.0.0"
__author__ = "JuaKazi Team"

__all__ = [
    # Core models
    "Language",
    "BiasCategory",
    "GroundTruthSample",
    "BiasDetectionResult",
    "EvaluationMetrics",
    "LanguageEvaluationResult",
    "FailureCase",
    # Main services
    "BiasEvaluationOrchestrator",
    "BiasDetector",
    "BaselineDetector",
    "GroundTruthLoader",
    "RulesLoader",
    "ResultsWriter",
    "MetricsCalculator",
    "MetricsFormatter",
    # Exceptions
    "EvaluationError",
    "BiasDetectionError",
    "DataLoadError",
]
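
# Illustrative usage sketch, kept as a comment since this module is imported,
# not executed. It relies only on names re-exported above; whether
# run_evaluation() actually raises DataLoadError or EvaluationError is an
# assumption based on the exported exception types, not documented behavior.
#
#     from eval import BiasEvaluationOrchestrator, DataLoadError, EvaluationError
#
#     try:
#         orchestrator = BiasEvaluationOrchestrator()
#         results = orchestrator.run_evaluation()
#     except (DataLoadError, EvaluationError) as exc:
#         # DataLoadError is assumed to cover ground-truth/rules I/O failures,
#         # EvaluationError to cover orchestration failures.
#         raise SystemExit(f"Evaluation failed: {exc}")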