# Provenance: RegMap / phase2_rkb.py (uploaded by inesbedar, commit 5d279aa, verified)
# ═══════════════════════════════════════════════════════════════
# RegMap Phase 2 — Regulatory Knowledge Base
# Qualification questions, obligations, overlaps, gap analysis
# ═══════════════════════════════════════════════════════════════
# ─────────────────────────────────────────────────
# SECTION 1: QUALIFICATION QUESTIONS
# For each AI-specific regulation, questions to
# confirm applicability and determine obligation set
# ─────────────────────────────────────────────────
# Schema: regulation name → {"questions": [question, ...]} where each
# question dict carries:
#   "id"        — stable identifier for the question (referenced downstream)
#   "text"      — prompt shown to the respondent
#   "type"      — "single_select" or "multi_select"
#   "options"   — answer strings, in display order
#   "note"      — optional guidance shown alongside the question
#   "condition" — optional free-text display condition; NOTE(review): looks
#                 like it is interpreted by the rendering layer — confirm there
QUALIFICATION_QUESTIONS = {
    # ── EU AI ACT ──
    "EU AI Act (Regulation 2024/1689)": {
        "questions": [
            {
                "id": "euaia_exception",
                "text": "Is your AI system covered by any of the following exceptions to the EU AI Act?",
                "type": "multi_select",
                "options": [
                    "The AI system is used exclusively for military, defence, or national security purposes",
                    "The AI system is used solely for scientific research and development and has not yet been placed on the market or put into service",
                    "The AI system is used for purely personal, non-professional purposes by a natural person",
                    "The AI system is released under a free and open-source licence with publicly available parameters, including weights",
                    "The AI system is operated by a third-country public authority under international law enforcement or judicial cooperation agreements",
                    "None of the above",
                ],
                "note": "The open-source exception does NOT apply if the system is classified as high-risk, performs a prohibited practice, or is a GPAI model with systemic risk.",
            },
            {
                "id": "euaia_sme",
                "text": "Does your organisation qualify as an SME under the EU definition?",
                "type": "single_select",
                "options": [
                    "Yes — fewer than 250 employees AND turnover ≤ €50M or balance sheet ≤ €43M",
                    "No",
                    "I'm not sure",
                ],
                "note": "An SME (small or medium-sized enterprise) is defined by EU Recommendation 2003/361/EC: fewer than 250 employees AND either annual turnover ≤ €50M or balance sheet total ≤ €43M. SMEs benefit from simplified documentation, reduced fees, regulatory sandbox priority, and capped penalties. If unsure, select 'I'm not sure' — obligations will be assessed conservatively.",
            },
            {
                "id": "euaia_prohibited",
                "text": "Does your AI system perform any of the following practices prohibited under Art. 5 of the EU AI Act?",
                "type": "multi_select",
                "options": [
                    "Subliminal manipulation techniques that cause or are likely to cause harm",
                    "Exploitation of vulnerabilities of specific groups due to age, disability, or social/economic situation",
                    "Social scoring by public authorities leading to detrimental treatment",
                    "Real-time remote biometric identification in publicly accessible spaces for law enforcement (except narrow exceptions)",
                    "Emotion recognition in the workplace or in education institutions (except for medical or safety reasons)",
                    "Untargeted scraping of facial images from the internet or CCTV to build facial recognition databases",
                    "Biometric categorisation to infer race, political opinions, trade union membership, religious beliefs, sex life or sexual orientation",
                    "Individual predictive policing based solely on profiling or personality traits",
                    "None of the above",
                ],
                "note": "If your system falls under any prohibited practice, it cannot be placed on the EU market. Penalties: up to €35M or 7% of global annual turnover.",
            },
            {
                "id": "euaia_annex3",
                "text": "Is your AI system used in any of the following high-risk domains listed in Annex III?",
                "type": "multi_select",
                "options": [
                    "Biometric identification and categorisation of natural persons",
                    "Management and operation of critical infrastructure (energy, transport, water, digital)",
                    "Education and vocational training (access, admission, assessment)",
                    "Employment, worker management, and access to self-employment (recruitment, screening, evaluation, monitoring)",
                    "Access to essential private/public services (credit scoring, insurance, social benefits, emergency services)",
                    "Law enforcement (risk assessment, polygraph, evidence evaluation, profiling)",
                    "Migration, asylum, and border control (risk assessment, document verification)",
                    "Administration of justice and democratic processes",
                    "None of the above",
                ],
            },
            {
                # Art. 6(3) carve-out: only relevant after an Annex III hit,
                # hence the display "condition" below.
                "id": "euaia_art6_3",
                "text": "Even if your system falls within an Annex III category, it may NOT be high-risk if ALL of the following conditions are met. Do all apply?",
                "type": "single_select",
                "options": [
                    "Yes — my system performs a narrow procedural task, improves a previous human activity, detects decision patterns without replacing human assessment, or is purely preparatory",
                    "No — my system directly influences consequential decisions about individuals",
                ],
                "condition": "Show only if any Annex III category selected (not 'None of the above')",
                "note": "Art. 6(3) exception: even within Annex III, a system is NOT high-risk if it does not pose significant risk of harm and meets specific conditions.",
            },
            {
                "id": "euaia_transparency",
                "text": "Does your AI system involve any of the following (Art. 50 — Transparency)?",
                "type": "multi_select",
                "options": [
                    "Direct interaction with natural persons (chatbot, virtual assistant)",
                    "Generation of synthetic audio, image, video or text content (deepfakes, GenAI output)",
                    "Emotion recognition or biometric categorisation",
                    "None of the above",
                ],
            },
            {
                # Determines whether a private deployer owes a FRIA (Art. 27);
                # only shown to private-sector organisations.
                "id": "euaia_public_services",
                "text": "Does your organisation provide any of the following public services?",
                "type": "multi_select",
                "options": [
                    "Education (schools, universities, training institutions)",
                    "Healthcare (hospitals, clinics, public health services)",
                    "Social services (social security, welfare, child protection)",
                    "Housing (social housing, public housing allocation)",
                    "Administration of justice (courts, legal aid)",
                    "Public administration (government services, public safety)",
                    "None of the above — private entity not providing public services",
                ],
                "condition": "Show only if is_public_sector=False (private organisation)",
                "note": "Private entities providing public services must conduct a Fundamental Rights Impact Assessment (FRIA) under Art. 27 when deploying high-risk AI systems, just like public bodies.",
            },
        ],
    },
    # ── EU AI ACT — GPAI ──
    "EU AI Act — GPAI Framework (Chapter V)": {
        "questions": [
            {
                "id": "gpai_systemic",
                "text": "Does your general-purpose AI model meet any of the following criteria for systemic risk?",
                "type": "single_select",
                "options": [
                    "Yes — training compute exceeds 10^25 FLOPs",
                    "Yes — designated as systemic risk by the European Commission (Art. 51(1)(b))",
                    "No — standard GPAI model",
                    "I don't know",
                ],
            },
            {
                "id": "gpai_open_source",
                "text": "Is your GPAI model released under a free and open-source licence?",
                "type": "single_select",
                "options": [
                    "Yes — open-source with publicly available model weights",
                    "No — proprietary or restricted access",
                ],
                "note": "Open-source GPAI models have reduced obligations (only technical doc + copyright policy + training data summary). Exception does NOT apply if systemic risk.",
            },
        ],
    },
    # ── COLORADO AI ACT ──
    "Colorado AI Act (SB 24-205)": {
        "questions": [
            {
                "id": "co_consequential",
                "text": "Does your AI system make or substantially influence consequential decisions in any of the following areas?",
                "type": "multi_select",
                "options": [
                    "Education enrollment or opportunity",
                    "Employment or employment opportunity",
                    "Financial or lending services",
                    "Government services or benefits",
                    "Healthcare services",
                    "Housing",
                    "Insurance",
                    "Legal services",
                    "None of the above — system does not make consequential decisions",
                ],
            },
            {
                "id": "co_exception",
                "text": "Does any of the following exceptions apply?",
                "type": "multi_select",
                "options": [
                    "System is approved/regulated by a federal agency (FDA, FAA, etc.) with equivalent or stricter standards",
                    "System performs only narrow procedural tasks without influencing consequential decisions",
                    "System is used solely to detect, prevent, or mitigate discrimination or increase diversity",
                    "System is used solely to detect decision-making patterns without replacing human judgment",
                    "AI-powered chatbot (disclosure-only obligation)",
                    "None of the above",
                ],
            },
            {
                # Small-deployer conditional exemption probe; only shown to
                # deployers under 50 employees (per the condition string).
                "id": "co_use_as_intended",
                "text": "Do you use this AI system only as intended by the developer, without training or substantially customising it with your own data?",
                "type": "single_select",
                "options": [
                    "Yes — we use the system as-is, following the developer's intended purpose, without adding our own training data",
                    "No — we have trained, fine-tuned, or substantially customised the system with our own data or use it beyond the developer's intended purpose",
                ],
                "condition": "Show only if deployer and company_size < 50 employees (auto-detected from ID Card)",
                "note": "Deployers with fewer than 50 full-time employees who use the system only as intended by the developer are conditionally exempt from risk management programmes, impact assessments, and website disclosures. However, the deployer must still notify consumers when a consequential decision involves AI and report algorithmic discrimination to the Attorney General within 90 days.",
            },
        ],
    },
    # ── TEXAS TRAIGA ──
    "Texas TRAIGA (HB 149)": {
        "questions": [
            {
                "id": "tx_entity_type",
                "text": "What type of entity are you operating as in Texas?",
                "type": "single_select",
                "options": [
                    "Texas state agency or local government entity",
                    "Healthcare service provider",
                    "Private sector entity (not government or healthcare)",
                ],
            },
            {
                "id": "tx_prohibited",
                "text": "Does your AI system involve any of the following prohibited uses under TRAIGA?",
                "type": "multi_select",
                "options": [
                    "Intentional discrimination against a protected class",
                    "Social scoring — using AI to assign a score based on socio-economic status, behaviour, or personal characteristics to determine access to services",
                    "Manipulating human behaviour to incite violence, self-harm, or criminal activity",
                    "Use of biometric data to identify individuals without consent or legal authority",
                    "None of the above",
                ],
            },
        ],
    },
    # ── UTAH AI POLICY ACT ──
    "Utah AI Policy Act (SB 149)": {
        "questions": [
            {
                "id": "ut_regulated",
                "text": "Is your AI system used in a regulated occupation or industry in Utah (e.g. licensed professionals, insurance, financial services)?",
                "type": "single_select",
                "options": [
                    "Yes — used in a regulated occupation/industry",
                    "No — not used in regulated contexts",
                ],
            },
            {
                "id": "ut_interaction",
                "text": "Does your AI system interact directly with consumers?",
                "type": "single_select",
                "options": [
                    "Yes — direct consumer interaction",
                    "No — no direct consumer interaction",
                ],
            },
        ],
    },
    # ── CALIFORNIA ADMT ──
    "California CCPA / ADMT Regulations": {
        "questions": [
            {
                "id": "ca_threshold",
                "text": "Does your organisation meet the CCPA applicability thresholds?",
                "type": "multi_select",
                "options": [
                    "Annual gross revenue exceeding USD 25 million",
                    "Buys, sells, or shares personal information of 100,000+ California consumers/households/devices",
                    "Derives 50%+ of annual revenue from selling/sharing personal information",
                    "None of the above",
                ],
            },
            {
                "id": "ca_admt",
                "text": "Does your AI system qualify as automated decision-making technology (ADMT) under the CPPA regulations?",
                "type": "single_select",
                "options": [
                    "Yes — makes or assists decisions on employment, housing, education, health, insurance, financial, or legal matters",
                    "Yes — processes personal information to profile consumers",
                    "No — does not qualify as ADMT",
                ],
            },
        ],
    },
    # ── ILLINOIS HB 3773 (AI IN EMPLOYMENT) ──
    "Illinois HB 3773 (AI in Employment)": {
        "questions": [
            {
                "id": "il_employment_ai",
                "text": "Does your AI system make or assist in any of the following employment decisions in Illinois?",
                "type": "multi_select",
                "options": [
                    "Recruitment or hiring",
                    "Promotion or renewal of employment",
                    "Selection for training or apprenticeship",
                    "Discharge, discipline, or tenure decisions",
                    "Terms, privileges, or conditions of employment",
                    "None of the above",
                ],
                "note": "HB 3773 (effective January 1, 2026) amends the Illinois Human Rights Act to cover ALL AI used in employment decisions, not just video interviews. Applies to employers with 1 or more employees for 20 or more calendar weeks.",
            },
            {
                "id": "il_video_interview",
                "text": "Additionally, does your AI system analyse video interviews of job applicants?",
                "type": "single_select",
                "options": [
                    "Yes — AI analyses applicant video interviews",
                    "No",
                ],
                "note": "The separate Artificial Intelligence Video Interview Act (AIVIA, 820 ILCS 42, effective since January 2020) imposes additional consent and data-handling obligations specifically for AI-analysed video interviews.",
            },
        ],
    },
    # ── CALIFORNIA SB 53 (Frontier AI) ──
    "California SB 53 (Frontier AI Transparency)": {
        "questions": [
            {
                "id": "casb53_frontier",
                "text": "Is your AI model a frontier model (trained using more than 10²⁶ integer or floating-point operations, including fine-tuning and RLHF)?",
                "type": "single_select",
                "options": [
                    "Yes — model training compute exceeds 10²⁶ FLOPs",
                    "No — model compute is below the 10²⁶ FLOPs threshold",
                    "I don't know",
                ],
            },
            {
                "id": "casb53_revenue",
                "text": "Does your entity (including affiliates) have annual gross revenues exceeding USD 500 million?",
                "type": "single_select",
                "options": [
                    "Yes — annual revenue exceeds USD 500M",
                    "No — annual revenue is below USD 500M",
                ],
                "note": "Large frontier developers (>USD 500M revenue) have additional obligations including publishing a Frontier AI Framework.",
            },
        ],
    },
    # ── NEW YORK RAISE ACT (Frontier AI) ──
    "New York RAISE Act (Frontier AI Safety)": {
        "questions": [
            {
                "id": "nyraise_frontier",
                "text": "Is your AI model a frontier model (trained using more than 10²⁶ FLOPs with compute costs exceeding USD 100 million, or produced through knowledge distillation from a frontier model at >USD 5M cost)?",
                "type": "single_select",
                "options": [
                    "Yes — model meets the 10²⁶ FLOPs and USD 100M compute cost thresholds",
                    "Yes — model was produced through knowledge distillation from a frontier model (>USD 5M cost)",
                    "No — model does not meet frontier thresholds",
                    "I don't know",
                ],
            },
            {
                "id": "nyraise_revenue",
                "text": "Does your entity have annual revenues exceeding USD 500 million?",
                "type": "single_select",
                "options": [
                    "Yes — annual revenue exceeds USD 500M",
                    "No — annual revenue is below USD 500M",
                ],
                "note": "The RAISE Act applies to large developers (>USD 500M revenue) who develop frontier models. Academic institutions conducting research are excluded.",
            },
        ],
    },
    # ── DIFC REGULATION 10 ──
    "DIFC Regulation 10 (AI Processing)": {
        "questions": [
            {
                "id": "difc_autonomous",
                "text": "Does your AI system process personal data in an autonomous or semi-autonomous manner within the DIFC?",
                "type": "single_select",
                "options": [
                    "Yes — autonomous/semi-autonomous processing of personal data in DIFC",
                    "No",
                ],
            },
            {
                "id": "difc_commercial_high_risk",
                "text": "Is the AI system used for commercial purposes and does it involve high-risk processing activities?",
                "type": "single_select",
                "options": [
                    "Yes — commercial use with high-risk processing (e.g. profiling, automated decisions with legal effects, special category data, systematic monitoring)",
                    "No — either non-commercial or no high-risk processing",
                ],
                "note": "High-risk commercial use triggers mandatory AI system certification and appointment of an Autonomous Systems Officer (ASO). Full enforcement from January 2026.",
            },
            {
                "id": "difc_profiling",
                "text": "Does the system involve any of the following?",
                "type": "multi_select",
                "options": [
                    "Profiling of individuals",
                    "Automated decision-making with legal or significant effects",
                    "Processing of special categories of personal data (health, biometric, etc.)",
                    "Systematic monitoring of individuals",
                    "None of the above",
                ],
            },
        ],
    },
}
# ─────────────────────────────────────────────────
# SECTION 2: OBLIGATIONS
# Per regulation, per role, per risk level
# ─────────────────────────────────────────────────
OBLIGATIONS = {
# ════════════════════════════════════════════
# AI-SPECIFIC — FULL DEEP DIVE
# ════════════════════════════════════════════
"EU AI Act (Regulation 2024/1689)": {
"prohibited": {
"label": "Prohibited AI Practice",
"obligations": [
"Immediately cease and remove the AI system from the EU market — the system must not be placed on the market, put into service, or used in the EU",
"Penalties: up to €35M or 7% of global annual turnover",
"If you modify the system to remove the prohibited characteristics (e.g. removing social scoring, eliminating real-time biometric identification, or adding proper consent mechanisms), re-run this assessment — the system will likely fall into another EU AI Act category (high-risk, limited-risk, or minimal-risk) with different, manageable obligations",
],
},
"high_risk_provider": {
"label": "High-Risk AI System — Provider Obligations",
"obligations": [
"Art. 9 — Establish and maintain a risk management system throughout the AI system's lifecycle",
"Art. 10 — Implement data governance: training, validation, and testing datasets must be relevant, representative, and free of errors",
"Art. 11 — Prepare and maintain technical documentation (before placing on market and keep up to date)",
"Art. 12 — Enable automatic recording of events (logging) for traceability",
"Art. 13 — Design system for sufficient transparency to allow deployers to interpret output and use appropriately",
"Art. 14 — Design for effective human oversight, enabling human-machine interface tools",
"Art. 15 — Achieve appropriate levels of accuracy, robustness, and cybersecurity",
"Art. 17 — Establish and document a quality management system (QMS)",
"Art. 43 — Complete conformity assessment (self-assessment or third-party depending on Annex III category)",
"Art. 47 — Affix CE marking upon successful conformity assessment",
"Art. 49 — Register in EU database before placing on market",
"Art. 72 — Establish post-market monitoring system",
"Art. 73 — Report serious incidents to market surveillance authorities",
],
"deadline": "August 2, 2026 (most obligations); August 2, 2027 (systems already on market)",
"penalty": "Up to €15M or 3% of global annual turnover",
},
"high_risk_deployer": {
"label": "High-Risk AI System — Deployer Obligations",
"obligations": [
"Art. 26(1) — Implement appropriate technical and organisational measures to use system in accordance with instructions",
"Art. 26(2) — Assign human oversight to competent, trained, authorised individuals",
"Art. 26(4) — Monitor operation and inform provider/distributor of risks or incidents",
"Art. 26(6) — Use system only for intended purpose as described in instructions of use",
"Art. 26(7) — Inform affected individuals that they are subject to high-risk AI system use",
"Art. 26(8) — Ensure human oversight is exercised effectively",
"Art. 26(11) — Keep logs automatically generated by the high-risk AI system for at least 6 months",
],
"deadline": "August 2, 2026",
"penalty": "Up to €15M or 3% of global annual turnover",
},
"high_risk_deployer_fria": {
"label": "High-Risk AI System — Fundamental Rights Impact Assessment",
"obligations": [
"Art. 27(1) — Conduct a Fundamental Rights Impact Assessment (FRIA) before first deployment of a high-risk AI system",
"Art. 27(1)(a) — Describe processes in which the system will be used and time period/frequency of use",
"Art. 27(1)(b) — Identify categories of natural persons and groups likely to be affected",
"Art. 27(1)(c) — Assess specific risks of harm to affected persons or groups",
"Art. 27(1)(d) — Describe implementation of human oversight measures",
"Art. 27(1)(e) — Describe measures to be taken if risks materialise",
"Art. 27(3) — Notify market surveillance authority of FRIA results using official template",
],
"scope": "Mandatory for: (1) deployers that are bodies governed by public law, (2) private entities providing public services (education, healthcare, social services, housing, administration of justice), and (3) deployers of credit scoring or life/health insurance AI systems (regardless of sector). Other private-sector deployers are NOT required to conduct a FRIA.",
"note": "If a GDPR DPIA has already been conducted, the FRIA must complement it — both can be combined into a single assessment.",
"deadline": "August 2, 2026",
},
"limited_risk": {
"label": "Limited Risk — Transparency Obligations",
"obligations": [
"Art. 50(1) — Inform individuals that they are interacting with an AI system (unless obvious from context)",
"Art. 50(2) — Label AI-generated synthetic content (audio, image, video, text) in machine-readable format",
"Art. 50(3) — Deployers of emotion recognition or biometric categorisation must inform exposed individuals",
"Art. 50(4) — Deployers of deepfake systems must disclose content is AI-generated (exception: artistic/satirical freedom with editorial safeguards)",
],
"deadline": "August 2, 2025",
"penalty": "Up to €15M or 3% of global annual turnover",
},
"minimal_risk": {
"label": "Minimal Risk",
"obligations": [
"Art. 4 — Ensure AI literacy: staff and operators must have sufficient understanding of AI systems they develop or use",
],
"deadline": "February 2, 2025 (AI literacy)",
},
},
"EU AI Act — GPAI Framework (Chapter V)": {
"gpai_standard": {
"label": "GPAI Model — Standard Obligations",
"obligations": [
"Art. 53(1)(a) — Prepare and maintain technical documentation of the model including training and testing process",
"Art. 53(1)(b) — Prepare information and documentation for downstream providers integrating the model",
"Art. 53(1)(c) — Establish a policy to comply with EU Copyright Directive (including text and data mining opt-out)",
"Art. 53(1)(d) — Publish a sufficiently detailed summary of training data content",
],
},
"gpai_systemic": {
"label": "GPAI Model with Systemic Risk — Additional Obligations",
"obligations": [
"Art. 55(1)(a) — Perform model evaluation including adversarial testing",
"Art. 55(1)(b) — Assess and mitigate systemic risks",
"Art. 55(1)(c) — Track, document and report serious incidents to AI Office and national authorities",
"Art. 55(1)(d) — Ensure adequate level of cybersecurity protection",
"All standard GPAI obligations also apply",
],
},
"gpai_open_source": {
"label": "Open-Source GPAI Model — Reduced Obligations",
"obligations": [
"Art. 53(2) — Only technical documentation and copyright compliance policy required",
"Training data summary still required",
"If model has systemic risk: full Art. 53 + Art. 55 obligations apply regardless of open-source status",
],
},
},
# ── CALIFORNIA SB 53 ──
"California SB 53 (Frontier AI Transparency)": {
"frontier_developer": {
"label": "Frontier Developer Obligations",
"obligations": [
"Publish a transparency report before deploying any new or substantially modified frontier model, including model capabilities, intended uses, limitations, and safety evaluation results",
"Report critical safety incidents to the California Office of Emergency Services (OES) within 15 days of discovery, or within 24 hours if imminent danger exists",
"Critical safety incidents include: unauthorised tampering causing harm, materialisation of catastrophic risk, loss of model control, or deliberate evasion of safeguards",
"Maintain anonymous whistleblower reporting channels for employees and contractors regarding catastrophic risk",
"Provide employees written notice of whistleblower rights — retaliation against reporters is prohibited",
],
"deadline": "January 1, 2026",
},
"large_frontier_developer": {
"label": "Large Frontier Developer Obligations (>USD 500M revenue)",
"obligations": [
"All frontier developer obligations apply",
"Develop, implement, comply with, and publish on your website a comprehensive Frontier AI Framework",
"The Framework must address: capability thresholds, risk mitigation gates, third-party evaluation criteria, and model weight security controls",
"Include in the Framework how you approach national AI standards (e.g., NIST AI RMF, ISO/IEC 42001)",
"Summarise catastrophic risk assessments in transparency reports, including role of third-party evaluators",
"Annual review and update of the Frontier AI Framework",
"May redact trade secrets from published Framework, but must retain unredacted copies for regulatory review",
],
"deadline": "January 1, 2026",
},
"not_applicable": {
"label": "Not Applicable",
"obligations": [],
},
"penalty": "Up to USD 1,000,000 per violation — enforceable by California Attorney General only",
},
# ── NEW YORK RAISE ACT ──
"New York RAISE Act (Frontier AI Safety)": {
"large_developer": {
"label": "Large Developer Obligations",
"obligations": [
"Adopt, implement, and maintain a written AI safety and security protocol addressing risks of frontier model development and deployment",
"Publish an appropriately redacted version of the safety protocol publicly",
"Retain unredacted version and make available to the State upon request",
"Document testing procedures, results, and safeguards used to evaluate and mitigate risk of critical harm",
"Report safety incidents to the NY Attorney General and Division of Homeland Security within 72 hours of discovery",
"Critical harm threshold: death or serious injury of 100+ people, or USD 1B+ in damages, caused or materially enabled by the frontier model",
"Conduct annual review of safety and security protocols, accounting for changes in model capabilities and industry best practices",
"Submit disclosure statements and assessment fees to the DFS oversight office",
],
"deadline": "January 1, 2027",
},
"not_applicable": {
"label": "Not Applicable",
"obligations": [],
},
"penalty": "Up to USD 1,000,000 for first violation, up to USD 3,000,000 for subsequent violations — enforceable by NY Attorney General",
},
"Colorado AI Act (SB 24-205)": {
"developer": {
"label": "Developer Obligations",
"obligations": [
"Make available to deployers a general statement of reasonably foreseeable uses and known harmful uses",
"Provide high-level summary of training data used",
"Provide documentation on known limitations and how the system was evaluated for performance and bias",
"Publish on website a statement describing types of high-risk AI systems developed and risk management practices",
"Report known or reasonably foreseeable risks of algorithmic discrimination to the Colorado Attorney General and deployers",
],
},
"deployer": {
"label": "Deployer Obligations",
"obligations": [
"Implement a risk management policy and program governing deployment of high-risk AI",
"Complete an annual impact assessment for each high-risk AI system",
"Disclose to consumers: when they are interacting with an AI system, when AI is a substantial factor in a consequential decision, and that they can request human review",
"Provide consumers with an explanation of the decision, right to correct data, and right to appeal",
"Notify the Attorney General within 90 days of discovering algorithmic discrimination",
"Review AI system outputs for algorithmic discrimination",
"Consider adopting the NIST AI Risk Management Framework (AI RMF) — compliance provides a rebuttable presumption of reasonable care (affirmative defense) under the Colorado AI Act",
],
},
"small_deployer_exemption": {
"label": "Small Deployer Conditional Exemption (<50 employees)",
"obligations": [
"Deployers with fewer than 50 full-time employees are exempt from: risk management programmes, impact assessments, and website disclosures — IF all three conditions are met: (1) the deployer does not use its own data to train or substantially customise the AI system, (2) the deployer uses the system only for purposes disclosed by the developer, and (3) the deployer makes the developer's impact assessment available to consumers",
"Even if exempt, the deployer must still: notify consumers that a consequential decision was made using high-risk AI, provide adverse-decision notices, and notify the Attorney General within 90 days if algorithmic discrimination is discovered",
"The exemption does NOT apply if the deployer uses proprietary data to train or fine-tune the AI system",
],
},
"affirmative_defense": "Compliance with NIST AI RMF or equivalent recognised framework may serve as an affirmative defense",
"scope_note": "No minimum revenue or consumer-count threshold. Applies to any developer or deployer of a high-risk AI system doing business in Colorado, regardless of size (except the conditional small deployer exemption above).",
"deadline": "June 30, 2026 (delayed from Feb 1, 2026 by SB 25B-004)",
"penalty": "Violations treated as unfair trade practices under Colorado Consumer Protection Act. Civil penalties up to USD 20,000 per violation, counted separately per consumer or transaction. Enforced exclusively by the Colorado Attorney General (no private right of action).",
},
"Texas TRAIGA (HB 149)": {
"government_deployer": {
"label": "Government Entity — Deployer/Developer Obligations",
"obligations": [
"Disclose to each consumer, before or at the time of interaction, that they are interacting with an AI system (clear, conspicuous, plain language — no dark patterns)",
"State agencies must publish AI governance policies online for public transparency",
"Evaluate fairness, accuracy, and effectiveness of AI systems used by the agency",
"Adopt written policies for AI use, including training for designated public employees",
],
},
"healthcare_deployer": {
"label": "Healthcare Provider — Disclosure Obligation",
"obligations": [
"Disclose to patients when AI systems are used in treatment decisions",
],
},
"all_covered": {
"label": "All Covered Persons — Prohibited Practices",
"obligations": [
"Prohibition on developing or deploying AI with the intent to discriminate against a protected class (intentional discrimination only — disparate impact alone does not constitute a violation)",
"Prohibition on social scoring — using AI to assign scores based on socio-economic status or personal characteristics for determining access to services",
"Prohibition on using AI to manipulate human behaviour to incite violence, self-harm, or criminal activity",
"Prohibition on using biometric data to identify individuals without consent or legal authority",
],
},
"deadline": "January 1, 2026",
"penalty": "Civil penalties: USD 10,000–USD 12,000 per curable violation; USD 80,000–USD 200,000 per uncurable violation; USD 2,000–USD 40,000/day for continuing violations. Enforced by Texas Attorney General only (no private right of action).",
},
"Utah AI Policy Act (SB 149)": {
"all_operators": {
"label": "All AI Operators — Disclosure Obligations",
"obligations": [
"Disclose to individuals that they are interacting with generative AI (if direct interaction)",
"Regulated occupations: clearly disclose use of AI when providing services in a regulated industry",
"Prohibition on using AI to represent that a person is a licensed professional if they are not",
],
},
"deadline": "May 1, 2024 (already in effect)",
},
"California CCPA / ADMT Regulations": {
"deployer": {
"label": "ADMT Deployer Obligations",
"obligations": [
"Pre-use notice: inform consumers about ADMT use before processing begins (may be integrated into notice at collection)",
"Right to opt out: provide mechanism for consumers to opt out of ADMT for significant decisions (hiring, lending, school admission, insurance)",
"Access request: respond to consumer requests about ADMT logic, outputs, and decision factors",
"Risk assessment: conduct and document privacy risk assessment for ADMT use cases with significant effects",
"Non-discrimination: ensure ADMT does not result in differential treatment based on protected characteristics",
"Risk assessment attestation: submit annual summary report to CPPA signed under penalty of perjury by an executive management team member",
],
},
"deadline": "Regulations effective January 1, 2026 (approved by OAL on September 22, 2025)",
"phased_deadlines": {
"risk_assessment_compliance": "January 1, 2026 (for processing activities starting after this date)",
"admt_compliance": "January 1, 2027 (businesses must comply with ADMT notice, opt-out, and access requirements)",
"first_attestation": "April 1, 2028 (covering risk assessments conducted in 2026 and 2027)",
"existing_processing": "December 31, 2027 (deadline for risk assessments on processing activities that began before 2026 and continue into 2026)",
},
"threshold_note": "Applies only to for-profit businesses meeting CCPA thresholds: USD 25M+ revenue, 100K+ consumers data, or 50%+ revenue from data sales",
"exemptions": "Government agencies and non-profit organisations are exempt from CCPA (and therefore from ADMT regulations). The CCPA applies only to legal entities organised or operated for the profit or financial benefit of shareholders or owners.",
},
"Illinois HB 3773 (AI in Employment)": {
"employer_hb3773": {
"label": "Employer Obligations — HB 3773 (amends Illinois Human Rights Act)",
"obligations": [
"Prohibition on using AI in employment decisions (recruitment, hiring, promotion, discharge, discipline, tenure, terms/conditions) if the AI has the effect of discriminating on the basis of any class protected under the Illinois Human Rights Act",
"Notify employees before using AI for any covered employment purpose",
"Prohibition on using zip code as a proxy for protected classes in AI-driven employment decisions",
],
"deadline": "January 1, 2026",
"scope": "Employers with 1 or more employees within Illinois during 20 or more calendar weeks per year",
"note": "Enforcement through the Illinois Department of Human Rights (IDHR). Same remedies as other IHRA discrimination claims.",
},
"employer_aivia": {
"label": "Employer Obligations — AI Video Interview Act (AIVIA, 820 ILCS 42)",
"obligations": [
"Notify each applicant before the interview that AI will be used to analyse the video",
"Provide information on how the AI works and what characteristics it evaluates",
"Obtain consent from the applicant before using AI analysis",
"Limit sharing of video: only persons whose expertise is necessary to evaluate applicant may view",
"Destroy video within 30 days of applicant's request",
],
"deadline": "Already in effect (since January 2020)",
"scope": "All employers using AI to analyse video interviews of applicants for positions based in Illinois",
},
},
"DIFC Regulation 10 (AI Processing)": {
"deployer_operator": {
"label": "Deployer / Operator — General Obligations",
"obligations": [
"Provide clear and explicit notice to users at initial use: explain the AI technology, whether it operates autonomously, and its impact on privacy rights",
"Design and operate the AI system in accordance with principles of ethics, fairness, transparency, security, and accountability",
"Conduct a mandatory Data Protection Impact Assessment (DPIA) specifically addressing AI-related risks and mitigation strategies",
"Implement human oversight mechanisms for automated decision-making",
"Ensure data subjects can challenge AI system outcomes and request human review",
"Be able to explain AI processing in non-technical terms with supporting evidence",
"Maintain records of AI processing activities",
"Implement data protection by design and by default for AI systems",
"Monitor AI system outputs for accuracy, fairness, and bias",
],
},
"high_risk": {
"label": "High-Risk AI Processing — Additional Obligations",
"obligations": [
"Obtain AI system certification under the DIFC Commissioner's certification scheme (certification is system-specific, not entity-specific)",
"Appoint an Autonomous Systems Officer (ASO) with substantially similar status, competencies, and tasks as a Data Protection Officer (DPO) — the same person may serve as both ASO and DPO if competencies align",
"The ASO must monitor AI compliance, conduct DPIAs, review risks with senior management, and make recommendations for accountability",
"Ensure the AI system processes personal data solely for human-defined or human-approved purposes",
"Report significant findings from impact assessments to the DIFC Commissioner of Data Protection",
],
"note": "Full enforcement of high-risk processing requirements planned from early 2026. Certification guidance expected during 2026.",
},
"enforcement_note": "Regulation 10 was introduced on September 1, 2023, supplementing the DIFC Data Protection Law No. 5 of 2020. The regulation is legally enforceable today, but the DIFC Commissioner has adopted a phased approach: full enforcement (including certification requirements and ASO mandates) is planned to commence early 2026. The DPL was amended on 8 July 2025 (effective 15 July 2025).",
},
# ════════════════════════════════════════════
# PRIVACY — KEY OBLIGATIONS (AI-relevant)
# ════════════════════════════════════════════
"GDPR (Regulation 2016/679)": {
"key_obligations": [
"Art. 6 — Establish a lawful basis for processing personal data (consent, legitimate interest, contract, etc.)",
"Art. 13-14 — Transparency: inform data subjects about processing, purpose, recipients, and rights",
"Art. 22 — Right not to be subject to solely automated decisions with legal/significant effects; right to human intervention",
"Art. 25 — Data protection by design and by default",
"Art. 30 — Maintain records of processing activities",
"Art. 35 — Conduct a Data Protection Impact Assessment (DPIA) when processing likely to result in high risk",
"Art. 37 — Appoint a Data Protection Officer (DPO) if required (public authority, large-scale monitoring, special categories)",
"Art. 5(1)(c) — Data minimisation: process only what is necessary for specified purpose",
],
"ai_relevant_note": "",
},
"ePrivacy Directive (2002/58/EC)": {
"key_obligations": [
"Obtain consent for use of cookies and tracking technologies on AI interfaces",
"Ensure confidentiality of electronic communications processed by AI systems",
"Restrictions on processing traffic and location data for AI purposes without consent",
],
},
"UAE Federal PDPL (Decree-Law 45/2021)": {
"key_obligations": [
"Establish a lawful basis for processing personal data in the UAE",
"Obtain explicit consent for processing sensitive personal data",
"Inform data subjects about processing purposes, categories, and rights",
"Implement appropriate security measures for personal data",
"Restrict cross-border transfer of personal data (adequacy or safeguards required)",
"Data subjects have the right to object to automated decision-making including profiling",
],
},
"DIFC Data Protection Law (Law No. 5 of 2020)": {
"key_obligations": [
"Register with the DIFC Commissioner of Data Protection",
"Establish a lawful basis for processing (consent, contract, legitimate interests, etc.)",
"Conduct DPIA for high-risk processing activities",
"Appoint a DPO if required",
"Implement data protection by design and by default",
"Data subjects have the right not to be subject to solely automated decisions with legal effects",
],
"ai_relevant_note": "",
},
"ADGM Data Protection Regulations 2021": {
"key_obligations": [
"Register with the ADGM Office of Data Protection",
"Establish a lawful basis for processing",
"Conduct risk assessment for high-risk processing",
"Implement appropriate safeguards for cross-border transfers",
"Data subjects have right to object to automated decision-making",
],
},
"HIPAA (Health Insurance Portability and Accountability Act)": {
"key_obligations": [
"Ensure AI systems handling Protected Health Information (PHI) comply with Privacy Rule",
"Implement Security Rule administrative, physical, and technical safeguards for ePHI",
"Execute Business Associate Agreements (BAA) with AI vendors processing PHI",
"Minimum necessary: limit AI access to only the PHI needed for the specific function",
],
},
"COPPA (Children's Online Privacy Protection Act)": {
"key_obligations": [
"Obtain verifiable parental consent before collecting personal information from children under 13",
"Provide parents with notice of data practices and right to review/delete child's data",
"Limit data collection to what is reasonably necessary for the AI system's activity",
"Implement reasonable security measures for children's personal data",
],
},
"FERPA (Family Educational Rights and Privacy Act)": {
"key_obligations": [
"Obtain consent before disclosing student education records to AI system providers (unless exception applies)",
"Ensure AI vendors qualify as 'school officials' with legitimate educational interest if processing student records",
"Maintain student right to inspect and request correction of education records used by AI",
],
},
"Illinois BIPA (Biometric Information Privacy Act)": {
"key_obligations": [
"Develop and publish a written biometric data retention/destruction policy",
"Obtain informed written consent before collecting biometric identifiers",
"Provide specific disclosures about collection purpose and retention period",
"Prohibition on selling, leasing, or profiting from biometric data",
"Private right of action: individuals can sue for statutory damages (USD 1,000–USD 5,000 per violation)",
],
},
# ════════════════════════════════════════════
# OTHERS — AWARENESS FLAGS
# ════════════════════════════════════════════
"Copyright Directive (2019/790)": {
"flags": [
"Art. 3 — Text and data mining (TDM) exception for scientific research by research organisations and cultural heritage institutions",
"Art. 4 — Commercial TDM allowed unless rightsholder has expressly reserved rights (opt-out)",
"Training AI on copyrighted content requires compliance with TDM provisions",
"Consult legal counsel on licensing requirements for training data",
],
},
"NIS2 Directive (2022/2555)": {
"flags": [
"Entities operating AI in essential/important sectors must implement cybersecurity risk management measures",
"Incident reporting obligations to national CSIRT within 24 hours (early warning) and 72 hours (full notification)",
"Supply chain security requirements apply to AI component providers",
"Consult legal counsel for sector-specific NIS2 implementation in your Member State",
],
},
"Product Liability Directive (2024/2853)": {
"flags": [
"AI software is classified as a 'product' — standalone liability for defective AI",
"Defectiveness can be presumed if provider fails to disclose information or comply with safety requirements",
"Strict liability for providers — no need to prove fault",
"2-year transposition period — Member States must implement by late 2026",
],
},
"Equal Treatment Directives": {
"flags": [
"AI systems making or supporting employment, education, or service decisions must not discriminate on protected grounds",
"Applies to gender (2006/54/EC), racial/ethnic origin (2000/43/EC), religion/age/disability/sexual orientation (2000/78/EC)",
"Indirect discrimination through AI proxy variables (e.g. postal code correlating with ethnicity) can be unlawful",
],
},
"Consumer Rights Directive / GPSR": {
"flags": [
"AI systems interacting directly with consumers must provide clear pre-contractual information",
"General Product Safety Regulation (2023/988) applies to consumer-facing AI products",
"Safety obligations throughout product lifecycle including post-market monitoring",
],
},
"Medical Device Regulation (MDR 2017/745)": {
"flags": [
"AI-based clinical decision support, diagnostic or therapeutic systems may qualify as medical devices",
"Requires CE marking via conformity assessment (self-assessment or Notified Body depending on class)",
"Clinical evaluation and post-market clinical follow-up required",
"Dual regulation: MDR + AI Act apply simultaneously for AI-based medical devices",
],
},
"Machinery Regulation (2023/1230)": {
"flags": [
"AI-integrated robots, drones, autonomous vehicles and industrial machinery must comply",
"Safety components with AI that learn or evolve are classified as high-risk machinery",
"New cybersecurity requirements for connected machinery throughout lifecycle",
"Conformity assessment required — self-assessment or third-party depending on Annex I category",
"Applies from January 20, 2027 (replaces Machinery Directive 2006/42/EC)",
],
},
"Digital Services Act (DSA 2022/2065)": {
"flags": [
"Online platforms using algorithmic recommendation systems must offer non-profiling alternative",
"Transparency obligations: provide information on recommendation system parameters",
"Systemic risk assessment required for very large online platforms (>45M EU users)",
"Content moderation decisions using AI must be explained to affected users",
],
},
"Radio Equipment Directive (RED 2014/53)": {
"flags": [
"Connected devices with AI must meet cybersecurity, data protection, and anti-fraud requirements",
"Delegated acts require compliance with privacy-by-design for radio equipment processing personal data",
"Applies to IoT devices, wearables, smart home devices with embedded AI",
],
},
"FTC Act Section 5 (Unfair/Deceptive Practices)": {
"flags": [
"FTC has actively pursued enforcement against deceptive or unfair AI practices",
"AI systems must not deceive consumers about capabilities, data use, or human involvement",
"Unfair practices include AI systems causing substantial consumer injury",
"FTC guidance emphasises transparency, fairness, and accountability in AI",
],
},
"Title VII (Civil Rights Act)": {
"flags": [
"AI-driven employment decisions must not result in disparate treatment or disparate impact based on race, color, religion, sex, or national origin",
"EEOC has issued guidance on AI and algorithmic hiring tools",
"Employers are liable for discriminatory AI even if provided by a third-party vendor",
],
},
"ADA (Americans with Disabilities Act)": {
"flags": [
"AI systems in employment and public accommodation must not discriminate against individuals with disabilities",
"Reasonable accommodation obligations extend to AI-driven processes",
"AI accessibility requirements for public-facing systems",
],
},
"ECOA (Equal Credit Opportunity Act)": {
"flags": [
"AI credit scoring and lending decisions must not discriminate on prohibited bases",
"Adverse action notices required when AI contributes to credit denial",
"Model explainability requirements for credit decisions",
],
},
"FCRA (Fair Credit Reporting Act)": {
"flags": [
"AI systems generating consumer reports must ensure accuracy",
"Consumers have right to dispute inaccurate AI-generated information",
"Permissible purpose requirements apply to AI-based consumer report use",
],
},
"Fair Housing Act": {
"flags": [
"AI in housing advertising, tenant screening, and lending must not discriminate",
"Algorithmic redlining and proxy discrimination are enforceable violations",
"HUD has investigated algorithmic discrimination in housing platforms",
],
},
"Copyright Law (Decree-Law 38/2021) — No TDM exception": {
"flags": [
"CRITICAL: UAE has no text and data mining exception — all training data must be licensed",
"Using copyrighted content to train AI models without licence is potentially infringing",
"Consult legal counsel on licensing requirements for all training data used in UAE context",
],
},
"Cybercrime Law (Decree-Law 34/2021)": {
"flags": [
"Art. 42 — Creating or disseminating deepfakes or manipulated content using AI is a criminal offence",
"Unauthorised access to AI systems or data is punishable",
"Interception of electronic communications by AI systems without authorisation is prohibited",
],
},
"Civil Transactions Law (Federal Law 5/1985)": {
"flags": [
"Art. 292 — 'Guardian of things' doctrine may apply to AI system operators for harm caused",
"General tort liability framework applies to AI-caused damage",
"No AI-specific liability framework yet — general civil law applies",
],
},
"Consumer Protection (Federal Law 15/2020)": {
"flags": [
"AI interacting with consumers must not engage in deceptive or misleading practices",
"Product safety requirements apply to AI-powered consumer products",
],
},
"Anti-Discrimination (Decree-Law 34/2023)": {
"flags": [
"Prohibition of discrimination based on race, colour, ethnic origin, religion, or disability applies to AI decisions",
"Potential liability if AI system produces discriminatory outcomes in UAE",
],
},
"Labour Law (Decree-Law 33/2021)": {
"flags": [
"AI in employment (hiring, monitoring, evaluation, termination) must respect worker rights",
"Workplace surveillance using AI must be proportionate and disclosed to employees",
"Worker data processed by AI subject to data protection obligations",
],
},
}
# ─────────────────────────────────────────────────
# SECTION 3: OVERLAP ANALYSIS
# Obligations that can be combined across regulations
# ─────────────────────────────────────────────────
OVERLAP_ANALYSIS = [
# ═══════════════════════════════════════════
# TOPIC-BASED SYNERGIES
# Each synergy groups all regulations requiring the same compliance
# activity. Shown only when 2+ listed regulations apply to the user.
# ═══════════════════════════════════════════
{
"id": "impact_assessment",
"title": "Impact Assessment",
"icon": "🔍",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"GDPR (Regulation 2016/679)",
"Colorado AI Act (SB 24-205)",
"California CCPA / ADMT Regulations",
"DIFC Regulation 10 (AI Processing)",
"DIFC Data Protection Law (Law No. 5 of 2020)",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "Fundamental Rights Impact Assessment (FRIA)",
"GDPR (Regulation 2016/679)": "Data Protection Impact Assessment (DPIA)",
"Colorado AI Act (SB 24-205)": "Annual impact assessment for high-risk AI",
"California CCPA / ADMT Regulations": "ADMT risk assessment + annual attestation",
"DIFC Regulation 10 (AI Processing)": "AI-specific DPIA",
"DIFC Data Protection Law (Law No. 5 of 2020)": "DPIA for high-risk processing",
},
"recommendation": "Design one master impact assessment framework covering fundamental rights (EU), algorithmic discrimination (Colorado), automated decision-making risks (California), and privacy (GDPR/DIFC). Use jurisdiction-specific annexes for local requirements. Annual review cycle satisfies Colorado's requirement and supports EU/California compliance.",
"shared_elements": [
"Describe the AI processing and its purpose",
"Identify categories of affected individuals and groups",
"Assess risks of harm (discrimination, privacy, fundamental rights)",
"Document safeguards and mitigation measures",
"Review and update periodically",
],
},
{
"id": "transparency_notice",
"title": "Transparency & Notice to Individuals",
"icon": "📢",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"GDPR (Regulation 2016/679)",
"Colorado AI Act (SB 24-205)",
"Texas TRAIGA (HB 149)",
"Utah AI Policy Act (SB 149)",
"California CCPA / ADMT Regulations",
"Illinois HB 3773 (AI in Employment)",
"DIFC Regulation 10 (AI Processing)",
"Consumer Rights Directive / GPSR",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "AI interaction disclosure + high-risk subject notice",
"GDPR (Regulation 2016/679)": "Data processing transparency",
"Colorado AI Act (SB 24-205)": "Consumer disclosure for consequential decisions",
"Texas TRAIGA (HB 149)": "Government/healthcare AI disclosure",
"Utah AI Policy Act (SB 149)": "AI interaction and regulated occupation disclosure",
"California CCPA / ADMT Regulations": "ADMT pre-use notice",
"Illinois HB 3773 (AI in Employment)": "Employee notice of AI use in employment decisions",
"DIFC Regulation 10 (AI Processing)": "User notice at initial AI interaction",
"Consumer Rights Directive / GPSR": "Product safety information",
},
"recommendation": "Design a layered transparency policy: (1) front-end disclosure that an AI system is involved, (2) detailed notice on processing purposes, data types, and rights, (3) decision-specific notices when AI influences significant outcomes. One policy document with jurisdiction-specific triggers.",
"shared_elements": [
"Disclose AI system involvement to affected individuals",
"Explain what data is processed and for what purpose",
"Inform about AI-driven decisions and their implications",
"Provide information on individual rights and opt-out mechanisms",
"Maintain accessible, plain-language documentation",
],
},
{
"id": "human_oversight",
"title": "Human Oversight & Right to Review",
"icon": "👤",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"GDPR (Regulation 2016/679)",
"Colorado AI Act (SB 24-205)",
"California CCPA / ADMT Regulations",
"DIFC Regulation 10 (AI Processing)",
"DIFC Data Protection Law (Law No. 5 of 2020)",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "Human oversight design + competent oversight personnel",
"GDPR (Regulation 2016/679)": "Right not to be subject to solely automated decisions",
"Colorado AI Act (SB 24-205)": "Consumer right to human review and appeal of consequential decisions",
"California CCPA / ADMT Regulations": "Consumer right to opt out of ADMT",
"DIFC Regulation 10 (AI Processing)": "Human oversight mechanisms + right to challenge AI outcomes",
"DIFC Data Protection Law (Law No. 5 of 2020)": "Right not to be subject to solely automated decisions",
},
"recommendation": "Design one unified human oversight framework: define escalation protocol, assign qualified reviewers, set response timeframes, and implement an appeal mechanism. A single process can satisfy all jurisdictions simultaneously.",
"shared_elements": [
"Define when human review is triggered (automatically or on request)",
"Assign qualified, trained reviewers with authority to override AI decisions",
"Establish response timeframes for review requests",
"Implement appeal mechanism with documented outcomes",
"Provide alternative non-AI decision path where required",
],
},
{
"id": "risk_management",
"title": "Risk Management System",
"icon": "🛡️",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"Colorado AI Act (SB 24-205)",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "Lifecycle risk management system",
"Colorado AI Act (SB 24-205)": "Risk management policy and programme + NIST AI RMF (affirmative defense)",
},
"recommendation": "Adopt a NIST AI RMF-aligned risk management framework. This satisfies the EU AI Act's lifecycle risk management requirement, fulfils Colorado's risk management policy/programme obligation, and provides Colorado's affirmative defense (rebuttable presumption of reasonable care).",
"shared_elements": [
"Map, classify, and prioritise AI-specific risks",
"Implement mitigation measures proportionate to risk level",
"Monitor and update throughout the AI system lifecycle",
"Document risk decisions and residual risks",
"Align with NIST AI RMF for cross-jurisdiction recognition",
],
},
{
"id": "bias_discrimination",
"title": "Non-Discrimination & Bias Monitoring",
"icon": "⚖️",
"regulations": [
"Colorado AI Act (SB 24-205)",
"Illinois HB 3773 (AI in Employment)",
"Texas TRAIGA (HB 149)",
"California CCPA / ADMT Regulations",
"DIFC Regulation 10 (AI Processing)",
"EU AI Act (Regulation 2024/1689)",
"Equal Treatment Directives",
"Title VII (Civil Rights Act)",
],
"reg_labels": {
"Colorado AI Act (SB 24-205)": "Algorithmic discrimination review + AG notification",
"Illinois HB 3773 (AI in Employment)": "Disparate impact prohibition + zip code proxy ban",
"Texas TRAIGA (HB 149)": "Intentional discrimination prohibition",
"California CCPA / ADMT Regulations": "ADMT non-discrimination requirement",
"DIFC Regulation 10 (AI Processing)": "Fairness and bias monitoring",
"EU AI Act (Regulation 2024/1689)": "Data governance for bias prevention",
"Equal Treatment Directives": "EU non-discrimination framework",
"Title VII (Civil Rights Act)": "US employment non-discrimination",
},
"recommendation": "Implement one bias monitoring programme: regular output audits across protected characteristics, documented methodology, escalation protocol for detected bias, and periodic reporting. The methodology is the same regardless of jurisdiction — only the legal standard varies (disparate impact vs. intentional discrimination).",
"shared_elements": [
"Audit AI system outputs for differential treatment across protected groups",
"Document testing methodology and results",
"Establish escalation protocol when bias is detected",
"Report discrimination to relevant authorities where required",
"Prohibit use of proxy variables for protected characteristics",
],
},
{
"id": "documentation_records",
"title": "Documentation & Record-Keeping",
"icon": "📋",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"EU AI Act — GPAI Framework (Chapter V)",
"GDPR (Regulation 2016/679)",
"Colorado AI Act (SB 24-205)",
"California CCPA / ADMT Regulations",
"DIFC Regulation 10 (AI Processing)",
"DIFC Data Protection Law (Law No. 5 of 2020)",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "Technical documentation + logging + QMS",
"EU AI Act — GPAI Framework (Chapter V)": "Model documentation + training data summary",
"GDPR (Regulation 2016/679)": "Records of processing activities",
"Colorado AI Act (SB 24-205)": "Developer documentation for deployers",
"California CCPA / ADMT Regulations": "Risk assessment documentation + attestation",
"DIFC Regulation 10 (AI Processing)": "AI processing activity records",
"DIFC Data Protection Law (Law No. 5 of 2020)": "Processing registration with Commissioner",
},
"recommendation": "Build one centralised documentation repository covering: system technical specifications, training data summaries, processing records, risk assessment results, incident logs, and compliance attestations. Different regulations require different documents, but they feed into the same governance structure.",
"shared_elements": [
"Maintain technical documentation of the AI system",
"Record all processing activities involving personal data",
"Document training data sources and governance",
"Keep audit logs of AI system decisions",
"Store and update risk assessment results",
],
},
{
"id": "accountability_governance",
"title": "Accountability & Governance Officer",
"icon": "🏛️",
"regulations": [
"GDPR (Regulation 2016/679)",
"DIFC Data Protection Law (Law No. 5 of 2020)",
"DIFC Regulation 10 (AI Processing)",
"EU AI Act (Regulation 2024/1689)",
],
"reg_labels": {
"GDPR (Regulation 2016/679)": "Data Protection Officer (DPO) appointment",
"DIFC Data Protection Law (Law No. 5 of 2020)": "DPO appointment",
"DIFC Regulation 10 (AI Processing)": "Autonomous Systems Officer (ASO) — similar status to DPO",
"EU AI Act (Regulation 2024/1689)": "AI literacy for staff and operators",
},
"recommendation": "Establish a unified governance structure: the DPO and ASO roles overlap significantly (DIFC explicitly requires similar status). A single person or team can cover both roles. Add AI literacy training for all relevant staff to satisfy the EU AI Act.",
"shared_elements": [
"Appoint a designated officer for data protection and AI governance",
"Ensure officer independence and direct reporting to senior management",
"Conduct regular compliance monitoring and audits",
"Provide AI literacy training to staff operating AI systems",
"Maintain governance documentation and policies",
],
},
{
"id": "data_governance",
"title": "Data Governance & Training Data",
"icon": "📊",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"EU AI Act — GPAI Framework (Chapter V)",
"GDPR (Regulation 2016/679)",
"Colorado AI Act (SB 24-205)",
"DIFC Data Protection Law (Law No. 5 of 2020)",
"Copyright Directive (2019/790)",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "Data quality, representativeness, and bias testing",
"EU AI Act — GPAI Framework (Chapter V)": "Training data summary publication",
"GDPR (Regulation 2016/679)": "Data protection by design and by default + data minimisation",
"Colorado AI Act (SB 24-205)": "Training data summary for deployers",
"DIFC Data Protection Law (Law No. 5 of 2020)": "Data protection by design and by default",
"Copyright Directive (2019/790)": "Text and data mining compliance for training data",
},
"recommendation": "Implement one data governance framework: data quality standards, representativeness checks, minimisation principles, training data documentation, and copyright compliance. The EU AI Act's data governance and GDPR's data protection by design are complementary and share the same operational foundation.",
"shared_elements": [
"Document training data sources, quality, and representativeness",
"Apply data minimisation and purpose limitation",
"Implement data protection by design and by default",
"Ensure copyright compliance for training data",
"Test datasets for bias and representativeness gaps",
],
},
{
"id": "incident_reporting",
"title": "Incident Reporting & Notification",
"icon": "🚨",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"EU AI Act — GPAI Framework (Chapter V)",
"GDPR (Regulation 2016/679)",
"Colorado AI Act (SB 24-205)",
"NIS2 Directive (2022/2555)",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "Serious incident reporting to market surveillance authorities",
"EU AI Act — GPAI Framework (Chapter V)": "Systemic risk incident reporting to AI Office",
"GDPR (Regulation 2016/679)": "Data breach notification within 72 hours",
"Colorado AI Act (SB 24-205)": "AG notification within 90 days of discovering algorithmic discrimination",
"NIS2 Directive (2022/2555)": "Significant incident reporting to CSIRT",
},
"recommendation": "Build one incident response protocol with jurisdiction-specific reporting channels and timeframes. Classify incidents by type (safety, discrimination, data breach, cybersecurity) and route to the appropriate authority. A single triage process covers all requirements.",
"shared_elements": [
"Establish incident detection and classification procedures",
"Define reporting channels for each jurisdiction and authority",
"Set internal escalation and response timeframes",
"Document incidents and corrective actions taken",
"Conduct post-incident reviews and system updates",
],
},
{
"id": "security_robustness",
"title": "Security & Robustness",
"icon": "🔐",
"regulations": [
"EU AI Act (Regulation 2024/1689)",
"EU AI Act — GPAI Framework (Chapter V)",
"GDPR (Regulation 2016/679)",
"DIFC Regulation 10 (AI Processing)",
"UAE Federal PDPL (Decree-Law 45/2021)",
"NIS2 Directive (2022/2555)",
],
"reg_labels": {
"EU AI Act (Regulation 2024/1689)": "Accuracy, robustness, and cybersecurity",
"EU AI Act — GPAI Framework (Chapter V)": "Cybersecurity for systemic risk models",
"GDPR (Regulation 2016/679)": "Security of processing",
"DIFC Regulation 10 (AI Processing)": "System integrity and security principles",
"UAE Federal PDPL (Decree-Law 45/2021)": "Appropriate security measures for personal data",
"NIS2 Directive (2022/2555)": "Network and information security measures",
},
"recommendation": "Implement one security framework covering both AI-specific concerns (adversarial robustness, accuracy monitoring, model integrity) and data protection requirements (encryption, access controls, breach prevention). ISO 27001 or NIST CSF provide a unified foundation.",
"shared_elements": [
"Ensure AI system accuracy and robustness against adversarial inputs",
"Implement encryption and access controls for data and models",
"Monitor system integrity and detect anomalies",
"Conduct regular security testing and vulnerability assessments",
"Maintain incident response capabilities",
],
},
{
"id": "frontier_ai_frameworks",
"title": "Frontier AI Safety Frameworks",
"icon": "🛡️",
"regulations": [
"California SB 53 (Frontier AI Transparency)",
"New York RAISE Act (Frontier AI Safety)",
],
"reg_labels": {
"California SB 53 (Frontier AI Transparency)": "CA SB 53 — Frontier AI Framework + transparency report + 15-day incident reporting",
"New York RAISE Act (Frontier AI Safety)": "NY RAISE Act — Safety and security protocol + disclosure + 72-hour incident reporting",
},
"shared_elements": [
"Publish safety frameworks/protocols publicly (with trade secret redactions)",
"Report critical safety incidents to state authorities",
"Document testing procedures and risk mitigation measures",
"Annual review and update of safety protocols",
"Whistleblower protections for employees reporting safety concerns",
],
"recommendation": "Build a unified safety framework that satisfies both CA and NY requirements. CA requires a Frontier AI Framework and 15-day incident reporting; NY requires safety protocols and stricter 72-hour incident reporting. Use the 72-hour NY timeline as your baseline to comply with both.",
},
{
"id": "frontier_systemic_risk_governance",
"title": "Frontier AI / Systemic Risk Model Governance",
"icon": "⚡",
"regulations": [
"EU AI Act — GPAI Framework (Chapter V)",
"California SB 53 (Frontier AI Transparency)",
"New York RAISE Act (Frontier AI Safety)",
],
"reg_labels": {
"EU AI Act — GPAI Framework (Chapter V)": "EU AI Act Art. 55 — Model evaluation, risk mitigation, incident reporting, cybersecurity",
"California SB 53 (Frontier AI Transparency)": "CA SB 53 — Frontier AI Framework, transparency reports, whistleblower protections",
"New York RAISE Act (Frontier AI Safety)": "NY RAISE Act — Safety protocols, 72-hour incident reporting, DFS oversight",
},
"shared_elements": [
"Model evaluation and adversarial/safety testing before deployment",
"Incident reporting to authorities (EU: AI Office, CA: OES, NY: AG + DHSES)",
"Risk mitigation documentation and procedures",
"Cybersecurity and model weight security requirements",
],
"recommendation": "All three regimes target the same models. Build one comprehensive safety governance framework covering model evaluation, risk documentation, incident reporting, and cybersecurity. Use the EU AI Act Art. 55 obligations as the most comprehensive baseline, then add CA/NY-specific reporting timelines and whistleblower requirements.",
},
{
"id": "frontier_difc_risk_assessment",
"title": "Risk Assessment — Frontier AI and DIFC Deployment",
"icon": "📋",
"regulations": [
"California SB 53 (Frontier AI Transparency)",
"New York RAISE Act (Frontier AI Safety)",
"DIFC Regulation 10 (AI Processing)",
],
"reg_labels": {
"California SB 53 (Frontier AI Transparency)": "CA SB 53 — Catastrophic risk assessment + transparency report",
"New York RAISE Act (Frontier AI Safety)": "NY RAISE Act — Critical harm assessment + safety protocol",
"DIFC Regulation 10 (AI Processing)": "DIFC Reg 10 — AI impact assessment + high-risk classification",
},
"shared_elements": [
"Risk assessment before deployment",
"Documentation of safety measures and safeguards",
"Ongoing monitoring of AI system behaviour",
],
"recommendation": "If you develop a frontier model and deploy in DIFC, your CA/NY risk assessments (catastrophic risk focus) are complementary to but do not replace DIFC deployer-focused AI impact assessment. Build one risk assessment process with two outputs: a developer safety assessment (CA/NY) and a deployer impact assessment (DIFC).",
},
]
# ─────────────────────────────────────────────────
# SECTION: GAP ANALYSIS
# Pairwise, DIRECTIONAL compliance-coverage matrix.
#
# Structure:
#   GAP_ANALYSIS[baseline_reg][target_reg] = {
#       "coverage": int,   # rough % of target_reg's obligations already
#                          # satisfied by complying with baseline_reg
#       "covered":  [...], # obligations of target_reg that baseline work covers
#       "gaps":     [...], # target_reg obligations still outstanding
#   }
#
# NOTE: coverage is directional and intentionally asymmetric —
# e.g. EU AI Act → Colorado is 72, but Colorado → EU AI Act is 55,
# because the EU regime is broader than Colorado's.
# Not every pair is present: the frontier-AI regimes (CA SB 53,
# NY RAISE, EU GPAI) are only cross-referenced against regimes
# that plausibly apply to the same models.
# ─────────────────────────────────────────────────
GAP_ANALYSIS = {
# ── Baseline: EU AI Act ──
"EU AI Act (Regulation 2024/1689)": {
"Colorado AI Act (SB 24-205)": {
"coverage": 72,
"covered": [
"Risk management system maps to Colorado's risk management policy requirement",
"Technical documentation satisfies Colorado's documentation obligations",
"Human oversight provisions cover Colorado's human review requirements",
"Post-market monitoring aligns with ongoing discrimination testing",
],
"gaps": [
"Colorado-specific annual impact assessment format",
"Colorado-specific consumer notification requirements (right to explanation, right to appeal)",
"Attorney General notification within 90 days of discovering discrimination",
"Colorado's affirmative defense documentation (NIST AI RMF compliance)",
],
},
"Texas TRAIGA (HB 149)": {
"coverage": 90,
"covered": [
"EU AI Act prohibited practices cover and exceed Texas TRAIGA prohibitions (discrimination, social scoring, behaviour manipulation)",
"EU AI Act transparency obligations cover Texas government disclosure requirements",
"EU AI Act non-discrimination framework covers Texas intentional-discrimination prohibition",
],
"gaps": [
"Texas-specific biometric consent requirement (outside EU scope)",
],
},
"Utah AI Policy Act (SB 149)": {
"coverage": 80,
"covered": [
"AI Act transparency obligations cover Utah disclosure requirements",
"AI system documentation supports Utah regulated-industry disclosures",
],
"gaps": [
"Utah-specific regulated occupation disclosure format",
],
},
"California CCPA / ADMT Regulations": {
"coverage": 60,
"covered": [
"Risk management and documentation align with California impact assessment requirements",
"Transparency provisions overlap with pre-use notice requirements",
],
"gaps": [
"CCPA-specific consumer opt-out mechanism for ADMT",
"California-specific access request response procedures",
"CCPA threshold applicability analysis (USD 25M / 100K consumers)",
],
},
"Illinois HB 3773 (AI in Employment)": {
"coverage": 65,
"covered": [
"EU AI Act transparency covers Illinois employee notification requirement",
"EU AI Act non-discrimination framework (high-risk obligations) addresses Illinois prohibition on discriminatory AI",
],
"gaps": [
"Illinois-specific prohibition on using zip code as proxy for protected classes",
"AIVIA-specific: informed consent for AI-analysed video interviews",
"AIVIA-specific: 30-day video destruction on applicant request",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 74,
"covered": [
"Risk management system maps to DIFC Algorithmic Impact Assessment",
"Human oversight requirements align",
"Transparency and explainability provisions align",
"Post-market monitoring covers ongoing monitoring requirement",
],
"gaps": [
"DIFC-specific registration with Commissioner of Data Protection",
"DIFC-specific AIA format and reporting requirements",
"DIFC DPL compliance (separate from Regulation 10)",
],
},
},
# ── Baseline: Colorado AI Act ──
"Colorado AI Act (SB 24-205)": {
"EU AI Act (Regulation 2024/1689)": {
"coverage": 55,
"covered": [
"Risk management policy partially satisfies EU AI Act Art. 9",
"Impact assessment partially covers FRIA requirements",
"Algorithmic discrimination testing aligns with bias requirements",
],
"gaps": [
"EU AI Act conformity assessment and CE marking",
"Technical documentation to EU Annex IV standard",
"Quality Management System (QMS)",
"EU database registration",
"Post-market monitoring system to EU standard",
"Serious incident reporting to EU authorities",
"Data governance requirements",
],
},
"Texas TRAIGA (HB 149)": {
"coverage": 90,
"covered": [
"Colorado's transparency and disclosure obligations cover Texas government disclosure requirements",
"Colorado's algorithmic discrimination testing exceeds Texas intentional-discrimination-only prohibition",
"Colorado's risk management framework exceeds Texas requirements",
],
"gaps": [
"Texas-specific biometric consent requirement",
],
},
"Utah AI Policy Act (SB 149)": {
"coverage": 65,
"covered": [
"Colorado's disclosure obligations partially cover Utah's AI interaction disclosure",
"General transparency requirements overlap",
],
"gaps": [
"Utah-specific regulated occupation disclosure format",
"Utah-specific generative AI interaction disclosure",
],
},
"California CCPA / ADMT Regulations": {
"coverage": 68,
"covered": [
"Annual impact assessment maps to California risk assessment",
"Consumer notification requirements partially overlap",
"Discrimination testing covers California non-discrimination requirements",
],
"gaps": [
"CCPA-specific consumer opt-out mechanism for ADMT",
"California-specific access request response procedures",
"CCPA threshold applicability analysis",
],
},
"Illinois HB 3773 (AI in Employment)": {
"coverage": 70,
"covered": [
"Colorado's consumer notification covers Illinois employee notification requirement",
"Colorado's algorithmic discrimination testing covers Illinois non-discrimination obligation",
"Colorado's impact assessment addresses discriminatory outcomes",
],
"gaps": [
"Illinois-specific prohibition on using zip code as proxy for protected classes",
"AIVIA-specific: informed consent for AI-analysed video interviews",
"AIVIA-specific: 30-day video destruction on applicant request",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 65,
"covered": [
"Impact assessment maps to Algorithmic Impact Assessment",
"Risk management policy aligns",
"Bias testing covers fairness monitoring",
],
"gaps": [
"DIFC-specific data protection requirements",
"DIFC Commissioner registration and reporting",
"DIFC-specific consent and transparency requirements",
],
},
},
# ── Baseline: Texas TRAIGA (narrow regime, hence low coverage scores) ──
"Texas TRAIGA (HB 149)": {
"EU AI Act (Regulation 2024/1689)": {
"coverage": 10,
"covered": [
"Prohibition on intentional discrimination partially aligns with EU non-discrimination requirements",
],
"gaps": [
"Full risk management system",
"Technical documentation",
"Human oversight mechanisms",
"Conformity assessment and CE marking",
"Post-market monitoring and incident reporting",
"Data governance requirements",
"EU database registration",
"Transparency obligations — Texas has no private-sector disclosure requirement",
],
},
"Colorado AI Act (SB 24-205)": {
"coverage": 10,
"covered": [
"Prohibition on intentional discrimination partially aligns with Colorado's algorithmic discrimination focus",
],
"gaps": [
"Full risk management programme",
"Annual impact assessment",
"Algorithmic discrimination testing (Colorado covers disparate impact; Texas only covers intent)",
"Consumer right to explanation and appeal",
"Consumer disclosure and notification",
"Attorney General notification within 90 days",
],
},
"Utah AI Policy Act (SB 149)": {
"coverage": 20,
"covered": [
"Government disclosure requirements partially overlap with Utah's AI interaction disclosure",
],
"gaps": [
"Utah-specific regulated occupation disclosure",
"Utah-specific generative AI interaction disclosure",
"Texas has no private-sector disclosure requirements",
],
},
"California CCPA / ADMT Regulations": {
"coverage": 5,
"covered": [
"Non-discrimination principle partially aligns",
],
"gaps": [
"Consumer opt-out mechanism for ADMT",
"Impact assessment",
"Access request response procedures",
"Pre-use notice to consumers",
"Non-discrimination testing (California covers disparate impact; Texas only covers intent)",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 5,
"covered": [
"Prohibition on intentional discrimination partially aligns with DIFC fairness requirements",
],
"gaps": [
"DIFC Algorithmic Impact Assessment",
"DIFC Commissioner registration",
"AI system certification (if high-risk)",
"Autonomous Systems Officer appointment",
"DIFC-specific transparency and consent requirements",
"Fairness and bias monitoring",
],
},
"Illinois HB 3773 (AI in Employment)": {
"coverage": 15,
"covered": [
"Prohibition on intentional discrimination partially aligns with Illinois non-discrimination requirement",
],
"gaps": [
"Employee notification before AI use in employment decisions",
"Prohibition on zip code as proxy for protected classes",
"AIVIA-specific: consent for AI-analysed video interviews",
"AIVIA-specific: video destruction obligations",
],
},
},
# ── Baseline: Utah AI Policy Act ──
"Utah AI Policy Act (SB 149)": {
"EU AI Act (Regulation 2024/1689)": {
"coverage": 25,
"covered": [
"AI interaction disclosure partially covers Art. 50(1)",
],
"gaps": [
"Full risk management system",
"Technical documentation",
"Human oversight mechanisms",
"Conformity assessment and CE marking",
"Post-market monitoring and incident reporting",
"Synthetic content labelling",
"Data governance requirements",
],
},
"Colorado AI Act (SB 24-205)": {
"coverage": 30,
"covered": [
"AI disclosure requirements partially overlap",
],
"gaps": [
"Full risk management programme",
"Annual impact assessment",
"Algorithmic discrimination testing",
"Consumer right to explanation and appeal",
"Attorney General notification",
"NIST AI RMF affirmative defense documentation",
],
},
"Texas TRAIGA (HB 149)": {
"coverage": 60,
"covered": [
"Utah AI interaction disclosure partially covers Texas government disclosure obligation",
"General transparency approach aligns with Texas principles",
],
"gaps": [
"Texas-specific prohibited practices (social scoring, behaviour manipulation, biometric misuse)",
"Texas-specific government transparency policies",
],
},
"California CCPA / ADMT Regulations": {
"coverage": 20,
"covered": [
"Disclosure requirements partially overlap with pre-use notice",
],
"gaps": [
"Consumer opt-out mechanism for ADMT",
"Impact assessment",
"Access request response procedures",
"Non-discrimination testing",
"CCPA threshold analysis",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 20,
"covered": [
"Transparency requirements partially align",
],
"gaps": [
"DIFC Algorithmic Impact Assessment",
"DIFC Commissioner registration",
"AI system certification (if high-risk)",
"Autonomous Systems Officer appointment",
"DIFC-specific data protection compliance",
],
},
"Illinois HB 3773 (AI in Employment)": {
"coverage": 25,
"covered": [
"AI disclosure requirements partially overlap with Illinois employee notification",
],
"gaps": [
"Prohibition on discriminatory AI in employment (Illinois-specific scope)",
"Prohibition on using zip code as proxy for protected classes",
"AIVIA-specific: consent for AI-analysed video interviews",
"AIVIA-specific: video destruction obligations",
],
},
},
# ── Baseline: California CCPA / ADMT ──
"California CCPA / ADMT Regulations": {
"EU AI Act (Regulation 2024/1689)": {
"coverage": 45,
"covered": [
"Impact assessment partially covers FRIA",
"Pre-use notice partially satisfies Art. 50 transparency",
"Non-discrimination requirements align with bias obligations",
],
"gaps": [
"Technical documentation to EU Annex IV standard",
"Conformity assessment and CE marking",
"Quality Management System",
"EU database registration",
"Post-market monitoring to EU standard",
"Serious incident reporting",
"Human oversight mechanisms",
],
},
"Colorado AI Act (SB 24-205)": {
"coverage": 65,
"covered": [
"Impact assessment maps to annual impact assessment",
"Consumer opt-out partially covers Colorado's consumer rights",
"Non-discrimination requirements overlap",
],
"gaps": [
"Colorado-specific risk management programme",
"Attorney General notification",
"NIST AI RMF affirmative defense documentation",
"Colorado-specific right to explanation format",
],
},
"Texas TRAIGA (HB 149)": {
"coverage": 85,
"covered": [
"California's non-discrimination requirements exceed Texas intentional-discrimination-only prohibition",
"California's consumer disclosure exceeds Texas's limited (government-only) disclosure",
"California's impact assessment framework exceeds Texas requirements",
],
"gaps": [
"Texas-specific biometric consent requirement",
"Texas-specific social scoring prohibition",
],
},
"Utah AI Policy Act (SB 149)": {
"coverage": 35,
"covered": [
"Pre-use notice partially covers Utah disclosure requirements",
],
"gaps": [
"Utah-specific regulated occupation disclosure",
"Utah-specific generative AI interaction disclosure",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 40,
"covered": [
"Impact assessment partially maps to Algorithmic Impact Assessment",
"Consumer rights partially align with data subject rights",
],
"gaps": [
"DIFC Commissioner registration",
"AI system certification (if high-risk)",
"Autonomous Systems Officer appointment",
"DIFC-specific consent and transparency",
],
},
"Illinois HB 3773 (AI in Employment)": {
"coverage": 55,
"covered": [
"California pre-use notice partially covers Illinois employee notification",
"California non-discrimination requirement aligns with Illinois prohibition on discriminatory AI",
"California impact assessment addresses employment AI risks",
],
"gaps": [
"Illinois-specific prohibition on zip code as proxy for protected classes",
"AIVIA-specific: consent for AI-analysed video interviews",
"AIVIA-specific: video destruction obligations",
],
},
},
# ── Baseline: Illinois HB 3773 ──
"Illinois HB 3773 (AI in Employment)": {
"EU AI Act (Regulation 2024/1689)": {
"coverage": 15,
"covered": [
"Employee notification requirement partially covers Art. 50(1) disclosure",
"Non-discrimination obligation aligns directionally with EU fairness requirements",
],
"gaps": [
"Full risk management system",
"Technical documentation",
"Human oversight mechanisms",
"Conformity assessment and CE marking",
"Post-market monitoring",
"Synthetic content labelling",
"Data governance",
],
},
"Colorado AI Act (SB 24-205)": {
"coverage": 25,
"covered": [
"Employee notification partially overlaps with Colorado consumer disclosure",
"Non-discrimination obligation aligns with Colorado algorithmic discrimination testing",
],
"gaps": [
"Full risk management programme",
"Annual impact assessment",
"Consumer right to explanation and appeal",
"Attorney General notification",
"Website disclosure statement",
],
},
"Texas TRAIGA (HB 149)": {
"coverage": 35,
"covered": [
"Non-discrimination obligation partially aligns with Texas intentional-discrimination prohibition",
"Illinois covers broader scope (disparate impact) than Texas (intent only)",
],
"gaps": [
"Texas-specific biometric consent requirement",
"Texas-specific social scoring and behaviour manipulation prohibitions",
],
},
"Utah AI Policy Act (SB 149)": {
"coverage": 20,
"covered": [
"Employee notification partially covers Utah AI disclosure",
],
"gaps": [
"Utah-specific regulated occupation disclosure",
"Utah-specific generative AI interaction disclosure",
],
},
"California CCPA / ADMT Regulations": {
"coverage": 20,
"covered": [
"Employee notification partially overlaps with California pre-use notice",
"Non-discrimination obligation aligns with California ADMT non-discrimination requirement",
],
"gaps": [
"Consumer opt-out mechanism for ADMT",
"Impact assessment",
"Access request response procedures",
"CCPA threshold analysis",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 10,
"covered": [
"Notification requirement partially aligns with DIFC transparency obligations",
],
"gaps": [
"DIFC Algorithmic Impact Assessment",
"DIFC Commissioner registration",
"AI system certification (if high-risk)",
"Autonomous Systems Officer appointment",
"DIFC-specific data protection compliance",
"Fairness and bias monitoring",
],
},
},
# ── Baseline: DIFC Regulation 10 (also cross-referenced to frontier regimes) ──
"DIFC Regulation 10 (AI Processing)": {
"EU AI Act (Regulation 2024/1689)": {
"coverage": 45,
"covered": [
"Algorithmic Impact Assessment partially covers FRIA",
"Human oversight requirements align",
"Transparency requirements partially align",
],
"gaps": [
"Full conformity assessment and CE marking",
"Technical documentation to EU standard",
"Quality Management System",
"EU database registration",
"Post-market monitoring to EU standard",
"Data governance — specific training data requirements",
"Logging and record-keeping to EU specification",
],
},
"Colorado AI Act (SB 24-205)": {
"coverage": 55,
"covered": [
"Algorithmic Impact Assessment maps to annual impact assessment",
"Fairness monitoring covers discrimination testing",
"Transparency requirements partially align",
],
"gaps": [
"Colorado-specific consumer notification format",
"Attorney General notification requirement",
"Colorado-specific affirmative defense documentation",
],
},
"Texas TRAIGA (HB 149)": {
"coverage": 85,
"covered": [
"DIFC transparency and fairness requirements exceed Texas disclosure and non-discrimination obligations",
"DIFC impact assessment framework exceeds Texas requirements",
"DIFC prohibited practices cover Texas prohibitions (social scoring, bias, behaviour manipulation)",
],
"gaps": [
"Texas-specific biometric consent requirement",
],
},
"Utah AI Policy Act (SB 149)": {
"coverage": 35,
"covered": [
"Transparency and disclosure requirements partially overlap",
],
"gaps": [
"Utah-specific regulated occupation disclosure",
"Utah-specific generative AI interaction disclosure",
],
},
"California CCPA / ADMT Regulations": {
"coverage": 40,
"covered": [
"Impact assessment partially maps to California risk assessment",
"Data subject rights partially align with consumer rights",
],
"gaps": [
"CCPA-specific consumer opt-out mechanism",
"California-specific access request response procedures",
"CCPA threshold applicability analysis",
],
},
"Illinois HB 3773 (AI in Employment)": {
"coverage": 45,
"covered": [
"DIFC transparency requirements partially cover Illinois employee notification",
"DIFC fairness and bias monitoring partially covers Illinois non-discrimination obligation",
],
"gaps": [
"Illinois-specific prohibition on zip code as proxy for protected classes",
"AIVIA-specific: consent for AI-analysed video interviews",
"AIVIA-specific: video destruction obligations",
],
},
"California SB 53 (Frontier AI Transparency)": {
"coverage": 25,
"covered": [
"Both require risk assessment before deployment",
"Both require documentation of safety measures",
],
"gaps": [
"CA focuses on developer safety frameworks — DIFC focuses on deployer data protection",
"CA catastrophic risk assessment differs from DIFC AI impact assessment",
"CA requires incident reporting to OES — DIFC to Commissioner",
],
},
"New York RAISE Act (Frontier AI Safety)": {
"coverage": 25,
"covered": [
"Both require risk assessment before deployment",
"Both require documentation of safety measures",
],
"gaps": [
"NY focuses on developer safety protocols — DIFC focuses on deployer data protection",
"NY critical harm assessment differs from DIFC AI impact assessment",
"NY requires incident reporting to AG + DHSES — DIFC to Commissioner",
],
},
},
# ── Baseline: California SB 53 (frontier AI; compared only to frontier/DIFC regimes) ──
"California SB 53 (Frontier AI Transparency)": {
"New York RAISE Act (Frontier AI Safety)": {
"coverage": 75,
"covered": [
"Both require public safety frameworks with trade secret redactions",
"Both require incident reporting to state authorities",
"Both require whistleblower protections",
"Both require annual protocol review",
],
"gaps": [
"NY requires 72-hour incident reporting vs CA's 15-day window",
"NY establishes a DFS oversight office with annual assessments and fees",
"NY covers knowledge distillation models (USD 5M+ cost) explicitly",
"NY critical harm threshold is 100+ deaths vs CA's 50+ deaths",
],
},
"EU AI Act — GPAI Framework (Chapter V)": {
"coverage": 55,
"covered": [
"Both require model evaluation and safety testing",
"Both require incident reporting to authorities",
"Both require risk documentation",
],
"gaps": [
"EU threshold is 10²⁵ FLOPs — 10x lower than CA's 10²⁶",
"EU requires technical documentation shared with downstream providers",
"EU requires copyright compliance policy and training data summary",
"EU cybersecurity obligations are more prescriptive",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 25,
"covered": [
"Both require risk assessment before deployment",
"Both require documentation of safety measures",
],
"gaps": [
"DIFC focuses on deployer/operator obligations — CA focuses on developer",
"DIFC requires human oversight and transparency to affected persons",
"DIFC data protection impact assessment is distinct from CA catastrophic risk assessment",
],
},
},
# ── Baseline: New York RAISE Act (frontier AI) ──
"New York RAISE Act (Frontier AI Safety)": {
"California SB 53 (Frontier AI Transparency)": {
"coverage": 75,
"covered": [
"Both require public safety frameworks with trade secret redactions",
"Both require incident reporting to state authorities",
"Both require whistleblower protections",
"Both require annual protocol review",
],
"gaps": [
"CA allows 15-day incident reporting vs NY's stricter 72-hour window",
"CA requires explicit Frontier AI Framework publication",
"CA catastrophic risk definition includes model evading control (NY does not)",
"CA effective January 2026, NY effective January 2027",
],
},
"EU AI Act — GPAI Framework (Chapter V)": {
"coverage": 55,
"covered": [
"Both require model evaluation and safety testing",
"Both require incident reporting to authorities",
"Both require risk documentation",
],
"gaps": [
"EU threshold is 10²⁵ FLOPs — 10x lower than NY's 10²⁶",
"EU requires technical documentation shared with downstream providers",
"EU requires copyright compliance policy and training data summary",
"NY has DFS oversight office — EU has AI Office with broader mandate",
],
},
"DIFC Regulation 10 (AI Processing)": {
"coverage": 25,
"covered": [
"Both require risk assessment before deployment",
"Both require documentation of safety measures",
],
"gaps": [
"DIFC focuses on deployer/operator obligations — NY focuses on developer",
"DIFC requires human oversight and transparency to affected persons",
"DIFC data protection impact assessment is distinct from NY critical harm assessment",
],
},
},
# ── Baseline: EU GPAI framework (frontier AI) ──
"EU AI Act — GPAI Framework (Chapter V)": {
"California SB 53 (Frontier AI Transparency)": {
"coverage": 60,
"covered": [
"Both require model evaluation and safety testing",
"Both require incident reporting",
"Both require risk mitigation documentation",
],
"gaps": [
"CA threshold is 10²⁶ FLOPs — 10x higher than EU's 10²⁵",
"CA requires Frontier AI Framework publication",
"CA has USD 500M revenue tier for additional obligations",
"CA includes whistleblower protections not in EU GPAI framework",
],
},
"New York RAISE Act (Frontier AI Safety)": {
"coverage": 60,
"covered": [
"Both require model evaluation and safety testing",
"Both require incident reporting",
"Both require risk mitigation documentation",
],
"gaps": [
"NY threshold is 10²⁶ FLOPs — 10x higher than EU's 10²⁵",
"NY requires 72-hour incident reporting",
"NY establishes DFS oversight office with annual assessments",
"NY has USD 500M revenue threshold",
],
},
},
}
# ─────────────────────────────────────────────────
# SECTION 5: REGULATION URLS
# ─────────────────────────────────────────────────
# Canonical source URL for each regulation referenced elsewhere in this
# module (keys must match the regulation names used in GAP_ANALYSIS,
# QUALIFICATION_QUESTIONS, etc.).  All values are plain https URLs with
# no tracking parameters.
#
# Fixes relative to the originally uploaded data:
#  - DIFC Regulation 10: stripped the Google Analytics "_gl" cross-domain
#    tracking query string that had been pasted in with the link.
#  - HIPAA: the previous link was a web.archive.org copy of S.1028
#    (104th Congress) — an unenacted precursor bill, not HIPAA.  It now
#    points to the eCFR text of the HIPAA Administrative Simplification
#    rules (45 CFR Subchapter C), matching the eCFR style already used
#    for COPPA and FERPA.
#  - Nebraska DPA: upgraded plain http:// to https://.
REGULATION_URLS = {
"EU AI Act (Regulation 2024/1689)": "https://eur-lex.europa.eu/eli/reg/2024/1689/oj",
# GPAI chapter is part of the same regulation, hence the same URL.
"EU AI Act — GPAI Framework (Chapter V)": "https://eur-lex.europa.eu/eli/reg/2024/1689/oj",
"Colorado AI Act (SB 24-205)": "https://leg.colorado.gov/bills/sb24-205",
"Texas TRAIGA (HB 149)": "https://capitol.texas.gov/BillLookup/History.aspx?LegSess=89R&Bill=HB149",
"Utah AI Policy Act (SB 149)": "https://legiscan.com/UT/text/SB0149/2024",
"California CCPA / ADMT Regulations": "https://cppa.ca.gov/regulations/",
"Illinois HB 3773 (AI in Employment)": "https://legiscan.com/IL/text/HB3773/id/3002985",
"DIFC Regulation 10 (AI Processing)": "https://www.difc.com/business/registrars-and-commissioners/commissioner-of-data-protection/guidance",
"GDPR (Regulation 2016/679)": "https://eur-lex.europa.eu/eli/reg/2016/679/oj",
"ePrivacy Directive (2002/58/EC)": "https://eur-lex.europa.eu/eli/dir/2002/58/oj",
"UAE Federal PDPL (Decree-Law 45/2021)": "https://uaelegislation.gov.ae/en/legislations/1972/download",
# NOTE(review): this points to a UAE federal asset host rather than
# difc.com — verify it actually serves DIFC Law No. 5 of 2020.
"DIFC Data Protection Law (Law No. 5 of 2020)": "https://assets.u.ae/api/public/content/bb94d503629243a7aa548a8cd387ea86?v=df38af26",
"ADGM Data Protection Regulations 2021": "https://en.adgm.thomsonreuters.com/rulebook/data-protection-regulations-2021",
# HIPAA Administrative Simplification regulations (45 CFR Parts 160-164).
"HIPAA (Health Insurance Portability and Accountability Act)": "https://www.ecfr.gov/current/title-45/subtitle-A/subchapter-C",
"COPPA (Children's Online Privacy Protection Act)": "https://www.ecfr.gov/current/title-16/chapter-I/subchapter-C/part-312",
"FERPA (Family Educational Rights and Privacy Act)": "https://www.ecfr.gov/current/title-34/subtitle-A/part-99?toc=1",
"Illinois BIPA (Biometric Information Privacy Act)": "https://www.ilga.gov/legislation/ilcs/ilcs3.asp?ActID=3004",
"Copyright Directive (2019/790)": "https://eur-lex.europa.eu/eli/dir/2019/790/oj",
"NIS2 Directive (2022/2555)": "https://eur-lex.europa.eu/eli/dir/2022/2555/oj",
"Product Liability Directive (2024/2853)": "https://eur-lex.europa.eu/eli/dir/2024/2853/oj",
"Equal Treatment Directives": "https://eur-lex.europa.eu/eli/dir/2000/78/oj",
"Consumer Rights Directive / GPSR": "https://eur-lex.europa.eu/eli/reg/2023/988/oj",
"Medical Device Regulation (MDR 2017/745)": "https://eur-lex.europa.eu/eli/reg/2017/745/oj",
"Machinery Regulation (2023/1230)": "https://eur-lex.europa.eu/eli/reg/2023/1230/oj",
"Digital Services Act (DSA 2022/2065)": "https://eur-lex.europa.eu/eli/reg/2022/2065/oj",
"Radio Equipment Directive (RED 2014/53)": "https://eur-lex.europa.eu/eli/dir/2014/53/oj",
"Loi Informatique et Libertés (Loi n° 78-17)": "https://www.legifrance.gouv.fr/loda/id/JORFTEXT000000886460",
"FTC Act Section 5 (Unfair/Deceptive Practices)": "https://www.ftc.gov/sites/default/files/documents/statutes/federal-trade-commission-act/ftc_act_incorporatingus_safe_web_act.pdf",
"Title VII (Civil Rights Act)": "https://www.eeoc.gov/statutes/title-vii-civil-rights-act-1964",
"ADA (Americans with Disabilities Act)": "https://www.ada.gov/law-and-regs/ada/",
"ECOA (Equal Credit Opportunity Act)": "https://www.consumerfinance.gov/rules-policy/regulations/1002/",
"FCRA (Fair Credit Reporting Act)": "https://www.ftc.gov/system/files/ftc_gov/pdf/fcra-march-2026.pdf",
"Fair Housing Act": "https://www.justice.gov/crt/fair-housing-act-1",
"Copyright Law (Decree-Law 38/2021) — No TDM exception": "https://uaelegislation.gov.ae/en/legislations/1534/download",
"Cybercrime Law (Decree-Law 34/2021)": "https://uaelegislation.gov.ae/en/legislations/1526",
"Civil Transactions Law (Federal Law 5/1985)": "https://uaelegislation.gov.ae/en/legislations/1025/download",
"Consumer Protection (Federal Law 15/2020)": "https://uaelegislation.gov.ae/en/legislations/1455/download",
"Anti-Discrimination (Decree-Law 34/2023)": "https://uaelegislation.gov.ae/en/legislations/2131/download",
"Labour Law (Decree-Law 33/2021)": "https://uaelegislation.gov.ae/en/legislations/1541/download",
# ── US Federal ──
"US Copyright Act (Title 17)": "https://www.copyright.gov/title17/title17.pdf",
# ── US State Privacy Laws ──
"California CCPA/CPRA (Privacy)": "https://leginfo.legislature.ca.gov/faces/codes_displayText.xhtml?division=3.&part=4.&lawCode=CIV&title=1.81.5",
"Virginia VCDPA": "https://law.lis.virginia.gov/vacodefull/title59.1/chapter53/",
"Colorado CPA (Privacy)": "https://leg.colorado.gov/bills/sb21-190",
"Connecticut CTDPA": "https://portal.ct.gov/AG/Sections/Privacy/The-Connecticut-Data-Privacy-Act",
"Utah UCPA (Privacy)": "https://le.utah.gov/~2022/bills/static/SB0227.html",
"Iowa ICDPA": "https://www.legis.iowa.gov/legislation/BillBook?ga=90&ba=SF%20262",
"Indiana INCDPA": "https://iga.in.gov/pdf-documents/123/2023/senate/bills/SB0005/SB0005.05.ENRH.pdf",
"Tennessee TIPA": "https://www.tn.gov/attorneygeneral/working-for-tennessee/consumer/tennessee-information-protection-act.html",
"Texas TDPSA (Privacy)": "https://capitol.texas.gov/tlodocs/88R/billtext/html/HB00004F.htm",
"Montana MCDPA": "https://legiscan.com/MT/text/SB384/id/2791095",
"Oregon OCPA": "https://oregon.public.law/statutes/ors_646a.570",
"Delaware DPGA": "https://legiscan.com/DE/text/HB154/id/2831594",
"New Hampshire Privacy Act": "https://www.gencourt.state.nh.us/rsa/html/NHTOC/NHTOC-LII-507-H.htm",
"New Jersey DPA": "https://pub.njleg.gov/bills/2022/S0500/332_R4.PDF",
"Nebraska DPA": "https://nebraskalegislature.gov/bills/view_bill.php?DocumentID=54904",
"Maryland MODPA": "https://mgaleg.maryland.gov/2024RS/bills/sb/sb0541E.pdf",
"Minnesota CDPA": "https://www.revisor.mn.gov/statutes/cite/325M/full",
"Kentucky KCDPA": "https://legiscan.com/KY/bill/HB15/2024",
"Rhode Island DPA": "https://status.rilegislature.gov/bill_history_report.aspx?year=2024&bills=7787",
# ── US State Biometric ──
"Texas CUBI Act (Biometric)": "https://statutes.capitol.texas.gov/Docs/BC/htm/BC.503.htm",
"Washington Biometric Privacy (HB 1493)": "https://lawfilesext.leg.wa.gov/biennium/2017-18/Pdf/Bills/Session%20Laws/House/1493-S.SL.pdf",
# ── US State Employment/AI ──
"NYC Local Law 144 (AI in Employment)": "https://legistar.council.nyc.gov/LegislationDetail.aspx?ID=4344524&GUID=B051915D-A9AC-451E-81F8-6596032FA3F9",
"California FEHA ADS Regulations": "https://calcivilrights.ca.gov/wp-content/uploads/sites/32/2025/06/Final-Text-regulations-automated-employment-decision-systems.pdf",
"Maryland HB 1202 (Facial Recognition in Employment)": "https://mgaleg.maryland.gov/2020RS/Chapters_noln/CH_446_hb1202t.pdf",
"Illinois AIVIA (HB 2557)": "https://www.billtrack50.com/billdetail/1067171",
# ── US State Other AI ──
"Nevada AB 406 (AI in Mental Health)": "https://www.leg.state.nv.us/App/NELIS/REL/83rd2025/Bill/12575/Text",
"New Jersey AB 4563 (Bot Disclosure)": "https://legiscan.com/NJ/text/A4563/id/1828884",
# ── US Frontier AI ──
"California SB 53 (Frontier AI Transparency)": "https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB53",
"New York RAISE Act (Frontier AI Safety)": "https://www.nysenate.gov/legislation/bills/2025/A6453/amendment/A",
}
# ─────────────────────────────────────────────────
# SECTION 6: OTHER REGULATIONS — ONE-LINERS
# ─────────────────────────────────────────────────
# One-sentence plain-English summaries of adjacent (non-AI-specific) regulations,
# keyed by regulation display name.
# NOTE(review): these keys presumably must stay in sync with the regulation names
# used in the source-link mapping above — confirm before renaming any key.
OTHER_REG_ONE_LINERS = {
    # ── EU ──
    "Copyright Directive (2019/790)": "Text and data mining for AI training requires compliance with opt-out provisions — commercial use needs rightsholder permission unless research exception applies.",
    "NIS2 Directive (2022/2555)": "AI systems in essential/important sectors must meet cybersecurity risk management and incident reporting obligations.",
    "Product Liability Directive (2024/2853)": "AI software is a 'product' under EU law — providers face strict liability for defective AI without needing to prove fault.",
    "Equal Treatment Directives": "AI-driven decisions in employment, education, or services must not discriminate on protected grounds (gender, race, age, disability, religion).",
    "Consumer Rights Directive / GPSR": "Consumer-facing AI products must provide clear pre-contractual information and meet general product safety requirements.",
    "Medical Device Regulation (MDR 2017/745)": "AI-based diagnostic, clinical decision support, or therapeutic systems may require CE marking as medical devices.",
    "Machinery Regulation (2023/1230)": "AI in robots, drones, and autonomous machinery must meet safety and cybersecurity requirements — applies from January 2027.",
    "Digital Services Act (DSA 2022/2065)": "Online platforms using AI recommendation systems must offer non-profiling alternatives and explain algorithmic decisions.",
    "Radio Equipment Directive (RED 2014/53)": "Connected devices with embedded AI (IoT, wearables) must comply with cybersecurity and privacy-by-design requirements.",
    # ── France ──
    "Loi Informatique et Libertés (Loi n° 78-17)": "French national implementation of the GDPR. CNIL oversees enforcement, with specific provisions on automated decision-making, health data, and children's data processing. Prior CNIL authorisation may be required for certain AI processing activities.",
    # ── US Federal (consumer protection & civil rights) ──
    "FTC Act Section 5 (Unfair/Deceptive Practices)": "AI systems must not deceive consumers about capabilities, data use, or human involvement — FTC actively enforces.",
    "Title VII (Civil Rights Act)": "AI employment tools must not cause disparate impact based on race, colour, religion, sex, or national origin.",
    "ADA (Americans with Disabilities Act)": "AI in employment and public accommodation must not discriminate against individuals with disabilities and must allow reasonable accommodation.",
    "ECOA (Equal Credit Opportunity Act)": "AI credit scoring and lending decisions must be non-discriminatory, with adverse action notices when AI contributes to denial.",
    "FCRA (Fair Credit Reporting Act)": "AI systems generating consumer reports must ensure accuracy and allow consumers to dispute inaccurate information.",
    "Fair Housing Act": "AI in housing advertising, tenant screening, and lending must not discriminate — algorithmic redlining is an enforceable violation.",
    # ── UAE ──
    "Copyright Law (Decree-Law 38/2021) — No TDM exception": "CRITICAL: UAE has no text and data mining exception — all copyrighted training data must be licensed.",
    "Cybercrime Law (Decree-Law 34/2021)": "Creating or disseminating deepfakes and accessing AI systems without authorisation are criminal offences in the UAE.",
    "Civil Transactions Law (Federal Law 5/1985)": "General tort liability applies to AI-caused damage under the 'guardian of things' doctrine — operators may be liable.",
    "Consumer Protection (Federal Law 15/2020)": "AI interacting with UAE consumers must not engage in deceptive or misleading practices.",
    "Anti-Discrimination (Decree-Law 34/2023)": "AI decisions in the UAE must not discriminate based on race, colour, ethnic origin, religion, or disability.",
    "Labour Law (Decree-Law 33/2021)": "AI in employment (hiring, monitoring, evaluation) must respect worker rights and disclose workplace surveillance.",
    # ── US Federal ──
    "US Copyright Act (Title 17)": "No specific AI/TDM exception — fair use (§ 107) assessed case-by-case. Purely AI-generated works without human authorship cannot be copyrighted (USCO guidance).",
    # ── US State Privacy Laws ──
    "California CCPA/CPRA (Privacy)": "Comprehensive privacy law granting consumers rights to access, delete, and opt out of sale of personal data. Includes automated decision-making technology (ADMT) provisions and data protection assessments.",
    "Virginia VCDPA": "Consumers can opt out of profiling and targeted advertising. Data protection assessments required for high-risk processing. Sensitive data requires opt-in consent.",
    "Colorado CPA (Privacy)": "Profiling opt-out rights, data protection assessments for high-risk processing, universal opt-out mechanism required. Children's data protections included.",
    "Connecticut CTDPA": "Profiling opt-out rights, automated decision-making provisions, children's data protections. 60-day cure period expired Dec 2024 — direct penalties now apply.",
    "Utah UCPA (Privacy)": "Business-friendly privacy law with consumer opt-out rights for targeted advertising and sale of personal data. No data protection assessment requirement.",
    "Iowa ICDPA": "Consumer data protection with opt-out rights for targeted advertising and data sales. Most business-friendly state privacy law — limited consumer rights compared to CCPA.",
    "Indiana INCDPA": "Standard privacy framework with consumer access, deletion, and opt-out rights. Data protection assessments required for targeted advertising and profiling.",
    "Tennessee TIPA": "Consumer privacy rights with opt-out for targeted advertising and data sales. Affirmative defence available for businesses following NIST privacy framework.",
    "Texas TDPSA (Privacy)": "Comprehensive privacy law for large businesses operating in Texas. Consumer opt-out rights, data protection assessments, and sensitive data consent requirements.",
    "Montana MCDPA": "Consumer opt-out rights for targeted advertising, profiling, and data sales. Sensitive data requires consent. Applies to businesses processing 50,000+ Montana residents' data.",
    "Oregon OCPA": "Consumer privacy rights including right to know about profiling logic. Children's data protections. Applies to businesses processing 100,000+ Oregon residents' data.",
    "Delaware DPGA": "Comprehensive privacy law with consumer rights to access, correct, delete, and port data. Sensitive data consent required. No private right of action.",
    "New Hampshire Privacy Act": "Standard privacy framework with consumer opt-out rights for targeted advertising and profiling. Data protection assessments required for high-risk processing.",
    "New Jersey DPA": "Consumer privacy rights with sensitive data consent requirements. Applies to businesses processing 100,000+ NJ residents' data or deriving revenue from data sales.",
    "Nebraska DPA": "Consumer opt-out rights for targeted advertising and data sales. Sensitive data consent required. Attorney General enforcement with penalties up to USD 7,500 per violation.",
    "Maryland MODPA": "Strictest US state privacy law — imposes data minimisation standard (strictly necessary test). Prohibits sale of children's data. Geofencing restrictions near sensitive locations.",
    "Minnesota CDPA": "Includes right to contest automated decision-making — unique among US state privacy laws. Profiling opt-out and data protection assessment requirements.",
    "Kentucky KCDPA": "Standard privacy framework with consumer access, deletion, and opt-out rights. 30-day cure period for violations. Applies to businesses processing 100,000+ KY residents' data.",
    "Rhode Island DPA": "Consumer privacy rights with data protection assessments for high-risk processing. Standard opt-out rights for targeted advertising and data sales.",
    # ── US State Biometric ──
    "Texas CUBI Act (Biometric)": "Regulates capture and use of biometric identifiers for commercial purposes. Requires informed consent before collection. No private right of action — AG enforcement only.",
    "Washington Biometric Privacy (HB 1493)": "Regulates enrollment of biometric identifiers in databases for commercial purposes. Notice and consent required. Security purpose exemption. AG enforcement only.",
    # ── US State Employment/AI ──
    "NYC Local Law 144 (AI in Employment)": "Mandatory annual independent bias audit for automated employment decision tools used in hiring/promotion in NYC. Audit summary must be publicly available. Candidate notice required.",
    "California FEHA ADS Regulations": "Prohibits use of automated decision systems for discriminatory employment decisions. Employers must keep ADS records for 4+ years. Liable for third-party AI tool bias.",
    "Maryland HB 1202 (Facial Recognition in Employment)": "Employers may not use facial recognition services during job interviews without a signed waiver from the applicant.",
    "Illinois AIVIA (HB 2557)": "Employers using AI to analyse video interviews must disclose AI use to applicants and obtain consent. Demographic reporting required if AI solely determines interview advancement.",
    # ── US State Other AI ──
    "Nevada AB 406 (AI in Mental Health)": "Prohibits use of AI to diagnose, treat, or prevent mental illness. Restricts AI marketing in mental health contexts. Certain administrative AI uses permitted.",
    "New Jersey AB 4563 (Bot Disclosure)": "Prohibits use of bots to interact with NJ residents for commercial or political purposes without clear disclosure that the communication is conducted via a bot.",
}
# ─────────────────────────────────────────────────
# SECTION 7: CONTEXTUAL NOTES
# ─────────────────────────────────────────────────
# Explains the DIFC Regulation 10 deployer/operator split in GDPR-familiar
# terms (deployer ≈ controller, operator ≈ processor) for display alongside
# DIFC-related results.
DIFC_CONTROLLER_NOTE = (
    "Under DIFC Regulation 10, a 'deployer' is the entity that directs, authorises, or benefits "
    "from the operation of the AI system and its output — comparable to a data controller. "
    "An 'operator' is the entity that runs the system on behalf of the deployer — "
    "comparable to a data processor. Most compliance obligations fall on the deployer."
)
# ─────────────────────────────────────────────────
# SECTION 8: DISCLAIMER
# ─────────────────────────────────────────────────
# Standard not-legal-advice disclaimer appended to every analysis output.
DISCLAIMER = (
    "This analysis is provided for informational and orientation purposes only. It does not "
    "constitute legal advice. The regulatory landscape is evolving rapidly — obligations, "
    "deadlines, and enforcement approaches may change. Always consult qualified legal counsel "
    "for definitive compliance guidance tailored to your specific situation."
)