{
  "metadata": {
    "Name": "StarCoder2",
    "Provider": "BigCode",
    "URL": "https://huggingface.co/bigcode/starcoder2-15b",
    "Type": "Large Language Model",
    "Modalities": [
      "Text-to-Text"
    ]
  },
  "scores": {
    "1. Bias, Stereotypes, and Representational Harms Evaluation": {
      "1.1 Bias Detection Overview": {
        "status": "Yes",
        "sources": [
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "BOLD - Bias in Open-ended Language Generation Dataset"
          },
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "WinoBias"
          }
        ],
        "questions": {
          "Evaluations at various stages (data collection, preprocessing, AI system architecture, training, deployment)": false,
          "Have intrinsic properties of the AI system been evaluated for bias (e.g., embedding analysis)": false,
          "Have extrinsic bias evaluations been run (e.g., downstream task performance)": true,
          "Have evaluations been run across all applicable modalities": true,
          "Have bias evaluations been run that take the form of automatic quantitative evaluation": true,
          "Have bias evaluations been run with human participants?": false
        }
      },
      "1.2 Protected Classes and Intersectional Measures": {
        "status": "No",
        "sources": [],
        "questions": {
          "Do evaluations cover all applicable legal protected categories for in-scope uses of the system?": false,
          "Do evaluations cover additional subgroups that are likely to be harmed based on other personal characteristics": false,
          "Evaluation of how different aspects of identity interact and compound in AI system behavior": false,
          "Evaluation of AI system biases for legal protected categories and additional relevant subgroups": false
        }
      },
      "1.3 Measurement of Stereotypes and Harmful Associations": {
        "status": "Yes",
        "sources": [
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "HONEST - Hurtful Sentence Completion in English Language Models"
          },
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "RealToxicityPrompts"
          }
        ],
        "questions": {
          "Measurement of known stereotypes in AI system outputs": true,
          "Measurement of other negative associations and assumptions regarding specific groups": true,
          "Measurement of stereotypes and negative associations across in-scope contexts": false
        }
      },
      "1.4 Bias Evaluation Transparency and Documentation": {
        "status": "Yes",
        "sources": [
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "Evaluation Documentation"
          }
        ],
        "questions": {
          "Sufficient documentation of evaluation methods (including code and datasets) to replicate findings": true,
          "Sufficient documentation of evaluation results (including intermediary statistics) to support comparison to other AI systems": true,
          "Documentation of bias mitigation measures, including their secondary impacts": false,
          "Documentation of bias monitoring approaches post-release/deployment if applicable": false
        }
      }
    },
    "2. Cultural Values and Sensitive Content Evaluation": {
      "2.1 Cultural Variation Overview": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Evaluations at various stages (data collection, preprocessing, AI system architecture, training, deployment)": false,
          "Have intrinsic properties of the AI system been evaluated for cultural variation(e.g., embedding analysis)": false,
          "Have extrinsic cultural variation evaluations been run (e.g., downstream task performance)": false,
          "Have evaluations been run across all applicable modalities": false,
          "Have cultural variation evaluations been run that take the form of automatic quantitative evaluation": false,
          "Have cultural variation evaluations been run with human participants?": false
        }
      },
      "2.2 Cultural Diversity and Representation": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Use of evaluation methods developed in the cultural contexts in scope": false,
          "Respect of indigenous sovereignty, protected rights, and cultural norms in AI system-generated content": false,
          "Evaluation of cultural variation across geographic dimensions": false,
          "Evaluation of cultural variation representing communities' perspectives within geographical contexts": false,
          "Analysis of how cultural context affects AI system performance": false
        }
      },
      "2.3 Generated Sensitive Content across Cultural Contexts": {
        "status": "Yes",
        "sources": [
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "HONEST - Hurtful Sentence Completion in English Language Models"
          },
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "RealToxicityPrompts"
          }
        ],
        "questions": {
          "Has the AI system been evaluated for its likelihood of facilitating generation of threatening or violent content": true,
          "Has the AI system been evaluated for its likelihood of facilitating generation of targeted harassment or discrimination": false,
          "Has the AI system been evaluated for its likelihood of facilitating generation of hate speech": false,
          "Has the AI system been evaluated for its likelihood of exposing its direct users to content embedding values and assumptions not reflective of their cultural context": false,
          "Has the AI system been evaluated for its likelihood of exposing its direct users to inappropriate content for their use context": true,
          "Has the AI system been evaluated for its likelihood of exposing its direct users to content with negative psychological impacts": false,
          "Has the evaluation of the AI system's behaviors explicitly considered cultural variation in their definition": false
        }
      },
      "2.4 Cultural Variation Transparency and Documentation": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Documentation of cultural contexts considered during development": false,
          "Documentation of the range of cultural contexts covered by evaluations": false,
          "Sufficient documentation of evaluation method to understand the scope of the findings": false,
          "Construct validity, documentation of strengths, weaknesses, and assumptions": false,
          "Domain shift between evaluation development and AI system development settings": false,
          "Sufficient documentation of evaluation methods to replicate findings": false,
          "Sufficient documentation of evaluation results to support comparison": false,
          "Document of psychological impact on evaluators reviewing harmful content": false,
          "Documentation of measures to protect evaluator well-being": false
        }
      }
    },
    "3. Disparate Performance Evaluation": {
      "3.1 Disparate Performance Overview": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Have development choices and intrinsic properties of the AI system been evaluated for their contribution to disparate performance?": false,
          "Have extrinsic disparate performance evaluations been run": false,
          "Have evaluations been run across all applicable modalities": false,
          "Have disparate performance evaluations been run that take the form of automatic quantitative evaluation": false,
          "Have disparate performance evaluations been run with human participants": false
        }
      },
      "3.2 Identifying Target Groups for Disparate Performance Evaluation": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Identification of mandated target group based on legal nondiscrimination frameworks": false,
          "Identification of further target groups that are likely to be harmed by disparate performance": false,
          "Assessment of systemic barriers in dataset collection methods for different groups": false,
          "Consideration of historical disparities in the task in which the AI system is deployed": false,
          "Identification of both implicit and explicit markers for the target groups": false
        }
      },
      "3.3 Subgroup Performance Analysis": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Non-aggregated evaluation results across subpopulations, including feature importance and consistency analysis": false,
          "Metrics to measure performance in decision-making tasks": false,
          "Metrics to measure disparate performance in other tasks including generative tasks": false,
          "Worst-case subgroup performance analysis, including performance on rare or underrepresented cases": false,
          "Intersectional analysis examining performance across combinations of subgroup": false,
          "Do evaluations of disparate performance account for implicit social group markers": false
        }
      },
      "3.4 Disparate Performance Evaluation Transparency and Documentation": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Sufficient documentation of evaluation method to understand the scope of the findings": false,
          "Documentation of strengths, weaknesses, and assumptions about the context": false,
          "Documentation of domain shift between evaluation and deployment settings": false,
          "Sufficient documentation of evaluation methods to replicate findings": false,
          "Sufficient documentation of evaluation results to support comparison": false,
          "Documentation of disparate performance mitigation measures": false,
          "Documentation of disparate performance monitoring approaches": false
        }
      }
    },
    "4. Environmental Costs and Carbon Emissions Evaluation": {
      "4.1 Environmental Costs Overview": {
        "status": "Yes",
        "sources": [
          {
            "type": "📄",
            "detail": "https://mlco2.github.io/impact/#compute",
            "name": "Machine Learning Emissions Calculator"
          }
        ],
        "questions": {
          "Evaluations of different processes within development and deployment": false,
          "Have evaluations been run across all applicable modalities?": true,
          "Have evaluations been run on standardized benchmarks or metrics?": true,
          "Have evaluations taken into account community feedback from regions affected by data center power consumption?": false,
          "Do evaluations consider the full supply chain including environmental impact of hardware components and data centers used?": false
        }
      },
      "4.2 Energy Cost and Environmental Impact of Development": {
        "status": "Yes",
        "sources": [
          {
            "type": "📄",
            "detail": "https://mlco2.github.io/impact/#compute",
            "name": "Machine Learning Emissions Calculator"
          }
        ],
        "questions": {
          "Accounting of FLOPS across development stages": true,
          "Evaluation of energy consumption using standardized tracking tools": true,
          "Evaluation of carbon impact accounting for regional energy sources": true,
          "Evaluation of hardware lifecycle environmental impact": false
        }
      },
      "4.3 Energy Cost and Environmental Impact of Deployment": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Evaluation of inference FLOPS for the system": false,
          "Evaluation of inference energy consumption on most common deployment setting": false,
          "Evaluation of inference energy consumption on multiple deployment settings": false,
          "Evaluation of task-specific energy consumption variations": false,
          "Evaluation of carbon impact for deployment infrastructure": false,
          "Evaluation of hardware lifecycle environmental impact for deployment": false
        }
      },
      "4.4 Environmental Costs Transparency and Documentation": {
        "status": "Yes",
        "sources": [
          {
            "type": "📄",
            "detail": "https://mlco2.github.io/impact/#compute",
            "name": "Machine Learning Emissions Calculator"
          }
        ],
        "questions": {
          "Documentation about equipment and infrastructure specifications": true,
          "Sufficient documentation of evaluation methods including components covered": false,
          "Sufficient documentation of evaluation methods to replicate findings": true,
          "Sufficient documentation of evaluation results for comparison": true
        }
      }
    },
    "5. Privacy and Data Protection Evaluation": {
      "5.1 Privacy and Data Protection Overview": {
        "status": "Yes",
        "sources": [
          {
            "type": "💻",
            "detail": "PII detection and redaction using an NER model"
          },
          {
            "type": "📄",
            "detail": "https://huggingface.co/spaces/bigcode/in-the-stack",
            "name": "Opt-out tool for users"
          },
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "Asleep at the Keyboard Security Benchmark"
          }
        ],
        "questions": {
          "Evaluations at various stages (data collection, preprocessing, AI system architecture, training, deployment)": true,
          "Have intrinsic properties of the AI system been evaluated for privacy vulnerabilities": false,
          "Have extrinsic privacy evaluations been run": true,
          "Have evaluations been run across all applicable modalities": true,
          "Have privacy evaluations been run that take the form of automatic quantitative evaluation": true,
          "Have privacy evaluations been run with human participants?": false
        }
      },
      "5.2 Privacy, Likeness, and Publicity Harms": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Has the AI system been evaluated for its likelihood of revealing personal information from its training data?": false,
          "Has the AI system been evaluated for its likelihood of facilitating generation of content impersonating an individual?": false,
          "Has the AI system been evaluated for its likelihood of providing made up or confabulated personal information about individuals?": false
        }
      },
      "5.3 Intellectual Property and Information Security": {
        "status": "Yes",
        "sources": [
          {
            "type": "💻",
            "detail": "Membership test to find if generated code was copied from the training corpus"
          },
          {
            "type": "💻",
            "detail": "Code attribution tool to find the original author and license of the generated code"
          },
          {
            "type": "📄",
            "detail": "https://arxiv.org/abs/2402.19173",
            "name": "Asleep at the Keyboard Security Benchmark"
          }
        ],
        "questions": {
          "Has the AI system been evaluated for its likelihood of reproducing other categories of information from its training data": true,
          "Has the system been evaluated for other information security risks for in-scope uses": false
        }
      },
      "5.4 Privacy Evaluation Transparency and Documentation": {
        "status": "Yes",
        "sources": [
          {
            "type": "💻",
            "detail": "Documentation of training data information risk categories and consent status"
          }
        ],
        "questions": {
          "Documentation of the categories of training data that present information risk": true,
          "Documentation of evaluation methods to replicate findings": true,
          "Documentation of evaluation results to support comparison": true,
          "Documentation of evaluation limitations": false,
          "Documentation of deployment considerations": false
        }
      }
    },
    "6. Financial Costs Evaluation": {
      "6.1 Financial Costs Overview": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Evaluation of costs at various stages": false,
          "Have costs been evaluated for different system components": false,
          "Have cost evaluations been run across all applicable modalities": false,
          "Have cost evaluations included both direct and indirect expenses": false,
          "Have cost projections been validated against actual expenses": false
        }
      },
      "6.2 Development and Training Costs": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Assessment of research and development labor costs": false,
          "Evaluation of data collection and preprocessing costs": false,
          "Assessment of training infrastructure costs": false,
          "Assessment of costs associated with different training approaches": false,
          "Evaluation of model architecture and size impact on costs": false
        }
      },
      "6.3 Deployment and Operation Costs": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Assessment of inference and serving costs": false,
          "Evaluation of storage and hosting expenses": false,
          "Assessment of scaling costs based on usage patterns": false,
          "Evaluation of costs specific to different deployment contexts": false,
          "Assessment of costs for model updates or fine-tuning by end users": false
        }
      },
      "6.4 Financial Cost Documentation and Transparency": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Sufficient documentation of cost evaluation methodology and assumptions": false,
          "Sufficient documentation of cost breakdowns and metrics": false,
          "Documentation of cost variations across different usage scenarios": false,
          "Documentation of long-term cost projections and risk factors": false
        }
      }
    },
    "7. Data and Content Moderation Labor Evaluation": {
      "7.1 Labor Evaluation Overview": {
        "status": "Yes",
        "sources": [
          {
            "type": "💻",
            "detail": "PII annotations by human annotators with fair wage"
          }
        ],
        "questions": {
          "Evaluation of labor practices at various stages": true,
          "Have labor conditions been evaluated for different worker categories": true,
          "Have labor evaluations been run across all applicable task types": false,
          "Have labor practices been evaluated against established industry standards": true,
          "Have labor evaluations included both direct employees and contracted workers": false,
          "Have evaluations considered different regional and jurisdictional contexts": true
        }
      },
      "7.2 Working Conditions and Compensation": {
        "status": "Yes",
        "sources": [
          {
            "type": "💻",
            "detail": "PII annotations by human annotators with fair wage"
          }
        ],
        "questions": {
          "Assessment of compensation relative to local living wages and industry standards": true,
          "Assessment of job security and employment classification": false,
          "Evaluation of workplace safety, worker protections and rights": false,
          "Assessment of worker autonomy and task assignment practices": false,
          "Evaluation of power dynamics and worker feedback mechanisms": false
        }
      },
      "7.3 Worker Wellbeing and Support": {
        "status": "N/A",
        "sources": [],
        "questions": {
          "Assessment of psychological support systems, trauma resources, and other long-term mental health monitoring": false,
          "Evaluation of training and preparation for difficult content": false,
          "Evaluation of cultural and linguistic support for diverse workforces": false
        }
      },
      "7.4 Labor Practice Documentation and Transparency": {
        "status": "Yes",
        "sources": [
          {
            "type": "💻",
            "detail": "PII annotations by human annotators with fair wage"
          }
        ],
        "questions": {
          "Documentation of labor evaluation methodology and frameworks used": true,
          "Documentation of worker demographics and task distribution": false,
          "Documentation of support systems, worker protections": false,
          "Documentation of incident reporting and resolution procedures": false
        }
      }
    }
  }
}