[
  {
    "title": "",
    "header": [
      {
        "value": "Model",
        "markdown": false,
        "metadata": {}
      },
      {
        "value": "EM",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
        "markdown": false,
        "lower_is_better": false,
        "metadata": {
          "metric": "EM",
          "run_group": "LegalSupport"
        }
      },
      {
        "value": "Denoised inference time (s)",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
        "markdown": false,
        "lower_is_better": true,
        "metadata": {
          "metric": "Denoised inference time (s)",
          "run_group": "LegalSupport"
        }
      },
      {
        "value": "# eval",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# eval: Number of evaluation instances.",
        "markdown": false,
        "metadata": {
          "metric": "# eval",
          "run_group": "LegalSupport"
        }
      },
      {
        "value": "# train",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# train: Number of training instances (e.g., in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "# train",
          "run_group": "LegalSupport"
        }
      },
      {
        "value": "truncated",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
        "markdown": false,
        "metadata": {
          "metric": "truncated",
          "run_group": "LegalSupport"
        }
      },
      {
        "value": "# prompt tokens",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# prompt tokens: Number of tokens in the prompt.",
        "markdown": false,
        "metadata": {
          "metric": "# prompt tokens",
          "run_group": "LegalSupport"
        }
      },
      {
        "value": "# output tokens",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# output tokens: Actual number of output tokens.",
        "markdown": false,
        "metadata": {
          "metric": "# output tokens",
          "run_group": "LegalSupport"
        }
      },
      {
        "value": "# trials",
        "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
        "markdown": false,
        "metadata": {
          "metric": "# trials",
          "run_group": "LegalSupport"
        }
      }
    ],
    "rows": [
      [
        {
          "value": "EleutherAI/pythia-2.8b",
          "description": "",
          "href": "?group=legal_support&subgroup=&runSpecs=%5B%22legal_support%2Cmethod%3Dmultiple_choice_joint%3Amodel%3DEleutherAI_pythia-2.8b%22%5D",
          "markdown": false,
          "run_spec_names": [
            "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b"
          ]
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {
            "font-weight": "bold"
          },
          "markdown": false
        },
        {
          "description": "1 matching runs, but no matching metrics",
          "markdown": false
        },
        {
          "value": 489.0,
          "description": "min=489, mean=489, max=489, sum=489 (1)",
          "style": {},
          "markdown": false
        },
        {
          "value": 3.0,
          "description": "min=3, mean=3, max=3, sum=3 (1)",
          "style": {},
          "markdown": false
        },
        {
          "value": 0.0,
          "description": "min=0, mean=0, max=0, sum=0 (1)",
          "style": {},
          "markdown": false
        },
        {
          "value": 597.3292433537832,
          "description": "min=597.329, mean=597.329, max=597.329, sum=597.329 (1)",
          "style": {},
          "markdown": false
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {},
          "markdown": false
        },
        {
          "value": 1.0,
          "description": "min=1, mean=1, max=1, sum=1 (1)",
          "style": {},
          "markdown": false
        }
      ]
    ],
    "links": [
      {
        "text": "LaTeX",
        "href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/legal_support_legal_support_.tex"
      },
      {
        "text": "JSON",
        "href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/legal_support_legal_support_.json"
      }
    ],
    "name": "legal_support_"
  }
]