Datasets:
Tasks:
Text Classification
Languages:
English
Size:
n<1K
Tags:
pragmatic-reasoning
theory-of-mind
emotion-inference
indirect-speech
benchmark
multi-annotator
License:
Upload config.yml with huggingface_hub
Browse files — config.yml (+93 lines, −0)
config.yml
ADDED
# =============================================================================
# CEI-ToM DMLR 2026 Configuration
# =============================================================================
# IMPORTANT: DMLR baselines must use models NOT evaluated in the CogSci 2026
# companion paper. See cogsci_off_limits below for the full exclusion list.

llm_inference:
  # Model sets by execution mode
  models:
    # TEST mode: single model for quick validation
    test:
      - id: "gpt-5-mini"
        provider: "openai"

    # COMPLETE mode: DMLR-safe models (none overlap with CogSci 2026)
    # NOTE(review): "gpt-5-mini" below also appears in
    # cogsci_off_limits.behavioral, contradicting the "none overlap" claim —
    # confirm before using it for DMLR baselines.
    complete:
      # Commercial API models
      - id: "gpt-5-mini"
        provider: "openai"
      - id: "claude-sonnet-4-5"
        provider: "anthropic"
      - id: "grok-4-1-fast-non-reasoning"
        provider: "xai"
      - id: "gemini-2.5-flash"
        provider: "google"
      # Open-source models via API providers
      - id: "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
        provider: "together"
      - id: "accounts/fireworks/models/deepseek-v3p1"
        provider: "fireworks"
      - id: "Qwen/Qwen2.5-7B-Instruct-Turbo"
        provider: "together"

  # Models used in CogSci 2026 — DO NOT use for DMLR baselines
  cogsci_off_limits:
    behavioral:
      - "gpt-5-mini"
      - "claude-haiku-4-5"
      - "grok-4-1-fast"
      - "gemini-3-flash-preview"
      - "kimi-k2-instruct-0905"
      - "qwen3-235b-a22b-instruct-2507"
      - "deepseek-v3p2"
      - "minimax-m2p1"
      - "Meta-Llama-3.1-8B-Instruct-Turbo"
      - "Mistral-Small-24B-Instruct-2501"
      - "gemma-3n-E4B-it"
    probing:
      - "llama-3-8b"
      - "mistral-7b"
      - "flan-t5-xxl"
      - "mixtral-8x22b"

# =============================================================================
# PRICING (USD per 1M tokens, as of 2026-02-07)
# =============================================================================
pricing_usd_per_1m_tokens:
  # --- DMLR-safe models (recommended for baselines) ---
  openai:
    gpt-5-mini: {input: 0.25, output: 2.00}
    _default: {input: 0.25, output: 2.00}
  anthropic:
    claude-sonnet-4-5: {input: 3.00, output: 15.00}
    _default: {input: 3.00, output: 15.00}
  google:
    gemini-2.0-flash: {input: 0.10, output: 0.40}
    gemini-2.5-flash: {input: 0.30, output: 2.50}
    _default: {input: 0.30, output: 2.50}
  xai:
    grok-4-1-fast-non-reasoning: {input: 0.20, output: 0.50}
    _default: {input: 0.20, output: 0.50}
  fireworks:
    deepseek-v3p1: {input: 0.15, output: 0.75}
    _default: {input: 1.00, output: 5.00}
  together:
    meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo: {input: 0.88, output: 0.88}
    Qwen/Qwen2.5-7B-Instruct-Turbo: {input: 0.30, output: 0.30}
    _default: {input: 0.15, output: 0.60}
  ollama:
    _default: {input: 0.0, output: 0.0}

# --- CogSci models (reference only, DO NOT use for DMLR) ---
# openai: gpt-5-mini {input: 0.25, output: 2.00}
# anthropic: claude-haiku-4-5 {input: 1.00, output: 5.00}
# xai: grok-4-1-fast {input: 0.20, output: 0.50}
# google: gemini-3-flash {input: 0.50, output: 3.00}
# fireworks: kimi-k2 {input: 0.60, output: 2.50}
# fireworks: qwen3-235b {input: 0.22, output: 0.88}
# fireworks: deepseek-v3p2 {input: 0.56, output: 1.68}
# fireworks: minimax-m2p1 {input: 0.30, output: 1.20}
# together: llama-3.1-8b {input: 0.18, output: 0.18}
# together: mistral-small-24b {input: 0.10, output: 0.30}
# together: gemma-3n-e4b {input: 0.03, output: 0.03}