AION Protocol Development committed on
Commit
0c654b0
·
1 Parent(s): 67cec83

feat: Add 3 new FREE models - Mistral Medium 3, GPT-4o-mini (GitHub), TinyLlama 1.1B

Browse files

TIER 2: FREE GITHUB MODELS
- Mistral Medium 3 (GitHub) πŸ†“: Advanced reasoning + vision (131K context)
- GPT-4o-mini (GitHub) πŸ†“: Fast GPT-4o-mini (128K context)

TIER 5: FREE HUGGINGFACE MODELS
- TinyLlama 1.1B πŸ€—: Ultra-fast 1.1B model for simple tasks (2K context)

Total models: 11 (was 8)
All use FREE APIs with appropriate tokens

Files changed (1) hide show
  1. app.py +31 -1
app.py CHANGED
@@ -35,7 +35,28 @@ MODEL_CONFIGS = {
35
  "description": "Best for general purpose"
36
  },
37
 
38
- # === TIER 2: FREE GROQ MODELS ===
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  "Llama 3.3 70B (Groq) πŸš€": {
40
  "provider": "groq",
41
  "model": "llama-3.3-70b-versatile",
@@ -94,6 +115,15 @@ MODEL_CONFIGS = {
94
  "context_window": 16384,
95
  "tier": "free-hf",
96
  "description": "Microsoft's efficient code model via HF Inference API"
 
 
 
 
 
 
 
 
 
97
  }
98
  }
99
 
 
35
  "description": "Best for general purpose"
36
  },
37
 
38
+
39
+ # === TIER 2: FREE GITHUB MODELS ===
40
+ "Mistral Medium 3 (GitHub) πŸ†“": {
41
+ "provider": "github",
42
+ "model": "Mistral-Medium-3",
43
+ "api_key_env": "GITHUB_TOKEN",
44
+ "cost_per_1M_tokens": 0.00,
45
+ "context_window": 131072,
46
+ "tier": "free-github",
47
+ "description": "Advanced reasoning + vision via GitHub Models (FREE)"
48
+ },
49
+ "GPT-4o-mini (GitHub) πŸ†“": {
50
+ "provider": "github",
51
+ "model": "gpt-4o-mini",
52
+ "api_key_env": "GITHUB_TOKEN",
53
+ "cost_per_1M_tokens": 0.00,
54
+ "context_window": 128000,
55
+ "tier": "free-github",
56
+ "description": "Fast GPT-4o-mini via GitHub Models (FREE)"
57
+ },
58
+
59
+ # === TIER 3: FREE GROQ MODELS ===
60
  "Llama 3.3 70B (Groq) πŸš€": {
61
  "provider": "groq",
62
  "model": "llama-3.3-70b-versatile",
 
115
  "context_window": 16384,
116
  "tier": "free-hf",
117
  "description": "Microsoft's efficient code model via HF Inference API"
118
+ },
119
+ "TinyLlama 1.1B πŸ€—": {
120
+ "provider": "huggingface",
121
+ "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
122
+ "api_key_env": "HF_TOKEN",
123
+ "cost_per_1M_tokens": 0.00,
124
+ "context_window": 2048,
125
+ "tier": "free-hf",
126
+ "description": "Ultra-fast 1.1B model for simple tasks (FREE)"
127
  }
128
  }
129