FlameF0X committed on
Commit
ccaf38d
·
verified ·
1 Parent(s): 8d44ff8

Adding a new text-only LFM model.

Browse files

- Adding `LiquidAI/LFM2-24B-A2B`.
- Arranging the models from smallest to largest

Files changed (1) hide show
  1. app.py +8 -7
app.py CHANGED
@@ -3,13 +3,14 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStream
3
  import torch
4
  from threading import Thread
5
  MODEL_NAMES = {
6
- "LFM2-350M": "LiquidAI/LFM2-350M",
7
- "LFM2-700M": "LiquidAI/LFM2-700M",
8
- "LFM2-1.2B": "LiquidAI/LFM2-1.2B",
9
- "LFM2-2.6B": "LiquidAI/LFM2-2.6B",
10
- "LFM2-8B-A1B": "LiquidAI/LFM2-8B-A1B",
11
- "LFM2.5-1.2B": "LiquidAI/LFM2.5-1.2B-Instruct",
12
- "LFM2.5-1.2B-Thinking": "LiquidAI/LFM2.5-1.2B-Thinking",
 
13
  }
14
  model_cache = {}
15
  def load_model(model_key):
 
3
  import torch
4
  from threading import Thread
5
  MODEL_NAMES = {
6
+ "LFM2-350M": "LiquidAI/LFM2-350M", # LFM2 - 350M
7
+ "LFM2-700M": "LiquidAI/LFM2-700M", # LFM2 - 700M
8
+ "LFM2-1.2B": "LiquidAI/LFM2-1.2B", # LFM2 - 1.2B
9
+ "LFM2.5-1.2B": "LiquidAI/LFM2.5-1.2B-Instruct", # LFM2.5 - 1.2B
10
+ "LFM2.5-1.2B-Thinking": "LiquidAI/LFM2.5-1.2B-Thinking", # LFM2.5 - 1.2B (Thinking)
11
+ "LFM2-2.6B": "LiquidAI/LFM2-2.6B", # LFM2 - 2.6B
12
+ "LFM2-8B-A1B": "LiquidAI/LFM2-8B-A1B", # LFM2 - 8B-A1B
13
+ "LFM2-24B-A2B": "LiquidAI/LFM2-24B-A2B", # LFM2 - 24B-A2B
14
  }
15
  model_cache = {}
16
  def load_model(model_key):