dishitanagi committed on
Commit
1a5492c
·
verified ·
1 Parent(s): 18f262a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -24,7 +24,7 @@ from transformers import AutoTokenizer, AutoModel
24
  hf_token = os.environ.get("llmtoken") # HF_TOKEN is the name of the secret in your Space
25
 
26
  # Step 2: Specify the model
27
- model_name = "Qwen/Qwen3-4B-Instruct-2507"
28
 
29
 
30
  # Step 3: Load tokenizer and model using the token
@@ -43,13 +43,13 @@ arg_dict = {
43
  'all_models':[
44
  # "meta-llama/Llama-3.1-8B", # too big for the A10G 24GB
45
  #"meta-llama/Llama-3.2-3B",
46
- # "gpt2",
47
  # "meta-llama/Llama-3.2-1B",
48
  # "Qwen/Qwen3-8B", # too big for the A10G 24GB
49
  # "Qwen/Qwen3-4B",
50
  # "Qwen/Qwen3-1.7B",
51
  # "Qwen/Qwen3-0.6B",
52
- "Qwen/Qwen3-4B-Instruct-2507",
53
  # "Qwen/Qwen3-4B-Thinking-2507",
54
  ],
55
  # 'load_fp16' : True,
 
24
  hf_token = os.environ.get("llmtoken") # HF_TOKEN is the name of the secret in your Space
25
 
26
  # Step 2: Specify the model
27
+ model_name = "gpt2"
28
 
29
 
30
  # Step 3: Load tokenizer and model using the token
 
43
  'all_models':[
44
  # "meta-llama/Llama-3.1-8B", # too big for the A10G 24GB
45
  #"meta-llama/Llama-3.2-3B",
46
+ "gpt2",
47
  # "meta-llama/Llama-3.2-1B",
48
  # "Qwen/Qwen3-8B", # too big for the A10G 24GB
49
  # "Qwen/Qwen3-4B",
50
  # "Qwen/Qwen3-1.7B",
51
  # "Qwen/Qwen3-0.6B",
52
+ #"Qwen/Qwen3-4B-Instruct-2507",
53
  # "Qwen/Qwen3-4B-Thinking-2507",
54
  ],
55
  # 'load_fp16' : True,