CatoG commited on
Commit
15f0919
·
verified ·
1 Parent(s): 5d3b5c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -13
app.py CHANGED
@@ -23,8 +23,68 @@ MODEL_OPTIONS = [
23
  "Qwen/Qwen3-8B",
24
  "moonshotai/Kimi-K2-Tinking",
25
  "openai/gpt-oss-20b",
26
- "zai-org/GLM-4.6",
27
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
 
30
  # Suppress warnings
@@ -244,7 +304,37 @@ with gr.Blocks(title="QA Bot - PDF Question Answering") as demo:
244
  "sentence-transformers/all-mpnet-base-v2",
245
  "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
246
  "BAAI/bge-small-en-v1.5",
247
- "BAAI/bge-base-en-v1.5"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  ],
249
  value="sentence-transformers/all-MiniLM-L6-v2",
250
  info="Model used for generating embeddings"
@@ -293,16 +383,6 @@ with gr.Blocks(title="QA Bot - PDF Question Answering") as demo:
293
 
294
  gr.Markdown(
295
  """
296
- ### ๐Ÿ“ Instructions
297
- 1. Upload a PDF document
298
- 2. Enter your question in the text box
299
- 3. (Optional) Select a different LLM model
300
- 4. (Optional) Adjust advanced settings for fine-tuning
301
- 5. Click "Ask Question" to get an answer
302
-
303
- ### ๐Ÿ” Setup
304
- This Space requires a HuggingFace API token. Set the following in your Space secrets:
305
- - `HF_TOKEN`: Your HuggingFace API token (get it from https://huggingface.co/settings/tokens)
306
  """
307
  )
308
 
 
23
  "Qwen/Qwen3-8B",
24
  "moonshotai/Kimi-K2-Tinking",
25
  "openai/gpt-oss-20b",
26
+ "zai-org/GLM-4.6",
27
+ "moonshotai/Kimi-K2-Thinking",
28
+ "meta-llama/Llama-3.1-8B-Instruct",
29
+ "allenai/Olmo-3-32B-Think",
30
+ "allenai/Olmo-3-7B-Instruct",
31
+ "openai/gpt-oss-20b",
32
+ "allenai/Olmo-3-7B-Think",
33
+ "zai-org/GLM-4.6",
34
+ "openai/gpt-oss-120b",
35
+ "MiniMaxAI/MiniMax-M2",
36
+ "meta-llama/Llama-3.2-3B-Instruct",
37
+ "Qwen/Qwen2.5-7B-Instruct",
38
+ "PrimeIntellect/INTELLECT-3-FP8",
39
+ "deepseek-ai/DeepSeek-V3.2-Exp",
40
+ "Qwen/Qwen3-4B-Instruct-2507",
41
+ "dphn/Dolphin-Mistral-24B-Venice-Edition",
42
+ "meta-llama/Llama-3.1-8B",
43
+ "Qwen/Qwen3-Next-80B-A3B-Instruct",
44
+ "deepseek-ai/DeepSeek-R1",
45
+ "Qwen/Qwen3-8B",
46
+ "Qwen/Qwen3-Coder-30B-A3B-Instruct",
47
+ "meta-llama/Llama-3.2-1B-Instruct",
48
+ "moonshotai/Kimi-K2-Instruct",
49
+ "nvidia/NVIDIA-Nemotron-Nano-12B-v2",
50
+ "meta-llama/Meta-Llama-3-8B-Instruct",
51
+ "meta-llama/Llama-3.3-70B-Instruct",
52
+ "Qwen/Qwen3-32B",
53
+ "HuggingFaceTB/SmolLM3-3B",
54
+ "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
55
+ "zai-org/GLM-4.5-Air",
56
+ "Qwen/Qwen3-4B-Thinking-2507",
57
+ "moonshotai/Kimi-K2-Instruct-0905",
58
+ "Qwen/Qwen3-1.7B",
59
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
60
+ "Qwen/Qwen3-235B-A22B-Instruct-2507",
61
+ "Qwen/Qwen3-30B-A3B-Instruct-2507",
62
+ "baichuan-inc/Baichuan-M2-32B",
63
+ "mistralai/Mistral-7B-Instruct-v0.2",
64
+ "meta-llama/Meta-Llama-3-8B",
65
+ "Qwen/Qwen2.5-1.5B-Instruct",
66
+ "Qwen/Qwen3-Next-80B-A3B-Thinking",
67
+ "deepseek-ai/DeepSeek-V3.1-Terminus",
68
+ "HuggingFaceH4/zephyr-7b-beta",
69
+ "google/gemma-2-2b-it",
70
+ "meta-llama/Llama-Guard-3-8B",
71
+ "Qwen/Qwen2.5-Coder-7B-Instruct",
72
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
73
+ "Goekdeniz-Guelmez/Josiefied-Qwen3-8B-abliterated-v1",
74
+ "darkc0de/XortronCriminalComputingConfig",
75
+ "ArliAI/QwQ-32B-ArliAI-RpR-v4",
76
+ "inclusionAI/Ling-1T",
77
+ "Gryphe/MythoMax-L2-13b",
78
+ "Qwen/Qwen2.5-7B",
79
+ "Qwen/Qwen2.5-Coder-1.5B-Instruct",
80
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
81
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
82
+ "DeepHat/DeepHat-V1-7B",
83
+ "Qwen/Qwen3-14B",
84
+ "Qwen/Qwen3-30B-A3B",
85
+ "Intelligent-Internet/II-Medical-8B",
86
+ "zai-org/GLM-4.5",
87
+ ]
88
 
89
 
90
  # Suppress warnings
 
304
  "sentence-transformers/all-mpnet-base-v2",
305
  "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
306
  "BAAI/bge-small-en-v1.5",
307
+ "BAAI/bge-base-en-v1.5",
308
+ "google/embeddinggemma-300m",
309
+ "sentence-transformers/all-MiniLM-L6-v2",
310
+ "BAAI/bge-m3",
311
+ "Qwen/Qwen3-Embedding-8B",
312
+ "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
313
+ "MongoDB/mdbr-leaf-mt",
314
+ "BAAI/bge-base-en-v1.5",
315
+ "intfloat/multilingual-e5-large",
316
+ "ai-forever/ru-en-RoSBERTa",
317
+ "cointegrated/rubert-tiny2",
318
+ "jhgan/ko-sroberta-multitask",
319
+ "sentence-transformers/all-mpnet-base-v2",
320
+ "intfloat/multilingual-e5-small",
321
+ "mixedbread-ai/mxbai-embed-xsmall-v1",
322
+ "Snowflake/snowflake-arctic-embed-l-v2.0",
323
+ "sentence-transformers/LaBSE",
324
+ "sentence-transformers/all-MiniLM-L12-v2",
325
+ "sentence-transformers/paraphrase-multilingual-mpnet-base-v2",
326
+ "shibing624/text2vec-base-chinese",
327
+ "intfloat/multilingual-e5-base",
328
+ "BAAI/bge-large-en-v1.5",
329
+ "BAAI/bge-small-en-v1.5",
330
+ "BAAI/bge-base-zh-v1.5",
331
+ "mixedbread-ai/mxbai-embed-large-v1",
332
+ "Snowflake/snowflake-arctic-embed-m-v1.5",
333
+ "lokeshch19/ModernPubMedBERT",
334
+ "pritamdeka/S-Biomed-Roberta-snli-multinli-stsb",
335
+ "pritamdeka/S-PubMedBert-MS-MARCO",
336
+ "sentence-transformers/clip-ViT-B-32-multilingual-v1",
337
+ "sentence-transformers/msmarco-MiniLM-L6-v3"
338
  ],
339
  value="sentence-transformers/all-MiniLM-L6-v2",
340
  info="Model used for generating embeddings"
 
383
 
384
  gr.Markdown(
385
  """
 
 
 
 
 
 
 
 
 
 
386
  """
387
  )
388