VcRlAgent committed on
Commit
6887954
·
1 Parent(s): c7fafda

Fix AutoConfig

Browse files
Files changed (1) hide show
  1. app.py +22 -19
app.py CHANGED
@@ -65,29 +65,32 @@ def load_llamaindex_stack(model_id: str, max_new_tokens: int, temperature: float
65
 
66
  # Wrap the same tiny HF model for LlamaIndex
67
 
68
- config = AutoConfig.from_pretrained(model_id)
69
- if config.model_type in ["t5", "mt5", "bart", "mbart", "pegasus", "marian", "prophetnet"]:
70
- task = "text2text-generation" # encoder-decoder / seq2seq
71
- else:
72
- task = "text-generation"
73
-
74
  try:
75
- llm = HuggingFaceLLM(
76
- model_name=model_id,
77
- tokenizer_name=model_id,
78
- task=task,
79
- context_window=2048,
80
- generate_kwargs={"max_new_tokens": max_new_tokens, "temperature": temperature},
81
- device_map="cpu",
82
- )
83
- except TypeError:
84
- llm = HuggingFaceLLM(
85
  model_name=model_id,
86
- tokenizer_name=model_id,
 
87
  context_window=2048,
88
- generate_kwargs={"max_new_tokens": max_new_tokens, "temperature": float(temperature)},
89
  device_map="cpu",
90
- )
 
 
 
 
 
 
 
 
 
 
91
 
92
  Settings.embed_model = embed
93
  Settings.llm = llm
 
65
 
66
  # Wrap the same tiny HF model for LlamaIndex
67
 
 
 
 
 
 
 
68
  try:
69
+ config = AutoConfig.from_pretrained(model_id)
70
+ if config.model_type in ["t5", "mt5", "bart", "mbart", "pegasus", "marian", "prophetnet"]:
71
+ task = "text2text-generation" # encoder-decoder / seq2seq
72
+ else:
73
+ task = "text-generation"
74
+
75
+ try:
76
+ llm = HuggingFaceLLM(
 
 
77
  model_name=model_id,
78
+ tokenizer_name=model_id,
79
+ task=task,
80
  context_window=2048,
81
+ generate_kwargs={"max_new_tokens": max_new_tokens, "temperature": temperature},
82
  device_map="cpu",
83
+ )
84
+ except TypeError:
85
+ llm = HuggingFaceLLM(
86
+ model_name=model_id,
87
+ tokenizer_name=model_id,
88
+ context_window=2048,
89
+ generate_kwargs={"max_new_tokens": max_new_tokens, "temperature": float(temperature)},
90
+ device_map="cpu",
91
+ )
92
+ except Exception as e:
93
+ config = None
94
 
95
  Settings.embed_model = embed
96
  Settings.llm = llm