VcRlAgent committed on
Commit
63a82d8
·
1 Parent(s): 6887954

Error handling for LLMWrapper and AutoConfig

Browse files
Files changed (1) hide show
  1. app.py +2 -15
app.py CHANGED
@@ -50,19 +50,6 @@ def load_llamaindex_stack(model_id: str, max_new_tokens: int, temperature: float
50
  # Tiny, fast sentence-transformers model for embeddings
51
  embed = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
52
 
53
- """
54
- tok = AutoTokenizer.from_pretrained(model_id)
55
- mdl = AutoModelForSeq2SeqLM.from_pretrained(model_id)
56
- text2text = pipeline(
57
- "text2text-generation",
58
- model=mdl,
59
- tokenizer=tok,
60
- max_new_tokens=max_new_tokens,
61
- temperature=float(temperature)
62
- )
63
- """
64
- #llm = HuggingFaceLLM(pipeline=text2text)
65
-
66
  # Wrap the same tiny HF model for LlamaIndex
67
 
68
  try:
@@ -92,8 +79,8 @@ def load_llamaindex_stack(model_id: str, max_new_tokens: int, temperature: float
92
  except Exception as e:
93
  config = None
94
 
95
- Settings.embed_model = embed
96
- Settings.llm = llm
97
 
98
  # Load small docs (data/notes.txt)
99
  docs = SimpleDirectoryReader(input_dirs=["data"]).load_data()
 
50
  # Tiny, fast sentence-transformers model for embeddings
51
  embed = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  # Wrap the same tiny HF model for LlamaIndex
54
 
55
  try:
 
79
  except Exception as e:
80
  config = None
81
 
82
+ #Settings.embed_model = embed
83
+ #Settings.llm = llm
84
 
85
  # Load small docs (data/notes.txt)
86
  docs = SimpleDirectoryReader(input_dirs=["data"]).load_data()