ALVHB95 committed on
Commit
eb3e5ca
·
1 Parent(s): f67b750

new model

Browse files
Files changed (2) hide show
  1. app.py +19 -9
  2. requirements.txt +2 -0
app.py CHANGED
@@ -41,8 +41,8 @@ from pydantic.v1 import BaseModel, Field
41
  from huggingface_hub import snapshot_download
42
 
43
  # Local transformers pipeline (no API token required)
44
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
45
- from langchain_community.llms import HuggingFacePipeline
46
 
47
  # Theming + URL list
48
  import theme
@@ -170,6 +170,8 @@ SYSTEM_TEMPLATE = (
170
  "{format_instructions}"
171
  )
172
 
 
 
173
  qa_prompt = ChatPromptTemplate.from_template(
174
  SYSTEM_TEMPLATE,
175
  partial_variables={"format_instructions": parser.get_format_instructions()},
@@ -177,14 +179,22 @@ qa_prompt = ChatPromptTemplate.from_template(
177
 
178
 
179
  # =============================
180
- # 4) LLM — local, token-free
181
  # =============================
182
- llm = HuggingFaceHub( repo_id="mistralai/Mixtral-8x7B-v0.1",
183
- task="text-generation",
184
- model_kwargs={ "max_new_tokens": 2000,
185
- "top_k": 30,
186
- "temperature": 0.1,
187
- "repetition_penalty": 1.03 }, )
 
 
 
 
 
 
 
 
188
 
189
  # ===========================================
190
  # 5) Chain (memory + robust JSON extraction)
 
41
  from huggingface_hub import snapshot_download
42
 
43
  # Local transformers pipeline (no API token required)
44
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline # (still imported; not used in Hub mode)
45
+ from langchain_community.llms import HuggingFacePipeline, HuggingFaceHub # <-- ADDED: HuggingFaceHub import
46
 
47
  # Theming + URL list
48
  import theme
 
170
  "{format_instructions}"
171
  )
172
 
173
+ # NOTE: Your original pattern kept; if you prefer, you can also do:
174
+ # ChatPromptTemplate.from_template(SYSTEM_TEMPLATE).partial(format_instructions=parser.get_format_instructions())
175
  qa_prompt = ChatPromptTemplate.from_template(
176
  SYSTEM_TEMPLATE,
177
  partial_variables={"format_instructions": parser.get_format_instructions()},
 
179
 
180
 
181
  # =============================
182
+ # 4) LLM — HuggingFace Hub (Mixtral)
183
  # =============================
184
+ # REQUIREMENT: set env var HUGGINGFACEHUB_API_TOKEN
185
+ # (Settings → Variables & secrets in your Space)
186
+ llm = HuggingFaceHub(
187
+ repo_id="mistralai/Mixtral-8x7B-v0.1",
188
+ task="text-generation",
189
+ model_kwargs={
190
+ "max_new_tokens": 2000,
191
+ "top_k": 30,
192
+ "temperature": 0.1,
193
+ "repetition_penalty": 1.03,
194
+ # You may also pass: "return_full_text": False
195
+ },
196
+ )
197
+
198
 
199
  # ===========================================
200
  # 5) Chain (memory + robust JSON extraction)
requirements.txt CHANGED
@@ -6,12 +6,14 @@ sentence-transformers>=3.0.0
6
  tensorflow==2.17.0
7
  typing-extensions>=4.7
8
  tf-keras==2.17.0
 
9
 
10
  # LangChain split packages (v0.2 style)
11
  langchain==0.2.12
12
  langchain-community==0.2.10
13
  langchain-text-splitters==0.2.2
14
  langchain-core==0.2.27
 
15
 
16
  # Vector store
17
  chromadb==0.5.3
 
6
  tensorflow==2.17.0
7
  typing-extensions>=4.7
8
  tf-keras==2.17.0
9
+ pillow
10
 
11
  # LangChain split packages (v0.2 style)
12
  langchain==0.2.12
13
  langchain-community==0.2.10
14
  langchain-text-splitters==0.2.2
15
  langchain-core==0.2.27
16
+ langchain-huggingface>=0.0.3
17
 
18
  # Vector store
19
  chromadb==0.5.3