SmokeyBandit committed on
Commit
ee67aea
·
verified ·
1 Parent(s): 5bdd078

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +9 -4
main.py CHANGED
@@ -1,4 +1,9 @@
1
  import os
 
 
 
 
 
2
  from langchain.agents import initialize_agent
3
  from langchain.llms import HuggingFacePipeline
4
  from langchain.tools import BaseTool
@@ -7,13 +12,13 @@ from transformers import pipeline
7
  # Set up the Hugging Face pipeline to run on CPU (device=-1 ensures CPU usage)
8
  pipe = pipeline(
9
  "text-generation",
10
- model="bigscience/bloom", # You can change to another CPU-friendly model if desired
11
- tokenizer="bigscience/bloom",
12
- device=-1, # -1 forces CPU mode
13
  max_new_tokens=512
14
  )
15
 
16
- # Initialize the LangChain LLM with our Hugging Face pipeline
17
  llm = HuggingFacePipeline(pipeline=pipe)
18
 
19
  # Define a custom tool that compiles the research report
 
import os

# Set the Hugging Face cache directories to a writable location BEFORE
# importing transformers/langchain — the libraries read these environment
# variables at import time, so setting them later has no effect.
# NOTE(review): recent transformers releases deprecate TRANSFORMERS_CACHE
# in favor of HF_HOME; both are set here for compatibility — confirm against
# the pinned transformers version.
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf"
os.environ["HF_HOME"] = "/tmp/hf"

from langchain.agents import initialize_agent
from langchain.llms import HuggingFacePipeline
from langchain.tools import BaseTool

from transformers import pipeline

# Set up the Hugging Face pipeline to run on CPU (device=-1 ensures CPU usage).
pipe = pipeline(
    "text-generation",
    model="bigscience/bloom-560m",      # Use a smaller, CPU-friendly model
    tokenizer="bigscience/bloom-560m",
    device=-1,                          # -1 forces CPU mode
    max_new_tokens=512,
)

# Initialize the LangChain LLM with our HuggingFace pipeline.
llm = HuggingFacePipeline(pipeline=pipe)

# Define a custom tool that compiles the research report