SmokeyBandit committed on
Commit
6e5459c
·
verified ·
1 Parent(s): 1b4bab9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +136 -4
app.py CHANGED
@@ -1,10 +1,142 @@
1
- # app.py
2
  import sys
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  def main():
5
- print("Hello from my Python script on Hugging Face Spaces!")
6
- # Your code here
7
- # e.g., run your LangChain script, do computations, etc.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
  if __name__ == "__main__":
10
  main()
 
1
+ import os
2
  import sys
3
+ import logging
4
+ from typing import Optional
5
+
6
+ # Configure logging
7
+ logging.basicConfig(
8
+ level=logging.INFO,
9
+ format='%(asctime)s - %(levelname)s - %(message)s',
10
+ handlers=[logging.StreamHandler(sys.stdout)]
11
+ )
12
+ logger = logging.getLogger(__name__)
13
+
14
def setup_environment() -> bool:
    """Point the Hugging Face cache at a writable directory.

    Spaces containers only guarantee that /tmp is writable, so both HF_HOME
    and the legacy TRANSFORMERS_CACHE variable are redirected there.

    Returns:
        True if the cache directory exists and the variables are set,
        False if anything went wrong (the error is logged, not raised).
    """
    cache_dir = "/tmp/hf"
    try:
        # Create the directory first so the env vars never point at a
        # path that does not exist.
        os.makedirs(cache_dir, exist_ok=True)
        # TRANSFORMERS_CACHE is deprecated in newer transformers releases;
        # it is kept alongside HF_HOME for backward compatibility.
        os.environ["TRANSFORMERS_CACHE"] = cache_dir
        os.environ["HF_HOME"] = cache_dir
        # Lazy %-style args: the message is only formatted if emitted.
        logger.info("Cache directory set up at %s", cache_dir)
        return True
    except OSError as e:
        # makedirs / environ writes fail with OSError; anything else is a
        # genuine bug that should surface.
        logger.error("Failed to setup environment: %s", e)
        return False
28
+
29
def initialize_model():
    """Load the text-generation pipeline on CPU.

    Returns:
        The transformers pipeline on success, or None on failure (the
        error is logged rather than raised).
    """
    try:
        # Imported lazily so a missing transformers install produces a
        # logged error instead of crashing the whole module import.
        from transformers import pipeline

        logger.info("Loading model pipeline...")
        pipe = pipeline(
            "text-generation",
            model="bigscience/bloom-560m",  # small model to fit Space CPU/RAM limits
            tokenizer="bigscience/bloom-560m",
            device=-1,  # force CPU; no GPU on the free Spaces tier
            max_new_tokens=256,
            # NOTE: force_download=True was removed -- it re-downloaded the
            # model on every startup, defeating the cache configured in
            # setup_environment().
            # low_cpu_mem_usage is a from_pretrained kwarg, so it is passed
            # through model_kwargs rather than to pipeline() directly.
            model_kwargs={"low_cpu_mem_usage": True},
        )
        logger.info("Model pipeline loaded successfully")
        return pipe
    except Exception as e:
        # Broad catch is deliberate: this is a top-level boundary and the
        # caller checks for None.
        logger.error("Failed to initialize model: %s", e)
        return None
49
+
50
def setup_agent(pipe):
    """Wrap the model pipeline in a LangChain agent with one report tool.

    Args:
        pipe: A transformers text-generation pipeline (from initialize_model).

    Returns:
        The initialized agent, or None on failure (the error is logged).
    """
    try:
        from langchain_community.llms import HuggingFacePipeline
        from langchain.agents import initialize_agent
        from langchain.agents.agent_types import AgentType
        from langchain.tools import BaseTool

        # Adapt the transformers pipeline to LangChain's LLM interface.
        llm = HuggingFacePipeline(pipeline=pipe)

        class MachineryReportTool(BaseTool):
            # BaseTool is a pydantic model: fields must carry type
            # annotations or current langchain versions fail to build
            # the subclass.
            name: str = "machinery_report"
            description: str = (
                "Generates a report on mini construction equipment including "
                "specifications and market analysis."
            )

            def _run(self, query: str) -> str:
                # Static, pre-canned report: keeps memory use minimal.
                return """
                Mini Construction Equipment Report:
                1. Basic Gas-Powered Unit: $3,700, 14HP
                2. DRT 450: $5,500-6,300, Honda motor
                3. Mini Skid Steer: $15,000, Kubota diesel
                Market Analysis: 30-40% cheaper than US equivalents
                """

            def _arun(self, query: str) -> str:
                raise NotImplementedError("Async not supported.")

        # Minimal agent: one tool, one iteration, graceful stop.
        agent = initialize_agent(
            tools=[MachineryReportTool()],
            llm=llm,
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True,
            max_iterations=1,  # the tool's answer is final; no re-planning
            early_stopping_method="generate",  # prevents infinite loops
        )
        logger.info("Agent initialized successfully")
        return agent
    except Exception as e:
        # Top-level boundary: caller checks for None.
        logger.error("Failed to setup agent: %s", e)
        return None
96
+
97
def run_agent_query(agent, query: str) -> Optional[str]:
    """Run *query* through *agent*, converting failures into a message.

    Args:
        agent: An initialized LangChain agent (anything with a .run method).
        query: The natural-language question to answer.

    Returns:
        The agent's answer, or an "Error processing query: ..." string if
        the call raised -- callers always get printable text.
    """
    try:
        logger.info("Running agent query...")
        # Keep the try body minimal: only the call that can raise.
        result = agent.run(query)
    except Exception as e:
        # logger.exception also records the traceback, which plain
        # logger.error dropped.
        logger.exception("Error running agent query: %s", e)
        return f"Error processing query: {str(e)}"
    logger.info("Query completed successfully")
    return result
107
 
108
def main():
    """Entry point: wire up environment, model, and agent, then run one query."""
    logger.info("Starting application...")

    # Each stage bails out early on failure; the failure itself has
    # already been logged by the stage.
    if not setup_environment():
        return

    pipe = initialize_model()
    if pipe is None:
        return

    agent = setup_agent(pipe)
    if agent is None:
        return

    question = (
        "Please provide a brief overview of the mini construction equipment "
        "project, focusing on key specifications and market positioning."
    )
    answer = run_agent_query(agent, question)

    if answer:
        print("\n===== AGENT OUTPUT =====")
        print(answer)
        print("=======================")
140
 
141
# Run the agent workflow only when executed as a script, not on import.
if __name__ == "__main__":
    main()