jsemrau committed on
Commit
0dccdbe
·
1 Parent(s): 930b9c7

testing model initialization

Browse files
Files changed (1) hide show
  1. app.py +67 -67
app.py CHANGED
@@ -81,72 +81,6 @@ USE_LOCAL_MODELS = os.getenv('USE_LOCAL_MODELS', 'false').lower() == 'true'
81
  if not HF_TOKEN:
82
  print("❌ HuggingFace token not found. Please check your .env file.")
83
 
84
- try:
85
- # Login to HuggingFace
86
- login(HF_TOKEN, add_to_git_credential=False)
87
-
88
- # Initialize NER model
89
- print("Initialize NER")
90
- ner_model = GLiNER.from_pretrained("knowledgator/modern-gliner-bi-large-v1.0")
91
- print(f"Initialized NER")
92
-
93
-
94
- llm_engine = InferenceClientModel(
95
- api_key=HF_TOKEN,
96
- model_id="Qwen/Qwen3-Coder-480B-A35B-Instruct" ,
97
- timeout=3000,
98
- provider="fireworks-ai",
99
- temperature=0.25
100
- )
101
-
102
-
103
- # Initialize agent
104
- agent = CodeAgent(
105
- model=llm_engine,
106
- tools=[],
107
- add_base_tools=False,
108
- name="data_agent",
109
- description="Runs data analysis for you.",
110
- max_steps=1,
111
- )
112
-
113
- # Initialize agent
114
- writer_agent = CodeAgent(
115
- model=llm_engine,
116
- tools=[],
117
- add_base_tools=False,
118
- name="writer_agent",
119
- description="Write an engaging and creative LinkedIn post.",
120
- max_steps=5,
121
- )
122
-
123
- writer_engine = InferenceClientModel(
124
- api_key=HF_TOKEN,
125
- model_id="Qwen/Qwen3-Coder-480B-A35B-Instruct" ,
126
- timeout=3000,
127
- provider="fireworks-ai",
128
- temperature=0.4
129
- )
130
-
131
-
132
- # Initialize agent
133
- editor_agent = CodeAgent(
134
- model=writer_engine,
135
- tools=[],
136
- add_base_tools=False,
137
- name="editor_agent",
138
- description="Edits LinkedIn post.",
139
- max_steps=5,
140
- )
141
-
142
- # Add system prompt
143
- #system_prompt = f"You are a strategic digital marketing manager focused on improving my social footprint. My interests are {interests}. You will receive a social media post. Please let me know which one I should react on."
144
- #agent.prompt_templates["system_prompt"] += system_prompt
145
-
146
- print("✅ Models initialized successfully!")
147
-
148
- except Exception as e:
149
- print( f"❌ Error initializing models: {str(e)}")
150
 
151
  def check_environment():
152
  """Check if required environment variables are set"""
@@ -335,7 +269,73 @@ def process_single_article(post, interests):
335
  """Process a single news article and generate LinkedIn post"""
336
  global agent, writer_agent, ner_model, editor_agent
337
 
338
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
 
340
  if agent is None or ner_model is None or writer_agent is None or editor_agent is None:
341
  return {"error": f"Models agent {agent}, ner_model {type(ner_model)} write_agent {writer_agent}, editor_agent {editor_agent} not initialized. Please initialize models first."}
 
81
  if not HF_TOKEN:
82
  print("❌ HuggingFace token not found. Please check your .env file.")
83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
  def check_environment():
86
  """Check if required environment variables are set"""
 
269
  """Process a single news article and generate LinkedIn post"""
270
  global agent, writer_agent, ner_model, editor_agent
271
 
272
+ try:
273
+ # Login to HuggingFace
274
+ login(HF_TOKEN, add_to_git_credential=False)
275
+
276
+ # Initialize NER model
277
+ print("Initialize NER")
278
+ ner_model = GLiNER.from_pretrained("knowledgator/modern-gliner-bi-large-v1.0")
279
+ print(f"Initialized NER")
280
+
281
+
282
+ llm_engine = InferenceClientModel(
283
+ api_key=HF_TOKEN,
284
+ model_id="Qwen/Qwen3-Coder-480B-A35B-Instruct" ,
285
+ timeout=3000,
286
+ provider="fireworks-ai",
287
+ temperature=0.25
288
+ )
289
+
290
+
291
+ # Initialize agent
292
+ agent = CodeAgent(
293
+ model=llm_engine,
294
+ tools=[],
295
+ add_base_tools=False,
296
+ name="data_agent",
297
+ description="Runs data analysis for you.",
298
+ max_steps=1,
299
+ )
300
+
301
+ # Initialize agent
302
+ writer_agent = CodeAgent(
303
+ model=llm_engine,
304
+ tools=[],
305
+ add_base_tools=False,
306
+ name="writer_agent",
307
+ description="Write an engaging and creative LinkedIn post.",
308
+ max_steps=5,
309
+ )
310
+
311
+ writer_engine = InferenceClientModel(
312
+ api_key=HF_TOKEN,
313
+ model_id="Qwen/Qwen3-Coder-480B-A35B-Instruct" ,
314
+ timeout=3000,
315
+ provider="fireworks-ai",
316
+ temperature=0.4
317
+ )
318
+
319
+
320
+ # Initialize agent
321
+ editor_agent = CodeAgent(
322
+ model=writer_engine,
323
+ tools=[],
324
+ add_base_tools=False,
325
+ name="editor_agent",
326
+ description="Edits LinkedIn post.",
327
+ max_steps=5,
328
+ )
329
+
330
+ # Add system prompt
331
+ #system_prompt = f"You are a strategic digital marketing manager focused on improving my social footprint. My interests are {interests}. You will receive a social media post. Please let me know which one I should react on."
332
+ #agent.prompt_templates["system_prompt"] += system_prompt
333
+
334
+ print("✅ Models initialized successfully!")
335
+
336
+ except Exception as e:
337
+ print( f"❌ Error initializing models: {str(e)}")
338
+
339
 
340
  if agent is None or ner_model is None or writer_agent is None or editor_agent is None:
341
  return {"error": f"Models agent {agent}, ner_model {type(ner_model)} write_agent {writer_agent}, editor_agent {editor_agent} not initialized. Please initialize models first."}