Prof-Hunter committed on
Commit
badd0f9
·
verified ·
1 Parent(s): 4dd65e0

Update agents.py

Browse files
Files changed (1) hide show
  1. agents.py +33 -24
agents.py CHANGED
@@ -1,32 +1,41 @@
1
- from transformers import pipeline
2
- from prompts import *
3
-
4
- generator = pipeline(
5
- "text-generation",
6
- model="HuggingFaceH4/zephyr-7b-beta",
7
- max_new_tokens=300,
8
- temperature=0.7
9
  )
10
 
 
 
 
11
  def call_llm(prompt):
12
- return generator(prompt)[0]["generated_text"]
13
 
14
- def market_agent(problem, memory):
15
- prompt = MARKET_PROMPT.format(problem=problem, memory=memory)
16
- return call_llm(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
- def finance_agent(problem, memory):
19
- prompt = FINANCE_PROMPT.format(problem=problem, memory=memory)
20
- return call_llm(prompt)
21
 
22
- def risk_agent(problem, memory):
23
- prompt = RISK_PROMPT.format(problem=problem, memory=memory)
24
- return call_llm(prompt)
25
 
26
- def ethics_agent(problem, memory):
27
- prompt = ETHICS_PROMPT.format(problem=problem, memory=memory)
28
- return call_llm(prompt)
29
 
30
- def synthesiser_agent(problem, memory):
31
- prompt = SYNTHESIS_PROMPT.format(problem=problem, memory=memory)
32
- return call_llm(prompt)
 
1
import os

from huggingface_hub import InferenceClient

# Read the API token from the environment. On HF Spaces this is set via the
# HF_TOKEN secret; the original comment claimed the token is injected
# "automatically", but the client is explicitly given os.getenv("HF_TOKEN").
# If the variable is unset, token is None and the client runs unauthenticated.
client = InferenceClient(
    token=os.getenv("HF_TOKEN")
)

# Hosted inference model queried by every agent in this module.
MODEL = "HuggingFaceH4/zephyr-7b-beta"
10
+
11
+
12
def call_llm(prompt):
    """Send *prompt* to the configured hosted model and return its completion.

    Uses the module-level ``client`` and ``MODEL`` with a fixed generation
    budget (400 new tokens, temperature 0.7).
    """
    return client.text_generation(
        prompt=prompt,
        model=MODEL,
        max_new_tokens=400,
        temperature=0.7,
    )
22
+
23
+
24
def market_agent(problem, memory, prompt):
    """Fill the market-analysis prompt template and query the LLM with it."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
26
+
27
+
28
def finance_agent(problem, memory, prompt):
    """Fill the finance prompt template and query the LLM with it."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
30
+
31
+
32
def risk_agent(problem, memory, prompt):
    """Fill the risk-assessment prompt template and query the LLM with it."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
34
 
 
 
 
35
 
36
def ethics_agent(problem, memory, prompt):
    """Fill the ethics prompt template and query the LLM with it."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
 
38
 
 
 
 
39
 
40
def synthesis_agent(problem, memory, prompt):
    """Fill the synthesis prompt template and query the LLM with it."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)