Spaces:
Sleeping
Sleeping
from huggingface_hub import InferenceClient
import os

# On Hugging Face Spaces the HF_TOKEN environment variable is injected
# automatically, so reading it from the environment is enough here.
client = InferenceClient(token=os.getenv("HF_TOKEN"))

# Model used for every agent call below.
MODEL = "HuggingFaceH4/zephyr-7b-beta"
def call_llm(prompt, max_new_tokens=400, temperature=0.7):
    """Send a prompt to the configured model and return the completion.

    Parameters
    ----------
    prompt : str
        Fully formatted prompt text.
    max_new_tokens : int, optional
        Cap on generated tokens (default 400, the original hard-coded value).
    temperature : float, optional
        Sampling temperature (default 0.7, the original hard-coded value).

    Returns
    -------
    The value returned by ``client.text_generation`` — with the default
    client settings this is the generated text.
    """
    # Generation knobs were hard-coded; they are now parameters with the
    # same defaults, so existing callers are unaffected.
    response = client.text_generation(
        model=MODEL,
        prompt=prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
    )
    return response
def market_agent(problem, memory, prompt):
    """Format the agent's prompt template with problem/memory and query the LLM."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
def finance_agent(problem, memory, prompt):
    """Format the agent's prompt template with problem/memory and query the LLM."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
def risk_agent(problem, memory, prompt):
    """Format the agent's prompt template with problem/memory and query the LLM."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
def ethics_agent(problem, memory, prompt):
    """Format the agent's prompt template with problem/memory and query the LLM."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)
def synthesis_agent(problem, memory, prompt):
    """Format the agent's prompt template with problem/memory and query the LLM."""
    filled_prompt = prompt.format(problem=problem, memory=memory)
    return call_llm(filled_prompt)