File size: 2,740 Bytes
fc8c847
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
from huggingface_hub import login
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, tool, Tool, load_tool, InferenceClientModel
from smolagents.models import ChatMessage
from transformers import pipeline
import cohere
from gradio_client import Client
from newsapi import NewsApiClient
import requests
import gradio as gr
import os
from dotenv import load_dotenv
import json
from transformers import pipeline
from mistralai import Mistral

# Load API keys and other secrets from a local .env file into os.environ.
load_dotenv()

# NewsAPI key — read here at import time; mainFunc re-reads it per call.
newsApiKey = os.getenv('NEWSAPI_KEY')
#grok_api_key = os.getenv('GROK_API_KEY')
#HF_TOKEN = os.getenv("HF_TOKEN")
#login(token=HF_TOKEN)
# Cohere key is read but not used anywhere in this file — presumably kept
# for a future/alternate backend; verify before removing.
COHERE_API_KEY = os.getenv('COHERE_API_KEY')

from groq import Groq

#client = Groq(
    #api_key=os.environ.get("GROQ_API_KEY"),
#)



# Mistral configuration. NOTE(review): os.environ[...] raises KeyError at
# import time if MISTRAL_API_KEY is unset — consider a friendlier check.
api_key = os.environ["MISTRAL_API_KEY"]
model = "mistral-large-latest"

# Shared Mistral client used by mainFunc below.
client = Mistral(api_key=api_key)



#HfApiModel("mistralai/Mistral-7B-v0.1-chat")

def mainFunc(articles, risk_factor):
    """Ask the Mistral model whether a specific risk factor applies to a company.

    Args:
        articles: A dict (or JSON string that decodes to one) expected to
            contain articles['company_info']['documents'] and
            articles['news_data']['articles_summary'] — shape assumed from
            usage below; confirm against the caller.
        risk_factor: Free-text description of the risk factor to evaluate.

    Returns:
        str: The model's response — a yes/no verdict followed by reasons.

    Raises:
        ValueError: If the NEWSAPI_KEY environment variable is missing.
        json.JSONDecodeError: If `articles` is a string but not valid JSON.
        KeyError: If `articles` lacks the expected keys.
    """
    # Gradio text inputs arrive as strings; accept a pre-parsed dict too.
    if isinstance(articles, str):
        articles = json.loads(articles)

    # Fail fast on missing configuration.  NOTE: the key is only checked
    # for presence here; this function never uses it directly.
    newsApiKey = os.getenv('NEWSAPI_KEY')
    if not newsApiKey:
        # Message now names the actual env var being read (was "NEWS_API_KEY").
        raise ValueError("Missing NEWSAPI_KEY in environment variables.")

    print(f"data structure of articles is: {articles}")

    prompt = f"""
You are an agent that analyzes the risk factors for a company, by using the data from:
- Documents: {articles['company_info']['documents']}
- News summaries: {articles['news_data']['articles_summary']}. The user wants to know whether a specific risk factor exists: {risk_factor}. Use the information you are provided to evaluate whether it does. respond in the format of yes/no, and then provide reason/s.


"""

    # Send the context as the system prompt and the risk factor as the
    # user turn; `client` and `model` are module-level (Mistral).
    chat_completion = client.chat.complete(
        model=model,
        messages=[
            {
                "role": "system",
                "content": prompt
            },
            {
                "role": "user",
                "content": f"{risk_factor}",
            }
        ],
    )
    print(chat_completion.choices[0].message.content)
    return chat_completion.choices[0].message.content
    # (Removed unreachable `return result` — `result` was never defined.)


#add agents it can hand off to





#agent.prompt_templates["system_prompt"] = agent.prompt_templates["system_prompt"] + "\n when asked for most recent articles, return each article with its dict/list values, rather than just the title"
#agent.run("what are the most recent articles about Microsoft?")
#print(agent.prompt_templates["system_prompt"])


#huggingface-cli login - to set access token in temrainl and save it
#translation function works well

# Gradio UI: two free-text inputs (the articles JSON and the risk factor)
# wired straight to mainFunc; its string return is shown as the output.
demo = gr.Interface(
    fn=mainFunc,
    inputs=["text", "text"],
    outputs="text",
    title="dynamic specific risk",
    description="finds info about a company"
)

# share=True requests a public tunnel URL in addition to the local server.
demo.launch(share=True)