from huggingface_hub import login
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, tool, Tool, load_tool, InferenceClientModel
from smolagents.models import ChatMessage
from transformers import pipeline
import cohere
from gradio_client import Client
from newsapi import NewsApiClient
import requests
import gradio as gr
import os
from dotenv import load_dotenv
import json

load_dotenv()

newsApiKey = os.getenv('NEWSAPI_KEY')
#grok_api_key = os.getenv('GROK_API_KEY')
#HF_TOKEN = os.getenv("HF_TOKEN")
#login(token=HF_TOKEN)
COHERE_API_KEY = os.getenv('COHERE_API_KEY')

from groq import Groq

client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)


@tool
def get_company_news_articles(company_name: str) -> list:
    """
    Fetch news articles about a company.

    Args:
        company_name (str): the company to get news articles about
    
    Returns:
        list: a list of articles about the company
    """

    response = requests.get(f'https://newsapi.org/v2/everything?q={company_name}&from=2025-06-01&sortBy=popularity&apiKey={newsApiKey}')
    try:
        data = response.json()
    except Exception as e:
        print("❌ Failed to parse JSON:", str(e))
        return []

    print("πŸ” Full NewsAPI response:", data)

    if data.get("status") != "ok":
        print("❌ NewsAPI error:", data.get("message"))
        return []


    print(f"results from news api get request are: {data.get('articles')}")

    articles = data.get('articles', [])
    return articles
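
# Illustrative example (not executed): the tool returns the raw NewsAPI article dicts.
# "Acme Corp" is a placeholder; the exact fields come from NewsAPI's /v2/everything
# response, typically something like:
#
#   articles = get_company_news_articles("Acme Corp")
#   # [{"source": {...}, "author": "...", "title": "...", "description": "...",
#   #   "url": "...", "publishedAt": "...", "content": "..."}, ...]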
    


class OllamaLLM:
    pass
    # Stub: reuse the CohereLLM implementation below, swapping the Cohere API call for a request to the local Ollama endpoint (localhost port).

class grokLLM:
    pass

class mistralLLM:
    pass
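
# A minimal sketch (left commented out) of how the OllamaLLM stub above could be
# filled in, per its comment: same shape as CohereLLM below, but the generation
# request goes to a local Ollama server. The endpoint path, default port, and the
# "llama3" model name are assumptions, not part of this project.
#
#   class OllamaLLM:
#       def __init__(self, model_name="llama3", base_url="http://localhost:11434"):
#           self.model_name = model_name
#           self.base_url = base_url
#
#       def __call__(self, prompt, **kwargs):
#           if isinstance(prompt, list):
#               prompt = "\n".join(m.get("content", "") for m in prompt)
#           resp = requests.post(f"{self.base_url}/api/generate",
#                                json={"model": self.model_name, "prompt": prompt,
#                                      "stream": False})
#           return ChatMessage(role="assistant", content=resp.json().get("response", ""))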



class CohereLLM:
    def __init__(self, api_key: str = COHERE_API_KEY, model_name: str = "command-r-plus"):
        self.client = cohere.Client(api_key)
        self.model_name = model_name

    def __call__(self, prompt: str, **kwargs) -> str:
        # Drop stop_sequences; it is not forwarded to the Cohere call below
        kwargs.pop('stop_sequences', None)

        if isinstance(prompt, list):
            prompt = self._convert_chat_to_prompt(prompt)

        # Optional: set temperature, max tokens, etc.
        temperature = kwargs.get("temperature", 0.7)
        max_tokens = kwargs.get("max_tokens", 300)

        response = self.client.generate(
            model=self.model_name,
            prompt=prompt,
            max_tokens=max_tokens,
            temperature=temperature,
        )
        #return response.generations[0].text.strip()

        output_text = response.generations[0].text.strip()

        # ✅ Wrap response in ChatMessage
        return ChatMessage(role="assistant", content=output_text)
    
    def _convert_chat_to_prompt(self, messages):
        """
        Convert chat-style message history to a single string prompt.
        Example input: [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi!"}]
        """
        prompt = ""
        for message in messages:
            role = message.get("role", "user")
            content = message.get("content", "")
            if role == "user":
                prompt += f"User: {content}\n"
            elif role == "assistant":
                prompt += f"Assistant: {content}\n"
            else:
                prompt += f"{role.title()}: {content}\n"
        prompt += "Assistant:"  # Final cue for model to respond
        return prompt




cohere_model = CohereLLM()
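
# Quick sanity check (illustrative, left commented out so it doesn't call the API):
# the wrapper takes a plain string or a chat-style message list and returns a
# ChatMessage, which is the shape smolagents expects from a model callable.
#
#   reply = cohere_model([{"role": "user", "content": "Say hello in one word."}])
#   print(reply.content)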





#HfApiModel("mistralai/Mistral-7B-v0.1-chat")

def mainFunc(articles):
    if isinstance(articles, str):
        articles = json.loads(articles)
    newsApiKey = os.getenv('NEWSAPI_KEY')
    if not newsApiKey:
        raise ValueError("Missing NEWSAPI_KEY in environment variables.")
    
    print(f"data structure of articles is: {articles}")

    prompt = f"""
You are an agent that analyzes the risk factors for a company, by using the data from:
- Documents: {articles['company_info']['documents']}
- News summaries: {articles['news_data']['articles_summary']}

Please respond using **only** the following JSON format:

{{
  "risk_factors": [
    {{
      "severity": "Low | Medium | High",
      "specific_event": "the specific type of event, i. e. nuclear war"
      "risk_type": "e.g., Operational Risk, Financial Risk, Reputational Risk, Legal Risk",
      "affected_contracts": "Name or path of the affected document",
      "affected_clauses": "Clause number(s) or section(s)",
      "narrative": {{
        "solutions_in_contract": "Summarize contractual protections or remedies",
        "alternative_mitigation_strategies": "Suggest other ways to reduce the risk",
        "monitoring_tasks": "Define ongoing monitoring or reporting actions"
      }}
    }}
  ],
  "available_data": {{
    "context_items": ["Summary of the company info and risk-related context"],
    "document_count": {len(articles['company_info']['documents'])},
    "content_available": true,
    "news_available": true
  }}
}}
"""


    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": prompt
            },
            {
                "role": "user",
                "content": f"What are the {articles['analysis_request']['analysis_scope']} risk factors for {articles['company_info']['name']}?",
            }
        ],
        model="llama-3.3-70b-versatile",
        # JSON mode requires the word 'json' to appear in the messages/prompt
        response_format={"type": "json_object"},
    )
    print(chat_completion.choices[0].message.content)
    return chat_completion.choices[0].message.content

    
    
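
# Illustrative example (values made up) of the JSON payload mainFunc expects; only the
# keys read above matter: company_info.name, company_info.documents,
# news_data.articles_summary, and analysis_request.analysis_scope.
#
#   {
#     "company_info": {"name": "Acme Corp", "documents": ["contract_a.pdf"]},
#     "news_data": {"articles_summary": "Recent coverage mentions a supplier dispute."},
#     "analysis_request": {"analysis_scope": "operational and legal"}
#   }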


#add agents it can hand off to





#agent.prompt_templates["system_prompt"] = agent.prompt_templates["system_prompt"] + "\n when asked for most recent articles, return each article with its dict/list values, rather than just the title"
#agent.run("what are the most recent articles about Microsoft?")
#print(agent.prompt_templates["system_prompt"])


#huggingface-cli login - to set the access token in the terminal and save it
#translation function works well

demo = gr.Interface(
    fn=mainFunc,
    inputs="text",
    outputs="text",
    title="hackathon agent",
    description="finds info about a company"
)

demo.launch(share=True)