Update app.py

app.py CHANGED
@@ -84,9 +84,23 @@ def convert_currency(amount: float, from_currency: str, to_currency: str) -> str:
         to_currency: Target currency code (e.g., EUR)
     """
     try:
-
-
-
+        # Using ExchangeRate-API
+        base_url = "https://api.exchangerate-api.com/v4/latest"
+
+        # Get exchange rates for the base currency
+        response = requests.get(f"{base_url}/{from_currency.upper()}")
+        data = response.json()
+
+        if response.status_code == 200:
+            # Get the exchange rate for the target currency
+            if to_currency.upper() in data['rates']:
+                rate = data['rates'][to_currency.upper()]
+                converted_amount = amount * rate
+                return f"{amount} {from_currency.upper()} = {converted_amount:.2f} {to_currency.upper()} (Rate: 1 {from_currency.upper()} = {rate:.4f} {to_currency.upper()})"
+            else:
+                return f"Error: Target currency {to_currency.upper()} not found"
+        else:
+            return f"Error: {data.get('error', 'Failed to fetch exchange rates')}"
     except Exception as e:
         return f"Error converting currency: {str(e)}"
 
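The new convert_currency body can be sanity-checked outside the Space by calling the same endpoint directly. A minimal sketch, assuming the ExchangeRate-API v4 response carries a top-level 'rates' mapping as the hunk above expects (the USD/EUR pair and the 150.0 amount are arbitrary test values):

import requests

def fetch_rate(from_currency: str, to_currency: str) -> float:
    # Same endpoint the tool queries; raise on HTTP errors instead of
    # returning an error string, which is more convenient in a test script.
    url = f"https://api.exchangerate-api.com/v4/latest/{from_currency.upper()}"
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return response.json()["rates"][to_currency.upper()]

if __name__ == "__main__":
    rate = fetch_rate("USD", "EUR")
    print(f"150.0 USD = {150.0 * rate:.2f} EUR (rate {rate:.4f})")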
@@ -104,11 +118,49 @@ def get_news_headlines(topic: str, count: int = 5) -> str:
     newsapi = NewsApiClient(api_key=API_KEY)
 
     try:
-
-
+        # Try different search queries to get more relevant results
+        queries = [
+            topic,  # Original topic
+            f"{topic} latest",  # Latest news
+            f"{topic} important",  # Important news
+        ]
+
+        all_articles = []
+        seen_titles = set()  # To avoid duplicates
+
+        for query in queries:
+            news = newsapi.get_everything(
+                q=query,
+                language='en',
+                sort_by='publishedAt',
+                page_size=count
+            )
+
+            if news['articles']:
+                for article in news['articles']:
+                    # Skip if we've seen this title before
+                    if article['title'] in seen_titles:
+                        continue
+
+                    seen_titles.add(article['title'])
+                    # Convert UTC timestamp to datetime
+                    pub_date = datetime.datetime.strptime(article['publishedAt'], '%Y-%m-%dT%H:%M:%SZ')
+                    all_articles.append({
+                        'title': article['title'],
+                        'source': article['source']['name'],
+                        'date': pub_date,
+                        'url': article['url']
+                    })
+
+        # Sort by date (newest first) and take the top 'count' articles
+        all_articles.sort(key=lambda x: x['date'], reverse=True)
+        all_articles = all_articles[:count]
+
+        if all_articles:
             headlines = []
-        for idx, article in enumerate(
-
+            for idx, article in enumerate(all_articles, 1):
+                date_str = article['date'].strftime('%Y-%m-%d %H:%M UTC')
+                headlines.append(f"{idx}. [{date_str}] {article['title']} ({article['source']})")
             return "\n".join(headlines)
         return f"No news found for topic: {topic}"
     except Exception as e:
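The deduplicate-then-sort logic in this hunk can be exercised without a NewsAPI key by feeding it canned articles. A minimal sketch of the same pattern (the titles and timestamps below are made up; note that strptime with '%Y-%m-%dT%H:%M:%SZ' assumes whole-second UTC timestamps and would raise ValueError on fractional seconds):

import datetime

raw_articles = [
    {"title": "B", "publishedAt": "2024-05-02T11:30:00Z"},
    {"title": "A", "publishedAt": "2024-05-01T09:00:00Z"},
    {"title": "B", "publishedAt": "2024-05-02T11:30:00Z"},  # duplicate title, skipped
]

seen_titles = set()
articles = []
for article in raw_articles:
    if article["title"] in seen_titles:
        continue  # same rule as the tool: first occurrence of a title wins
    seen_titles.add(article["title"])
    pub_date = datetime.datetime.strptime(article["publishedAt"], "%Y-%m-%dT%H:%M:%SZ")
    articles.append({"title": article["title"], "date": pub_date})

articles.sort(key=lambda x: x["date"], reverse=True)  # newest first, like the tool
print([a["title"] for a in articles])  # ['B', 'A']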
@@ -120,7 +172,10 @@ def get_wikipedia_summary(topic: str) -> str:
     Args:
         topic: Topic to get summary for
     """
-    wiki = wikipediaapi.Wikipedia(
+    wiki = wikipediaapi.Wikipedia(
+        user_agent='HuggingFaceAgent/1.0 (https://huggingface.co/; contact@huggingface.co)',
+        language='en'
+    )
     try:
         page = wiki.page(topic)
         if page.exists():
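The constructor call grows here because recent releases of the wikipedia-api package expect a descriptive user_agent (Wikimedia asks API clients to identify themselves). A standalone sketch of the same usage, assuming wikipedia-api >= 0.6; the agent name and contact details are placeholders to replace with your own:

import wikipediaapi

wiki = wikipediaapi.Wikipedia(
    user_agent="MyWikiBot/1.0 (https://example.com; contact@example.com)",  # placeholder identity
    language="en",
)
page = wiki.page("Python (programming language)")
if page.exists():
    print(page.summary[:200])  # first 200 characters of the lead section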
@@ -185,7 +240,7 @@ final_answer = FinalAnswerTool()
 model = HfApiModel(
     max_tokens=2096,
     temperature=0.5,
-    model_id='
+    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
     custom_role_conversions=None,
 )
 
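Swapping model_id from a model repo name to a full URL points HfApiModel at a dedicated Inference Endpoint rather than the shared serverless API, a common workaround when a public model is overloaded. A minimal sketch of the resulting configuration, assuming smolagents' HfApiModel accepts an endpoint URL as model_id (the URL is the one added in the hunk above):

from smolagents import HfApiModel

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    # Dedicated endpoint URL from the diff; a plain repo id string here
    # would route to the serverless inference API instead.
    model_id="https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud",
    custom_role_conversions=None,
)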
@@ -205,7 +260,8 @@ agent = CodeAgent(
         get_news_headlines,
         get_wikipedia_summary,
         solve_math_expression,
-        generate_password
+        generate_password,
+        get_current_time_in_timezone
     ],
     max_steps=6,
     verbosity_level=1,
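With generate_password now comma-terminated and get_current_time_in_timezone appended, both tools are registered with the agent. A quick smoke test, assuming app.py defines agent as in the hunk above (the prompt is just an example):

# Run once after constructing the CodeAgent with the extended tool list.
result = agent.run("What time is it in Asia/Tokyo right now?")
print(result)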