Spaces:
Sleeping
Sleeping
Upload 7 files
Browse files
- .gitattributes +35 -35
- README.md +13 -13
- app.py +79 -5
- env.py +1 -0
- func.py +49 -0
- functions.py +49 -0
- news_scraper.py +84 -0
.gitattributes
CHANGED
|
@@ -1,35 +1,35 @@
|
|
| 1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
|
@@ -1,13 +1,13 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Finance Tracker
|
| 3 |
-
emoji: 🐢
|
| 4 |
-
colorFrom: purple
|
| 5 |
-
colorTo: purple
|
| 6 |
-
sdk: streamlit
|
| 7 |
-
sdk_version: 1.35.0
|
| 8 |
-
app_file: app.py
|
| 9 |
-
pinned: false
|
| 10 |
-
license: mit
|
| 11 |
-
---
|
| 12 |
-
|
| 13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Finance Tracker
|
| 3 |
+
emoji: 🐢
|
| 4 |
+
colorFrom: purple
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: streamlit
|
| 7 |
+
sdk_version: 1.35.0
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
license: mit
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
CHANGED
|
@@ -1,5 +1,79 @@
|
|
| 1 |
-
import streamlit as st
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from transformers import pipeline
|
| 3 |
+
import google.generativeai as genai
|
| 4 |
+
import yfinance as yf
|
| 5 |
+
import matplotlib.pyplot as plt
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import func
|
| 8 |
+
import news_scraper
|
| 9 |
+
import requests
|
| 10 |
+
from bs4 import BeautifulSoup
|
| 11 |
+
import json
|
| 12 |
+
|
| 13 |
+
#
|
| 14 |
+
|
| 15 |
+
# S E T U P
|
| 16 |
+
|
| 17 |
+
#
|
| 18 |
+
|
| 19 |
+
# TODO: deploy
|
| 20 |
+
|
| 21 |
+
fin_data = ""
|
| 22 |
+
pipe = pipeline(
|
| 23 |
+
"text-classification",
|
| 24 |
+
model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis",
|
| 25 |
+
)
|
| 26 |
+
API_KEY = "AIzaSyDnRd4-UvV4U9oYcZfLXRT224pnU0KwEao"
|
| 27 |
+
model = genai.GenerativeModel("gemini-1.5-flash")
|
| 28 |
+
genai.configure(api_key=API_KEY)
|
| 29 |
+
fig = plt.figure(figsize=(4, 4))
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
st.title("Stock Analysis and Prediction")
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# FIN INDICATOR CHARTS AND MODELS
|
| 36 |
+
stock_name = st.text_input(label="enter the ticker name")
|
| 37 |
+
# news_scraper
|
| 38 |
+
history = yf.download(stock_name, start="2023-01-01")
|
| 39 |
+
stck = yf.Ticker(stock_name)
|
| 40 |
+
|
| 41 |
+
dict = stck.info
|
| 42 |
+
# st.write(dict)
|
| 43 |
+
df = pd.DataFrame.from_dict(dict, orient="index")
|
| 44 |
+
df = df.reset_index()
|
| 45 |
+
df_str = df.to_string()
|
| 46 |
+
st.write(df_str)
|
| 47 |
+
keywords = [stock_name, "finance", "news news news"]
|
| 48 |
+
news_scraper.perform_search(keywords)
|
| 49 |
+
|
| 50 |
+
with open("results.json", "r", encoding="utf-8") as f:
|
| 51 |
+
data = json.load(f)
|
| 52 |
+
|
| 53 |
+
text_descriptions = ""
|
| 54 |
+
for frame in data:
|
| 55 |
+
|
| 56 |
+
text_descriptions += "Title: " + frame["Title"]
|
| 57 |
+
text_descriptions += " " + (frame["Description"])
|
| 58 |
+
|
| 59 |
+
st.write(text_descriptions)
|
| 60 |
+
# SENTIMENT TRACKER
|
| 61 |
+
# TODO : CONNECT THE SCRAPER TO THE SENTIMENT PIPELINE
|
| 62 |
+
output_sentiment = pipe(text_descriptions)
|
| 63 |
+
st.write(output_sentiment)
|
| 64 |
+
|
| 65 |
+
prompt = f"You are a financial analyst, given relevant data provide only the pros and cons of the stock provide a buy reccomendation on a scale of 1 to 10. This is the financial data {df_str} . Consider the following news : {text_descriptions}, also here is a sentiment score of the recent news{output_sentiment}."
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# GEMINI API RESPONSE CODE
|
| 69 |
+
response = model.generate_content(prompt)
|
| 70 |
+
st.write(response.text)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# st.line_chart(history["Close"])
|
| 74 |
+
fig1 = func.plot_column(history, "Close")
|
| 75 |
+
st.pyplot(fig1)
|
| 76 |
+
st.write("% Change")
|
| 77 |
+
fig2 = func.plot_column(history, "Volume")
|
| 78 |
+
st.line_chart(history["Close"].pct_change())
|
| 79 |
+
st.pyplot(fig2)
|
env.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
# `global` at module scope is a no-op: it neither declares nor shares a
# variable across modules.  Read the key from the environment instead so
# a real secret never has to be committed to source control.
import os

API_KEY = os.environ.get("API_KEY", "")
|
func.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
# HELPER FUNCTIONS:
|
| 6 |
+
def plot_column(dataframe, column_name):
    """Plot one column of *dataframe* and return the Figure.

    Using an explicit Figure/Axes instead of pyplot's implicit "current
    figure" keeps successive calls from drawing into each other's plots,
    and returns a real Figure object (the original returned the pyplot
    module) — which is what `st.pyplot()` expects.
    """
    fig, ax = plt.subplots(figsize=(6, 3))
    ax.plot(dataframe[column_name])
    ax.set_title(f"Plot of {column_name}")
    ax.set_xlabel("Index")
    ax.set_ylabel(column_name)
    ax.grid(True)
    return fig
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Calculate EMA
|
| 17 |
+
def ema(close, period=20):
    """Exponential moving average of *close* over a *period*-sample span."""
    smoothed = close.ewm(span=period, adjust=False)
    return smoothed.mean()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Calculate RSI
|
| 22 |
+
def rsi(close, period=14):
    """Relative Strength Index (0-100) using simple rolling means."""
    delta = close.diff()
    gains = delta.clip(lower=0)    # negative moves -> 0, NaN stays NaN
    losses = delta.clip(upper=0)   # positive moves -> 0, NaN stays NaN
    avg_gain = gains.rolling(period).mean()
    avg_loss = losses.rolling(period).mean().abs()
    rs = avg_gain / avg_loss
    return 100.0 - (100.0 / (1.0 + rs))
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Calculate MACD
|
| 35 |
+
def macd(close, fast_period=12, slow_period=26, signal_period=9):
    """MACD line: fast EMA minus slow EMA of *close*.

    The original also computed a signal line and histogram, then threw
    them away; that dead code is removed.  `signal_period` is kept in the
    signature for backward compatibility even though only the MACD line
    is returned.
    """
    fast_ema = close.ewm(span=fast_period, adjust=False).mean()
    slow_ema = close.ewm(span=slow_period, adjust=False).mean()
    return fast_ema - slow_ema
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Calculate OBV
|
| 45 |
+
def obv(close, volume):
    """On-Balance Volume: cumulative sum of volume signed by price move."""
    prev = close.shift()
    # up-day -> +volume, down-day -> -volume, flat (or first row) -> 0
    signed = np.where(close > prev, volume, np.where(close < prev, -volume, 0))
    return signed.cumsum()
|
functions.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
# HELPER FUNCTIONS:
|
| 6 |
+
def plot_column(dataframe, column_name):
    """Draw *column_name* of *dataframe* via pyplot and return `plt`.

    NOTE(review): this returns the pyplot MODULE, not a Figure object,
    and draws on pyplot's global "current figure" state — confirm that
    callers expect that.  This file duplicates func.py; consider keeping
    only one copy.
    """
    plt.figure(figsize=(6, 3))  # becomes the implicit current figure
    plt.plot(dataframe[column_name])
    plt.title(f"Plot of {column_name}")
    plt.xlabel("Index")
    plt.ylabel(column_name)
    plt.grid(True)
    return plt  # the module itself, not a Figure
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Calculate EMA
|
| 17 |
+
def ema(close, period=20):
    """Exponentially weighted moving average over *period* samples."""
    window = close.ewm(span=period, adjust=False)
    return window.mean()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Calculate RSI
|
| 22 |
+
def rsi(close, period=14):
    """Relative Strength Index of *close* over a *period*-sample window."""
    moves = close.diff()
    ups = moves.clip(lower=0)     # drops, and NaN first row, contribute 0 / NaN
    downs = moves.clip(upper=0)
    avg_up = ups.rolling(period).mean()
    avg_down = downs.rolling(period).mean().abs()
    relative_strength = avg_up / avg_down
    return 100.0 - (100.0 / (1.0 + relative_strength))
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Calculate MACD
|
| 35 |
+
def macd(close, fast_period=12, slow_period=26, signal_period=9):
    """Return the MACD line (fast EMA minus slow EMA) of *close*.

    Dead code removed: the signal line and histogram were computed and
    discarded.  `signal_period` remains in the signature so existing
    callers keep working, but it is unused.
    """
    fast_ema = close.ewm(span=fast_period, adjust=False).mean()
    slow_ema = close.ewm(span=slow_period, adjust=False).mean()
    return fast_ema - slow_ema
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Calculate OBV
|
| 45 |
+
def obv(close, volume):
    """On-Balance Volume: running total of volume signed by price direction."""
    previous = close.shift()
    signed_volume = np.where(
        close > previous, volume, np.where(close < previous, -volume, 0)
    )
    # First row compares against NaN, so both tests are False -> contributes 0.
    return signed_volume.cumsum()
|
news_scraper.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
from bs4 import BeautifulSoup
|
| 3 |
+
import json
|
| 4 |
+
|
| 5 |
+
excluded_urls = ["finance.yahoo.com", "google.com/finance"]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def search_duckduckgo(keywords):
    """Fetch the DuckDuckGo HTML results page for *keywords*.

    Returns the raw HTML on success, or "" on any request failure.
    """
    url = f"https://duckduckgo.com/html/?q={'+'.join(keywords)}"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    }

    try:
        # timeout so a stalled connection cannot hang the caller forever;
        # requests has NO default timeout.
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.exceptions.RequestException as e:
        print(f"Error fetching search results: {e}")
        return ""
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def parse_results(html, keywords):
    """Extract {Link, Title, Description} dicts from a DuckDuckGo HTML page.

    Results on excluded domains are dropped, and only results mentioning
    at least one keyword (in title, snippet, or URL) are kept.
    """
    soup = BeautifulSoup(html, "html.parser")
    parsed_results = []
    lowered_keywords = [k.lower() for k in keywords]  # hoisted out of the loop

    for result in soup.select(".result"):
        try:
            anchor = result.select_one(".result__a")
            if anchor is None:
                # Ad/spacer rows have no anchor; previously this raised
                # AttributeError and relied on the broad except below.
                continue
            link = anchor.get("href") or ""
            title = anchor.text
            snippet = result.select_one(".result__snippet")
            description = snippet.text if snippet else ""

            result_data = {
                "Link": link,
                "Title": title,
                "Description": description,
            }

            # Check if the link is not in excluded URLs
            if any(excluded_url in link for excluded_url in excluded_urls):
                continue
            # Check if any keyword is in title, description, or link
            haystack = f"{title} {description} {link}".lower()
            if any(keyword in haystack for keyword in lowered_keywords):
                print(result_data)
                parsed_results.append(result_data)
        except Exception as e:
            # Keep going on one malformed result rather than losing the page.
            print(f"Error parsing result: {e}")

    return parsed_results
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# keywords = ["tatasteel", "finance", "news"]
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def perform_search(keywords):
    """Search DuckDuckGo for *keywords* and persist matches to results.json.

    Returns the list of parsed results ([] when the fetch failed or
    nothing matched) so callers do not have to re-read results.json.
    NOTE(review): when there are no new results, results.json is left
    untouched, so a consumer that reads the file may see STALE data from
    a previous run — confirm that is intended.
    """
    html = search_duckduckgo(keywords)
    if not html:
        print("Failed to fetch search results.")
        return []

    results = parse_results(html, keywords)
    if not results:
        print("No results found.")
        return []

    with open("results.json", "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=4)
    return results
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# if __name__ == "__main__":
|
| 84 |
+
# perform_search(keywords = keywords)
|