File size: 6,106 Bytes
1eaa50d
 
 
 
 
 
 
 
 
 
 
 
 
 
1bb035c
 
1eaa50d
1bb035c
1eaa50d
 
 
 
1bb035c
1eaa50d
1bb035c
1eaa50d
 
 
 
 
 
 
 
 
d8ac1a4
 
1eaa50d
d8ac1a4
 
 
 
 
 
 
 
 
 
 
1eaa50d
d8ac1a4
 
 
 
 
 
 
1eaa50d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1706b19
1eaa50d
 
 
d8ac1a4
796470b
d8ac1a4
1eaa50d
d8ac1a4
 
 
1eaa50d
 
 
d8ac1a4
 
 
 
1eaa50d
 
 
 
 
 
 
 
 
 
 
 
 
 
d8ac1a4
1eaa50d
 
 
 
 
d8ac1a4
1eaa50d
d8ac1a4
 
1eaa50d
d8ac1a4
 
1eaa50d
d8ac1a4
 
1eaa50d
 
 
 
 
 
 
 
d8ac1a4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1eaa50d
 
 
d8ac1a4
 
 
 
 
 
 
 
 
1eaa50d
 
d8ac1a4
 
 
1eaa50d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
# -*- coding: utf-8 -*-
"""NewsSummary.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1o6xj-MQoYO_ZXMNd1MTq-8CmtguyphQW
"""

# pip install requests
#
# pip install streamlit

import streamlit as st
import numpy as np
import pandas as pd
import json

import requests
from huggingface_hub import hf_hub_download
from transformers import pipeline
from transformers import BartTokenizer, BartForConditionalGeneration

# Hugging Face model id for the summarizer (distilled BART fine-tuned on CNN/DailyMail).
model_name = "sshleifer/distilbart-cnn-12-6"

import http.client, urllib.parse

# NOTE(review): `conn` is never used below — fetch_news() uses `requests`
# instead. Left in place in case unseen code depends on it; candidate for removal.
conn = http.client.HTTPSConnection('api.thenewsapi.com')
import os

# News API credential; expected to be injected via environment
# (e.g. Hugging Face Spaces secrets). May be None when unset.
API_TOKEN = os.getenv("NEWS_API_TOKEN")
if not API_TOKEN:
    st.error("API token not set. Please configure NEWS_API_TOKEN in Hugging Face Secrets.")


# A much more robust fetch_news function
def fetch_news():
    """Fetch up to 3 recent Indian English-language news articles.

    Queries thenewsapi.com, restricted to a few Indian news domains.

    Returns:
        dict: the decoded JSON API response on success, or
        None: when the API token is missing or the HTTP request fails
        (an error is shown in the Streamlit UI in both failure cases).
    """
    if not API_TOKEN:
        st.error("API token not set. Please configure NEWS_API_TOKEN in Hugging Face Secrets.")
        return None # Return None to indicate failure

    params = {
        'api_token': API_TOKEN,
        'limit': 3,
        'locale': 'in',
        'language': 'en',
        'domains': 'indiatimes.com,ndtv.com,thehindu.com'
    }
    try:
        # timeout prevents the whole Streamlit app from hanging forever
        # when the news API is unresponsive; a timeout raises
        # requests.exceptions.Timeout, a RequestException subclass,
        # so it is handled by the existing except clause.
        response = requests.get(
            'https://api.thenewsapi.com/v1/news/all',
            params=params,
            timeout=10,
        )
        response.raise_for_status()  # This will automatically catch bad responses (like 404, 500)
        return response.json()
    except requests.exceptions.RequestException as e:
        st.error(f"Failed to fetch news: {e}")
        return None # Return None on failure

# data = json.loads(data)

# print(type(data))

# sample = """Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
#
# The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham."""


# model_dir = hf_hub_download(repo_id="sshleifer/distilbart-cnn-12-6", filename="config.json", cache_dir="models")

# summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", cache_dir="models")

# bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
# bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")


@st.cache_resource
def load_model_and_tokenizer():
    """Download (or reuse) the DistilBART summarization model.

    Decorated with st.cache_resource so the heavy model load happens
    only once per Streamlit server process.

    Returns:
        tuple: (BartTokenizer, BartForConditionalGeneration)
    """
    # Hugging Face downloads go to a writable temp location rather than
    # the default home-directory cache.
    hf_cache = "/tmp/hf_cache"

    with st.spinner("Loading summarization model... please wait."):
        bart_tokenizer = BartTokenizer.from_pretrained(model_name, cache_dir=hf_cache)
        bart_model = BartForConditionalGeneration.from_pretrained(model_name, cache_dir=hf_cache)

    return bart_tokenizer, bart_model


# bart_tokenizer, bart_model = load_model_and_tokenizer()


def summarize(text, bart_tokenizer, bart_model, maxSummarylength=500):
    """Produce an abstractive summary of *text* using a BART model.

    Args:
        text: the article text to condense.
        bart_tokenizer: tokenizer matching *bart_model*.
        bart_model: a BartForConditionalGeneration instance.
        maxSummarylength: upper bound (in tokens) for the summary; the
            minimum length is a fifth of this value.

    Returns:
        str: the decoded summary without special tokens.
    """
    # Input is truncated to the model's 1024-token context window.
    token_ids = bart_tokenizer.encode(
        "summarize: " + text,
        return_tensors="pt",
        max_length=1024,
        truncation=True,
    )

    # Beam search with a strong length penalty to favour longer summaries.
    generated = bart_model.generate(
        token_ids,
        max_length=int(maxSummarylength),
        min_length=int(maxSummarylength / 5),
        length_penalty=10.0,
        num_beams=4,
        early_stopping=True,
    )

    return bart_tokenizer.decode(generated[0], skip_special_tokens=True)


# sum = summarize(sample)
# print(sum)


def display(title, url, img, src, txt):
    """Render one summarized article (title, image, source, summary, link)."""
    heading = f"## **{title}**"
    st.markdown(heading)

    # Some articles carry no image URL; skip the image widget then.
    if img:
        st.image(img, use_container_width=True)

    source_line = f"**Source:** {src}"
    summary_line = f"**Summary:** {txt}"
    st.write(source_line)
    st.write(summary_line)

    link_line = f"[Want to read the full article? Click Here!]({url})"
    st.markdown(link_line)
    st.markdown("------------------------------")


# news_title=[]
# news_text=[]
# news_url=[]
# news_img_url=[]
# news_source=[]
# news_summary=[]
def process(bart_tokenizer, bart_model):
    """Fetch the latest articles, summarize each snippet, and render them.

    Shows a warning when the fetch failed or returned no "data" field.
    """
    payload = fetch_news()
    if not payload or "data" not in payload:
        st.warning("No news articles were found.")
        return

    for article in payload["data"]:
        title = article.get("title", "No Title Available")
        snippet = article.get("snippet", "")
        link = article.get("url", "#")
        image = article.get("image_url")
        source = article.get("source", "Source Unknown")

        # Without a snippet there is nothing to summarize.
        if not snippet:
            continue

        summary = summarize(snippet, bart_tokenizer, bart_model)
        display(title, link, image, source, summary)

def strmlt():
    """Streamlit entry point: page title, model load, and fetch button."""
    st.title("Newsflix")
    bart_tokenizer, bart_model = load_model_and_tokenizer()

    # Guard clause: without a loaded model the app cannot do anything useful.
    if not (bart_tokenizer and bart_model):
        st.error("Application could not start because the model failed to load.")
        return

    # Fetch and summarize only on explicit user request, so the API is
    # not hit on every Streamlit rerun.
    if st.button("Load Latest News"):
        with st.spinner("Fetching and summarizing latest news..."):
            process(bart_tokenizer, bart_model)



# Run the Streamlit app when executed directly (e.g. `streamlit run app.py`).
if __name__ == "__main__":
    # st.write("Summarized news from The Hindu, NDTV, IndiaTimes")
    strmlt()