Update app.py
Browse files
app.py
CHANGED
|
@@ -1,60 +1,49 @@
|
|
|
|
|
| 1 |
import requests
|
| 2 |
-
import csv
|
| 3 |
from bs4 import BeautifulSoup
|
|
|
|
| 4 |
import gradio as gr
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
soup = BeautifulSoup(response.content, 'html.parser')
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
def save_posts_to_csv(posts):
    """Write the scraped posts to 'posts.csv' and return that path.

    Parameters:
        posts: iterable of dicts keyed by the CSV column names below.

    Returns:
        The output filename, 'posts.csv'.
    """
    columns = ['title', 'content', 'featured_image_url', 'category', 'tags']
    output_path = 'posts.csv'
    # newline='' is required by the csv module to avoid blank lines on Windows.
    with open(output_path, 'w', newline='', encoding='utf-8') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=columns)
        writer.writeheader()
        for post in posts:
            writer.writerow(post)
    return output_path
|
| 45 |
-
|
| 46 |
-
def scrape_and_save(csv_file):
    """Scrape every URL listed in an uploaded CSV and save the results.

    Parameters:
        csv_file: uploaded file object; its ``.name`` attribute is the
            path of a CSV listing the URLs to scrape.

    Returns:
        Path of the CSV file containing the scraped posts.
    """
    scraped_posts = scrape_urls(csv_file.name)
    return save_posts_to_csv(scraped_posts)
|
| 50 |
-
|
| 51 |
-
# Build the Gradio UI: upload a CSV of URLs, download a CSV of scraped posts.
demo = gr.Interface(
    fn=scrape_and_save,
    inputs=gr.File(label="Upload CSV with URLs"),
    outputs=gr.File(label="Download Posts CSV"),
    title="Web Scraping to CSV",
    description="Upload a CSV file with URLs, and this app will scrape the content and provide a CSV with the results.",
)

if __name__ == "__main__":
    # share=True exposes a temporary public link in addition to the local server.
    demo.launch(share=True)
|
|
|
|
| 1 |
+
# Import required libraries
|
| 2 |
import requests
|
|
|
|
| 3 |
from bs4 import BeautifulSoup
|
| 4 |
+
import pandas as pd
|
| 5 |
import gradio as gr
|
| 6 |
|
| 7 |
+
# Function to scrape a single post
|
| 8 |
+
# Function to scrape a single post
def scrape_post(link):
    """Fetch a single post page and extract its fields.

    Parameters:
        link: absolute URL of the post to scrape.

    Returns:
        dict with keys 'title', 'content', 'featured_img', 'category',
        'tags' ('category' and 'tags' are lists of strings).

    Raises:
        requests.HTTPError: if the server returns an error status.
        requests.Timeout: if the request exceeds the timeout.
    """
    # requests has no default timeout, so a stalled server would hang the
    # whole crawl forever; raise_for_status surfaces 4xx/5xx instead of
    # silently parsing an error page.
    response = requests.get(link, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, 'html.parser')

    # select_one returns None when a selector misses; guard so one
    # malformed post page yields empty fields instead of aborting the
    # multi-page crawl with an AttributeError/TypeError.
    title_el = soup.select_one('h1')
    title = title_el.text.strip() if title_el else ''
    content_el = soup.select_one('section.entry')
    content = content_el.text.strip() if content_el else ''
    img_el = soup.select_one('img.entry-image')
    featured_img = img_el['src'] if img_el else ''
    category = [a.text for a in soup.select('.entry-cat a')]
    tags = [a.text for a in soup.select('.entry-tag a')]

    return {'title': title, 'content': content, 'featured_img': featured_img, 'category': category, 'tags': tags}
|
| 19 |
+
|
| 20 |
+
# Function to scrape multiple pages
|
| 21 |
+
# Function to scrape multiple pages
def scrape_unwinnable_movies():
    """Crawl the Unwinnable Movies/TV category and scrape every post.

    Follows the pagination 'next' link until it disappears, scraping each
    post found on every listing page.

    Returns:
        pandas.DataFrame with one row per scraped post.

    Raises:
        requests.HTTPError: if a listing page returns an error status.
        requests.Timeout: if a request exceeds the timeout.
    """
    base_url = 'https://unwinnable.com/category/sections/movies-tv/'
    next_page = base_url
    all_posts = []
    # Track visited listing pages: if the site's 'next' link ever points
    # back at a page we have seen, stop instead of looping forever.
    seen_pages = set()

    while next_page and next_page not in seen_pages:
        seen_pages.add(next_page)
        # Timeout so a dead server can't hang the crawl; raise_for_status
        # stops on HTTP errors instead of parsing an error page.
        response = requests.get(next_page, timeout=30)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        post_links = [a['href'] for a in soup.select('.entry-title a')]
        for link in post_links:
            all_posts.append(scrape_post(link))

        # The 'next' anchor is absent on the final listing page.
        next_page_elem = soup.select_one('a.next')
        next_page = next_page_elem['href'] if next_page_elem else None

    return pd.DataFrame(all_posts)
|
| 39 |
+
|
| 40 |
+
# Gradio Interface function
|
| 41 |
+
# Gradio Interface function
def scrape_and_display():
    """Scrape all posts and return them for display in the UI.

    Returns:
        pandas.DataFrame of scraped posts. The DataFrame is returned
        directly: a plain value works with a "dataframe" output component
        in every Gradio version, whereas gr.DataFrame.update(value=...)
        is the deprecated 3.x update API that was removed in Gradio 4.x.
    """
    return scrape_unwinnable_movies()
|
| 44 |
+
|
| 45 |
+
# Create Gradio Interface
|
| 46 |
+
# Create Gradio Interface
demo = gr.Interface(
    fn=scrape_and_display,
    inputs=[],
    outputs="dataframe",
    title="Unwinnable Movies Scraper",
)

# Launch only when run as a script, so importing this module (e.g. from a
# test or another app) does not start a web server as a side effect.
if __name__ == "__main__":
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|