| import string | |
| import requests | |
| from bs4 import BeautifulSoup | |
def get_article_names(url):
    """Scrape featured-article names from a Wikipedia listing page.

    Parameters:
        url: Page to fetch (expected to be the Featured Articles listing).

    Returns:
        list[str]: Cleaned article-name strings, with the page's intro and
        trailing paragraphs trimmed off.

    Raises:
        requests.HTTPError: If the HTTP request returns an error status.
        requests.Timeout: If the server does not respond in time.
    """
    # Bug fix: a missing timeout makes requests.get() wait forever on a
    # stalled connection; 30 s is generous for a single page fetch.
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # Raise an error if the request failed

    soup = BeautifulSoup(response.text, "html.parser")
    # NOTE(review): names are taken from <p> elements; verify this still
    # matches the live page layout if scraping starts returning junk.
    paragraphs = soup.select("p")

    # Strip the " --" and "\xa0–" separator fragments that trail each name
    # and drop whitespace-only paragraphs.
    names = [
        p.get_text().replace(" --", "").replace("\xa0–", "")
        for p in paragraphs
        if p.get_text().strip()
    ]
    # The [11:-1] slice skips the page's intro/outro paragraphs — it is
    # layout-specific magic; TODO confirm it still brackets the real list.
    return names[11:-1]
# Fetch the featured-article names and persist them locally.
article_names = []
url = "https://en.wikipedia.org/wiki/Wikipedia:Featured_articles"
names = get_article_names(url)
article_names.extend(names)

# Save the article names to a file (one article per line)
output_file = "feat_art.txt"
with open(output_file, "w", encoding="utf-8") as file:
    for name in article_names:
        # Bug fix: the original wrote names back-to-back with no separator,
        # contradicting the "one article per line" intent — add a newline.
        file.write(name + "\n")
print(f"Saved {len(article_names)} article names to {output_file}")