Update app.py
Browse files
app.py
CHANGED
|
@@ -30,24 +30,23 @@ def fetch_naver_news(query, display=10, start=1, sort="date"):
|
|
| 30 |
|
| 31 |
# Step 2: GPT ๋ชจ๋ธ ๋ก๋ (์ง๋ณด์ , ๋ณด์์ ๊ธฐ์ฌ ์์ฑ)
|
| 32 |
def load_gpt_model():
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
return generated_article
|
| 51 |
|
| 52 |
# Step 4: ์ ์น ์ฑํฅ ๋ถ๋ฅ ํจ์
|
| 53 |
def classify_political_sentiment(text, classifier):
|
|
@@ -83,8 +82,11 @@ def analyze_news_political_orientation(news_items, classifier, gpt_model):
|
|
| 83 |
results[orientation] += 1
|
| 84 |
|
| 85 |
# ์ง๋ณด์ /๋ณด์์ ๊ธฐ์ฌ ์์ฑ
|
| 86 |
-
|
| 87 |
-
|
|
|
|
|
|
|
|
|
|
| 88 |
|
| 89 |
detailed_results.append({
|
| 90 |
"์ ๋ชฉ": title,
|
|
|
|
| 30 |
|
| 31 |
# Step 2: GPT ๋ชจ๋ธ ๋ก๋ (์ง๋ณด์ , ๋ณด์์ ๊ธฐ์ฌ ์์ฑ)
|
| 32 |
def load_gpt_model():
    """Load a Hugging Face text-generation pipeline (gpt2).

    Returns the pipeline on success; on any failure, reports the error
    through the Streamlit UI and returns None so callers can check for it.
    """
    try:
        # gpt2 is the freely available GPT model on Hugging Face.
        generator = pipeline("text-generation", model="gpt2")
        st.write("GPT model loaded successfully.")
    except Exception as exc:
        # Surface the load failure in the app instead of crashing.
        st.error(f"Error loading GPT model: {exc}")
        return None
    return generator
|
| 40 |
+
|
| 41 |
+
# Step 3: Load the Hugging Face political-bias classification model
def load_huggingface_model():
    """Load the political-bias text-classification pipeline.

    Returns the classifier pipeline on success; on any failure, reports
    the error through the Streamlit UI and returns None.
    """
    try:
        bias_classifier = pipeline(
            "text-classification",
            model="bucketresearch/politicalBiasBERT",
        )
        st.write("Political bias model loaded successfully.")
    except Exception as exc:
        # Surface the load failure in the app instead of crashing.
        st.error(f"Error loading political bias model: {exc}")
        return None
    return bias_classifier
|
|
|
|
| 50 |
|
| 51 |
# Step 4: ์ ์น ์ฑํฅ ๋ถ๋ฅ ํจ์
|
| 52 |
def classify_political_sentiment(text, classifier):
|
|
|
|
| 82 |
results[orientation] += 1
|
| 83 |
|
| 84 |
# ์ง๋ณด์ /๋ณด์์ ๊ธฐ์ฌ ์์ฑ
|
| 85 |
+
prompt = f"์ง๋ณด์ ๊ด์ ์์ ๊ธฐ์ฌ๋ฅผ ์์ฑํด ์ฃผ์ธ์: {combined_text}"
|
| 86 |
+
progressive_article = gpt_model(prompt, max_length=512, num_return_sequences=1)[0]['generated_text']
|
| 87 |
+
|
| 88 |
+
prompt = f"๋ณด์์ ๊ด์ ์์ ๊ธฐ์ฌ๋ฅผ ์์ฑํด ์ฃผ์ธ์: {combined_text}"
|
| 89 |
+
conservative_article = gpt_model(prompt, max_length=512, num_return_sequences=1)[0]['generated_text']
|
| 90 |
|
| 91 |
detailed_results.append({
|
| 92 |
"์ ๋ชฉ": title,
|