Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
import nltk
|
| 2 |
-
# NLTK ํ ํฌ๋์ด์ ๋ฆฌ์์ค ์๋ ๋ค์ด๋ก๋
|
| 3 |
nltk.download("punkt")
|
| 4 |
nltk.download("punkt_tab")
|
| 5 |
|
|
@@ -10,21 +9,42 @@ from markdownify import markdownify as md
|
|
| 10 |
from sumy.parsers.plaintext import PlaintextParser
|
| 11 |
from sumy.nlp.tokenizers import Tokenizer
|
| 12 |
from sumy.summarizers.text_rank import TextRankSummarizer
|
|
|
|
| 13 |
|
| 14 |
def summarize_text(text, sentence_count=3):
|
| 15 |
-
# ํ๊ตญ์ด๋ ๋ฌธ์ฅ ๋จ์๋ก ๋๊ธฐ ์ํด english ํ ํฌ๋์ด์ ์ฌ์ฉ
|
| 16 |
parser = PlaintextParser.from_string(text, Tokenizer("english"))
|
| 17 |
summarizer = TextRankSummarizer()
|
| 18 |
summary_sentences = summarizer(parser.document, sentence_count)
|
| 19 |
-
return
|
| 20 |
|
| 21 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
headers = {"User-Agent": "Mozilla/5.0"}
|
| 23 |
try:
|
| 24 |
r = requests.get(url, headers=headers, timeout=10)
|
| 25 |
r.raise_for_status()
|
| 26 |
|
| 27 |
-
# HTML ํํ๋ก ๋ณธ๋ฌธ ์ถ์ถ
|
| 28 |
html_content = trafilatura.extract(
|
| 29 |
r.text,
|
| 30 |
output_format="html",
|
|
@@ -33,32 +53,28 @@ def extract_and_summarize(url):
|
|
| 33 |
)
|
| 34 |
|
| 35 |
if not html_content:
|
| 36 |
-
return "๋ณธ๋ฌธ์ ์ถ์ถํ ์ ์์ต๋๋ค.", ""
|
| 37 |
|
| 38 |
-
# HTML โ Markdown ๋ณํ
|
| 39 |
markdown_text = md(html_content, heading_style="ATX")
|
| 40 |
|
| 41 |
-
|
| 42 |
-
|
| 43 |
|
| 44 |
-
return markdown_text,
|
| 45 |
|
| 46 |
-
except requests.exceptions.Timeout:
|
| 47 |
-
return "์์ฒญ์ด ์๊ฐ ์ด๊ณผ๋์์ต๋๋ค.", ""
|
| 48 |
-
except requests.exceptions.RequestException as e:
|
| 49 |
-
return f"์์ฒญ ์คํจ: {e}", ""
|
| 50 |
except Exception as e:
|
| 51 |
-
return f"์๋ฌ ๋ฐ์: {e}", ""
|
| 52 |
|
| 53 |
iface = gr.Interface(
|
| 54 |
-
fn=
|
| 55 |
inputs=gr.Textbox(label="URL ์
๋ ฅ", placeholder="https://example.com"),
|
| 56 |
outputs=[
|
| 57 |
gr.Markdown(label="์ถ์ถ๋ ๋ณธ๋ฌธ"),
|
| 58 |
-
gr.Textbox(label="์๋ ์์ฝ", lines=5)
|
|
|
|
| 59 |
],
|
| 60 |
-
title="๋ณธ๋ฌธ ์ถ์ถ๊ธฐ + ์๋ ์์ฝ",
|
| 61 |
-
description="์นํ์ด์ง URL์ ์
๋ ฅํ๋ฉด ๋ณธ๋ฌธ์ ์ถ์ถํ๊ณ ,
|
| 62 |
)
|
| 63 |
|
| 64 |
if __name__ == "__main__":
|
|
|
|
| 1 |
import nltk
|
|
|
|
| 2 |
nltk.download("punkt")
|
| 3 |
nltk.download("punkt_tab")
|
| 4 |
|
|
|
|
| 9 |
from sumy.parsers.plaintext import PlaintextParser
|
| 10 |
from sumy.nlp.tokenizers import Tokenizer
|
| 11 |
from sumy.summarizers.text_rank import TextRankSummarizer
|
| 12 |
+
import re
|
| 13 |
|
| 14 |
def summarize_text(text, sentence_count=3):
    """Pick the `sentence_count` most salient sentences from `text`.

    Runs sumy's TextRank summarizer over a plain-text parse of the input
    and returns the chosen sentences as plain strings.

    Args:
        text: the document to summarize, as a single string.
        sentence_count: how many sentences to keep (default 3).

    Returns:
        list[str]: the selected sentences, in the order TextRank yields them.
    """
    # NOTE(review): the "english" tokenizer is applied to all input, so
    # sentence segmentation for non-English text may be approximate.
    document = PlaintextParser.from_string(text, Tokenizer("english")).document
    summary = TextRankSummarizer()(document, sentence_count)
    selected = []
    for sentence in summary:
        selected.append(str(sentence))
    return selected
|
| 19 |
|
| 20 |
+
def paraphrase_text(sentences):
    """Lightly paraphrase each sentence with a local, LLM-free heuristic.

    Two transformations are applied per sentence: a fixed synonym
    substitution table, then a one-shot clause reordering around the first
    occurrence of the topic-marker token (quality is intentionally basic).

    Args:
        sentences: iterable of sentence strings.

    Returns:
        list[str]: one paraphrased string per input sentence; the input is
        not mutated.
    """
    # Fixed synonym table (literal word -> literal replacement).
    replacements = {
        "๋ธ์กฐ": "๋ธ๋์กฐํฉ",
        "์ฑ๊ณผ๊ธ": "์ฑ๊ณผ ๋ณด์๊ธ",
        "์๊ตฌ": "์์ฒญ",
        "๋ถ๋ง": "์ด์ ์ ๊ธฐ",
        "ํฉ์์": "ํ์ ๊ฒฐ๊ณผ์",
    }
    paraphrased = []
    for s in sentences:
        new_s = s
        for k, v in replacements.items():
            # Fix: str.replace instead of re.sub — the keys are literal
            # words, not patterns; re.sub would misbehave the moment a key
            # contained a regex metacharacter.
            new_s = new_s.replace(k, v)
        # Reorder around the first topic-marker token to vary structure:
        # "A<marker>B" becomes "B <arrow> A<marker>".
        if "๋" in new_s:
            parts = new_s.split("๋", 1)
            new_s = f"{parts[1].strip()} โ {parts[0].strip()}๋"
        paraphrased.append(new_s)
    return paraphrased
|
| 41 |
+
|
| 42 |
+
def extract_summarize_paraphrase(url):
|
| 43 |
headers = {"User-Agent": "Mozilla/5.0"}
|
| 44 |
try:
|
| 45 |
r = requests.get(url, headers=headers, timeout=10)
|
| 46 |
r.raise_for_status()
|
| 47 |
|
|
|
|
| 48 |
html_content = trafilatura.extract(
|
| 49 |
r.text,
|
| 50 |
output_format="html",
|
|
|
|
| 53 |
)
|
| 54 |
|
| 55 |
if not html_content:
|
| 56 |
+
return "๋ณธ๋ฌธ์ ์ถ์ถํ ์ ์์ต๋๋ค.", "", ""
|
| 57 |
|
|
|
|
| 58 |
markdown_text = md(html_content, heading_style="ATX")
|
| 59 |
|
| 60 |
+
summary_sentences = summarize_text(markdown_text, sentence_count=3)
|
| 61 |
+
paraphrased_sentences = paraphrase_text(summary_sentences)
|
| 62 |
|
| 63 |
+
return markdown_text, "\n".join(summary_sentences), "\n".join(paraphrased_sentences)
|
| 64 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
except Exception as e:
|
| 66 |
+
return f"์๋ฌ ๋ฐ์: {e}", "", ""
|
| 67 |
|
| 68 |
# Gradio front end: one URL input mapped onto three output panes
# (extracted body as Markdown, the 3-sentence summary, and its paraphrase).
iface = gr.Interface(
    fn=extract_summarize_paraphrase,
    inputs=gr.Textbox(
        label="URL ์๋ ฅ",
        placeholder="https://example.com",
    ),
    outputs=[
        gr.Markdown(label="์ถ์ถ๋ ๋ณธ๋ฌธ"),        # extracted article body
        gr.Textbox(label="์๋ ์์ฝ", lines=5),     # automatic summary
        gr.Textbox(label="์๋ ์ฌ์์ฑ", lines=5),   # automatic paraphrase
    ],
    title="๋ณธ๋ฌธ ์ถ์ถ๊ธฐ + ์๋ ์์ฝ + ์๋ ์ฌ์์ฑ",
    description="์นํ์ด์ง URL์ ์๋ ฅํ๋ฉด ๋ณธ๋ฌธ์ ์ถ์ถํ๊ณ , 3๋ฌธ์ฅ ์์ฝ๊ณผ ์ฌ์์ฑ(Paraphrasing) ๊ฒฐ๊ณผ๋ฅผ ์ ๊ณตํฉ๋๋ค.",
)
|
| 79 |
|
| 80 |
if __name__ == "__main__":
|