Yermek68 commited on
Commit
3cab2bd
·
verified ·
1 Parent(s): 7f458ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -122
app.py CHANGED
@@ -3,107 +3,85 @@ from transformers import pipeline
3
  from fastapi import FastAPI
4
  from fastapi.middleware.cors import CORSMiddleware
5
  from langdetect import detect
6
- import re
7
- import datetime
8
- import hashlib
9
- import io
10
 
11
- # ================== Утилиты ==================
12
-
13
- def clean_text(text: str):
14
- text = text.replace("\n", " ").replace("\r", " ")
15
- text = re.sub(r"\s+", " ", text)
16
  text = re.sub(r"[^\w\s.,!?%\-–:;()\"'’«»]", "", text)
17
  return text.strip()
18
 
19
- def detect_language(text: str):
20
  try:
21
  lang = detect(text)
22
  except:
23
  lang = "en"
24
-
25
- kazakh_letters = "қңәөүһіұ"
26
- if any(ch in text.lower() for ch in kazakh_letters):
27
  lang = "kk"
28
  return lang
29
 
30
- def generate_slug(title: str):
31
  slug = re.sub(r"[^a-zA-Zа-яА-Я0-9]+", "-", title.lower()).strip("-")
32
- slug_hash = hashlib.md5(title.encode()).hexdigest()[:6]
33
- return f"/news/{slug}-{slug_hash}"
34
-
35
- # ================== Модели ==================
36
-
37
- summarizers = {}
38
- analyzers = {}
39
-
40
- def get_summarizer(lang: str):
41
- if lang == "ru":
42
- model_name = "IlyaGusev/mbart_ru_sum_gazeta"
43
- elif lang == "kk":
44
- model_name = "facebook/mbart-large-50-many-to-many-mmt"
45
- else:
46
- model_name = "facebook/bart-large-cnn"
47
- if model_name not in summarizers:
48
- summarizers[model_name] = pipeline("summarization", model=model_name)
49
- return summarizers[model_name]
50
-
51
- def get_sentiment_analyzer(lang: str):
52
- if lang in ["ru", "kk"]:
53
- model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
54
- else:
55
- model_name = "cardiffnlp/twitter-roberta-base-sentiment"
56
- if model_name not in analyzers:
57
- analyzers[model_name] = pipeline("sentiment-analysis", model=model_name)
58
- return analyzers[model_name]
59
-
60
- # ================== Контент ==================
61
-
62
- def extract_keywords(text: str, top_n: int = 7):
63
  words = re.findall(r"\b\w{5,}\b", text.lower())
64
  freq = {}
65
  for w in words:
66
  freq[w] = freq.get(w, 0) + 1
67
- keywords = sorted(freq, key=freq.get, reverse=True)[:top_n]
68
- return ", ".join(keywords)
69
 
70
- def detect_topic(text: str):
71
  topics = {
72
  "Экономика": ["рынок", "компания", "инвестиция", "қаржы", "сату"],
73
  "Технологии": ["ai", "робот", "интернет", "жасанды интеллект"],
74
  "Саясат": ["үкімет", "закон", "президент", "выборы"],
75
  "Ғылым": ["зерттеу", "ғалым", "эксперимент"],
76
- "Спорт": ["матч", "команда", "спорт"]
77
  }
78
- text_lower = text.lower()
79
  for topic, words in topics.items():
80
- if any(w in text_lower for w in words):
81
  return topic
82
  return "Жалпы тақырып / Общая тема"
83
 
84
- # ================== Основная логика ==================
85
-
86
- def summarize_text(text: str):
87
  if not text.strip():
88
  return "⚠️ Введите текст для анализа.", None
89
-
90
  text = clean_text(text)
91
  lang = detect_language(text)
92
 
93
- summarizer = get_summarizer(lang)
94
- sentiment_model = get_sentiment_analyzer(lang)
95
 
96
  words = len(text.split())
97
- if words < 80:
98
- max_len, min_len = 70, 20
99
- elif words < 300:
100
- max_len, min_len = 140, 40
101
- else:
102
- max_len, min_len = 220, 60
103
-
104
- summary = summarizer(text, max_length=max_len, min_length=min_len, do_sample=False)[0]["summary_text"]
105
 
106
- sentiment = sentiment_model(summary)[0]["label"].lower()
 
107
  if "5" in sentiment or "pos" in sentiment:
108
  sentiment = "😊 Позитивті / Позитивное"
109
  elif "1" in sentiment or "neg" in sentiment:
@@ -114,49 +92,47 @@ def summarize_text(text: str):
114
  topic = detect_topic(text)
115
  keywords = extract_keywords(text)
116
  title = summary.split(".")[0][:80].strip()
117
- meta_description = summary[:160].strip()
118
  slug = generate_slug(title)
119
  date_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
120
 
121
- score = 0
122
- score += 1 if len(keywords.split(",")) >= 5 else 0
123
- score += 1 if len(meta_description) >= 100 else 0
124
- score += 1 if len(title) > 20 else 0
125
- seo_status = "✅ Оптимально для публикации" if score >= 2 else "⚠️ Недостаточно данных для SEO"
126
-
127
- lang_name = {
128
- "kk": "Қазақ (Kazakh)",
129
- "ru": "Русский (Russian)",
130
- "en": "Ағылшын (English)"
131
- }.get(lang, "Multilingual")
132
-
133
- output = f"# 🧠 Eroha Summarizer PRO++++ v2.4 Publisher Edition\n"
134
- output += f"## 🌍 Language: {lang_name}\n"
135
- output += f"### 📅 Date: {date_now}\n"
136
- output += f"### 📌 Topic: {topic}\n"
137
- output += f"### 💬 Sentiment: {sentiment}\n\n"
138
- output += "---\n\n"
139
- output += f"📄 **Summary:**\n{summary}\n\n"
140
- output += "---\n\n"
141
- output += f"## 🧭 SEO Optimization\n"
142
- output += f"**📰 Title:** {title}\n\n"
 
 
 
143
  output += f"**🔑 Keywords:** {keywords}\n\n"
144
- output += f"**📄 Meta Description:** {meta_description}\n\n"
145
  output += f"**🔗 Slug:** `{slug}`\n\n"
146
- output += f"**📊 SEO Score:** {seo_status}\n\n"
147
- output += "---\n\n"
148
- output += f"🔖 **Tags:** #Eroha #AI #SEO #Publisher #Kazakhstan #Press #News\n"
149
-
150
- # Создание Markdown-файла
151
- filename = f"Eroha_Summary_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')}.md"
152
- md_bytes = io.BytesIO(output.encode('utf-8'))
153
- md_bytes.name = filename
154
 
155
- return output, md_bytes
 
 
156
 
157
- # ================== API и UI ==================
158
-
159
- app = FastAPI(title="Eroha Summarizer PRO++++ v2.4 Publisher Edition")
160
  app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])
161
 
162
  @app.post("/api/summarize")
@@ -165,25 +141,15 @@ async def summarize_api(data: dict):
165
  summary, _ = summarize_text(text)
166
  return {"summary": summary}
167
 
168
- # Gradio интерфейс
169
- with gr.Blocks(title="Eroha Summarizer PRO++++ v2.4 Publisher Edition") as iface:
170
- gr.Markdown("# 🧠 Eroha Summarizer PRO++++ v2.4 Publisher Edition")
171
- gr.Markdown("AI-инструмент для суммаризации, анализа, SEO и экспорта Markdown (с поддержкой казахского 🇰🇿)")
172
-
173
- input_box = gr.Textbox(lines=8, label="Введите текст / Мәтінді енгізіңіз")
174
- summarize_btn = gr.Button("🚀 Анализ и SEO-суммаризация")
175
- clear_btn = gr.Button("🧹 Очистить")
176
- copy_btn = gr.Button("📋 Копировать результат")
177
- download_btn = gr.File(label="💾 Скачать результат в Markdown")
178
-
179
- output_box = gr.Markdown(label="Результат / Result")
180
-
181
- def process_input(text):
182
- summary, md_file = summarize_text(text)
183
- return summary, md_file
184
-
185
- summarize_btn.click(process_input, inputs=input_box, outputs=[output_box, download_btn])
186
- clear_btn.click(lambda: "", None, input_box)
187
- copy_btn.click(lambda t: t, input_box, input_box)
188
 
189
  iface.launch(server_name="0.0.0.0", server_port=7860)
 
3
  from fastapi import FastAPI
4
  from fastapi.middleware.cors import CORSMiddleware
5
  from langdetect import detect
6
+ import re, datetime, hashlib, io, json
 
 
 
7
 
8
+ # === Утилиты ===
9
def clean_text(text):
    """Normalize whitespace and strip unsupported characters from *text*."""
    # Flatten line breaks first, then squeeze whitespace runs to single spaces.
    flattened = text.replace("\n", " ").replace("\r", " ")
    collapsed = re.sub(r"\s+", " ", flattened)
    # Keep word characters, whitespace and common punctuation; drop the rest.
    sanitized = re.sub(r"[^\w\s.,!?%\-–:;()\"'’«»]", "", collapsed)
    return sanitized.strip()
13
 
14
def detect_language(text):
    """Best-effort language detection with a Kazakh-specific override.

    Falls back to "en" when detection fails for any reason; the presence of
    any Kazakh-only Cyrillic letter then forces "kk" — presumably because
    generic detection mislabels Kazakh (TODO confirm against langdetect).
    """
    try:
        lang = detect(text)
    except Exception:  # was a bare except; narrowed so SystemExit etc. propagate
        lang = "en"
    # Kazakh-specific letters are a reliable signal regardless of detect().
    if any(ch in text.lower() for ch in "қңәөүһіұ"):
        lang = "kk"
    return lang
22
 
23
def generate_slug(title):
    """Build a news URL path for *title*: lowercase slug plus a short hash suffix."""
    # Collapse every run of non-Latin/Cyrillic-alphanumeric chars into "-".
    slug = re.sub(r"[^a-zA-Zа-яА-Я0-9]+", "-", title.lower()).strip("-")
    # Six hex chars of the MD5 digest disambiguate titles that slug identically.
    suffix = hashlib.md5(title.encode()).hexdigest()[:6]
    return f"/news/{slug}-{suffix}"
26
+
27
# === Models (lazily instantiated, cached pipelines) ===
summarizers = {}
analyzers = {}

def get_summarizer(lang):
    """Return a cached summarization pipeline for *lang*, loading it on first use."""
    if lang == "ru":
        model_id = "IlyaGusev/mbart_ru_sum_gazeta"
    elif lang == "kk":
        model_id = "facebook/mbart-large-50-many-to-many-mmt"
    else:
        # Default model for English and any unrecognized language.
        model_id = "facebook/bart-large-cnn"
    if model_id not in summarizers:
        summarizers[model_id] = pipeline("summarization", model=model_id)
    return summarizers[model_id]
38
+
39
def get_sentiment_analyzer(lang):
    """Return a cached sentiment-analysis pipeline appropriate for *lang*."""
    # Russian/Kazakh use the multilingual star-rating model; everything else
    # goes through the English Twitter-tuned RoBERTa.
    if lang in ["ru", "kk"]:
        model_id = "nlptown/bert-base-multilingual-uncased-sentiment"
    else:
        model_id = "cardiffnlp/twitter-roberta-base-sentiment"
    if model_id not in analyzers:
        analyzers[model_id] = pipeline("sentiment-analysis", model=model_id)
    return analyzers[model_id]
48
+
49
+ # === Логика ===
50
def extract_keywords(text, top_n=7):
    """Return the *top_n* most frequent words of 5+ characters, comma-joined.

    Words are lowercased; frequency ties keep first-seen order, matching the
    previous hand-rolled dict-counting implementation.
    """
    from collections import Counter  # local import keeps the function self-contained
    words = re.findall(r"\b\w{5,}\b", text.lower())
    # Counter.most_common is the idiomatic (C-accelerated) frequency sort and
    # is documented as equivalent to sorted(..., reverse=True)[:n].
    top = Counter(words).most_common(top_n)
    return ", ".join(word for word, _ in top)
 
56
 
57
def detect_topic(text):
    """Classify *text* into a coarse topic via keyword lookup (first match wins)."""
    keyword_map = {
        "Экономика": ["рынок", "компания", "инвестиция", "қаржы", "сату"],
        "Технологии": ["ai", "робот", "интернет", "жасанды интеллект"],
        "Саясат": ["үкімет", "закон", "президент", "выборы"],
        "Ғылым": ["зерттеу", "ғалым", "эксперимент"],
        "Спорт": ["матч", "команда", "спорт"],
    }
    lowered = text.lower()
    # Dict insertion order defines match priority; the default covers no hits.
    return next(
        (
            topic
            for topic, markers in keyword_map.items()
            if any(marker in lowered for marker in markers)
        ),
        "Жалпы тақырып / Общая тема",
    )
70
 
71
+ def summarize_text(text):
 
 
72
  if not text.strip():
73
  return "⚠️ Введите текст для анализа.", None
 
74
  text = clean_text(text)
75
  lang = detect_language(text)
76
 
77
+ summ = get_summarizer(lang)
78
+ sent_model = get_sentiment_analyzer(lang)
79
 
80
  words = len(text.split())
81
+ max_len, min_len = (70, 20) if words < 80 else (140, 40) if words < 300 else (220, 60)
 
 
 
 
 
 
 
82
 
83
+ summary = summ(text, max_length=max_len, min_length=min_len, do_sample=False)[0]["summary_text"]
84
+ sentiment = sent_model(summary)[0]["label"].lower()
85
  if "5" in sentiment or "pos" in sentiment:
86
  sentiment = "😊 Позитивті / Позитивное"
87
  elif "1" in sentiment or "neg" in sentiment:
 
92
  topic = detect_topic(text)
93
  keywords = extract_keywords(text)
94
  title = summary.split(".")[0][:80].strip()
95
+ meta_desc = summary[:160].strip()
96
  slug = generate_slug(title)
97
  date_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
98
 
99
+ # === JSON-LD Schema.org ===
100
+ schema_data = {
101
+ "@context": "https://schema.org",
102
+ "@type": "NewsArticle",
103
+ "headline": title,
104
+ "datePublished": datetime.datetime.now().isoformat(),
105
+ "articleSection": topic,
106
+ "keywords": keywords.split(", "),
107
+ "description": meta_desc,
108
+ "inLanguage": lang,
109
+ "mainEntityOfPage": f"https://eroha.ai{slug}",
110
+ "author": {"@type": "Organization", "name": "Eroha Intelligence Suite"},
111
+ "publisher": {
112
+ "@type": "Organization",
113
+ "name": "Eroha Intelligence Suite",
114
+ "logo": {"@type": "ImageObject", "url": "https://eroha.ai/logo.png"},
115
+ },
116
+ }
117
+ json_ld = json.dumps(schema_data, indent=2, ensure_ascii=False)
118
+
119
+ output = f"# 🧠 Eroha Summarizer PRO++++ v2.5 Press-Optimized Edition\n"
120
+ output += f"## 🌍 Language: {lang.upper()}\n### 📅 Date: {date_now}\n"
121
+ output += f"### 📌 Topic: {topic}\n### 💬 Sentiment: {sentiment}\n\n"
122
+ output += f"---\n\n📄 **Summary:**\n{summary}\n\n---\n\n"
123
+ output += f"## 🧭 SEO Optimization\n**📰 Title:** {title}\n\n"
124
  output += f"**🔑 Keywords:** {keywords}\n\n"
125
+ output += f"**📄 Meta Description:** {meta_desc}\n\n"
126
  output += f"**🔗 Slug:** `{slug}`\n\n"
127
+ output += "---\n\n### 🧱 JSON-LD (Schema.org)\n```json\n{json_ld}\n```\n\n"
128
+ output += "---\n\n🔖 **Tags:** #Eroha #AI #SEO #Press #Kazakhstan #News\n"
 
 
 
 
 
 
129
 
130
+ md_file = io.BytesIO(output.encode("utf-8"))
131
+ md_file.name = f"Eroha_Summary_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')}.md"
132
+ return output, md_file
133
 
134
+ # === API + UI ===
135
+ app = FastAPI(title="Eroha Summarizer PRO++++ v2.5")
 
136
  app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_methods=["*"], allow_headers=["*"])
137
 
138
  @app.post("/api/summarize")
 
141
  summary, _ = summarize_text(text)
142
  return {"summary": summary}
143
 
144
+ with gr.Blocks(title="Eroha Summarizer PRO++++ v2.5 Press-Optimized Edition") as iface:
145
+ gr.Markdown("# 🧠 Eroha Summarizer PRO++++ v2.5 Press-Optimized Edition")
146
+ gr.Markdown("AI-инструмент для суммаризации, SEO и экспорта с JSON-LD микроразметкой (NewsArticle)")
147
+ text_in = gr.Textbox(lines=8, label="Введите текст / Мәтінді енгізіңіз")
148
+ btn = gr.Button("🚀 Анализ и экспорт SEO-структуры")
149
+ clear = gr.Button("🧹 Очистить")
150
+ file_out = gr.File(label="💾 Скачать Markdown")
151
+ text_out = gr.Markdown(label="Результат")
152
+ btn.click(lambda t: summarize_text(t), inputs=text_in, outputs=[text_out, file_out])
153
+ clear.click(lambda: "", None, text_in)
 
 
 
 
 
 
 
 
 
 
154
 
155
  iface.launch(server_name="0.0.0.0", server_port=7860)