Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -1,41 +1,75 @@
 import os
-import spacy
+import re
+import camel_tools
+from camel_tools.tokenizers.word import simple_word_tokenize
+from camel_tools.ner import NERecognizer
 import nltk
 import torch
-from transformers import pipeline
+from collections import Counter
+from transformers import pipeline, AutoModel, AutoTokenizer
 import PyPDF2
 import gradio as gr

 # Download and set up the required tools
-spacy.cli.download("en_core_web_sm")
-nlp = spacy.load("en_core_web_sm")
 nltk.download('punkt')

 # Check for a GPU and use it when available
 device = 0 if torch.cuda.is_available() else -1
+
+# Load the language-analysis models
 analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english", device=device)

-
-
-
-
+# Load the camel_tools named-entity recognition model
+ner = NERecognizer.pretrained()
+
+# Load the BERT, GPT2, ELECTRA, and AraBERT models
+arabic_bert_tokenizer = AutoTokenizer.from_pretrained("asafaya/bert-base-arabic")
+arabic_bert_model = AutoModel.from_pretrained("asafaya/bert-base-arabic")
+
+arabic_gpt2_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/aragpt2-base")
+arabic_gpt2_model = AutoModel.from_pretrained("aubmindlab/aragpt2-base")
+
+arabic_electra_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/araelectra-base-discriminator")
+arabic_electra_model = AutoModel.from_pretrained("aubmindlab/araelectra-base-discriminator")
+
+arabert_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabertv02")
+arabert_model = AutoModel.from_pretrained("aubmindlab/bert-base-arabertv02")
+
+# Function to analyze text with camel_tools NER
+def camel_ner_analysis(text):
+    tokens = simple_word_tokenize(text)
+    entities = ner.predict(tokens)
+    entity_dict = {"PERSON": [], "LOC": [], "ORG": [], "DATE": []}
+    for token, tag in zip(tokens, entities):
+        if tag in entity_dict:
+            entity_dict[tag].append((token, tag))
+    return entity_dict

+# Function for sentiment analysis
+def analyze_sentiments(text):
+    sentiments = analyzer(text)
+    return sentiments
+
+# Function to split the text into sentences
 def nltk_extract_sentences(text):
-    sentences = nltk.tokenize.sent_tokenize(text)
+    sentences = nltk.tokenize.sent_tokenize(text, language='arabic')
     return sentences

+# Function to extract quotations from the text
 def nltk_extract_quotes(text):
     quotes = []
-    sentences = nltk.tokenize.sent_tokenize(text)
+    sentences = nltk.tokenize.sent_tokenize(text, language='arabic')
     for sentence in sentences:
-        if '"' in sentence:
+        if '"' in sentence or '«' in sentence or '»' in sentence:
             quotes.append(sentence)
     return quotes

+# Function to count the tokens in the text
 def count_tokens(text):
-    tokens =
+    tokens = simple_word_tokenize(text)
     return len(tokens)

+# Function to extract text from PDF files
 def extract_pdf_text(file_path):
     with open(file_path, "rb") as pdf_file:
         pdf_reader = PyPDF2.PdfReader(pdf_file)
@@ -45,9 +79,50 @@ def extract_pdf_text(file_path):
         text += page.extract_text()
     return text

+# Function to extract scenes from the text
+def extract_scenes(text):
+    scenes = re.split(r'داخلي|خارجي', text)
+    scenes = [scene.strip() for scene in scenes if scene.strip()]
+    return scenes
+
+# Function to extract scene details (location and time)
+def extract_scene_details(scene):
+    details = {}
+    location_match = re.search(r'(داخلي|خارجي)', scene)
+    time_match = re.search(r'(ليلاً|نهاراً|شروق|غروب)', scene)
+
+    if location_match:
+        details['location'] = location_match.group()
+    if time_match:
+        details['time'] = time_match.group()
+
+    return details
+
+# Function to extract character ages
+def extract_ages(text):
+    ages = re.findall(r'\b(\d{1,2})\s*(?:عام|سنة|سنوات)\s*(?:من العمر|عمره|عمرها)', text)
+    return ages
+
+# Function to extract character descriptions
+def extract_character_descriptions(text):
+    descriptions = re.findall(r'شخصية\s*(.*?)\s*:\s*وصف\s*(.*?)\s*(?:\.|،)', text, re.DOTALL)
+    return descriptions
+
+# Function to count how often each character appears
+def extract_character_frequency(entities):
+    persons = [ent[0] for ent in entities['PERSON']]
+    frequency = Counter(persons)
+    return frequency
+
+# Function to extract dialogues and identify the speakers
+def extract_dialogues(text):
+    dialogues = re.findall(r'(.*?)(?:\s*:\s*)(.*?)(?=\n|$)', text, re.DOTALL)
+    return dialogues
+
+# Function to analyze the texts, extract the information, and save the results
 def analyze_and_complete(file_paths):
     results = []
-    output_directory = "/Volumes/CLOCKWORK T/clockworkspace/first pro"
+    output_directory = os.getenv("SPACE_DIR", "/Volumes/CLOCKWORK T/clockworkspace/first pro")

     for file_path in file_paths:
         if file_path.endswith(".pdf"):
@@ -58,14 +133,25 @@ def analyze_and_complete(file_paths):

         filename_prefix = os.path.splitext(os.path.basename(file_path))[0]

-
+        camel_entities = camel_ner_analysis(text)
+        sentiments = analyze_sentiments(text)
         sentences = nltk_extract_sentences(text)
         quotes = nltk_extract_quotes(text)
         token_count = count_tokens(text)
+        scenes = extract_scenes(text)
+        ages = extract_ages(text)
+        character_descriptions = extract_character_descriptions(text)
+        character_frequency = extract_character_frequency(camel_entities)
+        dialogues = extract_dialogues(text)
+
+        scene_details = [extract_scene_details(scene) for scene in scenes]

         # Save the results to files
-        with open(os.path.join(output_directory, f"{filename_prefix}
-            file.write(str(
+        with open(os.path.join(output_directory, f"{filename_prefix}_entities.txt"), "w", encoding="utf-8") as file:
+            file.write(str(camel_entities))
+
+        with open(os.path.join(output_directory, f"{filename_prefix}_sentiments.txt"), "w", encoding="utf-8") as file:
+            file.write(str(sentiments))

         with open(os.path.join(output_directory, f"{filename_prefix}_sentences.txt"), "w", encoding="utf-8") as file:
             file.write("\n".join(sentences))
@@ -75,8 +161,26 @@

         with open(os.path.join(output_directory, f"{filename_prefix}_token_count.txt"), "w", encoding="utf-8") as file:
             file.write(str(token_count))
+
+        with open(os.path.join(output_directory, f"{filename_prefix}_scenes.txt"), "w", encoding="utf-8") as file:
+            file.write("\n".join(scenes))
+
+        with open(os.path.join(output_directory, f"{filename_prefix}_scene_details.txt"), "w", encoding="utf-8") as file:
+            file.write(str(scene_details))
+
+        with open(os.path.join(output_directory, f"{filename_prefix}_ages.txt"), "w", encoding="utf-8") as file:
+            file.write(str(ages))
+
+        with open(os.path.join(output_directory, f"{filename_prefix}_character_descriptions.txt"), "w", encoding="utf-8") as file:
+            file.write(str(character_descriptions))
+
+        with open(os.path.join(output_directory, f"{filename_prefix}_character_frequency.txt"), "w", encoding="utf-8") as file:
+            file.write(str(character_frequency))
+
+        with open(os.path.join(output_directory, f"{filename_prefix}_dialogues.txt"), "w", encoding="utf-8") as file:
+            file.write(str(dialogues))

-        results.append((str(
+        results.append((str(camel_entities), str(sentiments), "\n".join(sentences), "\n".join(quotes), str(token_count), "\n".join(scenes), str(scene_details), str(ages), str(character_descriptions), str(character_frequency), str(dialogues)))

     return results
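A note on the tokenizer change above: NLTK's punkt data ships no Arabic model, so the new nltk.tokenize.sent_tokenize(text, language='arabic') calls try to load a tokenizers/punkt pickle that does not exist and raise LookupError on the first request; that is a plausible source of this Space's "Runtime error" badge. A minimal fallback sketch that splits on Arabic and Latin sentence-ending punctuation instead; the helper name and regex are illustrative, not part of the commit:

import re

def arabic_sent_tokenize(text):
    # Split after '.', '!', '?', the Arabic question mark '؟', and '…'.
    parts = re.split(r'(?<=[.!?؟…])\s+', text)
    return [p.strip() for p in parts if p.strip()]

print(arabic_sent_tokenize("ذهب سعيد إلى السوق. هل عاد؟ نعم!"))
# ['ذهب سعيد إلى السوق.', 'هل عاد؟', 'نعم!']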
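The bucketing in camel_ner_analysis is also worth sanity-checking: the pretrained camel_tools recognizer emits BIO-style tag strings (for example 'B-LOC' or 'O') rather than bare keys like "PERSON", and, if I read the camel_tools API correctly, predict expects a list of token lists while predict_sentence labels a single token list. A hedged inspection sketch: the sample sentence is invented, and both the method name and the tag set should be verified against the installed camel_tools version.

from collections import defaultdict
from camel_tools.tokenizers.word import simple_word_tokenize
from camel_tools.ner import NERecognizer

ner = NERecognizer.pretrained()
tokens = simple_word_tokenize("سافر أحمد إلى القاهرة")
labels = ner.predict_sentence(tokens)  # assumed: one label per token

# Group tokens by whatever tags the model actually returns,
# instead of hard-coding keys such as "PERSON".
by_label = defaultdict(list)
for token, label in zip(tokens, labels):
    by_label[label].append(token)
print(dict(by_label))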
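The scene, time, and age heuristics are plain re calls, so they can be exercised without any models. A quick check against an invented screenplay snippet; note that re.split consumes the داخلي/خارجي marker itself, so extract_scene_details will never find a location inside the scenes that extract_scenes returns:

import re

sample = "داخلي - غرفة المعيشة - ليلاً\nيجلس كريم، 35 سنة من العمر، أمام التلفاز."

# Same patterns as extract_scenes, extract_scene_details, and extract_ages.
scenes = [s.strip() for s in re.split(r'داخلي|خارجي', sample) if s.strip()]
time_match = re.search(r'(ليلاً|نهاراً|شروق|غروب)', sample)
ages = re.findall(r'\b(\d{1,2})\s*(?:عام|سنة|سنوات)\s*(?:من العمر|عمره|عمرها)', sample)

print(len(scenes))         # 1 (the marker itself is split away)
print(time_match.group())  # 'ليلاً'
print(ages)                # ['35']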
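Another plausible source of the "Runtime error" badge: the fallback output directory "/Volumes/CLOCKWORK T/clockworkspace/first pro" is a macOS volume path that will not exist inside the Space's Linux container, so every open(os.path.join(output_directory, ...)) raises FileNotFoundError unless the SPACE_DIR environment variable points somewhere writable. A defensive sketch; the /tmp location and the helper are assumptions, not the committed behavior:

import os

output_directory = os.getenv("SPACE_DIR", "/tmp/first_pro_output")
os.makedirs(output_directory, exist_ok=True)  # create the directory before writing

def save_text(name, payload):
    # Route every artifact through one checked path.
    path = os.path.join(output_directory, name)
    with open(path, "w", encoding="utf-8") as fh:
        fh.write(payload)
    return path

print(save_text("smoke_test.txt", "ok"))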
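Finally, app.py imports gradio, but the visible hunks stop before any interface code. A hypothetical wiring of analyze_and_complete into a Gradio UI might look like the sketch below; every component and parameter choice here is an assumption about the unseen part of the file, not the Space's actual layout:

import gradio as gr

demo = gr.Interface(
    fn=analyze_and_complete,  # defined in app.py above
    inputs=gr.File(file_count="multiple", type="filepath", label="PDF files"),
    outputs=gr.JSON(label="Analysis results"),
    title="Arabic screenplay analyzer",
)

if __name__ == "__main__":
    demo.launch()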