shaheerawan3 commited on
Commit
d41b0e0
·
verified ·
1 Parent(s): f8166f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +335 -215
app.py CHANGED
@@ -9,9 +9,7 @@ from nltk.corpus import stopwords
9
  from nltk.sentiment import SentimentIntensityAnalyzer
10
  from gensim import corpora, models
11
  import spacy
12
- from bs4 import BeautifulSoup
13
  import requests
14
- import wikipedia
15
  from langdetect import detect
16
  import json
17
  import base64
@@ -19,173 +17,270 @@ from datetime import datetime
19
  import tempfile
20
  from fpdf import FPDF
21
  import os
 
 
 
 
 
22
 
23
- # Download required NLTK data
24
- import nltk
25
- try:
26
- nltk.data.find('tokenizers/punkt')
27
- nltk.data.find('stopwords')
28
- nltk.data.find('vader_lexicon')
29
- except LookupError:
30
- nltk.download('punkt')
31
- nltk.download('stopwords')
32
- nltk.download('vader_lexicon')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  class AdvancedAnalyzer:
 
 
35
  def __init__(self):
36
- # Initialize sentiment analyzer
37
- self.sentiment_analyzer = SentimentIntensityAnalyzer()
38
 
39
- # Initialize NER model
 
 
40
  try:
 
41
  self.nlp = spacy.load('en_core_web_sm')
42
- except:
43
- os.system('python -m spacy download en_core_web_sm')
44
- self.nlp = spacy.load('en_core_web_sm')
45
-
46
- # Initialize multilingual sentiment model
47
- self.sentiment_model = pipeline(
48
- "sentiment-analysis",
49
- model="nlptown/bert-base-multilingual-uncased-sentiment",
50
- return_all_scores=True
51
- )
52
-
53
- def analyze_sentiment(self, text, language='en'):
54
- """Advanced sentiment analysis with emotion detection"""
55
- if language != 'en':
56
- # Use multilingual model for non-English text
57
- sentiments = self.sentiment_model(text)[0]
58
- return {
59
- 'compound': max(s['score'] for s in sentiments),
60
- 'emotions': {s['label']: s['score'] for s in sentiments}
61
- }
62
- else:
63
- # Use VADER for English text
64
- scores = self.sentiment_analyzer.polarity_scores(text)
65
- return {
66
- 'compound': scores['compound'],
67
- 'emotions': {
68
- 'positive': scores['pos'],
69
- 'negative': scores['neg'],
70
- 'neutral': scores['neu']
71
- }
72
- }
73
-
74
- def extract_entities(self, text):
75
- """Named Entity Recognition"""
76
- doc = self.nlp(text)
77
- entities = {}
78
- for ent in doc.ents:
79
- if ent.label_ not in entities:
80
- entities[ent.label_] = []
81
- entities[ent.label_].append(ent.text)
82
- return entities
83
-
84
- def topic_modeling(self, text):
85
- """Extract main topics from text"""
86
- # Tokenize and remove stopwords
87
- stop_words = set(stopwords.words('english'))
88
- tokens = [word.lower() for word in word_tokenize(text)
89
- if word.lower() not in stop_words and word.isalnum()]
90
 
91
- # Create dictionary and corpus
92
- dictionary = corpora.Dictionary([tokens])
93
- corpus = [dictionary.doc2bow(tokens)]
 
94
 
95
- # Train LDA model
96
- lda_model = models.LdaModel(
97
- corpus,
98
- num_topics=3,
99
- id2word=dictionary,
100
- passes=15
101
- )
102
 
103
- # Extract topics
104
- topics = []
105
- for idx, topic in lda_model.show_topics():
106
- topics.append({
107
- 'id': idx,
108
- 'words': [word.split('*')[1].strip().strip('"')
109
- for word in topic.split('+')]
110
- })
111
- return topics
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
  class PDFGenerator:
 
 
114
  def __init__(self):
115
  self.pdf = FPDF()
116
 
117
- def generate_report(self, analysis_results):
118
- """Generate a professional PDF report"""
119
- self.pdf.add_page()
120
-
121
- # Header
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  self.pdf.set_font('Arial', 'B', 16)
123
  self.pdf.cell(190, 10, 'AI Output Analysis Report', 0, 1, 'C')
124
  self.pdf.ln(10)
125
-
126
- # Summary
 
127
  self.pdf.set_font('Arial', 'B', 12)
128
  self.pdf.cell(190, 10, 'Analysis Summary', 0, 1, 'L')
129
  self.pdf.set_font('Arial', '', 10)
130
 
131
- # Add sentiment scores
 
 
 
 
 
 
132
  self.pdf.cell(190, 10,
133
- f"Overall Sentiment: {analysis_results['sentiment']['compound']:.2f}",
134
  0, 1, 'L')
135
-
136
- # Add topics
137
- self.pdf.set_font('Arial', 'B', 12)
138
- self.pdf.cell(190, 10, 'Main Topics', 0, 1, 'L')
139
- self.pdf.set_font('Arial', '', 10)
140
- for topic in analysis_results['topics']:
141
- self.pdf.cell(190, 10,
142
- f"Topic {topic['id']+1}: {', '.join(topic['words'][:5])}",
143
- 0, 1, 'L')
144
-
145
- # Add entities
146
- self.pdf.set_font('Arial', 'B', 12)
147
- self.pdf.cell(190, 10, 'Named Entities', 0, 1, 'L')
148
- self.pdf.set_font('Arial', '', 10)
149
- for entity_type, entities in analysis_results['entities'].items():
150
- if entities:
151
- self.pdf.cell(190, 10,
152
- f"{entity_type}: {', '.join(entities[:5])}",
153
- 0, 1, 'L')
154
-
155
- # Footer
156
- self.pdf.set_y(-15)
157
- self.pdf.set_font('Arial', 'I', 8)
158
- self.pdf.cell(0, 10, f'Generated on {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}', 0, 0, 'C')
159
- self.pdf.cell(0, 10, 'Created by Muhammad Shaheer', 0, 0, 'R')
160
-
161
- # Save to temporary file
162
- with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp:
163
- self.pdf.output(tmp.name)
164
- return tmp.name
165
 
166
  def main():
167
- st.set_page_config(page_title="Enhanced AI Output Analyzer", layout="wide")
 
 
 
 
168
 
169
- # Custom CSS
170
  st.markdown("""
171
  <style>
172
  .main { padding: 2rem; }
173
- .stMetric { background-color: #f0f2f6; padding: 1rem; border-radius: 0.5rem; }
174
- .entity-tag { background-color: #e9ecef; padding: 0.2rem 0.5rem; border-radius: 0.25rem; margin: 0.2rem; }
175
- .dark-mode { background-color: #1a1a1a; color: #ffffff; }
 
 
 
 
 
 
 
 
 
 
176
  </style>
177
  """, unsafe_allow_html=True)
178
 
179
- # Sidebar
 
 
 
 
180
  with st.sidebar:
181
- st.title("Settings")
182
- theme = st.selectbox("Theme", ["Light", "Dark"])
183
- language = st.selectbox("Language", ["English", "Spanish", "French", "German"])
184
- st.markdown("---")
185
- st.markdown("### Analysis Options")
186
- show_sentiment = st.checkbox("Show Sentiment Analysis", True)
187
- show_topics = st.checkbox("Show Topic Analysis", True)
188
- show_entities = st.checkbox("Show Named Entities", True)
 
 
 
189
 
190
  # Main content
191
  st.title("Enhanced AI Output Analyzer")
@@ -193,92 +288,117 @@ def main():
193
  # Input section
194
  input_method = st.radio("Choose input method:", ["Text Input", "File Upload"])
195
 
 
196
  if input_method == "File Upload":
197
- uploaded_file = st.file_uploader("Upload a text file", type=['txt'])
198
- if uploaded_file:
199
- text = uploaded_file.read().decode()
200
- else:
201
- text = ""
202
  else:
203
  text = st.text_area("Enter text to analyze:", height=200)
204
 
205
- # Analysis button
206
- if st.button("Analyze", type="primary") and text:
207
- with st.spinner("Analyzing text..."):
208
- analyzer = AdvancedAnalyzer()
209
-
210
- # Perform analysis
211
- results = {
212
- 'sentiment': analyzer.analyze_sentiment(text),
213
- 'entities': analyzer.extract_entities(text),
214
- 'topics': analyzer.topic_modeling(text)
215
- }
216
-
217
- # Display results
218
- col1, col2, col3 = st.columns(3)
219
-
220
- if show_sentiment:
221
- with col1:
222
- st.metric("Overall Sentiment",
223
- f"{results['sentiment']['compound']:.2f}")
224
-
225
- if show_topics:
226
- with col2:
227
- st.metric("Topics Detected",
228
- len(results['topics']))
229
-
230
- if show_entities:
231
- with col3:
232
- st.metric("Entities Found",
233
- sum(len(ents) for ents in results['entities'].values()))
234
-
235
- # Detailed results
236
- st.subheader("Detailed Analysis")
237
-
238
- tab1, tab2, tab3 = st.tabs(["Sentiment", "Topics", "Entities"])
239
-
240
- with tab1:
241
- emotions_df = pd.DataFrame(
242
- results['sentiment']['emotions'].items(),
243
- columns=['Emotion', 'Score']
244
- )
245
- st.plotly_chart(
246
- px.bar(emotions_df, x='Emotion', y='Score',
247
- title="Emotional Analysis"),
248
- use_container_width=True
249
- )
250
-
251
- with tab2:
252
- for topic in results['topics']:
253
- st.write(f"Topic {topic['id']+1}:", ", ".join(topic['words']))
254
-
255
- with tab3:
256
- for entity_type, entities in results['entities'].items():
257
- if entities:
258
- st.write(f"**{entity_type}:**")
259
- st.write(", ".join(entities))
260
-
261
- # Generate PDF report
262
- pdf_generator = PDFGenerator()
263
- pdf_path = pdf_generator.generate_report(results)
264
-
265
- with open(pdf_path, "rb") as pdf_file:
266
- st.download_button(
267
- label="Download Analysis Report (PDF)",
268
- data=pdf_file,
269
- file_name="analysis_report.pdf",
270
- mime="application/pdf"
271
  )
272
-
273
- # Clean up
274
- os.unlink(pdf_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
275
 
276
- # Footer
277
- st.markdown("---")
278
- st.markdown(
279
- "<p style='text-align: center; color: gray;'>Created by Muhammad Shaheer</p>",
280
- unsafe_allow_html=True
281
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
282
 
283
  if __name__ == "__main__":
284
  main()
 
9
  from nltk.sentiment import SentimentIntensityAnalyzer
10
  from gensim import corpora, models
11
  import spacy
 
12
  import requests
 
13
  from langdetect import detect
14
  import json
15
  import base64
 
17
  import tempfile
18
  from fpdf import FPDF
19
  import os
20
+ from functools import lru_cache
21
+ import logging
22
+ from concurrent.futures import ThreadPoolExecutor
23
+ from typing import Dict, List, Any, Optional
24
+ import io
25
 
26
# Configure logging
# Module-wide logger: timestamped INFO-level records in a
# "<time> - <module> - <level> - <message>" format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
32
+
33
class TextProcessor:
    """Handles text input processing and validation."""

    @staticmethod
    def process_file_upload(uploaded_file) -> Optional[str]:
        """Return the decoded text of an uploaded file, or None on failure.

        Errors are logged and surfaced to the user via st.error rather
        than propagated.
        """
        try:
            if uploaded_file is None:
                return None

            # Dispatch on the lower-cased filename suffix; only plain
            # text files are accepted.
            suffix = uploaded_file.name.split('.')[-1].lower()
            if suffix != 'txt':
                raise ValueError(f"Unsupported file type: {suffix}")
            return uploaded_file.read().decode('utf-8')
        except Exception as e:
            logger.error(f"Error processing file upload: {str(e)}")
            st.error(f"Error processing file: {str(e)}")
            return None

    @staticmethod
    def validate_text(text: str) -> bool:
        """Return True when *text* is non-empty and within the size limit.

        Invalid input reports the reason through st.error and yields False.
        """
        if not text or not text.strip():
            st.error("Please enter some text to analyze")
            return False
        if len(text.split()) > 10000:  # Arbitrary word-count ceiling
            st.error("Text is too long. Please enter a shorter text")
            return False
        return True
66
 
67
class AdvancedAnalyzer:
    """Handles text analysis using various NLP techniques.

    Wraps NLTK's VADER for English sentiment, a multilingual transformer
    pipeline for other languages, spaCy for NER/tokenization, and gensim
    LDA for topic modeling.
    """

    def __init__(self):
        self._initialize_models()

    def _initialize_models(self):
        """Initialize all required models.

        NOTE: the previous @lru_cache(maxsize=1) on this bound method was
        removed — lru_cache on an instance method keys the cache on `self`,
        so it never shared models across instances and kept every instance
        alive for the cache's lifetime (ruff B019). Each instance simply
        loads its models once here.

        Raises:
            Exception: re-raised after logging if any model fails to load.
        """
        try:
            self.sentiment_analyzer = SentimentIntensityAnalyzer()
            self.nlp = spacy.load('en_core_web_sm')
            # return_all_scores=True yields one score per star-rating label.
            # NOTE(review): deprecated in recent transformers releases
            # (top_k=None is the replacement) — confirm installed version.
            self.sentiment_model = pipeline(
                "sentiment-analysis",
                model="nlptown/bert-base-multilingual-uncased-sentiment",
                return_all_scores=True
            )
            logger.info("Models initialized successfully")
        except Exception as e:
            logger.error(f"Error initializing models: {str(e)}")
            raise

    def analyze_sentiment_batch(self, text: str, batch_size: int = 1000) -> Dict:
        """Score sentiment sentence-by-sentence and average the results.

        Args:
            text: full input document.
            batch_size: number of sentences submitted to the thread pool
                per round.

        Returns:
            {'compound': float,
             'emotions': {'positive': float, 'negative': float, 'neutral': float}}
        """
        # Local import: sent_tokenize is not visibly imported at module top
        # in this file, and nltk is already a project dependency.
        from nltk.tokenize import sent_tokenize

        sentences = sent_tokenize(text)
        # Guard: averaging an empty result list would divide by zero.
        if not sentences:
            return {'compound': 0.0,
                    'emotions': {'positive': 0.0, 'negative': 0.0, 'neutral': 0.0}}

        results = []
        with ThreadPoolExecutor() as executor:
            for i in range(0, len(sentences), batch_size):
                batch = sentences[i:i + batch_size]
                results.extend(executor.map(self.analyze_sentiment, batch))

        def _mean(values: List[float]) -> float:
            # Plain-Python mean: avoids relying on numpy being imported here.
            return sum(values) / len(values)

        return {
            'compound': _mean([r['compound'] for r in results]),
            'emotions': {
                'positive': _mean([r['emotions']['positive'] for r in results]),
                'negative': _mean([r['emotions']['negative'] for r in results]),
                'neutral': _mean([r['emotions']['neutral'] for r in results]),
            },
        }

    def analyze_sentiment(self, text: str, language: str = 'en') -> Dict:
        """Analyze sentiment with emotion detection.

        English text goes through VADER; anything else through the
        multilingual transformer model.

        Raises:
            Exception: re-raised after logging on model failure.
        """
        try:
            if language != 'en':
                sentiments = self.sentiment_model(text)[0]
                return {
                    'compound': max(s['score'] for s in sentiments),
                    'emotions': {s['label']: s['score'] for s in sentiments}
                }
            else:
                scores = self.sentiment_analyzer.polarity_scores(text)
                return {
                    'compound': scores['compound'],
                    'emotions': {
                        'positive': scores['pos'],
                        'negative': scores['neg'],
                        'neutral': scores['neu']
                    }
                }
        except Exception as e:
            logger.error(f"Error in sentiment analysis: {str(e)}")
            raise

    def extract_entities(self, text: str) -> Dict[str, List[str]]:
        """Extract named entities grouped by spaCy label.

        Returns plain entity strings. The previous implementation filtered
        on `ent.label_prob`, but spaCy `Span` objects expose no such
        attribute, so it raised AttributeError on the first entity found.
        """
        try:
            doc = self.nlp(text)
            entities: Dict[str, List[str]] = {}
            for ent in doc.ents:
                entities.setdefault(ent.label_, []).append(ent.text)
            return entities
        except Exception as e:
            logger.error(f"Error in entity extraction: {str(e)}")
            raise

    def topic_modeling(self, text: str, num_topics: int = 3) -> List[Dict]:
        """Extract main topics using LDA with spaCy-based preprocessing.

        Returns a list of {'id': int, 'words': [(word, probability), ...]}.
        The previous version called `lda_model.get_topic_coherence`, which
        does not exist on gensim's LdaModel (coherence requires a separate
        CoherenceModel), so it raised AttributeError; topics are now
        returned in model order without a coherence score.
        """
        try:
            # Lemmatize; drop stopwords, punctuation and non-alphabetic tokens.
            doc = self.nlp(text.lower())
            tokens = [
                token.lemma_ for token in doc
                if not token.is_stop and not token.is_punct and token.is_alpha
            ]

            # Single-document corpus (the loop variable is `t`, not `text`,
            # to avoid shadowing the parameter as the old code did).
            texts = [tokens]
            dictionary = corpora.Dictionary(texts)
            corpus = [dictionary.doc2bow(t) for t in texts]

            # random_state pins results; alpha='auto' learns the prior.
            lda_model = models.LdaModel(
                corpus=corpus,
                id2word=dictionary,
                num_topics=num_topics,
                random_state=42,
                passes=15,
                alpha='auto',
                per_word_topics=True
            )

            topics = []
            for idx, topic in lda_model.show_topics(formatted=False):
                topics.append({
                    'id': idx,
                    'words': [(word, round(prob, 4)) for word, prob in topic],
                })
            return topics
        except Exception as e:
            logger.error(f"Error in topic modeling: {str(e)}")
            raise
+ raise
191
 
192
  class PDFGenerator:
193
+ """Generates professional PDF reports with visualizations"""
194
+
195
  def __init__(self):
196
  self.pdf = FPDF()
197
 
198
+ def generate_report(self, analysis_results: Dict) -> str:
199
+ """Generate a detailed PDF report with charts"""
200
+ try:
201
+ self.pdf.add_page()
202
+ self._add_header()
203
+ self._add_summary(analysis_results)
204
+ self._add_sentiment_analysis(analysis_results)
205
+ self._add_topics(analysis_results)
206
+ self._add_entities(analysis_results)
207
+ self._add_footer()
208
+
209
+ # Save to temporary file
210
+ with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp:
211
+ self.pdf.output(tmp.name)
212
+ return tmp.name
213
+ except Exception as e:
214
+ logger.error(f"Error generating PDF report: {str(e)}")
215
+ raise
216
+
217
+ def _add_header(self):
218
+ """Add report header"""
219
  self.pdf.set_font('Arial', 'B', 16)
220
  self.pdf.cell(190, 10, 'AI Output Analysis Report', 0, 1, 'C')
221
  self.pdf.ln(10)
222
+
223
+ def _add_summary(self, results: Dict):
224
+ """Add analysis summary"""
225
  self.pdf.set_font('Arial', 'B', 12)
226
  self.pdf.cell(190, 10, 'Analysis Summary', 0, 1, 'L')
227
  self.pdf.set_font('Arial', '', 10)
228
 
229
+ compound_score = results['sentiment']['compound']
230
+ sentiment_label = (
231
+ 'Positive' if compound_score > 0.05
232
+ else 'Negative' if compound_score < -0.05
233
+ else 'Neutral'
234
+ )
235
+
236
  self.pdf.cell(190, 10,
237
+ f"Overall Sentiment: {sentiment_label} ({compound_score:.2f})",
238
  0, 1, 'L')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
 
240
def main():
    """Streamlit entry point: page setup, input collection, analysis flow."""
    st.set_page_config(
        page_title="Enhanced AI Output Analyzer",
        layout="wide",
        initial_sidebar_state="expanded"
    )

    # Load custom CSS
    st.markdown("""
    <style>
    .main { padding: 2rem; }
    .stMetric {
        background-color: #f0f2f6;
        padding: 1rem;
        border-radius: 0.5rem;
        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    }
    .entity-tag {
        background-color: #e9ecef;
        padding: 0.2rem 0.5rem;
        border-radius: 0.25rem;
        margin: 0.2rem;
        display: inline-block;
    }
    </style>
    """, unsafe_allow_html=True)

    # Initialize session state (survives Streamlit reruns)
    if 'analysis_history' not in st.session_state:
        st.session_state.analysis_history = []

    # Sidebar configuration
    with st.sidebar:
        st.title("Analysis Settings")

        # Analysis options
        st.subheader("Analysis Options")
        num_topics = st.slider("Number of Topics", 2, 10, 3)
        # NOTE(review): this threshold is read from the UI but never passed
        # to the analyzer in this chunk — confirm intended wiring.
        min_entity_confidence = st.slider("Entity Confidence Threshold", 0.0, 1.0, 0.8)
        batch_size = st.select_slider(
            "Processing Batch Size",
            options=[500, 1000, 2000, 5000],
            value=1000
        )

    # Main content
    st.title("Enhanced AI Output Analyzer")

    # Input section
    input_method = st.radio("Choose input method:", ["Text Input", "File Upload"])

    text_processor = TextProcessor()
    if input_method == "File Upload":
        # May yield None on upload failure; validate_text rejects None below.
        text = text_processor.process_file_upload(
            st.file_uploader("Upload a text file", type=['txt'])
        )
    else:
        text = st.text_area("Enter text to analyze:", height=200)

    # Analysis section
    if st.button("Analyze", type="primary") and text_processor.validate_text(text):
        try:
            with st.spinner("Performing analysis..."):
                analyzer = AdvancedAnalyzer()

                # Perform analysis with progress tracking
                progress_bar = st.progress(0)

                # Sentiment analysis
                results = {
                    'sentiment': analyzer.analyze_sentiment_batch(
                        text, batch_size=batch_size
                    )
                }
                progress_bar.progress(0.33)

                # Topic modeling
                results['topics'] = analyzer.topic_modeling(
                    text, num_topics=num_topics
                )
                progress_bar.progress(0.66)

                # Entity extraction
                results['entities'] = analyzer.extract_entities(text)
                progress_bar.progress(1.0)

                # Display results
                st.success("Analysis complete!")

                # Save to history
                st.session_state.analysis_history.append({
                    'timestamp': datetime.now(),
                    'results': results
                })

                # Display visualizations
                display_results(results)

                # Generate report
                generate_downloadable_report(results)

        except Exception as e:
            logger.error(f"Error during analysis: {str(e)}")
            st.error(f"An error occurred during analysis: {str(e)}")
344
+
345
def display_results(results: Dict):
    """Render interactive sentiment visualizations for an analysis result set."""
    # Sentiment Analysis
    st.subheader("Sentiment Analysis")
    gauge_col, pie_col = st.columns(2)

    with gauge_col:
        # Compound-score gauge: red below -0.05, gray neutral band, green above.
        gauge_spec = {
            'axis': {'range': [-1, 1]},
            'bar': {'color': "darkblue"},
            'steps': [
                {'range': [-1, -0.05], 'color': "lightcoral"},
                {'range': [-0.05, 0.05], 'color': "lightgray"},
                {'range': [0.05, 1], 'color': "lightgreen"}
            ]
        }
        indicator = go.Indicator(
            mode="gauge+number",
            value=results['sentiment']['compound'],
            domain={'x': [0, 1], 'y': [0, 1]},
            gauge=gauge_spec
        )
        st.plotly_chart(go.Figure(indicator))

    with pie_col:
        # Emotion scores as a pie chart.
        emotion_rows = list(results['sentiment']['emotions'].items())
        emotions_df = pd.DataFrame(emotion_rows, columns=['Emotion', 'Score'])
        pie_fig = px.pie(
            emotions_df,
            values='Score',
            names='Emotion',
            title="Emotional Distribution"
        )
        st.plotly_chart(pie_fig)
382
+
383
def generate_downloadable_report(results: Dict):
    """Build the PDF report and expose it through a download button.

    The temporary report file is deleted once the button is registered;
    failures are logged and reported to the user instead of propagating.
    """
    try:
        report_path = PDFGenerator().generate_report(results)
        stamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        with open(report_path, "rb") as pdf_file:
            st.download_button(
                label="📊 Download Analysis Report (PDF)",
                data=pdf_file,
                file_name=f"analysis_report_{stamp}.pdf",
                mime="application/pdf"
            )

        # Clean up the temporary file
        os.unlink(report_path)
    except Exception as e:
        logger.error(f"Error generating downloadable report: {str(e)}")
        st.error("Failed to generate report. Please try again.")
402
 
403
# Script entry point — only launch the Streamlit UI when executed directly.
if __name__ == "__main__":
    main()