Spaces:
Sleeping
Sleeping
| """Data models for PageSpeed Insights analysis.""" | |
| from dataclasses import dataclass | |
| from typing import List, Optional, Dict, Any | |
| from datetime import datetime | |
@dataclass
class Opportunity:
    """A single performance optimization opportunity from a Lighthouse audit.

    Built by ``PageSpeedData.from_lighthouse_data`` from audits whose
    ``details.type`` is ``"opportunity"`` and whose score is below 1.
    """

    # Human-readable audit title (Lighthouse ``title`` field).
    title: str
    # Longer audit explanation (Lighthouse ``description`` field).
    description: str
    # Estimated savings in SECONDS (converted from ``overallSavingsMs``).
    savings: float
@dataclass
class PageSpeedData:
    """PageSpeed Insights analysis data for a single URL.

    Category scores are scaled to 0-100; timing metrics are converted
    from the raw Lighthouse milliseconds to the units noted below.
    """

    url_original: str               # final displayed URL (falls back to the requested URL)
    fetch_time: str                 # ISO-8601 timestamp of the Lighthouse fetch
    device: str                     # analysis strategy, e.g. 'mobile' or 'desktop'
    performance_score: float        # 0-100
    accessibility_score: float      # 0-100
    best_practices_score: float     # 0-100
    seo_score: float                # 0-100
    fcp: float                      # First Contentful Paint, seconds
    lcp: float                      # Largest Contentful Paint, seconds
    tbt: float                      # Total Blocking Time, milliseconds
    cls: float                      # Cumulative Layout Shift, unitless
    speed_index: float              # Speed Index, seconds
    opportunities: List[Opportunity]

    @classmethod
    def from_lighthouse_data(cls, lighthouse_data: Dict[str, Any], site_url: str, strategy: str) -> 'PageSpeedData':
        """Create PageSpeedData from a Lighthouse API response.

        Args:
            lighthouse_data: The ``lighthouseResult`` payload of a
                PageSpeed Insights API response (``audits`` + ``categories``).
            site_url: URL that was requested; used as a fallback when the
                response lacks ``finalDisplayedUrl``.
            strategy: Analysis strategy, stored as ``device``.

        Returns:
            A populated ``PageSpeedData`` instance.
        """
        audits = lighthouse_data.get('audits', {})
        categories = lighthouse_data.get('categories', {})

        def _score(category: str) -> float:
            # Lighthouse reports ``score: null`` for errored categories;
            # ``.get(..., 0)`` only covers a *missing* key, so coerce None -> 0.
            return (categories.get(category, {}).get('score') or 0) * 100

        def _numeric(audit_id: str) -> float:
            return audits.get(audit_id, {}).get('numericValue', 0)

        opportunities = []
        for audit_id, audit_data in audits.items():
            if audit_data.get('details', {}).get('type') == 'opportunity':
                # ``score`` may be None (not scored); treat that like a
                # passing audit, matching the original default of 1.
                score = audit_data.get('score', 1)
                if score is not None and score < 1:
                    opportunities.append(Opportunity(
                        title=audit_data.get('title', ''),
                        description=audit_data.get('description', ''),
                        savings=audit_data.get('details', {}).get('overallSavingsMs', 0) / 1000
                    ))

        return cls(
            url_original=lighthouse_data.get('finalDisplayedUrl', site_url),
            fetch_time=lighthouse_data.get('fetchTime', datetime.now().isoformat()),
            device=strategy,
            performance_score=_score('performance'),
            accessibility_score=_score('accessibility'),
            best_practices_score=_score('best-practices'),
            seo_score=_score('seo'),
            # Paint/speed metrics arrive in ms; stored in seconds.
            fcp=_numeric('first-contentful-paint') / 1000,
            lcp=_numeric('largest-contentful-paint') / 1000,
            # TBT stays in ms, CLS is unitless — no conversion.
            tbt=_numeric('total-blocking-time'),
            cls=_numeric('cumulative-layout-shift'),
            speed_index=_numeric('speed-index') / 1000,
            opportunities=opportunities
        )
@dataclass
class MetricComparison:
    """Before/after comparison of a single metric between two analyses."""

    # Metric value from the first ("before") analysis.
    antes: float
    # Metric value from the second ("after") analysis.
    depois: float
    # ``depois - antes``; positive means the value increased.
    diferenca: float
@dataclass
class ComparisonResult:
    """Complete comparison result between two PageSpeed analyses."""

    performance: MetricComparison
    accessibility: MetricComparison
    best_practices: MetricComparison
    seo: MetricComparison
    fcp: MetricComparison
    lcp: MetricComparison
    tbt: MetricComparison
    cls: MetricComparison
    speed_index: MetricComparison

    @classmethod
    def from_data(cls, data1: PageSpeedData, data2: PageSpeedData) -> 'ComparisonResult':
        """Create a comparison result from two PageSpeedData objects.

        Args:
            data1: The "before" analysis.
            data2: The "after" analysis.

        Returns:
            A ``ComparisonResult`` where each metric holds before/after
            values and their difference. Timing metrics are rounded to the
            same precision as the original report (2 decimals for seconds,
            0 for TBT ms, 3 for CLS); category scores are left unrounded.
        """
        def _cmp(before: float, after: float, ndigits: Optional[int] = None) -> MetricComparison:
            # Round the raw difference (not the difference of rounded
            # values) so tiny float drift doesn't accumulate.
            if ndigits is None:
                return MetricComparison(antes=before, depois=after,
                                        diferenca=after - before)
            return MetricComparison(antes=round(before, ndigits),
                                    depois=round(after, ndigits),
                                    diferenca=round(after - before, ndigits))

        return cls(
            performance=_cmp(data1.performance_score, data2.performance_score),
            accessibility=_cmp(data1.accessibility_score, data2.accessibility_score),
            best_practices=_cmp(data1.best_practices_score, data2.best_practices_score),
            seo=_cmp(data1.seo_score, data2.seo_score),
            fcp=_cmp(data1.fcp, data2.fcp, 2),
            lcp=_cmp(data1.lcp, data2.lcp, 2),
            tbt=_cmp(data1.tbt, data2.tbt, 0),
            cls=_cmp(data1.cls, data2.cls, 3),
            speed_index=_cmp(data1.speed_index, data2.speed_index, 2),
        )

    def to_dict(self) -> Dict[str, Dict[str, float]]:
        """Convert the comparison result to a dict keyed by display label.

        Returns:
            Mapping of display label (with units where applicable) to a
            dict with ``antes``, ``depois`` and ``diferenca`` values,
            preserving the metric order of the class fields.
        """
        labeled = (
            ('Performance', self.performance),
            ('Accessibility', self.accessibility),
            ('Best Practices', self.best_practices),
            ('SEO', self.seo),
            ('FCP (s)', self.fcp),
            ('LCP (s)', self.lcp),
            ('TBT (ms)', self.tbt),
            ('CLS', self.cls),
            ('Speed Index (s)', self.speed_index),
        )
        return {
            label: {
                'antes': metric.antes,
                'depois': metric.depois,
                'diferenca': metric.diferenca,
            }
            for label, metric in labeled
        }