# Provenance: Hugging Face Space file "app.py" by user ziffir
# (commit 3eeafef, verified; raw file ~35.5 kB).
# app.py - Ana uygulama dosyası
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import transformers
from transformers import (
AutoImageProcessor,
AutoModel,
BitsAndBytesConfig,
TrainingArguments,
Trainer
)
from datasets import load_dataset, Dataset as HFDataset
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, Polygon
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import rasterio
from rasterio.transform import from_bounds
import json
import gradio as gr
import folium
from folium import plugins
from branca.element import Figure
import tempfile
import base64
from io import BytesIO
from datetime import datetime
import logging
from typing import Dict, List, Tuple, Optional, Union
import warnings
warnings.filterwarnings('ignore')
# Logging configuration: one module-level logger shared by every class below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class AdvancedGeoModel(nn.Module):
    """Advanced geo-referencing model.

    A DINOv2 image backbone produces a pooled global embedding; an optional
    location (lat/lon) branch is fused in via cross-attention and a small
    transformer encoder.  Heads:
      * regressor  -> (lat, lon, lat_uncertainty, lon_uncertainty)
      * classifier -> 7-way continent logits
    """

    def __init__(self,
                 image_embed_dim: int = 768,
                 location_embed_dim: int = 512,
                 num_attention_heads: int = 12,
                 dropout: float = 0.1):
        super().__init__()
        # DINOv2 backbone (downloaded from the Hugging Face hub on first use).
        self.dinov2_processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
        self.dinov2 = AutoModel.from_pretrained("facebook/dinov2-base")
        # Multi-scale feature extraction.
        # NOTE(review): declared but never used in forward(); kept so existing
        # checkpoints keep loading -- wire into forward() or remove later.
        self.feature_pyramid = nn.ModuleDict({
            'scale1': nn.Conv2d(768, 256, 3, padding=1),
            'scale2': nn.Conv2d(768, 256, 3, padding=1),
            'scale3': nn.Conv2d(768, 256, 3, padding=1)
        })
        # Project the pooled DINOv2 features to the working embedding width.
        self.image_projection = nn.Sequential(
            nn.Linear(768, image_embed_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.LayerNorm(image_embed_dim)
        )
        # Encode raw (lat, lon) pairs into a dense location embedding.
        self.location_encoder = nn.Sequential(
            nn.Linear(2, 128),
            nn.GELU(),
            nn.Linear(128, 256),
            nn.GELU(),
            nn.Linear(256, location_embed_dim),
            nn.Dropout(dropout)
        )
        # Cross-modal attention: the image embedding queries the location
        # embedding.  Bug fix: kdim/vdim must be given because the location
        # width differs from embed_dim -- nn.MultiheadAttention otherwise
        # requires key/value width == embed_dim and the original crashed
        # at call time.
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=image_embed_dim,
            num_heads=num_attention_heads,
            kdim=location_embed_dim,
            vdim=location_embed_dim,
            dropout=dropout,
            batch_first=True
        )
        # Fused representation is [image ; attended] -> 2 * image_embed_dim.
        # Bug fix: the original sized these layers image_embed_dim +
        # location_embed_dim, which matched neither branch of forward() and
        # raised shape errors.
        fused_dim = 2 * image_embed_dim
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=fused_dim,
            nhead=8,
            dim_feedforward=1024,
            dropout=dropout,
            batch_first=True
        )
        self.fusion_transformer = nn.TransformerEncoder(encoder_layer, num_layers=3)
        # Regression head with uncertainty estimation.
        self.regressor = nn.Sequential(
            nn.Linear(fused_dim, 512),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(512, 256),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(256, 128),
            nn.GELU(),
            nn.Linear(128, 4)  # lat, lon, lat_uncertainty, lon_uncertainty
        )
        # Classification head for continent/region (7 continents).
        self.classifier = nn.Sequential(
            nn.Linear(image_embed_dim, 256),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(256, 128),
            nn.GELU(),
            nn.Linear(128, 7)
        )

    def forward(self, pixel_values: torch.Tensor, locations: Optional[torch.Tensor] = None):
        """Run the model.

        Args:
            pixel_values: preprocessed image batch for the DINOv2 backbone.
            locations: optional (batch, 2) lat/lon tensor; when omitted a
                zero placeholder keeps both paths at the same width.

        Returns:
            dict with 'coordinates' (batch, 2), 'uncertainty' (batch, 2),
            'region_logits' (batch, 7) and 'image_embeddings'.
        """
        dinov2_output = self.dinov2(pixel_values=pixel_values, output_hidden_states=True)
        # Global average pooling over the token dimension.
        image_features = dinov2_output.last_hidden_state.mean(dim=1)
        image_embeddings = self.image_projection(image_features)
        if locations is not None:
            location_embeddings = self.location_encoder(locations)
            attended, _ = self.cross_attention(
                query=image_embeddings.unsqueeze(1),
                key=location_embeddings.unsqueeze(1),
                value=location_embeddings.unsqueeze(1)
            )
            attended = attended.squeeze(1)
        else:
            # Bug fix: the original fed a narrower tensor to the regressor
            # when no locations were given; a zero block keeps the width fixed
            # so the fusion/regression stack always applies.
            attended = torch.zeros_like(image_embeddings)
        combined = torch.cat([image_embeddings, attended], dim=1)
        fused = self.fusion_transformer(combined.unsqueeze(1)).squeeze(1)
        coords_output = self.regressor(fused)
        class_output = self.classifier(image_embeddings)
        return {
            'coordinates': coords_output[:, :2],  # lat, lon
            'uncertainty': coords_output[:, 2:],  # lat/lon uncertainty
            'region_logits': class_output,
            'image_embeddings': image_embeddings
        }
class MultiModalGeoDataset(Dataset):
    """Multi-modal geo-referencing dataset.

    Concatenates up to three Hugging Face sources (EarthView, EuroSAT,
    S2-NAIP).  A global index is mapped to the owning source through a
    cumulative-length table.  Items without coordinates receive synthetic
    random lat/lon values.
    """

    def __init__(self,
                 dataset_config: Dict,
                 transform: Optional["transforms.Compose"] = None,  # quoted: lazy annotation
                 max_samples: int = 10000):
        self.transform = transform
        self.datasets = {}        # name -> loaded HF dataset
        self.sample_weights = {}  # name -> sampling weight for loaded sources
        self.max_samples = max_samples
        # Each source is loaded best-effort: a failure (network/auth) is
        # logged and the remaining sources are still used.
        if dataset_config.get('earthview', False):
            try:
                earthview = load_dataset("satellogic/EarthView", split=f"train[:{max_samples}]")
                self.datasets['earthview'] = earthview
                self.sample_weights['earthview'] = 0.4
                logger.info("EarthView dataset loaded successfully")
            except Exception as e:
                logger.warning(f"EarthView dataset loading failed: {e}")
        if dataset_config.get('eurosat', False):
            try:
                eurosat = load_dataset("phelber/EuroSAT", "rgb", split=f"train[:{max_samples}]")
                self.datasets['eurosat'] = eurosat
                self.sample_weights['eurosat'] = 0.3
                logger.info("EuroSAT dataset loaded successfully")
            except Exception as e:
                logger.warning(f"EuroSAT dataset loading failed: {e}")
        if dataset_config.get('s2_naip', False):
            try:
                s2_naip = load_dataset("allenai/s2-naip", split=f"train[:{max_samples}]")
                self.datasets['s2_naip'] = s2_naip
                self.sample_weights['s2_naip'] = 0.3
                logger.info("S2-NAIP dataset loaded successfully")
            except Exception as e:
                logger.warning(f"S2-NAIP dataset loading failed: {e}")
        self.dataset_sizes = {name: len(ds) for name, ds in self.datasets.items()}
        total_size = sum(self.dataset_sizes.values())
        if total_size == 0:
            # Bug fix: the original divided by zero when every source failed.
            logger.warning("No datasets could be loaded; dataset is empty")
            self.dataset_weights = {}
        else:
            # Bug fix: pair weight and size by dataset name instead of
            # zipping two dicts' value views positionally.
            self.dataset_weights = {
                name: (self.dataset_sizes[name] / total_size) * weight
                for name, weight in self.sample_weights.items()
            }
        self.cumulative_lengths = self._calculate_cumulative_lengths()

    def _calculate_cumulative_lengths(self):
        """Prefix sums of source sizes; cumulative[i+1] is the end of source i."""
        cumulative = [0]
        for dataset in self.datasets.values():
            cumulative.append(cumulative[-1] + len(dataset))
        return cumulative

    def __len__(self):
        # Total number of items across all loaded sources.
        return self.cumulative_lengths[-1]

    def __getitem__(self, idx):
        # Locate the owning source by comparing against cumulative bounds.
        for i, (name, dataset) in enumerate(self.datasets.items()):
            if idx < self.cumulative_lengths[i + 1]:
                local_idx = idx - self.cumulative_lengths[i]
                return self._process_dataset_item(name, dataset, local_idx)
        raise IndexError("Index out of range")

    def _process_dataset_item(self, dataset_name: str, dataset, idx: int):
        """Dispatch one raw item to its source-specific converter."""
        item = dataset[idx]
        if dataset_name == 'earthview':
            return self._process_earthview(item)
        if dataset_name == 'eurosat':
            return self._process_eurosat(item)
        if dataset_name == 's2_naip':
            return self._process_s2_naip(item)
        # Bug fix: the original fell through and returned None silently.
        raise ValueError(f"Unknown dataset: {dataset_name}")

    def _process_earthview(self, item):
        """Convert an EarthView item; random lat/lon when metadata is absent."""
        image = item['image']
        lat = item.get('lat', torch.rand(1).item() * 180 - 90)
        lon = item.get('lon', torch.rand(1).item() * 360 - 180)
        if self.transform:
            image = self.transform(image)
        return {
            'pixel_values': image,
            'coordinates': torch.tensor([lat, lon], dtype=torch.float32),
            'dataset': 'earthview'
        }

    def _process_eurosat(self, item):
        """Convert a EuroSAT item; coordinates are always synthetic here."""
        image = item['image']
        # Synthetic coordinates for EuroSAT (no geo metadata is read here).
        lat = torch.rand(1).item() * 180 - 90
        lon = torch.rand(1).item() * 360 - 180
        if self.transform:
            image = self.transform(image)
        return {
            'pixel_values': image,
            'coordinates': torch.tensor([lat, lon], dtype=torch.float32),
            'dataset': 'eurosat'
        }

    def _process_s2_naip(self, item):
        """Convert an S2-NAIP item (Sentinel image); random lat/lon fallback."""
        sentinel_image = item['sentinel']
        lat = item.get('lat', torch.rand(1).item() * 180 - 90)
        lon = item.get('lon', torch.rand(1).item() * 360 - 180)
        if self.transform:
            sentinel_image = self.transform(sentinel_image)
        return {
            'pixel_values': sentinel_image,
            'coordinates': torch.tensor([lat, lon], dtype=torch.float32),
            'dataset': 's2_naip'
        }
class ProfessionalGeoReferencingSystem:
    """Professional geo-referencing system: training + inference wrapper."""

    def __init__(self, model_path: Optional[str] = None, use_quantization: bool = True):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"Using device: {self.device}")
        # Build the model and (optionally) load weights.
        self.setup_model(model_path, use_quantization)
        # Image processor (kept for API parity; predict() uses torchvision).
        self.processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
        # Augmented pipeline for training, deterministic pipeline for inference.
        self.transform = self._get_transforms(train=True)
        self.eval_transform = self._get_transforms(train=False)
        # Labels for the region classifier head (order matches model output).
        self.region_labels = ['Africa', 'Asia', 'Europe', 'North America',
                              'Oceania', 'South America', 'Antarctica']
        logger.info("Professional Geo-Referencing System initialized")

    def setup_model(self, model_path: Optional[str], use_quantization: bool):
        """Build the model and optionally load a state dict from *model_path*."""
        # NOTE(review): the original constructed a BitsAndBytesConfig here
        # (with invalid 8-bit kwargs such as bnb_8bit_quant_type="nf8") and
        # never passed it to any model, so quantization was never applied.
        # The dead config was removed; wire a valid config into
        # AutoModel.from_pretrained if quantization is actually wanted.
        if use_quantization and self.device.type == 'cuda':
            logger.info("Quantization requested but not applied to the custom model")
        self.model = AdvancedGeoModel()
        if model_path and os.path.exists(model_path):
            try:
                # map_location keeps CPU-only machines working with GPU checkpoints.
                state_dict = torch.load(model_path, map_location=self.device)
                self.model.load_state_dict(state_dict)
                logger.info(f"Model loaded from {model_path}")
            except Exception as e:
                logger.warning(f"Model loading failed: {e}. Using pretrained weights.")
        self.model.to(self.device)
        self.model.eval()

    def _get_transforms(self, train: bool = True):
        """Preprocessing pipeline; augmentation is applied only when *train*.

        Bug fix: the original used a single augmented pipeline everywhere, so
        inference applied random flips/color jitter and predictions were
        non-deterministic.
        """
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )
        if train:
            return transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.RandomHorizontalFlip(p=0.3),
                transforms.RandomVerticalFlip(p=0.1),
                transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
                transforms.ToTensor(),
                normalize
            ])
        return transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            normalize
        ])

    def train(self,
              epochs: int = 20,
              batch_size: int = 32,
              learning_rate: float = 1e-4,
              output_dir: str = "./geo_model"):
        """Train on the combined multi-modal dataset.

        Args:
            epochs: number of passes over the data.
            batch_size: DataLoader batch size.
            learning_rate: AdamW base learning rate.
            output_dir: checkpoint directory (created if absent).
        """
        # Bug fix: checkpoint saving failed when output_dir did not exist.
        os.makedirs(output_dir, exist_ok=True)
        dataset_config = {
            'earthview': True,
            'eurosat': True,
            's2_naip': True
        }
        train_dataset = MultiModalGeoDataset(dataset_config, transform=self.transform)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
        # Robust regression loss for coordinates + CE for the region head.
        coord_criterion = nn.HuberLoss()
        class_criterion = nn.CrossEntropyLoss()
        optimizer = optim.AdamW(
            self.model.parameters(),
            lr=learning_rate,
            weight_decay=1e-4,
            betas=(0.9, 0.999)
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
        self.model.train()
        best_loss = float('inf')
        for epoch in range(epochs):
            total_loss = 0
            coord_loss_total = 0
            class_loss_total = 0
            for batch_idx, batch in enumerate(train_loader):
                pixel_values = batch['pixel_values'].to(self.device)
                coordinates = batch['coordinates'].to(self.device)
                optimizer.zero_grad()
                outputs = self.model(pixel_values)
                coord_loss = coord_criterion(outputs['coordinates'], coordinates)
                # Region targets are synthetic placeholders -- TODO: derive
                # real continent labels from the coordinates.
                region_targets = torch.randint(0, 7, (pixel_values.size(0),)).to(self.device)
                class_loss = class_criterion(outputs['region_logits'], region_targets)
                # Classification is a weighted auxiliary objective.
                loss = coord_loss + 0.1 * class_loss
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                optimizer.step()
                total_loss += loss.item()
                coord_loss_total += coord_loss.item()
                class_loss_total += class_loss.item()
                if batch_idx % 100 == 0:
                    logger.info(f'Epoch {epoch+1}/{epochs}, Batch {batch_idx}, '
                                f'Loss: {loss.item():.6f}, Coord: {coord_loss.item():.6f}, '
                                f'Class: {class_loss.item():.6f}')
            scheduler.step()
            avg_loss = total_loss / len(train_loader)
            avg_coord_loss = coord_loss_total / len(train_loader)
            avg_class_loss = class_loss_total / len(train_loader)
            logger.info(f'Epoch {epoch+1}/{epochs} completed: '
                        f'Avg Loss: {avg_loss:.6f}, '
                        f'Avg Coord Loss: {avg_coord_loss:.6f}, '
                        f'Avg Class Loss: {avg_class_loss:.6f}')
            # Checkpoint the best epoch by average training loss.
            if avg_loss < best_loss:
                best_loss = avg_loss
                self.save_model(f"{output_dir}/best_model.pth")
                logger.info(f"New best model saved with loss: {best_loss:.6f}")
        self.save_model(f"{output_dir}/final_model.pth")
        # Leave the wrapper in inference mode after training.
        self.model.eval()
        logger.info("Training completed and final model saved")

    def predict(self, image: Union[str, "Image.Image", np.ndarray]) -> Dict:
        """Predict coordinates for one image (path, PIL image, or ndarray).

        Returns a dict with coordinates, uncertainties, region prediction
        and confidence scores, or a dict with an 'error' key on failure.
        """
        self.model.eval()
        try:
            if isinstance(image, str):
                image = Image.open(image).convert('RGB')
            elif isinstance(image, np.ndarray):
                image = Image.fromarray(image.astype('uint8')).convert('RGB')
            # Bug fix: use the deterministic pipeline -- the original applied
            # the random training augmentations at inference time.
            processed_image = self.eval_transform(image).unsqueeze(0).to(self.device)
            with torch.no_grad():
                outputs = self.model(processed_image)
            coords = outputs['coordinates'].cpu().numpy()[0]
            uncertainty = outputs['uncertainty'].cpu().numpy()[0]
            region_probs = torch.softmax(outputs['region_logits'], dim=1).cpu().numpy()[0]
            predicted_region = self.region_labels[np.argmax(region_probs)]
            region_confidence = np.max(region_probs)
            overall_confidence = self._calculate_confidence(coords, uncertainty, region_confidence)
            result = {
                'latitude': float(coords[0]),
                'longitude': float(coords[1]),
                'latitude_uncertainty': float(uncertainty[0]),
                'longitude_uncertainty': float(uncertainty[1]),
                'predicted_region': predicted_region,
                'region_confidence': float(region_confidence),
                'overall_confidence': float(overall_confidence),
                'region_probabilities': {
                    label: float(prob) for label, prob in zip(self.region_labels, region_probs)
                },
                'timestamp': datetime.now().isoformat()
            }
            return result
        except Exception as e:
            logger.error(f"Prediction error: {e}")
            return {
                'error': str(e),
                'latitude': 0.0,
                'longitude': 0.0,
                'overall_confidence': 0.0
            }

    def _calculate_confidence(self, coords: np.ndarray, uncertainty: np.ndarray, region_confidence: float) -> float:
        """Blend coordinate uncertainty (70%) and region confidence (30%) into [0, 1]."""
        coord_confidence = 1.0 / (1.0 + np.mean(np.abs(uncertainty)))
        overall_confidence = 0.7 * coord_confidence + 0.3 * region_confidence
        return min(overall_confidence, 1.0)

    def save_model(self, path: str):
        """Persist the model state dict, creating parent directories as needed."""
        # Bug fix: writing into a non-existent directory raised FileNotFoundError.
        os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
        torch.save(self.model.state_dict(), path)
        logger.info(f"Model saved to {path}")

    def load_model(self, path: str):
        """Load a state dict saved by save_model().

        NOTE(review): torch.load unpickles arbitrary objects -- only load
        trusted checkpoints (or pass weights_only=True on torch >= 1.13).
        """
        self.model.load_state_dict(torch.load(path, map_location=self.device))
        self.model.to(self.device)
        logger.info(f"Model loaded from {path}")
class GeoVisualizationEngine:
    """Rendering helpers: interactive folium maps and matplotlib summaries."""

    def __init__(self):
        # Base tile layer applied to every generated map.
        self.style = 'openstreetmap'

    def create_interactive_map(self,
                               predictions: List[Dict],
                               map_center: Tuple[float, float] = (39, 35),
                               zoom_start: int = 4) -> str:
        """Render predictions as markers + uncertainty circles.

        Returns the path of a temporary HTML file containing the map.
        """
        fmap = folium.Map(location=map_center, zoom_start=zoom_start, tiles=self.style)
        for idx, pred in enumerate(predictions):
            if 'error' in pred:
                continue
            lat = pred['latitude']
            lon = pred['longitude']
            confidence = pred.get('overall_confidence', 0.5)
            region = pred.get('predicted_region', 'Unknown')
            # Traffic-light color by confidence band.
            if confidence < 0.3:
                color = 'red'
            elif confidence < 0.7:
                color = 'orange'
            else:
                color = 'green'
            popup_text = f"""
            <b>Prediction {idx+1}</b><br>
            <b>Coordinates:</b> {lat:.4f}, {lon:.4f}<br>
            <b>Region:</b> {region}<br>
            <b>Confidence:</b> {confidence:.2%}<br>
            <b>Uncertainty:</b> ±{pred.get('latitude_uncertainty', 0):.3f}°
            """
            marker = folium.Marker(
                [lat, lon],
                popup=folium.Popup(popup_text, max_width=300),
                tooltip=f"Click for details (Confidence: {confidence:.2%})",
                icon=folium.Icon(color=color, icon='info-sign')
            )
            marker.add_to(fmap)
            # Uncertainty circle: worst-case of the two axis uncertainties.
            radius_deg = max(pred.get('latitude_uncertainty', 0.1),
                             pred.get('longitude_uncertainty', 0.1))
            circle = folium.Circle(
                location=[lat, lon],
                radius=radius_deg * 111320,  # degrees -> meters (approx.)
                popup=f"Uncertainty: ±{radius_deg:.3f}°",
                color=color,
                fill=True,
                fillOpacity=0.2
            )
            circle.add_to(fmap)
        # Persist the map to a temporary HTML file and hand back its path.
        with tempfile.NamedTemporaryFile(suffix='.html', delete=False) as tmp:
            fmap.save(tmp.name)
        return tmp.name

    def create_analysis_plot(self, predictions: List[Dict]) -> str:
        """Build a 2x2 statistical summary figure; returns a PNG file path."""
        ok = [p for p in predictions if 'error' not in p]
        confidences = [p.get('overall_confidence', 0) for p in ok]
        regions = [p.get('predicted_region', 'Unknown') for p in ok]
        uncertainties = [p.get('latitude_uncertainty', 0) for p in ok]
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        ax_conf, ax_region = axes[0]
        ax_unc, ax_scatter = axes[1]
        # Confidence histogram.
        ax_conf.hist(confidences, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
        ax_conf.set_xlabel('Confidence Score')
        ax_conf.set_ylabel('Frequency')
        ax_conf.set_title('Confidence Distribution')
        ax_conf.grid(True, alpha=0.3)
        # Predicted-region bar chart.
        region_counts = pd.Series(regions).value_counts()
        ax_region.bar(region_counts.index, region_counts.values, color='lightcoral', alpha=0.7)
        ax_region.set_xlabel('Predicted Region')
        ax_region.set_ylabel('Count')
        ax_region.set_title('Regional Distribution')
        ax_region.tick_params(axis='x', rotation=45)
        ax_region.grid(True, alpha=0.3)
        # Uncertainty histogram.
        ax_unc.hist(uncertainties, bins=20, alpha=0.7, color='lightgreen', edgecolor='black')
        ax_unc.set_xlabel('Uncertainty (degrees)')
        ax_unc.set_ylabel('Frequency')
        ax_unc.set_title('Uncertainty Distribution')
        ax_unc.grid(True, alpha=0.3)
        # Confidence vs uncertainty scatter.
        ax_scatter.scatter(confidences, uncertainties, alpha=0.6, color='purple')
        ax_scatter.set_xlabel('Confidence')
        ax_scatter.set_ylabel('Uncertainty')
        ax_scatter.set_title('Confidence vs Uncertainty')
        ax_scatter.grid(True, alpha=0.3)
        plt.tight_layout()
        # Persist to a temporary PNG and return its path.
        with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tmp:
            plt.savefig(tmp.name, dpi=300, bbox_inches='tight')
            plt.close()
        return tmp.name
class ProfessionalGeoApp:
    """Application layer tying the prediction system and visualizer together."""

    def __init__(self):
        self.system = ProfessionalGeoReferencingSystem()
        self.visualizer = GeoVisualizationEngine()
        # Accumulates successful prediction dicts for the export/history tab.
        self.predictions_history = []
        logger.info("Professional Geo-App initialized")

    def process_single_image(self, image) -> Dict:
        """Predict for one image; successful results are kept in history."""
        result = self.system.predict(image)
        if 'error' not in result:
            self.predictions_history.append(result)
        return result

    def process_batch_images(self, files: List) -> Dict:
        """Predict for a list of uploaded files and build summary artifacts."""
        # Bug fix: Gradio passes None when nothing was uploaded; guard it.
        if not files:
            return {
                'results': [],
                'summary': {
                    'total_images': 0,
                    'successful_predictions': 0,
                    'failed_predictions': 0,
                    'average_confidence': 0
                },
                'map_path': None,
                'analysis_path': None
            }
        results = []
        for file in files:
            try:
                result = self.system.predict(file.name)
                result['filename'] = os.path.basename(file.name)
                results.append(result)
            except Exception as e:
                results.append({
                    'filename': os.path.basename(file.name),
                    'error': str(e)
                })
        # Build map/analysis artifacts only when something succeeded.
        successful_results = [r for r in results if 'error' not in r]
        if successful_results:
            map_path = self.visualizer.create_interactive_map(successful_results)
            analysis_path = self.visualizer.create_analysis_plot(successful_results)
        else:
            map_path = None
            analysis_path = None
        batch_result = {
            'results': results,
            'summary': {
                'total_images': len(files),
                'successful_predictions': len(successful_results),
                'failed_predictions': len(results) - len(successful_results),
                'average_confidence': (np.mean([r.get('overall_confidence', 0)
                                                for r in successful_results])
                                       if successful_results else 0)
            },
            'map_path': map_path,
            'analysis_path': analysis_path
        }
        self.predictions_history.extend(successful_results)
        return batch_result

    def export_results(self, format_type: str = 'geojson') -> str:
        """Export prediction history as geojson/csv/excel.

        Returns the path of the written file, or None when history is empty.
        """
        if not self.predictions_history:
            return None
        df = pd.DataFrame(self.predictions_history)
        with tempfile.NamedTemporaryFile(suffix=f'.{format_type}', delete=False) as tmp:
            if format_type == 'geojson':
                features = []
                for _, row in df.iterrows():
                    # Bug fix: the original tested `'error' not in row`, which
                    # checks the Series *index* -- once any record carried an
                    # 'error' key, every row (including good ones) was
                    # skipped.  Check the value instead.
                    if pd.notna(row.get('error', None)):
                        continue
                    features.append({
                        "type": "Feature",
                        "geometry": {
                            "type": "Point",
                            # GeoJSON order is [longitude, latitude].
                            "coordinates": [row['longitude'], row['latitude']]
                        },
                        "properties": {
                            "confidence": row.get('overall_confidence', 0),
                            "region": row.get('predicted_region', 'Unknown'),
                            "region_confidence": row.get('region_confidence', 0),
                            "timestamp": row.get('timestamp', ''),
                            "uncertainty_lat": row.get('latitude_uncertainty', 0),
                            "uncertainty_lon": row.get('longitude_uncertainty', 0)
                        }
                    })
                geojson = {
                    "type": "FeatureCollection",
                    "features": features
                }
                with open(tmp.name, 'w') as f:
                    json.dump(geojson, f, indent=2)
            elif format_type == 'csv':
                df.to_csv(tmp.name, index=False)
            elif format_type == 'excel':
                # NOTE(review): requires an Excel engine (e.g. openpyxl) at runtime.
                df.to_excel(tmp.name, index=False)
        return tmp.name
# Gradio interface
def create_gradio_interface():
    """Build and return the professional Gradio UI.

    Fixes vs. the original wiring:
      * gr.HTML outputs were fed a saved-map *file path*, so the folium maps
        never rendered -- the saved HTML is now embedded via an iframe srcdoc.
      * refreshing an empty prediction history raised KeyError on the column
        selection.
    """
    import html as html_lib  # stdlib; escapes map HTML for the srcdoc attribute

    app = ProfessionalGeoApp()

    def _embed_html_file(path):
        # Wrap a saved HTML file in an iframe so gr.HTML can display it.
        if not path:
            return None
        with open(path, 'r', encoding='utf-8') as f:
            content = f.read()
        return ('<iframe srcdoc="' + html_lib.escape(content, quote=True) +
                '" style="width:100%;height:500px;border:none;"></iframe>')

    def _single_map(result):
        # Map for one successful prediction result (None on error/empty).
        if not result or 'error' in result:
            return None
        return _embed_html_file(app.visualizer.create_interactive_map([result]))

    def _batch_map(result):
        # Embed the pre-rendered batch map, if any.
        return _embed_html_file(result.get('map_path')) if result else None

    def _batch_analysis(result):
        # gr.Image accepts a file path directly.
        return result.get('analysis_path') if result else None

    def _history_frame():
        # Last 20 predictions; an empty frame when nothing was predicted yet
        # (bug fix: column selection on an empty DataFrame raised KeyError).
        cols = ['latitude', 'longitude', 'predicted_region',
                'overall_confidence', 'timestamp']
        if not app.predictions_history:
            return pd.DataFrame(columns=cols)
        return pd.DataFrame(app.predictions_history)[cols].tail(20)

    with gr.Blocks(title="🤖 Advanced AI Geo-Referencing System", theme=gr.themes.Soft()) as demo:
        gr.Markdown("""
        # 🗺️ Advanced AI Geo-Referencing System
        **Professional-grade geolocation prediction from aerial imagery**
        This system uses state-of-the-art AI models (DINOv2, EuroSAT, EarthView, S2-NAIP)
        to predict geographic coordinates from aerial and satellite images.
        """)
        with gr.Tab("📍 Single Image Analysis"):
            with gr.Row():
                with gr.Column():
                    single_image = gr.Image(
                        type="filepath",
                        label="Upload Aerial/Satellite Image",
                        height=400
                    )
                    single_btn = gr.Button("Predict Coordinates", variant="primary")
                with gr.Column():
                    single_output = gr.JSON(
                        label="Prediction Results",
                        show_label=True
                    )
                    single_map = gr.HTML(label="Interactive Map")
            single_btn.click(
                fn=app.process_single_image,
                inputs=single_image,
                outputs=[single_output]
            ).then(
                fn=_single_map,
                inputs=single_output,
                outputs=single_map
            )
        with gr.Tab("📊 Batch Processing"):
            with gr.Row():
                with gr.Column():
                    batch_files = gr.File(
                        file_count="multiple",
                        file_types=[".jpg", ".jpeg", ".png", ".tiff"],
                        label="Upload Multiple Images"
                    )
                    batch_btn = gr.Button("Process Batch", variant="primary")
                with gr.Column():
                    batch_summary = gr.JSON(label="Batch Summary")
                    batch_map = gr.HTML(label="Batch Results Map")
                    batch_analysis = gr.Image(label="Statistical Analysis", show_label=True)
            batch_btn.click(
                fn=app.process_batch_images,
                inputs=batch_files,
                outputs=[batch_summary]
            ).then(
                fn=_batch_map,
                inputs=batch_summary,
                outputs=batch_map
            ).then(
                fn=_batch_analysis,
                inputs=batch_summary,
                outputs=batch_analysis
            )
        with gr.Tab("📈 Results & Export"):
            with gr.Row():
                with gr.Column():
                    export_format = gr.Radio(
                        choices=['geojson', 'csv', 'excel'],
                        label="Export Format",
                        value='geojson'
                    )
                    export_btn = gr.Button("Export Results", variant="primary")
                    export_file = gr.File(label="Download Export")
                with gr.Column():
                    history_df = gr.Dataframe(
                        label="Prediction History",
                        headers=["Latitude", "Longitude", "Region", "Confidence", "Timestamp"],
                        datatype=["number", "number", "str", "number", "str"],
                        row_count=10,
                        col_count=5
                    )
                    refresh_btn = gr.Button("Refresh History")
            export_btn.click(
                fn=app.export_results,
                inputs=export_format,
                outputs=export_file
            )
            refresh_btn.click(
                fn=_history_frame,
                outputs=history_df
            )
        with gr.Tab("🛠️ Model Training"):
            gr.Markdown("### Model Training Interface")
            with gr.Row():
                with gr.Column():
                    epochs = gr.Slider(1, 50, value=10, label="Training Epochs")
                    batch_size = gr.Slider(1, 64, value=16, label="Batch Size")
                    learning_rate = gr.Number(1e-4, label="Learning Rate")
                    train_btn = gr.Button("Start Training", variant="primary")
                with gr.Column():
                    training_output = gr.Textbox(
                        label="Training Logs",
                        lines=10,
                        max_lines=15
                    )
            # Placeholder: reports the chosen hyperparameters; real training
            # would call ProfessionalGeoReferencingSystem.train().
            train_btn.click(
                fn=lambda e, b, lr: f"Training started with:\nEpochs: {e}\nBatch Size: {b}\nLearning Rate: {lr}\n\nThis would start actual training in production.",
                inputs=[epochs, batch_size, learning_rate],
                outputs=training_output
            )
        # Footer
        gr.Markdown("""
        ---
        ### 🔧 Technical Specifications
        - **Backbone Model**: DINOv2 Base
        - **Training Datasets**: EarthView, EuroSAT, S2-NAIP
        - **Output**: Coordinates (Lat/Lon) with uncertainty estimation
        - **Features**: Regional classification, confidence scoring, batch processing
        - **Export Formats**: GeoJSON, CSV, Excel
        *Built for professional geospatial analysis and research*
        """)
    return demo
# FastAPI backend (opsiyonel)
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import FileResponse
import uvicorn
# FastAPI application object and a module-level inference system.
app_fastapi = FastAPI(title="AI Geo-Referencing API")
# NOTE(review): instantiated at import time -- this builds the full model even
# when only the Gradio path is used; consider lazy initialization.
geo_system = ProfessionalGeoReferencingSystem()
@app_fastapi.post("/predict")
async def predict_coordinates(file: UploadFile = File(...)):
    """API endpoint for coordinate prediction.

    Saves the upload to a temporary file, runs prediction, and always removes
    the temporary file (bug fix: the original leaked the file whenever
    predict() or the write raised).
    """
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            content = await file.read()
            tmp.write(content)
            tmp_path = tmp.name
        return geo_system.predict(tmp_path)
    except Exception as e:
        return {"error": str(e)}
    finally:
        # Cleanup runs on both success and failure paths.
        if tmp_path and os.path.exists(tmp_path):
            os.unlink(tmp_path)
@app_fastapi.get("/health")
async def health_check():
    """Liveness probe: reports service status plus the current server time."""
    now = datetime.now().isoformat()
    return {"status": "healthy", "timestamp": now}
if __name__ == "__main__":
    # Launch the Gradio interface.
    demo = create_gradio_interface()
    # Settings intended for Hugging Face Spaces hosting.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True,
        debug=True
    )
    # Alternative: start the FastAPI backend instead.
    # uvicorn.run(app_fastapi, host="0.0.0.0", port=8000)