# Source: Hugging Face Space "jonathanjordan21" — app.py (commit e9346a5, 3.86 kB).
# The original upload carried HF file-viewer chrome here; replaced with this comment.
import json

import gradio as gr
import numpy as np
import pandas as pd
import torch
from scipy.special import expit
from scipy.stats import nbinom
from torch import nn
from xgboost import XGBRegressor

from utils2 import compute_features
# class NegBinomialModel(nn.Module):
# def __init__(self, in_features):
# super().__init__()
# self.linear = nn.Linear(in_features, 1)
# self.alpha = nn.Parameter(torch.tensor(0.5))
# def forward(self, x):
# # safer activation than exp()
# mu = torch.exp(torch.clamp(self.linear(x), min=-5, max=5))
# alpha = torch.clamp(self.alpha, min=1e-3, max=10)
# return mu.squeeze(), alpha
# model = NegBinomialModel(12)
# model.load_state_dict(torch.load("model_weights(1).pt", map_location='cpu'))
# model.eval()
# MU_BANKS = 2.6035915713614286
# STD_BANKS = 3.0158890435512125
# with open("xgb_model(1).json", "r") as f:
# params = json.load(f)
xgb_model = XGBRegressor()
xgb_model.load_model("xgb_model(3).json")
def predict_score(lat, lon, api_key):
    """Score a candidate bank/ATM location on a 0-100 scale.

    Builds neighborhood features around ``(lat, lon)`` (500 m radius) via
    ``compute_features``, predicts the "ideal" ATM count with the XGBoost
    model, and maps the gap between ideal and current counts through a
    sigmoid to produce the score.

    Args:
        lat: Latitude of the candidate location.
        lon: Longitude of the candidate location.
        api_key: Google API key forwarded to ``compute_features``.

    Returns:
        Tuple of (score rounded to 3 decimals, current ATM count,
        predicted ideal ATM count rounded to 3 decimals, followed by each
        ``num_*`` amenity-count feature in dict order).
    """
    # Feature dict for the surrounding area; assumed to preserve the
    # training-time feature order (plain dict, py3.7+ insertion order).
    features = compute_features((lat, lon), api_key, 500)
    print("[INPUTS]", features)

    # The current ATM/bank count is context for scoring, not a model input.
    num_banks = features.pop("num_banks_in_radius", 0)
    input_dict = features.copy()

    # Single-row feature matrix for XGBoost.
    x = torch.tensor(list(features.values()), dtype=torch.float32)
    mu_pred2 = xgb_model.predict(x.unsqueeze(0).numpy())

    # score = 100 * sigmoid(ideal - current): high when the area can absorb
    # more ATMs than it currently has. expit is the numerically stable form
    # of the original 100 / (1 + np.exp(num_banks - mu_pred2)), which raises
    # an overflow RuntimeWarning once the exponent exceeds ~709.
    score = 100 * expit(mu_pred2 - num_banks)

    return (
        round(float(score), 3),
        num_banks,
        round(float(mu_pred2), 3),
        # Expose every raw "num_*" amenity count alongside the score.
        *[v for k, v in input_dict.items() if k.startswith("num")],
    )
# ======== Gradio Interface ========
# Amenity-count output labels, in the order predict_score yields its
# trailing "num_*" features.
_amenity_labels = [
    "Dining and Drinking",
    "Community and Government",
    "Retail",
    "Business and Professional Services",
    "Landmarks and Outdoors",
    "Arts and Entertainment",
    "Health and Medicine",
    "Travel and Transportation",
    "Sports and Recreation",
    "Event",
]
interface = gr.Interface(
    fn=predict_score,
    inputs=[
        gr.Number(label="Latitude"),
        gr.Number(label="Longitude"),
        gr.Text(label="GOOGLE API KEY"),
    ],
    outputs=[
        gr.Number(label="Score (0 - 100)"),
        gr.Number(label="Current ATMs"),
        gr.Number(label="Ideal ATMs (XGBoost)"),
        *[gr.Number(label=name) for name in _amenity_labels],
    ],
    title="Bank Location Scoring Model",
    description="Enter latitude and longitude to get the predicted score, number of banks, and normalized score.",
)
# NOTE(review): no interface.launch() is visible in this view — confirm the
# Space entry point actually launches this interface.