# Imported from GitHub (commit 999e0c3, "CORS origin added", author kiran6969).
from flask import Flask, jsonify, request, send_file
from flask_cors import CORS
from PIL import Image
import io
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import os
import time
app = Flask(__name__)
# The allowed origin must match the browser's Origin header exactly, and the
# Origin header never includes a trailing slash (or any path) — with the old
# "https://...vercel.app/" value flask-cors rejected every frontend request.
CORS(app, origins=["https://autoencoderkiranfrontend.vercel.app"])
@app.route("/healthz")
def health():
    """Liveness probe: always reports the service as up."""
    body, status = "OK", 200
    return body, status
@app.route("/api/users", methods=['GET'])
def users():
    """Return the fixed demo user list as a JSON object {"users": [...]}."""
    names = ['kiran', 'kumar', 'kanathala']
    return jsonify({"users": names})
# Prefer the GPU when one is visible to PyTorch; otherwise run on CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print("Using device:", device)
# ----------------------------
# 2️⃣ Transform (same as training)
# ----------------------------
# Preprocessing pipeline that mirrors training-time preprocessing:
# a fixed 128x128 resize followed by conversion to a CHW float tensor.
_preprocess_steps = [
    transforms.Resize((128, 128)),  # model expects 128x128 inputs
    transforms.ToTensor(),          # PIL HWC uint8 -> CHW float in [0, 1]
]
transform = transforms.Compose(_preprocess_steps)
class Autoencoder(nn.Module):
    """Convolutional autoencoder for 3-channel 128x128 images.

    Four stride-2 convolutions compress 3 -> 256 channels (spatially
    128px -> 8px); four stride-2 transposed convolutions mirror the path
    back, ending in a Sigmoid so pixel outputs land in [0, 1]. Module
    order and Sequential indices match the original layout so previously
    saved state dicts keep loading.
    """

    def __init__(self):
        super().__init__()

        # Channel progression shared by both halves of the network.
        channels = [3, 32, 64, 128, 256]

        # Encoder: Conv2d + ReLU per stage; the last conv has no activation.
        enc = []
        for cin, cout in zip(channels, channels[1:]):
            enc.append(nn.Conv2d(cin, cout, 3, stride=2, padding=1))
            enc.append(nn.ReLU())
        enc.pop()  # drop the trailing ReLU after the final encoder conv
        self.encoder = nn.Sequential(*enc)

        # Decoder mirrors the encoder with transposed convs; the final
        # activation is a Sigmoid instead of a ReLU.
        dec = []
        for cin, cout in zip(reversed(channels[1:]), reversed(channels[:-1])):
            dec.append(nn.ConvTranspose2d(cin, cout, 3, stride=2,
                                          padding=1, output_padding=1))
            dec.append(nn.ReLU())
        dec[-1] = nn.Sigmoid()  # replace the trailing ReLU on the output layer
        self.decoder = nn.Sequential(*dec)

    def forward(self, x):
        """Encode then decode; output has the same shape as the input."""
        return self.decoder(self.encoder(x))
# ----------------------------
# 4️⃣ Load trained model
# ----------------------------
model = Autoencoder().to(device)
# weights_only=True restricts unpickling to tensors/containers, closing the
# arbitrary-code-execution hole a tampered checkpoint file would open.
# (Requires torch >= 1.13; it is the default behavior from torch 2.6 on.)
state_dict = torch.load("celeba_autoencoder.pth", map_location=device,
                        weights_only=True)
model.load_state_dict(state_dict)
model.eval()  # inference mode: fixes dropout/batchnorm behavior
print("Model loaded successfully!")
@app.route("/reconstruct", methods=["POST"])
def reconstruct():
    """Run an uploaded image through the autoencoder and return a PNG.

    Expects a multipart form field named "image". The reconstruction is
    resized back to the upload's original dimensions and streamed from
    memory. Responds 400 when the field is missing or the payload cannot
    be decoded as an image (previously a corrupt upload caused a 500).
    """
    if "image" not in request.files:
        return "No image uploaded", 400
    file = request.files["image"]
    try:
        img = Image.open(file.stream).convert("RGB")
    except Exception:
        # PIL raises UnidentifiedImageError (among others) on non-image
        # payloads; surface it as a client error rather than a server crash.
        return "Invalid image file", 400
    orig_size = img.size  # (width, height) — PIL resize takes the same order

    # Transform and send through autoencoder
    img_tensor = transform(img).unsqueeze(0).to(device)
    with torch.no_grad():
        reconstructed = model(img_tensor)

    # Convert back to PIL. clamp is a cheap guard against any float
    # excursion outside [0, 1] before the uint8 conversion.
    recon_img = reconstructed.squeeze(0).clamp(0.0, 1.0).cpu()
    recon_img = transforms.ToPILImage()(recon_img)
    recon_img = recon_img.resize(orig_size)

    # Send image as BytesIO without saving to disk
    buf = io.BytesIO()
    recon_img.save(buf, format="PNG")
    buf.seek(0)
    return send_file(buf, mimetype="image/png", as_attachment=False,
                     download_name="reconstructed.png")
if __name__ == "__main__":
    # Development entry point only: Flask's built-in server, default host/port.
    # Run under a WSGI server (e.g. gunicorn) in production.
    app.run()