# Hugging Face Space: Real-ESRGAN photo upscaler (2x/4x/8x) with an attached
# Telegram echo bot. (The "Runtime error" banner above was pasted-in Spaces UI
# residue, not part of the program.)
import logging
import os

import torch
from PIL import Image
from RealESRGAN import RealESRGAN
import gradio as gr
from telegram import Update
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)

# SECURITY: the bot token was previously hardcoded in this file (a live-looking
# "6231949511:AAH..." credential). A token committed to source must be treated
# as leaked — revoke it via @BotFather and supply a fresh one through the
# environment instead of the source tree.
TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")

# python-telegram-bot v13-style Updater/Dispatcher (use_context=True API).
updater = Updater(token=TOKEN, use_context=True)
dispatcher = updater.dispatcher
| # Define a command handler | |
def start(update: Update, context: CallbackContext):
    """Handle the /start command: greet the user in the originating chat."""
    chat_id = update.effective_chat.id
    greeting = "Hello! I'm your bot. How can I help you?"
    context.bot.send_message(chat_id=chat_id, text=greeting)
| # Register the command handler | |
# Wire the /start command to its handler on the dispatcher.
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
| # Define a function to handle user messages | |
def echo(update: Update, context: CallbackContext):
    """Echo a plain-text message back to the chat it came from."""
    incoming = update.message.text
    context.bot.send_message(chat_id=update.effective_chat.id, text=incoming)
| # Register the message handler | |
# Echo every text message that is not a command.
message_handler = MessageHandler(Filters.text & ~Filters.command, echo)
dispatcher.add_handler(message_handler)

# Start polling Telegram in a background thread.
#
# BUG FIX: the original code called updater.idle() here, which blocks the
# main thread forever waiting for a stop signal — so the Gradio interface
# defined below was never built or launched and the Space failed to serve.
# start_polling() is non-blocking; the blocking launch() call at the bottom
# of the file is what keeps the process alive.
updater.start_polling()
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def _load_esrgan(scale):
    """Build a RealESRGAN model for the given scale and load its weights."""
    model = RealESRGAN(device, scale=scale)
    model.load_weights(f'weights/RealESRGAN_x{scale}.pth', download=True)
    return model


# One model per supported upscale factor; inference() dispatches on these names.
model2 = _load_esrgan(2)
model4 = _load_esrgan(4)
model8 = _load_esrgan(8)
def inference(image, size):
    """Upscale a PIL image with the RealESRGAN model matching *size*.

    size is '2x', '4x', or anything else (treated as 8x, matching the
    original fall-through behavior). Returns the upscaled PIL image.
    """
    rgb = image.convert('RGB')
    if size == '2x':
        result = model2.predict(rgb)
    elif size == '4x':
        result = model4.predict(rgb)
    else:
        result = model8.predict(rgb)
    # Free cached GPU memory between requests so a shared Space doesn't OOM.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return result
title = "Face Real ESRGAN UpScale: 2x 4x 8x"
description = "This is an unofficial demo for Real-ESRGAN. Scales the resolution of a photo. This model shows better results on faces compared to the original version.<br>Telegram BOT: https://t.me/restoration_photo_bot"
# BUG FIX: the closing tag was malformed ("/<div>"), leaving the div unclosed
# in the rendered article HTML; corrected to "</div>".
article = "<div style='text-align: center;'>Twitter <a href='https://twitter.com/DoEvent' target='_blank'>Max Skobeev</a> | <a href='https://huggingface.co/sberbank-ai/Real-ESRGAN' target='_blank'>Model card</a></div>"

# Build and serve the UI. launch() blocks, keeping the process (and the
# Telegram polling thread) alive.
gr.Interface(
    inference,
    [
        gr.Image(type="pil"),
        gr.Radio(
            ['2x', '4x', '8x'],
            type="value",
            value='2x',
            label='Resolution model',
        ),
    ],
    gr.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[['groot.jpeg', "2x"]],
    allow_flagging='never',
    cache_examples=False,
).queue(concurrency_count=1).launch(show_error=True)