Spaces:
Runtime error
Upload app.py
app.py
CHANGED
@@ -1,23 +1,3 @@
-import subprocess
-import sys
-
-# def install(package):
-#     subprocess.check_call([sys.executable, "-m", "pip", "install", package])
-
-# install("evaluate")
-# install("jiwer")
-# install("huggingface_hub")
-# install("gradio")
-# install("bitsandbytes")
-# install("git+https://github.com/huggingface/transformers.git")
-# install("git+https://github.com/huggingface/peft.git")
-# install("git+https://github.com/huggingface/accelerate.git")
-# install("einops")
-# install("safetensors")
-# install("torch")
-# install("xformers")
-# install("datasets")
-
 from transformers import AutoProcessor, AutoModelForCausalLM, BitsAndBytesConfig
 import torch
 from PIL import Image
@@ -130,6 +110,7 @@ from peft import (
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig
 from peft import LoraConfig, get_peft_model
 
+
 os.environ["CUDA_VISIBLE_DEVICES"] = "0"
 
 class Social_Media_Captioner:
@@ -207,7 +188,7 @@ class Social_Media_Captioner:
         self.model_loaded = False
 
 
-    def inference(self, input_text: str, use_cached=True, cache_generation=True)
+    def inference(self, input_text: str, use_cached=True, cache_generation=True):
         if not self.model_loaded:
             raise Exception("Model not loaded")
 
@@ -249,7 +230,7 @@ class Social_Media_Captioner:
            raise Exception("Enter a valid input text to generate a valid prompt")
 
        return f"""
-        Convert the given image description to social media worthy
+        Convert the given image description to social media worthy caption
        Description: {input_text}
        Caption:
        """.strip()
@@ -304,14 +285,12 @@ caption_generator = Captions()
 import gradio as gr
 
 def setup(image):
-    # Assuming `caption_generator.generate_captions` is your function to generate captions.
-    # This is just a placeholder for your actual caption generation logic.
    return caption_generator.generate_captions(image = image)
 
 iface = gr.Interface(
    fn=setup,
-    inputs=gr.Image(type="pil", label="Upload Image"),
-    outputs="
+    inputs=gr.inputs.Image(type="pil", label="Upload Image"),
+    outputs=gr.outputs.Textbox(label="Caption")
 )
 
-iface.launch()
+iface.launch()
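The AutoProcessor / AutoModelForCausalLM / PIL imports kept at the top of app.py point to an image-to-text step that turns the uploaded image into the plain description later rewritten by Social_Media_Captioner. A minimal sketch of that step, assuming a GIT-style captioning checkpoint; the checkpoint name and the describe() helper are placeholders, not taken from this file:

from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

# Hypothetical checkpoint; the actual captioning model used by the Space is not
# visible in this diff.
CHECKPOINT = "microsoft/git-base-coco"

processor = AutoProcessor.from_pretrained(CHECKPOINT)
model = AutoModelForCausalLM.from_pretrained(CHECKPOINT)

def describe(image: Image.Image) -> str:
    # Encode the uploaded PIL image and generate a short natural-language description.
    inputs = processor(images=image, return_tensors="pt")
    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]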
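The AutoTokenizer / BitsAndBytesConfig / LoraConfig imports in the @@ -130 hunk suggest the caption-rewriting model is loaded in 4-bit and wrapped with a LoRA adapter. A minimal sketch of how those pieces typically fit together; the base checkpoint, target modules, and LoRA hyperparameters below are placeholders, since the real values sit in parts of app.py this diff does not show:

import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Placeholder base model; app.py's actual checkpoint is not shown in this diff.
BASE_MODEL = "tiiuae/falcon-7b"

# 4-bit NF4 quantization, enabled through the BitsAndBytesConfig import above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    quantization_config=bnb_config,
    device_map="auto",
)

# Attach a LoRA adapter on top of the quantized weights (placeholder hyperparameters).
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["query_key_value"],
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)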
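The last hunk moves the interface to the older gr.inputs / gr.outputs namespace. For reference, the same wiring written against the current top-level Gradio components, assuming the caption_generator = Captions() object from earlier in app.py is in scope:

import gradio as gr

def setup(image):
    # Delegate to the Captions object defined earlier in app.py.
    return caption_generator.generate_captions(image=image)

iface = gr.Interface(
    fn=setup,
    # gr.Image / gr.Textbox are the current component names; gr.inputs.Image and
    # gr.outputs.Textbox are the legacy aliases used in the diff above.
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.Textbox(label="Caption"),
)

iface.launch()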