import gradio as gr
import spaces
import torch
import torch.amp as amp
from transformers import pipeline
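
# Community Transformers port of NVIDIA's Cosmos-1.0 prompt upsampler (Mistral NeMo 12B base).
# bfloat16 weights roughly halve memory use versus float32.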
repo_id = "appmana/Cosmos-1.0-Prompt-Upsampler-12B-Text2World-hf"
upsampler = pipeline("text-generation", repo_id, torch_dtype=torch.bfloat16)
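
# On ZeroGPU hardware, @spaces.GPU requests a GPU only for the duration of each call.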
@spaces.GPU
def upsample(prompt):
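    # Inference only: disable gradient tracking and run the forward pass in bfloat16 autocast.
    # Note: text-generation pipelines return the full text, instruction prefix included, by default.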
    with torch.no_grad(), amp.autocast("cuda", dtype=torch.bfloat16):
        return upsampler(f"Upsample the short caption to a long caption: {prompt}")[0]["generated_text"]
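
# Single-field UI: a short caption in, the upsampled caption out.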
demo = gr.Interface(
    title="NVIDIA Cosmos 🌌 Prompt Upsampler",
    description="""Upsample short prompts into detailed captions with NVIDIA's 12B Cosmos prompt upsampler, based on Mistral NeMo 12B. This Space uses the Hugging Face Transformers port at bfloat16 precision.
[[cosmos]](https://huggingface.co/nvidia/Cosmos-1.0-Prompt-Upsampler-12B-Text2World) [[transformers]](https://huggingface.co/appmana/Cosmos-1.0-Prompt-Upsampler-12B-Text2World-hf) [[gguf]](https://huggingface.co/mradermacher/Cosmos-1.0-Prompt-Upsampler-12B-Text2World-hf-GGUF)""",
    fn=upsample,
    inputs=gr.Text(
        label="Prompt",
        value="A dog playing with a ball."
    ),
    outputs=gr.Text(
        label="Upsampled Prompt",
        interactive=False
    )
)
demo.launch()