Spaces:
Runtime error
Runtime error
gradio setup
Browse files- Dockerfile +40 -17
- app.py +100 -0
- requirements.txt +1 -0
Dockerfile
CHANGED
|
@@ -1,29 +1,52 @@
|
|
| 1 |
-
# Dockerfile
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
FROM python:3.10-slim
|
| 3 |
|
| 4 |
WORKDIR /app
|
| 5 |
|
| 6 |
# Install system dependencies
|
| 7 |
-
RUN apt-get update && apt-get install -y git-lfs && apt-get clean
|
| 8 |
|
| 9 |
-
#
|
| 10 |
COPY requirements.txt .
|
| 11 |
-
RUN pip install --
|
|
|
|
| 12 |
python -m spacy download en_core_web_sm
|
| 13 |
|
| 14 |
-
# Copy application
|
| 15 |
-
COPY
|
| 16 |
-
COPY config.json .
|
| 17 |
-
COPY model.safetensors .
|
| 18 |
-
COPY tokenizer_config.json .
|
| 19 |
-
COPY vocab.json .
|
| 20 |
-
COPY merges.txt .
|
| 21 |
-
COPY generation_config.json .
|
| 22 |
-
COPY special_tokens_map.json .
|
| 23 |
-
|
| 24 |
-
# Fix permissions
|
| 25 |
-
RUN chmod -R 755 /app
|
| 26 |
|
|
|
|
| 27 |
EXPOSE 7860
|
| 28 |
|
| 29 |
-
|
|
|
|
|
|
# Dockerfile for the slogan-generation Gradio Space.
# Serves app.py on port 7860, the port Hugging Face Spaces expects.
# (The previous FastAPI/uvicorn version that was kept here as a large
# commented-out block has been removed — dead code belongs in git history.)

FROM python:3.10-slim

WORKDIR /app

# git-lfs is needed to pull the large model weights; clean the apt caches
# in the same layer to keep the image small.
RUN apt-get update && apt-get install -y git-lfs && apt-get clean && rm -rf /var/lib/apt/lists/*

# Install Python dependencies first so this layer is cached across
# code-only rebuilds, then fetch the spaCy English model app.py loads.
COPY requirements.txt .
RUN pip install --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt && \
    python -m spacy download en_core_web_sm

# Copy application code and model files (config.json, model.safetensors,
# tokenizer files, ...).
# NOTE(review): add a .dockerignore so .git and other build-context junk
# is not baked into the image by this blanket copy.
COPY . .

# Port the Gradio server listens on.
EXPOSE 7860

# Run the Gradio app.
CMD ["python", "app.py"]
app.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
import spacy

# Load the fine-tuned GPT-2 model and its tokenizer from the files shipped
# alongside this script (config.json, model.safetensors, vocab.json, ...).
model = GPT2LMHeadModel.from_pretrained("./")
tokenizer = GPT2Tokenizer.from_pretrained("./")
# GPT-2 ships without a pad token; reuse EOS so padded generation works.
tokenizer.pad_token = tokenizer.eos_token

# Load the spaCy English pipeline, fetching it on first run if the model
# package is not already installed (spacy.load raises OSError then).
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    import spacy.cli

    spacy.cli.download("en_core_web_sm")
    nlp = spacy.load("en_core_web_sm")
def summarize_description(text):
    """Compress a free-form description into a short keyword string.

    Runs the text through the spaCy pipeline and keeps the first 12 tokens
    tagged as nouns, proper nouns, or adjectives, joined with spaces, so
    the generation prompts stay short.
    """
    analyzed = nlp(text)
    salient = []
    for tok in analyzed:
        if tok.pos_ in ("NOUN", "PROPN", "ADJ"):
            salient.append(tok.text)
    return " ".join(salient[:12])
def generate_slogans(brand, description, industry, tone="playful", num=5, liked_slogan=None):
    """Generate brand slogans with the fine-tuned GPT-2 model.

    Args:
        brand: Brand name inserted into the prompts.
        description: Free-form description, reduced to keywords first.
        industry: Industry label used in the prompts.
        tone: Sampling preset name; unknown tones fall back to "playful"
            instead of raising KeyError.
        num: Sequences sampled per prompt (two prompts are used, so up to
            ``2 * num`` slogans can be returned).
        liked_slogan: Optional slogan to imitate; when truthy, switches to
            "more like this" prompts.

    Returns:
        Dict with one key, "slogans": a de-duplicated list of cleaned
        slogan strings.
    """
    # gr.Slider can deliver a float; generate() requires an int here.
    num = int(num)
    processed_desc = summarize_description(description)

    # Two prompt variants per mode to diversify the sampled outputs.
    if liked_slogan:
        prompt1 = (
            f"Create {industry} brand slogans similar to: '{liked_slogan}'\n"
            f"Brand: {brand}\n"
            f"Key Attributes: {processed_desc}\n"
            "Slogan:"
        )
        prompt2 = (
            f"Generate slogans in the style of: '{liked_slogan}'\n"
            f"For: {brand}\n"
            f"Details: {processed_desc}\n"
            "Slogan:"
        )
    else:
        prompt1 = (
            f"Create a {industry} brand slogan that's {tone} and unique.\n"
            f"Brand: {brand}\n"
            f"Attributes: {processed_desc}\n"
            "Slogan:"
        )
        prompt2 = (
            f"Write {tone} marketing slogans for this {industry} brand:\n"
            f"Name: {brand}\n"
            f"About: {processed_desc}\n"
            "Slogan:"
        )

    # Sampling hyper-parameters per tone.
    tone_presets = {
        "playful": {"temperature": 0.95, "top_p": 0.95, "repetition_penalty": 1.2},
        "bold": {"temperature": 0.8, "top_p": 0.9, "repetition_penalty": 1.45},
        "minimalist": {"temperature": 0.6, "top_p": 0.8, "repetition_penalty": 1.5},
        "luxury": {"temperature": 0.7, "top_p": 0.85, "repetition_penalty": 1.35},
        "classic": {"temperature": 0.7, "top_p": 0.9, "repetition_penalty": 1.25}
    }
    # .get with a default: an unrecognized tone no longer crashes the call.
    preset = tone_presets.get(tone, tone_presets["playful"])

    gen_params = {
        **preset,
        "max_new_tokens": 25,
        "num_return_sequences": num,
        "do_sample": True,
        "pad_token_id": tokenizer.eos_token_id
    }

    # Inference only — disable autograd to avoid tracking gradients.
    with torch.no_grad():
        outputs1 = model.generate(**tokenizer(prompt1, return_tensors="pt"), **gen_params)
        outputs2 = model.generate(**tokenizer(prompt2, return_tensors="pt"), **gen_params)

    slogans = []
    for outputs in (outputs1, outputs2):
        for output in outputs:
            raw = tokenizer.decode(output, skip_special_tokens=True)
            # Keep only the text after the last "Slogan:" marker.
            clean = raw.split("Slogan:")[-1].strip()
            # First line, first sentence; strip quotes and BOTH parentheses
            # (the original dropped '(' but left stray ')' in results).
            clean = clean.split("\n")[0]
            clean = clean.replace('"', '').replace('(', '').replace(')', '')
            clean = clean.split(".")[0].strip()
            if len(clean) > 4 and clean not in slogans:
                slogans.append(clean)

    return {"slogans": slogans[:num * 2]}
# API-only Gradio app exposing generate_slogans at the /predict endpoint.
# The previous code called gr.api(..., inputs=[...], outputs=...), but
# gr.api does not accept component lists (it infers the schema from type
# hints, and does not exist at all in the gradio>=4.0.0 the requirements
# pin) — that call crashed the Space at startup. gr.Interface wires the
# same components to the function and registers the named API route.
demo = gr.Interface(
    fn=generate_slogans,
    inputs=[
        gr.Textbox(label="Brand"),
        gr.Textbox(label="Description"),
        gr.Textbox(label="Industry"),
        gr.Dropdown(["playful", "bold", "minimalist", "luxury", "classic"], label="Tone", value="playful"),
        gr.Slider(1, 10, value=5, step=1, label="Number of Slogans"),
        gr.Textbox(label="Generate like this slogan (optional)"),
    ],
    outputs=gr.JSON(label="Slogans"),
    title="Slogan API",
    description="API only. No interface required.",
    api_name="predict",
)

# Bind to all interfaces so the server is reachable through the Docker
# container's published port 7860 (launch() defaults to 127.0.0.1).
demo.launch(server_name="0.0.0.0", server_port=7860, show_api=True)
requirements.txt
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
# requirements.txt
|
|
|
|
| 2 |
fastapi>=0.68.0
|
| 3 |
uvicorn>=0.15.0
|
| 4 |
transformers>=4.0.0
|
# requirements.txt
gradio>=4.0.0
fastapi>=0.68.0
uvicorn>=0.15.0
transformers>=4.0.0
# app.py imports torch (model inference) and spacy (keyword extraction),
# and the Dockerfile runs `python -m spacy download en_core_web_sm` —
# both were missing, so the image build/startup failed.
torch>=1.9.0
spacy>=3.0.0