Update app.py
Browse files
app.py
CHANGED
|
@@ -8,16 +8,19 @@ import numpy as np
|
|
| 8 |
from datasets import Dataset
|
| 9 |
from huggingface_hub import HfApi
|
| 10 |
from datetime import datetime, time
|
|
|
|
| 11 |
|
| 12 |
-
#
|
| 13 |
-
|
|
|
|
| 14 |
|
| 15 |
# Initialize models
|
| 16 |
text_generator = pipeline("text-generation", model="gpt2-medium", device=device)
|
| 17 |
image_generator = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
| 18 |
-
image_generator = image_generator.to(device)
|
| 19 |
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
|
| 20 |
-
sentiment_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
|
|
|
|
| 21 |
|
| 22 |
# Global variables for feedback collection
|
| 23 |
feedback_data = []
|
|
@@ -26,10 +29,8 @@ feedback_data = []
|
|
| 26 |
api = HfApi()
|
| 27 |
|
| 28 |
def set_sleep_time():
|
| 29 |
-
# Set the Space to sleep between 2 AM and 6 AM UTC
|
| 30 |
sleep_start = time(hour=2, minute=0)
|
| 31 |
sleep_end = time(hour=6, minute=0)
|
| 32 |
-
|
| 33 |
try:
|
| 34 |
api.set_space_sleep_time(
|
| 35 |
repo_id="Oranblock/Websitem", # Replace with your actual Space name
|
|
@@ -64,19 +65,24 @@ def generate_content():
|
|
| 64 |
text_content = generate_text()
|
| 65 |
image_prompt = "An abstract representation of a unique website"
|
| 66 |
image = generate_image(image_prompt)
|
| 67 |
-
|
| 68 |
sentiment = analyze_sentiment(text_content)
|
| 69 |
sentiment_label = "Positive" if sentiment[1] > sentiment[0] else "Negative"
|
| 70 |
-
|
| 71 |
return text_content, image, f"Content Sentiment: {sentiment_label}"
|
| 72 |
|
| 73 |
def save_feedback(feedback, rating):
    """Record one piece of user feedback and report the running total.

    Args:
        feedback: Free-text feedback string from the user.
        rating: The rating the user submitted alongside the text.

    Returns:
        A status message including the total number of entries collected.
    """
    # Append to the module-level feedback_data list (in-memory only;
    # NOTE(review): entries are lost on restart — confirm persistence elsewhere).
    entry = {"text": feedback, "rating": rating}
    feedback_data.append(entry)
    return f"Feedback saved. Total feedback collected: {len(feedback_data)}"
|
| 76 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
# Create Gradio interface
|
| 78 |
with gr.Blocks(theme=gr.themes.huggingface) as demo:
|
| 79 |
gr.Markdown("# AI-Driven Dynamic Website")
|
|
|
|
| 80 |
with gr.Row():
|
| 81 |
with gr.Column():
|
| 82 |
text_output = gr.Textbox(label="Generated Content")
|
|
|
|
| 8 |
from datasets import Dataset
|
| 9 |
from huggingface_hub import HfApi
|
| 10 |
from datetime import datetime, time
|
| 11 |
+
from accelerate import Accelerator
|
| 12 |
|
| 13 |
+
# Initialize Accelerator
|
| 14 |
+
accelerator = Accelerator()
|
| 15 |
+
device = accelerator.device
|
| 16 |
|
| 17 |
# Initialize models
|
| 18 |
text_generator = pipeline("text-generation", model="gpt2-medium", device=device)
|
| 19 |
image_generator = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
| 20 |
+
image_generator = accelerator.prepare(image_generator.to(device))
|
| 21 |
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
|
| 22 |
+
sentiment_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
|
| 23 |
+
sentiment_model = accelerator.prepare(sentiment_model.to(device))
|
| 24 |
|
| 25 |
# Global variables for feedback collection
|
| 26 |
feedback_data = []
|
|
|
|
| 29 |
api = HfApi()
|
| 30 |
|
| 31 |
def set_sleep_time():
|
|
|
|
| 32 |
sleep_start = time(hour=2, minute=0)
|
| 33 |
sleep_end = time(hour=6, minute=0)
|
|
|
|
| 34 |
try:
|
| 35 |
api.set_space_sleep_time(
|
| 36 |
repo_id="Oranblock/Websitem", # Replace with your actual Space name
|
|
|
|
| 65 |
text_content = generate_text()
|
| 66 |
image_prompt = "An abstract representation of a unique website"
|
| 67 |
image = generate_image(image_prompt)
|
|
|
|
| 68 |
sentiment = analyze_sentiment(text_content)
|
| 69 |
sentiment_label = "Positive" if sentiment[1] > sentiment[0] else "Negative"
|
|
|
|
| 70 |
return text_content, image, f"Content Sentiment: {sentiment_label}"
|
| 71 |
|
| 72 |
def save_feedback(feedback, rating):
    """Store a feedback entry and return a confirmation message.

    Args:
        feedback: The user's feedback text.
        rating: The accompanying rating value.

    Returns:
        Confirmation string with the current count of stored entries.
    """
    # feedback_data is a module-level accumulator shared across calls.
    feedback_data.append({"text": feedback, "rating": rating})
    total = len(feedback_data)
    return f"Feedback saved. Total feedback collected: {total}"
|
| 75 |
|
| 76 |
+
def get_gpu_info():
    """Describe the active CUDA device, or say that no GPU is available.

    Returns:
        A human-readable string: either the name of CUDA device 0 or
        a fixed "GPU not available" message.
    """
    # Guard clause: bail out early on CPU-only hosts.
    if not torch.cuda.is_available():
        return "GPU not available"
    device_name = torch.cuda.get_device_name(0)
    return f"Using GPU: {device_name}"
|
| 81 |
+
|
| 82 |
# Create Gradio interface
|
| 83 |
with gr.Blocks(theme=gr.themes.huggingface) as demo:
|
| 84 |
gr.Markdown("# AI-Driven Dynamic Website")
|
| 85 |
+
gr.Markdown(get_gpu_info())
|
| 86 |
with gr.Row():
|
| 87 |
with gr.Column():
|
| 88 |
text_output = gr.Textbox(label="Generated Content")
|