Oranblock committed
Commit 4843a5e · verified · 1 Parent(s): 94b93e1

Update app.py

Files changed (1): app.py (+15, -10)
app.py CHANGED

```diff
@@ -3,24 +3,27 @@ import gradio as gr
 from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
 from diffusers import StableDiffusionPipeline
 import torch
-import random
 import numpy as np
 from datasets import Dataset
 from huggingface_hub import HfApi
 from datetime import datetime, time
 from accelerate import Accelerator
+from accelerate.utils import set_seed
+
+# Set a seed for reproducibility
+set_seed(42)
 
 # Initialize Accelerator
 accelerator = Accelerator()
-device = accelerator.device
+device = 0 if torch.cuda.is_available() else -1  # Use GPU 0 if available, else CPU
 
 # Initialize models
 text_generator = pipeline("text-generation", model="gpt2-medium", device=device)
 image_generator = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
-image_generator = accelerator.prepare(image_generator.to(device))
+image_generator = image_generator.to(f"cuda:{device}" if device >= 0 else "cpu")
 tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
 sentiment_model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
-sentiment_model = accelerator.prepare(sentiment_model.to(device))
+sentiment_model = sentiment_model.to(f"cuda:{device}" if device >= 0 else "cpu")
 
 # Global variables for feedback collection
 feedback_data = []
@@ -43,21 +46,23 @@ def set_sleep_time():
         return f"Error setting sleep time: {str(e)}"
 
 @spaces.GPU
+@torch.no_grad()
 def generate_text():
     prompt = "This AI-driven website is unique because"
     return text_generator(prompt, max_length=100, num_return_sequences=1)[0]['generated_text']
 
 @spaces.GPU
+@torch.no_grad()
 def generate_image(prompt):
-    with torch.autocast(device.type):
-        image = image_generator(prompt).images[0]
+    with torch.autocast("cuda" if device >= 0 else "cpu"):
+        image = image_generator(prompt, guidance_scale=7.5).images[0]
     return image
 
 @spaces.GPU
+@torch.no_grad()
 def analyze_sentiment(text):
-    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)
-    with torch.no_grad():
-        outputs = sentiment_model(**inputs)
+    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(f"cuda:{device}" if device >= 0 else "cpu")
+    outputs = sentiment_model(**inputs)
     probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
     return probabilities.cpu().numpy()[0]
 
@@ -77,7 +82,7 @@ def get_gpu_info():
     if torch.cuda.is_available():
         return f"Using GPU: {torch.cuda.get_device_name(0)}"
     else:
-        return "GPU not available"
+        return "GPU not available, using CPU"
 
 # Create Gradio interface
 with gr.Blocks() as demo:
```
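A note on the device-handling change above: `transformers.pipeline` accepts an integer device index (`0` for the first GPU, `-1` for CPU), while diffusers pipelines and plain PyTorch modules expect a device string or `torch.device`. A minimal sketch of the convention the new code relies on (the `as_torch_device` helper is hypothetical, added only for illustration):

```python
import torch

# transformers.pipeline(..., device=N) convention:
# N >= 0 selects CUDA ordinal N, N == -1 selects the CPU.
device = 0 if torch.cuda.is_available() else -1

def as_torch_device(index: int) -> str:
    """Hypothetical helper: translate the integer index into the
    device string that diffusers pipelines and nn.Modules expect."""
    return f"cuda:{index}" if index >= 0 else "cpu"

print(as_torch_device(device))  # "cuda:0" on a GPU machine, else "cpu"
```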
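The commit also replaces the `with torch.no_grad():` block inside `analyze_sentiment` with `@torch.no_grad()` decorators on all three inference functions. `torch.no_grad` works in both forms; as a decorator it disables gradient tracking for the entire function body, which avoids building an autograd graph during inference. A minimal sketch:

```python
import torch

@torch.no_grad()  # no autograd graph is recorded inside this function
def predict(model: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    logits = model(x)
    # Outputs here have requires_grad=False, saving memory at inference time
    return torch.nn.functional.softmax(logits, dim=-1)
```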
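Finally, `accelerate.utils.set_seed` seeds Python's `random` module, NumPy, and PyTorch (including CUDA) in a single call, which is likely why the separate `import random` could be dropped. A quick reproducibility check, assuming `accelerate` is installed:

```python
import torch
from accelerate.utils import set_seed

set_seed(42)
a = torch.randn(3)

set_seed(42)
b = torch.randn(3)

# Re-seeding reproduces the same draw
assert torch.equal(a, b)
```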