alpercagann committed on
Commit
0c16b74
·
1 Parent(s): eecd177

Update controller to test Diffusers

Browse files
Files changed (1) hide show
  1. controller.py +30 -7
controller.py CHANGED
@@ -7,6 +7,7 @@ class SimpleSonicDiffusionController:
7
  def __init__(self):
8
  self.model_loaded = False
9
  self.tokenizer_loaded = False
 
10
  self.device = self._get_device()
11
 
12
  def _get_device(self):
@@ -25,38 +26,60 @@ class SimpleSonicDiffusionController:
25
 
26
  def load_model(self):
27
  """Load a simple model to verify libraries are working"""
 
 
28
  try:
 
29
  import torch
30
  self.test_tensor = torch.rand(3, 3)
 
31
 
32
  # Try loading a simple tokenizer from transformers
33
  try:
34
  from transformers import AutoTokenizer
35
  self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
36
  self.tokenizer_loaded = True
37
- message = "Model and tokenizer loaded successfully!"
 
 
 
 
 
 
 
 
 
 
38
  except Exception as e:
39
- message = f"PyTorch loaded, but Transformers failed: {str(e)}"
40
 
41
  self.model_loaded = True
42
- return message
43
 
44
  except Exception as e:
45
  return f"Error loading model: {str(e)}"
46
 
47
  def generate(self, text_prompt, audio_path=None):
48
- """Generate using transformers if available"""
49
  if not self.model_loaded:
50
  return "Error: Model not loaded. Please click 'Load Model' first."
51
 
 
 
52
  try:
 
53
  if self.tokenizer_loaded:
54
- # Use the tokenizer to process the text
55
  tokens = self.tokenizer(text_prompt, return_tensors="pt")
56
  token_count = len(tokens['input_ids'][0])
57
- return f"Processed prompt: '{text_prompt}'\nTokenized into {token_count} tokens."
 
 
 
 
58
  else:
59
- return f"Tokenizer not available. Simple echo: '{text_prompt}'"
 
 
60
 
61
  except Exception as e:
62
  return f"Error during generation: {str(e)}"
 
7
  def __init__(self):
8
  self.model_loaded = False
9
  self.tokenizer_loaded = False
10
+ self.pipe_loaded = False
11
  self.device = self._get_device()
12
 
13
  def _get_device(self):
 
26
 
27
  def load_model(self):
28
  """Load a simple model to verify libraries are working"""
29
+ status_messages = []
30
+
31
  try:
32
+ # Test PyTorch
33
  import torch
34
  self.test_tensor = torch.rand(3, 3)
35
+ status_messages.append("✓ PyTorch loaded successfully")
36
 
37
  # Try loading a simple tokenizer from transformers
38
  try:
39
  from transformers import AutoTokenizer
40
  self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
41
  self.tokenizer_loaded = True
42
+ status_messages.append(" Transformers tokenizer loaded")
43
+ except Exception as e:
44
+ status_messages.append(f"✗ Transformers error: {str(e)}")
45
+
46
+ # Try loading a simple pipeline from diffusers
47
+ try:
48
+ from diffusers import DiffusionPipeline
49
+ # Just check if the class exists, don't actually load a model
50
+ self.pipe_class = DiffusionPipeline
51
+ self.pipe_loaded = True
52
+ status_messages.append("✓ Diffusers available")
53
  except Exception as e:
54
+ status_messages.append(f" Diffusers error: {str(e)}")
55
 
56
  self.model_loaded = True
57
+ return "\n".join(status_messages)
58
 
59
  except Exception as e:
60
  return f"Error loading model: {str(e)}"
61
 
62
  def generate(self, text_prompt, audio_path=None):
63
+ """Generate text using available libraries"""
64
  if not self.model_loaded:
65
  return "Error: Model not loaded. Please click 'Load Model' first."
66
 
67
+ results = []
68
+
69
  try:
70
+ # Use tokenizer if available
71
  if self.tokenizer_loaded:
 
72
  tokens = self.tokenizer(text_prompt, return_tensors="pt")
73
  token_count = len(tokens['input_ids'][0])
74
+ results.append(f"Transformers: Tokenized into {token_count} tokens")
75
+
76
+ # Check diffusers status
77
+ if self.pipe_loaded:
78
+ results.append("Diffusers is available for pipeline creation")
79
  else:
80
+ results.append("Diffusers is not available")
81
+
82
+ return "\n".join(results)
83
 
84
  except Exception as e:
85
  return f"Error during generation: {str(e)}"