concauu committed on
Commit
d21aa88
·
verified ·
1 Parent(s): ab7d59f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -2
app.py CHANGED
@@ -3,8 +3,28 @@ import gradio as gr
3
  import torch
4
  from diffusers import FluxPipeline
5
  from groq import Groq # Import the Groq library
 
6
 
7
- pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
9
 
10
  def enhance_prompt(user_prompt):
@@ -51,7 +71,7 @@ def generate_image(prompt):
51
  guidance_scale=3.5,
52
  num_inference_steps=50,
53
  max_sequence_length=512,
54
- generator=torch.Generator("gpu").manual_seed(0)
55
  ).images[0]
56
  except Exception as e:
57
  # Optionally, handle errors (you can also return a default error image)
 
3
  import torch
4
  from diffusers import FluxPipeline
5
  from groq import Groq # Import the Groq library
6
+ from cryptography.fernet import Fernet
7
 
8
def get_hf_token(encrypted_token):
    """Decrypt an encrypted Hugging Face access token.

    Args:
        encrypted_token: Fernet ciphertext (str or bytes) produced by
            encrypting the HF access token with the key stored in the
            DECRYPTION_KEY environment variable.

    Returns:
        The decrypted access token as a str.

    Raises:
        ValueError: if the DECRYPTION_KEY environment variable is not set.
    """
    import os  # local import so this block is self-contained

    # SECURITY: the key MUST come from the environment, as the original
    # comment and error message intended. Hard-coding the Fernet key in
    # source next to the encrypted token leaks the secret — anyone reading
    # the file can decrypt the token. If the key was ever committed,
    # rotate the Hugging Face token.
    key = os.environ.get("DECRYPTION_KEY")
    if not key:
        raise ValueError("Missing decryption key! Set the DECRYPTION_KEY environment variable.")

    # Fernet requires the key as bytes; the env var arrives as str.
    if isinstance(key, str):
        key = key.encode()

    f = Fernet(key)
    # Decrypt and decode the token back to a plain string.
    decrypted_token = f.decrypt(encrypted_token).decode()
    return decrypted_token
22
# Fernet ciphertext of the HF access token; only decryptable with the key
# in the DECRYPTION_KEY environment variable (never commit that key).
ENCRYPTED_HF_TOKEN = "gAAAAABn3GfShExoJd50nau3B5ZJNiQ9dRD1ACO3XXMwVaIQMkmi59cL-MKGr6SYnsB0E2gGITJG2j29Ar9yjaZP-EC6hHsCBmwKSj4aFtTor9_n0_NdMBv1GtlxZRmwnQwriB-Xr94e"
decrypted_token = get_hf_token(ENCRYPTED_HF_TOKEN)

# FLUX.1-dev is a gated model, so an access token is required to download it.
# `use_auth_token` is deprecated in recent huggingface_hub/diffusers releases;
# `token` is the supported parameter name.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
    token=decrypted_token,
)
pipe.enable_model_cpu_offload()  # save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
29
 
30
  def enhance_prompt(user_prompt):
 
71
  guidance_scale=3.5,
72
  num_inference_steps=50,
73
  max_sequence_length=512,
74
+ generator=torch.Generator("cpu").manual_seed(0)
75
  ).images[0]
76
  except Exception as e:
77
  # Optionally, handle errors (you can also return a default error image)