concauu committed on
Commit
ca042fc
·
verified ·
1 Parent(s): 42a7cf2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -4
app.py CHANGED
@@ -5,9 +5,6 @@ from diffusers import FluxPipeline
5
  from groq import Groq # Import the Groq library
6
  from cryptography.fernet import Fernet
7
  from huggingface_hub import login
8
-
9
- # Replace 'your_access_token' with your actual token
10
-
11
  def get_hf_token(encrypted_token):
12
  # Retrieve the decryption key from an environment variable
13
  key = "K4FlQbffvTcDxT2FIhrOPV1eue6ia45FFR3kqp2hHbM="
@@ -22,6 +19,7 @@ def get_hf_token(encrypted_token):
22
  # Decrypt and decode the token
23
  decrypted_token = f.decrypt(encrypted_token).decode()
24
  return decrypted_token
 
25
  decrypted_token = get_hf_token("gAAAAABn3GfShExoJd50nau3B5ZJNiQ9dRD1ACO3XXMwVaIQMkmi59cL-MKGr6SYnsB0E2gGITJG2j29Ar9yjaZP-EC6hHsCBmwKSj4aFtTor9_n0_NdMBv1GtlxZRmwnQwriB-Xr94e")
26
  login(token=decrypted_token)
27
  pipe = FluxPipeline.from_pretrained(
@@ -29,7 +27,6 @@ pipe = FluxPipeline.from_pretrained(
29
  torch_dtype=torch.bfloat16
30
  )
31
  pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
32
- groq_client = Groq(api_key="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")
33
 
34
  def enhance_prompt(user_prompt):
35
  """Enhances the given prompt using Groq and returns the refined prompt."""
 
5
  from groq import Groq # Import the Groq library
6
  from cryptography.fernet import Fernet
7
  from huggingface_hub import login
 
 
 
8
  def get_hf_token(encrypted_token):
9
  # Retrieve the decryption key from an environment variable
10
  key = "K4FlQbffvTcDxT2FIhrOPV1eue6ia45FFR3kqp2hHbM="
 
19
  # Decrypt and decode the token
20
  decrypted_token = f.decrypt(encrypted_token).decode()
21
  return decrypted_token
22
+ groq_client = Groq(api_key="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")
23
  decrypted_token = get_hf_token("gAAAAABn3GfShExoJd50nau3B5ZJNiQ9dRD1ACO3XXMwVaIQMkmi59cL-MKGr6SYnsB0E2gGITJG2j29Ar9yjaZP-EC6hHsCBmwKSj4aFtTor9_n0_NdMBv1GtlxZRmwnQwriB-Xr94e")
24
  login(token=decrypted_token)
25
  pipe = FluxPipeline.from_pretrained(
 
27
  torch_dtype=torch.bfloat16
28
  )
29
  pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
 
30
 
31
  def enhance_prompt(user_prompt):
32
  """Enhances the given prompt using Groq and returns the refined prompt."""