Spanicin committed on
Commit
261aa57
·
verified ·
1 Parent(s): ebb14dd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -46
app.py CHANGED
@@ -31,54 +31,48 @@ app.config['generation_thread'] = None
31
  # return jsonify({"error": f"Failed to download model: {str(e)}"}), 500
32
 
33
  def generate_image_gif(prompt):
34
- global pipe
35
- if pipe is None:
 
 
 
 
36
  try:
37
- print('Downloading the model weights')
38
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
39
- xm = load_model('transmitter', device=device)
40
- model = load_model('text300M', device=device)
41
- diffusion = diffusion_from_config(load_config('diffusion'))
42
- pipe = 'Model loaded'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  except Exception as e:
44
- print(f"Error downloading the model: {e}")
45
-
46
- try:
47
- batch_size = 1
48
- guidance_scale = 30.0
49
-
50
- latents = sample_latents(
51
- batch_size=batch_size,
52
- model=model,
53
- diffusion=diffusion,
54
- guidance_scale=guidance_scale,
55
- model_kwargs=dict(texts=[prompt] * batch_size),
56
- progress=True,
57
- clip_denoised=True,
58
- use_fp16=True,
59
- use_karras=True,
60
- karras_steps=64,
61
- sigma_min=1E-3,
62
- sigma_max=160,
63
- s_churn=0,
64
- )
65
- render_mode = 'nerf'
66
- size = 256
67
- # render_mode = 'nerf' # you can change this to 'stf'
68
- # size = # this is the size of the renders, higher values take longer to render.
69
-
70
- cameras = create_pan_cameras(size, device)
71
- images = decode_latent_images(xm, latents, cameras, rendering_mode=render_mode)
72
- writer = io.BytesIO()
73
- images[0].save(writer, format="GIF", save_all=True, append_images=images[1:], duration=100, loop=0)
74
- writer.seek(0)
75
- data = base64.b64encode(writer.read()).decode("ascii")
76
- response_data = {'video_base64': data,'status':None}
77
- print('response_data',response_data)
78
- return response_data
79
- except Exception as e:
80
- print(f"Error generating 3D: {e}")
81
- return jsonify({"error": f"Failed to generate 3D animation: {str(e)}"}), 500
82
 
83
  def background(prompt):
84
  with app.app_context():
 
31
  # return jsonify({"error": f"Failed to download model: {str(e)}"}), 500
32
 
33
def generate_image_gif(prompt):
    """Generate a 360-degree GIF of a 3D object described by *prompt*.

    Runs the Shap-E text-to-3D pipeline: sample latents conditioned on the
    text prompt, render a panning camera sweep, and encode the frames as an
    animated GIF.

    Parameters
    ----------
    prompt : str
        Free-text description of the object to generate.

    Returns
    -------
    dict
        ``{'video_base64': <ascii base64 of the GIF>, 'status': None}`` on
        success.
    tuple
        ``(flask.Response, 500)`` produced by ``jsonify`` on failure —
        NOTE(review): callers receive two different shapes depending on
        outcome; confirm ``background()`` handles both.
    """
    try:
        # Load the (large) model weights once and memoize them on the
        # function object, so subsequent requests skip the expensive
        # download/initialization. The previous revision cached via a
        # module-level `pipe` sentinel; this keeps that behavior without
        # a global.
        try:
            cache = generate_image_gif._models
        except AttributeError:
            print('Downloading the model weights')
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            cache = {
                'device': device,
                'xm': load_model('transmitter', device=device),
                'model': load_model('text300M', device=device),
                'diffusion': diffusion_from_config(load_config('diffusion')),
            }
            generate_image_gif._models = cache

        device = cache['device']

        batch_size = 1
        guidance_scale = 30.0  # strong text guidance for faithful geometry

        latents = sample_latents(
            batch_size=batch_size,
            model=cache['model'],
            diffusion=cache['diffusion'],
            guidance_scale=guidance_scale,
            model_kwargs=dict(texts=[prompt] * batch_size),
            progress=True,
            clip_denoised=True,
            use_fp16=True,
            use_karras=True,
            karras_steps=64,
            sigma_min=1E-3,
            sigma_max=160,
            s_churn=0,
        )

        render_mode = 'nerf'  # you can change this to 'stf'
        size = 256  # render resolution; higher values take longer to render

        # Render one frame per camera position, then pack the frames into a
        # looping animated GIF held in memory.
        cameras = create_pan_cameras(size, device)
        images = decode_latent_images(xm := cache['xm'], latents, cameras, rendering_mode=render_mode)
        writer = io.BytesIO()
        images[0].save(writer, format="GIF", save_all=True, append_images=images[1:], duration=100, loop=0)
        writer.seek(0)
        data = base64.b64encode(writer.read()).decode("ascii")
        response_data = {'video_base64': data,'status':None}
        print('response_data',response_data)
        return response_data
    except Exception as e:
        # Model loading is now inside this handler too, so a failed weight
        # download yields the JSON error response instead of an uncaught
        # exception in the worker thread.
        print(f"Error generating 3D: {e}")
        return jsonify({"error": f"Failed to generate 3D animation: {str(e)}"}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
  def background(prompt):
78
  with app.app_context():