JagmeetMinhas22 committed on
Commit
9a5a343
·
verified ·
1 Parent(s): ab1c876

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -18
app.py CHANGED
@@ -4,7 +4,6 @@ import streamlit as st
4
  from huggingface_hub import InferenceClient
5
  import os
6
  import torch
7
- from diffusers import FluxPipeline
8
 
9
  #Use an API endpoint to call in the MS inference client
10
  apiToken = os.getenv("my_API_Key")
@@ -14,8 +13,6 @@ client = InferenceClient(api_key=apiToken)
14
  summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
15
 
16
  #Instantiate the image generation pipeline
17
- imagePipeline = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
18
- imagePipeline.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU
19
 
20
  #Intro text
21
  st.write("Are you writing a story but don't know where to start?")
@@ -44,18 +41,4 @@ if writingPrompt:
44
  st.write(response_content)
45
 
46
  #Image generation
47
- summary = summarizer(response_content, max_length=130, min_length=30, do_sample=False)
48
-
49
- """
50
- prompt = "A cat holding a sign that says hello world"
51
- image = imagePipeline(
52
- prompt,
53
- guidance_scale=0.0,
54
- num_inference_steps=4,
55
- max_sequence_length=256,
56
- generator=torch.Generator("cpu").manual_seed(0)
57
- ).images[0]
58
-
59
- st.image(image)
60
-
61
- """
 
4
  from huggingface_hub import InferenceClient
5
  import os
6
  import torch
 
7
 
8
  #Use an API endpoint to call in the MS inference client
9
  apiToken = os.getenv("my_API_Key")
 
13
  summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
14
 
15
  #Instantiate the image generation pipeline
 
 
16
 
17
  #Intro text
18
  st.write("Are you writing a story but don't know where to start?")
 
41
  st.write(response_content)
42
 
43
  #Image generation
44
+ summary = summarizer(response_content, max_length=130, min_length=30, do_sample=False)