# BlockBuster / app.py
# Author: JagmeetMinhas22 (Hugging Face Space; commit 435190c, verified)
#Import dependencies
from transformers import pipeline
import streamlit as st
from huggingface_hub import InferenceClient
import os
import torch
#Use an API endpoint to call in the MS inference client.
#os.getenv returns None when the variable is unset; fail fast with a clear
#message instead of letting InferenceClient issue unauthenticated requests
#that only error out later mid-stream.
apiToken = os.getenv("my_API_Key")
if not apiToken:
    st.error("Missing API key: set the 'my_API_Key' environment variable.")
    st.stop()
client = InferenceClient(api_key=apiToken)
#Instantiate the summarization pipeline - this will be used for the image generation
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
#Instantiate the image generation pipeline
#NOTE(review): no image-generation pipeline is actually created here — only the
#summarizer above exists, so the summary computed at the end of the file is
#never fed to an image model. TODO: confirm whether this was left unfinished.
#Intro text shown above the input box
st.write("Are you writing a story but don't know where to start?")
#Input prompt: returns "" until the user submits text, which gates the
#generation logic below (empty string is falsy)
writingPrompt = st.text_input("Paste your idea here, and have an introduction generated for you: ")
def _stream_chat_completion(prompt_messages):
    """Stream a Phi-3.5 chat completion and return the accumulated text.

    Deduplicates the two identical streaming loops below. Guards against
    chunks whose delta carries no content (e.g. the final chunk), which would
    otherwise raise TypeError on string concatenation with None.
    """
    stream = client.chat.completions.create(
        model="microsoft/Phi-3.5-mini-instruct",
        messages=prompt_messages,
        max_tokens=500,
        stream=True,
    )
    text = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # skip None/empty deltas
            text += delta
    return text

#Conditional to check submission (st.text_input returns "" until the user
#types something, so nothing below runs on first render)
if writingPrompt:
    #First pass: generate the story introduction from the user's idea
    messages = [{"role": "user", "content": f"As an author, write an introduction using this prompt: {writingPrompt}"}]
    response_content = _stream_chat_completion(messages)
    #Output response
    st.write(response_content)
    st.write("Here's some more information to get you started:")
    #Second pass: extract characters, locations, and story beats from the intro
    messages2 = [{"role": "user", "content": f"Who are the main characters in this text, what are some key locations that should be included, and what are the main story beats of this text: {response_content}"}]
    response_content_2 = _stream_chat_completion(messages2)
    #Output response
    st.write(response_content_2)
    #Image generation: condense the intro into a short summary intended as an
    #image prompt.
    #NOTE(review): `summary` is currently unused — the image-generation
    #pipeline referenced near the top of the file was never instantiated.
    #TODO: wire the summary into an actual image model, or drop this call.
    summary = summarizer(response_content, max_length=130, min_length=30, do_sample=False)