Spaces:
Sleeping
Sleeping
File size: 2,013 Bytes
7661018 bd22318 37d1c08 b7b1ebe 385df2f 37d1c08 7661018 37d1c08 bd22318 7661018 bd22318 c2dcaf1 d55a907 7df3e67 7661018 7df3e67 75c8ffb 7661018 37d1c08 7661018 37d1c08 7661018 0b40c37 37d1c08 0b40c37 435190c 7661018 435190c 62e9dc5 9a5a343 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
#Import dependencies
from transformers import pipeline
import streamlit as st
from huggingface_hub import InferenceClient
import os
import torch
# --- Inference client and pipeline setup ---
# Read the HF API token from the environment. os.getenv returns None when the
# variable is unset, which makes authenticated calls fail later with an opaque
# error -- TODO(review): surface a clear message in the UI when it is missing.
api_token = os.getenv("my_API_Key")
client = InferenceClient(api_key=api_token)
# Summarization pipeline (BART). Its output is intended to seed an
# image-generation step, but that pipeline is not instantiated anywhere yet.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
# --- Prompt input UI ---
# Short pitch shown above the input field.
st.write("Are you writing a story but don't know where to start?")
# The user's story idea; st.text_input yields "" until something is entered,
# so the submission check below is falsy on first render.
writingPrompt = st.text_input(
    "Paste your idea here, and have an introduction generated for you: "
)
def _stream_completion(chat_messages):
    """Stream a Phi-3.5 chat completion and return the accumulated text.

    Args:
        chat_messages: list of ``{"role": ..., "content": ...}`` dicts for
            the chat-completions API.

    Returns:
        The full response text assembled from the streamed chunks.
    """
    stream = client.chat.completions.create(
        model="microsoft/Phi-3.5-mini-instruct",
        messages=chat_messages,
        max_tokens=500,
        stream=True,
    )
    text = ""
    for chunk in stream:
        # delta.content can be None on role/finish-reason chunks in
        # OpenAI-style streaming; the original `+=` raised TypeError there.
        piece = chunk.choices[0].delta.content
        if piece:
            text += piece
    return text


# Runs once the user has submitted a prompt (text_input returns "" until then).
if writingPrompt:
    # First pass: generate a story introduction from the user's idea.
    messages = [{ "role": "user", "content": f"As an author, write an introduction using this prompt: {writingPrompt}"}]
    response_content = _stream_completion(messages)
    st.write(response_content)

    st.write("Here's some more information to get you started:")
    # Second pass: extract characters, locations, and story beats from the
    # generated introduction.
    messages2 = [{ "role": "user", "content": f"Who are the main characters in this text, what are some key locations that should be included, and what are the main story beats of this text: {response_content}"}]
    response_content_2 = _stream_completion(messages2)
    st.write(response_content_2)

    # Condense the introduction -- intended as the prompt for an
    # image-generation step that is not implemented yet, so the result is
    # currently unused. TODO(review): wire this into an image pipeline.
    summary = summarizer(response_content, max_length=130, min_length=30, do_sample=False)