fmmkii committed on
Commit
508ee70
·
verified ·
1 Parent(s): e3336ef

Upload 4 files

Browse files
Files changed (4) hide show
  1. generate_script.py +76 -0
  2. imggen.py +119 -0
  3. scriptgen.py +53 -0
  4. tweaker.py +98 -0
generate_script.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.llms import Replicate
2
+ from langchain.chains import LLMChain
3
+ from langchain.prompts import PromptTemplate
4
+ from langchain_community.tools import DuckDuckGoSearchRun
5
+ import os
6
+
7
def generate_script(prompt, video_length, creativity, replicate_api):
    """
    Generate a YouTube video title and script based on the provided prompt,
    duration, and creativity level.

    Parameters:
        prompt (str): Topic of the video.
        video_length (float): Desired duration of the video in minutes.
        creativity (float): Creativity level (0.0 to 1.0), used as the LLM
            sampling temperature.
        replicate_api (str): Replicate API Key for authentication.

    Returns:
        tuple: Generated title, script, and search data.

    Raises:
        ValueError: If no Replicate API key is provided.
    """
    # Ensure the Replicate API key is set before any model call
    if not replicate_api:
        raise ValueError("Replicate API Key is missing.")
    os.environ["REPLICATE_API_TOKEN"] = replicate_api

    # Initialize the LLM with the Replicate model.
    # BUG FIX: `creativity` was accepted but ignored (temperature was
    # hard-coded to 0.7); it now drives the sampling temperature as the
    # docstring promises.
    llm = Replicate(
        model="meta/meta-llama-3-8b-instruct",
        input={
            "top_k": 0,
            "top_p": 0.95,
            "prompt": prompt,
            "max_tokens": 4096,
            "temperature": creativity,
            "length_penalty": 1,
            "max_new_tokens": 4096,
            "presence_penalty": 0,
            "log_performance_metrics": False
        },
    )

    # Define the title generation prompt
    title_template = PromptTemplate(
        input_variables=["topic"],
        template="Generate a YouTube video title for the topic '{topic}'."
    )
    title_chain = LLMChain(llm=llm, prompt=title_template, verbose=True)

    # Define the script generation prompt
    script_template = PromptTemplate(
        input_variables=["title", "duration", "search_data"],
        template=(
            "Create a YouTube script with the title '{title}' that lasts approximately {duration} minutes. "
            "Incorporate the following research information: {search_data}."
        )
    )
    script_chain = LLMChain(llm=llm, prompt=script_template, verbose=True)

    # Fetch relevant search data using DuckDuckGo
    search_tool = DuckDuckGoSearchRun()
    search_data = search_tool.run(prompt)

    # Generate the video title
    title = title_chain.run({"topic": prompt})

    # Save the generated title to a file so imggen.py can reuse it as the
    # default thumbnail prompt
    with open("generated_title.txt", "w") as file:
        file.write(title)

    # Generate the video script
    script = script_chain.run({
        "title": title,
        "duration": video_length,
        "search_data": search_data
    })

    return title, script, search_data
imggen.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import replicate
3
+ import time
4
+ from dotenv import load_dotenv
5
+ import os
6
+ import requests
7
+ from io import BytesIO
8
+ from PIL import Image
9
+ import random
10
+
11
# Load environment variables (e.g. REPLICATE_API_TOKEN) from a local .env file
load_dotenv()

# Check for a title from the script generator; when present it seeds the
# image prompt so the thumbnail matches the generated video
try:
    with open("generated_title.txt", "r") as file:
        title = file.read().strip()
    prompt = title  # Use the title as the default prompt
except FileNotFoundError:
    st.warning("No title found! Please run the script generator first.")
    prompt = ""  # Set an empty prompt if no title is available

# Retrieve the API key; Streamlit secrets take precedence over the .env value
replicate_api = st.secrets.get('REPLICATE_API_TOKEN') or os.getenv("REPLICATE_API_TOKEN")

if not replicate_api:
    # NOTE(review): only an error banner is shown — the rest of the page
    # still renders and generation will fail later without a token
    st.error("API Key not provided! Please add it to Streamlit secrets or the .env file.")
else:
    os.environ['REPLICATE_API_TOKEN'] = replicate_api  # Set the API key for Replicate

# Initialize session state for image history (persists across reruns)
if "image_history" not in st.session_state:
    st.session_state.image_history = []

# Streamlit UI setup
st.title("AI Image Generator")
prompt = st.text_input("Enter a prompt for the image", value=prompt)  # Title (if any) pre-fills the prompt
styles = st.multiselect("Image artstyle", ['Impressionism', 'Cubism', 'Surrealism', 'Baroque', 'Art Noveau', 'Cyberpunk', 'Synthwave', 'Anime', 'Pixel Art', 'Low Poly', 'Dark Fantasy', 'Space Opera', 'Steampunk', 'Fantasy', 'Abstract','Glitch Art', 'Kawaii','Pop Art'])

# Sidebar: generation parameters forwarded verbatim to the Replicate call below
with st.sidebar:
    st.title("Options")
    aspect_ratio = st.selectbox("Aspect Ratio", ("16:9", "4:3", "3:2", "1:1", "21:9"))
    seed = st.number_input("Seed (leave empty for random)", min_value=0, step=1, format="%d")
    quality = st.slider("Quality", min_value=0, max_value=100, value=90, step=1)
    strength = st.slider("Prompt Strength", min_value=0.0, max_value=1.0, value=0.9, step=0.01)
    steps = st.slider("Sampling Steps", min_value=1, max_value=28, value=28, step=1)
    cfg = st.slider("Similarity", min_value=0.0, max_value=20.0, value=3.5, step=0.1)
49
# Function to convert an image URL into an in-memory PNG buffer
def get_image_buffer(image_url):
    """
    Download *image_url* and return its contents as a BytesIO buffer
    re-encoded as PNG.

    Returns:
        BytesIO | None: A seekable PNG buffer, or None when the download
        fails — callers use the None to skip the download button.
    """
    # BUG FIX: requests has no default timeout, so a stalled image host
    # would hang the Streamlit script forever; network errors now also
    # honor the None-on-failure contract instead of propagating.
    try:
        response = requests.get(image_url, timeout=30)
    except requests.RequestException:
        return None
    if response.status_code != 200:
        return None
    image = Image.open(BytesIO(response.content))
    buffer = BytesIO()
    image.save(buffer, format="PNG")  # re-encode as PNG regardless of source format
    buffer.seek(0)  # rewind so the download button reads from the start
    return buffer
60
+
61
# Handle image generation
if st.button("Generate Image"):
    # NOTE(review): a user-chosen seed of 0 is falsy and is silently
    # replaced by a random seed — confirm whether 0 should be valid
    seed = int(seed) if seed else random.randint(0, 99999)

    if not prompt:
        st.error("Please enter a prompt!")
    else:
        with st.spinner('Generating image...'):
            start_time = time.time()
            try:
                # Replicate API call — all knobs come from the sidebar widgets
                output = replicate.run(
                    "stability-ai/stable-diffusion-3",
                    input={
                        "cfg": cfg,
                        "steps": steps,
                        # NOTE(review): `styles` is a Python list, so its
                        # repr (brackets/quotes) is interpolated verbatim
                        # into the prompt — confirm this is intended
                        "prompt": f"With this context of the prompt: {prompt}, make me a video thumbnail with these artstyles: {styles}",
                        "aspect_ratio": aspect_ratio,
                        "output_format": "png",
                        "output_quality": quality,
                        "negative_prompt": "bad quality, worse quality, deformed",
                        "prompt_strength": strength,
                        "seed": seed
                    }
                )
                # Display output and elapsed time
                if output:
                    image_url = output[0]  # first image of the returned batch
                    st.image(image_url, caption="Generated Image", use_container_width=True)
                    end_time = time.time()
                    elapsed_time = end_time - start_time
                    st.write(f"Image Generated in {elapsed_time:.2f} seconds")

                    # Save to session state so the history expander can show it
                    st.session_state.image_history.append((image_url, prompt))
                else:
                    st.error("No output received. Please try again!")
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")
100
+
101
# Display image history (most recent first) with a download button per image
with st.expander("Show Prompt History"):
    if st.session_state.image_history:
        for idx, (image_url, image_prompt) in enumerate(reversed(st.session_state.image_history)):
            # 1-based numbering; the newest image keeps the highest number
            st.subheader(f"Image {len(st.session_state.image_history) - idx}")
            st.text(f"Prompt: {image_prompt}")
            st.image(image_url, use_container_width=True)

            # Generate download button for each image (None means the fetch failed)
            buffer = get_image_buffer(image_url)
            if buffer:
                st.download_button(
                    label="Download Image",
                    data=buffer,
                    file_name=f"generated_image_{len(st.session_state.image_history) - idx}.png",
                    mime="image/png"
                )
    else:
        st.write("No images generated yet.")
scriptgen.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from dotenv import load_dotenv
3
+ from generate_script import generate_script
4
+ import os
5
+
6
# Load environment variables from .env file
load_dotenv()

# Streamlit app title and subtitle
st.title("YouTube-Inator")
st.subheader("Make fun stuff with YouTube-Inator!")

with st.sidebar:
    st.title("Options")
    creativity = st.slider("Set creativity level:", min_value=0.0, max_value=1.0, value=0.5)

# BUG FIX: `replicate_api` was referenced below but never defined anywhere
# in this file, raising a NameError on every button click. Resolve it the
# same way imggen.py does: Streamlit secrets first, then the .env file.
replicate_api = st.secrets.get('REPLICATE_API_TOKEN') or os.getenv("REPLICATE_API_TOKEN")

# Input fields for video generation parameters
prompt = st.text_input("Provide the topic of the video:", placeholder="e.g., How to bake a cake")
video_length = st.number_input("Specify length in minutes", min_value=1.0, step=0.5, value=10.0)

# Button to generate the video script
generate_script_button = st.button("Generate Script")

# Generate script on button click
if generate_script_button:
    if not replicate_api:
        st.error("Please provide a valid API Key.")
    else:
        # Set the API Key in the environment variable for use by other modules
        os.environ["REPLICATE_API_TOKEN"] = replicate_api

        try:
            # Generate the video script using the utility function
            with st.spinner("Generating your script..."):
                title, script, search_data = generate_script(prompt, video_length, creativity, replicate_api)

            # Display the results
            st.success("Script generated successfully!")
            st.subheader(f"Title: {title}")
            st.write(f"Script: {script}")

            # Display additional search data
            with st.expander("Show search data for the script"):
                st.write(search_data)

        except Exception as e:
            # Handle any errors that occur during script generation
            st.error(f"An error occurred. Try again {e}")
            with st.expander("Expand to see details"):
                st.write(f'{e}')
tweaker.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from langchain_community.llms import Replicate
3
+ from langchain.chains import ConversationChain
4
+ from langchain.memory import ConversationSummaryMemory
5
+ from langchain.prompts import ChatPromptTemplate
6
+ import os
7
+
8
# Setting page title and header
st.title("Creative Consultant")
st.subheader("Make fun stuff with YouTube-Inator!")

# Sidebar: conversation summary display and download
with st.sidebar:
    # Button to summarise the conversation
    summarise_button = st.button("Summarise the conversation", key="summarise_button")

    # Check if the summarise button was clicked
    if summarise_button:
        if st.session_state.get('conversation') and st.session_state['conversation'].memory:
            # ConversationSummaryMemory keeps a rolling summary in .buffer
            summary = st.session_state['conversation'].memory.buffer
            st.session_state['summary'] = summary  # Store in session state for the download button
            st.write("Summary:\n\n" + summary)
        else:
            # Handle case where no conversation memory exists yet
            summary = "No conversation to summarise leh!"
            st.write(summary)

    # Add a download button if a summary exists (survives reruns via session state)
    if st.session_state.get('summary'):
        st.download_button(
            label="Download Summary",
            data=st.session_state['summary'],
            file_name="conversation_summary.txt",
            mime="text/plain",
        )

# Initialize conversation state if not already done
if 'conversation' not in st.session_state:
    st.session_state['conversation'] = None  # ConversationChain, built lazily on first message
if 'messages' not in st.session_state:
    st.session_state['messages'] = []  # chat transcript: list of {"role", "content"} dicts
if 'REPLICATE_API_TOKEN' not in st.session_state:
    st.session_state['REPLICATE_API_TOKEN'] = ''
45
+
46
# Function to get a response from the Replicate-hosted LLM
# (fixed comment: the previous one said "OpenAI model" but the code uses Replicate)
def get_response(user_input, replicate_api):
    """
    Send *user_input* through a lazily-created ConversationChain and return
    the model's reply text.

    Parameters:
        user_input (str): The user's chat message.
        replicate_api (str): Replicate API key used to authenticate.

    Returns:
        str: The "response" field of the chain output, or a fallback message.
    """
    # BUG FIX: the `replicate_api` argument was accepted but ignored; the
    # Replicate client reads REPLICATE_API_TOKEN from the environment, so
    # export it here before the first model call.
    if replicate_api:
        os.environ["REPLICATE_API_TOKEN"] = replicate_api

    if st.session_state['conversation'] is None:
        llm = Replicate(
            model=("meta/meta-llama-3-8b-instruct"),
            model_kwargs={
                "temperature": 0.01,
                "top_p": 0.9,
                "max_length": 128,
            },
        )
        # Summary memory keeps a rolling summary instead of the full history
        st.session_state['conversation'] = ConversationChain(
            llm=llm,
            verbose=True,
            memory=ConversationSummaryMemory(llm=llm),
        )
    # Call the conversation chain
    response_dict = st.session_state['conversation'].invoke(input=user_input)

    # Extract the response field only
    return response_dict.get("response", "No response generated leh!")
67
+
68
# Read the user's chat message from the chat input widget
def get_text():
    """Return the user's chat-box entry, or None when nothing was submitted."""
    return st.chat_input("Say Hello")
72
+
73
# Display chat history accumulated in session state before handling new input
if st.session_state.messages:
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["content"])

# Get user input
user_input = get_text()

if user_input:
    try:
        # Get the response from the LLM (builds the chain on first call)
        response = get_response(user_input, st.session_state['REPLICATE_API_TOKEN'])

        # Append the user message to session state, then echo it
        st.session_state.messages.append({"role": "user", "content": user_input})
        with st.chat_message("user"):
            st.markdown(user_input)

        # Append and display the LLM response
        st.session_state.messages.append({"role": "LLM", "content": response})
        with st.chat_message("LLM"):
            st.markdown(response)

    except Exception as e:
        st.error(f"An error occurred: {e}")