"""Streamlit app: summarize a YouTube video transcript with LLaMA 3.1."""

import os

import streamlit as st
from langchain_community.document_loaders import YoutubeLoader
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Hugging Face credentials come from the "Utube" environment variable.
# Guard against it being unset: assigning None into os.environ raises TypeError.
_hf_token = os.getenv("Utube")
if _hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = _hf_token
    os.environ["HF_TOKEN"] = _hf_token

# Max characters of transcript forwarded to the model (token-budget safety).
TRANSCRIPT_CHAR_LIMIT = 3000


# 🔧 Load the LLaMA 3.1 model
def load_model():
    """Build a ChatHuggingFace client backed by LLaMA 3.1 8B Instruct.

    Returns:
        ChatHuggingFace: chat wrapper around the configured endpoint.
    """
    endpoint = HuggingFaceEndpoint(
        repo_id="meta-llama/Llama-3.1-8B-Instruct",
        provider="nebius",
        temperature=0.5,
        max_new_tokens=150,
        task="conversational",
    )
    # The endpoint already carries repo/provider/sampling settings; the
    # original also passed them (with a conflicting max_new_tokens of 50)
    # to ChatHuggingFace — redundant and ambiguous, so only llm= is kept.
    return ChatHuggingFace(llm=endpoint)


# 🎥 Get transcript from YouTube video
def fetch_transcript(url):
    """Download the transcript of a YouTube video.

    Args:
        url: Full YouTube video URL.

    Returns:
        str: Plain-text transcript content.

    Raises:
        ValueError: If the video has no available transcript.
    """
    loader = YoutubeLoader.from_youtube_url(url)
    docs = loader.load()
    if not docs:
        # loader.load() returns [] when no transcript exists; indexing
        # docs[0] would raise a bare IndexError with no useful message.
        raise ValueError("No transcript found for this video.")
    return docs[0].page_content


# ✨ Generate summary
def summarize(transcript, model, char_limit=TRANSCRIPT_CHAR_LIMIT):
    """Ask the model to summarize (a prefix of) the transcript.

    Args:
        transcript: Raw transcript text.
        model: Chat model exposing ``invoke`` whose response has ``.content``.
        char_limit: Maximum transcript characters sent to the model
            (default keeps the original 3000-character cap).

    Returns:
        str: The model-generated summary.
    """
    if len(transcript) > char_limit:
        transcript = transcript[:char_limit]  # Trim for token safety
    prompt = f"Summarize the following YouTube transcript:\n\n{transcript}"
    response = model.invoke(prompt)
    return response.content


# 🖥️ Streamlit UI
def main():
    """Render the Streamlit page and drive the summarization flow."""
    st.set_page_config(page_title="🎬 YouTube Video Summarizer", layout="centered")
    st.title("🎬 YouTube Video Summarizer")
    st.write("Paste a YouTube video URL below and get a smart summary using LLaMA 3.1 🧠")

    url = st.text_input("📥 Enter YouTube video URL:")

    if st.button("Summarize"):
        if not url.strip():
            st.warning("🚫 Please enter a valid YouTube video URL.")
            return

        with st.spinner("🔄 Fetching transcript and generating summary..."):
            try:
                model = load_model()
                transcript = fetch_transcript(url)
                summary = summarize(transcript, model)
                st.success("✅ Summary:")
                st.write(summary)
            except Exception as e:
                # Top-level UI boundary: surface the failure to the user
                # instead of crashing the Streamlit script run.
                st.error(f"❌ Error: {e}")


# 🚀 Run the app
if __name__ == "__main__":
    main()