"""Interactive Streamlit chatbot with small demo "games and apps".

Type a message and press Send; the reply is generated by a causal LM
(GPT-2 by default). Certain phrases in the message unlock extra demo
sections: "play game", "show presentation", "launch web app".

Run with: streamlit run chatbot_app.py
"""

import random

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load pre-trained model and tokenizer once at startup.
model_name = "gpt2"  # You can replace this with any other suitable model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Streamlit app
st.title("Interactive Chatbot with Games and Apps")

# Chat interface
user_input = st.text_input("You:", "")

# NOTE: Streamlit reruns this whole script on every widget interaction, and
# a button only reads True on the rerun immediately after its click.  If the
# game/app sections were nested inside `if st.button("Send"):`, clicking any
# inner button (Play, Submit) would rerun the script with Send == False and
# the inner section would vanish before its click was handled.  To keep the
# sections alive across reruns we persist the last sent message and the
# generated reply in st.session_state and render the sections from that.
if st.button("Send") and user_input.strip():
    # Generate a reply for the freshly sent message.
    input_ids = tokenizer.encode(user_input, return_tensors="pt")
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=100,
            num_return_sequences=1,
            # GPT-2 has no pad token; use EOS to silence the generate warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    st.session_state["last_input"] = user_input
    st.session_state["last_response"] = tokenizer.decode(
        output[0], skip_special_tokens=True
    )

# Render the last reply (if any) on every rerun so it survives inner clicks.
if "last_response" in st.session_state:
    st.text_area("Chatbot:", value=st.session_state["last_response"], height=200)

# Check the last *sent* message (not the live text box) for special commands.
last_input = st.session_state.get("last_input", "").lower()

if "play game" in last_input:
    st.subheader("Interactive Game")
    st.write("Here's a simple game:")
    game_choice = st.selectbox("Choose your move:", ["Rock", "Paper", "Scissors"])
    if st.button("Play"):
        computer_choice = random.choice(["Rock", "Paper", "Scissors"])
        st.write(f"Computer chose: {computer_choice}")
        if game_choice == computer_choice:
            st.write("It's a tie!")
        elif (game_choice == "Rock" and computer_choice == "Scissors") or \
             (game_choice == "Paper" and computer_choice == "Rock") or \
             (game_choice == "Scissors" and computer_choice == "Paper"):
            st.write("You win!")
        else:
            st.write("Computer wins!")

elif "show presentation" in last_input:
    st.subheader("Interactive Presentation")
    st.write("Here's a simple presentation:")
    slide_number = st.slider("Select slide", 1, 5)
    st.write(f"Slide {slide_number} content goes here.")

elif "launch web app" in last_input:
    st.subheader("Interactive Web App")
    st.write("Here's a simple web app:")
    name = st.text_input("Enter your name:")
    age = st.number_input("Enter your age:", min_value=0, max_value=120)
    if st.button("Submit"):
        st.write(f"Hello, {name}! You are {age} years old.")

# Run the app using: streamlit run chatbot_app.py