import torch
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset

# Load the dataset (note: it is loaded here but not used by the generation pipeline below)
ds = load_dataset("raddwolf/BookCorpus74M")

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("FabbriSimo01/Facebook_opt_1.3b_Quantized")
model = AutoModelForCausalLM.from_pretrained("FabbriSimo01/Facebook_opt_1.3b_Quantized")
model.eval()

# Streamlit interface
st.title("Humanized Text Generation App")

# Input from the user
user_input = st.text_area("Enter your query here:", height=200)

if st.button("Generate Humanized Text"):
    if user_input.strip():
        # Tokenize the input, truncating long queries to 512 tokens
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=512)

        # Generate output (pass the attention mask explicitly to avoid padding-related warnings)
        with torch.no_grad():
            outputs = model.generate(
                inputs.input_ids,
                attention_mask=inputs.attention_mask,
                max_length=2000,
                num_return_sequences=1,
            )

        # Decode the generated text
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Display the generated text
        st.write(generated_text)
    else:
        st.write("Please enter a valid query.")
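
# Usage note (a minimal sketch, assuming this script is saved as app.py):
# launch the interface from the command line with
#   streamlit run app.py
# The model and dataset are re-downloaded/re-loaded on every Streamlit rerun;
# wrapping the loading calls in functions decorated with st.cache_resource
# is one common way to avoid that, but is left out here to keep the script minimal.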