File size: 1,143 Bytes
2c74842
 
4b8e1c7
2c74842
ebf5ff6
2c74842
4b8e1c7
51d8364
4b8e1c7
 
 
98bee49
ebf5ff6
4b8e1c7
98bee49
b878812
4b8e1c7
98bee49
 
b878812
4b8e1c7
 
51d8364
4b8e1c7
 
51d8364
98bee49
4b8e1c7
 
98bee49
4b8e1c7
 
98bee49
b878812
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset
import torch

# Streamlit re-executes this whole script on every widget interaction, so
# heavyweight loads must be cached with @st.cache_resource or they repeat
# on every button press (re-downloading/re-loading a 1.3B-param model).
@st.cache_resource
def _load_corpus():
    """Load the BookCorpus dataset once per server process.

    NOTE(review): the dataset is never used anywhere below — confirm it is
    actually needed before keeping this (it is large and slow to load).
    """
    return load_dataset("raddwolf/BookCorpus74M")


@st.cache_resource
def _load_model():
    """Load the tokenizer and causal-LM once per server process."""
    tok = AutoTokenizer.from_pretrained("FabbriSimo01/Facebook_opt_1.3b_Quantized")
    mdl = AutoModelForCausalLM.from_pretrained("FabbriSimo01/Facebook_opt_1.3b_Quantized")
    mdl.eval()  # inference only; disables dropout etc.
    return tok, mdl


# Keep the original module-level names so the UI code below is unchanged.
ds = _load_corpus()
tokenizer, model = _load_model()

# Streamlit interface
st.title("Humanized Text Generation App")

# Input from the user
user_input = st.text_area("Enter your query here:", height=200)

if st.button("Generate Humanized Text"):
    if user_input.strip():
        # Tokenize; truncate long prompts so they fit the model context.
        # `inputs` contains both input_ids and attention_mask.
        inputs = tokenizer(user_input, return_tensors="pt", truncation=True, max_length=512)

        # Generate output. Passing **inputs forwards the attention_mask as
        # well — the original call passed only input_ids, which makes the
        # model attend to padding and can degrade output quality.
        # max_length counts prompt + generated tokens.
        with st.spinner("Generating..."):
            with torch.no_grad():
                outputs = model.generate(**inputs, max_length=2000, num_return_sequences=1)

        # Decode the single returned sequence, dropping special tokens.
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Display the generated text
        st.write(generated_text)
    else:
        st.write("Please enter a valid query.")