File size: 1,462 Bytes
946d9b6
 
 
 
68d8cd7
 
 
946d9b6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68d8cd7
 
946d9b6
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Use a smaller model for testing; switch to the larger model when GPU resources are available
model_name = "gpt2"


@st.cache_resource
def _load_model_and_tokenizer(name: str):
    """Load and cache the causal-LM model and tokenizer for *name*.

    Streamlit re-executes this whole script on every widget interaction;
    without ``st.cache_resource`` the model would be re-instantiated (and
    possibly re-downloaded) on every button click. The cache keeps a single
    shared copy across reruns and sessions.
    """
    return (
        AutoModelForCausalLM.from_pretrained(name),
        AutoTokenizer.from_pretrained(name),
    )


model, tokenizer = _load_model_and_tokenizer(model_name)

# Set up the Streamlit app interface
st.title("LLM Text Classifier")
st.write("Enter text below, and the language model will classify or respond to it.")

# Take user input
prompt = st.text_input("Enter your text here:")

if st.button("Generate Response"):
    # Check if input is provided
    if prompt:
        with st.spinner("Generating response..."):
            # Tokenize input into model-ready tensors
            tokenized_prompt = tokenizer(prompt, return_tensors="pt")

            # Generate the model's response. inference_mode() disables
            # autograd bookkeeping entirely — we never backprop here, so this
            # cuts memory use and speeds up generation for free.
            with torch.inference_mode():
                response_token_ids = model.generate(
                    tokenized_prompt["input_ids"],
                    attention_mask=tokenized_prompt["attention_mask"],
                    max_new_tokens=50,
                    # GPT-2 defines no pad token; reusing EOS is the standard
                    # workaround and silences the transformers warning.
                    pad_token_id=tokenizer.eos_token_id,
                )

            # Decode and display the generated text (prompt + continuation)
            generated_text = tokenizer.decode(response_token_ids[0], skip_special_tokens=True)
            st.write("**Model's Response:**", generated_text)
    else:
        st.warning("Please enter some text before generating a response.")