import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import os
from huggingface_hub import login

# Hugging Face Authentication
hf_token = os.getenv("H_API_KEY")

if not hf_token:
    st.error("HUGGINGFACE_TOKEN not found. Please set your Hugging Face token.")
    st.stop()

login(token=hf_token)
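# Note: login() registers the token with huggingface_hub for this session, so
# the explicit token=hf_token arguments passed below are redundant but harmless.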

# Load Model & Tokenizer
model_name = "meta-llama/Llama-2-7b-chat-hf"  # Use the chat model

@st.cache_resource
def load_model():
    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)

    # Load model with FP16 (half-precision) on CPU
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="cpu",  # Force CPU usage
        torch_dtype=torch.float16,  # Use FP16 to reduce memory usage
        token=hf_token
    )
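
    # Note: float16 on CPU can be slow, and some ops lack half-precision CPU
    # kernels in older PyTorch builds; if generation errors out here,
    # torch.float32 (or bfloat16 where supported) is a reasonable fallback.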

    return tokenizer, model

tokenizer, model = load_model()

# Function to classify text using a prompt-based approach
def classify_text(text, classes):
    # Build a classification prompt; keep it free of stray indentation and
    # trailing whitespace, which can degrade the model's completion
    prompt = (
        f"Classify the following text into one of these categories: "
        f"{', '.join(classes)}.\n"
        f"Text: {text}\n"
        f"Category:"
    )
    
    # Tokenize the prompt
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    
    # Generate the category label; max_new_tokens bounds only the continuation,
    # unlike max_length, which counts the prompt tokens too and can fail on
    # long inputs
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=10, num_return_sequences=1)
    
    # Decode the output
    decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
    
    # Extract the predicted class: take the text after the last "Category:"
    # and keep only its first line, since the model may continue generating
    raw_prediction = decoded_output.split("Category:")[-1].strip()
    predicted_class = raw_prediction.splitlines()[0] if raw_prediction else ""
    return predicted_class
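
# Illustrative example (the exact string returned depends on the model's
# greedy completion, so treat this as an assumption, not a guarantee):
#   classify_text("The service was wonderful!", ["Positive", "Negative", "Neutral"])
#   -> "Positive"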

# Custom CSS to render all text in flame orange (#E25822)
st.markdown(
    """
    <style>
    /* Target all text elements */
    body, h1, h2, h3, h4, h5, h6, p, div, span, input, textarea, button, label {
        color: #E25822 !important;
    }
    </style>
    """,
    unsafe_allow_html=True
)

# Streamlit UI
st.title("📝 Text Classification with LLaMA 2 Chat (FP16)")
st.write("Powered by LLaMA 2 Chat & Hugging Face")

# User Input
user_input = st.text_area("Enter the text to classify:")

# Define classes for classification
classes = ["Positive", "Negative", "Neutral"]

if st.button("Classify"):
    if user_input:
        # Perform classification
        predicted_class = classify_text(user_input, classes)

        # Display result
        st.subheader("Predicted Class:")
        st.write(predicted_class)
    else:
        st.warning("Please enter some text to classify.")

st.markdown("---")
st.write("🔍 This app classifies text using the LLaMA 2 Chat model with FP16.")