File size: 1,322 Bytes
3233259
ebc5242
 
 
 
 
 
 
3233259
 
 
ebc5242
 
 
 
3233259
ebc5242
3233259
 
ebc5242
 
 
 
 
 
 
 
 
 
 
 
 
 
3233259
ebc5242
 
 
 
 
36e89ec
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# ---------------------------------------------------------------------------
# Model loading
# ---------------------------------------------------------------------------
model_name = "microsoft/DialoGPT-medium"


@st.cache_resource
def _load_chat_model(name):
    """Load and cache the tokenizer/model pair for *name*.

    Streamlit re-executes this entire script on every user interaction;
    st.cache_resource ensures the large model weights are loaded once per
    server process instead of once per rerun.
    """
    return AutoTokenizer.from_pretrained(name), AutoModelForCausalLM.from_pretrained(name)


tokenizer, model = _load_chat_model(model_name)

# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.title("E-Commerce Chatbot")
st.write("Ask about our products or browse categories. Type 'exit' to end the conversation.")

# Conversation state must live in st.session_state: a plain module-level
# variable (the original `chat_history_ids = None`) is reset on every rerun,
# so the bot could never remember previous turns.
if "chat_history_ids" not in st.session_state:
    st.session_state.chat_history_ids = None

# User input
query = st.text_input("Your Query:")

if query:
    if query.strip().lower() == "exit":
        # Honor the advertised 'exit' command: drop the accumulated history.
        st.session_state.chat_history_ids = None
        st.write("Chatbot: Goodbye!")
    else:
        # Tokenize the new user turn, terminated by EOS as DialoGPT expects.
        new_input_ids = tokenizer.encode(query + tokenizer.eos_token, return_tensors="pt")

        # Append the new turn to the persisted history.
        history = st.session_state.chat_history_ids
        history = new_input_ids if history is None else torch.cat([history, new_input_ids], dim=-1)

        # Keep the prompt comfortably inside generate()'s max_length=1000
        # budget: an unbounded history would eventually leave no room for a
        # reply and overflow DialoGPT's 1024-token context window.
        max_prompt_tokens = 900
        if history.shape[-1] > max_prompt_tokens:
            history = history[:, -max_prompt_tokens:]

        # Generate a response (inference only — no gradients needed).
        with torch.no_grad():
            response_ids = model.generate(
                history,
                max_length=1000,
                pad_token_id=tokenizer.eos_token_id,
                attention_mask=torch.ones(history.shape, dtype=torch.long),
            )

        # Persist the full exchange (prompt + reply) so the next turn sees
        # both sides of the conversation; the original stored only user
        # turns, which breaks DialoGPT's expected multi-turn format.
        st.session_state.chat_history_ids = response_ids

        # Decode only the newly generated tokens (everything past the prompt).
        response = tokenizer.decode(
            response_ids[:, history.shape[-1]:][0], skip_special_tokens=True
        )

        # Display the chatbot's response
        st.write(f"Chatbot: {response}")