File size: 1,452 Bytes
0fafec6
767af1b
 
 
 
 
0fafec6
 
 
 
6c04fbf
 
 
767af1b
0fafec6
 
767af1b
 
0fafec6
6c04fbf
767af1b
 
 
 
 
 
 
 
0fafec6
 
 
767af1b
 
0fafec6
 
 
 
 
 
 
 
 
767af1b
0fafec6
767af1b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import json
import os
import streamlit as st
from transformers import pipeline
from PIL import Image

# Load the configuration from config.json
with open('config.json', encoding='utf-8') as f:
    config = json.load(f)

# Set the authentication token.
# NOTE(review): huggingface_hub does NOT read HF_API_TOKEN — it honors
# HF_TOKEN / HUGGING_FACE_HUB_TOKEN. Keep the original variable for any
# external code that may read it, but also export the name the hub
# actually uses so gated/private models authenticate correctly.
os.environ["HF_API_TOKEN"] = config["hf_api_token"]
os.environ.setdefault("HF_TOKEN", config["hf_api_token"])

# Configure the Hugging Face Space
SPACE_NAME = config["space_name"]
SPACE_LINK = config["space_link"]

# Load the model. Streamlit re-executes this entire script on every widget
# interaction, so the pipeline must be cached or the model would be
# reloaded from disk on each button click / keystroke commit.
model_name = config["model_name"]


@st.cache_resource
def _load_pipeline(name):
    """Build and cache the text-generation pipeline for model *name*."""
    return pipeline("text-generation", model=name)


model = _load_pipeline(model_name)

# Create a Streamlit app
st.title("FuzzyLab Chat Application")
st.write("Welcome to FuzzyLab's chat application!")

# Create a text input for the user to enter their message
user_input = st.text_input("Enter your message:")

# Create a file uploader for attachments; allowed extensions come from config.
attachment = st.file_uploader("Upload attachment:", type=config["allowed_file_types"])

# Create a button to trigger the model's response
if st.button("Send"):
    # Check if an attachment was uploaded
    if attachment is not None:
        # Get the attachment file name and type
        attachment_name = attachment.name
        attachment_type = attachment.type

        # Display the attachment information. NOTE(review): the file's
        # content is only echoed back, never passed to the model — confirm
        # whether that is intended.
        st.write(f"Attachment: {attachment_name} ({attachment_type})")

    # Guard against an empty prompt: the original called the model on ""
    # and produced meaningless output.
    if not user_input.strip():
        st.warning("Please enter a message before sending.")
    else:
        # Generate a response using the model (prompt text excluded via
        # return_full_text=False).
        response = model(
            user_input,
            max_length=config["max_length"],
            return_full_text=False,
        )[0]["generated_text"]
        st.write("Model Response:")
        st.write(response)