File size: 978 Bytes
1937af7
d129cbe
1b88037
200d90e
d129cbe
 
5045a62
1e259d8
200d90e
 
d129cbe
 
ae7e736
 
d129cbe
 
 
ae7e736
9e52199
 
 
 
ae7e736
d129cbe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Hugging Face model id of the fine-tuned therapy chatbot.
model_name = "llmModeluser/therapy_trained_model"

# Load the pre-trained causal language model weights from the Hub.
model = AutoModelForCausalLM.from_pretrained(model_name)

# Load the matching tokenizer — reuse model_name rather than repeating
# the string literal, so model and tokenizer can never drift apart.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Combine model and tokenizer into a text-generation pipeline.
nlp = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chat_response(user_input):
    """Generate a single chatbot reply for ``user_input``.

    Uses the module-level ``nlp`` text-generation pipeline with sampling
    (top-k=50, top-p=0.95) and a 100-token length cap.

    Returns:
        str: only the newly generated text. ``return_full_text=False``
        stops the pipeline from prepending the user's own message to the
        reply, which the original code did (the prompt was echoed back).
    """
    outputs = nlp(
        user_input,
        max_length=100,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
        return_full_text=False,  # don't echo the prompt in the reply
    )
    return outputs[0]["generated_text"]

# Wire the response function into a minimal single-textbox web UI.
message_box = gr.Textbox(lines=1, placeholder="Enter your message here...")

iface = gr.Interface(
    fn=chat_response,
    inputs=message_box,
    outputs="text",
    title="Therapy Chatbot",
)

# Serve the app locally on port 7861.
iface.launch(server_port=7861)