tezodipta commited on
Commit
c969303
·
verified ·
1 Parent(s): 2e78143

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -0
app.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
+
5
# Load Model & Tokenizer
MODEL_NAME = "tezodipta/MindEase-Assistant-v0.1"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# float16 is only safe/fast on GPU; fall back to float32 on CPU-only hosts
# (e.g. a free Hugging Face Space), where half precision is slow or unsupported.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)
10
# Function to Generate Response
def generate_response(prompt: str) -> str:
    """Generate a reply for *prompt* with the causal LM loaded above.

    Tokenizes the prompt, samples up to 200 new tokens, and returns the
    decoded text (prompt echoed back plus the generated continuation,
    special tokens stripped) — same output shape as before.
    """
    # Keep the full tokenizer output so attention_mask travels with input_ids;
    # the original passed only input_ids, which generate() warns about and
    # which mishandles padded inputs.
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)
    # No autograd bookkeeping is needed for generation.
    with torch.inference_mode():
        output = model.generate(
            **encoded,
            # max_new_tokens bounds the *generated* length; the original
            # max_length=200 counted the prompt too, so a long prompt could
            # leave no room to generate at all.
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)
+
16
# Gradio UI: a single text-in / text-out box wired to the generator above.
ui_settings = dict(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="MindEase AI Assistant",
    description="Chat with a Mental Health AI Assistant",
)
interface = gr.Interface(**ui_settings)

# Bind to all interfaces on port 7860 (the Hugging Face Spaces default)
# and request a public share link.
interface.launch(server_name="0.0.0.0", server_port=7860, share=True)