BytArch committed on
Commit
36b71ab
·
verified ·
1 Parent(s): 117b08c

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -54
app.py DELETED
@@ -1,54 +0,0 @@
1
- import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
- import torch
4
-
5
# Run on the GPU when CUDA is available, otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Hugging Face Hub repo holding the fine-tuned chat model and its tokenizer.
model_path = "BytArch/source-mini"

# Download (or reuse the local cache of) the tokenizer and weights, then
# move the model to the selected device; `.to()` returns the model itself.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path).to(device)
-
13
def chat_interface(user_message):
    """Generate one assistant reply for *user_message*.

    Builds a single-turn "System/User/Assistant" prompt, samples a completion
    from the module-level `model`, and returns only the assistant's text.

    Args:
        user_message: The raw text typed by the user.

    Returns:
        The model's reply as a stripped string.
    """
    system_prompt_text = (
        "You are source-mini-v2, a chatbot developed by BytArch. "
        "Your creator is Joshua Kelly. Your purpose is to assist users with "
        "nursing and medical inquiries and provide helpful guidance based on "
        "pre-trained data. BytArch focuses on creating AI models for practical "
        "and educational purposes, especially in nursing and medical assistance. "
        "BytArch models like source-mini-v2 are trained on curated medical and "
        "nursing datasets to provide accurate and safe information. They are "
        "designed to assist users responsibly, but they are not substitutes for "
        "professional advice."
    )

    prompt = f"System: {system_prompt_text}\nUser: {user_message}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # Inference only — disable autograd so generation does not build a graph
    # (saves memory and time; the original ran generate() with grad enabled).
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            top_k=30,
            top_p=0.85,
            temperature=0.7,
        )

    # generate() returns prompt + completion; decode only the newly generated
    # tokens instead of string-splitting the full decoded sequence, which was
    # fragile (it relied on the literal "Assistant:" surviving decoding).
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Fallback: some models re-emit the role tag; keep only the text after it.
    if "Assistant:" in response:
        return response.split("Assistant:", 1)[1].strip()
    return response.strip()
45
-
46
# Minimal one-box UI: a single text input mapped straight through
# chat_interface() to a single text output.
iface = gr.Interface(
    fn=chat_interface,
    inputs="text",
    outputs="text",
    title="source-mini Medical Assistant",
)
52
-
53
# Script entry point: serve on all interfaces at the conventional
# Hugging Face Spaces port (7860).
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)