Kberta2014 committed on
Commit
a5bca91
·
verified ·
1 Parent(s): 37a245b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -10
app.py CHANGED
@@ -62,19 +62,15 @@ demo = gr.ChatInterface(
62
 
63
  if __name__ == "__main__":
64
  demo.launch()
 
 
65
  import gradio as gr
66
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
67
- import torch
68
 
69
- model_dir = "./" # your model folder
70
- tokenizer = AutoTokenizer.from_pretrained(model_dir)
71
- model = AutoModelForCausalLM.from_pretrained(model_dir)
72
 
73
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
 
74
 
75
- def generate_answer(prompt):
76
- output = generator(prompt, max_length=100, do_sample=True, top_p=0.95, temperature=0.7)
77
- return output[0]['generated_text']
78
 
79
- gr.Interface(fn=generate_answer, inputs="text", outputs="text", title="Medical Chatbot").launch()
80
 
 
62
 
63
  if __name__ == "__main__":
64
  demo.launch()
65
from transformers import pipeline
import gradio as gr

# Build the text-generation pipeline once at module load so the model is
# initialized a single time, not once per request.
# NOTE(review): pipeline() downloads "kberta2014/MedicalChatBot" from the
# Hugging Face Hub on first run — confirm network access at startup.
pipe = pipeline("text-generation", model="kberta2014/MedicalChatBot")


def chat(message):
    """Return a generated chatbot reply for *message*.

    At most 100 new tokens are generated; ``do_sample=True`` makes replies
    non-deterministic. NOTE(review): text-generation pipelines typically
    echo the prompt at the start of ``generated_text`` — confirm whether
    the UI should strip it.
    """
    return pipe(message, max_new_tokens=100, do_sample=True)[0]['generated_text']


# Launch a minimal Gradio UI at import time; launch() blocks until the
# server is stopped.
gr.Interface(fn=chat, inputs="text", outputs="text", title="🩺 Medical ChatBot").launch()
 
 
75
 
 
76