WSLINMSAI committed on
Commit
2be3aa4
·
verified ·
1 Parent(s): eb8b8e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -8
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from fuzzywuzzy import process
3
  from transformers import pipeline
4
 
5
- # 1) Our 20 dental terms:
6
  dental_terms = {
7
  "cavity": "A cavity is a hole in a tooth caused by decay.",
8
  "gingivitis": "Gingivitis is the inflammation of the gums, often caused by plaque buildup.",
@@ -27,7 +27,6 @@ dental_terms = {
27
  }
28
 
29
  # 2) Set up a Transformer-based text generation pipeline
30
- # (You can choose any model on Hugging Face; "gpt2" is just an example.)
31
  generation_pipeline = pipeline("text-generation", model="gpt2")
32
 
33
  def chatbot_response(message, history):
@@ -53,11 +52,11 @@ def chatbot_response(message, history):
53
  print(f"Closest Match: {closest_match}, Score: {score}")
54
 
55
  if score >= 80:
56
- # We suspect the user intended a known term
57
  return f"Did you mean '{closest_match}'? {dental_terms[closest_match]}"
58
  else:
59
  # 3) If no good match, let transformer-based AI handle it
60
- # We'll generate a short text response.
61
  generated = generation_pipeline(
62
  message,
63
  max_length=100, # adjust as needed
@@ -67,18 +66,16 @@ def chatbot_response(message, history):
67
  top_k=50
68
  )
69
  ai_response = generated[0]["generated_text"]
70
- # Optionally, you might want to trim the prompt out of the generated text,
71
- # but here we'll just return it as is.
72
  print(f"Transformer-based response: {ai_response}")
73
  return ai_response
74
 
75
- # 3) Gradio ChatInterface (or you can use gr.Interface)
76
  demo = gr.ChatInterface(
77
  fn=chatbot_response,
78
  title="Hybrid Dental Terminology Chatbot",
79
  description=(
80
  "Enter a dental term to get its definition (20 known terms). "
81
- "If the term isn't recognized, a transformer-based model will respond."
82
  )
83
  )
84
 
 
2
  from fuzzywuzzy import process
3
  from transformers import pipeline
4
 
5
+ # 1) 20 dental terms:
6
  dental_terms = {
7
  "cavity": "A cavity is a hole in a tooth caused by decay.",
8
  "gingivitis": "Gingivitis is the inflammation of the gums, often caused by plaque buildup.",
 
27
  }
28
 
29
  # 2) Set up a Transformer-based text generation pipeline
 
30
  generation_pipeline = pipeline("text-generation", model="gpt2")
31
 
32
  def chatbot_response(message, history):
 
52
  print(f"Closest Match: {closest_match}, Score: {score}")
53
 
54
  if score >= 80:
55
+ # Suspect the user intended a known term
56
  return f"Did you mean '{closest_match}'? {dental_terms[closest_match]}"
57
  else:
58
  # 3) If no good match, let transformer-based AI handle it
59
+ # Generate a short text response.
60
  generated = generation_pipeline(
61
  message,
62
  max_length=100, # adjust as needed
 
66
  top_k=50
67
  )
68
  ai_response = generated[0]["generated_text"]
 
 
69
  print(f"Transformer-based response: {ai_response}")
70
  return ai_response
71
 
72
+ # 3) Gradio ChatInterface
73
  demo = gr.ChatInterface(
74
  fn=chatbot_response,
75
  title="Hybrid Dental Terminology Chatbot",
76
  description=(
77
  "Enter a dental term to get its definition (20 known terms). "
78
+ "If the term isn't recognized, a transformer-based model will respond :) "
79
  )
80
  )
81