Sankeerth004 commited on
Commit
457c264
·
verified ·
1 Parent(s): 329f8e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -31
app.py CHANGED
@@ -1,40 +1,16 @@
1
- import os
2
  import gradio as gr
3
  import torch
4
  from transformers import BertTokenizer, BertForSequenceClassification, GPT2LMHeadModel, GPT2Tokenizer
5
- from huggingface_hub import list_files
6
 
7
- # --- 1. SETUP AND MODEL LOADING WITH DEBUGGING ---
8
 
9
  BERT_REPO = "Sankeerth004/fitbuddy-bert-intent"
10
  GPT2_REPO = "Sankeerth004/fitbuddy-gpt2-chatbot"
11
 
12
- print("--- Starting Application ---")
13
- print("--- Running Pre-flight Checks for Model Files ---")
14
-
15
- # --- BERT Model File Check ---
16
- try:
17
- print(f"Checking files for BERT repo: {BERT_REPO}")
18
- bert_files = list_files(repo_id=BERT_REPO)
19
- print(f"Found BERT files: {bert_files}")
20
-
21
- # Check for the most important tokenizer file
22
- if 'vocab.txt' not in [os.path.basename(f) for f in bert_files]:
23
- raise FileNotFoundError(
24
- "CRITICAL ERROR: 'vocab.txt' is missing from the BERT repository. "
25
- "Please upload all files from your local './models/bert_intent_model/' folder."
26
- )
27
- print("BERT file check successful.")
28
-
29
- except Exception as e:
30
- print(f"Could not check BERT repository files. Error: {e}")
31
- # Stop the app if we can't verify the files
32
- raise e
33
-
34
- # --- Load Models ---
35
  print("Loading models from Hugging Face Hub...")
36
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
37
 
 
38
  bert_tokenizer = BertTokenizer.from_pretrained(BERT_REPO)
39
  bert_model = BertForSequenceClassification.from_pretrained(BERT_REPO).to(device)
40
  bert_model.eval()
@@ -46,15 +22,15 @@ gpt2_model.config.pad_token_id = gpt2_tokenizer.eos_token_id
46
 
47
  print("Models loaded successfully!")
48
 
49
- # --- 2. DEFINE HELPER FUNCTIONS (No Changes Needed Here) ---
50
 
51
  intent_mapping = {
52
  0: "Body Building", 1: "Meal Plan Recommendation", 2: "Recommend Meditation or Yoga",
53
  3: "Suggest Recovery Exercises", 4: "Weight Loss"
54
  }
55
 
56
def predict_intent(input_text):
    """Return the fitness-intent label the BERT classifier assigns to *input_text*."""
    # Tokenize and move the tensors onto the same device as the model.
    batch = bert_tokenizer(
        input_text, return_tensors='pt', padding=True, truncation=True, max_length=512
    ).to(device)
    with torch.no_grad():
        logits = bert_model(**batch).logits
    label_id = torch.argmax(logits, dim=1).item()
    # Fall back to a sentinel string for any class id outside the mapping.
    return intent_mapping.get(label_id, "Unknown Intent")
@@ -69,7 +45,7 @@ def generate_response(prompt, max_length=200):
69
  )
70
  return gpt2_tokenizer.decode(output[0], skip_special_tokens=True).split('[A]')[-1].strip()
71
 
72
- # --- 3. CREATE THE GRADIO CHATBOT FUNCTION (No Changes Needed Here) ---
73
 
74
  def chat_with_fitbuddy(user_input, history):
75
  if not user_input.strip(): return "Please enter a valid question."
@@ -77,7 +53,7 @@ def chat_with_fitbuddy(user_input, history):
77
  prompt = f"[Q] {user_input} [Intent: {intent}]"
78
  return generate_response(prompt)
79
 
80
- # --- 4. LAUNCH THE GRADIO INTERFACE (No Changes Needed Here) ---
81
 
82
  iface = gr.ChatInterface(
83
  fn=chat_with_fitbuddy,
 
 
1
  import gradio as gr
2
  import torch
3
  from transformers import BertTokenizer, BertForSequenceClassification, GPT2LMHeadModel, GPT2Tokenizer
 
4
 
5
+ # --- 1. LOAD MODELS FROM HUGGING FACE HUB ---
6
 
7
  BERT_REPO = "Sankeerth004/fitbuddy-bert-intent"
8
  GPT2_REPO = "Sankeerth004/fitbuddy-gpt2-chatbot"
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  print("Loading models from Hugging Face Hub...")
11
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
 
13
+ # Load models
14
  bert_tokenizer = BertTokenizer.from_pretrained(BERT_REPO)
15
  bert_model = BertForSequenceClassification.from_pretrained(BERT_REPO).to(device)
16
  bert_model.eval()
 
22
 
23
  print("Models loaded successfully!")
24
 
25
+ # --- 2. DEFINE HELPER FUNCTIONS ---
26
 
27
  intent_mapping = {
28
  0: "Body Building", 1: "Meal Plan Recommendation", 2: "Recommend Meditation or Yoga",
29
  3: "Suggest Recovery Exercises", 4: "Weight Loss"
30
  }
31
 
32
def predict_intent(text):
    """Classify *text* into one of the fitness intents using the BERT model."""
    # Encode the input; .to(device) keeps the tensors alongside the model.
    encoded = bert_tokenizer(
        text, return_tensors='pt', padding=True, truncation=True, max_length=512
    ).to(device)
    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        output = bert_model(**encoded)
    predicted_id = torch.argmax(output.logits, dim=1).item()
    # Unmapped class ids degrade gracefully to "Unknown Intent".
    return intent_mapping.get(predicted_id, "Unknown Intent")
 
45
  )
46
  return gpt2_tokenizer.decode(output[0], skip_special_tokens=True).split('[A]')[-1].strip()
47
 
48
+ # --- 3. CREATE THE GRADIO CHATBOT FUNCTION ---
49
 
50
  def chat_with_fitbuddy(user_input, history):
51
  if not user_input.strip(): return "Please enter a valid question."
 
53
  prompt = f"[Q] {user_input} [Intent: {intent}]"
54
  return generate_response(prompt)
55
 
56
+ # --- 4. LAUNCH THE GRADIO INTERFACE ---
57
 
58
  iface = gr.ChatInterface(
59
  fn=chat_with_fitbuddy,