Bur3hani committed on
Commit
2a35895
·
verified ·
1 Parent(s): 145e596

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -14
app.py CHANGED
@@ -1,27 +1,30 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
3
 
4
- # --- 1. Load your Model from its Hugging Face Repository ---
5
- # This is the correct, professional way to do it.
 
 
 
6
  MODEL_ID = "Bur3hani/karani_ofline"
7
 
8
  try:
9
  print(f"Loading tokenizer and model from Hub: {MODEL_ID}...")
10
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
11
- model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
12
- print("✅ Model and Tokenizer loaded successfully.")
 
 
13
  except Exception as e:
14
  print(f"❌ Error loading from Hub: {e}")
15
- # Set to None so the app can show an error state if loading fails.
16
- tokenizer = None
17
- model = None
18
 
19
- # --- 2. Define the Prediction Function ---
20
  def get_chat_response(message, history):
21
- if not model or not tokenizer:
22
- return "ERROR: The AI model failed to load. Please check the Space logs for details."
23
 
24
- # Format the conversation history for the model
25
  input_text = ""
26
  for user_turn, bot_turn in history:
27
  input_text += f"user: {user_turn} bot: {bot_turn} "
@@ -32,8 +35,7 @@ def get_chat_response(message, history):
32
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
33
  return response
34
 
35
- # --- 3. Build and Launch the Gradio Interface ---
36
- # This version is simplified to avoid any deprecated arguments.
37
  demo = gr.ChatInterface(
38
  fn=get_chat_response,
39
  title="Karani v1 - AI Secretary",
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
+ import os
4
 
5
+ # --- 1. Get the Hugging Face Token from Space Secrets ---
6
+ # The os.getenv() function securely reads the secret you just created.
7
+ auth_token = os.getenv("HF_TOKEN")
8
+
9
+ # --- 2. Load your Model from its Hub Repository using the Token ---
10
  MODEL_ID = "Bur3hani/karani_ofline"
11
 
12
  try:
13
  print(f"Loading tokenizer and model from Hub: {MODEL_ID}...")
14
+ # We pass the token to the 'from_pretrained' method.
15
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=auth_token)
16
+ model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID, token=auth_token)
17
+ print("✅ Model and Tokenizer loaded successfully using the provided token.")
18
+ MODEL_LOADED = True
19
  except Exception as e:
20
  print(f"❌ Error loading from Hub: {e}")
21
+ MODEL_LOADED = False
 
 
22
 
23
+ # --- 3. Define the Prediction Function ---
24
  def get_chat_response(message, history):
25
+ if not MODEL_LOADED:
26
+ return "ERROR: The AI model failed to load. This could be due to a missing token or an issue with the model repository."
27
 
 
28
  input_text = ""
29
  for user_turn, bot_turn in history:
30
  input_text += f"user: {user_turn} bot: {bot_turn} "
 
35
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
36
  return response
37
 
38
+ # --- 4. Build and Launch the Gradio Interface ---
 
39
  demo = gr.ChatInterface(
40
  fn=get_chat_response,
41
  title="Karani v1 - AI Secretary",