Bur3hani committed on
Commit
833006b
·
verified ·
1 Parent(s): ede2f88

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -23
app.py CHANGED
@@ -1,31 +1,25 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
 
4
- # --- 1. Load your Standalone Model from the Fixed Hugging Face Hub Repository ---
5
  MODEL_ID = "Bur3hani/karani_ofline"
6
 
7
  try:
8
- print(f"Loading tokenizer from Hub: {MODEL_ID}...")
9
- # AutoTokenizer will now work because all necessary files are present.
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
11
- print("✅ Tokenizer loaded successfully.")
12
-
13
- print(f"Loading model from Hub: {MODEL_ID}...")
14
  model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
15
- print("✅ Model loaded successfully.")
16
-
17
  except Exception as e:
18
- # This will provide a clear error message in the logs if something is still wrong.
19
- print(f"❌ A critical error occurred during model loading: {e}")
20
  tokenizer = None
21
  model = None
22
 
23
-
24
  # --- 2. Define the Prediction Function ---
25
  def get_chat_response(message, history):
26
  if not model or not tokenizer:
27
- return "ERROR: Model or tokenizer not loaded. Please check the logs."
28
 
 
29
  input_text = ""
30
  for user_turn, bot_turn in history:
31
  input_text += f"user: {user_turn} bot: {bot_turn} "
@@ -36,20 +30,13 @@ def get_chat_response(message, history):
36
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
37
  return response
38
 
39
-
40
- # --- 3. Build and Launch the Gradio Chat Interface ---
41
  demo = gr.ChatInterface(
42
  fn=get_chat_response,
43
- title="Karani v1 - AI Secretary (Offline Model)",
44
  description="A conversational AI assistant for Kiswahili, powered by a custom fine-tuned model.",
45
- chatbot=gr.Chatbot(height=500),
46
- textbox=gr.Textbox(placeholder="Andika ujumbe wako hapa...", container=False, scale=7),
47
- theme="soft",
48
- examples=[["Habari za asubuhi?"], ["Ni nini mpango wa leo?"], ["Naweza kupata muhtasari wa habari?"]],
49
- cache_examples=False,
50
- retry_btn=None,
51
- undo_btn="Futa (Delete)",
52
- clear_btn="Futa Mazungumzo (Clear Chat)",
53
  )
54
 
55
  if __name__ == "__main__":
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
 
4
+ # --- 1. Load your Model from the now-fixed Hub Repository ---
5
  MODEL_ID = "Bur3hani/karani_ofline"
6
 
7
  try:
 
 
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 
 
 
9
  model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
10
+ print("✅ Tokenizer and Model loaded successfully.")
 
11
  except Exception as e:
12
+ print(f"❌ Error loading model/tokenizer: {e}")
13
+ # Set to None so the app can show an error state
14
  tokenizer = None
15
  model = None
16
 
 
17
  # --- 2. Define the Prediction Function ---
18
  def get_chat_response(message, history):
19
  if not model or not tokenizer:
20
+ return "ERROR: The AI model could not be loaded. Please check the Space logs."
21
 
22
+ # Format the conversation history for the model
23
  input_text = ""
24
  for user_turn, bot_turn in history:
25
  input_text += f"user: {user_turn} bot: {bot_turn} "
 
30
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
31
  return response
32
 
33
+ # --- 3. Build and Launch the Gradio Interface ---
34
+ # This is a simplified and more robust version of the interface.
35
  demo = gr.ChatInterface(
36
  fn=get_chat_response,
37
+ title="Karani v1 - AI Secretary",
38
  description="A conversational AI assistant for Kiswahili, powered by a custom fine-tuned model.",
39
+ examples=[["Habari za asubuhi?"], ["Ni nini mpango wa leo?"]],
 
 
 
 
 
 
 
40
  )
41
 
42
  if __name__ == "__main__":