# Hugging Face Space: Karani v1 (header text captured from the Spaces page; not code).
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import os

# --- 1. Securely Get the Hugging Face Token from Space Secrets ---
auth_token = os.getenv("HF_TOKEN")

# --- DEBUGGING STEP: confirm the secret is visible to the app ---
# These messages go to the Space's logs; the token value itself is never printed.
if auth_token is None:
    print("❌ HF_TOKEN secret was NOT found. Please double-check it is set in your Space Settings.")
else:
    print("✅ HF_TOKEN secret was found by the application.")
# --- END DEBUGGING STEP ---

# --- 2. Load your Model using the Token ---
MODEL_ID = "Bur3hani/karani_ofline"
MODEL_LOADED = False  # flipped to True only after both artifacts load cleanly
try:
    if auth_token:
        print(f"Attempting to load tokenizer and model from Hub: {MODEL_ID}...")
        # Private-repo download requires the token on both calls.
        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=auth_token)
        model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID, token=auth_token)
        print("✅ Model and Tokenizer loaded successfully.")
        MODEL_LOADED = True
    else:
        # Logged so a missing secret explains the disabled chatbot downstream.
        print("Skipping model loading because HF_TOKEN is missing.")
except Exception as e:
    # Broad catch is deliberate: startup must not crash the Space; the UI
    # reports the failure via MODEL_LOADED instead.
    print(f"❌ An error occurred while loading the model from the Hub: {e}")
| # --- 3. Define the Prediction Function --- | |
def get_chat_response(message, history):
    """Produce the model's reply for one chat turn.

    The conversation so far (``history``, a list of ``(user, bot)`` string
    pairs as supplied by ``gr.ChatInterface``) is flattened into a single
    prompt string, the new ``message`` is appended, and one beam-search
    generation is decoded back to text.

    Returns an error string instead of raising when the model never loaded,
    so the chat UI stays usable and points the operator at the logs.
    """
    if not MODEL_LOADED:
        return "ERROR: The AI model failed to load. Please check the Space logs for the exact error."
    # Serialize prior turns as "user: ... bot: ... " and tack on the new message.
    context = "".join(f"user: {turn_user} bot: {turn_bot} " for turn_user, turn_bot in history)
    prompt = context + f"user: {message}"
    # Truncate long conversations to the model's 512-token input window.
    encoded = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    generated = model.generate(**encoded, max_length=60, num_beams=4, early_stopping=True)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
| # --- 4. Build and Launch the Gradio Interface --- | |
# --- 4. Build and Launch the Gradio Interface ---
# Seed prompts shown beneath the chat box (each inner list is one example turn).
_EXAMPLE_PROMPTS = [["Habari za asubuhi?"], ["Ni nini mpango wa leo?"]]

# ChatInterface wires get_chat_response into a ready-made chat UI.
demo = gr.ChatInterface(
    fn=get_chat_response,
    title="Karani v1 - AI Secretary",
    description="A conversational AI assistant for Kiswahili, powered by a custom fine-tuned model.",
    examples=_EXAMPLE_PROMPTS,
)

# Launch only when run as the entry script (Spaces executes this file directly).
if __name__ == "__main__":
    demo.launch()