import os

import torch
import gradio as gr
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM

# ⛳ Model config
base_model_id = "mistralai/Mistral-7B-Instruct-v0.3"

# 🔐 Load Hugging Face token from environment (None when unset; public
# models still load without one).
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)

# 🔄 Load model and tokenizer with authentication.
# NOTE: `use_auth_token` is deprecated in recent transformers releases;
# the supported keyword is `token`.
tokenizer = AutoTokenizer.from_pretrained(base_model_id, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    # fp16 on GPU for memory/speed, fp32 on CPU where fp16 is unsupported/slow
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
    token=hf_token,
    trust_remote_code=True,
)


def _build_prompt(metrics: dict) -> str:
    """Build the coaching prompt: fixed instructions followed by one
    "Readable Key: value" line per metric and a closing directive."""
    prompt = (
        "You are a world-class biomechanical cricket coach. Given the following player metrics, "
        "write a comprehensive and highly detailed coaching report. Your response should:\n"
        "- Analyze each metric one by one (stride length, hip rotation, knee angle, etc.)\n"
        "- Identify strengths and weaknesses clearly\n"
        "- Explain the biomechanical impact of each weakness\n"
        "- Recommend technical corrections\n"
        "- Suggest specific named drills or techniques\n"
        "- Write in a formal, paragraph-based style, approximately 30–40 lines long\n\n"
    )
    for key, value in metrics.items():
        readable_key = key.replace("_", " ").title()
        prompt += f"{readable_key}: {value}\n"
    prompt += "\nPlease respond only with the final feedback in professional English."
    return prompt


# 🧠 Main coaching function
def ask_coach(stride_length, hip_rotation, knee_angle, shoulder_alignment,
              ground_reaction_force, release_timing):
    """Generate a detailed coaching report from six biomechanical metrics.

    Parameters are plain numbers (as supplied by the Gradio ``Number``
    inputs); returns the model's feedback as a string.
    """
    metrics = {
        "stride_length": stride_length,
        "hip_rotation": hip_rotation,
        "knee_angle": knee_angle,
        "shoulder_alignment": shoulder_alignment,
        "ground_reaction_force": ground_reaction_force,
        "release_timing": release_timing,
    }
    prompt = _build_prompt(metrics)

    # 🔁 Inference
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=2048,
            temperature=0.7,
            top_p=0.95,
            repetition_penalty=1.1,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens so the prompt is never echoed
    # back. This is more robust than stripping `prompt` as a string prefix,
    # which silently fails whenever the tokenizer round-trip does not
    # reproduce the prompt text byte-for-byte.
    prompt_len = inputs["input_ids"].shape[-1]
    result = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return result.strip()


# 🎛 Gradio UI
demo = gr.Interface(
    fn=ask_coach,
    inputs=[
        gr.Number(label="Stride Length (meters)", value=1.8),
        gr.Number(label="Hip Rotation (degrees)", value=75),
        gr.Number(label="Knee Angle (degrees)", value=140),
        gr.Number(label="Shoulder Alignment (degrees)", value=10),
        gr.Number(label="Ground Reaction Force (× body weight)", value=2.3),
        gr.Number(label="Release Timing (seconds)", value=0.45),
    ],
    outputs=gr.Textbox(label="📝 Coach Feedback"),
    title="Cricket Biomechanics Coaching AI",
    description="Enter biomechanical metrics of a cricket player to receive expert-level, detailed coaching feedback.",
)

# 🚀 Launch the app
if __name__ == "__main__":
    demo.launch()