from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# TinyLlama-1.1B-Chat uses the Zephyr-style <|system|>/<|user|>/<|assistant|> chat format.
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")

device = "cuda" if torch.cuda.is_available() else "cpu"
# fp16 halves memory on GPU; fall back to fp32 on CPU, where fp16 kernels are poorly supported.
dtype = torch.float16 if device == "cuda" else torch.float32
model = AutoModelForCausalLM.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=dtype
).to(device)

def explain_hazard(helmet="No", zone="Danger", lighting="Poor"):
    prompt = f"""<|system|>
You are a safety officer. Explain the hazard and recommend action.
<|user|>
Worker helmet: {helmet}
Location: {zone} zone
Lighting: {lighting}
Explain the risk clearly and suggest action.
<|assistant|>
"""
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
    # skip_special_tokens drops <s>/</s> but leaves the plain-text role tags in place,
    # so splitting on <|assistant|> isolates the model's reply.
    explanation = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return explanation.split("<|assistant|>")[-1].strip()
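
# Usage sketch: a minimal call with the function's default inputs. Sampling is
# enabled (do_sample=True), so the wording of the explanation varies run to run.
if __name__ == "__main__":
    report = explain_hazard(helmet="No", zone="Danger", lighting="Poor")
    print(report)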