from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Pick the device once; fall back to CPU when no GPU is available.
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
model = AutoModelForCausalLM.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    # float16 halves memory on GPU; use float32 on CPU, where half
    # precision is poorly supported for generation.
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)


def explain_hazard(helmet="No", zone="Danger", lighting="Poor"):
    # Let the tokenizer render TinyLlama's Zephyr-style chat template
    # (<|system|> ... </s> <|user|> ... </s> <|assistant|>) rather than
    # hand-writing the tags, which previously leaked the function's
    # indentation into the prompt.
    messages = [
        {
            "role": "system",
            "content": "You are a safety officer. Explain the hazard and recommend action.",
        },
        {
            "role": "user",
            "content": (
                f"Worker helmet: {helmet}\n"
                f"Location: {zone} zone\n"
                f"Lighting: {lighting}\n"
                "Explain the risk clearly and suggest action."
            ),
        },
    ]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,  # silence the missing-pad-token warning
    )

    # Decode only the newly generated tokens so the echoed prompt is
    # dropped, instead of string-splitting on a template marker.
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
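

# A minimal usage sketch; the parameter values below are illustrative,
# not part of the original script.
if __name__ == "__main__":
    report = explain_hazard(helmet="No", zone="Danger", lighting="Poor")
    print(report)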