import torch
from typing import Any, Dict
from transformers import AutoTokenizer, AutoModelForMaskedLM

class EndpointHandler:
    def __init__(self, path=""):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Leadership model: base model name, fine-tuned checkpoint path, and PET-style prompt pattern
        self.leadership_model_name = "roberta-large-mnli"
        self.leadership_model_path = path + "/pet-leadership-model-roberta-large-mnli_bs4_gas4_lr1e-05_ep5" + "/checkpoint-1855"
        self.leadership_pattern = "Sentence: {} Question: Does this show leadership? Answer: <mask>"

        # Collaboration model: base model name, fine-tuned checkpoint path, and PET-style prompt pattern
        self.collab_model_name = "roberta-large-mnli"
        self.collab_model_path = path + "/pet-collaboration-model-roberta-large-mnli_bs4_gas4_lr1e-05_ep5" + "/checkpoint-1560"
        self.collab_pattern = "Sentence: {} Question: Does this show teamwork? Answer: <mask>"
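        # A filled prompt looks like, e.g.:
        #   "Sentence: Led the team. Question: Does this show teamwork? Answer: <mask>"
        # and the token the model predicts at <mask> is read as a Yes/No verbalizer.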

        # Load the tokenizers (from the base model) and the fine-tuned masked-LM
        # checkpoints, moving each model to the target device once at startup
        self.leadership_tokenizer = AutoTokenizer.from_pretrained(self.leadership_model_name)
        self.collab_tokenizer = AutoTokenizer.from_pretrained(self.collab_model_name)
        self.model_lead = AutoModelForMaskedLM.from_pretrained(self.leadership_model_path).to(self.device).eval()
        self.model_collab = AutoModelForMaskedLM.from_pretrained(self.collab_model_path).to(self.device).eval()

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
       data args:
            inputs (:obj: `str` | `PIL.Image` | `np.array`)
            kwargs
      Return:
            A :obj:`list` | `dict`: will be serialized and returned
        """
        sentence = data["inputs"]
        pl, pt, pc, pct = self.extract_skill_quality(sentence)
        return {"leadership": pl, "leadership_token": pt, "collaboration": pc, "collaboration_token": pct}
    

    def predict_trait(self, model, sentence, pattern, task_name):
        prompt = pattern.format(sentence)
        # Select appropriate tokenizer based on task
        tokenizer = self.leadership_tokenizer if task_name == "leadership" else self.collab_tokenizer
        
        enc = tokenizer(prompt, return_tensors="pt").to(self.device)

        # Run inference without tracking gradients; the model already lives on self.device
        with torch.no_grad():
            outputs = model(**enc)
        logits = outputs.logits

        # Find the <mask> token position
        mask_index = (enc["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
        mask_logits = logits[0, mask_index, :]
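        # mask_logits has shape (num_masks, vocab_size); each pattern contains exactly one <mask>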

        # Predicted token
        pred_token_id = mask_logits.argmax(dim=-1).item()
        pred_token = tokenizer.decode([pred_token_id]).strip()

        # We label 1 if the model predicted something starting with "yes", else 0
        pred_label = 1 if pred_token.lower().startswith("yes") else 0
        return pred_label, pred_token
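
    # The hard argmax decision above can be refined, PET-style, by comparing the
    # mask probabilities of explicit verbalizer tokens. The sketch below is a
    # hypothetical addition, not part of the original handler; it assumes the
    # verbalizers " Yes" and " No" each map to a single RoBERTa BPE token.
    def predict_trait_proba(self, model, sentence, pattern, task_name):
        tokenizer = self.leadership_tokenizer if task_name == "leadership" else self.collab_tokenizer
        enc = tokenizer(pattern.format(sentence), return_tensors="pt").to(self.device)
        with torch.no_grad():
            logits = model(**enc).logits
        mask_index = (enc["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
        mask_logits = logits[0, mask_index, :].squeeze(0)
        # Encode verbalizers with a leading space so each tokenizes to a single BPE id
        yes_id = tokenizer.encode(" Yes", add_special_tokens=False)[0]
        no_id = tokenizer.encode(" No", add_special_tokens=False)[0]
        probs = torch.softmax(mask_logits[[yes_id, no_id]], dim=-1)
        return probs[0].item()  # probability mass on "Yes" relative to "No"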

    def extract_skill_quality(self, sentence):
        # Check leadership
        pl, pt = self.predict_trait(self.model_lead, sentence, self.leadership_pattern, "leadership")
        # Check collaboration 
        pc, pct = self.predict_trait(self.model_collab, sentence, self.collab_pattern, "collaboration")
        return pl, pt, pc, pct
    

if __name__ == "__main__":
    # Initialize the handler; path="." expects the fine-tuned checkpoint
    # directories to sit next to this file
    handler = EndpointHandler(path=".")

    # Test sentences
    test_sentences = [
        "I am leading a team of engineers.",
        "I am not leading my team.",
        "Exemplified the second-to-none customer service delivery in all interactions with customers and potential clients",
        "Collaborated with cross-functional teams to deliver the product on time.",
        "Finished my work on time.",
        "Mentored interns and coordinated weekly sync-ups."
    ]

    # Process each sentence
    for sentence in test_sentences:
        print(f"Sentence: \"{sentence}\"")
        result = handler({"inputs": sentence})
        
        # Format leadership prediction
        lead_token = result["leadership_token"]
        lead_pred = "Yes" if result["leadership"] == 1 else "No"
        print(f"  Leadership Prediction: {lead_pred} (Predicted token: '{lead_token}')")
        
        # Format collaboration prediction
        collab_token = result["collaboration_token"]
        collab_pred = "Yes" if result["collaboration"] == 1 else "No"
        print(f"  Collaboration Prediction: {collab_pred} (Predicted token: '{collab_token}')")
        print()
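
    # Once deployed as a custom handler on Hugging Face Inference Endpoints, the
    # same payload can be sent over HTTP. A minimal sketch; the endpoint URL and
    # token are placeholders, not real values:
    #
    #   import requests
    #   resp = requests.post(
    #       "https://<your-endpoint>.endpoints.huggingface.cloud",
    #       headers={"Authorization": "Bearer <hf_token>"},
    #       json={"inputs": "Led a team of five engineers."},
    #   )
    #   print(resp.json())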