File size: 1,743 Bytes
8fa3acc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
from typing import Union, List

from datasets import Dataset
from datasets.formatting.formatting import LazyRow

from src.language_model.language_model_abstraction import LanguageModel
from src.language_model.private_language_model_factory import (
    private_language_model_factory,
)
from src.task.task import Task, TaskType


class RemoteLLMModel(LanguageModel):
    """
    LLM Model based on private remote LLM provider (e.g. OpenAI) and pipeline mechanism for inference.
    """

    def __init__(
        self,
        model_name: str,
        token: Union[str, None] = None,
    ):
        super().__init__(model_name)
        self._model_name = model_name
        # Kept for interface compatibility; presumably an API credential — confirm with callers.
        self._token = token
        # Provider-specific model object; all per-row calls are delegated to it.
        self.model = private_language_model_factory(model_name=self._model_name)

    def generate(self, rows: LazyRow) -> Union[str, List[str]]:
        """Free-form generation on a single row's "text" field, delegated to the underlying model."""
        return self.model.predict(rows["text"])

    def infer(self, rows: LazyRow) -> Union[str, List[str]]:
        """Inference on a single row's "text" field, delegated to the underlying model."""
        return self.model.predict(rows["text"])

    def predict(self, evaluation_dataset: Dataset, task: Task) -> List:
        """Run the task row-by-row over the dataset and return the "prediction" column as a list.

        For INFERENCE tasks, the underlying model is first configured with the
        task's possible ground-truth labels via function calling.
        """
        if task.task_type == TaskType.INFERENCE:
            possible_labels = task.dataset.possible_ground_truths
            self.model.init_function_calling(
                possible_labels, tool_choices=self.model.tool_choices
            )
            row_fn = self.infer
        else:
            row_fn = self.generate

        mapped_dataset = evaluation_dataset.map(
            row_fn,
            batched=False,
            desc=f"Running evaluation for task: {task.task_name}",
            remove_columns="text",
        )

        self.model.print_none()

        return list(mapped_dataset["prediction"])