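// Unit tests for the "text-generation" pipeline. The tests run against a tiny
// random-weight Llama model, so loading is fast and greedy decoding yields
// deterministic (if nonsensical) outputs that can be compared exactly.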
import { pipeline, TextGenerationPipeline } from "../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";

const PIPELINE_ID = "text-generation";

export default () => {
  describe("Text Generation", () => {
    const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM";

    /** @type {TextGenerationPipeline} */
    let pipe;
    beforeAll(async () => {
      pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
    }, MAX_MODEL_LOAD_TIME);

    it("should be an instance of TextGenerationPipeline", () => {
      expect(pipe).toBeInstanceOf(TextGenerationPipeline);
    });

    describe("batch_size=1", () => {
      const text_input = "hello";
      const generated_text_target = "erdingsdelete mely";
      const text_target = [{ generated_text: text_input + generated_text_target }];
      const new_text_target = [{ generated_text: generated_text_target }];

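      // Chat-style input: the pipeline applies the model's chat template and
      // returns the full conversation, including the generated assistant turn.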
      const chat_input = [
        { role: "system", content: "a" },
        { role: "user", content: "b" },
      ];
      const chat_target = [
        {
          generated_text: [
            { role: "system", content: "a" },
            { role: "user", content: "b" },
            { role: "assistant", content: " Southern abund Load" },
          ],
        },
      ];

      it(
        "text input (single)",
        async () => {
          const output = await pipe(text_input, { max_new_tokens: 3 });
          expect(output).toEqual(text_target);
        },
        MAX_TEST_EXECUTION_TIME,
      );
      it(
        "text input (list)",
        async () => {
          const output = await pipe([text_input], { max_new_tokens: 3 });
          expect(output).toEqual([text_target]);
        },
        MAX_TEST_EXECUTION_TIME,
      );

      it(
        "text input (single) - return_full_text=false",
        async () => {
          const output = await pipe(text_input, { max_new_tokens: 3, return_full_text: false });
          expect(output).toEqual(new_text_target);
        },
        MAX_TEST_EXECUTION_TIME,
      );
      it(
        "text input (list) - return_full_text=false",
        async () => {
          const output = await pipe([text_input], { max_new_tokens: 3, return_full_text: false });
          expect(output).toEqual([new_text_target]);
        },
        MAX_TEST_EXECUTION_TIME,
      );

      it(
        "chat input (single)",
        async () => {
          const output = await pipe(chat_input, { max_new_tokens: 3 });
          expect(output).toEqual(chat_target);
        },
        MAX_TEST_EXECUTION_TIME,
      );
      it(
        "chat input (list)",
        async () => {
          const output = await pipe([chat_input], { max_new_tokens: 3 });
          expect(output).toEqual([chat_target]);
        },
        MAX_TEST_EXECUTION_TIME,
      );
    });

    // TODO: Fix batch_size>1
    // describe("batch_size>1", () => {
    //   it("default", async () => {
    //     const output = await pipe(["hello", "hello world"]);
    //     const target = [
    //       [{ generated_text: "helloerdingsAndroid Load" }],
    //       [{ generated_text: "hello world zerosMillнал" }],
    //     ];
    //     expect(output).toEqual(target);
    //   }, MAX_TEST_EXECUTION_TIME);
    // });

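    // Free the model's backing resources once the suite finishes.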
    afterAll(async () => {
      await pipe.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};