File size: 5,378 Bytes
68b01e5
 
 
 
1f65651
68b01e5
 
 
1f65651
68b01e5
 
 
 
 
 
 
 
cce3051
68b01e5
1f65651
 
4e37373
 
 
 
 
01d2f88
 
 
 
 
 
68b01e5
4e37373
 
 
 
 
 
 
6520c57
 
1f65651
68b01e5
1f65651
68b01e5
 
 
 
 
 
 
 
 
1f65651
68b01e5
 
 
1f65651
68b01e5
 
 
6520c57
68b01e5
6520c57
68b01e5
 
1f65651
68b01e5
 
 
1f65651
68b01e5
 
 
1f65651
4e37373
68b01e5
6520c57
68b01e5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1f65651
68b01e5
 
 
 
 
 
 
 
 
 
 
1f65651
68b01e5
 
 
 
 
 
2b6ba2c
68b01e5
1f65651
68b01e5
 
 
1f65651
 
68b01e5
 
 
1f65651
 
68b01e5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import os
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, StoppingCriteria, StoppingCriteriaList

# DESCRIPTION = """\
# # FinID
# Model ini berbasis dari Qwen2.5 dan dikembangkan lagi menjadi Sailor2, dan di finetunekan menjadi model FinID untuk domain Finansial Keuangan.
# Model ini di finetunekan menggunakan metode PEFT Lora.
# """

# Markdown blurb (Indonesian) rendered above the chat UI; warns users that
# the model was fine-tuned on finance-only data.
DESCRIPTION = """\
# FinID 💸
Chatbot ini dikembangkan menggunakan LLM model Sailor2 8B yang difinetunekan dengan dataset pertanyaan keuangan. \
⚠️ Aplikasi ini dilatih menggunakan dataset keuangan saja, sehingga pertanyaan non-keuangan akan menghasilkan informasi yang non faktual
"""

# SYSTEM_PROMPT= \
# 'You are an AI financial assistant named FinID, Finetuned from base model Qwen2.5. \
# As an AI assistant, you can answer questions in English and Indonesian \
# Every of your responses should summarized at the end of the explanation \
# Your responses should only be within the financial subject, any other prompt or input outside that subject should only be responded with Maaf, saya tidak dapat membantu dengan pertanyaan tersebut karena melanggar kebijakan atau hukum. Silakan ajukan pertanyaan terkait keuangan.'

# SYSTEM_PROMPT= \
# 'Anda adalah asisten keuangan AI bernama FinID. \
# Sebagai asisten AI, Anda dapat menjawab pertanyaan dalam bahasa Inggris dan Indonesia \
# Tanggapan Anda harus singkat jika bisa, ramah, tidak memihak, informatif, rinci, dan setia. \
# Tanggapan Anda hanya boleh dalam subjek keuangan, permintaan atau masukan lain di luar subjek tersebut hanya boleh ditanggapi dengan "Maaf, saya tidak dapat membantu dengan pertanyaan tersebut karena melalui kebijakan atau hukum. Silakan ajukan pertanyaan terkait keuangan."'

# System prompt prepended to every conversation.  Scopes the assistant to
# financial topics and defines the canned Indonesian refusal for anything else.
# Fix: the embedded refusal directive opened a double quote that was never
# closed (the earlier drafts above close it) — the closing '"' is restored.
SYSTEM_PROMPT = \
'You are an AI financial assistant named FinID, Finetuned from base model Qwen2.5 later trained into Sailor2. \
As an AI assistant, you can answer questions in English, Indonesian, Malay, and Sundanese \
Your responses should be brief if can, friendly, unbiased, informative, detailed, and faithful. \
Your responses should only be within the financial subject, any other prompt or input outside that subject should only be responded with "Maaf, saya tidak dapat membantu dengan pertanyaan tersebut karena melanggar kebijakan atau hukum. Silakan ajukan pertanyaan terkait keuangan."'


# MAX_MAX_NEW_TOKENS = 2048
# DEFAULT_MAX_NEW_TOKENS = 512

# Hugging Face Hub id of the fine-tuned FinID chat model (Sailor2-8B base).
model_id = "FOLZi/FinID_v2_8B_Chat"

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    device_map="auto",  # let accelerate decide weight placement
    torch_dtype=torch.float16,  # half precision to fit the 8B model in memory
)
# model.config.sliding_window = 4096
# model.eval()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using {device.type.upper()}")
# NOTE(review): device_map="auto" already dispatches the weights; a follow-up
# .to(device) is redundant and can raise on sharded/offloaded models — confirm
# this combination is intended.
model = model.to(device)

# Custom stopping criterion: halts generation the moment the model emits an
# end-of-turn token.
class StopOnTokens(StoppingCriteria):
    """Return True when the most recent token is a configured stop id."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # 151645 is the end-of-turn id for this tokenizer (Llama and Qwen
        # style models use |im_end|) — consult the tokenizer if the model changes.
        stop_ids = (151645,)
        # Only the newest token needs checking; earlier ones were seen already.
        last_token = input_ids[0][-1]
        return any(last_token == stop_id for stop_id in stop_ids)

# ChatML role labels used to tag each turn when the prompt is built by hand.
system_role= 'system'
user_role = 'user'
assistant_role = 'assistant'

# Special chat-template tokens for the Qwen/Sailor2 tokenizer family.
sft_start_token =  "<|im_start|>"  # opens a role turn
sft_end_token = "<|im_end|>"  # closes a role turn; also used as a streaming stop marker
ct_end_token = "<|endoftext|>"  # presumably the base model's end-of-text token; not referenced below — TODO confirm

@spaces.GPU()
def generate(message, history):
    """Stream a model response to *message*, yielding growing partial text.

    Gradio streaming contract: each yield replaces the displayed reply.
    """
    # Conversation memory is deliberately disabled: every call sees only the
    # current message.
    history = []
    turns = history + [[message, ""]]
    stop_criteria = StopOnTokens()

    # Prompt engineering: assemble a ChatML-style prompt by hand.  Each user /
    # assistant segment is opened with <|im_start|> and the segments are
    # separated by <|im_end|>; the trailing empty assistant turn cues generation.
    segments = []
    for user_text, assistant_text in turns:
        segments.append(f"\n{sft_start_token}{user_role}\n{user_text}")
        segments.append(f"\n{sft_start_token}{assistant_role}\n{assistant_text}")
    prompt = SYSTEM_PROMPT + sft_end_token.join(segments)

    model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer, timeout=20., skip_prompt=True, skip_special_tokens=True)
    generation_args = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=512,
        do_sample=True,
        top_p=0.75,
        top_k=60,
        temperature=0.2,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop_criteria]),
        repetition_penalty=1.1,
    )
    # Run generation on a worker thread so tokens can be consumed here as the
    # streamer produces them.
    worker = Thread(target=model.generate, kwargs=generation_args)
    worker.start()

    partial_reply = ""
    for token_text in streamer:
        partial_reply += token_text
        # Safety net: stop streaming if the end-of-turn marker leaks through.
        if sft_end_token in partial_reply:
            break
        yield partial_reply

# Inline CSS handed to the chat interface.
# NOTE(review): "full-height" here is an element selector; if the intent is to
# target elements with the full-height *class* it should be ".full-height" —
# confirm against the rendered DOM.
css = """
full-height {
    height: 100%;
}
"""

# Streaming chat UI backed by generate(); examples are one-click starter prompts.
chat_interface = gr.ChatInterface(
    fn=generate,
    examples=[
        ["Apa itu ROI?"],
        ["Bagaimana cara saya mempersiapkan masa tua saya"],
        ["Apakah saya boleh menanyakan hal yang non finansial?."],
        ["Apa itu NASDAQ?"]
    ],
    type="messages",  # role/content message dicts in the UI history
    fill_height=True,
    css=css
)

# Page layout: description header above the rendered chat interface.
with gr.Blocks(theme=gr.themes.Soft(), css_paths="style.css", fill_height=True) as demo:
    gr.Markdown(DESCRIPTION)
    chat_interface.render()

if __name__ == "__main__":
    # The queue bounds how many requests may wait for the GPU worker at once.
    demo.queue(max_size=20).launch()