richardcsuwandi committed
Commit 35d9f90 · verified · 1 Parent(s): c10c9d5

Update app.py

Files changed (1): app.py (+28 −33)
app.py CHANGED
@@ -1,11 +1,14 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
+# Load your fine-tuned model and tokenizer
+model_name = "richardcsuwandi/llama2-javanese"
 
+model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.bfloat16)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+tokenizer.pad_token_id = 0
+tokenizer.padding_side = "left"
+
 def respond(
     message,
@@ -15,34 +18,27 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    # Prepare the instruction prompt and input text
+    instruction_prompt = "Sampeyan minangka chatbot umum sing tansah mangsuli nganggo basa Jawa."
+    input_text = f"<s>[INST] <<SYS>> {system_message} <</SYS>> {message} [/INST]"
+
+    # Tokenize the input text
+    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
+
+    # Generate response
+    output_sequences = model.generate(
+        input_ids=inputs['input_ids'],
+        max_length=max_tokens + inputs['input_ids'].shape[1],  # Adjust for input length
+        repetition_penalty=1.2,
         temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+        top_p=top_p
+    )
+
+    # Decode the generated response
+    generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
+
+    return generated_text
+
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
@@ -59,6 +55,5 @@ demo = gr.ChatInterface(
     ],
 )
 
-
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
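
A note on the new prompt format: input_text follows the Llama 2 chat template, wrapping the system prompt in <<SYS>> ... <</SYS>> inside an [INST] ... [/INST] block. The hard-coded instruction_prompt (roughly "You are a general chatbot that always replies in Javanese") is assigned but respond formats system_message into the template instead. The sketch below exercises the same generation path outside Gradio, assuming the richardcsuwandi/llama2-javanese checkpoint loads as above; the test message, sampling values, do_sample=True, attention_mask, and prompt slicing before decoding are illustrative additions rather than part of this commit (transformers applies temperature/top_p only when sampling is enabled, and decoding the full output sequence would echo the prompt back).

# Sketch: exercise the new generation path directly (not part of the commit).
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "richardcsuwandi/llama2-javanese"
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_name)

system_message = "Sampeyan minangka chatbot umum sing tansah mangsuli nganggo basa Jawa."
message = "Piye kabare?"  # hypothetical test message

# Same Llama 2 chat format as in app.py
input_text = f"<s>[INST] <<SYS>> {system_message} <</SYS>> {message} [/INST]"
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

output_sequences = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],  # pass the mask explicitly
    max_new_tokens=512,                       # bounds only the reply length, unlike max_length
    do_sample=True,                           # required for temperature/top_p to take effect
    temperature=0.7,
    top_p=0.95,
    repetition_penalty=1.2,
)

# Decode only the newly generated tokens so the prompt is not echoed back.
reply_ids = output_sequences[0][inputs["input_ids"].shape[1]:]
print(tokenizer.decode(reply_ids, skip_special_tokens=True))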