navidfalah committed
Commit 49c2d91 · verified · 1 Parent(s): 301b00f

Update app.py

Files changed (1)
  1. app.py +54 -59
app.py CHANGED
@@ -1,64 +1,59 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from peft import PeftModel
+ import os
+
+ # Load the base model and tokenizer
+ model_name = "mistralai/Mistral-7B-Instruct-v0.1"  # or whatever base model you used
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # Load base model
+ base_model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.float16,
+     device_map="auto"
  )

+ # Load your PEFT adapter
+ model = PeftModel.from_pretrained(base_model, "./model")
+
+ def chat_with_model(message, history):
+     # Format the conversation
+     conversation = []
+     for human, assistant in history:
+         conversation.append(f"Human: {human}")
+         conversation.append(f"Assistant: {assistant}")
+
+     conversation.append(f"Human: {message}")
+     conversation.append("Assistant:")
+
+     prompt = "\n".join(conversation)
+
+     # Tokenize and generate
+     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             inputs.input_ids,
+             max_new_tokens=512,
+             temperature=0.7,
+             do_sample=True,
+             pad_token_id=tokenizer.eos_token_id
+         )
+
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     # Extract just the assistant's response
+     response = response.split("Assistant:")[-1].strip()
+
+     return response
+
+ # Create Gradio interface
+ iface = gr.ChatInterface(
+     fn=chat_with_model,
+     title="3AI Custom Model Chat",
+     description="Chat with your custom fine-tuned Mistral model"
+ )

  if __name__ == "__main__":
-     demo.launch()
+     iface.launch()
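
A few notes on the new app.py. First, chat_with_model builds a plain "Human:/Assistant:" prompt, but mistralai/Mistral-7B-Instruct-v0.1 was trained on [INST] ... [/INST] formatting; unless the PEFT adapter was fine-tuned on the "Human:/Assistant:" style, the tokenizer's built-in chat template is likely a better fit. A minimal sketch, assuming the adapter follows the base model's format (build_prompt is a hypothetical helper, not part of the commit):

# Hypothetical helper: let the tokenizer render the prompt in the
# [INST] ... [/INST] format the base model expects.
def build_prompt(message, history):
    messages = []
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )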
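
Second, with device_map="auto" the model weights are dispatched to the available GPU(s) while the tokenized inputs stay on CPU; accelerate's hooks often cover this, but moving the encoding to the model's device and passing the attention mask explicitly is safer, and it avoids the warning generate emits when only input_ids are supplied with pad_token_id == eos_token_id. A sketch of the adjusted call, under the same assumptions as above:

# Move the whole encoding (input_ids + attention_mask) to the model's device.
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

with torch.no_grad():
    outputs = model.generate(
        **inputs,  # passes attention_mask along with input_ids
        max_new_tokens=512,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

Note that device_map="auto" itself requires the accelerate package to be installed in the Space.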
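
Finally, the removed InferenceClient version streamed partial responses (yield response), while the replacement returns only the finished string. gr.ChatInterface accepts generator functions, so streaming can be restored with transformers' TextIteratorStreamer; a sketch, reusing the hypothetical build_prompt helper above:

from threading import Thread
from transformers import TextIteratorStreamer

def chat_stream(message, history):
    prompt = build_prompt(message, history)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # Run generation in a background thread; the streamer yields decoded text
    # as it becomes available.
    Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=512,
                    do_sample=True, temperature=0.7,
                    pad_token_id=tokenizer.eos_token_id),
    ).start()

    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial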