rishu834763 committed on
Commit 4c0e46c · verified · 1 Parent(s): e4bf453

Update app.py

Files changed (1)
  app.py +58 -64
app.py CHANGED
@@ -1,82 +1,76 @@
 
 
 
import gradio as gr
- from huggingface_hub import InferenceClient
-
- from transformers import AutoModelForCausalLM
- from peft import PeftModel
-
- # This automatically loads the base model mentioned in adapter_config.json
model = AutoModelForCausalLM.from_pretrained(
-     "rishu834763/java-explainer-lora",
    device_map="auto",
-     torch_dtype="auto",
-     trust_remote_code=True,  # only needed for models that ship custom code
)
- model = PeftModel.from_pretrained(model, "rishu834763/java-explainer-lora")
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs:
-     https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-     messages.extend(history)
    messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
        messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the Gradio docs:
- https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
    ],
)
-
- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
-     demo.launch()

+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from peft import PeftModel, PeftConfig
import gradio as gr
+
+ # === Load your LoRA correctly ===
+ peft_model_id = "rishu834763/java-explainer-lora"
+
+ config = PeftConfig.from_pretrained(peft_model_id)
+ base_model_name = config.base_model_name_or_path  # this will be mistralai/Mistral-7B-Instruct-v0.2
+
+ # Load base model (with quantization if you want to fit in the free tier)
model = AutoModelForCausalLM.from_pretrained(
+     base_model_name,
+     torch_dtype=torch.bfloat16,
    device_map="auto",
+     # Uncomment the block below to load in 4-bit and fit limited VRAM
+     # (also requires `from transformers import BitsAndBytesConfig`):
+     # quantization_config=BitsAndBytesConfig(
+     #     load_in_4bit=True,
+     #     bnb_4bit_compute_dtype=torch.bfloat16,
+     #     bnb_4bit_use_double_quant=True,
+     #     bnb_4bit_quant_type="nf4",
+     # ),
)
+
+ model = PeftModel.from_pretrained(model, peft_model_id)
+ # Optional but recommended: merge the adapter so inference is faster and uses less VRAM
+ model = model.merge_and_unload()
+
+ tokenizer = AutoTokenizer.from_pretrained(base_model_name)
+ if tokenizer.pad_token is None:
+     tokenizer.pad_token = tokenizer.eos_token
+
+ # Create the pipeline using YOUR model and tokenizer
+ pipe = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+ )
+
+ # === Chat callback: rebuild the conversation from Gradio's (user, assistant) history ===
+ def chat(message, history):
+     messages = []
+     for user_msg, assistant_msg in history:
+         messages.append({"role": "user", "content": user_msg})
+         if assistant_msg:
+             messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
+
+     outputs = pipe(
        messages,
+         max_new_tokens=512,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+         pad_token_id=tokenizer.eos_token_id,
+     )
+     # The pipeline returns the whole chat; the last message is the model's reply
+     response = outputs[0]["generated_text"][-1]["content"]
+     return response
+
+ # === Build the Gradio interface ===
+ demo = gr.ChatInterface(
+     fn=chat,
+     title="Java Explainer (Mistral-7B + your LoRA)",
+     description="Ask anything about Java code → I will explain it using your fine-tuned model",
+     examples=[
+         "Explain this Java code: public class HelloWorld { public static void main(String[] args) { System.out.println(\"Hello, World!\"); } }",
+         "What does the synchronized keyword do in Java?",
    ],
+     cache_examples=False,
)

+ demo.launch()
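
A note on the commented-out quantization block: as originally written it would fail if uncommented, because `BitsAndBytesConfig` is never imported and a standalone `load_in_4bit=True` flag conflicts with also passing a `quantization_config`. A minimal sketch of the 4-bit load path, assuming a CUDA device and the bitsandbytes package are available:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 quantization (needs `pip install bitsandbytes` and a CUDA GPU)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",  # base model named in the diff's comment
    quantization_config=bnb_config,
    device_map="auto",
)

Note that merging the LoRA with `merge_and_unload()` is more delicate on a 4-bit base than on the bfloat16 path the diff takes, so when quantizing it may be simpler to keep the adapter unmerged.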
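
The reply extraction `outputs[0]["generated_text"][-1]["content"]` relies on the chat-aware behaviour of recent transformers releases: when a text-generation pipeline is given a list of role/content messages, `generated_text` comes back as the full conversation rather than a plain string. A small illustration of that assumption, reusing the `pipe` built above:

outputs = pipe(
    [{"role": "user", "content": "What is the JVM?"}],
    max_new_tokens=64,
)
# outputs[0]["generated_text"] is the whole chat, e.g.
# [{"role": "user", "content": "What is the JVM?"},
#  {"role": "assistant", "content": "The JVM is ..."}]
reply = outputs[0]["generated_text"][-1]["content"]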
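
The `chat` callback unpacks history as (user, assistant) pairs, which matches Gradio's legacy tuple format. On Gradio versions where `gr.ChatInterface` is created with `type="messages"` (as the removed code was), history already arrives as role/content dicts and the loop collapses. A sketch under that assumption, reusing `pipe` and `tokenizer` from the app:

def chat(message, history):
    # With type="messages", history is already [{"role": ..., "content": ...}, ...]
    messages = list(history)
    messages.append({"role": "user", "content": message})
    outputs = pipe(
        messages,
        max_new_tokens=512,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )
    return outputs[0]["generated_text"][-1]["content"]

demo = gr.ChatInterface(fn=chat, type="messages")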