Akshaymp committed (verified)
Commit abc64f0 · 1 Parent(s): 3e7d108

Upload handler.py with huggingface_hub

Files changed (1)
  1. handler.py +70 -0
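
For reference, a file like this is typically pushed with the huggingface_hub client. A minimal sketch, assuming you run it from the directory containing handler.py; the repo_id below is a placeholder, not taken from this commit:

# Sketch: upload handler.py to a model repo with huggingface_hub (repo_id is hypothetical).
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="handler.py",        # local file to push
    path_in_repo="handler.py",           # destination path inside the repo
    repo_id="Akshaymp/your-model-repo",  # placeholder repo id
    repo_type="model",
    commit_message="Upload handler.py with huggingface_hub",
)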
handler.py ADDED
@@ -0,0 +1,70 @@
+ from typing import Dict, List, Any
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel, PeftConfig
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # 1. Load the adapter config from the local path (where the repo is cloned on the endpoint)
+         self.peft_config = PeftConfig.from_pretrained(path)
+
+         # 2. Load the Base Model
+         # We use device_map="auto" to use the GPU available in the endpoint
+         # torch_dtype=torch.float16 is standard for inference on T4/A10G
+         self.base_model = AutoModelForCausalLM.from_pretrained(
+             self.peft_config.base_model_name_or_path,
+             return_dict=True,
+             torch_dtype=torch.float16,
+             device_map="auto",
+             trust_remote_code=True
+         )
+
+         # 3. Load the Tokenizer
+         self.tokenizer = AutoTokenizer.from_pretrained(
+             self.peft_config.base_model_name_or_path,
+             trust_remote_code=True
+         )
+
+         # 4. Load the Adapter (fine-tuned weights)
+         self.model = PeftModel.from_pretrained(self.base_model, path)
+         self.model.eval()
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         """
+         Args:
+             data (:obj:`Dict[str, Any]`):
+                 Input data payload. Expects a key 'inputs' containing the prompt text.
+                 Optional parameters: 'temperature', 'max_new_tokens', 'top_p', etc.
+         """
+         # Get inputs
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", {})
+
+         # Default generation parameters
+         max_new_tokens = parameters.get("max_new_tokens", 512)
+         temperature = parameters.get("temperature", 0.7)
+         top_p = parameters.get("top_p", 0.9)
+
+         # Handle a list of inputs or a single string
+         if isinstance(inputs, list):
+             inputs = inputs[0]  # Simplification for single-turn
+
+         # Tokenize
+         input_ids = self.tokenizer(inputs, return_tensors="pt").input_ids.to(self.model.device)
+
+         # Generate
+         with torch.no_grad():
+             output_ids = self.model.generate(
+                 input_ids=input_ids,
+                 max_new_tokens=max_new_tokens,
+                 temperature=temperature,
+                 top_p=top_p,
+                 do_sample=True,
+                 pad_token_id=self.tokenizer.eos_token_id
+             )
+
+         # Decode
+         # Slice [input_ids.shape[1]:] to return ONLY the generated response, not the prompt
+         generated_text = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
+
+         return [{"generated_text": generated_text}]
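
For local testing before deploying, the handler can be exercised directly with the payload shape described in the docstring. A minimal sketch, assuming this repo (adapter weights plus handler.py) is cloned to the current directory and a GPU with enough memory is available; the prompt and parameters are illustrative:

# Sketch: invoke the handler locally the way an Inference Endpoint would.
# Assumes the adapter repo is cloned to "." — path, prompt, and parameters are illustrative.
from handler import EndpointHandler

handler = EndpointHandler(path=".")
payload = {
    "inputs": "Explain what a LoRA adapter is in one sentence.",
    "parameters": {"max_new_tokens": 64, "temperature": 0.7, "top_p": 0.9},
}
result = handler(payload)
print(result[0]["generated_text"])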