ChevalierJoseph committed on
Commit ee72852 · verified · 1 Parent(s): 55542d9

Update handler.py

Files changed (1)
  1. handler.py +121 -16
handler.py CHANGED
@@ -1,22 +1,127 @@
 from typing import Dict, List, Any
-from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import logging
 
-class EndpointHandler():
-    def __init__(self, path=""):
-        # Load the model and tokenizer from the specified path
-        self.model = AutoModelForCausalLM.from_pretrained(path)
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+class EndpointHandler:
+    def __init__(self, path: str = ""):
+        """
+        Initialize the model and tokenizer when the endpoint starts.
+
+        Args:
+            path (str): Path to the model files
+        """
+        logger.info(f"Loading model from {path}")
+
+        # Load tokenizer and model
         self.tokenizer = AutoTokenizer.from_pretrained(path)
+        self.model = AutoModelForCausalLM.from_pretrained(
+            path,
+            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+            device_map="auto" if torch.cuda.is_available() else None,
+            trust_remote_code=True
+        )
+
+        # Set pad token if it doesn't exist
+        if self.tokenizer.pad_token is None:
+            self.tokenizer.pad_token = self.tokenizer.eos_token
+
+        logger.info("Model loaded successfully")
 
     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
-        # Extract input text from the request
-        inputs = data.pop("inputs", data)
-
-        # Tokenize input and generate text
-        input_ids = self.tokenizer.encode(inputs, return_tensors="pt")
-        output_ids = self.model.generate(input_ids)
-
-        # Decode the generated output
-        output_text = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
+        """
+        Process the inference request.
+
+        Args:
+            data (Dict[str, Any]): Request data containing:
+                - inputs (str): The input text/prompt
+                - parameters (dict, optional): Generation parameters
+                    - max_new_tokens (int): Maximum tokens to generate (default: 256)
+                    - temperature (float): Sampling temperature (default: 0.7)
+                    - top_p (float): Top-p sampling (default: 0.9)
+                    - do_sample (bool): Whether to use sampling (default: True)
+                    - repetition_penalty (float): Repetition penalty (default: 1.1)
+                    - return_full_text (bool): Return full text including input (default: False)
+
+        Returns:
+            List[Dict[str, Any]]: Generated text response
+        """
+        try:
+            # Extract inputs
+            inputs = data.get("inputs", "")
+            if not inputs:
+                return [{"error": "No input text provided"}]
+
+            # Extract generation parameters
+            parameters = data.get("parameters", {})
+            max_new_tokens = parameters.get("max_new_tokens", 256)
+            temperature = parameters.get("temperature", 0.7)
+            top_p = parameters.get("top_p", 0.9)
+            do_sample = parameters.get("do_sample", True)
+            repetition_penalty = parameters.get("repetition_penalty", 1.1)
+            return_full_text = parameters.get("return_full_text", False)
+
+            # Format the input as a chat message if it doesn't already contain instruction formatting
+            if not any(marker in inputs.lower() for marker in ["[inst]", "<s>", "### instruction", "user:", "assistant:"]):
+                formatted_input = f"[INST] {inputs} [/INST]"
+            else:
+                formatted_input = inputs
+
+            # Tokenize input
+            input_ids = self.tokenizer.encode(
+                formatted_input,
+                return_tensors="pt",
+                truncation=True,
+                max_length=2048  # Reasonable limit for input
+            )
+
+            # Move to GPU if available
+            if torch.cuda.is_available():
+                input_ids = input_ids.cuda()
+
+            # Generate response
+            with torch.no_grad():
+                output_ids = self.model.generate(
+                    input_ids,
+                    max_new_tokens=max_new_tokens,
+                    temperature=temperature,
+                    top_p=top_p,
+                    do_sample=do_sample,
+                    repetition_penalty=repetition_penalty,
+                    pad_token_id=self.tokenizer.pad_token_id,
+                    eos_token_id=self.tokenizer.eos_token_id,
+                    use_cache=True
+                )
+
+            # Decode the response
+            if return_full_text:
+                generated_text = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
+            else:
+                # Only return the newly generated tokens
+                new_tokens = output_ids[0][input_ids.shape[-1]:]
+                generated_text = self.tokenizer.decode(new_tokens, skip_special_tokens=True)
+
+            # Clean up the response
+            generated_text = generated_text.strip()
+
+            # Return in the expected format
+            return [{
+                "generated_text": generated_text,
+                "input_length": input_ids.shape[-1],
+                "output_length": len(output_ids[0]) - input_ids.shape[-1]
+            }]
+
+        except Exception as e:
+            logger.error(f"Error during inference: {str(e)}")
+            return [{"error": f"Inference failed: {str(e)}"}]
 
-        # Return the generated text
-        return [{"generated_text": output_text}]
+    def __del__(self):
+        """Clean up resources when the handler is destroyed."""
+        if hasattr(self, 'model'):
+            del self.model
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
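
For reference, a minimal local smoke test of the updated handler could look like the sketch below. The ./model directory and the example prompt are placeholders rather than part of this commit; the payload simply mirrors the inputs/parameters shape documented in the __call__ docstring above, which is also the JSON body shape an inference endpoint would pass to the handler.

# smoke_test.py — hypothetical local check, assuming handler.py is importable
# and a local copy of the model weights lives in ./model (placeholder path).
from handler import EndpointHandler

handler = EndpointHandler(path="./model")

payload = {
    "inputs": "Explain what an inference endpoint handler does.",
    "parameters": {
        "max_new_tokens": 128,
        "temperature": 0.7,
        "top_p": 0.9,
        "return_full_text": False,
    },
}

# __call__ returns a list with either {"generated_text": ...} or {"error": ...}
result = handler(payload)
print(result[0].get("generated_text", result[0]))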