sajjadamjad committed on
Commit 87afd64 · 1 Parent(s): 8184b9d

Update handler.py

Files changed (1)
  1. handler.py +33 -48
handler.py CHANGED
```diff
@@ -1,53 +1,38 @@
-from typing import Dict, Any
-import logging
-
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from peft import PeftConfig, PeftModel
-import torch.cuda
-
-
-LOGGER = logging.getLogger(__name__)
-logging.basicConfig(level=logging.INFO)
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-
-class EndpointHandler():
+from typing import Dict, List, Any
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import transformers
+import torch
+
+dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
+
+class EndpointHandler:
     def __init__(self, path=""):
-        config = PeftConfig.from_pretrained(path)
-        model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, load_in_4bit=True, device_map='auto')
-        self.tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
-        # Load the Lora model
-        self.model = PeftModel.from_pretrained(model, path)
-
-    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
-        """
-        Args:
-            data (Dict): The payload with the text prompt and generation parameters.
-        """
-        LOGGER.info(f"Received data: {data}")
-        # Get inputs
-        query = data.pop("inputs", None)
-        prompt_template = """
-        Below is a screenplay prompt followed by a screenplay response. Generate only screenplay response.
-        ### Screenplay Prompt:
-        {query}
-        ### Screenplay Response:
-        """
-        prompt = prompt_template.format(query=query)
-        parameters = data.pop("parameters", None)
-        if prompt is None:
-            raise ValueError("Missing prompt.")
-        # Preprocess
-        encodeds = self.tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
-
-        model_inputs = encodeds.to(device)
+
+        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+        model = AutoModelForCausalLM.from_pretrained(
+            path,
+            return_dict=True,
+            device_map="auto",
+            load_in_8bit=True,
+            torch_dtype=dtype,
+            trust_remote_code=True,
+        )
+
+        generation_config = model.generation_config
+        generation_config.max_new_tokens = 1000000
+        generation_config.temperature = 0
+        generation_config.num_return_sequences = 1
+        generation_config.pad_token_id = tokenizer.eos_token_id
+        generation_config.eos_token_id = tokenizer.eos_token_id
+        self.generation_config = generation_config
+
+        self.pipeline = transformers.pipeline(
+            "text-generation",
+            model=model,
+            tokenizer=tokenizer
+        )
 
-        # Forward
-        LOGGER.info(f"Start generation.")
-        eos_tok = self.tokenizer.eos_token_id
-        LOGGER.info(f"Generating Ids")
-        generated_ids = self.model.generate(**model_inputs, max_new_tokens=9999999, do_sample=True, pad_token_id=eos_tok)
-        LOGGER.info(f"Ids Generated.")
-        decoded = self.tokenizer.batch_decode(generated_ids)
-        LOGGER.info(f"Generated text length: {len(decoded[0])}")
-        return {"generated_text": decoded[0]}
+    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+        prompt = data.pop("inputs", data)
+        result = self.pipeline(prompt, generation_config=self.generation_config)
+        return result
```
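The updated handler can be exercised locally before deploying. The sketch below is illustrative and not part of the commit: it assumes `handler.py` sits in the current directory, that the model weights have been downloaded to `./model` (a placeholder path), and that a CUDA GPU with enough memory for 8-bit loading is available.

```python
# Hypothetical local smoke test for the updated EndpointHandler.
# Assumptions (not part of the commit): handler.py is importable from
# the current directory and "./model" is a placeholder path holding
# the model weights; a CUDA GPU must be available for 8-bit loading.
from handler import EndpointHandler

handler = EndpointHandler(path="./model")

# Hugging Face Inference Endpoints deliver JSON payloads of this shape.
payload = {"inputs": "INT. COFFEE SHOP - DAY"}
result = handler(payload)

# A transformers text-generation pipeline returns a list of dicts, each
# with a "generated_text" key, so the response shape differs from the
# old handler's single {"generated_text": ...} dict.
print(result[0]["generated_text"])
```

Note that the new `__call__` also no longer wraps the input in the screenplay prompt template; callers now send the fully formed prompt in `inputs`.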