import os
from typing import Any, Dict, List

import torch
from unsloth import FastLanguageModel


class EndpointHandler:
    """Hugging Face Inference Endpoint handler for aspect-based sentiment
    classification (ABSA) using an Unsloth-loaded Mistral-7B checkpoint.

    The handler builds an InstructABSA-style few-shot prompt wrapped in the
    Alpaca instruction template, runs greedy generation, and returns the
    predicted sentiment label parsed from the model's response.
    """

    def __init__(self, model_dir: str = ""):
        """Load the fine-tuned model/tokenizer and prepare prompt templates.

        Args:
            model_dir: Checkpoint path supplied by the endpoint runtime.
                NOTE(review): this value is intentionally discarded and
                replaced by a hard-coded Hub checkpoint below — confirm this
                override is the desired deployment behavior.
        """
        print(f"[DEBUG] Original model_dir: {model_dir}")
        # Override model_dir with your checkpoint path.
        model_dir = "RichardLu/mistral7b_aspectsentiment_res"
        print(f"[DEBUG] Using model_dir: {model_dir}")

        # Inference configuration. The sequence budget is kept on the
        # instance so __call__ can reuse it when truncating prompts (the
        # original only used it at load time, so prompts could exceed it).
        self.max_seq_length = 2048
        load_in_4bit = True  # or False as needed.

        # Load the model and tokenizer.
        self.model, self.tokenizer = FastLanguageModel.from_pretrained(
            model_name=model_dir,
            max_seq_length=self.max_seq_length,
            load_in_4bit=load_in_4bit,
        )
        print("[DEBUG] Model and tokenizer loaded successfully.")

        # Set the model to inference mode (enables Unsloth's fast paths).
        FastLanguageModel.for_inference(self.model)

        # InstructABSA instruction text with few-shot examples. This is a
        # runtime prompt the model was fine-tuned against — do not reword.
        self.instructabsa_instruction = """Definition: The output will be 'positive' if the aspect identified in the sentence contains a positive sentiment. If the sentiment of the identified aspect in the input is negative the answer will be 'negative'. Otherwise, the output should be 'neutral'. For aspects which are classified as noaspectterm, the sentiment is none.
Positive example 1-
input: With the great variety on the menu , I eat here often and never get bored. The aspect is menu.
output: positive
Positive example 2-
input: Great food, good size menu, great service and an unpretensious setting. The aspect is food.
output: positive
Negative example 1-
input: They did not have mayonnaise, forgot our toast, left out ingredients (ie cheese in an omelet), below hot temperatures and the bacon was so over cooked it crumbled on the plate when you touched it. The aspect is toast.
output: negative
Negative example 2-
input: The seats are uncomfortable if you are sitting against the wall on wooden benches. The aspect is seats.
output: negative
Neutral example 1-
input: I asked for seltzer with lime, no ice. The aspect is seltzer with lime.
output: neutral
Neutral example 2-
input: They wouldnt even let me finish my glass of wine before offering another. The aspect is glass of wine.
output: neutral
Now complete the following example-
"""

        # Alpaca-style prompt template (instruction, input, response).
        self.alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Run sentiment prediction for one request.

        Args:
            data: Either ``{"inputs": "<review> The aspect is <aspect>."}``
                or ``{"review": ..., "aspect": ...}``.

        Returns:
            ``[{"predicted": <label>}]`` on success, or ``[{"error": ...}]``
            when the payload is missing the required keys.
        """
        # First, try to use the "inputs" key; otherwise compose the input
        # sentence from separate "review" and "aspect" fields.
        if "inputs" in data:
            full_input = data["inputs"]
        elif "review" in data and "aspect" in data:
            full_input = f"{data['review']} The aspect is {data['aspect']}."
        else:
            return [{"error": "Provide either an 'inputs' key or both 'review' and 'aspect' keys."}]

        # Build the final prompt (empty response slot for the model to fill).
        prompt = self.alpaca_prompt.format(self.instructabsa_instruction, full_input, "")

        # Tokenize on the model's device. Fix: cap truncation at the model's
        # configured sequence budget instead of the tokenizer's default
        # model_max_length, which may be unset or far larger than 2048.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_seq_length,
        ).to(device)

        # Generate without autograd bookkeeping — generation never needs
        # gradients, and inference_mode saves memory and time.
        with torch.inference_mode():
            output_ids = self.model.generate(**inputs, max_new_tokens=128)
        output_text = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)

        # Extract the predicted sentiment: the decoded text echoes the whole
        # prompt, so take only what follows the final "### Response:" marker.
        if "### Response:" in output_text:
            predicted = output_text.split("### Response:")[-1].strip()
        else:
            predicted = output_text.strip()
        return [{"predicted": predicted}]