"""HF Inference Endpoint custom handler for NX-AI/xLSTM-7b.

Deploys the xLSTM matrix-memory recurrent architecture (Beck et al., 2024) via
the HF Endpoints custom-handler interface. xLSTM introduces mLSTM (matrix-memory
long short-term memory) and sLSTM (exponential-gating scalar LSTM) blocks,
representing a non-SSM, non-attention recurrent model family.

Input schema (Bench 1.6-A concatenated completion format):
    {
        "inputs": "<flat text prompt with system + user turns concatenated>",
        "parameters": {
            "max_new_tokens": 512,
            "temperature": 0.7,
            "top_p": 0.95,
            "do_sample": true,
        }
    }

Output schema:
    {
        "generated_text": "<model completion>",
        "input_tokens": <int>,
        "output_tokens": <int>,
        "model": "NX-AI/xLSTM-7b"
    }

Preregistered per docs/BENCH-1.6A-PREREG-V1.1-AMENDMENT.md as Cell A3.
Base-model asymmetry (v1.0 §5.5) applies: xLSTM-7b is a base model with no
instruction tuning, so it receives completion-format prompts.
"""
from __future__ import annotations

from typing import Any

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "NX-AI/xLSTM-7b"


class EndpointHandler:
    """HF Endpoints custom handler entry point."""

    def __init__(self, path: str = "") -> None:
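        # `path` is the local repository directory that HF Endpoints passes to
        # custom handlers; this handler loads weights from the Hub by model id
        # instead, so the argument is accepted but unused.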
        self.model_id = MODEL_ID
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_id,
            trust_remote_code=True,
        )
        # xLSTM-7b at BF16 ≈ 14GB, fits A10G 24GB comfortably.
        # device_map="auto" handles multi-GPU gracefully if A100 80GB is used instead.
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_id,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )
        self.model.eval()

    def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
        prompt: str = data.get("inputs", "")
        params: dict[str, Any] = data.get("parameters", {}) or {}

        max_new_tokens: int = int(params.get("max_new_tokens", 512))
        temperature: float = float(params.get("temperature", 0.7))
        top_p: float = float(params.get("top_p", 0.95))
        do_sample: bool = bool(params.get("do_sample", True))

        if not prompt:
            return {
                "generated_text": "",
                "input_tokens": 0,
                "output_tokens": 0,
                "model": self.model_id,
                "error": "empty_input",
            }

        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        input_tokens = int(inputs["input_ids"].shape[-1])

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                temperature=temperature if do_sample else 1.0,
                top_p=top_p,
                do_sample=do_sample,
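                # The tokenizer may not define a pad token; fall back to EOS so
                # generate() has a valid pad_token_id.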
                pad_token_id=self.tokenizer.eos_token_id
                if self.tokenizer.pad_token_id is None
                else self.tokenizer.pad_token_id,
            )

        # Decode only the newly generated token ids; slicing by token position
        # avoids echoing the prompt when decode() does not round-trip it exactly.
        generated_ids = outputs[0][input_tokens:]
        generated_only = self.tokenizer.decode(
            generated_ids,
            skip_special_tokens=True,
        )
        output_tokens = int(generated_ids.shape[-1])

        return {
            "generated_text": generated_only,
            "input_tokens": input_tokens,
            "output_tokens": output_tokens,
            "model": self.model_id,
        }
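

if __name__ == "__main__":
    # Optional local smoke test, not part of the HF Endpoints contract.
    # A minimal sketch assuming the local machine can hold xLSTM-7b in BF16;
    # the prompt below is illustrative, not a Bench 1.6-A item.
    handler = EndpointHandler()
    request = {
        "inputs": "The xLSTM architecture differs from attention-based transformers in that",
        "parameters": {
            "max_new_tokens": 64,
            "temperature": 0.7,
            "top_p": 0.95,
            "do_sample": True,
        },
    }
    response = handler(request)
    print(response["model"], response["input_tokens"], response["output_tokens"])
    print(response["generated_text"])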