File size: 841 Bytes
c7256ee
 
 
 
 
 
 
c64aaec
c7256ee
 
 
 
 
 
 
 
 
 
 
 
c64aaec
c7256ee
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
from huggingface_hub import InferenceClient

class Qwen2_5:
    """Thin wrapper around the Hugging Face Inference API for Qwen2.5-72B-Instruct.

    Provides a streaming generator (`generate_stream`) and a blocking
    convenience method (`generate`) that drains the stream into one string.
    """

    def __init__(self, token):
        """Create an authenticated inference client.

        token: Hugging Face API token used to authenticate requests.
        """
        self.client = InferenceClient(token=token)
        self.model_id = "Qwen/Qwen2.5-72B-Instruct"

    def generate_stream(self, prompt, max_tokens=1000, temperature=0.1):
        """Yield response text fragments for *prompt* as they arrive.

        Chunks without choices and empty/None delta content are skipped,
        so every yielded value is a non-empty string.
        """
        stream = self.client.chat_completion(
            model=self.model_id,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
            stream=True,
        )
        for chunk in stream:
            # Guard clause: some stream events carry no choices at all.
            if not chunk.choices:
                continue
            piece = chunk.choices[0].delta.content
            if piece:
                yield piece

    def generate(self, prompt, max_tokens=1000, temperature=0.1):
        """Return the complete response text by consuming the stream."""
        pieces = list(self.generate_stream(prompt, max_tokens=max_tokens, temperature=temperature))
        return "".join(pieces)