"""Query the cv43/llmpot seq2seq model with a hex-encoded protocol request.

Loads the tokenizer and model from the Hugging Face Hub, builds a
text2text-generation pipeline (PyTorch backend), sends one sample request,
and prints the generated response.
"""

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline


def main() -> None:
    """Load the model, run one sample request, and print the response."""
    tokenizer = AutoTokenizer.from_pretrained("cv43/llmpot")
    model = AutoModelForSeq2SeqLM.from_pretrained("cv43/llmpot")
    pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer, framework="pt")

    # Hex-encoded request payload (presumably a Modbus/TCP frame, given the
    # model name "llmpot" — TODO confirm against the model card).
    request = "02b10000000b00100000000204ffffffff"
    result = pipe(request)
    # pipeline() returns a list of dicts; the generated text is under
    # the 'generated_text' key of the first entry.
    print(f"Request: {request}, Response: {result[0]['generated_text']}")


if __name__ == "__main__":
    main()