cv43 committed on
Commit
b0450a5
·
1 Parent(s): c9ae900

update pipelines and readme

Browse files
Files changed (3) hide show
  1. .gitignore +0 -1
  2. README.md +28 -1
  3. pipeline.py +9 -3
.gitignore CHANGED
@@ -1,2 +1 @@
1
  __pycache__/
2
- best-38.ckpt
 
1
  __pycache__/
 
README.md CHANGED
@@ -11,4 +11,31 @@ base_model:
11
  ---
12
 
13
  # Model
14
- This is a model fine-tuned using PyTorch Lightning...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  ---
12
 
13
  # Model
14
+ This is a sample finetuned model produced under [LLMPot research project](https://github.com/momalab/LLMPot) and explained further in the [related research manuscript](https://arxiv.org/abs/2405.05999).
15
+
16
+ ## How to Use
17
+
18
+ This model is a fine-tuned version of [`google/byt5-small`](https://huggingface.co/google/byt5-small) for Modbus protocol emulation.
19
+
20
+ Make sure you have `transformers` and `torch` installed:
21
+
22
+ ```bash
23
+ pip install transformers torch
24
+ ```
25
+
26
+ Load the model and run a single inference.
27
+
28
+ ```python
29
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
30
+
31
+ tokenizer = AutoTokenizer.from_pretrained("cv43/llmpot")
32
+ model = AutoModelForSeq2SeqLM.from_pretrained("cv43/llmpot")
33
+
34
+ pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer, framework="pt")
35
+
36
+ request = "02b10000000b00100000000204ffffffff"
37
+ result = pipe(request)
38
+ print(f"Request: {request}, Response: {result[0]['generated_text']}")
39
+ ```
40
+
41
+ Alternatively, you can use our Hugging Face Space, where the model is already running on this platform.
pipeline.py CHANGED
@@ -1,4 +1,10 @@
1
- from transformers import pipeline
2
 
3
- pipe = pipeline("text2text-generation", model="cv43/mbtcp-c0-f1_5_15_3_6_16-a0_39")
4
- pipe("02b10000000b00100000000204ffffffff")
 
 
 
 
 
 
 
1
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
2
 
3
+ tokenizer = AutoTokenizer.from_pretrained("cv43/llmpot")
4
+ model = AutoModelForSeq2SeqLM.from_pretrained("cv43/llmpot")
5
+
6
+ pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer, framework="pt")
7
+
8
+ request = "02b10000000b00100000000204ffffffff"
9
+ result = pipe(request)
10
+ print(f"Request: {request}, Response: {result[0]['generated_text']}")