SkywalkerLu committed on
Commit
3d78501
·
verified ·
1 Parent(s): a18517c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -11,7 +11,7 @@ language:
11
  - en
12
  ---
13
 
14
- # TransHLA2.0-BIND
15
 
16
  A minimal Hugging Face–compatible PyTorch model for peptide–HLA binding classification, built on ESM with optional LoRA and cross-attention. There is no custom predict API; inference follows the training path: tokenize the peptide and HLA pseudosequence with the ESM tokenizer, pad or truncate them to fixed lengths (default peptide=16, HLA=36), run a forward pass as `logits, features = model(epitope_ids, hla_ids)`, and apply softmax to obtain the binding probability.
17
 
@@ -32,11 +32,11 @@ pip install torch transformers peft
32
  import torch
33
  import torch.nn.functional as F
34
  from transformers import AutoModel, AutoTokenizer
35
- model_id = "SkywalkerLu/TransHLA2.0-BIND"
36
  model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
37
  ```
38
  ```
39
- ## How to use TransHLA2.0-BIND
40
  ```python
41
  import torch
42
  import torch.nn.functional as F
@@ -46,7 +46,7 @@ from transformers import AutoModel, AutoTokenizer
46
  device = "cuda" if torch.cuda.is_available() else "cpu"
47
 
48
  # Load model (replace with your model id if different)
49
- model_id = "SkywalkerLu/TransHLA2.0-BIND"
50
  model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
51
 
52
  # Load tokenizer used in training (ESM2 650M)
@@ -100,7 +100,7 @@ from transformers import AutoModel, AutoTokenizer
100
  device = "cuda" if torch.cuda.is_available() else "cpu"
101
 
102
  # Load model and tokenizer
103
- model_id = "SkywalkerLu/TransHLA2.0-BIND" # replace with your model id if different
104
  model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
105
  tok = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")
106
 
 
11
  - en
12
  ---
13
 
14
+ # TriStageHLA-BIND
15
 
16
  A minimal Hugging Face–compatible PyTorch model for peptide–HLA binding classification, built on ESM with optional LoRA and cross-attention. There is no custom predict API; inference follows the training path: tokenize the peptide and HLA pseudosequence with the ESM tokenizer, pad or truncate them to fixed lengths (default peptide=16, HLA=36), run a forward pass as `logits, features = model(epitope_ids, hla_ids)`, and apply softmax to obtain the binding probability.
17
 
 
32
  import torch
33
  import torch.nn.functional as F
34
  from transformers import AutoModel, AutoTokenizer
35
+ model_id = "SkywalkerLu/TriStageHLA-BIND"
36
  model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
37
  ```
38
  ```
39
+ ## How to use TriStageHLA-BIND
40
  ```python
41
  import torch
42
  import torch.nn.functional as F
 
46
  device = "cuda" if torch.cuda.is_available() else "cpu"
47
 
48
  # Load model (replace with your model id if different)
49
+ model_id = "SkywalkerLu/TriStageHLA-BIND"
50
  model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
51
 
52
  # Load tokenizer used in training (ESM2 650M)
 
100
  device = "cuda" if torch.cuda.is_available() else "cpu"
101
 
102
  # Load model and tokenizer
103
+ model_id = "SkywalkerLu/TriStageHLA-BIND" # replace with your model id if different
104
  model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
105
  tok = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")
106