SkywalkerLu committed
Commit e0b61b1 · verified · 1 Parent(s): 6d3c2ea

Update README.md

Files changed (1)
  1. README.md +5 -5
README.md CHANGED
@@ -10,7 +10,7 @@ tags:
 ---
 
 
-# TransHLA2.0-IM
+# TriStageHLA-IM
 
 A minimal Hugging Face-compatible PyTorch model for peptide–HLA binding classification using ESM with optional LoRA and cross-attention. There is no custom predict API; inference follows the training path: tokenize peptide and HLA pseudosequence with the ESM tokenizer, pad or truncate to fixed lengths (default peptide=16, HLA=36), run a forward pass as `logits, features = model(epitope_ids, hla_ids)`, then apply softmax to get the binding probability.
 
@@ -31,10 +31,10 @@ pip install torch transformers peft
 import torch
 import torch.nn.functional as F
 from transformers import AutoModel, AutoTokenizer
-model_id = "SkywalkerLu/TransHLA2.0-IM"
+model_id = "SkywalkerLu/TriStageHLA-IM"
 model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
 ```
-## How to use TransHLA2.0-IM
+## How to use TriStageHLA-IM
 ```python
 import torch
 import torch.nn.functional as F
@@ -44,7 +44,7 @@ from transformers import AutoModel, AutoTokenizer
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load model (replace with your model id if different)
-model_id = "SkywalkerLu/TransHLA2.0-IM"
+model_id = "SkywalkerLu/TriStageHLA-IM"
 model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
 
 # Load tokenizer used in training (ESM2 650M)
@@ -98,7 +98,7 @@ from transformers import AutoModel, AutoTokenizer
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load model and tokenizer
-model_id = "SkywalkerLu/TransHLA2.0-IM" # replace with your model id if different
+model_id = "SkywalkerLu/TriStageHLA-IM" # replace with your model id if different
 model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
 tok = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")
 
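
For context, the inference path the README describes can be exercised end to end as a minimal sketch. The model id, ESM2 650M tokenizer, fixed lengths (peptide=16, HLA=36), forward signature `logits, features = model(epitope_ids, hla_ids)`, and the softmax step all come from the README text in this diff; the example peptide, the HLA pseudosequence, and the assumption that index 1 of the logits is the binding class are hypothetical illustration, not part of the source.

```python
# Minimal inference sketch following the README's described path:
# tokenize with the ESM tokenizer, pad/truncate to fixed lengths, forward, softmax.
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

model_id = "SkywalkerLu/TriStageHLA-IM"
model = AutoModel.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
tok = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")

peptide = "SIINFEKL"  # hypothetical 8-mer epitope
hla_pseudo = "YYAMYGEKVAHTHVDTLYVRYHYYTWAVLAYTWY"  # hypothetical HLA pseudosequence

# Pad or truncate to the fixed lengths from the README (peptide=16, HLA=36).
epitope_ids = tok(peptide, padding="max_length", truncation=True,
                  max_length=16, return_tensors="pt").input_ids.to(device)
hla_ids = tok(hla_pseudo, padding="max_length", truncation=True,
              max_length=36, return_tensors="pt").input_ids.to(device)

with torch.no_grad():
    # Forward pass as documented: returns (logits, features).
    logits, features = model(epitope_ids, hla_ids)
    # Assumes a 2-class head with the binding class at index 1.
    prob = F.softmax(logits, dim=-1)[0, 1].item()

print(f"Binding probability: {prob:.4f}")
```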