Upload README.md with huggingface_hub
README.md CHANGED

@@ -58,8 +58,12 @@ from transformers import AutoModel
 device = torch.device("cpu")
 pad_char = "X" # Padding character
 target_length = 33 # Target length for sequence padding
-mode = "
-esm_ratio =
+mode = "PTMS" # Mode setting (only configured in example.py)
+esm_ratio = 0.95 # Ratio for ESM embeddings
+
+# Load the PDeepPP model
+model_name = "fondress/PDeepPP_N-linked-glycosylation-N"
+model = AutoModel.from_pretrained(model_name, trust_remote_code=True) # Directly load the model
 
 # Initialize the PDeepPPProcessor
 processor = PDeepPPProcessor(pad_char=pad_char, target_length=target_length)
@@ -96,10 +100,6 @@ pretrained_features = pretrainer.create_embeddings(
 # Ensure pretrained features are on the same device
 inputs["input_embeds"] = pretrained_features.to(device)
 
-# Load the PDeepPP model
-model_name = "fondress/PDeepPP_ACE"
-model = AutoModel.from_pretrained(model_name, trust_remote_code=True) # Directly load the model
-
 # Perform prediction
 model.eval()
 outputs = model(input_embeds=inputs["input_embeds"]) # Use pretrained features as model input
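For reference, here is a minimal sketch of how the updated snippet fits together as one block. It assumes `PDeepPPProcessor` and the `pretrainer` come from this repository's custom code (the diff only shows `pretrainer.create_embeddings(` as hunk context, so the exact import paths are not known here); the README's unchanged middle steps are marked with a comment rather than reconstructed.

```python
# Sketch of the post-commit README flow. PDeepPPProcessor and the
# pretrainer are assumed to come from this repo's custom code; their
# import paths are not shown in the diff and are left out here.
import torch
from transformers import AutoModel

device = torch.device("cpu")
pad_char = "X"  # Padding character
target_length = 33  # Target length for sequence padding
mode = "PTMS"  # Mode setting (only configured in example.py)
esm_ratio = 0.95  # Ratio for ESM embeddings

# Load the PDeepPP model up front (this commit moves the load here,
# before processor initialization)
model_name = "fondress/PDeepPP_N-linked-glycosylation-N"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True)

# Initialize the PDeepPPProcessor
processor = PDeepPPProcessor(pad_char=pad_char, target_length=target_length)

# ... unchanged README steps: process sequences into `inputs` and build
# `pretrained_features` via pretrainer.create_embeddings(...) ...

# Ensure pretrained features are on the same device as the model
inputs["input_embeds"] = pretrained_features.to(device)

# Perform prediction
model.eval()
with torch.no_grad():  # inference only, so skip gradient tracking
    outputs = model(input_embeds=inputs["input_embeds"])
```

Net effect of the commit: the model load moves to the top of the script so all configuration (device, padding, mode, esm_ratio, checkpoint) sits in one place, the duplicate load before the prediction step is removed, and the example checkpoint switches from fondress/PDeepPP_ACE to fondress/PDeepPP_N-linked-glycosylation-N.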