Safetensors
LauraWang1107 commited on
Commit
ce7dc8c
·
verified ·
1 Parent(s): 58a54b7

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -6
README.md CHANGED
@@ -9,12 +9,13 @@ Here's how to extract PepDoRA embeddings for your input peptide:
9
 
10
  ```
11
  import torch
12
- from transformers import AutoModel, AutoTokenizer
13
 
14
  # Load the model and tokenizer
15
  model_name = "ChatterjeeLab/PepDoRA"
16
- model = AutoModel.from_pretrained(model_name, output_hidden_states=True)
17
- tokenizer = AutoTokenizer.from_pretrained(model_name)
 
18
 
19
  # Input peptide sequence
20
  peptide = "CC(C)C[C@H]1NC(=O)[C@@H](C)NCCCCCCNC(=O)[C@H](CO)NC1=O"
@@ -25,12 +26,13 @@ inputs = tokenizer(peptide, return_tensors="pt")
25
  # Get the hidden states (embeddings) from the model
26
  with torch.no_grad():
27
  outputs = model(**inputs)
28
-
 
29
  # Extract the embeddings from the last hidden layer
30
- last_hidden_state = outputs.hidden_states[-1]
31
 
32
  # Print the embedding shape (or the embedding itself)
33
- print(last_hidden_state.shape)
34
  ```
35
 
36
  ## Repository Authors
 
9
 
10
  ```
11
  import torch
12
+ from transformers import RobertaTokenizer, RobertaForMaskedLM
13
 
14
  # Load the model and tokenizer
15
  model_name = "ChatterjeeLab/PepDoRA"
16
+
17
+ tokenizer = RobertaTokenizer.from_pretrained(model_name)
18
+ model = RobertaForMaskedLM.from_pretrained(model_name)
19
 
20
  # Input peptide sequence
21
  peptide = "CC(C)C[C@H]1NC(=O)[C@@H](C)NCCCCCCNC(=O)[C@H](CO)NC1=O"
 
26
  # Get the hidden states (embeddings) from the model
27
  with torch.no_grad():
28
  outputs = model(**inputs)
29
+
30
+
31
  # Extract the output logits to use as embeddings (note: these are MLM-head logits, not hidden states)
32
+ embedding = outputs.logits
33
 
34
  # Print the embedding shape (or the embedding itself)
35
+ print(embedding.shape)
36
  ```
37
 
38
  ## Repository Authors