Safetensors
LauraWang1107 commited on
Commit
0af985f
·
verified ·
1 Parent(s): 4f6e48c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -8
README.md CHANGED
@@ -13,13 +13,13 @@ Here's how to extract PepDoRA embeddings for your input peptide:
13
 
14
  ```
15
  import torch
16
- from transformers import RobertaTokenizer, RobertaForMaskedLM
17
 
18
  # Load the model and tokenizer
19
  model_name = "ChatterjeeLab/PepDoRA"
20
-
21
- tokenizer = RobertaTokenizer.from_pretrained(model_name)
22
- model = RobertaForMaskedLM.from_pretrained(model_name)
23
 
24
  # Input peptide sequence
25
  peptide = "CC(C)C[C@H]1NC(=O)[C@@H](C)NCCCCCCNC(=O)[C@H](CO)NC1=O"
@@ -29,14 +29,14 @@ inputs = tokenizer(peptide, return_tensors="pt")
29
 
30
  # Get the hidden states (embeddings) from the model
31
  with torch.no_grad():
32
- outputs = model(**inputs)
33
 
34
-
35
  # Extract the embeddings from the last hidden layer
36
- embedding = outputs.logits
37
 
38
  # Print the embedding shape (or the embedding itself)
39
- print(embedding.shape)
 
40
  ```
41
 
42
  ## Repository Authors
 
13
 
14
  ```
15
  import torch
16
+ from transformers import AutoTokenizer, AutoModel
17
 
18
  # Load the model and tokenizer
19
  model_name = "ChatterjeeLab/PepDoRA"
20
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
21
+ model = AutoModel.from_pretrained(model_name, output_hidden_states=True)
22
+
23
 
24
  # Input peptide sequence
25
  peptide = "CC(C)C[C@H]1NC(=O)[C@@H](C)NCCCCCCNC(=O)[C@H](CO)NC1=O"
 
29
 
30
  # Get the hidden states (embeddings) from the model
31
  with torch.no_grad():
32
+ outputs = model(**inputs, output_hidden_states=True)
33
 
 
34
  # Extract the embeddings from the last hidden layer
35
+ embedding = outputs.last_hidden_state
36

37

  # Print the embedding shape (or the embedding itself)
38

+ print(outputs.last_hidden_state.shape)
39
+ print(embedding)
40
  ```
41
 
42
  ## Repository Authors