---
license: apache-2.0
---
# ProLLaMA: A Protein Large Language Model for Multi-Task Protein Language Processing

See the [paper on arXiv](https://arxiv.org/abs/2402.16445) and the [GitHub repository](https://github.com/Lyu6PosHao/ProLLaMA) for more information.

# Quick usage:
```bash
# You can replace the HuggingFace repo id with a local model path.
CUDA_VISIBLE_DEVICES=0 python main.py --model "GreatCaptainNemo/ProLLaMA" --interactive
# main.py is shown below 👇
```
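
When run with `--interactive`, the script reads one instruction per `Input:` prompt and stops on an empty line. As a rough sketch of what such instructions look like (the exact multi-task instruction format is documented in the paper and the GitHub repository; the angle-bracket placeholders below are illustrative, not literal):

```text
[Generate by superfamily] Superfamily=<name of a protein superfamily>
[Determine superfamily] Seq=<an amino acid sequence>
```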
```python
import argparse
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer
from transformers import GenerationConfig
from tqdm import tqdm

# Sampling-based decoding: a low temperature keeps outputs close to the model's
# top predictions, and repetition_penalty discourages degenerate repeats.
generation_config = GenerationConfig(
    temperature=0.2,
    top_k=40,
    top_p=0.9,
    do_sample=True,
    num_beams=1,
    repetition_penalty=1.2,
    max_new_tokens=400
)

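# Two ways to run this script: pass --interactive to type instructions at a
# prompt, or pass --input_file/--output_file to batch-process a file of
# instructions, one per line.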
parser = argparse.ArgumentParser()
parser.add_argument('--model', default="GreatCaptainNemo/ProLLaMA", type=str, help="A local model path or a HuggingFace repo id; by default the model is downloaded from HuggingFace.")
parser.add_argument('--interactive', action='store_true', help="If set, instructions are typed interactively; otherwise they are read from --input_file.")
parser.add_argument('--input_file', default=None, help="A file containing the input instructions, one per line.")
parser.add_argument('--output_file', default=None, help="The file in which all outputs are saved.")
args = parser.parse_args()

if __name__ == '__main__':
    # Reject contradictory or incomplete combinations of flags.
    if args.interactive and args.input_file:
        raise ValueError("interactive is True, but input_file is not None.")
    if (not args.interactive) and (args.input_file is None):
        raise ValueError("interactive is False, but input_file is None.")
    if args.input_file and (args.output_file is None):
        raise ValueError("input_file is not None, but output_file is None.")

    load_type = torch.bfloat16
    if torch.cuda.is_available():
        device = torch.device(0)
    else:
        raise ValueError("No GPU available.")

    # device_map='auto' places the weights on the available GPU(s) automatically.
    model = LlamaForCausalLM.from_pretrained(
        args.model,
        torch_dtype=load_type,
        low_cpu_mem_usage=True,
        device_map='auto',
        quantization_config=None
    )
    tokenizer = LlamaTokenizer.from_pretrained(args.model)

    model.eval()
    with torch.no_grad():
        if args.interactive:
            # Interactive mode: keep reading instructions until an empty line.
            while True:
                raw_input_text = input("Input:")
                if len(raw_input_text.strip()) == 0:
                    break
                input_text = tokenizer(raw_input_text, return_tensors="pt")

                generation_output = model.generate(
                    input_ids=input_text["input_ids"].to(device),
                    attention_mask=input_text['attention_mask'].to(device),
                    eos_token_id=tokenizer.eos_token_id,
                    pad_token_id=tokenizer.pad_token_id,
                    generation_config=generation_config,
                    output_attentions=False
                )
                s = generation_output[0]
                output = tokenizer.decode(s, skip_special_tokens=True)
                print("Output:", output)
                print("\n")
        else:
            # Batch mode: generate for every instruction in input_file and
            # save all outputs to output_file.
            outputs = []
            with open(args.input_file, 'r') as f:
                examples = f.read().splitlines()
            print("Start generating...")
            for index, example in tqdm(enumerate(examples), total=len(examples)):
                input_text = tokenizer(example, return_tensors="pt")  # add_special_tokens=False ?

                generation_output = model.generate(
                    input_ids=input_text["input_ids"].to(device),
                    attention_mask=input_text['attention_mask'].to(device),
                    eos_token_id=tokenizer.eos_token_id,
                    pad_token_id=tokenizer.pad_token_id,
                    generation_config=generation_config
                )
                s = generation_output[0]
                output = tokenizer.decode(s, skip_special_tokens=True)
                outputs.append(output)
            with open(args.output_file, 'w') as f:
                f.write("\n".join(outputs))
            print("All the outputs have been saved in", args.output_file)
```
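
For batch processing, put one instruction per line in a file and pass it with `--input_file` (the file names here are just examples):

```bash
CUDA_VISIBLE_DEVICES=0 python main.py --model "GreatCaptainNemo/ProLLaMA" --input_file input.txt --output_file output.txt
```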
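
If the model does not fit in GPU memory in bf16, a minimal sketch of a quantized variant (assuming the `bitsandbytes` package is installed; this is not part of the original script) is to build a quantization config and pass it to `from_pretrained` instead of `None`:

```python
from transformers import BitsAndBytesConfig

# Hypothetical 4-bit loading config; pass it as quantization_config=... above.
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
```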