Update README.md
Browse files
README.md
CHANGED
|
@@ -35,13 +35,13 @@ The model contains three versions:
|
|
| 35 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 36 |
|
| 37 |
tokenizer_test = AutoTokenizer.from_pretrained(
|
| 38 |
-
model_path,
|
| 39 |
-
token =
|
| 40 |
)
|
| 41 |
model_test = AutoModelForCausalLM.from_pretrained(
|
| 42 |
-
model_path,
|
| 43 |
-
device_map=
|
| 44 |
-
token =
|
| 45 |
)
|
| 46 |
|
| 47 |
def complete_chat(system, prompt, model, tokenizer):
|
|
@@ -72,7 +72,7 @@ The model contains three versions:
|
|
| 72 |
return summary
|
| 73 |
|
| 74 |
if __name__ == "__main__":
|
| 75 |
-
genes = "Your
|
| 76 |
result = llama(genes)
|
| 77 |
print(result)
|
| 78 |
```
|
|
|
|
| 35 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 36 |
|
| 37 |
tokenizer_test = AutoTokenizer.from_pretrained(
|
| 38 |
+
model_path = "xxxxxxxxx", # The local path where the model is located
|
| 39 |
+
token = "xxxxxxxxx" # Your Hugging Face access token
|
| 40 |
)
|
| 41 |
model_test = AutoModelForCausalLM.from_pretrained(
|
| 42 |
+
model_path = "xxxxxxxxx", # The local path where the model is located
|
| 43 |
+
device_map = "auto",
|
| 44 |
+
token = "xxxxxxxxx" # Your Hugging Face access token
|
| 45 |
)
|
| 46 |
|
| 47 |
def complete_chat(system, prompt, model, tokenizer):
|
|
|
|
| 72 |
return summary
|
| 73 |
|
| 74 |
if __name__ == "__main__":
|
| 75 |
+
genes = "xxxxxxxxx" # Your private gene set, separated by commas (,)
|
| 76 |
result = llama(genes)
|
| 77 |
print(result)
|
| 78 |
```
|