Update README.md
Browse files
README.md
CHANGED
|
@@ -30,24 +30,22 @@ The code used to generate cosmosage_v2 is available at https://github.com/tijmen
|
|
| 30 |
After downloading cosmosage_v2, the following example code can be used to ask questions:
|
| 31 |
|
| 32 |
```python
|
| 33 |
-
|
| 34 |
|
| 35 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 36 |
import torch
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
tokenizer.encode(
|
| 46 |
-
|
| 47 |
-
tokenizer.
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
|
| 51 |
```
|
| 52 |
|
| 53 |
## Comparison to cosmosage_v1
|
|
|
|
| 30 |
After downloading cosmosage_v2, the following example code can be used to ask questions:
|
| 31 |
|
| 32 |
```python
|
| 33 |
+
# Example: load cosmosage_v2 from a local checkout and query it.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "models/cosmosage_v2/"

# Load the model onto the GPU in bfloat16, along with its tokenizer.
device = torch.device("cuda")
model = AutoModelForCausalLM.from_pretrained(model_path).to(device, dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)

def ask_cosmosage(question, answer_start=''):
    """Ask cosmosage a question and return the generated answer text.

    answer_start optionally seeds the beginning of the assistant's reply,
    steering the model toward a particular opening.
    """
    full_prompt = f"You are cosmosage, an AI programmed to be a cosmology expert. You answer the USER's question clearly in long form, always providing context. When appropriate, provide a reference.USER: {question}ASSISTANT: {answer_start}"
    prompt_ids = tokenizer.encode(full_prompt, return_tensors="pt").to(device)
    # Sample up to 1024 tokens; the EOS token doubles as the pad token.
    output_ids = model.generate(
        prompt_ids,
        max_length=1024,
        do_sample=True,
        temperature=0.7,
        top_k=None,
        pad_token_id=tokenizer.eos_token_id,
    )
    decoded = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    # Everything after the final "ASSISTANT:" marker is the model's answer.
    return decoded.split("ASSISTANT:")[-1]
|
|
|
|
| 49 |
```
|
| 50 |
|
| 51 |
## Comparison to cosmosage_v1
|