Update README.md
README.md CHANGED

````diff
@@ -8,7 +8,6 @@ tags: []
 
 
 ``` python
-
 import os
 import torch
 import pandas as pd
@@ -29,15 +28,11 @@ from transformers import (
 
 # should install transformers 4.51.3
 
-
 hf_token = "xxxxxxxxxxxxxxxxxxxxxxxxxxxe"
 login(hf_token)
 
-
-
 model_id = "NYUAD-ComNets/NYUAD_Llama4_Inheritance_Solver"
 
-
 # Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 model = Llama4ForConditionalGeneration.from_pretrained(
@@ -55,8 +50,6 @@ inference_prompt_template = """Answer the following question using a single word
 ### Response:
 {}"""
 
-
-
 def generate_answer(context):
     prompt = inference_prompt_template.format(context, "")
     inputs = tokenizer(prompt + tokenizer.eos_token, return_tensors="pt").to("cuda")
@@ -78,7 +71,6 @@ def generate_answer(context):
     response = response[0].split("### Response:")[1][-1]
 
 
-
 df = pd.read_csv('/path_to/islamic_inheritance_problem.csv')
 for k, o1, o2, o3, o4, o5, o6 in zip(df.question.values
     , df.option1.values, df.option2.values
@@ -90,8 +82,6 @@ for k, o1, o2, o3, o4, o5, o6 in zip(df.question.values
 
     predicted_label = generate_answer(example)
     print("Predicted:", predicted_label)
-
-
 ```
 
 
````
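The `# should install transformers 4.51.3` comment translates to a pin along these lines; the packages beyond `transformers` are inferred from the snippet's imports rather than stated in the README:

```bash
pip install transformers==4.51.3 torch pandas huggingface_hub
```

`Llama4ForConditionalGeneration` first shipped in transformers 4.51.0, which is presumably why the README pins a 4.51.x release.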
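Hard-coding `hf_token` in a shared README risks leaking a real credential. One common alternative (an assumption here, not what the model card does) is to read the token from the environment, which would also put the snippet's otherwise unused `import os` to work:

```python
import os
from huggingface_hub import login

# Assumes an HF_TOKEN variable exported in the shell, e.g. `export HF_TOKEN=hf_...`;
# this keeps the literal token out of version control.
login(os.environ["HF_TOKEN"])
```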
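Because the hunks elide the model-loading arguments, the top of the prompt template, the generation call, and the construction of `example`, the snippet is not runnable exactly as shown. The sketch below fills those gaps with assumptions: the template body above `### Response:`, the `torch_dtype`/`device_map` choices, the `max_new_tokens` budget, and the `example` layout are all illustrative, not the README's actual values.

```python
# Minimal sketch of the README's inference flow; assumed pieces are marked.
import torch
import pandas as pd
from huggingface_hub import login
from transformers import AutoTokenizer, Llama4ForConditionalGeneration

login("hf_xxxxxxxx")  # your Hugging Face access token

model_id = "NYUAD-ComNets/NYUAD_Llama4_Inheritance_Solver"

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = Llama4ForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed; the real kwargs sit in an elided hunk
    device_map="auto",
    trust_remote_code=True,
)

# Assumed template body: only the "### Response:\n{}" tail appears in the diff.
inference_prompt_template = """Answer the following question using a single word.

### Question:
{}

### Response:
{}"""

def generate_answer(context):
    prompt = inference_prompt_template.format(context, "")
    inputs = tokenizer(prompt + tokenizer.eos_token, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=16)  # assumed budget
    response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    # Keep only what follows the response marker, as the README does.
    return response[0].split("### Response:")[1][-1]

df = pd.read_csv("/path_to/islamic_inheritance_problem.csv")
for k, o1, o2, o3, o4, o5, o6 in zip(df.question.values,
                                     df.option1.values, df.option2.values,
                                     df.option3.values, df.option4.values,
                                     df.option5.values, df.option6.values):
    # Assumed layout for pairing the question with its six options.
    example = (
        f"{k}\n"
        f"option1: {o1}\noption2: {o2}\noption3: {o3}\n"
        f"option4: {o4}\noption5: {o5}\noption6: {o6}"
    )
    predicted_label = generate_answer(example)
    print("Predicted:", predicted_label)
```

Two details from the original hunks are worth flagging: old line 78 assigns the parsed answer instead of returning it (the sketch uses `return`, otherwise `predicted_label` would always be `None`), and the trailing `[-1]` keeps only the last character after `### Response:`, which is sensible if the model answers with an option label ending in a digit (e.g. `option3`) but should become `.strip()` if it answers with a full word.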