from transformers import AutoTokenizer, AutoModelForCausalLM

# Hoist the checkpoint id so the two from_pretrained calls cannot drift apart.
MODEL_ID = 'quidangz/LLamaEE-8B-Instruct-ZeroShot'

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map="cuda",
)

# Llama tokenizers ship without a pad token; fall back to EOS so that
# padding/generation does not error out.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = model.config.eos_token_id
# Zero-shot event-extraction prompt template. The model is instructed to emit
# '<event_type>: <trigger> | <role>: <arg> | ...' or the literal 'None'.
user_prompt = """
Extract events and their components from text **strictly using ONLY the provided Event List** below and **MUST** strictly adhere to the output format.
Format output as '<event_type>: <trigger_word> | <role1>: <argument1> | <role2>: <argument2>' and separate multiple events with '|'. Return 'None' if no events are identified.
Event List: {ee_labels}
Text: {text}
"""

query = 'Drug-induced fever due to diltiazem'

# FIX: the original list fused "Treatment.Route" and "Subject.Disorder" into a
# single string ("Treatment.Route,Subject.Disorder") via a misplaced comma;
# they are two separate labels the model may choose from.
ee_labels = [
    "adverse event",
    "potential therapeutic event",
    "Treatment.Drug",
    "Treatment.Route",
    "Subject.Disorder",
    "Treatment.Freq",
    "Treatment",
    "Effect",
]

# Interpolate the label inventory and the input text into the template.
user_prompt = user_prompt.format(ee_labels=ee_labels, text=query)
# Assemble the two-turn conversation: a fixed system persona plus the task prompt.
system_msg = {
    "role": "system",
    "content": "You are an expert in Event Extraction (EE) task.",
}
user_msg = {"role": "user", "content": user_prompt}
messages = [system_msg, user_msg]

# Render the conversation through the model's chat template, leaving the
# assistant turn open so generation continues from there.
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
model_inputs = tokenizer(text, return_tensors="pt").to(model.device)

generated_ids = model.generate(**model_inputs, max_new_tokens=512)

# generate() echoes the prompt tokens; keep only the newly generated tail
# of each sequence before decoding.
generated_ids = [
    full_ids[len(prompt_ids):]
    for prompt_ids, full_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)  # adverse event: due to | Effect: fever | Treatment: diltiazem | Treatment.Drug: diltiazem
Contact
Email: dbqui1706@gmail.com
LinkedIn: Qui Dang
Facebook: Đặng Bá Qúi
Citation
Please cite as:
@misc{LlamaEE-8B-Instruct-ZeroShot,
title={LlamaEE: A Large Language Model for Event Extraction},
author={Qui Dang Ba},
year={2025},
publisher={Huggingface},
}
- Downloads last month
- -
Model tree for quidangz/LLamaEE-8B-Instruct-ZeroShot
Base model
meta-llama/Llama-3.1-8B
Finetuned
meta-llama/Llama-3.1-8B-Instruct
Finetuned
unsloth/Meta-Llama-3.1-8B-Instruct