# LLMGeoBenchmarking / config.yaml
---
# Models to benchmark. Each entry pairs a Hugging Face model id with the
# generation parameters used for that model's requests.
models:
  - name: "mistralai/Mistral-7B-Instruct"
    params:
      max_tokens: 256      # cap on generated tokens per request
      temperature: 0.2     # low temperature -> near-deterministic output
  - name: "meta-llama/Llama-2-7b-chat-hf"
    params:
      max_tokens: 256
      temperature: 0.2

# Prompt templates per task. NOTE(review): {{question}} / {{context}} are
# presumably substituted by the benchmarking harness — confirm placeholder
# syntax against the consumer before renaming.
prompt_templates:
  qa: "Answer the question: {{question}}"
  summarization: "Summarize this text: {{context}}"

# Runtime / output settings.
runtime:
  output_results_csv: "results.csv"   # per-example results table
  output_summary_md: "summary.md"     # aggregated summary report
  request_timeout_seconds: 60         # timeout per model request, in seconds