{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "cwihsgMJw-V2",
"outputId": "a893f7c8-2f8d-496d-9965-25b611511ef5"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Building Energy Model Auto-Generated: v1_final.idf\n"
]
}
],
"source": [
"import torch\n",
"from transformers import (\n",
"    AutoModelForSeq2SeqLM,\n",
"    AutoTokenizer,\n",
")\n",
"\n",
"# Paths: the fixed \"rest part\" of the IDF file, and where the final model goes.\n",
"file_path = \"v1_nextpart.idf\"\n",
"output_path = \"v1_final.idf\"\n",
"\n",
"# Load the EPlus-LLM model: flan-t5-large tokenizer + fine-tuned seq2seq weights.\n",
"tokenizer = AutoTokenizer.from_pretrained(\"google/flan-t5-large\")\n",
"model = AutoModelForSeq2SeqLM.from_pretrained(\"EPlus-LLM/EPlus-LLMv1\")\n",
"\n",
"# Generation config.\n",
"# NOTE(review): temperature/top_p only take effect when do_sample=True; with the\n",
"# default do_sample=False decoding is greedy and these two settings are ignored.\n",
"# Left as-is to preserve current (deterministic) behavior — confirm intent.\n",
"generation_config = model.generation_config\n",
"generation_config.max_new_tokens = 2000\n",
"generation_config.temperature = 0.1\n",
"generation_config.top_p = 0.1\n",
"generation_config.num_return_sequences = 1\n",
"generation_config.pad_token_id = tokenizer.eos_token_id\n",
"generation_config.eos_token_id = tokenizer.eos_token_id\n",
"\n",
"# Please provide your input here — a natural-language description of the desired\n",
"# building. For the expected format, please refer to the paper.\n",
"# (Renamed from `input` to avoid shadowing the Python builtin.)\n",
"prompt = \"Simulate a building that is 30.00 meters long, 15.00 meters wide, and 3.50 meters high. The window-to-wall ratio is 0.28. The occupancy rate is 8.00 m2/people, the lighting level is 6.00 W/m2, and the equipment power consumption is 8.80 W/m2.\"\n",
"encoded = tokenizer(prompt, return_tensors=\"pt\", truncation=False)\n",
"generated_ids = model.generate(input_ids=encoded.input_ids,\n",
"                               attention_mask=encoded.attention_mask,\n",
"                               generation_config=generation_config)\n",
"generated_output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)\n",
"# Undo the model's output encoding: presumably \"_\" stands for a space and \"|\"\n",
"# for a newline in the generated IDF text — TODO confirm against the paper.\n",
"generated_output = generated_output.replace(\"_\", \" \")\n",
"generated_output = generated_output.replace(\"|\", \"\\n\")\n",
"\n",
"# Stitch the fixed part and the generated part together into the final IDF.\n",
"with open(file_path, 'r', encoding='utf-8') as file:\n",
"    nextpart = file.read()\n",
"final_text = nextpart + \"\\n\\n\" + generated_output\n",
"with open(output_path, 'w', encoding='utf-8') as f:\n",
"    f.write(final_text)\n",
"\n",
"# Output the building energy model in IDF file\n",
"print(f\"Building Energy Model Auto-Generated: {output_path}\")"
]
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "dDakbuWH5TLt"
},
"execution_count": null,
"outputs": []
}
]
} |