EPlus-LLM committed on
Commit
ef8decb
·
verified ·
1 Parent(s): b95324d

Upload Run_EPlus_LLM.ipynb

Browse files
Files changed (1) hide show
  1. Run_EPlus_LLM.ipynb +91 -0
Run_EPlus_LLM.ipynb ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": []
7
+ },
8
+ "kernelspec": {
9
+ "name": "python3",
10
+ "display_name": "Python 3"
11
+ },
12
+ "language_info": {
13
+ "name": "python"
14
+ }
15
+ },
16
+ "cells": [
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": 24,
20
+ "metadata": {
21
+ "colab": {
22
+ "base_uri": "https://localhost:8080/"
23
+ },
24
+ "id": "cwihsgMJw-V2",
25
+ "outputId": "a893f7c8-2f8d-496d-9965-25b611511ef5"
26
+ },
27
+ "outputs": [
28
+ {
29
+ "output_type": "stream",
30
+ "name": "stdout",
31
+ "text": [
32
+ "Building Energy Model Auto-Generated: v1_final.idf\n"
33
+ ]
34
+ }
35
+ ],
36
+ "source": [
37
+ "import torch\n",
38
+ "from transformers import (\n",
39
+ " AutoModelForSeq2SeqLM,\n",
40
+ " AutoTokenizer,\n",
41
+ ")\n",
42
+ "\n",
43
+ "# Path of the remaining (fixed) part of the IDF file, and the output path.\n",
44
+ "file_path = \"v1_nextpart.idf\"\n",
45
+ "output_path = \"v1_final.idf\"\n",
46
+ "\n",
47
+ "# Load the tokenizer and the fine-tuned EPlus-LLM model\n",
48
+ "tokenizer = AutoTokenizer.from_pretrained(\"google/flan-t5-large\")\n",
49
+ "model = AutoModelForSeq2SeqLM.from_pretrained(\"EPlus-LLM/EPlus-LLMv1\")\n",
50
+ "\n",
51
+ "# Generation config — NOTE: temperature/top_p only take effect when do_sample=True;\n",
52
+ "generation_config = model.generation_config\n",
53
+ "generation_config.max_new_tokens = 2000\n",
54
+ "generation_config.temperature = 0.1\n",
55
+ "generation_config.top_p = 0.1\n",
56
+ "generation_config.num_return_sequences = 1\n",
57
+ "generation_config.pad_token_id = tokenizer.eos_token_id\n",
58
+ "generation_config.eos_token_id = tokenizer.eos_token_id\n",
59
+ "\n",
60
+ "# Please provide your prompt here — a description of the desired building\n",
61
+ "# For more details, please refer to the paper\n",
62
+ "prompt = \"Simulate a building that is 30.00 meters long, 15.00 meters wide, and 3.50 meters high. The window-to-wall ratio is 0.28. The occupancy rate is 8.00 m2/people, the lighting level is 6.00 W/m2, and the equipment power consumption is 8.80 W/m2.\"\n",
63
+ "input_ids = tokenizer(prompt, return_tensors=\"pt\", truncation=False)\n",
64
+ "generated_ids = model.generate(input_ids = input_ids.input_ids,\n",
65
+ " attention_mask = input_ids.attention_mask,\n",
66
+ " generation_config = generation_config)\n",
67
+ "generated_output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)\n",
68
+ "generated_output = generated_output.replace(\"_\", \" \")\n",
69
+ "generated_output = generated_output.replace(\"|\", \"\\n\")\n",
70
+ "\n",
71
+ "with open(file_path, 'r', encoding='utf-8') as file:\n",
72
+ " nextpart = file.read()\n",
73
+ "final_text = nextpart + \"\\n\\n\" + generated_output\n",
74
+ "with open(output_path, 'w', encoding='utf-8') as f:\n",
75
+ " f.write(final_text)\n",
76
+ "\n",
77
+ "# Output the building energy model in IDF file\n",
78
+ "print(f\"Building Energy Model Auto-Generated: {output_path}\")"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "source": [],
84
+ "metadata": {
85
+ "id": "dDakbuWH5TLt"
86
+ },
87
+ "execution_count": null,
88
+ "outputs": []
89
+ }
90
+ ]
91
+ }