EPlus-LLM committed on
Commit
268ec13
·
verified ·
1 Parent(s): c8268d0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +23 -10
README.md CHANGED
@@ -37,33 +37,46 @@ Here provides a code snippet to show you how to load the EPlus-LLM and auto-gene
37
  ```python
38
  import torch
39
  from transformers import (
40
- AutoModelForSeq2SeqLM,
41
  AutoTokenizer,
42
  )
43
 
 
 
 
 
 
44
  tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
45
  model = AutoModelForSeq2SeqLM.from_pretrained("EPlus-LLM/EPlus-LLMv1")
46
 
 
47
  generation_config = model.generation_config
48
- generation_config.max_new_tokens = 1300
49
  generation_config.temperature = 0.1
50
  generation_config.top_p = 0.1
51
  generation_config.num_return_sequences = 1
52
  generation_config.pad_token_id = tokenizer.eos_token_id
53
  generation_config.eos_token_id = tokenizer.eos_token_id
54
 
55
- input="<Your input, description of the desired building.>"
56
- input_ids = tokenizer(input, return_tensors="pt", truncation=False).to(device)
 
 
57
  generated_ids = model.generate(input_ids = input_ids.input_ids,
58
  attention_mask = input_ids.attention_mask,
59
  generation_config = generation_config)
60
-
61
  generated_output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
62
- generated_output = new_tokens.replace("_", " ")
63
- generated_ooutput = new_tokens.replace("|", "\n")
64
-
65
- print(generated_output)
66
-
 
 
 
 
 
 
67
  ```
68
 
69
  ## 📝 Citation
 
37
  ```python
38
  import torch
39
  from transformers import (
40
+ AutoModelForSeq2SeqLM,
41
  AutoTokenizer,
42
  )
43
 
44
+ # Input the rest part of the IDF file.
45
+ file_path = "v1_nextpart.idf"
46
+ output_path = "v1_final.idf"
47
+
48
+ # Load the EPlus-LLM model
49
  tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
50
  model = AutoModelForSeq2SeqLM.from_pretrained("EPlus-LLM/EPlus-LLMv1")
51
 
52
+ # Generation config
53
  generation_config = model.generation_config
54
+ generation_config.max_new_tokens = 2000
55
  generation_config.temperature = 0.1
56
  generation_config.top_p = 0.1
57
  generation_config.num_return_sequences = 1
58
  generation_config.pad_token_id = tokenizer.eos_token_id
59
  generation_config.eos_token_id = tokenizer.eos_token_id
60
 
61
+ # Please provide your input here — a description of the desired building
62
+ # For more details, please refer to the paper
63
+ input="Simulate a building that is 30.00 meters long, 15.00 meters wide, and 3.50 meters high. The window-to-wall ratio is 0.28. The occupancy rate is 8.00 m2/people, the lighting level is 6.00 W/m2, and the equipment power consumption is 8.80 W/m2."
64
+ input_ids = tokenizer(input, return_tensors="pt", truncation=False)
65
  generated_ids = model.generate(input_ids = input_ids.input_ids,
66
  attention_mask = input_ids.attention_mask,
67
  generation_config = generation_config)
 
68
  generated_output = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
69
+ generated_output = generated_output.replace("_", " ")
70
+ generated_output = generated_output.replace("|", "\n")
71
+
72
+ with open(file_path, 'r', encoding='utf-8') as file:
73
+ nextpart = file.read()
74
+ final_text = nextpart + "\n\n" + generated_output
75
+ with open(output_path, 'w', encoding='utf-8') as f:
76
+ f.write(final_text)
77
+
78
+ # Output the building energy model in IDF file
79
+ print(f"Building Energy Model Auto-Generated: {output_path}")
80
  ```
81
 
82
  ## 📝 Citation