EPlus-LLM commited on
Commit
71a966f
·
verified ·
1 Parent(s): b257c86

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -43,11 +43,11 @@ from transformers import (
43
  AutoTokenizer,
44
  )
45
 
46
- # Input the rest port of IDF file.
47
  file_path = "v1_nextpart.idf"
48
  output_path = "v1_final.idf"
49
 
50
- # Input the EPlus-LLM model
51
  tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
52
  model = AutoModelForSeq2SeqLM.from_pretrained("EPlus-LLM/EPlus-LLMv1")
53
 
@@ -61,7 +61,7 @@ generation_config.pad_token_id = tokenizer.eos_token_id
61
  generation_config.eos_token_id = tokenizer.eos_token_id
62
 
63
  # Please provide your input here — a description of the desired building
64
- # For more details, please refer to the paper
65
  input="Simulate a building that is 30.00 meters long, 15.00 meters wide, and 3.50 meters high. The window-to-wall ratio is 0.28. The occupancy rate is 8.00 m2/people, the lighting level is 6.00 W/m2, and the equipment power consumption is 8.80 W/m2."
66
  input_ids = tokenizer(input, return_tensors="pt", truncation=False)
67
  generated_ids = model.generate(input_ids = input_ids.input_ids,
 
43
  AutoTokenizer,
44
  )
45
 
46
+ # Load the rest part of the IDF file.
47
  file_path = "v1_nextpart.idf"
48
  output_path = "v1_final.idf"
49
 
50
+ # Load the EPlus-LLM model
51
  tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
52
  model = AutoModelForSeq2SeqLM.from_pretrained("EPlus-LLM/EPlus-LLMv1")
53
 
 
61
  generation_config.eos_token_id = tokenizer.eos_token_id
62
 
63
  # Please provide your input here — a description of the desired building
64
+ # For more details, please refer to the paper: https://doi.org/10.1016/j.apenergy.2024.123431
65
  input="Simulate a building that is 30.00 meters long, 15.00 meters wide, and 3.50 meters high. The window-to-wall ratio is 0.28. The occupancy rate is 8.00 m2/people, the lighting level is 6.00 W/m2, and the equipment power consumption is 8.80 W/m2."
66
  input_ids = tokenizer(input, return_tensors="pt", truncation=False)
67
  generated_ids = model.generate(input_ids = input_ids.input_ids,