hkayabilisim commited on
Commit
768d678
·
1 Parent(s): c34864f

Engine is embedded in a portal v0.0.1

Browse files
Files changed (49) hide show
  1. Dockerfile +32 -0
  2. LICENSE +17 -24
  3. Procfile +7 -0
  4. README.md +3 -4
  5. docs/conf.py +0 -27
  6. docs/gui.rst +0 -3
  7. docs/index.rst +0 -3
  8. docs/install.rst +0 -3
  9. docs/make.bat +0 -3
  10. docs/quickstart.rst +0 -3
  11. mypy.ini +3 -0
  12. pyproject.toml +27 -16
  13. src/gui/requirements.txt → requirements.txt +2 -1
  14. src/gui/Dockerfile +0 -23
  15. src/gui/app_datagenerator.py +0 -63
  16. src/gui/app_engine.py +0 -515
  17. src/gui/sandbox.py +0 -100
  18. src/tests/Input_DistributionTables_20230614.xlsx +0 -3
  19. src/tests/nairobi_business_buildings.geojson +0 -3
  20. src/tests/nairobi_business_household.json +0 -3
  21. src/tests/nairobi_business_individual.json +0 -3
  22. src/tests/nairobi_business_landuse.geojson +0 -3
  23. src/tests/nairobi_earthquake_fragility.json +0 -3
  24. src/tests/nairobi_earthquake_intensity.geojson +0 -3
  25. src/tests/nairobi_flood_depth_50yr.geojson +0 -3
  26. src/tests/nairobi_flood_vulnerability.json +0 -3
  27. src/tests/nairobi_power_edges.geojson +0 -3
  28. src/tests/nairobi_power_fragility.json +0 -3
  29. src/tests/nairobi_power_nodes.geojson +0 -3
  30. src/tests/polygonsTV50_v2b.zip +0 -3
  31. src/tomorrowcities/__init__.py +0 -7
  32. src/tomorrowcities/core.py +0 -1463
  33. src/tomorrowcities/utils.py +0 -10
  34. tomorrowcities/__init__.py +3 -0
  35. {src/gui → tomorrowcities/backend}/engine.py +16 -16
  36. tomorrowcities/components/__init__.py +2 -0
  37. tomorrowcities/components/article.py +25 -0
  38. tomorrowcities/components/header.py +6 -0
  39. tomorrowcities/components/layout.py +6 -0
  40. tomorrowcities/content/articles/data_formats.md +53 -0
  41. tomorrowcities/content/articles/power_network_analysis copy.md +14 -0
  42. tomorrowcities/content/articles/welcome.md +78 -0
  43. docs/Makefile → tomorrowcities/content/images/tcdse_demo1.mp4 +2 -2
  44. tomorrowcities/data.py +30 -0
  45. tomorrowcities/pages/__init__.py +121 -0
  46. tomorrowcities/pages/account.py +13 -0
  47. tomorrowcities/pages/docs.py +25 -0
  48. src/gui/app_engine_v2.py → tomorrowcities/pages/engine.py +208 -196
  49. tomorrowcities/pages/settings.py +8 -0
Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.9
2
+
3
+ RUN useradd -m -u 1000 user
4
+
5
+ #USER root
6
+ RUN apt update
7
+ RUN apt -y install gdal-bin libgdal-dev
8
+
9
+ USER user
10
+
11
+ ENV HOME=/home/user \
12
+ PATH=/home/user/.local/bin:$PATH
13
+
14
+ COPY --chown=user . $HOME/app
15
+
16
+ WORKDIR $HOME/app
17
+
18
+
19
+
20
+ RUN (cd tomorrowcities && pip install -e .)
21
+
22
+ CMD ["solara", "run", "tomorrowcities.pages", "--host", "0.0.0.0", "--port", "7860"]
23
+ #COPY ./requirements.txt $HOME/app/requirements.txt
24
+
25
+
26
+
27
+ #RUN pip install --no-cache-dir --upgrade -r $HOME/app/requirements.txt
28
+
29
+ #COPY . .
30
+
31
+ #CMD ["solara", "run", "app_engine.py", "--host", "0.0.0.0", "--port", "7860"]
32
+
LICENSE CHANGED
@@ -1,28 +1,21 @@
1
- BSD 3-Clause License
2
 
3
- Copyright (c) 2023, Tomorrow's Cities
4
 
5
- Redistribution and use in source and binary forms, with or without
6
- modification, are permitted provided that the following conditions are met:
 
 
 
 
7
 
8
- 1. Redistributions of source code must retain the above copyright notice, this
9
- list of conditions and the following disclaimer.
10
 
11
- 2. Redistributions in binary form must reproduce the above copyright notice,
12
- this list of conditions and the following disclaimer in the documentation
13
- and/or other materials provided with the distribution.
14
-
15
- 3. Neither the name of the copyright holder nor the names of its
16
- contributors may be used to endorse or promote products derived from
17
- this software without specific prior written permission.
18
-
19
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
1
+ The MIT License (MIT)
2
 
3
+ Copyright (c) 2022
4
 
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
 
12
+ The above copyright notice and this permission notice shall be included in
13
+ all copies or substantial portions of the Software.
14
 
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21
+ THE SOFTWARE.
 
 
 
 
 
 
 
 
 
 
 
Procfile ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # heroku by default sets WEB_CONCURRENCY=2
2
+ # see: https://devcenter.heroku.com/changelog-items/618
3
+ # which uvicorn picks up, unless we explicitly set --workers 1
4
+ # see https://www.uvicorn.org/deployment/
5
+ # we do not support multiple workers yet
6
+ # we also need to bind to 0.0.0.0 otherwise heroku cannot route to our server
7
+ web: solara run tomorrowcities.pages --port=$PORT --no-open --host=0.0.0.0 --workers 1
README.md CHANGED
@@ -1,5 +1,4 @@
1
- # TomorrowCities Python Library
2
 
3
- ## Running GUI
4
-
5
- ### 
 
1
+ # Release Notes
2
 
3
+ ## v0.0.1
4
+ *
 
docs/conf.py DELETED
@@ -1,27 +0,0 @@
1
- # Configuration file for the Sphinx documentation builder.
2
- #
3
- # For the full list of built-in configuration values, see the documentation:
4
- # https://www.sphinx-doc.org/en/master/usage/configuration.html
5
-
6
- # -- Project information -----------------------------------------------------
7
- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
8
-
9
- project = 'TomorrowCities'
10
- copyright = '2023, H. Kaya'
11
- author = 'H. Kaya'
12
-
13
- # -- General configuration ---------------------------------------------------
14
- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
15
-
16
- extensions = []
17
-
18
- templates_path = ['_templates']
19
- exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
20
-
21
-
22
-
23
- # -- Options for HTML output -------------------------------------------------
24
- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
25
-
26
- html_theme = 'sphinx_rtd_theme'
27
- html_static_path = ['_static']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
docs/gui.rst DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f07d5104e957d606acc39c49bbfe21eacfac18e12b84177fc1fa5e90f6d74ef9
3
- size 336
 
 
 
 
docs/index.rst DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c544036eaadf2037605d86ec18230028fa57eaefa0e3a6f18afc4bce94cee39c
3
- size 460
 
 
 
 
docs/install.rst DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:99c1e6903a483076adc5059007aaab2ad10ef94b6afec3818a2f804f97972d7d
3
- size 210
 
 
 
 
docs/make.bat DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:bca6b3244f115b8f42cbc2bb866f1061b95eb6f01df1891e2ac68f03f7569d7b
3
- size 800
 
 
 
 
docs/quickstart.rst DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:b112f604684fac6a20c3e7350b59ef814cb925802ca31937c066781079ab5289
3
- size 579
 
 
 
 
mypy.ini ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [mypy]
2
+ check_untyped_defs = True
3
+ ignore_missing_imports = True
pyproject.toml CHANGED
@@ -1,24 +1,35 @@
1
  [build-system]
2
- requires = ["hatchling","fiona","geopandas","pandas","uuid","numpy"]
3
  build-backend = "hatchling.build"
4
 
5
  [project]
6
  name = "tomorrowcities"
7
- version = "0.0.2"
8
- authors = [
9
- { name="Huseyin Kaya", email="hkayabilisim@gmail.com" },
10
- { name="Prashant Rawal", email="prashantrawal@nset.org.np" },
11
- { name="Erdem Ozer", email="ozerer@gmail.com" },
12
- ]
13
- description = "Tomorrow Cities' Python Library"
14
- readme = "README.md"
15
- requires-python = ">=3.7"
16
- classifiers = [
17
- "Programming Language :: Python :: 3",
18
- "License :: OSI Approved :: MIT License",
19
- "Operating System :: OS Independent",
 
20
  ]
21
 
 
 
 
 
 
22
  [project.urls]
23
- "Homepage" = "https://github.com/TomorrowsCities/tomorrowcities"
24
- "Bug Tracker" = "https://github.com/TomorrowsCities/tomorrowcities"
 
 
 
 
 
 
1
  [build-system]
2
+ requires = ["hatchling >=0.25"]
3
  build-backend = "hatchling.build"
4
 
5
  [project]
6
  name = "tomorrowcities"
7
+ license = {file = "LICENSE"}
8
+ classifiers = ["License :: OSI Approved :: MIT License"]
9
+ dynamic = ["version", "description"]
10
+ dependencies = [
11
+ "solara",
12
+ "geopandas",
13
+ "ipyleaflet",
14
+ "plotly",
15
+ "lorem_text",
16
+ "matplotlib",
17
+ "psycopg2-binary",
18
+ "scipy",
19
+ "pandas",
20
+ "networkx"
21
  ]
22
 
23
+ [tool.hatch.version]
24
+ path = "tomorrowcities/__init__.py"
25
+
26
+
27
+
28
  [project.urls]
29
+ Home = "https://github.com/TomorrowsCities/tomorrowcities"
30
+
31
+ [tool.black]
32
+ line-length = 160
33
+
34
+ [tool.isort]
35
+ profile = "black"
src/gui/requirements.txt → requirements.txt RENAMED
@@ -5,4 +5,5 @@ plotly
5
  lorem_text
6
  matplotlib
7
  psycopg2-binary
8
- scipy
 
 
5
  lorem_text
6
  matplotlib
7
  psycopg2-binary
8
+ scipy
9
+ pandas
src/gui/Dockerfile DELETED
@@ -1,23 +0,0 @@
1
- FROM python:3.9
2
-
3
- RUN useradd -m -u 1000 user
4
-
5
- USER user
6
-
7
- ENV HOME=/home/user \
8
- PATH=/home/user/.local/bin:$PATH
9
-
10
- COPY --chown=user . $HOME/app
11
-
12
- WORKDIR $HOME/app
13
-
14
- COPY ./requirements.txt $HOME/app/requirements.txt
15
-
16
- #RUN sudo apt -y install gdal-bin libgdal-dev
17
-
18
- RUN pip install --no-cache-dir --upgrade -r $HOME/app/requirements.txt
19
-
20
- COPY . .
21
-
22
- CMD ["solara", "run", "app_engine.py", "--host", "0.0.0.0", "--port", "7860"]
23
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/gui/app_datagenerator.py DELETED
@@ -1,63 +0,0 @@
1
- import gradio as gr
2
- from tomorrowcities import DataGenerator
3
- import warnings
4
- import uuid
5
- import os
6
- import matplotlib.pyplot as plt
7
-
8
-
9
- warnings.simplefilter(action='ignore')
10
-
11
- plt.switch_backend("agg")
12
-
13
- def generate(land_use_file, parameter_file, seed):
14
- oppath='.'
15
- dg = DataGenerator(parameter_file=parameter_file.name,
16
- land_use_file=land_use_file.name)
17
-
18
- building, household, individual, land_use = dg.generate(seed)
19
-
20
-
21
- # Generate unique filenames
22
- opfile_map = 'map'+str(uuid.uuid4())+'.png'
23
- opfile_building = 'building_layer_'+str(uuid.uuid4())+'.xlsx'
24
- opfile_household = 'household_layer_'+str(uuid.uuid4())+'.xlsx'
25
- opfile_individual = 'individual_layer_'+str(uuid.uuid4())+'.xlsx'
26
- opfile_landuse = 'landuse_layer_'+str(uuid.uuid4())+'.xlsx'
27
-
28
- fig, ax = plt.subplots(1,1,figsize=(10,10))
29
-
30
- building.plot(ax=ax)
31
- plt.savefig(opfile_map)
32
-
33
- # Save to Excel files
34
- building.to_excel(os.path.join(oppath,opfile_building),index=False)
35
- household.to_excel(os.path.join(oppath,opfile_household),index=False)
36
- individual.to_excel(os.path.join(oppath,opfile_individual),index=False)
37
- land_use.to_excel(os.path.join(oppath,opfile_landuse),index=False)
38
-
39
- info = f'# buildings: {len(building)}, # households: {len(household)}, # individuals: {len(individual)}'
40
- return opfile_building, opfile_household, opfile_individual, opfile_landuse, opfile_map, info
41
-
42
- with gr.Blocks() as demo:
43
- with gr.Row():
44
- land_use_file = gr.File(label="Upload Land Use File")
45
- parameter_file = gr.File(label="Upload Parameter File")
46
- seed = gr.Slider(label="Seed", minimum=0, maximum=1000, value=0, step=1)
47
- btn = gr.Button("Generate")
48
- with gr.Row():
49
- with gr.Column():
50
- building = gr.File(label="Buildings")
51
- household = gr.File(label="Households")
52
- with gr.Column():
53
- individual = gr.File(label="Individuals")
54
- land_use = gr.File(label="Land Use")
55
- map = gr.Image(label="Map")
56
- info = gr.Textbox(label="Info")
57
- gr.Examples(examples=[['tests/polygonsTV50_v2b.zip','tests/Input_DistributionTables_20230614.xlsx',0]],
58
- inputs=[land_use_file, parameter_file, seed])
59
-
60
- btn.click(fn=generate, inputs=[land_use_file, parameter_file, seed],
61
- outputs=[building, household, individual, land_use, map, info])
62
-
63
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/gui/app_engine.py DELETED
@@ -1,515 +0,0 @@
1
- import solara
2
- from solara.components.file_drop import FileInfo
3
- import os
4
- os.environ['USE_PYGEOS'] = '0'
5
- import geopandas as gpd
6
- import pandas as pd
7
- import json
8
- import numpy as np
9
- import ipyleaflet
10
- from ipyleaflet import AwesomeIcon, Marker
11
- import engine
12
- import random
13
- from matplotlib.figure import Figure
14
- import matplotlib.pyplot as plt
15
- import math
16
-
17
- css = """
18
-
19
- """
20
- plt.switch_backend("agg")
21
-
22
- # Static parameters
23
- initial_building_columns = set(['zoneID', 'bldID', 'nHouse', 'residents', 'specialFac', 'expStr', 'fptarea', 'repValue'])
24
- building_columns = set(['geometry','metric1','metric2','metric3','metric4','metric5','metric6','metric7','ds','zoneID', 'bldID', 'nHouse', 'residents', 'specialFac', 'expStr', 'fptarea', 'repValue'])
25
- landuse_columns = set(['geometry', 'zoneID', 'LuF', 'population', 'densityCap', 'floorARat', 'setback', 'avgIncome'])
26
- household_columns = set(['hhID', 'nInd', 'income', 'bldID', 'CommFacID'])
27
- individual_columns = set(['indivId', 'hhID', 'gender', 'age', 'eduAttStat', 'head', 'indivFacID'])
28
- intensity_columns = set(['geometry','im'])
29
- power_edge_columns = set(['FROM_NODE', 'direction', 'pipetype', 'EDGE_ID', 'guid', 'capacity', 'geometry', 'TO_NODE', 'length'])
30
- power_node_columns = set(['geometry', 'FLTYTYPE', 'STRCTYPE', 'UTILFCLTYC', 'INDPNODE', 'guid',
31
- 'NODE_ID', 'x_coord', 'y_coord', 'pwr_plant', 'serv_area', 'n_bldgs',
32
- 'income', 'eq_vuln'])
33
- vulnerabillity_columns = set(['expstr', 'hw0', 'hw0_5', 'hw1', 'hw1_5', 'hw2', 'hw3', 'hw4', 'hw5','hw6'])
34
- fragility_columns = set(['expstr','muds1_g','muds2_g','muds3_g','muds4_g','sigmads1','sigmads2','sigmads3','sigmads4'])
35
- power_fragility_columns = set(['vuln_string', 'med_Slight', 'med_Moderate', 'med_Extensive', 'med_Complete', 'beta_Slight', 'beta_Moderate', 'beta_Extensive', 'beta_Complete', 'description'])
36
- all_layers = ["Landuse", "Buildings", "Household","Individual","Intensity","Vulnerability","Fragility","Power Links","Power Nodes", "Power Fragility"]
37
- infra_options = ["power",'buildings']
38
-
39
- metrics_template = {"metric1": {"desc": "Number of workers unemployed", "value": 0, "max_value": 0},
40
- "metric2": {"desc": "Number of children with no access to education", "value": 0, "max_value": 0},
41
- "metric3": {"desc": "Number of households with no access to hospital", "value": 0, "max_value": 0},
42
- "metric4": {"desc": "Number of individuals with no access to hospital", "value": 0, "max_value": 0},
43
- "metric5": {"desc": "Number of homeless households", "value": 0, "max_value": 0},
44
- "metric6": {"desc": "Number of homeless individuals", "value": 0, "max_value": 0},
45
- "metric7": {"desc": "Population displacement", "value": 0, "max_value": 0},}
46
-
47
- metrics = solara.reactive(metrics_template)
48
-
49
-
50
- layers = solara.reactive([])
51
-
52
- base_map = ipyleaflet.basemaps.OpenStreetMap.BZH
53
-
54
- default_zoom = 14
55
- default_radius = 10
56
- default_center = (41.03,28.94)
57
-
58
- center = solara.reactive(default_center)
59
- zoom = solara.reactive(default_zoom)
60
- bounds = solara.reactive(None)
61
- radius = solara.reactive(default_radius)
62
- hazard = solara.reactive("earthquake")
63
- infra = solara.reactive([])
64
-
65
- loading = solara.reactive(-1)
66
-
67
- building_df = solara.reactive(None)
68
- clicked_df = solara.reactive(pd.DataFrame(columns=['attribute','value']))
69
-
70
-
71
- def building_click_handler(event=None, feature=None, id=None, properties=None):
72
- df = pd.DataFrame(columns=['attribute','value'])
73
- df['attribute'] = [k for k in properties.keys() if k != 'style']
74
- df['value']= [str(properties[k]) for k in properties.keys() if k != 'style']
75
- clicked_df.set(df)
76
-
77
- def landuse_click_handler(event=None, feature=None, id=None, properties=None):
78
- df = pd.DataFrame(columns=['attribute','value'])
79
- df['attribute'] = [k for k in properties.keys() if k != 'style']
80
- df['value']= [str(properties[k]) for k in properties.keys() if k != 'style']
81
- clicked_df.set(df)
82
-
83
- def power_node_click_handler(event=None, feature=None, properties=None):
84
- print(locals())
85
- df = pd.DataFrame(columns=['attribute','value'])
86
- df['attribute'] = [k for k in properties.keys() if k != 'style']
87
- df['value']= [str(properties[k]) for k in properties.keys() if k != 'style']
88
- clicked_df.set(df)
89
-
90
- def landuse_colors(feature):
91
- luf_type = feature['properties']['LuF']
92
- if luf_type == 'RESIDENTIAL (HIGH DENSITY)':
93
- luf_color = {
94
- 'color': 'black',
95
- 'fillColor': '#A0522D', # sienna
96
- }
97
- elif luf_type == 'HISTORICAL PRESERVATION AREA':
98
- luf_color = {
99
- 'color': 'black',
100
- 'fillColor': '#673147', # plum
101
- }
102
- elif luf_type == 'RESIDENTIAL (MODERATE DENSITY)':
103
- luf_color = {
104
- 'color': 'black',
105
- 'fillColor': '#cd853f', # peru
106
- }
107
- elif luf_type == 'COMMERCIAL AND RESIDENTIAL':
108
- luf_color = {
109
- 'color': 'black',
110
- 'fillColor': 'red',
111
- }
112
- elif luf_type == 'CITY CENTER':
113
- luf_color = {
114
- 'color': 'black',
115
- 'fillColor': '#E6E6FA', # lavender
116
- }
117
- elif luf_type == 'INDUSTRY':
118
- luf_color = {
119
- 'color': 'black',
120
- 'fillColor': 'grey',
121
- }
122
- elif luf_type == 'RESIDENTIAL (LOW DENSITY)':
123
- luf_color= {
124
- 'color': 'black',
125
- 'fillColor': '#D2B48C', # tan
126
- }
127
- elif luf_type == 'RESIDENTIAL (GATED NEIGHBORHOOD)':
128
- luf_color= {
129
- 'color': 'black',
130
- 'fillColor': 'orange',
131
- }
132
- elif luf_type == 'AGRICULTURE':
133
- luf_color= {
134
- 'color': 'black',
135
- 'fillColor': 'yellow',
136
- }
137
- elif luf_type == 'FOREST':
138
- luf_color= {
139
- 'color': 'black',
140
- 'fillColor': 'green',
141
- }
142
- elif luf_type == 'VACANT ZONE':
143
- luf_color = {
144
- 'color': 'black',
145
- 'fillColor': '#90EE90', # lightgreen
146
- }
147
- elif luf_type == 'RECREATION AREA':
148
- luf_color = {
149
- 'color': 'black',
150
- 'fillColor': '#32CD32', #lime
151
- }
152
- else:
153
- luf_color = {
154
- 'color': 'black',
155
- 'fillColor': random.choice(['red', 'yellow', 'green', 'orange','blue']),
156
- }
157
- return luf_color
158
-
159
- def building_colors(feature):
160
- ds_to_color = {0: 'lavender', 1:'violet',2:'fuchsia',3:'indigo',4:'darkslateblue',5:'black'}
161
- ds = feature['properties']['ds']
162
-
163
- return {'color': ds_to_color[ds], 'fillColor': ds_to_color[ds]}
164
-
165
- def power_node_style(feature,):
166
- return dict(
167
- opacity=0.5,
168
- color='black',
169
- weight=0.9
170
- )
171
-
172
-
173
- @solara.component
174
- def MapComponent():
175
-
176
- extra_layers = [l['geojson'] for l in layers.value if 'geojson' in l.keys() and l['visible'].value]
177
-
178
- # MarkerGroup Layer
179
- dataframes = {l['name']:l['df'] for l in layers.value if l['visible'].value}
180
- if 'Power Nodes' in dataframes.keys():
181
- df = dataframes['Power Nodes'].value
182
- markers = []
183
- for index, node in df.iterrows():
184
- x = node.geometry.x
185
- y = node.geometry.y
186
- marker_color = 'blue' if node['is_operational'] else 'red'
187
- icon_name = 'fa-industry' if node['pwr_plant'] else 'bolt'
188
- icon_color = 'black'
189
- marker = Marker(icon=AwesomeIcon(
190
- name=icon_name,
191
- marker_color=marker_color,
192
- icon_color=icon_color,
193
- spin=False
194
- ),location=(y,x),title=f'{node["NODE_ID"]}')
195
-
196
- markers.append(marker)
197
- power_node_layer= ipyleaflet.MarkerCluster(markers=markers,
198
- disable_clustering_at_zoom=5)
199
- extra_layers.append(power_node_layer)
200
-
201
-
202
-
203
-
204
-
205
- if building_df.value is not None:
206
- json_data = json.loads(building_df.value.to_json())
207
- building_layer = ipyleaflet.GeoJSON(data=json_data,
208
- style={'opacity': 1, 'fillOpacity': 0.5, 'weight': 1},
209
- hover_style={'color': 'red', 'dashArray': '0', 'fillOpacity': 0.5},
210
- style_callback=building_colors)
211
- building_layer.on_click(building_click_handler)
212
- extra_layers.append(building_layer)
213
-
214
-
215
-
216
-
217
- print('rendering map, number of extra layers',len(extra_layers))
218
-
219
- ipyleaflet.Map.element(
220
- zoom=zoom.value,
221
- on_zoom=zoom.set,
222
- on_bounds=bounds.set,
223
- center=center.value,
224
- on_center=center.set,
225
- scroll_wheel_zoom=True,
226
- dragging=True,
227
- double_click_zoom=True,
228
- touch_zoom=True,
229
- box_zoom=True,
230
- keyboard=True,
231
- layers=[ipyleaflet.TileLayer.element(url=base_map.build_url())]+extra_layers,
232
- )
233
-
234
- @solara.component
235
- def DialWidget(name, value, max_value=10000):
236
- print('value',value,'max_value',max_value)
237
- if max_value == 0:
238
- max_value = 10000
239
- fig = Figure(tight_layout=True,dpi=30,frameon=False)
240
- fig.set_size_inches(1.5,1)
241
- ax = fig.subplots()
242
- ax.axis('equal')
243
- ax.axis('off')
244
-
245
- ax.set_xticks([])
246
- ax.set_yticks([])
247
-
248
- t = np.linspace(0, math.pi, 100)
249
-
250
- cos = np.cos(t)
251
- sin = np.sin(t)
252
-
253
- ax.plot(cos,sin, linewidth=2)
254
- value_t = math.pi * (1 - (value / max_value))
255
-
256
- fill_color = 'red'
257
- if value_t > math.pi / 2 :
258
- x1 = np.linspace(-1,np.cos(value_t),100)
259
- y1 = np.sqrt(1 - x1**2)
260
- ax.fill_between(x1,y1,color=fill_color)
261
- x1 = np.linspace(np.cos(value_t),0,100)
262
- y1 = np.tan(value_t) * x1
263
- ax.fill_between(x1,y1,color=fill_color)
264
- else:
265
- x = np.linspace(-1, np.cos(value_t),100)
266
- y1 = np.sqrt(1-x**2)
267
- y2a = np.zeros(100)
268
- y2b = np.tan(value_t) * x
269
- y2 = np.maximum(y2a,y2b)
270
- ax.fill_between(x,y1,y2,color=fill_color)
271
-
272
- ax.text(0,0.5,value,fontdict={'fontsize':18},verticalalignment="center",
273
- horizontalalignment="center",color="black")
274
- ax.text(1,0,max_value,fontdict={'fontsize':8},verticalalignment="center",
275
- horizontalalignment="right",color="black")
276
- ax.text(0,-0.1,name,fontdict={'fontsize':10},verticalalignment="top",
277
- horizontalalignment="center",color="black")
278
- ax.set_xlim(-1.1,1.1)
279
- ax.set_ylim(-0.2,1.2)
280
- solara.FigureMatplotlib(fig)
281
-
282
- @solara.component
283
- def ExistingLayers():
284
- with solara.Column(gap="0px"):
285
- for layer in layers.value:
286
- solara.Checkbox(label=layer['name'],
287
- value=layer['visible'],
288
- style="margin-top: 0px; padding-top: 0px; min-height: 0px;")
289
-
290
- @solara.component
291
- def VisioningScenarioViewer():
292
-
293
- # State variables
294
- error_message, set_error_message = solara.use_state("")
295
-
296
- selected_layer, set_selected_layer = solara.use_state("Landuse")
297
-
298
- def load(file: FileInfo):
299
- try:
300
- json_string = file['data'].decode('utf-8')
301
- json_hash = hash(json_string)
302
- json_data = json.loads(json_string)
303
- print('loaded json data keys',json_data.keys())
304
- # Load into dataframes
305
- if "features" in json_data.keys():
306
- # Add zero metrics to building layer
307
- if set(json_data['features'][0]['properties'].keys()) == initial_building_columns:
308
- for metric in metrics.value.keys():
309
- for i in range(len(json_data['features'])):
310
- json_data['features'][i]['properties'][metric] = 0
311
- # initial damage states set to zero
312
- for i in range(len(json_data['features'])):
313
- json_data['features'][i]['properties']['ds'] = 0
314
-
315
- df = gpd.GeoDataFrame.from_features(json_data['features'])
316
-
317
- if set(df.columns) == building_columns:
318
- building_df.set(df)
319
-
320
- new_center = (df.geometry.centroid.y.mean(), df.geometry.centroid.x.mean())
321
- center.set(new_center)
322
-
323
- else:
324
- df = pd.read_json(json_string)
325
-
326
- existing_hashes = [l['hash'] for l in layers.value]
327
- if json_hash in existing_hashes:
328
- set_error_message("File already uploaded")
329
- return
330
- else:
331
- new_layer = {'fileinfo': file, 'hash': json_hash, 'visible': solara.reactive(True)}
332
- df_columns = set(df.columns)
333
- if df_columns == building_columns:
334
- new_layer['name'] = 'Buildings'
335
- elif df_columns == landuse_columns:
336
- new_layer['name'] = 'Landuse'
337
- new_layer['geojson'] = ipyleaflet.GeoJSON(data=json_data,
338
- style={'opacity': 1, 'dashArray': '9', 'fillOpacity': 0.5, 'weight': 1},
339
- hover_style={'color': 'white', 'dashArray': '0', 'fillOpacity': 0.5},
340
- style_callback=landuse_colors)
341
- new_layer['geojson'].on_click(landuse_click_handler)
342
-
343
- elif df_columns == intensity_columns:
344
- locs = np.array([df.geometry.y.to_list(), df.geometry.x.to_list(), df.im.to_list()]).transpose().tolist()
345
- new_layer['name'] = 'Intensity'
346
- new_layer['geojson'] = ipyleaflet.Heatmap(locations=locs, radius=radius.value)
347
- elif df_columns == household_columns:
348
- new_layer['name'] = 'Household'
349
- elif df_columns == individual_columns:
350
- new_layer['name'] = 'Individual'
351
- elif df_columns == vulnerabillity_columns:
352
- new_layer['name'] = 'Vulnerability'
353
- elif df_columns == fragility_columns:
354
- new_layer['name'] = 'Fragility'
355
- elif df_columns == power_fragility_columns:
356
- new_layer['name'] = 'Power Fragility'
357
- elif df_columns == power_edge_columns:
358
- new_layer['name'] = 'Power Links'
359
- new_layer['geojson'] = ipyleaflet.GeoJSON(data=json_data)
360
- elif df_columns == power_node_columns:
361
- new_layer['name'] = 'Power Nodes'
362
- df['ds'] = 0
363
- df['is_damaged'] = False
364
- df['is_operational'] = True
365
-
366
- new_layer['df'] = solara.reactive(df)
367
- if new_layer['name'] in [l['name'] for l in layers.value]:
368
- for i, l in enumerate(layers.value):
369
- if l['name'] == new_layer['name']:
370
- layers.value[i] = new_layer
371
- break
372
- #layers.set(layers.value)
373
- else:
374
- layers.set(layers.value + [new_layer])
375
-
376
- #set_run_allowed(is_ready_to_run())
377
- set_error_message("")
378
- except UnicodeDecodeError:
379
- set_error_message(f'{file["name"]} is not a text file')
380
- except Exception as e:
381
- set_error_message(f'file: {file["name"]} Exception:{e}')
382
-
383
-
384
- def progress(ratio):
385
- print(f"loading {ratio}")
386
- loading.set(ratio)
387
-
388
- def is_ready_to_run():
389
- print('infra.value',infra.value)
390
- existing_layers = set([l['name'] for l in layers.value])
391
- missing = []
392
-
393
-
394
-
395
- if hazard.value == "earthquake":
396
- if "power" in infra.value:
397
- missing += list(set(["Power Links","Power Nodes","Intensity","Power Fragility"]) - existing_layers)
398
- if "buildings" in infra.value:
399
- missing += list(set(["Buildings","Household","Individual","Intensity","Fragility"]) - existing_layers)
400
- elif hazard.value == "flood":
401
- if "power" in infra.value:
402
- missing += list(set(["Power Links","Power Nodes","Intensity","Power Vulnerability"]) - existing_layers)
403
- if "buildings" in infra.value:
404
- missing += list(set(["Buildings","Household","Individual","Intensity","Vulnerability"]) - existing_layers)
405
-
406
- if infra.value == []:
407
- missing += ['You should select power and/or buildings']
408
- return missing == [], missing
409
-
410
- def compute():
411
- print("I'm computing")
412
- dfs = {l['name']: l['df'].value for l in layers.value}
413
- dfs_reactive = {l['name']: l['df'] for l in layers.value}
414
-
415
- is_ready, missing = is_ready_to_run()
416
-
417
-
418
-
419
- if is_ready:
420
- set_error_message("")
421
-
422
- if 'power' in infra.value:
423
-
424
- eq_ds, is_damaged, is_operational = engine.compute_power_infra(dfs['Power Nodes'],
425
- dfs['Power Links'],
426
- dfs['Intensity'],
427
- dfs['Power Fragility'])
428
-
429
- power_node_df = dfs['Power Nodes'].copy()
430
- power_node_df['ds'] = list(eq_ds)
431
- power_node_df['is_damaged'] = list(is_damaged)
432
- power_node_df['is_operational'] = list(is_operational)
433
-
434
- dfs_reactive['Power Nodes'].set(power_node_df)
435
-
436
-
437
-
438
- if 'buildings' in infra.value:
439
- computed_metrics, df_metrics, df_bld_hazard = engine.compute(dfs['Buildings'],
440
- dfs['Household'],
441
- dfs['Individual'],
442
- dfs['Intensity'],
443
- dfs['Fragility'] if hazard.value == "earthquake" else dfs['Vulnerability'],
444
- hazard.value)
445
-
446
- print(computed_metrics)
447
- updated_df = building_df.value
448
- for metric in df_metrics.keys():
449
- updated_df[metric] = list(df_metrics[metric][metric])
450
- updated_df['ds'] = list(df_bld_hazard['ds'])
451
- building_df.set(updated_df)
452
-
453
- ((ymin,xmin),(ymax,xmax)) = bounds.value
454
- for metric in df_metrics.keys():
455
- computed_metrics[metric]['value'] = int(updated_df.cx[xmin:xmax,ymin:ymax][metric].sum())
456
-
457
- metrics.set(computed_metrics)
458
- else:
459
- set_error_message(f'Layers {missing} are missing')
460
-
461
- with solara.Row():
462
- with solara.Column():
463
- solara.FileDrop(label="Drop layers", on_total_progress=progress, on_file=load,lazy=False)
464
- if loading.value > -1 and loading.value < 100:
465
- solara.Info(f'Uploading {loading.value}%')
466
-
467
- with solara.Column():
468
- solara.ToggleButtonsSingle(hazard.value, ["earthquake","flood"], on_value=hazard.set)
469
- solara.ToggleButtonsMultiple(infra.value,infra_options, on_value=infra.set)
470
- solara.Button(label="Compute", on_click=compute, outlined=True)
471
-
472
- #building_layer = get_building_layer()
473
- with solara.GridFixed(columns=4):
474
- for metric_name, metric in metrics.value.items():
475
- metric_description = metric['desc']
476
- if building_df.value is not None and bounds.value is not None:
477
- ((ymin,xmin),(ymax,xmax)) = bounds.value
478
- value = int(building_df.value.cx[xmin:xmax,ymin:ymax][metric_name].sum())
479
- else:
480
- value = 0
481
- #with solara.Column(gap="1px",classes=['metriccontainer'],align="center", margin="1"):
482
- with solara.Tooltip(metric_description):
483
- with solara.Column():
484
- DialWidget(metric_name, value, max_value=metric['max_value'])
485
- #solara.HTML(tag="div",unsafe_innerHTML=f'{metric_description}',
486
- # classes=["metricdescription"])
487
- if error_message != "":
488
- solara.Error(error_message)
489
- with solara.Columns([10, 70, 20]):
490
- ExistingLayers()
491
- MapComponent()
492
- solara.DataFrame(df=clicked_df.value, scrollable=True)
493
-
494
- solara.ToggleButtonsSingle(selected_layer, all_layers, on_value=set_selected_layer)
495
- found_layer = None
496
- for layer in layers.value:
497
- if layer['name'] == selected_layer and layer['visible'].value:
498
- found_layer = layer
499
- break
500
-
501
- if found_layer:
502
- df = found_layer['df'].value
503
- print('found layer', found_layer['name'])
504
- if 'geometry' in list(df.columns) and bounds.value is not None:
505
- ((ymin,xmin),(ymax,xmax)) = bounds.value
506
- solara.DataFrame(df=df.cx[xmin:xmax,ymin:ymax].drop(columns='geometry'))
507
- else:
508
- solara.DataFrame(df=df)
509
-
510
- else:
511
- solara.Info(f'{selected_layer} is not uploaded or set to invisible')
512
- @solara.component
513
- def Page():
514
- solara.Style(css)
515
- VisioningScenarioViewer()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/gui/sandbox.py DELETED
@@ -1,100 +0,0 @@
1
- import solara
2
- import ipyleaflet
3
- from matplotlib.figure import Figure
4
- import matplotlib.pyplot as plt
5
- import math
6
- import numpy as np
7
-
8
- plt.switch_backend("agg")
9
-
10
- @solara.component
11
- def DialWidget(desc, value, max_value=10000):
12
- if max_value == 0:
13
- max_value = 10000
14
- fig = Figure(tight_layout=True,dpi=30,frameon=False)
15
- fig.set_size_inches(1.5,1)
16
- ax = fig.subplots()
17
- ax.axis('equal')
18
- ax.axis('off')
19
-
20
- ax.set_xticks([])
21
- ax.set_yticks([])
22
-
23
- t = np.linspace(0, math.pi, 100)
24
-
25
- cos = np.cos(t)
26
- sin = np.sin(t)
27
-
28
- ax.plot(cos,sin, linewidth=2)
29
- value_t = math.pi * (1 - (value / max_value))
30
-
31
- fill_color = 'red'
32
- if value_t > math.pi / 2 :
33
- x1 = np.linspace(-1,np.cos(value_t),100)
34
- y1 = np.sqrt(1 - x1**2)
35
- ax.fill_between(x1,y1,color=fill_color)
36
- x1 = np.linspace(np.cos(value_t),0,100)
37
- y1 = np.tan(value_t) * x1
38
- ax.fill_between(x1,y1,color=fill_color)
39
- else:
40
- x = np.linspace(-1, np.cos(value_t),100)
41
- y1 = np.sqrt(1-x**2)
42
- y2a = np.zeros(100)
43
- y2b = np.tan(value_t) * x
44
- y2 = np.maximum(y2a,y2b)
45
- ax.fill_between(x,y1,y2,color=fill_color)
46
-
47
- #ax.plot([0,0.9*np.cos(value_t)],[0, 0.9*np.sin(value_t)], linewidth=10)
48
- #ax.scatter([0],[0],color='black',s=50)
49
- #ax.text(-1.1,0,0,fontdict={'fontsize':14},verticalalignment="center",
50
- # horizontalalignment="right",color="black")
51
- #ax.text(1.1,0,max_value,fontdict={'fontsize':14},verticalalignment="center",
52
- # horizontalalignment="left",color="black")
53
- #ax.plot([-0.9,-1.1],[0,0],color="black")
54
- #ax.plot([0.9,1.1],[0,0],color="black")
55
- #ax.plot([0,0],[0.9,1.1],color="black")
56
- horizontalalignment = "right" if value_t > math.pi/2 else "left"
57
- #ax.text(1.1*np.cos(value_t),1.1*np.sin(value_t),value,fontdict={'fontsize':20},verticalalignment="center",
58
- # horizontalalignment=horizontalalignment,color="black")
59
- ax.text(0,0.5,value,fontdict={'fontsize':20},verticalalignment="center",
60
- horizontalalignment="center",color="black")
61
- ax.set_xlim(-1.1,1.1)
62
- ax.set_ylim(-0.2,1.2)
63
- #ax.text(0, -0.1, desc,fontdict={'fontsize':20},verticalalignment="center",
64
- # horizontalalignment='center',color="white")
65
- solara.FigureMatplotlib(fig)
66
-
67
- metrics_template = {"metric1": {"desc": "Number of workers unemployed", "value": 0, "max_value": 0},
68
- "metric2": {"desc": "Number of children with no access to education", "value": 0, "max_value": 0},
69
- "metric3": {"desc": "Number of households with no access to hospital", "value": 0, "max_value": 0},
70
- "metric4": {"desc": "Number of individuals with no access to hospital", "value": 0, "max_value": 0},
71
- "metric5": {"desc": "Number of homeless households", "value": 0, "max_value": 0},
72
- "metric6": {"desc": "Number of homeless individuals", "value": 0, "max_value": 0},
73
- "metric7": {"desc": "Population displacement", "value": 0, "max_value": 0},}
74
-
75
- metrics = solara.reactive(metrics_template)
76
-
77
- def generate_metrics():
78
-
79
- new_metrics = metrics_template.copy()
80
- for m in new_metrics.keys():
81
- max_value = np.random.randint(500, 10001)
82
- value = int(np.random.random() * max_value)
83
- new_metrics[m]["max_value"] = max_value
84
- new_metrics[m]["value"] = value
85
-
86
- metrics.set(new_metrics)
87
-
88
- @solara.component
89
- def Page():
90
-
91
-
92
-
93
-
94
- solara.Button(label="Generate", on_click=generate_metrics)
95
-
96
- for name, metric in metrics.value.items():
97
- DialWidget(name, metric["value"], max_value=metric["max_value"])
98
-
99
-
100
- Page()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/tests/Input_DistributionTables_20230614.xlsx DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:00997533e5d700f047c8ed32bbbd38ec4d8cd0c779cc2d1434f2e45c761f75d2
3
- size 102807
 
 
 
 
src/tests/nairobi_business_buildings.geojson DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8083da200b62a6696f961e6bd3420a4d0b7d8b844be96cf7a68ee09178202575
3
- size 2715946
 
 
 
 
src/tests/nairobi_business_household.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4b9336e565a071ec9ed8db77ee07c2405768f2d431315dff1794e56ca935921a
3
- size 10349647
 
 
 
 
src/tests/nairobi_business_individual.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f46b176200fbf857d92ad7c9ec2ad65aab08fe5a09386245f1e8534f823a156f
3
- size 53053675
 
 
 
 
src/tests/nairobi_business_landuse.geojson DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e175564c7defb2ebc85d3ba9d988523e194ecc9cba35769024e973198aa69d9a
3
- size 293806
 
 
 
 
src/tests/nairobi_earthquake_fragility.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:db71436e5c5638af07de5734cee2f48d89cd8fda673295c041ee6f5a5f85b549
3
- size 1179
 
 
 
 
src/tests/nairobi_earthquake_intensity.geojson DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:26b0f31995d0ca2a8eb2fed56e7215e56505fe8452fa4007ba0216fda6f91eab
3
- size 148798
 
 
 
 
src/tests/nairobi_flood_depth_50yr.geojson DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:447f10956468e02f7688923cc4094609c749dc039c10ec76984e377c803616b0
3
- size 14778140
 
 
 
 
src/tests/nairobi_flood_vulnerability.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9c1a0e1d6edd529e26abaacd68dee9971d34c9fa1e9ac2eb84c5fb386fa562ad
3
- size 864604
 
 
 
 
src/tests/nairobi_power_edges.geojson DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:db3729b7a244353f15a2839a8c2631f4f098c1cf2bbb03b649234d4f71b2f21b
3
- size 11094
 
 
 
 
src/tests/nairobi_power_fragility.json DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:0b626b417823a52b7f385a892ce10a4f3c4daa66d891df2dc389ef3021a68336
3
- size 1909
 
 
 
 
src/tests/nairobi_power_nodes.geojson DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:18e215291618dd0255c4583e96f5c10c84a7edb50428e78cc721e3f949f2fee2
3
- size 12663
 
 
 
 
src/tests/polygonsTV50_v2b.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9995f83f37301f54d7a960aaad4623d37cf7efef07daea874dc919425256caef
3
- size 29531
 
 
 
 
src/tomorrowcities/__init__.py DELETED
@@ -1,7 +0,0 @@
1
- from tomorrowcities.core import (
2
- DataGenerator,
3
- )
4
-
5
- __all__ = [
6
- 'DataGenerator',
7
- ]
 
 
 
 
 
 
 
 
src/tomorrowcities/core.py DELETED
@@ -1,1463 +0,0 @@
1
- from tomorrowcities.utils import read_zipshp
2
- import pandas as pd
3
- import uuid
4
- import json
5
- import io
6
- import fiona
7
- import os.path
8
- import numpy as np
9
- import pandas as pd
10
- import geopandas as gpd
11
- import random
12
- from random import sample
13
- from numpy.random import multinomial, randint
14
- from math import ceil
15
- import math
16
- from itertools import repeat, chain
17
- import warnings
18
- warnings.simplefilter(action='ignore')
19
-
20
- class DataGenerator:
21
- def __init__(self, parameter_file, land_use_file):
22
- self.parameter_file = parameter_file
23
- self.land_use_file = land_use_file
24
-
25
- def generate(self, seed=42):
26
- ipfile = self.parameter_file
27
- random.seed(seed)
28
- np.random.seed(seed)
29
- df_nc = pd.read_excel(ipfile,sheet_name=1, header=None)
30
- ipdf = pd.read_excel(ipfile,sheet_name=2, header=None)
31
- df1 = pd.read_excel(ipfile,sheet_name=3, header=None)
32
- df2 = pd.read_excel(ipfile,sheet_name=4, header=None)
33
- df3 = pd.read_excel(ipfile,sheet_name=5, header=None)
34
-
35
-
36
- #%% Extract the nomenclature for load resisting system and land use types
37
- startmarker = '\['
38
- startidx = df_nc[df_nc.apply(lambda row: row.astype(str).str.contains(\
39
- startmarker,case=False).any(), axis=1)]
40
-
41
- endmarker = '\]'
42
- endidx = df_nc[df_nc.apply(lambda row: row.astype(str).str.contains(\
43
- endmarker,case=False).any(), axis=1)]
44
-
45
- # Load resisting system types
46
- lrs_types_temp = df_nc.loc[list(range(startidx.index[0]+1,endidx.index[0]))]
47
- lrs_types = lrs_types_temp[1].to_numpy().astype(str)
48
- lrsidx = {}
49
- count = 0
50
- for key in lrs_types:
51
- lrsidx[str(key)] = count
52
- count+=1
53
-
54
- # Landuse Types
55
- lut_types_temp = df_nc.loc[list(range(startidx.index[1]+1,endidx.index[1]))]
56
- lut_types = lut_types_temp[1].astype(str)
57
- lutidx = {}
58
- count = 0
59
- for key in lut_types:
60
- lutidx[key] = count
61
- count+=1
62
-
63
-
64
- #%% Inputs extracted from the excel input file
65
-
66
- opfile_building = 'building_layer_'+str(uuid.uuid4())+'.xlsx'
67
- opfile_household = 'household_layer_'+str(uuid.uuid4())+'.xlsx'
68
- opfile_individual = 'individual_layer_'+str(uuid.uuid4())+'.xlsx'
69
- opfile_landuse = 'landuse_layer_'+str(uuid.uuid4())+'.xlsx'
70
-
71
- savefile = ipdf.iloc[8,1]
72
-
73
- # Income types is hardcoded
74
- avg_income_types =np.array(['lowIncomeA','lowIncomeB','midIncome','highIncome'])
75
-
76
- #Average dwelling area (sqm) wrt income type (44 for LI, 54 for MI,
77
- #67 for HI in Tomorrovwille)
78
- #Range of footprint area fpt_area (sqm) wrt. income type (32-66 for LI,
79
- # 32-78 for MI and 70-132 for HI in Tomorrowville)
80
- average_dwelling_area = np.array([ipdf.iloc[21,2],ipdf.iloc[21,3],\
81
- ipdf.iloc[21,4],ipdf.iloc[21,5]])
82
-
83
- fpt_area = {'lowIncomeA':np.fromstring(ipdf.iloc[22,2],dtype=float,sep=','),
84
- 'lowIncomeB':np.fromstring(ipdf.iloc[22,3],dtype=float,sep=','),
85
- 'midIncome':np.fromstring(ipdf.iloc[22,4],dtype=float,sep=','),
86
- 'highIncome':np.fromstring(ipdf.iloc[22,5],dtype=float,sep=',')}
87
-
88
- # Storey definition 1- Low rise (LR) 1-4, 2- Mid rise (MR) 5-8,
89
- # 3- High rise (HR) 9-19
90
- storey_range = {0:np.fromstring(ipdf.iloc[25,2],dtype=int,sep=','),
91
- 1:np.fromstring(ipdf.iloc[25,3],dtype=int,sep=','),
92
- 2:np.fromstring(ipdf.iloc[25,4],dtype=int,sep=',')}
93
-
94
- # Code Compliance Levels (Low, Medium, High): 1 - LC, 2 - MC, 3 - HC
95
- code_level = np.array(['LC','MC','HC'])
96
-
97
- # Nr of commercial buildings per 1000 individuals
98
- numb_com = ipdf.iloc[10,1]
99
- # Nr of industrial buildings per 1000 individuals
100
- numb_ind = ipdf.iloc[11,1]
101
-
102
- # Area constraints in percentage (AC) for residential and commercial zones.
103
- # Total built-up areas in these zones cannot exceed (AC*available area)
104
- AC_com = ipdf.iloc[14,1] # in percent
105
- AC_ind = ipdf.iloc[15,1] # in percent
106
-
107
- # Assumption 14 and 15: 1 school per 10000 individuals,
108
- # 1 hospital per 25000 individuals
109
- nsch_pi = ipdf.iloc[17,1]
110
- nhsp_pi = ipdf.iloc[18,1]
111
-
112
- # Unit price for replacement wrt occupancy type and special facility
113
- # status of the building
114
- # Occupancy type is unchangeable, only replacement value is taken from user input
115
- Unit_price={'Res':ipdf.iloc[28,2],'Com':ipdf.iloc[28,3],'Ind':ipdf.iloc[28,4],
116
- 'ResCom':ipdf.iloc[28,5],'Edu':ipdf.iloc[28,6],'Hea':ipdf.iloc[28,7]}
117
-
118
- #household_building_match = 'footprint' # 'footprint' or 'number_of_units'
119
-
120
- landuse_shp = read_zipshp(self.land_use_file)
121
-
122
-
123
- #Calculate area of landuse zones using polygons only if area is not already.
124
- # First, convert coordinate system to cartesian
125
- if 'area' not in landuse_shp.columns:
126
- landuse_shp_cartesian = landuse_shp.copy()
127
- landuse_shp_cartesian = landuse_shp_cartesian.to_crs({'init': 'epsg:3857'})
128
- landuse_shp_cartesian['area']=landuse_shp_cartesian['geometry'].area # m^2
129
- landuse_shp_cartesian['area']=landuse_shp_cartesian['area']/10**4 # Hectares
130
- landuse_shp_cartesian = landuse_shp_cartesian.drop(columns=['geometry'])
131
- landuse = landuse_shp_cartesian.copy()
132
- else:
133
- landuse = landuse_shp.copy()
134
- landuse = landuse.drop(columns=['geometry'])
135
-
136
-
137
- #%% Concatenate the dataframes and process the data
138
- tabledf = pd.concat([df1,df2,df3]).reset_index(drop=True)
139
-
140
- # Define a dictionary containing data distribution tables
141
- # Table names sorted according to the order in the excel input spreadsheet
142
- tables_temp = {
143
- 't1':[],'t2':[],'t3':[],'t4':[],'t5':[],'t5a':[],'t6':[],'t9':[],
144
- 't12':[],'t13':[],'t7':[],'t8':[],'t11':[],'t10':[],'t14':[]
145
- }
146
- startmarker = '\['
147
- startidx = tabledf[tabledf.apply(lambda row: row.astype(str).str.contains(\
148
- startmarker,case=False).any(), axis=1)]
149
-
150
- endmarker = '\]'
151
- endidx = tabledf[tabledf.apply(lambda row: row.astype(str).str.contains(\
152
- endmarker,case=False).any(), axis=1)]
153
-
154
-
155
- count=0
156
- for key in tables_temp:
157
- #print(startidx.index[count], endidx.index[count])
158
- tablepart = tabledf.loc[list(range(startidx.index[count]+1,endidx.index[count]))]
159
- tablepart = tablepart.drop(columns =0 )
160
- tablepart = tablepart.dropna(axis=1).reset_index(drop=True).values.tolist()
161
- tables_temp[key].append(tablepart)
162
- count+=1
163
-
164
- tables = tables_temp
165
-
166
- #If values are zero for industrial and commercial buildings
167
- if numb_com ==0:
168
- print('The number of commercial buildings cannot be zero.')
169
- if numb_ind == 0:
170
- print('The number of industrial buildings cannot be zero.')
171
-
172
-
173
- #%% Function definition: dist2vector
174
- def dist2vector(d_value, d_number,d_limit,shuffle_or_not):
175
- # d_value, d_number = vectors of samelength (numpy array)
176
- # d_limit = single integer which indicates the sum of all values
177
- # in d_number.
178
- # shuffle_or_not = 'shuffle' will return a randomly shuffled list otherwise
179
- # by default or with 'DoNotShuffle' the list will not be shuffled
180
- # Output: insert_vector is a list
181
- # Calculate cumulative sum and typecast to integer
182
- d_number = np.cumsum(d_number).astype('int32')
183
- d_number[-1] = d_limit #To prevent array broadcast mismatch
184
- d_number_temp = np.insert(d_number,0,0)
185
- d_number_round = np.diff(d_number_temp)
186
- insert_vector_df= pd.DataFrame(np.nan,index=range(d_number[-1]),columns=['iv'])
187
- a=0
188
- icount= 0
189
- for value in d_value:
190
- b = d_number[icount]
191
- subvector = [str(value)]*d_number_round[icount]
192
- insert_vector_df.loc[range(a,b),'iv'] = subvector
193
- a = b
194
- icount+=1
195
-
196
- insert_vector = insert_vector_df['iv'].values.tolist()
197
- if shuffle_or_not == 'shuffle':
198
- random.shuffle(insert_vector)
199
-
200
- return insert_vector
201
-
202
- #%% The data generation process begins here____________________________________
203
-
204
- #%% Step 1: Calculate maximum population (nPeople)
205
- nPeople = round(landuse['densityCap']*landuse['area']-landuse['population'])
206
- nPeople[nPeople<0]=0
207
-
208
- #%% Step 2: Calculate the number of households (nHouse), hhID
209
- # Assumption 1: Household size distribution is same for different income types
210
- # Question: How to ensure that there are no NaNs while assigning zone type?
211
-
212
- # Convert Table 1 to numpy array
213
- t1_list = tables['t1'][0]
214
- # No. of individuals
215
- t1_l1 = np.array(t1_list[0], dtype=int)
216
- t1_l2 = np.array(t1_list[1], dtype=float) # Probabilities
217
-
218
- # Compute the probability of X number of people living in a household
219
- household_prop = t1_l2/sum(t1_l2)
220
- # Total number of households for all zones
221
- nHouse_all = round(nPeople/(sum(household_prop*t1_l1)))
222
- nHouse_all = nHouse_all.astype('int32')
223
- nHouse = nHouse_all[nHouse_all>0] # Exclude zones with zero households
224
- nHouseidx = nHouse.index
225
- #Preallocate a dataframe with nan to hold the household layer
226
- household_df = pd.DataFrame(np.nan, index = range(sum(nHouse)),
227
- columns=['bldID','hhID','income','nIND','CommFacID',
228
- 'income_numb','zoneType','zoneID',
229
- 'approxFootprint'])
230
- #Calculate a list of cumulative sum of nHouse
231
- nHouse_cuml = np.cumsum(nHouse)
232
-
233
- # Assign household id (hhID)
234
- a = 0
235
- for i in nHouseidx:
236
- b = nHouse_cuml[i]
237
- household_df.loc[range(a,b),'hhID'] = range(a+1,b+1) # First hhID index =1
238
- household_df.loc[range(a,b),'zoneID'] = landuse.loc[i,'zoneID']
239
- household_df.loc[range(a,b),'zoneType'] = landuse.loc[i,'avgIncome']
240
- a = b
241
-
242
- del a,b
243
- household_df['hhID'] = household_df['hhID'].astype(int)
244
-
245
-
246
- #%% Step 3: Identify the household size and assign "nInd" values to each household
247
- a_g = 0
248
- for i in nHouseidx:
249
- b_g = nHouse_cuml[i]
250
- # Find Total of every different nInd number for households
251
- household_num = nHouse[i] * household_prop
252
- # Round the household numbers for various numbers of individuals
253
- # without exceeding total household number
254
- cumsum_household_num = np.round_(np.cumsum(household_num)).astype('int32')
255
- cumsum_household_num_diff = np.diff(cumsum_household_num)
256
- first_val = nHouse[i] - sum(cumsum_household_num_diff)
257
- household_num_round = np.insert(cumsum_household_num_diff,0,first_val)
258
-
259
- #Generate a column vector
260
- d_value = t1_l1
261
- d_number = cumsum_household_num
262
- insert_vector = np.ones(d_number[-1])
263
- a, count =0, 0
264
- for value in d_value:
265
- b = d_number[count]
266
- #This works for numbers but not for strings
267
- subvector = np.empty(household_num_round[count]) #
268
- subvector.fill(value) #
269
- insert_vector[a:b] = subvector #
270
- a = b
271
- count+=1
272
- del a,b
273
- insert_vector = np.random.permutation(insert_vector)
274
-
275
- household_df.loc[range(a_g,b_g), 'nIND'] = insert_vector
276
- a_g = b_g
277
-
278
- del a_g, b_g, count,insert_vector,subvector
279
-
280
- household_df['nIND'] = household_df['nIND'].astype(int)
281
-
282
-
283
- #%% Step 4: Identify and assign income type of the households
284
- # Table 2 states the % of various income groups in different income zones
285
- # Convert Table 2 to numpy array
286
- # for row in range((len(tables['t2'][0]))):
287
- # tables['t2'][0][row]=np.fromstring(tables['t2'][0][row],dtype=float,sep=',')
288
-
289
- t2 = np.array(tables['t2'][0])
290
- # avg_income_types = ['lowIncomeA','lowIncomeB','midIncome','highIncome']
291
-
292
- count = 0
293
-
294
- for inc in avg_income_types:
295
- #Find indices corresponding to a zone type
296
- itidx = household_df['zoneType'] == inc
297
- if sum(itidx) ==0: #i.e. this income zone doesn't exist in the landuse data
298
- continue
299
- income_entries = t2[count]*sum(itidx)
300
- d_limit = sum(itidx) # Size of array to match after rounding off
301
- d_value = avg_income_types[income_entries!=0]
302
- d_number = income_entries[income_entries!=0] #ip
303
-
304
- insert_vector = dist2vector(d_value, d_number,d_limit,'shuffle')
305
- count+=1
306
- household_df.loc[itidx, 'income'] = insert_vector
307
-
308
- del count,insert_vector
309
-
310
-
311
-
312
- #%% Step 5: Identify and assign a unique ID for each individual
313
-
314
- #Asumption 2: Gender distribution is same for different income types
315
-
316
- #Preallocate a dataframe with nan to hold the individual layer
317
- nindiv = int(sum(household_df['nIND'])) # Total number of individuals
318
- individual_df = pd.DataFrame(np.nan, index = range(nindiv),
319
- columns=['hhID', 'indivID', 'gender', 'age','head',
320
- 'eduAttStat','indivFacID_1','indivFacID_2',
321
- 'schoolEnrollment','labourForce','employed'])
322
- individual_df.loc[range(nindiv),'indivID'] = [range(1,nindiv+1)]
323
- individual_df['indivID'].astype('int')
324
-
325
- #%% Step 6: Identify and assign gender for each individual
326
- # Convert the gender distribution table 3 to numpy array
327
- tables['t3'][0] = np.array(tables['t3'][0][0],dtype=float)
328
- female_p = tables['t3'][0][0]
329
- male_p = 1-female_p
330
- gender_value = np.array([1,2], dtype=int) # 1=Female, 2=Male
331
- gender_number = np.array([female_p, male_p])*nindiv
332
-
333
- d_limit = nindiv # Size of array to match after rounding off
334
- d_value = gender_value
335
- d_number = gender_number
336
-
337
- insert_vector = dist2vector(d_value, d_number,d_limit,'shuffle')
338
- individual_df.loc[range(nindiv),'gender'] = insert_vector
339
- individual_df['gender'] = individual_df['gender'].astype('int')
340
-
341
- #%% Step 7: Identify and assign age for each individual
342
- #Assumption 3: Age profile is same for different income types
343
- #Convert the age profile wrt gender distribution table 4 to numpy array
344
- ageprofile_value = np.array([1,2,3,4,5,6,7,8,9,10], dtype=int)
345
- t4_l1_f = np.array(tables['t4'][0][0], dtype=float) #For female
346
- t4_l2_m = np.array(tables['t4'][0][1], dtype=float) #For male
347
- t4 = np.array([t4_l1_f, t4_l2_m])
348
-
349
- for i in range(len(gender_value)):
350
- gidx = individual_df['gender'] == gender_value[i]
351
- d_limit = sum(gidx)
352
- d_value = ageprofile_value
353
- d_number = t4[i]*sum(gidx)
354
- insert_vector = dist2vector(d_value, d_number,d_limit,'shuffle')
355
- individual_df.loc[gidx,'age'] = insert_vector
356
-
357
- individual_df['age'] = individual_df['age'].astype(int)
358
-
359
-
360
- #%% Step 8: Identify and assign education attainment status for each individual
361
-
362
- # Assumption 4: Education Attainment status is same for different income types
363
- # Education Attainment Status (Meta Data)
364
- # 1 - Only literate
365
- # 2 - Primary school
366
- # 3 - Elementary sch.
367
- # 4 - High school
368
- # 5 - University and above
369
- #Convert the educational status distribution table 5 to numpy array
370
- education_value = np.array([1,2,3,4,5], dtype=int)
371
- t5_l1_f = np.array(tables['t5'][0][0], dtype=float) #For female
372
- t5_l2_m = np.array(tables['t5'][0][1], dtype=float) #For male
373
- t5 = np.array([t5_l1_f, t5_l2_m])
374
-
375
- for i in range(len(gender_value)):
376
- gidx = individual_df['gender'] == gender_value[i]
377
- d_limit = sum(gidx)
378
- d_value = education_value
379
- d_number = t5[i]*sum(gidx)
380
- insert_vector = dist2vector(d_value, d_number,d_limit,'shuffle')
381
- individual_df.loc[gidx,'eduAttStat'] = insert_vector
382
-
383
- individual_df['eduAttStat'] = individual_df['eduAttStat'].astype(int)
384
-
385
- #%% Step 9: Identify and assign the head of household to corresponding hhID
386
-
387
- # Assumption 5: Head of household is dependent on gender
388
- # Assumption 6: Only (age>20) can be head of households
389
- #Convert the head of houseold distribution table 6 to numpy array
390
- tables['t6'][0] = np.array(tables['t6'][0][0],dtype=float)
391
- female_hh = tables['t6'][0][0]
392
- male_hh = 1-female_hh
393
-
394
- # Calculate the number of household heads by gender
395
- hh_number= np.array([female_hh, male_hh])*sum(nHouse)
396
- hh_number= hh_number.astype(int)
397
- hh_number[0] = sum(nHouse) - hh_number[1]
398
-
399
- for i in range(len(gender_value)): #Assign female and male candidates
400
- gaidx= (individual_df['gender'] == gender_value[i]) & \
401
- (individual_df['age']>4) # '>4' denotes above age group '18-20'
402
- #Index of household head candidates in individual_df
403
- hh_candidate_idx = list(individual_df.loc[gaidx,'gender'].index)
404
- # Take a random permutation sample to obtain household head indices from
405
- # the index of possible household candidates in individual_df
406
- ga_hh_idx = random.sample(hh_candidate_idx, hh_number[i])
407
- #print('gaidx=',sum(gaidx), 'ga_hh_idx', len(ga_hh_idx))
408
-
409
- individual_df.loc[ga_hh_idx,'head'] = 1
410
-
411
-
412
-
413
- # 1= household head, 2= household members other than the head
414
- individual_df.loc[individual_df['head'] != 1,'head'] =0
415
-
416
- #Assign household ID (hhID) randomly
417
- hhid_temp = household_df['hhID'].tolist()
418
- random.shuffle(hhid_temp)
419
- individual_df.loc[individual_df['head'] == 1,'hhID'] = hhid_temp
420
-
421
- #%% Step 10: Identify and assign the household that each individual belongs to
422
- # In relation with Assumption 6, no individuals under 20 years of age can live
423
- # alone in an household
424
- individual_df_temp = individual_df[individual_df['head']==0]
425
- individual_df_temp_idx = list(individual_df_temp.index)
426
- #hhidlist = household_df['hhID'].tolist()
427
- for i in range(1,len(t1_l1)): #Loop through household numbers >1
428
- hh_nind = t1_l1[i] # Number of individuals in households
429
- # Find hhID corresponding to household numbers
430
- hh_df_idx = household_df['nIND']== hh_nind
431
- hhidx = household_df.loc[hh_df_idx,'hhID'].tolist()
432
- #Random shuffle hhidx here
433
- amph = hh_nind -1 # additional member per household
434
- for j in range(amph):
435
- # Randomly select len(hhidx) number of indices from individual_df_temp_idx
436
- idtidx = random.sample(individual_df_temp_idx, len(hhidx))
437
- individual_df.loc[idtidx,'hhID'] = hhidx
438
- #Remove idtidx before next iteration
439
- individual_df_temp = individual_df_temp.drop(index=idtidx)
440
- individual_df_temp_idx = list(individual_df_temp.index)
441
-
442
- individual_df['hhID'] = individual_df['hhID'].astype(int)
443
-
444
- #%% Step 10a: Identify school enrollment for each individual
445
- # Final output 0 = not enrolled in school, 1 = enrolled in school
446
- # Assumption 16: Schooling age limits- AP2 and AP3 ( 5 to 18 years old)
447
- # can go to school
448
- # Convert distribution table 5a to numpy array
449
- # Table 5a contains school enrollment probability
450
- for row in range((len(tables['t5a'][0]))):
451
- tables['t5a'][0][row]=np.array(tables['t5a'][0][row],dtype=float)
452
- t5a = np.array(tables['t5a'][0]) # Table 5a
453
- # Find individuals with age between 5-18 (these are students)
454
- # Also find individual Id of students and household Id of students
455
- agemask = (individual_df['age'] == 2) | (individual_df['age']==3)
456
- school_df = pd.DataFrame(np.nan, index = range(sum(agemask)),
457
- columns=['indivID','hhID','eduAttStatH','income','enrollment'])
458
- school_df_idx = individual_df.loc[agemask,'indivID'].index
459
- school_df.set_index(school_df_idx, inplace=True)
460
- school_df['indivID'] = individual_df.loc[agemask,'indivID']
461
- school_df['hhID'] = individual_df.loc[agemask,'hhID']
462
- # Then, pick a slice of individual_df corresponding to the household a student
463
- # belongs to. From there, Pick eduAtt status of head of household. To expedite
464
- # computation, dataframe columns have been converted to list
465
- school_df_hhid_list = list(school_df['hhID'])
466
- temp_df = individual_df[individual_df['hhID'].isin(school_df_hhid_list)]
467
- head4school_df = temp_df[temp_df['head'] == 1]
468
- head4school_df_hhID_list = list(head4school_df['hhID'])
469
- head4school_df_edus_list = list(head4school_df['eduAttStat'])
470
- school_df_edu_list = np.ones(len(school_df_hhid_list))*np.nan
471
-
472
- # Label 'lowIncomeA' and 'lowIncomeB' = 1, 'midIncome' =2, 'highIncome' =3
473
- household_df_hhid_list = list(household_df['hhID'])
474
- #Use .copy() to avoid SettingwithCopyWarning
475
- income4school_df=household_df[household_df['hhID'].\
476
- isin(school_df_hhid_list)].copy()
477
- li_mask = (income4school_df['income'] == avg_income_types[0]) |\
478
- (income4school_df['income'] == avg_income_types[1])
479
- lm_mask = income4school_df['income'] == avg_income_types[2]
480
- lh_mask = income4school_df['income'] == avg_income_types[3]
481
- income4school_df.loc[li_mask,'income'] = 1
482
- income4school_df.loc[lm_mask,'income'] = 2
483
- income4school_df.loc[lh_mask,'income'] = 3
484
- income4school_df_income_list = list(income4school_df['income'])
485
- income4school_df_hhID_list = list(income4school_df['hhID'])
486
- school_df_income_list = np.ones(len(school_df_hhid_list))*np.nan
487
-
488
- count=0
489
- # NOTE: If the operation inside this for loop can be replaced with indexing
490
- # operation the computation time for this code can be further reduced.
491
- for hhid in school_df_hhid_list:
492
- #assign education attained by head of household to school_df
493
- hhid_temp = [i for i, value in enumerate(head4school_df_hhID_list)\
494
- if value == hhid ]
495
- school_df_edu_list[count] = head4school_df_edus_list[hhid_temp[0]]
496
- #assign income type of household to school_df
497
- hhid_temp2 = [i for i, value in enumerate(income4school_df_hhID_list)\
498
- if value == hhid ]
499
- school_df_income_list[count] = income4school_df_income_list[hhid_temp[0]]
500
- count+=1
501
-
502
- school_df.loc[school_df.index, 'eduAttStatH'] = school_df_edu_list
503
- school_df['eduAttStatH'] = school_df['eduAttStatH'].astype(int)
504
- school_df['income'] = school_df_income_list
505
- school_df['income'] = school_df['income'].astype(int)
506
-
507
- #assign school enrollment (1 = enrolled, 0 = not enrolled)
508
- for incomeclass in range(1,4): # Income class 1,2,3
509
- for head_eduAttStat in range(1,6): # Education attainment category 1 to 5
510
- enrmask = (school_df['income'] == incomeclass) &\
511
- (school_df['eduAttStatH'] == head_eduAttStat)
512
- no_of_pstudents = sum(enrmask) # Number of potential students
513
- if no_of_pstudents ==0: #continue if no students exist for given case
514
- continue
515
- i,j = incomeclass-1, head_eduAttStat-1 # indices to access table 5a
516
- d_limit = no_of_pstudents # Size of array to match after rounding off
517
- d_value = [1,0] #1= enrolled, 0 = not enrolled
518
- d_number = np.array([t5a[i,j], 1-t5a[i,j]])*no_of_pstudents
519
- insert_vector = dist2vector(d_value, d_number,d_limit,'shuffle')
520
- school_df.loc[enrmask,'enrollment'] = insert_vector
521
-
522
- school_df['enrollment']= school_df['enrollment'].astype(int)
523
- # Substitute the enrollment status back to individual_df dataframe
524
- individual_df.loc[school_df.index,'schoolEnrollment']= school_df['enrollment']
525
-
526
-
527
- #%% Step 11: Identify approximate total residential building area needed
528
- # (approxDwellingAreaNeeded_sqm)
529
- # Assumption 7a: Average dwelling area (sqm) wrt income type (44 for LI,
530
- # 54 for MI, 67 for HI in Tomorrovwille)
531
- # The output is stored in the column 'totalbldarea_res' in landuse_res_df,
532
- # which represents the total buildable area
533
-
534
- #Sub dataframe of landuse type containing only residential areas
535
- landuse_res_df = landuse.loc[nHouse.index].copy()
536
- landuse_res_df.loc[nHouse.index,'nHousehold'] = nHouse
537
- hh_temp_df = household_df.copy()
538
-
539
- for i in range(0,len(avg_income_types)):
540
- hh_temp_df['income'] = hh_temp_df['income'].replace(avg_income_types[i],\
541
- average_dwelling_area[i])
542
- for index in landuse_res_df.index: # Loop through each residential zone
543
- zoneID = landuse_res_df['zoneID'][index]
544
- sum_part = hh_temp_df.loc[hh_temp_df['zoneID']==zoneID,'income'].sum()
545
- landuse_res_df.loc[index, 'approxDwellingAreaNeeded_sqm'] = sum_part
546
-
547
- # Zones where no households live i.e. potential commercial or industrial zones
548
- noHH = nHouse_all[nHouse_all<=0].index
549
- landuse_ic_df = landuse.loc[noHH].copy()
550
- landuse_ic_df['area'] = landuse_ic_df['area']*10000 # Convert hectare to sq m
551
-
552
- #%% Note on Land use types (LUT), load resisting system (LRS) and storey height
553
- # Land Use Type
554
- # 1 - 'AGRICULTURE'
555
- # 2 - 'CITY CENTER'
556
- # 3 - 'COMMERCIAL AND RESIDENTIAL'
557
- # 4 - 'HISTORICAL PRESERVATION AREA'
558
- # 5 - 'INDUSTRY'
559
- # 6 - 'NEW DEVELOPMENT'
560
- # 7 - 'NEW PLANNING'
561
- # 8 - 'RECREATION AREA'
562
- # 9 - 'RESIDENTIAL (GATED NEIGHBORHOOD)'
563
- # 10- 'RESIDENTIAL (HIGH DENSITY)'
564
- # 11- 'RESIDENTIAL (LOW DENSITY)'
565
- # 12- 'RESIDENTIAL (MODERATE DENSITY)'
566
-
567
- # Storey definition:
568
- # 1 - Low rise (LR) 1-4
569
- # 2 - Mid rise (MR) 5-8
570
- # 3 - High rise (HR) 9-19
571
-
572
- # LRS Types
573
- # 1 - BrCfl: brick and cement with flexible floor;
574
- # 2 - BrCri: brick and cement with rigid floor;
575
- # 3 - BrM: brick and mud
576
- # 4 - Adb: Adobe
577
- # 5 - RCi : Reinforced Concrete infill
578
-
579
- # Code Compliance Levels (Low, Medium, High): 1 - LC, 2 - MC, 3 - HC
580
-
581
- # Occupancy types: Residential (Res), Industrial (Ind), Commercial (Com)
582
- # Residential and commercial mixed (ResCom)
583
-
584
- #%% Steps 12,13,14,15:
585
- # Identify number of residential buildings and generate building layer
586
- # Asumption 7: Range of footprint area (sqm) wrt. Income type (32-66 for LI,
587
- # 32-78 for MI and 70-132 for HI in Tomorrowville)
588
-
589
- # Table 7 contains Number of storeys distribution for various LRS and LUT
590
- # Table 11 contains code compliance distribution for various LRS and LUT
591
- t7= tables['t7'][0]
592
- t11 = tables['t11'][0]
593
-
594
- # Convert Table 8 to numpy array
595
- # Table8 contains LRS distribution with respect to various LUT
596
- for row in range((len(tables['t8'][0]))):
597
- tables['t8'][0][row]=np.array(tables['t8'][0][row],dtype=float)
598
- t8 = np.array(tables['t8'][0]) # Table 8
599
-
600
- # Determine the number of buildings in each zone based on average income class
601
- # building footprint range for each landuse zone and Tables 7 and 8
602
- no_of_resbldg = 0 # Total residential buildings in all zones
603
- footprint_base_sum = 0 # footprint at base, not multiplied by storeys
604
- footprint_base_L,storey_L,lrs_L,zoneid_L,codelevel_L = [],[],[],[],[]
605
-
606
- for i in landuse_res_df.index: #Loop through zones
607
- zoneid = landuse_res_df['zoneID'][i]
608
- #totalbldarea_res = landuse_res_df['totalbldarea_res'][i]
609
- #totalbldarea_res is the total residential area that needs to be built
610
- totalbldarea_res = landuse_res_df.loc[i,'approxDwellingAreaNeeded_sqm']
611
- avgincome = landuse_res_df['avgIncome'][i]
612
- lut_zone = landuse_res_df['LuF'][i]
613
- fpt_range = fpt_area[avgincome]
614
- # Generate a vector of footprints such that sum of all the footprints in
615
- # lenmax equals maximum possible length of vector of building footprints
616
- lenmax = int(totalbldarea_res/np.min(fpt_range))
617
- footprints_temp = np.random.uniform(np.min(fpt_range),\
618
- np.max(fpt_range), size=(lenmax,1))
619
- footprints_temp = footprints_temp.reshape(len(footprints_temp),)
620
- # Select LRS using multinomial distribution and Table 8
621
- lrs_number=multinomial(len(footprints_temp), t8[lutidx[lut_zone]],size=1)
622
- lrs_vector=np.array(dist2vector(lrs_types,lrs_number,\
623
- np.sum(lrs_number),'shuffle'))
624
-
625
- # Select storeys in a zone for various LRS using multinomial distribution
626
- #storey_vector = np.array([],dtype=int)
627
- storey_vector = np.array(np.zeros(len(lrs_vector),dtype=int)) #must be assigned after loop
628
- for lrs in lrs_types: # Loop through LRS types in a zone
629
- t7row = t7[lutidx[lut_zone]] #Extract row for LUT
630
- #Extract storey distribution in row for LRS
631
- t7dist = np.fromstring(t7row[lrsidx[lrs]],dtype=float, sep=',')
632
- lrs_pos = lrs_vector==lrs
633
- storey_number = multinomial(sum(lrs_pos),t7dist,size=1)
634
- storey_vector_part = np.array([],dtype=int)
635
- for idx,st_range in storey_range.items(): #Loop through storey classes
636
- sv_temp = \
637
- randint(st_range[0],st_range[1]+1,storey_number[0][idx])
638
- storey_vector_part = \
639
- np.concatenate((storey_vector_part,sv_temp),axis =0)
640
- # Need to shuffle storey_vector before multiplying and deleting
641
- #extra values, otherwise 100% of storeys will be low rise, resulting in
642
- #larger number of buildings
643
- np.random.shuffle(storey_vector_part)
644
- storey_vector[lrs_pos] =storey_vector_part
645
- # Select code compliance level for various LRS using multinomial dist
646
- cc_vector = [] # code compliance vector for a zone
647
- for lrs in lrs_types: # for each LRS in a zone
648
- t11row = t11[lutidx[lut_zone]]
649
- t11dist = np.fromstring(t11row[lrsidx[lrs]],dtype=float, sep=',')
650
- lrs_pos = lrs_vector==lrs
651
- cc_number = multinomial(sum(lrs_pos),t11dist,size=1)
652
- cc_part = dist2vector(code_level, cc_number,sum(lrs_pos),'shuffle')
653
- cc_vector += cc_part
654
- random.shuffle(cc_vector)
655
-
656
- #If it is necessary to equalize number of storeys = number of households
657
- storey_vector_cs = np.cumsum(storey_vector)
658
- stmask = storey_vector_cs <= landuse_res_df.loc[i,'nHousehold']
659
- stlimit_idx = np.max(np.where(stmask))+1
660
- stlimit_idx_range = range(stlimit_idx+1,len(footprints_temp))
661
-
662
- #If it is necessary to equalize required footprint = provided footprint
663
- footprints_base = footprints_temp #Footprints without storey
664
- dwellingArea_temp= footprints_temp*storey_vector
665
- dwellingArea_temp_cs = np.cumsum(dwellingArea_temp)
666
- #OPTIONAL:Here, introduce a method to match total buildable area (dwelling)
667
- fpmask = dwellingArea_temp_cs <= totalbldarea_res
668
- #Indices of footprints whose sum <= dwelling area needed in a zone
669
- # '+ 1' provides slightly more dwelling area than needed
670
- footprints_idx = np.max(np.where(fpmask)) + 1
671
-
672
- # Delete additional entries in the vectors for footprint, lrs and storeys
673
- # which do not fit into total buildable area
674
- #ftrange = range(footprints_idx+1,len(dwellingArea_temp))
675
- ftrange = stlimit_idx_range
676
-
677
- dwellingArea = np.delete(dwellingArea_temp,ftrange)
678
- footprints_base = np.delete(footprints_base,ftrange)
679
- lrs_vector_final = np.delete(lrs_vector,ftrange)
680
- storey_vector_final = np.delete(storey_vector,ftrange)
681
- cc_vector = np.array(cc_vector)
682
- cc_vector_final = np.delete(cc_vector,ftrange)
683
- no_of_resbldg += len(dwellingArea)
684
-
685
- #footprint_base_sum+=np.sum(footprints_base)
686
- # Store the vectors in lists for substitution in dataframe
687
- footprint_base_L += list(footprints_base)
688
- storey_L += list(storey_vector_final)
689
- lrs_L += list(lrs_vector_final)
690
- zoneid_L += [zoneid]*len(dwellingArea)
691
- codelevel_L += list(cc_vector_final)
692
-
693
- landuse_res_df.loc[i,'footprint_sqm'] = np.sum(footprints_base)
694
- landuse_res_df.loc[i,'dwellingAreaProvided_sqm'] = np.sum(dwellingArea)
695
-
696
- landuse_res_df.loc[i, 'Storey_units'] = sum(storey_vector_final)
697
- #'No_of_res_buildings' denotes total residential + ResCom buildings
698
- landuse_res_df.loc[i, 'No_of_res_buildings'] = len(footprints_base)
699
- # Check distribution after deletion (for debugging) by counting LR
700
- #print(sum(storey_vector_final<5)/len(storey_vector_final))
701
-
702
- # landuse_res_df['area'] denotes the total buildable area
703
- landuse_res_df['area'] *= 10000 # Convert hectares to sq m, 1ha =10^4 sqm
704
-
705
- # landuse_res_df['builtArea_percent'] denotes the percentage of total
706
- # buildable area that needs to be built to accomodate the projected population
707
- landuse_res_df['builtArea_percent'] =\
708
- landuse_res_df['footprint_sqm']/landuse_res_df['area']*100
709
-
710
- #ADD HERE : EXCEPTION HANDLING for built area exceeding available area
711
-
712
- #print(no_of_resbldg)
713
-
714
- #ADD: Check if calculated footprint exceeds total buildable area (landuse.area)
715
-
716
- #Create and populate the building layer, with unassigned values as NaN
717
- resbld_df = pd.DataFrame(np.nan, index = range(0, no_of_resbldg),
718
- columns=['zoneID', 'bldID', 'specialFac', 'repValue',
719
- 'nHouse', 'residents', 'expStr','fptarea',
720
- 'OccBld','lrstype','CodeLevel',
721
- 'nstoreys'])
722
- resbld_range = range(0,no_of_resbldg)
723
- #resbld_df.loc[resbld_range,'bldID'] = list(range(1,no_of_resbldg+1))
724
- resbld_df.loc[resbld_range,'zoneID'] = zoneid_L
725
- resbld_df['zoneID'] = resbld_df['zoneID'].astype('int')
726
- resbld_df.loc[resbld_range,'OccBld'] = 'Res'
727
- resbld_df.loc[resbld_range,'specialFac'] = 0
728
- resbld_df.loc[resbld_range,'fptarea'] = footprint_base_L
729
- resbld_df.loc[resbld_range,'nstoreys'] = storey_L
730
- resbld_df.loc[resbld_range,'lrstype'] = lrs_L
731
- resbld_df.loc[resbld_range,'CodeLevel'] = codelevel_L
732
-
733
-
734
- #%% Assign zoneIDs and building IDs for Res and ResCom
735
- # Assign 'ResCom' status based on Table 9
736
- # Assumption: Total residential buildings = Res + ResCom
737
- # Convert Table 9 to numpy array
738
- # Table 9 contains occupancy type with respect to various LUT
739
- # Occupancy types: Residential (Res), Industrial (Ind), Commercial (Com)
740
- # Residential and commercial mixed (ResCom)
741
- for row in range((len(tables['t9'][0]))):
742
- tables['t9'][0][row]=np.array(tables['t9'][0][row],dtype=float)
743
- t9 = np.array(tables['t9'][0]) # Table 9
744
-
745
- #available_LUT = list(set(landuse_res_df['LuF']))
746
- available_zoneID = list(set(resbld_df['zoneID']))
747
- for zoneID in available_zoneID: #Loop through zones
748
- zonemask = resbld_df['zoneID'] == zoneID
749
- zone_idx = list(zonemask.index.values[zonemask])
750
- lutlrdidx=landuse_res_df[landuse_res_df['zoneID']==zoneID].index.values[0]
751
- #Occupancy type distribution for a zone
752
- occtypedist = t9[lutidx[ landuse_res_df['LuF'][lutlrdidx]]]
753
- no_of_resbld = sum(zonemask) # Number of residential buildings in a zone
754
- if occtypedist[3] !=0: # if mixed residential+commercial buildings exist
755
- # nrc = number of mixed res+com buildings in a zone
756
- nrc = int(occtypedist[3]/occtypedist[0]*no_of_resbld)
757
- else: # if only residential buildings exist
758
- continue
759
- nrc_idx = sample(zone_idx,nrc)
760
- resbld_df.loc[nrc_idx,'OccBld'] = 'ResCom'
761
-
762
- #Assign building Ids for res and rescom buildings
763
- lenresbld = len(resbld_df)
764
- resbld_df.loc[range(0,lenresbld),'bldID'] = list(range(1,lenresbld+1))
765
- resbld_df['bldID'] = resbld_df['bldID'].astype('int')
766
-
767
-
768
- #%% STEP16: Identify and assign number of households and residents for each
769
- #residential building
770
- #Assign nHouse, residents. All the households and residents must be assigned
771
- #to this layer.
772
-
773
- dwellings_str=dist2vector(resbld_df['bldID'],np.array(storey_L),\
774
- np.sum(np.array(storey_L)),'DoNotShuffle')
775
- dwellings = list(map(int,dwellings_str))
776
- #dwellings.sort()
777
- dwellings_selected = dwellings[0:len(household_df)]
778
- random.shuffle(dwellings_selected)
779
- #Assign building IDs to all households
780
- household_df.loc[:,'bldID'] = dwellings_selected
781
-
782
- # The number of residential buildings are slightly more than that needed by
783
- # the total population. After the IDs are sorted, some of the buildings towards
784
- # the end of the list will receive no population, and will be deleted from the
785
- # building dataframe.
786
- # QUESTION: What will happen if the household_df zoneType and ZoneIDs are
787
- # modified to inherit the zoneType and zoneIDs of the building they are
788
- # assigned to at this step? ANSWER: It could conflict with Table 2, but it
789
- # eliminates the inconsistency between building income zone and income level
790
- # of its inhabitants.
791
-
792
- # Assign number of households and residents to residential buildings resbld_df
793
- # This loop must be optimized for speed
794
- count =0
795
- for bldid in resbld_df['bldID']:
796
- bldidmask = household_df['bldID'] == bldid
797
- resbld_df.loc[count,'nHouse'] = sum(bldidmask)
798
- resbld_df.loc[count,'residents'] =sum(household_df['nIND'][bldidmask])
799
- count+=1
800
-
801
- # Remove rows in resbld_df which contains no residents
802
- to_del = resbld_df['nHouse'] ==0
803
- resbld_df = resbld_df.drop(index=resbld_df.index[to_del])
804
-
805
-
806
- #%% Step 17,18: Identify and generate commercial and industrial buildings
807
- # No household or individual lives in com, ind, hosp, sch zones
808
- # Assumption 10 and 11: Assume a certain number of commercial and industrial
809
- # buildings per 1000 individuals
810
-
811
- # No commercial and industrial buildings in:recreational areas,agriculture,
812
- # residential (gated neighbourhood), residential (low-density)
813
- # But com an ind build can occur in any zone where permitted by table 9
814
- ncom = round(nindiv/1000*numb_com)
815
- nind = round(nindiv/1000*numb_ind)
816
- nci = np.array([ncom,nind])
817
- occbld_label = ['Com','Ind']
818
- nci_cs = np.cumsum(nci)
819
- indcom_df = pd.DataFrame(np.nan, index = range(0, ncom+nind),
820
- columns=['zoneID', 'bldID', 'specialFac', 'repValue',
821
- 'nHouse', 'residents', 'expStr','fptarea',
822
- 'lut_number','OccBld','lrstype','CodeLevel',
823
- 'nstoreys'])
824
-
825
- t10= tables['t10'][0] # Extract Table 10
826
- a = 0
827
- for i in range(0,len(nci)): # First commercial, then industrial
828
- attr = t10[i]
829
- #Extract distributions for footprint, storeys, code compliance and LRS
830
- fpt_ic = np.fromstring(attr[0], dtype=float, sep=',')
831
- nstorey_ic = np.fromstring(attr[1], dtype=int, sep=',')
832
- codelevel_ic = np.fromstring(attr[2], dtype=float, sep=',')
833
- lrs_ic = np.fromstring(attr[3], dtype=float, sep=',')
834
- range_ic = range(a,nci_cs[i])
835
- a = nci_cs[i]
836
- # Generate footprints
837
- indcom_df.loc[range_ic,'fptarea'] = np.random.uniform(\
838
- np.min(fpt_ic),np.max(fpt_ic), size=(nci[i],1)).reshape(nci[i],)
839
- # Generate number of storeys
840
- indcom_df.loc[range_ic,'nstoreys'] =randint(np.min(nstorey_ic),\
841
- np.max(nstorey_ic)+1,size=(nci[i],1)).reshape(nci[i],)
842
- # Generate code compliance
843
- cc_number_ic = multinomial(nci[i],codelevel_ic,size=1)
844
- indcom_df.loc[range_ic,'CodeLevel'] =\
845
- dist2vector(code_level, cc_number_ic,nci[i],'shuffle')
846
- # Generate LRS
847
- lrs_number_ic = multinomial(nci[i],lrs_ic,size=1)
848
- indcom_df.loc[range_ic,'lrstype'] =\
849
- dist2vector(lrs_types,lrs_number_ic,nci[i],'shuffle')
850
- indcom_df.loc[range_ic,'OccBld']= occbld_label[i]
851
-
852
- # Assign number of households, Residents, special facility label
853
- range_all_ic = range(0,len(indcom_df))
854
- indcom_df.loc[range_all_ic,'nHouse'] = 0
855
- indcom_df.loc[range_all_ic,'residents'] = 0
856
- indcom_df.loc[range_all_ic,'specialFac'] = 0
857
-
858
- ind_df = indcom_df[indcom_df['OccBld'] == 'Ind'].copy()
859
- com_df = indcom_df[indcom_df['OccBld'] == 'Com'].copy()
860
- ind_df.reset_index(drop=True,inplace=True)
861
- com_df.reset_index(drop=True,inplace=True)
862
-
863
-
864
- #%% Step 19,20 Generate school and hospitals along with their attributes
865
-
866
- # Assumption 14 and 15: For example : 1 school per 10000 individuals,
867
- # 1 hospital per 25000 individuals
868
- nsch = round(nindiv/nsch_pi) # Number of schools
869
- nhsp = round(nindiv/nhsp_pi) # Number of hospitals
870
-
871
- if nsch == 0:
872
- print("WARNING: Total population",nindiv,"is less than the user-specified "\
873
- "number of individuals per school",nsch_pi,". So, total school for "\
874
- "this population = 1 (by default) ")
875
- nsch = 1
876
-
877
- if nhsp == 0:
878
- print("WARNING: Total population",nindiv,"is less than the user-specified "\
879
- "number of individuals per hospital",nhsp_pi,". So, total hospital for "\
880
- "this population = 1 (by default) ")
881
- nhsp = 1
882
-
883
- nsh = np.array([nsch,nhsp])
884
- nsh_cs = np.cumsum(nsh)
885
- occbld_label_sh = ['Edu','Hea']
886
- specialFac = [1,2] # Special facility label
887
- schhsp_df = pd.DataFrame(np.nan, index = range(0, nsch+nhsp),
888
- columns=['zoneID', 'bldID', 'specialFac', 'repValue',
889
- 'nHouse', 'residents', 'expStr','fptarea',
890
- 'lut_number','OccBld','lrstype','CodeLevel',
891
- 'nstoreys'])
892
- t14= tables['t14'][0] # Extract Table 14
893
- a=0
894
- for i in range(0,len(t14)): # First school, then hospital
895
- attr_sh = t14[i]
896
- #Extract distributions for footprint, storeys, code compliance and LRS
897
- fpt_sh = np.fromstring(attr_sh[0], dtype=float, sep=',')
898
- nstorey_sh = np.fromstring(attr_sh[1], dtype=int, sep=',')
899
- codelevel_sh = np.fromstring(attr_sh[2], dtype=float, sep=',')
900
- lrs_sh = np.fromstring(attr_sh[3], dtype=float, sep=',')
901
- range_sh = range(a,nsh_cs[i])
902
- a = nsh_cs[i]
903
- # Generate footprints
904
- schhsp_df.loc[range_sh,'fptarea'] = np.random.uniform(\
905
- np.min(fpt_sh),np.max(fpt_sh), size=(nsh[i],1)).reshape(nsh[i],)
906
- # Generate number of storeys
907
- schhsp_df.loc[range_sh,'nstoreys'] =randint(np.min(nstorey_sh),\
908
- np.max(nstorey_sh)+1,size=(nsh[i],1)).reshape(nsh[i],)
909
- # Generate code compliance
910
- cc_number_sh = multinomial(nsh[i],codelevel_sh,size=1)
911
- schhsp_df.loc[range_sh,'CodeLevel'] =\
912
- dist2vector(code_level, cc_number_sh,nsh[i],'shuffle')
913
- # Generate LRS
914
- lrs_number_sh = multinomial(nsh[i],lrs_sh,size=1)
915
- schhsp_df.loc[range_sh,'lrstype'] =\
916
- dist2vector(lrs_types,lrs_number_sh,nsh[i],'shuffle')
917
- schhsp_df.loc[range_sh,'OccBld']= occbld_label_sh[i]
918
-
919
- # Assign special facility label
920
- schhsp_df.loc[range_sh,'specialFac'] = specialFac[i]
921
-
922
- # Assign number of households, Residents,
923
- range_all_sh = range(0,len(schhsp_df))
924
- schhsp_df.loc[range_all_sh,'nHouse'] = 0
925
- schhsp_df.loc[range_all_sh,'residents'] = 0
926
-
927
-
928
- #%% Assign zoneIds for Industrial and Commercial buildings
929
-
930
- # The number of industrial and commercial buildings are estimated using the
931
- # following 2 methods:
932
- # Method 1: Assumption of number of industrial or commercial building per
933
- # 1000 individuals. (Done in steps 17,18)
934
- # Method 2: Table 9 specifies what the occupancy type distribution should be
935
- # in different land use types. This gives a different estimate of the
936
- # number of the buiildings as compared to Method 1. (Done here)
937
- # To make these two Methods compatible, the value from Method 1 is treated as
938
- # the actual value of the buildings, and Method 2 is used to ensure that
939
- # these buildings are distributed in such a way that they follow Table 9.
940
- #
941
- # The following method of assigning the ZoneIDs treats the mixed used zones
942
- # (residential, residential+commercial) and purely industrial or commercial
943
- # zones as 2 separate cases.
944
- #
945
- # For each of the following 2 cases, we need to first find the number of
946
- # industrial and commercial buildings in each zone
947
-
948
- # Case 1: For industrial/commercial buildings in residential areas_____________
949
- for i in landuse_res_df.index:
950
- #Occupancy type distribution for a zone
951
- otd = t9[lutidx[landuse_res_df.loc[i,'LuF']]]
952
- if otd[1]==0 and otd[2]==0:
953
- # If neither industrial nor commercial buildings exist
954
- landuse_res_df.loc[i,'ind_weightage'] = 0
955
- landuse_res_df.loc[i,'com_weightage'] = 0
956
- continue
957
- # Number of residential + rescom building
958
- Nrc = landuse_res_df.loc[i, 'No_of_res_buildings']
959
-
960
- # Tb = total possible number of buildings in a zone (all accupancy types)
961
- # This is used as weightage factor to distribute the buildings
962
- # according to Method 2.
963
- if otd[0] == 0 and otd[3]==0:
964
- Tb = Nrc # If neither residential nor res+com exist
965
- print('If population exists, but neither residential nor '\
966
- 'residential+commercial buildings are allowed, there is '\
967
- 'inconsistency between population and current row in table 9.'\
968
- 'Therefore, it is assumed that total number of buildings in '\
969
- 'zoneID', landuse_res_df.loc[i,'zoneID'],\
970
- '= no. of residential buildings in this zone.')
971
- print('Also, consider allowing residential and/or res+com building '\
972
- 'to this zone in Table 9, if it is assigned population.')
973
- else:
974
- Tb = Nrc/(otd[0]+otd[3]) # If either residential or res+com exist
975
-
976
- #Calculate the number of industrial buildings using Table 9
977
- if otd[1]>0:
978
- landuse_res_df.loc[i,'ind_weightage'] = ceil(Tb * otd[1])
979
- #landuse_res_df.loc[i,'No_of_ind_buildings'] = ceil(Tb * otd[1])
980
- else:
981
- # landuse_res_df.loc[i,'No_of_ind_buildings'] = 0
982
- landuse_res_df.loc[i,'ind_weightage'] = 0
983
-
984
- #Calculate the number of commercial buildings using Table 9
985
- if otd[2]>0:
986
- landuse_res_df.loc[i,'com_weightage'] = ceil(Tb * otd[2])
987
- #landuse_res_df.loc[i,'No_of_com_buildings'] = ceil(Tb * otd[2])
988
- else:
989
- landuse_res_df.loc[i,'com_weightage'] = 0
990
- #landuse_res_df.loc[i,'No_of_com_buildings'] = 0
991
-
992
-
993
- # If number of buildings (industrial/commercial) estimated from Method 2(in the
994
- # above steps of Case 1) exceeds the number of buildings estimated from
995
- # Method 1, treat the value from Method 1 as the upper limit.
996
- # Then, using the number of buildings from Method 2 as weightage factor,
997
- # distribute the number of buildings from Method 1 proportionally to
998
- # all the mixed use zones. This situation arises if the number of
999
- # industrial/commercial buildings per 1000 people is low.
1000
- #
1001
- # Otherwise, if the number of industrial/commercial buildings estimated from
1002
- # Method 1 is larger than that estimated from Method 2, it is assumed that the
1003
- # number of buildings is large enough not to fit into the mixed use zones
1004
- # being considered under Case 1, and the additional buildings not assigned into
1005
- # mixed use zones is assigned under case 2 in the following section.
1006
- #
1007
- # This method requires the area of industrial/commercial buildings in the
1008
- # mixed use zones to be checked separately to see if they fit into these zones.
1009
-
1010
- com_wt = landuse_res_df['com_weightage'].copy()
1011
- if com_wt.sum() > ncom:
1012
- landuse_res_df['No_of_com_buildings'] = np.floor(ncom*com_wt/com_wt.sum())
1013
- else:
1014
- landuse_res_df['No_of_com_buildings'] = com_wt
1015
-
1016
- ind_wt = landuse_res_df['ind_weightage'].copy()
1017
- if ind_wt.sum() > nind:
1018
- landuse_res_df['No_of_ind_buildings'] = np.floor(nind*ind_wt/ind_wt.sum())
1019
- else:
1020
- landuse_res_df['No_of_ind_buildings'] = ind_wt
1021
-
1022
-
1023
- landuse_res_df['No_of_ind_buildings'] =\
1024
- landuse_res_df['No_of_ind_buildings'].astype('int')
1025
- landuse_res_df['No_of_com_buildings'] =\
1026
- landuse_res_df['No_of_com_buildings'].astype('int')
1027
-
1028
- # Number and area of commercial buildings to be assigned
1029
- nCom_asgn = landuse_res_df['No_of_com_buildings'].sum()
1030
- nCom_asgn_area = com_df.loc[range(0, nCom_asgn),'fptarea'].sum()
1031
- # Number and area of industrial buildings to be assigned
1032
- nInd_asgn = landuse_res_df['No_of_ind_buildings'].sum()
1033
- nInd_asgn_area = ind_df.loc[range(0,nInd_asgn),'fptarea'].sum()
1034
-
1035
-
1036
- # Assign zoneID to industrial buildings (if any) in residential areas
1037
- zoneID_r_i = dist2vector(list(landuse_res_df['zoneID']),\
1038
- list(landuse_res_df['No_of_ind_buildings']),nInd_asgn,'shuffle')
1039
- ind_df.loc[range(0,nInd_asgn),'zoneID'] = list(map(int,zoneID_r_i))
1040
-
1041
- # Assign zoneID to commercial buildings (if any) in residential areas
1042
- zoneID_r_c = dist2vector(list(landuse_res_df['zoneID']),\
1043
- list(landuse_res_df['No_of_com_buildings']),nCom_asgn,'shuffle')
1044
- com_df.loc[range(0,nCom_asgn),'zoneID'] = list(map(int,zoneID_r_c))
1045
-
1046
-
1047
- # Back-calculated number of commercial buildings per 1000 people
1048
- #nCom_asgn/(len(individual_df)/1000)
1049
-
1050
- # Case 2 For industrial/commercial buildings in non-residential areas__________
1051
-
1052
- # Number of industrial buildings that have not been assigned
1053
- nInd_tba = int(len(ind_df) - nInd_asgn)
1054
- # Number of commercial buildings that have not been assigned
1055
- nCom_tba = int(len(com_df) - nCom_asgn)
1056
-
1057
- # Before assigning zones to buildings, find out the area available for buildings
1058
- # in each zones. Since no population is assigned to residential and commercial
1059
- # buildings, the number of buildings in a zone is controlled solely by area.
1060
- for i in landuse_ic_df.index:
1061
- #Occupancy type distribution for a zone
1062
- otd = t9[lutidx[landuse_ic_df.loc[i,'LuF']]]
1063
- if otd[1]>0:
1064
- landuse_ic_df.loc[i,'AreaAvailableForInd']=\
1065
- AC_ind/100*landuse_ic_df.loc[i,'area']
1066
- else:
1067
- landuse_ic_df.loc[i,'AreaAvailableForInd']=0
1068
-
1069
- if otd[2]>0:
1070
- landuse_ic_df.loc[i,'AreaAvailableForCom']=\
1071
- AC_com/100*landuse_ic_df.loc[i,'area']
1072
- else:
1073
- landuse_ic_df.loc[i,'AreaAvailableForCom']=0
1074
-
1075
- # Check how many of the generated com/ind buildings fit into the available area
1076
- ind_fptarea_cs = list(np.cumsum(ind_df['fptarea']))
1077
- com_fptarea_cs = list(np.cumsum(com_df['fptarea']))
1078
-
1079
- # Total areas available for commercial and industrial buildings in all zones
1080
- At_c= landuse_ic_df['AreaAvailableForCom'].sum()
1081
- At_i = landuse_ic_df['AreaAvailableForInd'].sum()
1082
- licidx = landuse_ic_df.index
1083
-
1084
- unassigned_ind_area = ind_fptarea_cs[-1]-nInd_asgn_area # Total - assigned
1085
- if unassigned_ind_area <= At_i:
1086
- landuse_ic_df.loc[licidx,'No_of_ind_buildings'] =\
1087
- landuse_ic_df['AreaAvailableForInd']/At_i*nInd_tba
1088
- landuse_ic_df['No_of_ind_buildings']=\
1089
- landuse_ic_df['No_of_ind_buildings'].astype('int')
1090
- else:
1091
- print('Required industrial buildings do not fit into available land area.')
1092
- sys.exit(1)
1093
-
1094
- unassigned_com_area = com_fptarea_cs[-1]-nCom_asgn_area
1095
- if unassigned_com_area <= At_c:
1096
- landuse_ic_df.loc[licidx,'No_of_com_buildings'] =\
1097
- landuse_ic_df['AreaAvailableForCom']/At_c*nCom_tba
1098
- landuse_ic_df['No_of_com_buildings']=\
1099
- landuse_ic_df['No_of_com_buildings'].astype('int')
1100
- else:
1101
- print('Required commercial buildings do not fit into available land area.')
1102
- sys.exit(1)
1103
-
1104
- # Begin assigning buildings to zones
1105
- # Assign zoneID to industrial buildings (if any) in industrial areas
1106
- zoneID_ic_i = dist2vector(list(landuse_ic_df['zoneID']),\
1107
- list(landuse_ic_df['No_of_ind_buildings']),nInd_tba,'shuffle')
1108
- ind_df.loc[range(nInd_asgn,nInd_asgn+nInd_tba),'zoneID']=list(map(int,zoneID_ic_i))
1109
-
1110
- # Assign zoneID to commercial buildings (if any) in commercial areas
1111
- zoneID_ic_c = dist2vector(list(landuse_ic_df['zoneID']),\
1112
- list(landuse_ic_df['No_of_com_buildings']),nCom_tba,'shuffle')
1113
- com_df.loc[range(nCom_asgn,nCom_asgn+nCom_tba),'zoneID']=list(map(int,zoneID_ic_c))
1114
-
1115
-
1116
- #%% Find populations in each zones and assign it back to landuse layer
1117
- for i in landuse.index:
1118
- zidmask = resbld_df['zoneID'] == landuse.loc[i,'zoneID']
1119
- if sum(zidmask) == 0: # if no population has been added to the zone
1120
- landuse.loc[i,'populationAdded'] = 0
1121
- continue
1122
- else: # if new population has been added to the zone
1123
- zone_nInd = resbld_df['residents'][zidmask]
1124
- landuse.loc[i,'populationAdded'] = int(zone_nInd.sum())
1125
- # population=Existing population, populationAdded=Projected future population
1126
- # populationFinal = existing + future projected population
1127
- landuse['populationFinal'] = landuse['population']+landuse['populationAdded']
1128
- landuse['populationFinal'] = landuse['populationFinal'].astype('int')
1129
-
1130
-
1131
-
1132
- #%% Assign zoneIds for schools and hospitals
1133
- # Assign schools and hospitals to zones starting from the highest
1134
- # population until the number of schools and hospitals are reached
1135
- landuse_sorted = landuse.sort_values(by=['populationFinal'],\
1136
- ascending=False).copy()
1137
- landuse_sorted.reset_index(inplace=True, drop=True)
1138
- #Remove zones without population
1139
- no_popl_zones = landuse_sorted['populationFinal']==0
1140
- landuse_sorted =landuse_sorted.drop(index=landuse_sorted.index[no_popl_zones])
1141
-
1142
- sch_df = schhsp_df[schhsp_df['OccBld']=='Edu'].copy() #Educational institutions
1143
- hsp_df = schhsp_df[schhsp_df['OccBld']=='Hea'].copy() #Health institutions
1144
-
1145
- sch_df.reset_index(drop=True,inplace=True)
1146
- hsp_df.reset_index(drop=True,inplace=True)
1147
-
1148
- # Assign zoneIDs for schools/educational institutions
1149
- sch_range = range(0,len(sch_df))
1150
- if len(sch_df) <= len(landuse_sorted):
1151
- sch_df.loc[sch_range, 'zoneID'] = landuse_sorted.loc[sch_range,'zoneID']
1152
- else:
1153
- iterations_s = ceil(len(sch_df)/len(landuse_sorted))
1154
- a1_s= list(repeat(landuse_sorted['zoneID'].tolist(),iterations_s))
1155
- a_s = list(chain(*a1_s))
1156
- sch_df.loc[sch_range, 'zoneID'] = a_s[0:len(sch_df)]
1157
-
1158
- # Assign zoneIDs for hospitals/health institutions
1159
- hsp_range= range(0,len(hsp_df))
1160
- if len(hsp_df) <= len(landuse_sorted):
1161
- hsp_range = range(0,len(hsp_df))
1162
- hsp_df.loc[hsp_range, 'zoneID'] = landuse_sorted.loc[hsp_range,'zoneID']
1163
- else:
1164
- iterations_h = ceil(len(hsp_df)/len(landuse_sorted))
1165
- a1_h= list(repeat(landuse_sorted['zoneID'].tolist(),iterations_h))
1166
- a_h = list(chain(*a1_h))
1167
- hsp_df.loc[hsp_range, 'zoneID'] = a_h[0:len(hsp_df)]
1168
-
1169
-
1170
- #%% Concatenate the residential, industrial/commercial and special facilities
1171
- # dataframes to obtain the complete building dataframe
1172
- building_df=pd.concat([resbld_df,ind_df,com_df,sch_df,\
1173
- hsp_df]).reset_index(drop=True)
1174
- building_df['nstoreys'] = building_df['nstoreys'].astype(int)
1175
-
1176
- #Assign exposure string
1177
- building_df['expStr'] = building_df['lrstype'].astype(str)+'+'+\
1178
- building_df['CodeLevel'].astype(str)+'+'+\
1179
- building_df['nstoreys'].astype(str)+'s'+'+'+\
1180
- building_df['OccBld'].astype(str)
1181
- # Assign building ids
1182
- # lenbdf = len(building_df)
1183
- # building_df.loc[range(0,lenbdf),'bldID'] = list(range(1,lenbdf+1))
1184
- building_df.loc[range(len(resbld_df),len(building_df)),'bldID'] =\
1185
- list(range(len(resbld_df)+1,len(building_df)+1))
1186
- building_df['bldID'] = building_df['bldID'].astype('int')
1187
-
1188
-
1189
- #%% Step 21 Employment status of the individuals
1190
- # Assumption 9: Only 20-65 years old individuals can work
1191
- # Extract Tables 12 and 13
1192
- t12 = np.array(tables['t12'][0][0],dtype=float) #[Female, Male]
1193
-
1194
- t13_f = np.array(tables['t13'][0][0],dtype=float) #Female
1195
- t13_m = np.array(tables['t13'][0][1],dtype=float) #Male
1196
- t13 = [t13_f,t13_m]
1197
-
1198
- # Identify individuals who can work
1199
- working_females_mask = (individual_df['gender']==1) & \
1200
- (individual_df['age']>=5) & (individual_df['age']<=9)
1201
- working_males_mask = (individual_df['gender']==2) & \
1202
- (individual_df['age']>=5) & (individual_df['age']<=9)
1203
- potential_female_workers = individual_df.index[working_females_mask]
1204
- potential_male_workers = individual_df.index[working_males_mask]
1205
-
1206
- # But according to Table 12, not all individuals who can work are employed,
1207
- # so the labour force is less than 100%
1208
- labourforce_female = sample(list(potential_female_workers),\
1209
- int(t12[0]*len(potential_female_workers)))
1210
- labourforce_male = sample(list(potential_male_workers),\
1211
- int(t12[1]*len(potential_male_workers)))
1212
- # labourForce = 1 indicates that an individual is a part of labour force, but
1213
- # not necessarily employed.
1214
- individual_df.loc[labourforce_female,'labourForce'] =1
1215
- individual_df.loc[labourforce_male,'labourForce'] =1
1216
-
1217
- # According to Table 13, the employment probability for labourforce differs
1218
- # based on educational attainment status
1219
- for epd_array in t13: #Employment probability distribution for female and male
1220
- count = 0
1221
- ind_employed_idx =[]
1222
- for epd in epd_array: # EPD for various educational attainment status
1223
- # Individuals in labour force that belong to current EPD
1224
- eamask = (individual_df['eduAttStat'] == education_value[count]) & \
1225
- (individual_df['labourForce']==1)
1226
- nInd_in_epd = sum(eamask)
1227
- if nInd_in_epd == 0:
1228
- continue
1229
-
1230
- nInd_employed = int(epd*nInd_in_epd)
1231
- if nInd_employed == 0:
1232
- continue
1233
- ind_ea_labourforce = list(individual_df.index[eamask])
1234
- ind_employed_idx = sample(ind_ea_labourforce, nInd_employed)
1235
- individual_df.loc[ind_employed_idx,'employed'] = 1
1236
-
1237
- #Check ouput epd (for debugging)
1238
- #print(epd,':',len(ind_employed_idx)/len(ind_ea_labourforce))
1239
-
1240
- count+=1
1241
-
1242
- #%% Step 22 Assign IndividualFacID
1243
- # bld_ID of the building that the individual regularly visits
1244
- # (can be workplace, school, etc.)
1245
- # Assumption 13: Each individual is working within the total study area extent.
1246
- # Assumption 17: Each individual (within schooling age limits) goes to
1247
- # school within the total study area extent.
1248
-
1249
- # indivFacID_1 denotes bldID of the schools
1250
- # students (schoolEnrollment=1) go to, whereas, indivFacID_2 denotes bldID of
1251
- # com, ind and rescom buildings where working people go to (workplace bldID).
1252
-
1253
- # Assign working places to employed people in indivFacID_2_________________
1254
- # Working places are defined as occupancy types 'Ind','Com' and 'ResCom'
1255
- workplacemask=(building_df['OccBld']=='Ind') | (building_df['OccBld']=='Com')\
1256
- | (building_df['OccBld'] == 'ResCom')
1257
- workplaceidx = building_df.index[workplacemask]
1258
- workplace_bldID = building_df['bldID'][workplaceidx].tolist()
1259
-
1260
- employedmask = individual_df['employed'] ==1
1261
- employedidx = individual_df.index[employedmask]
1262
- if len(employedidx)>len(workplaceidx):
1263
- repetition = ceil(len(employedidx)/len(workplaceidx))
1264
- workplace_sample_temp = list(repeat(workplace_bldID,repetition))
1265
- workplace_sample = list(chain(*workplace_sample_temp))
1266
- else:
1267
- workplace_sample = workplace_bldID
1268
- random.shuffle(workplace_sample)
1269
-
1270
- individual_df.loc[employedidx,'indivFacID_2'] = \
1271
- workplace_sample[0:sum(employedmask)]
1272
-
1273
- # Assign school bldIDs to enrolled students in indivFacID_1________________
1274
- schoolmask = building_df['OccBld']=='Edu'
1275
- schoolidx = building_df.index[schoolmask]
1276
- school_bldID = building_df['bldID'][schoolidx].tolist()
1277
-
1278
- studentmask = individual_df['schoolEnrollment'] ==1
1279
- studentidx = individual_df.index[studentmask]
1280
- if len(studentidx)>len(schoolidx):
1281
- repetition = ceil(len(studentidx)/len(schoolidx))
1282
- school_sample_temp = list(repeat(school_bldID,repetition))
1283
- school_sample = list(chain(*school_sample_temp))
1284
- else:
1285
- school_sample = school_bldID
1286
- random.shuffle(school_sample)
1287
-
1288
- individual_df.loc[studentidx,'indivFacID_1'] = \
1289
- school_sample[0:sum(studentmask)]
1290
-
1291
- # Replace missing values with -1 instead of NaN
1292
- individual_df['indivFacID_1'] = individual_df['indivFacID_1'].fillna(-1)
1293
- individual_df['indivFacID_2'] = individual_df['indivFacID_2'].fillna(-1)
1294
-
1295
-
1296
- #%% Step 23 Assign community facility ID (CommFacID) to household layer
1297
- # CommFacID denotes the bldID of the hospital the households usually go to.
1298
-
1299
- # In this case, randomly assign bldID of hospitals to the households, but in
1300
- # next version, households must be assigned hospitals closest to their location
1301
- hospitalmask = building_df['OccBld']=='Hea'
1302
- hospitalidx = building_df.index[hospitalmask]
1303
- hospital_bldID = building_df['bldID'][hospitalidx].tolist()
1304
- repetition = ceil(len(household_df)/len(hospitalidx))
1305
- hospital_sample_temp = list(repeat(hospital_bldID,repetition))
1306
- hospital_sample = list(chain(*hospital_sample_temp))
1307
- random.shuffle(hospital_sample)
1308
-
1309
- household_df.loc[household_df.index,'CommFacID'] =\
1310
- hospital_sample[0:len(household_df)]
1311
-
1312
- #%% Step 24 Assign repValue
1313
- # Assumption 12: Unit price for replacement wrt occupation type and
1314
- # special facility status of the building
1315
-
1316
- # Assign unit price
1317
- for occtype in Unit_price:
1318
- occmask = building_df['OccBld'] == occtype
1319
- occidx = building_df.index[occmask]
1320
- building_df.loc[occidx, 'unit_price'] = Unit_price[occtype]
1321
-
1322
- building_df['repValue'] = building_df['fptarea'] *\
1323
- building_df['nstoreys']* building_df['unit_price']
1324
-
1325
-
1326
- #%% Remove unnecessary columns and save the results
1327
- building_df = building_df.drop(columns=\
1328
- ['lut_number','lrstype','CodeLevel','nstoreys','OccBld','unit_price'])
1329
- household_df = household_df.drop(columns=\
1330
- ['income_numb','zoneType','zoneID','approxFootprint'])
1331
- individual_df = individual_df.drop(columns=\
1332
- ['schoolEnrollment','labourForce','employed'])
1333
-
1334
- # Rename indices to convert all header names to lowercase
1335
- building_df.rename(columns={'zoneID':'zoneid','bldID':'bldid','expStr':'expstr',\
1336
- 'specialFac':'specialfac','repValue':'repvalue','nHouse':'nhouse'},\
1337
- inplace=True)
1338
- household_df.rename(columns={'bldID':'bldid','hhID':'hhid','nIND':'nind',\
1339
- 'CommFacID':'commfacid'}, inplace=True)
1340
- individual_df.rename(columns={'hhID':'hhid','indivID':'individ',\
1341
- 'eduAttStat':'eduattstat','indivFacID_1':'indivfacid_1',\
1342
- 'indivFacID_2':'indivfacid_2'}, inplace=True)
1343
- landuse_shp.rename(columns={'zoneID':'zoneid','LuF':'luf',\
1344
- 'densityCap':'densitycap','floorAreaR':'floorarear',\
1345
- 'avgIncome':'avgincome'}, inplace=True)
1346
-
1347
- #%% Generate building centroid coordinates
1348
-
1349
- histo = building_df.groupby(['zoneid'])['zoneid'].count()
1350
- max_val = building_df.groupby(['zoneid'])['fptarea'].max()
1351
- landuse_layer = landuse_shp
1352
- building_layer = building_df
1353
- final_list = []
1354
-
1355
- for i in range(len(histo)):
1356
- df = landuse_layer[landuse_layer['zoneid'] == histo.index[i]].copy()
1357
- bui_indx = building_layer['zoneid'] == histo.index[i]
1358
- bui_attr = building_layer.loc[bui_indx].copy()
1359
-
1360
- rot_a = random.randint(10, 40)
1361
- rot_a_rad = rot_a*math.pi/180
1362
-
1363
- separation_val = math.sqrt(max_val.values[i])/abs(math.cos(rot_a_rad))
1364
- separation_val = round(separation_val, 2)
1365
- boundary_approach = (math.sqrt(max_val.values[i])/2)*math.sqrt(2)
1366
- boundary_approach = round(boundary_approach, 2)
1367
-
1368
- df2 = df.buffer(-boundary_approach)
1369
- df2 = gpd.GeoDataFrame(gpd.GeoSeries(df2))
1370
- df2 = df2.rename(columns={0:'geometry'}).set_geometry('geometry')
1371
-
1372
- xmin, ymin, xmax, ymax = df2.total_bounds
1373
- xcoords = [ii for ii in np.arange(xmin, xmax, separation_val)]
1374
- ycoords = [ii for ii in np.arange(ymin, ymax, separation_val)]
1375
-
1376
- pointcoords = np.array(np.meshgrid(xcoords, ycoords)).T.reshape(-1, 2)
1377
- points = gpd.points_from_xy(x=pointcoords[:,0], y=pointcoords[:,1])
1378
- grid = gpd.GeoSeries(points, crs=df.crs)
1379
- grid.name = 'geometry'
1380
-
1381
- gridinside = gpd.sjoin(gpd.GeoDataFrame(grid), df2[['geometry']], how="inner")
1382
-
1383
- def buff(row):
1384
- return row.geometry.buffer(row.buff_val, cap_style = 3)
1385
-
1386
- if len(gridinside) >= histo.values[i]:
1387
- gridinside = gridinside.sample(min(len(gridinside), histo.values[i]))
1388
- gridinside['xcoord'] = gridinside.geometry.x
1389
- gridinside['ycoord'] = gridinside.geometry.y
1390
-
1391
- buffer_val = np.sqrt(list(bui_attr.fptarea))/2
1392
- buffered = gridinside.copy()
1393
- buffered['buff_val'] = buffer_val[0:len(gridinside)]
1394
-
1395
- buffered['geometry'] = buffered.apply(buff, axis=1)
1396
- polyinside = buffered.rotate(rot_a, origin='centroid')
1397
-
1398
- polyinside2 = gpd.GeoDataFrame(gpd.GeoSeries(polyinside))
1399
- polyinside2 = polyinside2.rename(columns={0:'geometry'}).set_geometry('geometry')
1400
- polyinside2['fid'] = list(range(1,len(polyinside2)+1))
1401
-
1402
- bui_attr['fid'] = list(range(1,len(bui_attr)+1))
1403
- bui_joined = polyinside2.merge(bui_attr, on='fid')
1404
- bui_joined = bui_joined.drop(columns=['fid'])
1405
-
1406
- bui_joined['xcoord'] = list(round(gridinside.geometry.x, 3))
1407
- bui_joined['ycoord'] = list(round(gridinside.geometry.y, 3))
1408
-
1409
- elif len(gridinside) < histo.values[i]:
1410
- separation_val = math.sqrt(max_val.values[i])
1411
- separation_val = round(separation_val, 2)
1412
- boundary_approach = (math.sqrt(max_val.values[i])/2)*math.sqrt(2)
1413
- boundary_approach = round(boundary_approach, 2)
1414
-
1415
- df2 = df.buffer(-boundary_approach, 200)
1416
- df2 = gpd.GeoDataFrame(gpd.GeoSeries(df2))
1417
- df2 = df2.rename(columns={0:'geometry'}).set_geometry('geometry')
1418
-
1419
- xmin, ymin, xmax, ymax = df2.total_bounds
1420
- xcoords = [ii for ii in np.arange(xmin, xmax, separation_val)]
1421
- ycoords = [ii for ii in np.arange(ymin, ymax, separation_val)]
1422
-
1423
- pointcoords = np.array(np.meshgrid(xcoords, ycoords)).T.reshape(-1, 2)
1424
- points = gpd.points_from_xy(x=pointcoords[:,0], y=pointcoords[:,1])
1425
- grid = gpd.GeoSeries(points, crs=df.crs)
1426
- grid.name = 'geometry'
1427
-
1428
- gridinside = gpd.sjoin(gpd.GeoDataFrame(grid), df2[['geometry']], how="inner")
1429
-
1430
- gridinside = gridinside.sample(min(len(gridinside), histo.values[i]))
1431
- gridinside['xcoord'] = gridinside.geometry.x
1432
- gridinside['ycoord'] = gridinside.geometry.y
1433
-
1434
- buffer_val = np.sqrt(list(bui_attr.fptarea))/2
1435
- buffered = gridinside.copy()
1436
- buffered['buff_val'] = buffer_val[0:len(gridinside)]
1437
-
1438
- buffered['geometry'] = buffered.apply(buff, axis=1)
1439
- polyinside = buffered.rotate(0, origin='centroid')
1440
-
1441
- polyinside2 = gpd.GeoDataFrame(gpd.GeoSeries(polyinside))
1442
- polyinside2 = polyinside2.rename(columns={0:'geometry'}).set_geometry('geometry')
1443
- polyinside2['fid'] = list(range(1,len(polyinside2)+1))
1444
-
1445
- bui_attr['fid'] = list(range(1,len(bui_attr)+1))
1446
- bui_joined = polyinside2.merge(bui_attr, on='fid')
1447
- bui_joined = bui_joined.drop(columns=['fid'])
1448
-
1449
- bui_joined['xcoord'] = list(round(gridinside.geometry.x, 3))
1450
- bui_joined['ycoord'] = list(round(gridinside.geometry.y, 3))
1451
-
1452
- final_list.append(bui_joined)
1453
-
1454
- final = pd.concat(final_list)
1455
- temp_cols = final.columns.tolist()
1456
- new_cols = temp_cols[1:] + temp_cols[0:1]
1457
- final = final[new_cols]
1458
-
1459
- temp_cols2 = landuse_shp.columns.tolist()
1460
- new_cols2 = temp_cols2[1:] + temp_cols2[0:1]
1461
- landuse_shp = landuse_shp[new_cols2]
1462
-
1463
- return final, household_df, individual_df, landuse_shp
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/tomorrowcities/utils.py DELETED
@@ -1,10 +0,0 @@
1
- import io
2
- import fiona
3
- import geopandas as gpd
4
-
5
- def read_zipshp(file):
6
- zipshp = io.BytesIO(open(file, 'rb').read())
7
- with fiona.BytesCollection(zipshp.read()) as src:
8
- crs = src.crs
9
- gdf = gpd.GeoDataFrame.from_features(src, crs=crs)
10
- return gdf
 
 
 
 
 
 
 
 
 
 
 
tomorrowcities/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ """Tomorrow's Cities Decision Support Environment"""
2
+ __title__ = "Tomorrow's Cities Decision Support Environment"
3
+ __version__ = "0.0.1"
{src/gui → tomorrowcities/backend}/engine.py RENAMED
@@ -20,21 +20,21 @@ def compute_power_infra(nodes,edges,intensity,fragility):
20
  print(fragility.head())
21
  print(intensity.head())
22
 
23
- eq_vuln = fragility.rename(columns={"med_Slight": "med_ds1",
24
- "med_Moderate": "med_ds2",
25
- "med_Extensive": "med_ds3",
26
- "med_Complete": "med_ds4",
27
- "beta_Slight": "beta_ds1",
28
- "beta_Moderate": "beta_ds2",
29
- "beta_Extensive": "beta_ds3",
30
- "beta_Complete": "beta_ds4"})
31
 
32
  G_power = nx.Graph()
33
  for _, node in nodes.iterrows():
34
- G_power.add_node(node.NODE_ID, pos=(node.x_coord, node.y_coord))
35
 
36
  for _, edge in edges.iterrows():
37
- G_power.add_edge(*(edge.FROM_NODE, edge.TO_NODE))
38
 
39
  nodes = geopandas.sjoin_nearest(nodes,intensity,
40
  how='left', rsuffix='intensity',distance_col='distance')
@@ -69,16 +69,16 @@ def compute_power_infra(nodes,edges,intensity,fragility):
69
  threshold = DS_MODERATE
70
 
71
  # All Nodes
72
- all_nodes = set(nodes['NODE_ID'])
73
 
74
  # Power Plants (generators)
75
- power_plants = set(nodes[nodes['pwr_plant'] == 1]['NODE_ID'])
76
 
77
  # Server Nodes
78
- server_nodes = set(nodes[nodes['n_bldgs'] > 0]['NODE_ID'])
79
 
80
  # Nodes directly affected by earthquake. Thresholding takes place.
81
- damaged_nodes = set(nodes[nodes['eq_ds'] > threshold]['NODE_ID'])
82
 
83
  # Damaged Server Nodes
84
  damaged_server_nodes = damaged_nodes.intersection(server_nodes)
@@ -126,8 +126,8 @@ def compute_power_infra(nodes,edges,intensity,fragility):
126
 
127
  is_damaged_mapper = {id:id in damaged_nodes for id in all_nodes}
128
  is_operational_mapper = {id:id in operating_nodes for id in all_nodes}
129
- nodes['is_damaged'] = nodes['NODE_ID'].map(is_damaged_mapper)
130
- nodes['is_operational'] = nodes['NODE_ID'].map(is_operational_mapper)
131
 
132
  return nodes['eq_ds'], nodes['is_damaged'], nodes['is_operational']
133
 
 
20
  print(fragility.head())
21
  print(intensity.head())
22
 
23
+ eq_vuln = fragility.rename(columns={"med_slight": "med_ds1",
24
+ "med_moderate": "med_ds2",
25
+ "med_extensive": "med_ds3",
26
+ "med_complete": "med_ds4",
27
+ "beta_slight": "beta_ds1",
28
+ "beta_moderate": "beta_ds2",
29
+ "beta_extensive": "beta_ds3",
30
+ "beta_complete": "beta_ds4"})
31
 
32
  G_power = nx.Graph()
33
  for _, node in nodes.iterrows():
34
+ G_power.add_node(node.node_id, pos=(node.x_coord, node.y_coord))
35
 
36
  for _, edge in edges.iterrows():
37
+ G_power.add_edge(*(edge.from_node, edge.to_node))
38
 
39
  nodes = geopandas.sjoin_nearest(nodes,intensity,
40
  how='left', rsuffix='intensity',distance_col='distance')
 
69
  threshold = DS_MODERATE
70
 
71
  # All Nodes
72
+ all_nodes = set(nodes['node_id'])
73
 
74
  # Power Plants (generators)
75
+ power_plants = set(nodes[nodes['pwr_plant'] == 1]['node_id'])
76
 
77
  # Server Nodes
78
+ server_nodes = set(nodes[nodes['n_bldgs'] > 0]['node_id'])
79
 
80
  # Nodes directly affected by earthquake. Thresholding takes place.
81
+ damaged_nodes = set(nodes[nodes['eq_ds'] > threshold]['node_id'])
82
 
83
  # Damaged Server Nodes
84
  damaged_server_nodes = damaged_nodes.intersection(server_nodes)
 
126
 
127
  is_damaged_mapper = {id:id in damaged_nodes for id in all_nodes}
128
  is_operational_mapper = {id:id in operating_nodes for id in all_nodes}
129
+ nodes['is_damaged'] = nodes['node_id'].map(is_damaged_mapper)
130
+ nodes['is_operational'] = nodes['node_id'].map(is_operational_mapper)
131
 
132
  return nodes['eq_ds'], nodes['is_damaged'], nodes['is_operational']
133
 
tomorrowcities/components/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .header import Header # noqa
2
+ from .layout import Layout # noqa
tomorrowcities/components/article.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import reacton.ipyvuetify as rv
2
+ import solara
3
+
4
+ from ..data import articles
5
+
6
+ @solara.component
7
+ def ArticleCard(name):
8
+ article = articles[name]
9
+ with rv.Card(max_width="400px") as main:
10
+ rv.CardTitle(children=[article.title])
11
+ with rv.CardText():
12
+ solara.Markdown(article.description)
13
+ with solara.Link(f"/docs/{name}"):
14
+ solara.Button("Read article", text=True, icon_name="mdi-book-open")
15
+ return main
16
+
17
+
18
+ @solara.component
19
+ def Overview():
20
+ with solara.ColumnsResponsive(12) as main:
21
+ with solara.Card():
22
+ with solara.ColumnsResponsive(12, small=6, large=4):
23
+ for name in articles:
24
+ ArticleCard(name)
25
+ return main
tomorrowcities/components/header.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import solara
2
+
3
+
4
+ @solara.component
5
+ def Header():
6
+ pass
tomorrowcities/components/layout.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import solara
2
+
3
+
4
+ @solara.component
5
+ def Layout(children=[]):
6
+ return solara.VBox(children=children)
tomorrowcities/content/articles/data_formats.md ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ author: huseyin.kaya
3
+ title: Data Formats
4
+ description: Description of the data formats used in the web application
5
+ alt: "Data Formats"
6
+ createdAt: 2023-10-10
7
+ duration: 6 min read
8
+ category:
9
+ - general
10
+ ---
11
+
12
+ ## Data Structures
13
+ Tomorrow's Cities Decision Support Environment (TCDSE) is capable of conducting different hazard scenarios on different infrastructures, hence it needs to use several data input/output formats.
14
+
15
+ There are many different possible strategies and data formats to describe and store data.
16
+ In TCDSE, we generally use tabular data where each row corresponds to a unique object whereas the columns correspond to the features of that object. Object here can refer to a building, individual, etc. The full list of the objects for which we use a dedicated data file is as follows:
17
+
18
+ * landuse
19
+ * building
20
+ * household
21
+ * individual
22
+ * intensity
23
+ * fragility
24
+ * vulnerability
25
+ * power nodes
26
+ * power edges
27
+
28
+ **Storage Format:** The tabular data can be stored in different formats such as Comma-Separated Values or spreadsheets. If the data does not contain geographic coordinates, or the coordinates are defined with longitude and latitude pairs, spreadsheet formats can be used.
29
+
30
+ In this way, building data can be joined with other types of data that we will mention in the coming section via relational databases.
31
+
32
+ ### Format
33
+
34
+
35
+ ## Layers
36
+ ### Buildings
37
+ Buildings are the core component of visioning scenarios. The features of the building with some example data are shown below:
38
+
39
+ |zoneID| bldID | nHouse | residents | specialFac | expStr | fptarea | geometry |
40
+ |------|-------|--------|-----------|------------|-----------------|---------|--------------|
41
+ |4 | 17 | 41 | 178 | 0 |RCi+HC+18s+ResCom| 111 | MultiPolygon |
42
+
43
+
44
+ where
45
+
46
+ * **zoneID (integer)** refers to the unique identifier of the zone that the building is located in. The features of the corresponding zone are described in a dedicated zone table.
47
+ * **bldID (integer)** is a unique building identifier.
48
+ * **nHouse (integer)** is the number of households in that building.
49
+ * **residents (integer)** stores the number of individuals living in the building.
50
+ * **expStr (string)**
51
+
52
+
53
+
tomorrowcities/content/articles/power_network_analysis copy.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ author: huseyin.kaya
3
+ title: Power Network Analysis
4
+ description: How to conduct power network analysis in Tomorrowville
5
+ image: https://images.unsplash.com/photo-1429041966141-44d228a42775?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=2500&q=80
6
+ thumbnail: https://images.unsplash.com/photo-1429041966141-44d228a42775?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=350&q=80
7
+ alt: "Power Network Analysis"
8
+ createdAt: 2023-10-10
9
+ duration: 6 min read
10
+ category:
11
+ - general
12
+ ---
13
+
14
+ ## Power Infrastructure Analysis
tomorrowcities/content/articles/welcome.md ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ author: huseyin.kaya
3
+ title: Welcome!
4
+ description: A Brief Introduction to Tomorrow's Cities Decision Support Environment (TCDSE)
5
+ image: https://images.unsplash.com/photo-1429041966141-44d228a42775?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=2500&q=80
6
+ thumbnail: https://images.unsplash.com/photo-1429041966141-44d228a42775?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=350&q=80
7
+ alt: "Welcome!"
8
+ createdAt: 2023-10-10
9
+ duration: 6 min read
10
+ category:
11
+ - general
12
+ ---
13
+
14
+ ## Tomorrow's Cities Decision Support Environment (TCDSE)
15
+ TCDSE is a web application designed to conduct computational tasks to generate information needed for decision mechanisms in designing future cities. The web application, which will be referred to as TCDSE for short, contains a computational engine capable of executing several hazard scenarios on different exposure datasets and infrastructures.
16
+
17
+ ## Features
18
+ General capabilities/features of the web application can be summarized as follows:
19
+
20
+
21
+ *Hazard Scenarios*
22
+
23
+ * Earthquake
24
+ * Flood
25
+ * Debris
26
+
27
+ *Exposure Scenarios*
28
+
29
+ * Buildings
30
+ * Power networks
31
+ * Transportation
32
+ * Water networks
33
+
34
+ *Impact Metrics*
35
+
36
+ * Building and infrastructure-level damage states
37
+ * Household and individual-level derived metrics
38
+
39
+ *Visualization*
40
+
41
+ * GIS Maps
42
+ * Hazard and Exposure data displayers
43
+ * Reactive metric widgets
44
+ * Damage state classifications
45
+
46
+ *Data structure*
47
+
48
+ * GeoJSON format for geospatial data
49
+ * Vanilla JSON for non-geospatial tabular data
50
+
51
+ *Software*
52
+
53
+ * Pure-Python development for both backend and frontend
54
+ * Reactive user interface via Solara
55
+ * Geospatial database via PostGIS
56
+ * Leaflet backend for maps
57
+ * Easy deployment to cloud
58
+
59
+
60
+ ## Quickstart
61
+ * Download the [Sample Dataset](https://drive.google.com/file/d/1BGPZQ2IKJHY9ExOCCHcNNrCTioYZ8D1y/view?usp=sharing) to your local environment and unzip the archive file.
62
+ * Go to [engine](/engine)
63
+ * Drag/drop necessary files to the drop zone of the engine and execute the engine. A sample session is displayed below.
64
+ * The impact metrics will be immediately seen on the page.
65
+
66
+ <video width="853" controls>
67
+ <source src="https://github-production-user-asset-6210df.s3.amazonaws.com/2515171/270064030-0733ad34-0a7f-445e-86fb-9a61df4e2969.mp4" type="video/mp4">
68
+ </video>
69
+
70
+ In case the file names in the video are not clearly seen, they are:
71
+
72
+ * nairobi_business_buildings.geojson
73
+ * nairobi_business_household.json
74
+ * nairobi_business_individual.json
75
+ * nairobi_earthquake_fragility.json
76
+ * nairobi_earthquake_intensity.geojson
77
+
78
+ The files used above satisfy the minimum requirements to run an earthquake analysis on buildings.
docs/Makefile → tomorrowcities/content/images/tcdse_demo1.mp4 RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8b6587b859607f200f116e2cb043fc358e1c3a26c326b563bf348453cfc68307
3
- size 634
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88844e0fd52f7751617e2b9fdb98a84d137ae50c13620530507fa93bbd24f6e8
3
+ size 1949660
tomorrowcities/data.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from pathlib import Path
3
+ from typing import Any, Dict
4
+ import solara
5
+
6
+ import yaml
7
+
8
+
9
+ HERE = Path(__file__)
10
+
11
+
12
+ @dataclasses.dataclass
13
+ class Article:
14
+ markdown: str
15
+ title: str
16
+ description: str
17
+
18
+
19
+ articles: Dict[str, Article] = {}
20
+
21
+ for file in (HERE.parent / "content/articles").glob("*.md"):
22
+ content = file.read_text()
23
+ lines = [k.strip() for k in content.split("\n")]
24
+ frontmatter_start = lines.index("---", 0)
25
+ frontmatter_end = lines.index("---", frontmatter_start + 1)
26
+ yamltext = "\n".join(lines[frontmatter_start + 1 : frontmatter_end - 2])
27
+ metadata = yaml.safe_load(yamltext)
28
+ markdown = "\n".join(lines[frontmatter_end + 1 :])
29
+ articles[file.stem] = Article(markdown=markdown, title=metadata["title"], description=metadata["description"])
30
+
tomorrowcities/pages/__init__.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, cast
2
+ import solara
3
+ from solara.alias import rv
4
+ import dataclasses
5
+
6
+ from ..data import articles
7
+
8
+ route_order = ["/", "docs","engine","settings","account"]
9
+
10
+ def check_auth(route, children):
11
+ # This can be replaced by a custom function that checks if the user is
12
+ # logged in and has the required permissions.
13
+
14
+ # routes that are public or only for admin
15
+ # the rest only requires login
16
+ public_paths = ["/","docs","engine","account"]
17
+ admin_paths = ["settings"]
18
+
19
+
20
+ if route.path in public_paths:
21
+ children_auth = children
22
+ else:
23
+ if user.value is None:
24
+ children_auth = [LoginForm()]
25
+ else:
26
+ if route.path in admin_paths and not user.value.admin:
27
+ children_auth = [solara.Error("You are not an admin")]
28
+ else:
29
+ children_auth = children
30
+ return children_auth
31
+
32
+
33
+ @dataclasses.dataclass
34
+ class User:
35
+ username: str
36
+ admin: bool = False
37
+
38
+
39
+ user = solara.reactive(cast(Optional[User], None))
40
+ login_failed = solara.reactive(False)
41
+
42
+
43
+ def login_control(username: str, password: str):
44
+ # this function can be replaced by a custom username/password check
45
+ if username == "test" and password == "test":
46
+ user.value = User(username, admin=False)
47
+ login_failed.value = False
48
+ elif username == "admin" and password == "admin":
49
+ user.value = User(username, admin=True)
50
+ login_failed.value = False
51
+ else:
52
+ login_failed.value = True
53
+
54
+
55
+ @solara.component
56
+ def LoginForm():
57
+ username = solara.use_reactive("")
58
+ password = solara.use_reactive("")
59
+ with solara.Card("Login"):
60
+ solara.Markdown(
61
+ """
62
+ This is an example login form.
63
+
64
+ * use admin/admin to login as admin.
65
+ * use test/test to login as a normal user.
66
+ """
67
+ )
68
+ solara.InputText(label="Username", value=username)
69
+ solara.InputText(label="Password", password=True, value=password)
70
+ solara.Button(label="Login", on_click=lambda: login_control(username.value, password.value))
71
+ if login_failed.value:
72
+ solara.Error("Wrong username or password")
73
+
74
+
75
+
76
+ @solara.component
77
+ def Layout(children=[]):
78
+ router = solara.use_context(solara.routing.router_context)
79
+ route, routes = solara.use_route(peek=True)
80
+
81
+ if route is None:
82
+ return solara.Error("Route not found")
83
+
84
+ children = check_auth(route, children)
85
+
86
+
87
+ with solara.AppLayout(children=children, title="TomorrowCities Decision Support Environment", navigation=True) as main:
88
+ with solara.AppBar():
89
+ with solara.lab.Tabs(align="center"):
90
+ for route in routes:
91
+ name = route.path if route.path != "/" else "Welcome"
92
+ is_admin = user.value and user.value.admin
93
+ # we could skip the admin tab if the user is not an admin
94
+ if route.path == "settings" and not is_admin:
95
+ continue
96
+ if user.value is not None and route.path == "logon":
97
+ continue
98
+ # in this case we disable the tab
99
+ solara.lab.Tab(name, path_or_route=route, disabled=False)
100
+ if user.value:
101
+ solara.Text(f"Logged in as {user.value.username} as {'admin' if user.value.admin else 'user'}")
102
+ with solara.Tooltip("Logout"):
103
+ with solara.Link(f"/account"):
104
+ solara.Button(icon_name="mdi-logout", icon=True, on_click=lambda: user.set(None))
105
+ else:
106
+ with solara.Link(f"/account"):
107
+ solara.Button(icon_name="mdi-login",label='login', icon=True)
108
+
109
+
110
+ return main
111
+
112
+
113
+ @solara.component
114
+ def Page():
115
+ with solara.VBox() as main:
116
+ solara.Title("TCDSE » Welcome")
117
+ article = articles["welcome"]
118
+ solara.Markdown(article.markdown)
119
+
120
+
121
+ return main
tomorrowcities/pages/account.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ import solara
3
+
4
+ from . import user
5
+ from . import LoginForm
6
+
7
@solara.component
def Page(name: Optional[str] = None, page: int = 0, page_size=100):
    """Account page: greet the signed-in user, otherwise show the login form."""
    solara.Title("TCDSE » Account")
    if user.value is not None:
        solara.Markdown(f'Hello {user.value.username}')
    else:
        LoginForm()
tomorrowcities/pages/docs.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import solara
4
+
5
+ from .. import data
6
+ from ..components.article import Overview
7
+
8
+
9
@solara.component
def Page(name: Optional[str] = None, page: int = 0, page_size=100):
    """Documentation browser.

    With no *name*, show the article overview; otherwise render the named
    article, or an error when it does not exist.
    """
    if name is None:
        with solara.Column() as main:
            # Fixed: title read "TCDSE» Documentation" (missing space before
            # the separator), inconsistent with every other page title.
            solara.Title("TCDSE » Documentation")
            Overview()
        return main
    if name not in data.articles:
        return solara.Error(f"No such article: {name!r}")
    article = data.articles[name]
    with solara.ColumnsResponsive(12) as main:
        solara.Title("TCDSE » Documentation » " + article.title)
        with solara.Link("/docs"):
            solara.Text("« Back to documentation")
        with solara.Card():
            solara.Markdown(article.markdown)
    return main
src/gui/app_engine_v2.py → tomorrowcities/pages/engine.py RENAMED
@@ -10,7 +10,101 @@ from typing import Tuple, Optional
10
  import ipyleaflet
11
  from ipyleaflet import AwesomeIcon, Marker
12
  import numpy as np
13
- import engine
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  def building_colors(feature):
16
  ds_to_color = {0: 'lavender', 1:'violet',2:'fuchsia',3:'indigo',4:'darkslateblue',5:'black'}
@@ -23,17 +117,17 @@ def power_node_colors(feature):
23
  ds = random.randint(0,5) #feature['properties']['ds']
24
  return {'color': ds_to_color[ds], 'fillColor': ds_to_color[ds]}
25
 
26
- def create_map_layer(layers, df, name):
27
  if df is None:
28
  return None
29
  if "geometry" not in list(df.columns):
30
  return None
31
 
32
- if name not in layers['layers'].keys():
33
  return None
34
 
35
- existing_map_layer = layers['layers'][name]['map_layer'].value
36
- if existing_map_layer is not None and not layers['layers'][name]['force_render'].value:
37
  return existing_map_layer
38
 
39
  if name == "intensity":
@@ -58,7 +152,7 @@ def create_map_layer(layers, df, name):
58
  marker_color=marker_color,
59
  icon_color=icon_color,
60
  spin=False
61
- ),location=(y,x),title=f'{node["NODE_ID"]}')
62
 
63
  markers.append(marker)
64
  map_layer= ipyleaflet.MarkerCluster(markers=markers,
@@ -67,8 +161,8 @@ def create_map_layer(layers, df, name):
67
 
68
  else:
69
  map_layer = ipyleaflet.GeoData(geo_dataframe = df)
70
- layers['layers'][name]['map_layer'].set(map_layer)
71
- layers['layers'][name]['force_render'].set(False)
72
  return map_layer
73
 
74
  @solara.component
@@ -106,7 +200,7 @@ def MetricWidget(name, description, value, max_value, render_count):
106
  solara.FigureEcharts(option=options, attributes={ "style": "height: 100px; width: 100px" })
107
 
108
 
109
- def import_data(data: Optional[bytes], layers):
110
  json_string = data.decode('utf-8')
111
  json_data = json.loads(json_string)
112
  if "features" in json_data.keys():
@@ -114,35 +208,39 @@ def import_data(data: Optional[bytes], layers):
114
  else:
115
  df = pd.read_json(json_string)
116
 
 
 
117
  name = None
118
- for layer_name, layer in layers['layers'].items():
119
  if layer['cols'] == set(df.columns):
120
  name = layer_name
121
  break
122
 
123
  # Inject columns
124
  if name is not None:
125
- for col, val in layers['layers'][name]['extra_cols'].items():
126
  df[col] = val
127
  return (name, df)
128
 
129
 
130
  @solara.component
131
- def FileDropZone(layers):
132
  total_progress, set_total_progress = solara.use_state(-1)
133
  fileinfo, set_fileinfo = solara.use_state(None)
134
  result, set_result = solara.use_state(solara.Result(True))
135
- layers, set_layers = solara.use_state_or_update(layers)
136
 
137
  def load():
138
  if fileinfo is not None:
139
  print('processing file')
140
- name, df = import_data(fileinfo['data'], layers)
141
  if name is not None and df is not None:
142
- layers['layers'][name]['df'].set(df)
143
- layers['selected_layer'].set(name)
144
- layers['layers'][name]['visible'].set(True)
145
- layers['layers'][name]['force_render'].set(True)
 
 
 
146
  else:
147
  return False
148
  return True
@@ -179,15 +277,13 @@ def FileDropZone(layers):
179
  solara.ProgressLinear(value=True)
180
 
181
  @solara.component
182
- def LayerDisplayer(layers):
183
-
184
- layers, set_layers = solara.use_state(layers)
185
 
186
- nonempty_layers = {name: layer for name, layer in layers['layers'].items() if layer['df'].value is not None}
187
  nonempty_layer_names = list(nonempty_layers.keys())
188
- selected = layers['selected_layer'].value
189
  def set_selected(s):
190
- layers['selected_layer'].set(s)
191
 
192
  solara.ToggleButtonsSingle(value=selected, on_value=set_selected,
193
  values=nonempty_layer_names)
@@ -195,61 +291,58 @@ def LayerDisplayer(layers):
195
  set_selected(nonempty_layer_names[0])
196
  if selected is not None:
197
  DataframeDisplayer(nonempty_layers[selected]['df'].value,
198
- layers['render_count'].value,
199
- layers['bounds'].value)
200
 
201
  @solara.component
202
- def MetricPanel(layers):
203
- layers, set_layers = solara.use_state(layers)
204
- building = layers['layers']['building']['df'].value
205
- filtered_metrics = {name: 0 for name in layers['metrics'].keys()}
206
- if building is not None and layers['bounds'].value is not None:
207
- ((ymin,xmin),(ymax,xmax)) = layers['bounds'].value
208
  filtered = building.cx[xmin:xmax,ymin:ymax]
209
  for metric in filtered_metrics.keys():
210
  filtered_metrics[metric] = int(filtered.cx[xmin:xmax,ymin:ymax][metric].sum())
211
 
212
  with solara.Row():
213
- for name, metric in layers['metrics'].items():
214
  MetricWidget(name, metric['desc'],
215
  filtered_metrics[name],
216
  metric['max_value'],
217
- layers['render_count'].value)
218
 
219
 
220
  @solara.component
221
- def LayerController(layers):
222
- layers, set_layers = solara.use_state(layers)
223
  with solara.Row(gap="0px"):
224
- for layer_name, layer in layers['layers'].items():
225
  if layer['map_layer'].value is not None:
226
  solara.Checkbox(label=layer_name,
227
  value=layer['visible'])
228
 
229
 
230
  @solara.component
231
- def MapViewer(layers):
232
  print('rendering mapviewer')
233
  default_zoom = 14
234
- default_center = (-1.3, 36.80)
235
- layers, set_layers = solara.use_state(layers)
236
  zoom, set_zoom = solara.use_state(default_zoom)
237
- center, set_center = solara.use_state(default_center)
238
 
239
  def set_bounds(bounds):
240
- layers['bounds'].set(bounds)
241
 
242
  base_map = ipyleaflet.basemaps["Stamen"]["Watercolor"]
243
  base_layer = ipyleaflet.TileLayer.element(url=base_map.build_url())
244
  map_layers = [base_layer]
245
 
246
- for layer_name, layer in layers['layers'].items():
247
  df = layer['df'].value
248
  if df is None:
249
  continue
250
  # we have something to display on map
251
  if "geometry" in list(df.columns) and layer['visible'].value:
252
- map_layer = create_map_layer(layers, df, layer_name)
253
  if map_layer is not None:
254
  map_layers.append(map_layer)
255
 
@@ -258,8 +351,8 @@ def MapViewer(layers):
258
  zoom=zoom,
259
  on_zoom=set_zoom,
260
  on_bounds=set_bounds,
261
- center=center,
262
- on_center=set_center,
263
  scroll_wheel_zoom=True,
264
  dragging=True,
265
  double_click_zoom=True,
@@ -280,8 +373,7 @@ def DataframeDisplayer(df, render_count, bounds):
280
  solara.DataFrame(df)
281
 
282
  @solara.component
283
- def ExecutePanel(layers):
284
- layers, set_layers = solara.use_state_or_update(layers)
285
  infra, set_infra = solara.use_state(["power"])
286
  hazard, set_hazard = solara.use_state("earthquake")
287
 
@@ -294,8 +386,8 @@ def ExecutePanel(layers):
294
  set_execute_counter(execute_counter + 1)
295
  execute_error.set("")
296
 
297
- def is_ready_to_run(layers, infra, hazard):
298
- existing_layers = set([name for name, l in layers['layers'].items() if l['df'].value is not None])
299
  missing = []
300
 
301
  if hazard == "earthquake":
@@ -316,63 +408,70 @@ def ExecutePanel(layers):
316
 
317
 
318
  def execute_engine():
319
- if execute_counter > 0 :
320
- is_ready, missing = is_ready_to_run(layers, infra, hazard)
321
- if not is_ready:
322
- raise Exception(f'Missing {missing}')
323
- return
324
-
325
- if 'power' in infra:
326
- nodes = layers['layers']['power nodes']['df'].value
327
- edges = layers['layers']['power edges']['df'].value
328
- intensity = layers['layers']['intensity']['df'].value
329
- power_fragility = layers['layers']['power fragility']['df'].value
330
 
331
 
332
- eq_ds, is_damaged, is_operational = engine.compute_power_infra(nodes,
333
- edges,
334
- intensity,
335
- power_fragility)
336
-
337
- #power_node_df = dfs['Power Nodes'].copy()
338
- nodes['ds'] = list(eq_ds)
339
- nodes['is_damaged'] = list(is_damaged)
340
- nodes['is_operational'] = list(is_operational)
341
 
342
- layers['layers']['power nodes']['df'].set(nodes)
343
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
344
 
345
- layers['render_count'].set(layers['render_count'].value + 1)
346
- layers['layers']['power nodes']['force_render'].set(True)
 
 
 
 
 
 
 
 
 
347
 
 
 
 
 
348
  if 'building' in infra:
349
- buildings = layers['layers']['building']['df'].value
350
- household = layers['layers']['household']['df'].value
351
- individual = layers['layers']['individual']['df'].value
352
- intensity = layers['layers']['intensity']['df'].value
353
-
354
- fragility = layers['layers']['fragility']['df'].value
355
- vulnerability = layers['layers']['vulnerability']['df'].value
356
- computed_metrics, df_metrics, df_bld_hazard = engine.compute(
357
- buildings,
358
- household,
359
- individual,
360
- intensity,
361
- fragility if hazard == "earthquake" else vulnerability,
362
- hazard)
363
-
364
- print(computed_metrics)
365
- for metric in df_metrics.keys():
366
- buildings[metric] = list(df_metrics[metric][metric])
367
- layers['metrics'][metric]['value'] = computed_metrics[metric]['value']
368
- layers['metrics'][metric]['max_value'] = computed_metrics[metric]['max_value']
369
- buildings['ds'] = list(df_bld_hazard['ds'])
370
-
371
- layers['layers']['building']['df'].set(buildings)
372
-
373
- layers['render_count'].set(layers['render_count'].value + 1)
374
- layers['layers']['building']['force_render'].set(True)
375
 
 
376
 
377
  # Execute the thread only when the dependency is changed
378
  result = solara.use_thread(execute_engine, dependencies=[execute_counter])
@@ -403,106 +502,22 @@ def ExecutePanel(layers):
403
 
404
  @solara.component
405
  def WebApp():
406
- layers, set_layers = solara.use_state({
407
- 'layers' : {
408
- 'building': {
409
- 'df': solara.reactive(None),
410
- 'map_layer': solara.reactive(None),
411
- 'force_render': solara.reactive(False),
412
- 'visible': solara.reactive(False),
413
- 'extra_cols': {'ds': 0, 'metric1': 0, 'metric2': 0, 'metric3': 0,'metric4': 0, 'metric5': 0,'metric6': 0,'metric7': 0},
414
- 'cols': set(['geometry','zoneID', 'bldID', 'nHouse', 'residents', 'specialFac', 'expStr', 'fptarea', 'repValue'])},
415
- 'landuse': {
416
- 'df': solara.reactive(None),
417
- 'map_layer': solara.reactive(None),
418
- 'force_render': solara.reactive(False),
419
- 'visible': solara.reactive(False),
420
- 'extra_cols': {},
421
- 'cols': set(['geometry', 'zoneID', 'LuF', 'population', 'densityCap', 'floorARat', 'setback', 'avgIncome'])},
422
- 'household': {
423
- 'df': solara.reactive(None),
424
- 'map_layer': solara.reactive(None),
425
- 'force_render': solara.reactive(False),
426
- 'visible': solara.reactive(False),
427
- 'extra_cols': {},
428
- 'cols':set(['hhID', 'nInd', 'income', 'bldID', 'CommFacID'])},
429
- 'individual': {
430
- 'df': solara.reactive(None),
431
- 'map_layer': solara.reactive(None),
432
- 'force_render': solara.reactive(False),
433
- 'visible': solara.reactive(False),
434
- 'extra_cols': {},
435
- 'cols': set(['indivId', 'hhID', 'gender', 'age', 'eduAttStat', 'head', 'indivFacID'])},
436
- 'intensity': {
437
- 'df': solara.reactive(None),
438
- 'map_layer': solara.reactive(None),
439
- 'force_render': solara.reactive(False),
440
- 'visible': solara.reactive(False),
441
- 'extra_cols': {},
442
- 'cols': set(['geometry','im'])},
443
- 'fragility': {
444
- 'df': solara.reactive(None),
445
- 'map_layer': solara.reactive(None),
446
- 'force_render': solara.reactive(False),
447
- 'visible': solara.reactive(False),
448
- 'extra_cols': {},
449
- 'cols': set(['expstr','muds1_g','muds2_g','muds3_g','muds4_g','sigmads1','sigmads2','sigmads3','sigmads4'])},
450
- 'vulnerability': {
451
- 'df': solara.reactive(None),
452
- 'map_layer': solara.reactive(None),
453
- 'force_render': solara.reactive(False),
454
- 'visible': solara.reactive(False),
455
- 'extra_cols': {},
456
- 'cols': set(['expstr', 'hw0', 'hw0_5', 'hw1', 'hw1_5', 'hw2', 'hw3', 'hw4', 'hw5','hw6'])},
457
- 'power nodes': {
458
- 'df': solara.reactive(None),
459
- 'map_layer': solara.reactive(None),
460
- 'force_render': solara.reactive(False),
461
- 'visible': solara.reactive(False),
462
- 'extra_cols': {'ds': 0, 'is_damaged': False, 'is_operational': True},
463
- 'cols': set(['geometry', 'FLTYTYPE', 'STRCTYPE', 'UTILFCLTYC', 'INDPNODE', 'guid',
464
- 'NODE_ID', 'x_coord', 'y_coord', 'pwr_plant', 'serv_area', 'n_bldgs',
465
- 'income', 'eq_vuln'])},
466
- 'power edges': {
467
- 'df': solara.reactive(None),
468
- 'map_layer': solara.reactive(None),
469
- 'force_render': solara.reactive(False),
470
- 'visible': solara.reactive(False),
471
- 'extra_cols': {},
472
- 'cols': set(['FROM_NODE', 'direction', 'pipetype', 'EDGE_ID', 'guid', 'capacity', 'geometry', 'TO_NODE', 'length'])},
473
- 'power fragility': {
474
- 'df': solara.reactive(None),
475
- 'map_layer': solara.reactive(None),
476
- 'force_render': solara.reactive(False),
477
- 'visible': solara.reactive(False),
478
- 'extra_cols': {},
479
- 'cols': set(['vuln_string', 'med_Slight', 'med_Moderate', 'med_Extensive', 'med_Complete', 'beta_Slight', 'beta_Moderate', 'beta_Extensive', 'beta_Complete', 'description'])}
480
- },
481
- 'selected_layer' : solara.reactive(None),
482
- 'render_count': solara.reactive(0),
483
- 'bounds': solara.reactive(None),
484
- 'metrics': {
485
- "metric1": {"desc": "Number of workers unemployed", "value": 0, "max_value": 100},
486
- "metric2": {"desc": "Number of children with no access to education", "value": 0, "max_value": 100},
487
- "metric3": {"desc": "Number of households with no access to hospital", "value": 0, "max_value": 100},
488
- "metric4": {"desc": "Number of individuals with no access to hospital", "value": 0, "max_value": 100},
489
- "metric5": {"desc": "Number of homeless households", "value": 0, "max_value": 100},
490
- "metric6": {"desc": "Number of homeless individuals", "value": 0, "max_value": 100},
491
- "metric7": {"desc": "Population displacement", "value": 0, "max_value":100},}})
492
 
493
  with solara.Columns([30,60]):
494
  with solara.Column():
495
- FileDropZone(layers)
496
- ExecutePanel(layers)
 
 
497
  with solara.Column():
498
- LayerController(layers)
499
- MapViewer(layers)
500
- MetricPanel(layers)
501
 
502
- LayerDisplayer(layers)
503
 
504
  @solara.component
505
- def Page():
506
  css = """
507
  .v-input {
508
  height: 10px;
@@ -516,9 +531,6 @@ def Page():
516
 
517
  """
518
  solara.Style(value=css)
 
519
 
520
  WebApp()
521
-
522
- Page()
523
-
524
-
 
10
  import ipyleaflet
11
  from ipyleaflet import AwesomeIcon, Marker
12
  import numpy as np
13
+ import sys
14
+
15
+ from ..backend.engine import compute, compute_power_infra
16
+
17
+
18
# Global (module-level) reactive application state shared by all components.
# Each entry under 'layers' describes one known dataset type:
#   df           - reactive dataframe, set once the user uploads matching data
#   map_layer    - reactive cache of the ipyleaflet layer built from df
#   force_render - reactive flag; when True the cached map layer is rebuilt
#   visible      - reactive map-visibility toggle
#   extra_cols   - columns injected with default values at import time
#   cols         - exact lower-cased column set used to auto-detect the
#                  dataset type of an uploaded file
layers = solara.reactive({
    'layers' : {
        'building': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {'ds': 0, 'metric1': 0, 'metric2': 0, 'metric3': 0,'metric4': 0, 'metric5': 0,'metric6': 0,'metric7': 0},
            'cols': set(['residents', 'fptarea', 'repvalue', 'nhouse', 'zoneid', 'expstr', 'bldid', 'geometry', 'specialfac'])},
        'landuse': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols': set(['geometry', 'zoneid', 'luf', 'population', 'densitycap', 'floorarat', 'setback', 'avgincome'])},
        'household': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols':set(['hhid', 'nind', 'income', 'bldid', 'commfacid'])},
        'individual': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols': set(['individ', 'hhid', 'gender', 'age', 'eduattstat', 'head', 'indivfacid'])},
        'intensity': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols': set(['geometry','im'])},
        'fragility': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols': set(['expstr','muds1_g','muds2_g','muds3_g','muds4_g','sigmads1','sigmads2','sigmads3','sigmads4'])},
        'vulnerability': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols': set(['expstr', 'hw0', 'hw0_5', 'hw1', 'hw1_5', 'hw2', 'hw3', 'hw4', 'hw5','hw6'])},
        'power nodes': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {'ds': 0, 'is_damaged': False, 'is_operational': True},
            'cols': set(['geometry', 'fltytype', 'strctype', 'utilfcltyc', 'indpnode', 'guid',
                         'node_id', 'x_coord', 'y_coord', 'pwr_plant', 'serv_area', 'n_bldgs',
                         'income', 'eq_vuln'])},
        'power edges': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols': set(['from_node', 'direction', 'pipetype', 'edge_id', 'guid', 'capacity',
                         'geometry', 'to_node', 'length'])},
        'power fragility': {
            'df': solara.reactive(None),
            'map_layer': solara.reactive(None),
            'force_render': solara.reactive(False),
            'visible': solara.reactive(False),
            'extra_cols': {},
            'cols': set(['vuln_string', 'med_slight', 'med_moderate', 'med_extensive', 'med_complete',
                         'beta_slight', 'beta_moderate', 'beta_extensive', 'beta_complete', 'description'])}
    },
    # Map view center; updated to the centroid of the last uploaded geometry.
    'center': solara.reactive((41.01,28.98)),
    # Name of the layer currently shown in the dataframe display.
    'selected_layer' : solara.reactive(None),
    # Bumped after every engine run to force dependent widgets to re-render.
    'render_count': solara.reactive(0),
    # Current map viewport bounds as ((ymin, xmin), (ymax, xmax)).
    'bounds': solara.reactive(None),
    # Impact metrics shown in MetricPanel; 'value'/'max_value' are overwritten
    # by the engine after a building analysis.
    'metrics': {
        "metric1": {"desc": "Number of workers unemployed", "value": 0, "max_value": 100},
        "metric2": {"desc": "Number of children with no access to education", "value": 0, "max_value": 100},
        "metric3": {"desc": "Number of households with no access to hospital", "value": 0, "max_value": 100},
        "metric4": {"desc": "Number of individuals with no access to hospital", "value": 0, "max_value": 100},
        "metric5": {"desc": "Number of homeless households", "value": 0, "max_value": 100},
        "metric6": {"desc": "Number of homeless individuals", "value": 0, "max_value": 100},
        "metric7": {"desc": "Population displacement", "value": 0, "max_value":100},}})
107
+
108
 
109
  def building_colors(feature):
110
  ds_to_color = {0: 'lavender', 1:'violet',2:'fuchsia',3:'indigo',4:'darkslateblue',5:'black'}
 
117
  ds = random.randint(0,5) #feature['properties']['ds']
118
  return {'color': ds_to_color[ds], 'fillColor': ds_to_color[ds]}
119
 
120
+ def create_map_layer(df, name):
121
  if df is None:
122
  return None
123
  if "geometry" not in list(df.columns):
124
  return None
125
 
126
+ if name not in layers.value['layers'].keys():
127
  return None
128
 
129
+ existing_map_layer = layers.value['layers'][name]['map_layer'].value
130
+ if existing_map_layer is not None and not layers.value['layers'][name]['force_render'].value:
131
  return existing_map_layer
132
 
133
  if name == "intensity":
 
152
  marker_color=marker_color,
153
  icon_color=icon_color,
154
  spin=False
155
+ ),location=(y,x),title=f'{node["node_id"]}')
156
 
157
  markers.append(marker)
158
  map_layer= ipyleaflet.MarkerCluster(markers=markers,
 
161
 
162
  else:
163
  map_layer = ipyleaflet.GeoData(geo_dataframe = df)
164
+ layers.value['layers'][name]['map_layer'].set(map_layer)
165
+ layers.value['layers'][name]['force_render'].set(False)
166
  return map_layer
167
 
168
  @solara.component
 
200
  solara.FigureEcharts(option=options, attributes={ "style": "height: 100px; width: 100px" })
201
 
202
 
203
+ def import_data(data: Optional[bytes]):
204
  json_string = data.decode('utf-8')
205
  json_data = json.loads(json_string)
206
  if "features" in json_data.keys():
 
208
  else:
209
  df = pd.read_json(json_string)
210
 
211
+ df.columns = df.columns.str.lower()
212
+
213
  name = None
214
+ for layer_name, layer in layers.value['layers'].items():
215
  if layer['cols'] == set(df.columns):
216
  name = layer_name
217
  break
218
 
219
  # Inject columns
220
  if name is not None:
221
+ for col, val in layers.value['layers'][name]['extra_cols'].items():
222
  df[col] = val
223
  return (name, df)
224
 
225
 
226
  @solara.component
227
+ def FileDropZone():
228
  total_progress, set_total_progress = solara.use_state(-1)
229
  fileinfo, set_fileinfo = solara.use_state(None)
230
  result, set_result = solara.use_state(solara.Result(True))
 
231
 
232
  def load():
233
  if fileinfo is not None:
234
  print('processing file')
235
+ name, df = import_data(fileinfo['data'])
236
  if name is not None and df is not None:
237
+ layers.value['layers'][name]['df'].set(df)
238
+ layers.value['selected_layer'].set(name)
239
+ layers.value['layers'][name]['visible'].set(True)
240
+ layers.value['layers'][name]['force_render'].set(True)
241
+ if "geometry" in list(df.columns):
242
+ center = (df.geometry.centroid.y.mean(), df.geometry.centroid.x.mean())
243
+ layers.value['center'].set(center)
244
  else:
245
  return False
246
  return True
 
277
  solara.ProgressLinear(value=True)
278
 
279
  @solara.component
280
+ def LayerDisplayer():
 
 
281
 
282
+ nonempty_layers = {name: layer for name, layer in layers.value['layers'].items() if layer['df'].value is not None}
283
  nonempty_layer_names = list(nonempty_layers.keys())
284
+ selected = layers.value['selected_layer'].value
285
  def set_selected(s):
286
+ layers.value['selected_layer'].set(s)
287
 
288
  solara.ToggleButtonsSingle(value=selected, on_value=set_selected,
289
  values=nonempty_layer_names)
 
291
  set_selected(nonempty_layer_names[0])
292
  if selected is not None:
293
  DataframeDisplayer(nonempty_layers[selected]['df'].value,
294
+ layers.value['render_count'].value,
295
+ layers.value['bounds'].value)
296
 
297
@solara.component
def MetricPanel():
    """Render one MetricWidget per configured metric, each aggregated over
    the buildings currently inside the visible map bounds."""
    building = layers.value['layers']['building']['df'].value
    filtered_metrics = {name: 0 for name in layers.value['metrics'].keys()}
    if building is not None and layers.value['bounds'].value is not None:
        ((ymin, xmin), (ymax, xmax)) = layers.value['bounds'].value
        # Spatially clip once. The original applied .cx a second time per
        # metric with identical bounds — redundant O(n) work per metric
        # with no effect on the result.
        filtered = building.cx[xmin:xmax, ymin:ymax]
        for metric in filtered_metrics.keys():
            filtered_metrics[metric] = int(filtered[metric].sum())

    with solara.Row():
        for name, metric in layers.value['metrics'].items():
            MetricWidget(name, metric['desc'],
                         filtered_metrics[name],
                         metric['max_value'],
                         layers.value['render_count'].value)
313
 
314
 
315
@solara.component
def LayerController():
    """One visibility checkbox per layer that already has a built map layer."""
    with solara.Row(gap="0px"):
        rendered = ((lname, l) for lname, l in layers.value['layers'].items()
                    if l['map_layer'].value is not None)
        for lname, l in rendered:
            solara.Checkbox(label=lname, value=l['visible'])
322
 
323
 
324
  @solara.component
325
+ def MapViewer():
326
  print('rendering mapviewer')
327
  default_zoom = 14
328
+ #default_center = (-1.3, 36.80)
 
329
  zoom, set_zoom = solara.use_state(default_zoom)
330
+ #center, set_center = solara.use_state(default_center)
331
 
332
  def set_bounds(bounds):
333
+ layers.value['bounds'].set(bounds)
334
 
335
  base_map = ipyleaflet.basemaps["Stamen"]["Watercolor"]
336
  base_layer = ipyleaflet.TileLayer.element(url=base_map.build_url())
337
  map_layers = [base_layer]
338
 
339
+ for layer_name, layer in layers.value['layers'].items():
340
  df = layer['df'].value
341
  if df is None:
342
  continue
343
  # we have something to display on map
344
  if "geometry" in list(df.columns) and layer['visible'].value:
345
+ map_layer = create_map_layer(df, layer_name)
346
  if map_layer is not None:
347
  map_layers.append(map_layer)
348
 
 
351
  zoom=zoom,
352
  on_zoom=set_zoom,
353
  on_bounds=set_bounds,
354
+ center=layers.value['center'].value,
355
+ on_center=layers.value['center'].set,
356
  scroll_wheel_zoom=True,
357
  dragging=True,
358
  double_click_zoom=True,
 
373
  solara.DataFrame(df)
374
 
375
  @solara.component
376
+ def ExecutePanel():
 
377
  infra, set_infra = solara.use_state(["power"])
378
  hazard, set_hazard = solara.use_state("earthquake")
379
 
 
386
  set_execute_counter(execute_counter + 1)
387
  execute_error.set("")
388
 
389
+ def is_ready_to_run(infra, hazard):
390
+ existing_layers = set([name for name, l in layers.value['layers'].items() if l['df'].value is not None])
391
  missing = []
392
 
393
  if hazard == "earthquake":
 
408
 
409
 
410
  def execute_engine():
 
 
 
 
 
 
 
 
 
 
 
411
 
412
 
413
+ def execute_infra():
414
+ nodes = layers.value['layers']['power nodes']['df'].value
415
+ edges = layers.value['layers']['power edges']['df'].value
416
+ intensity = layers.value['layers']['intensity']['df'].value
417
+ power_fragility = layers.value['layers']['power fragility']['df'].value
 
 
 
 
418
 
 
419
 
420
+ eq_ds, is_damaged, is_operational = compute_power_infra(nodes,
421
+ edges,
422
+ intensity,
423
+ power_fragility)
424
+
425
+ #power_node_df = dfs['Power Nodes'].copy()
426
+ nodes['ds'] = list(eq_ds)
427
+ nodes['is_damaged'] = list(is_damaged)
428
+ nodes['is_operational'] = list(is_operational)
429
+ return nodes
430
+
431
+ def execute_building():
432
+ buildings = layers.value['layers']['building']['df'].value
433
+ household = layers.value['layers']['household']['df'].value
434
+ individual = layers.value['layers']['individual']['df'].value
435
+ intensity = layers.value['layers']['intensity']['df'].value
436
+
437
+ fragility = layers.value['layers']['fragility']['df'].value
438
+ vulnerability = layers.value['layers']['vulnerability']['df'].value
439
+ computed_metrics, df_metrics, df_bld_hazard = compute(
440
+ buildings,
441
+ household,
442
+ individual,
443
+ intensity,
444
+ fragility if hazard == "earthquake" else vulnerability,
445
+ hazard)
446
+
447
+ print(computed_metrics)
448
+ for metric in df_metrics.keys():
449
+ buildings[metric] = list(df_metrics[metric][metric])
450
+ layers.value['metrics'][metric]['value'] = computed_metrics[metric]['value']
451
+ layers.value['metrics'][metric]['max_value'] = computed_metrics[metric]['max_value']
452
+ buildings['ds'] = list(df_bld_hazard['ds'])
453
+ return buildings
454
 
455
+ if execute_counter > 0 :
456
+ is_ready, missing = is_ready_to_run(infra, hazard)
457
+ if not is_ready:
458
+ raise Exception(f'Missing {missing}')
459
+
460
+ if 'power' in infra:
461
+ nodes = execute_infra()
462
+ layers.value['layers']['power nodes']['df'].set(nodes)
463
+ if 'building' in infra:
464
+ buildings = execute_building()
465
+ layers.value['layers']['building']['df'].set(buildings)
466
 
467
+ # trigger render event
468
+ layers.value['render_count'].set(layers.value['render_count'].value + 1)
469
+ if 'power' in infra:
470
+ layers.value['layers']['power nodes']['force_render'].set(True)
471
  if 'building' in infra:
472
+ layers.value['layers']['building']['force_render'].set(True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
 
474
+
475
 
476
  # Execute the thread only when the depencency is changed
477
  result = solara.use_thread(execute_engine, dependencies=[execute_counter])
 
502
 
503
  @solara.component
504
  def WebApp():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
505
 
506
  with solara.Columns([30,60]):
507
  with solara.Column():
508
+
509
+ solara.Markdown('[Download Sample Dataset](https://drive.google.com/file/d/1BGPZQ2IKJHY9ExOCCHcNNrCTioYZ8D1y/view?usp=sharing)')
510
+ FileDropZone()
511
+ ExecutePanel()
512
  with solara.Column():
513
+ LayerController()
514
+ MapViewer()
515
+ MetricPanel()
516
 
517
+ LayerDisplayer()
518
 
519
  @solara.component
520
+ def Page(name: Optional[str] = None, page: int = 0, page_size=100):
521
  css = """
522
  .v-input {
523
  height: 10px;
 
531
 
532
  """
533
  solara.Style(value=css)
534
+ solara.Title("TCDSE » Engine")
535
 
536
  WebApp()
 
 
 
 
tomorrowcities/pages/settings.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ import solara
2
+
3
+ from . import user
4
+
5
@solara.component
def Page():
    """Admin-only settings page.

    The routing layer (check_auth) should keep anonymous users away, but
    the page still guards itself. The original used ``assert``, which is
    silently stripped under ``python -O``; use an explicit check that
    fails safe instead.
    """
    if user.value is None:
        return solara.Error("You must be logged in to view this page.")
    solara.Markdown(f"Hi {user.value.username}, you are an admin")