text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'bakeneko'
import pygame
# Initialize Pygame
pygame.init()
# Set the height and width of the screen
screen_width = 640
screen_height = 480
screen = pygame.display.set_mode([screen_width, screen_height])
# Splash-screen assets: a logo revealed left-to-right, then a sound effect.
logo = pygame.image.load('test_logo.png')
sound = pygame.mixer.Sound('test_sound.ogg')
sound_channel = None  # set to the playing Channel once the reveal finishes
# Vertically centre the logo on the screen.
logo_y = (screen_height / 2) - (logo.get_rect().height / 2)
# [x, y, w, h] slice of the logo drawn this frame; x sweeps rightwards.
logo_slice = [0, 0, 15, logo.get_rect().height]
PIXELS_PER_SECOND = 300  # horizontal sweep speed of the reveal
LOGO_WIDTH = 600  # presumably the logo image width in px -- TODO confirm
FPS = 60
end_logo = False
clock = pygame.time.Clock()
total_seconds = 0
change_x = 0  # NOTE(review): never used below
# Main loop: sweep a 15px-wide column across the logo; once the sweep passes
# LOGO_WIDTH, play the sound (once) and show the full logo; quit after 4.5 s.
while not end_logo:
    # tick() caps the frame rate and returns ms elapsed since the last call.
    time_in_millis = clock.tick(FPS)
    seconds = time_in_millis / 1000.0
    total_seconds += seconds
    screen.fill(pygame.Color('white'))
    if logo_slice[0] <= LOGO_WIDTH:
        # Reveal phase: copy the current column into a transparent surface.
        logo_part = pygame.Surface(logo_slice[2:4], pygame.SRCALPHA, 32)
        logo_part.blit(logo, (0, 0), logo_slice)
        screen.blit(logo_part, (int(logo_slice[0]), logo_y))
        logo_slice[0] += PIXELS_PER_SECOND * seconds
    else:
        # Reveal finished: start the sound once and draw the whole logo.
        if not sound_channel:
            sound_channel = sound.play()
        screen.blit(logo, (20, logo_y))
    pygame.display.flip()
    if total_seconds > 4.5:
        end_logo = True
pygame.quit()
| {
"repo_name": "nekotiko/workshop_env",
"path": "test/test.py",
"copies": "1",
"size": "1226",
"license": "mit",
"hash": 1772931837571929900,
"line_mean": 20.5087719298,
"line_max": 73,
"alpha_frac": 0.6386623165,
"autogenerated": false,
"ratio": 2.9613526570048307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4100014973504831,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bakl'
# CGS
class phys:
    """Physical constants and unit conversions in CGS units."""
    h = 6.626068e-27  # Planck constant [erg s]
    c = 2.9979245800e10  # speed of light [cm/s]
    k = 1.3806504e-16  # Boltzmann constant [erg K^-1]
    sigma_SB = 5.6704e-5  # erg cm^-2 s^-1 K^-4, Stefan-Boltzman Constant
    H0 = 68  # Hubble constant [km/c/Mpc]
    G = 6.6743e-8  # Newton's gravitational constant cm3 g-1 s-2
    echarg = 4.8032042000e-10  # elementary charge [esu]
    avogar = 6.0221419900e+23  # Avogadro's number
    # conversions
    angs_to_cm = 1.e-8
    cm_to_angs = 1. / angs_to_cm
    ZP_AB = -48.6  # zero point AB magnitude for nu
    ZP_AB_lmb = -21.10  # zero point AB magnitude for lambda
    jy_to_erg = 1.e-23  # 1 Jy = 10^-23 erg sec^-1 cm^-2 Hz^-1
    jy_to_photon = 1.51e3  # 1 Jy = 1.51e7 photons sec^-1 m^-2 (dlambda/lambda)^-1
    # units
    AU = 1.4959787066e13  # cm
    pc = 206265 * AU  # parsec in cm
    R_sun = 6.957e10  # cm
    M_sun = 1.99e33  # g
    L_sun = 3.8270e33  # ergs # see https://sites.google.com/site/mamajeksstarnotes/bc-scale
    Mag_sun = 4.62  # https://ui.adsabs.harvard.edu/abs/1938ApJ....88..429K/abstract
    # Mag_sun = 4.7554  # Tsvetkov, in a letter: I found these constants: L_sun=3.828e33 M_bolSun=4.74
    FOE = 1.e51  # ergs
    d2s = 24. * 60. * 60.  # convert days to seconds
    ev2erg = 1.6021764630e-12  # convert eV to erg

    @staticmethod
    def pc2cm(parsec):
        """Takes in a measurement in parsecs and returns cm"""
        return parsec * phys.pc

    @staticmethod
    def cosmology_D_by_z(*args, **kwargs):  # clone of the module-level function
        return cosmology_D_by_z(*args, **kwargs)

    @staticmethod
    def dist2MD(d):  # clone of the module-level function
        return dist2MD(d)
def dist2MD(d):
    """Convert a distance *d* in parsecs into a distance modulus [mag]."""
    import math
    # mu = 5 * log10(d) - 5
    modulus = math.log10(d) * 5 - 5.
    return modulus
def cosmology_D_by_z(z, H0=67.7, Omega_m=0.31, Omega_e=0.69):
    """Compute the photometric distance for Lambda-CDM model of cosmology

    Returns
    -------
    D : float
        Distance [Mpc]
    """
    from scipy.integrate import quad
    import numpy as np
    c_km_s = 2.998e5  # speed of light [km/s]

    def inv_E(zz):
        # Dimensionless inverse Hubble parameter for flat Lambda-CDM.
        return 1 / np.sqrt(Omega_m * (1. + zz) ** 3 + Omega_e)

    comoving_integral = quad(inv_E, 0, z)[0]
    return (1. + z) * c_km_s / H0 * comoving_integral
| {
"repo_name": "baklanovp/pystella",
"path": "pystella/util/phys_var.py",
"copies": "1",
"size": "2067",
"license": "mit",
"hash": -5217704563311572000,
"line_mean": 27.8309859155,
"line_max": 100,
"alpha_frac": 0.5847581827,
"autogenerated": false,
"ratio": 2.4456391875746712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35303973702746716,
"avg_score": null,
"num_lines": null
} |
from bokeh.plotting import *
from bokeh.models import HoverTool, ColumnDataSource
import pandas as pd
from collections import OrderedDict
# Annual labour statistics per state, split into one two-column frame
# per metric so valueforstate() can look each one up by state name.
datafile = pd.read_csv("./annual_averages_by_state.csv")
populations = pd.DataFrame(data=datafile, columns=['STATE','TOTAL_POPULATION'])
employed = pd.DataFrame(data=datafile, columns=['STATE','EMPLOYED'])
unemployed = pd.DataFrame(data=datafile, columns=['STATE','UNEMPLOYED'])
print unemployed  # debug output (Python 2 print statement)
def mtext(p, x, y, textstr):
    """Draw *textstr* centred at (x, y) on bokeh figure *p* in small green type."""
    p.text(x, y, text=textstr,
           text_color="#449944", text_align="center", text_font_size="10pt")
## returns population value from table.
def valueforstate(df, state):
    """Return the integer value in the second column of *df* for *state*.

    :param df: DataFrame whose first column is a state name and whose
        second column is a comma-grouped number string (e.g. "1,234").
    :param state: state name, matched case-insensitively.
    :return: the value as an int, or 0 when the state is not found.
    """
    target = state.upper()
    for row in df.values:
        # first column is stateName
        if row[0].upper() == target:
            # second column is the metric; strip thousands separators and
            # surrounding whitespace before converting.
            # (Early return: state names are unique, so there is no need
            # to keep scanning the remaining rows like the original did.)
            return int(row[1].replace(",", "").strip())
    return 0
########################################################################
# Loading accident data by state
# breaking it out into 4 quartiles
########################################################################
# NOTE(review): absolute user-specific path -- breaks on other machines.
datafile = pd.read_csv("/Users/bvenkatesan/Documents/workspace/PyCharmProjects/capstone/data/incidents_state_totals.csv")
accidents = pd.DataFrame(datafile, columns=['code','state','totals'])
# Quartile label 1..4 (1 = lowest fatality total) for each state row.
quartiles = pd.qcut(datafile["totals"],4,labels=[1, 2, 3, 4], precision=1)
########################################################################
# color palate
########################################################################
# Quartile label -> fill colour; '0' is the fallback for missing data.
colormap = {
    '1' : "#ffffb2",
    '2' : "#fecc5c",
    '3' : "#fd8d3c",
    '4' : "#f03b20",
    '0': "#f7f7f7",
}
def drawFile(removeOutliers, filename):
    """Build and show a scatter plot of fatalities vs population per state.

    :param removeOutliers: when True, skip California (treated as outlier).
    :param filename: output HTML file for the plot.
    """
    # Parallel lists; index i describes one state across all of them.
    state_colors = []
    state_names = []
    pops = []
    fatalities=[]
    employment=[]
    unemployment=[]
    p = figure(title="Fatalities Plot")
    ########################################################################
    # looping through the datafile to create state-by-state mapping
    ########################################################################
    for i, row in enumerate(accidents.values):
        try:
            code = row[0]
            state = row[1]
            total = row[2]
            if( removeOutliers and (code == 'CA')):
                print removeOutliers
                print 'remove outlier is true '+ state
                pass
            else:
                print 'remove outlier is false '+ state
                pops.append(valueforstate(populations,state))
                employment.append(valueforstate(employed,state))
                unemployment.append(valueforstate(unemployed,state))
                fatalities.append(total)
                state_colors.append(colormap[str(quartiles[i])])
                state_names.append(state)
                #print pops.__len__()
                #print state_colors.__len__()
                #print state_names.__len__()
                #print fatalities.__len__()
                print employment
                print unemployment.__len__()
                #idx = min(int(rate/2), 5)
                #state_colors.append(colors[idx])
        except KeyError:
            # Unknown quartile/state key: colour the point as "missing".
            state_colors.append(colormap['0'])
    ########################################################################
    # Creating columndatasource for hover tool
    ########################################################################
    hoverLabels = ColumnDataSource(
        data=dict(
            x=fatalities,
            y=pops,
            state= state_names,
            employed= employment,
            unemployed = unemployment,
        )
    )
    ########################################################################
    # Building the scatter figure (replaces the placeholder figure above)
    ########################################################################
    p = figure(title="Fatalities Total without Outlier", toolbar_location="left", tools="resize,hover,save",
               plot_width=1100, plot_height=700)
    hover = p.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([
        ('State: ', '@state'),
        ('Fatalities: ', '@x'),
        ('Population (in 000s): ', '@y'),
        ('Employment (in 000s): ', '@employed'),
        ('Unemployment (in 000s): ', '@unemployed'),
    ])
    output_file(filename)
    # NOTE(review): 'ylabel' is not a documented scatter() argument -- verify.
    p.scatter(fatalities, pops, marker="circle", line_color="#6666ee", ylabel='fatality', source=hoverLabels,
              fill_color=state_colors, fill_alpha=0.5, size=12)
    show(p) # open a browser
if __name__ == '__main__':
    # Render both variants: with and without the California outlier.
    drawFile(False,'../temp/scatter2_with_outlier.html')
    drawFile(True,'../temp/scatter2_without_outlier.html')
"repo_name": "OSHADataDoor/OshaBokeh",
"path": "bokehsamples/scattermap2.py",
"copies": "1",
"size": "4783",
"license": "apache-2.0",
"hash": -5106794308927897000,
"line_mean": 32.9290780142,
"line_max": 121,
"alpha_frac": 0.4952958394,
"autogenerated": false,
"ratio": 4.20298769771529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.519828353711529,
"avg_score": null,
"num_lines": null
} |
from bokeh.sampledata import us_states
from bokeh.plotting import *
from bokeh.models import HoverTool, ColumnDataSource
import pandas as pd
from collections import OrderedDict
########################################################################
# Loading us_states from bokeh sampledata library.
# Removing Alaska & Hawaii for this exercise
########################################################################
us_states = us_states.data.copy()
del us_states["HI"]
del us_states["AK"]
########################################################################
# Loading accident data by state
# breaking it out into 4 quartiles
########################################################################
datafile = pd.read_csv("./incidents_state_totals.csv")
accidents = pd.DataFrame(datafile, columns=['code','state', 'totals'])
# Quartile labels serve both as colormap keys and hover "risk factor".
quartiles = pd.qcut(datafile["totals"],4,labels=["low", "moderate", "high", "very high"], precision=1)
########################################################################
# color palate
########################################################################
colormap = {
    'low' : "#ffffb2",
    'moderate' : "#fecc5c",
    'high' : "#fd8d3c",
    'very high' : "#f03b20",
    'none': "#f7f7f7",
}
# Parallel lists; index i describes one state across all of them.
state_colors = []
state_xs = []
state_ys= []
state_names = []
totals = []
risk_factor=[]
########################################################################
# looping through the datafile to create state-by-state mapping
########################################################################
for i, row in enumerate(accidents.values):
    try:
        code = row[0]
        state = row[1]
        total = row[2]
        # KeyError (unknown state code) falls through to the 'none' colour.
        tabledata = us_states[code]['name'].upper()
        q = quartiles[i]
        if (state == tabledata):
            state_colors.append(colormap[q])
            lons = us_states[code]["lons"]
            lats = us_states[code]["lats"]
            state_xs.append(lons)
            state_ys.append(lats)
            state_names.append(state)
            totals.append(total)
            risk_factor.append(q)
        #idx = min(int(rate/2), 5)
        #state_colors.append(colors[idx])
    except KeyError:
        state_colors.append(colormap['none'])
output_file("../temp/heatmap.html", title="Fatalities - Heat Map")
########################################################################
# Creating columndatasource for hover tool
########################################################################
source = ColumnDataSource(
    data=dict(
        x=state_xs,
        y=state_ys,
        state= state_names,
        fatalities=totals,
        risk = risk_factor,
    )
)
########################################################################
# Drawing the choropleth of fatality totals
########################################################################
p = figure(title="Fatalities Total", toolbar_location="left",tools="resize,hover,save", plot_width=1100, plot_height=700)
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
    ('state: ', '@state'),
    ('fatalities: ', '@fatalities'),
    ('risk factor : ', '@risk')
])
#p.patches(county_xs, county_ys, fill_color=county_colors, fill_alpha=0.7,
#          line_color="white", line_width=0.5)
p.patches(state_xs, state_ys, fill_color=state_colors, fill_alpha=0.7, source=source, line_color="black", line_width=0.7)
show(p)
| {
"repo_name": "OSHADataDoor/OshaBokeh",
"path": "bokehsamples/heatmap.py",
"copies": "1",
"size": "3472",
"license": "apache-2.0",
"hash": -5314071018251472000,
"line_mean": 31.4485981308,
"line_max": 121,
"alpha_frac": 0.4683179724,
"autogenerated": false,
"ratio": 4.09433962264151,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005208114470963314,
"num_lines": 107
} |
########################################################################
# Wrote this file to separate out the loading of the data from the
# python file where the actual display happens
########################################################################
import pandas as pd
import csv
########################################################################
# Loading data
########################################################################
statefile = open('./annual_averages_by_state.csv', 'r')
csvreader = csv.reader(statefile)
########################################################################
# initializing a dataframe to parse only required data from file
########################################################################
columns = ["STATE",
"TOTAL_POPULATION",
"WORKFORCE",
"WORK_%_OF_POP",
"EMPLOYED",
"EMP_%_OF_POP",
"UNEMPLOYED",
"UNEMPLOMENT_RATE",
]
data = []
rowIndex = 0
########################################################################
# function that parses the state data for 2012 & 2013 and returns
# a DataFrame with the data read from the file
# the function cleans the data before returning the DataFrame
########################################################################
def state_data():
    """Parse the state CSV into a cleaned DataFrame.

    Reads every row from the module-level ``csvreader``, upper-cases the
    state name (first cell), strips thousands separators from the
    remaining numeric cells, and returns a DataFrame built over the
    module-level ``data`` list with the module-level ``columns`` header.

    Bug fixes vs the original: the old loop iterated
    ``for x in enumerate(row)`` so ``x`` was an (index, value) tuple and
    ``x.replace(...)`` raised AttributeError; and ``data.insert(rowIndex,
    values)`` with a never-incremented ``rowIndex`` reversed the row
    order. Debug prints were removed along with the crash.
    """
    for row in csvreader:
        if not row:
            # Skip blank lines in the CSV.
            continue
        statename = row[0]
        values = [statename.upper()]
        # Remaining cells are numbers with thousands separators ("1,234").
        for cell in row[1:]:
            values.append(cell.replace(",", ""))
        data.append(values)
    df = pd.DataFrame(data, columns=columns)
    return df
if __name__ == '__main__':
    # Quick manual check: parse and dump the frame (Python 2 print statement).
    print state_data()
| {
"repo_name": "OSHADataDoor/OshaBokeh",
"path": "bokehsamples/osha_files.py",
"copies": "1",
"size": "2378",
"license": "apache-2.0",
"hash": 2424964485674786300,
"line_mean": 29.4871794872,
"line_max": 95,
"alpha_frac": 0.4007569386,
"autogenerated": false,
"ratio": 5.55607476635514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.645683170495514,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baniu.yao@gmail.com'
import hashlib
import re
import os
import argparse
class LogKeywordCheck(object):
    """ A simple tool to check if keywords exist in log files.
    This tool is able to read file at the position it read last time and
    it can read keyword from file and command line args.
    """
    class Offset(object):
        """An internal class which is the abstract of offset
        """
        def __init__(self, offset_file_name):
            """Constructor for Offset, with offset_file_name
            Offset file only has one line like below:
            FILE_FIRST_LINE_MD5|100
            FILE_FIRST_LINE_MD5 is used to judge if a log file is rotated
            and '100' here means the offset. The offset is calculated by
            'file.tell()'.
            :param offset_file_name: file_name for store offset info
            :return: None
            """
            self._offset_file_name = offset_file_name
            if not os.path.isfile(self._offset_file_name):
                self.init()

        def init(self):
            """Init offset_file name with default value
            :return: None
            """
            # NOTE: file() is the Python 2 built-in alias of open().
            file(self._offset_file_name, 'w').write('NO_FIRST_LINE_MD5|0')

        def save(self, tag, offset):
            """ Save tag and offset into offset file
            :return: None
            """
            file(self._offset_file_name, 'w+').write('|'.join([tag, str(offset)]))

        def read(self):
            """ Read tag and offset from offset file
            :return: (tag, offset)
            """
            tag, offset = file(self._offset_file_name).read().split('|')
            return tag, int(offset)

    def __init__(self, file_name, keyword_type, keyword):
        """ Construction for LogKeywordCheck
        :param file_name: File to check
        :param keyword_type: this can be 'str' or 'file'.
        :param keyword: keywords conf file or a regex pattern
        :return: None
        """
        # One offset file in /tmp per (log file, keyword) combination.
        self._id = hashlib.md5(file_name + keyword_type + keyword).hexdigest()
        self._offset = self.Offset(offset_file_name='/tmp/log_keyword_check.' + self._id)
        self._file = file(file_name)
        self._current_tag, self._current_offset = self._offset.read()
        if self._is_file_rotated():
            self._offset.init()
        # Resume where the previous run stopped (offset is 0 after a
        # rotation, because _is_file_rotated resets it).
        self._file.seek(self._current_offset)
        self._generate_keyword_re_pattern(keyword_type, keyword)

    def _generate_keyword_re_pattern(self, keyword_type, keyword):
        """ Generate regex pattern for fast matching
        :param keyword_type: 'file' reads one keyword per line from the
            given path; 'str' uses the keyword as a regex directly
        :param keyword: keyword conf file path or a regex pattern
        :return: None
        """
        if keyword_type == 'file':
            # Join the file's lines into one alternation '(KW1|KW2|...)'
            # with all newlines stripped out.
            self._re_pattern = re.compile('(' + '|'.join(file(keyword).readlines()).replace('\n', '') + ')')
        elif keyword_type == 'str':
            self._re_pattern = re.compile(keyword)

    def _line2tag(self, line):
        """ Convert line string into md5 to simplify the comparison of tag.
        :param line: input string
        :return: MD5_STRING
        """
        return hashlib.md5(line).hexdigest()

    def _is_file_rotated(self):
        """ Judge if the file has been rotated before. Log file only has append operation,
        so the first line of log file can be the tag of one certain log file. If the tag
        has changed, the log file has been rotated.
        NOTE(review): despite the name, this returns True when the stored
        tag is UNCHANGED and False after a rotation (in which case it also
        resets _current_offset to 0); the caller in __init__ relies on
        this inverted meaning -- confirm before renaming or "fixing".
        :return: True when the tag still matches, else False
        """
        current_tag = self._line2tag(self._read_file_first_line())
        if current_tag == self._current_tag:
            return True
        else:
            self._current_tag = current_tag
            self._current_offset = 0
            return False

    def _read_file_first_line(self):
        """ Read the first line of file. The first line is used to judge
        if the file has been rotated
        :return: first line, stripped
        """
        # Remember the current position, peek at line 1, then seek back.
        tmp_offset = self._file.tell()
        self._file.seek(0)
        first_line = self._file.readline()
        self._file.seek(tmp_offset)
        return first_line.strip()

    def _read_lines(self):
        """ Simple wrapper of file.read(). This function only does the strip() job.
        :return: line
        """
        for line in self._file.read().split('\n'):
            if line == '':
                continue
            yield line

    def _is_re_matched(self, line):
        """ This function is used to tell if the regex pattern has found matched string.
        :param line:
        :return: True/False
        """
        return True if len(self._re_pattern.findall(line)) > 0 else False

    def process(self):
        """ Main process which is invoked by __main__
        Prints all matching lines (or '0' when none) and persists the new
        read offset for the next invocation.
        :return: None
        """
        lines = self._read_lines()
        error_lines = []
        for line in lines:
            if self._is_re_matched(line):
                error_lines.append(line)
        # Remember how far we read so the next run resumes from here.
        self._offset.save(self._current_tag, self._file.tell())
        if len(error_lines) > 0:
            print '\n'.join(error_lines)
        else:
            print 0
if __name__ == '__main__':
    # Command-line entry: --file <log>, --type str|file, --keyword ...
    parser = argparse.ArgumentParser(description='log file keyword check')
    parser.add_argument('--file', dest='file_name', required=True,
                        help='file to check')
    parser.add_argument('--type', dest='keyword_type', choices=['str', 'file'], required=True,
                        help='keyword type, str or file')
    parser.add_argument('--keyword', dest='keyword', required=True, nargs='+',
                        help='keyword conf file or keyword re pattern')
    args = parser.parse_args()
    """ If --keyword has more than one keyword, codes below will compose them into regex pattern
    like '(KEYWORD_1|KEYWORD_2|KEYWORD_3|...)'
    """
    if len(args.keyword) > 1:
        keyword = '(' + '|'.join(args.keyword) + ')'
    else:
        keyword = args.keyword[0]
    lkc = LogKeywordCheck(file_name=args.file_name, keyword_type=args.keyword_type, keyword=keyword)
    lkc.process()
| {
"repo_name": "baniuyao/python-log-keyword-check",
"path": "log_keyword_check.py",
"copies": "1",
"size": "5956",
"license": "mit",
"hash": 7011796058342724000,
"line_mean": 35.3170731707,
"line_max": 108,
"alpha_frac": 0.5691739422,
"autogenerated": false,
"ratio": 4.0683060109289615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010837255158969421,
"num_lines": 164
} |
__author__ = 'baohua'
from oslo_config import cfg
from tripled.common import config #noqa
from tripled.common.log import error
from tripled.common.credential import get_creds
import keystoneclient.v2_0.client as ksclient
class KeystoneClient(object):
    """
    KeystoneClient: client to get keystone resources.
    """
    def __init__(self, username=None, tenant_name=None, password=None,
                 auth_url=None):
        """Create a keystone v2.0 client.

        Missing credentials are filled from the environment via
        get_creds(); explicitly passed arguments win.
        """
        d = get_creds()
        if d:
            username = username or d['username']
            tenant_name = tenant_name or d['tenant_name']
            password = password or d['password']
            auth_url = auth_url or d['auth_url']
        self.client = ksclient.Client(username=username,
                                      password=password,
                                      tenant_name=tenant_name,
                                      auth_url=auth_url)

    def get_tenant_by_id(self, id):
        """Return the tenant with the given id, or the string 'not found'
        when the lookup fails (interface kept for existing callers).
        """
        try:
            return self.client.tenants.get(id)
        except Exception:
            # Bug fix: the original wrapped the message in _(), which is
            # never defined/imported in this module, so the handler itself
            # raised NameError instead of logging.
            error("Did not find tenant: %r", id)
            return 'not found'

    def get_tenant_name_by_id(self, id):
        """Return the tenant's name, or None when the tenant is unknown."""
        tenant = self.get_tenant_by_id(id)
        # Bug fix: on failure get_tenant_by_id returns the truthy string
        # 'not found', so the original crashed with AttributeError on
        # tenant.name; treat the sentinel as "no tenant".
        if tenant and tenant != 'not found':
            return tenant.name

    def get_tokens(self):
        """Return the tokens manager of the underlying client."""
        return self.client.tokens

    def get_endpoints(self):
        """Return the endpoints manager of the underlying client."""
        return self.client.endpoints

    def get_roles(self):
        """Return the roles manager of the underlying client."""
        return self.client.roles

    def get_services(self):
        """Return the services manager of the underlying client."""
        return self.client.services

    def get_tenants(self):
        """Return the tenants manager of the underlying client."""
        return self.client.tenants

    def get_users(self):
        """Return the users manager of the underlying client."""
        return self.client.users
| {
"repo_name": "yeasy/tripled",
"path": "tripled/stack/keystone.py",
"copies": "1",
"size": "1618",
"license": "apache-2.0",
"hash": 556864122979526300,
"line_mean": 27.8928571429,
"line_max": 70,
"alpha_frac": 0.5784919654,
"autogenerated": false,
"ratio": 4.065326633165829,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5143818598565829,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baohua'
from oslo_config import cfg
from tripled.stack.node import Control, Network, Compute
from tripled.stack.keystone import KeystoneClient
from tripled.stack.nova import NovaClient
from tripled.stack.neutron import NeutronClient
class Stack(object):
    """
    An instance of the operational stack: the control, network and
    compute nodes listed in the project configuration file.
    """
    def __init__(self):
        cfg.CONF(project='tripled')
        conf = cfg.CONF
        # Wrap each configured IP in its role-specific node class.
        self.control_nodes = [Control(ip) for ip in conf.STACK.control_nodes]
        self.network_nodes = [Network(ip) for ip in conf.STACK.network_nodes]
        self.compute_nodes = [Compute(ip) for ip in conf.STACK.compute_nodes]
        #self.keystone = KeystoneClient()
        #self.nova = NovaClient()
        #self.neutron = NeutronClient()

    def get_control_nodes(self):
        """Return the list of control node instances from the conf file."""
        return self.control_nodes

    def get_network_nodes(self):
        """Return the list of network node instances from the conf file."""
        return self.network_nodes

    def get_computer_nodes(self):
        """Return the list of compute node instances from the conf file."""
        return self.compute_nodes

    def get_nodes(self):
        """Return all node instances: control, then network, then compute."""
        all_nodes = self.get_control_nodes()
        all_nodes = all_nodes + self.get_network_nodes()
        return all_nodes + self.get_computer_nodes()
# Shared module-level stack instance used by the check cases.
stack = Stack()
if __name__ == '__main__':
    # Smoke check: list the IPs of every configured node (Py2 print).
    s = Stack()
    for n in s.get_nodes():
        print n.ip
| {
"repo_name": "yeasy/tripled",
"path": "tripled/stack/stack.py",
"copies": "1",
"size": "1681",
"license": "apache-2.0",
"hash": -8217433225972517000,
"line_mean": 27.0166666667,
"line_max": 94,
"alpha_frac": 0.6139202855,
"autogenerated": false,
"ratio": 3.9552941176470586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069214403147059,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baohua'
from subprocess import PIPE, Popen
from tripled.common.constants import NODE_ROLES
class Node(object):
    """
    An instance of the server in the stack.
    """
    def __init__(self, ip, role):
        # Unknown role names fall back to the 'compute' role mapping.
        self.ip = ip
        self.role = NODE_ROLES.get(role, NODE_ROLES['compute'])

    def is_reachable(self, dst):
        """
        Return whether the dst is reachable from the node
        (3 ping probes, 2 second timeout each).

        Example:
            Node('10.0.0.1', 'compute').is_reachable(Node('127.0.0.1', 'compute'))

        NOTE(review): the original doctests called Node() with no
        arguments, which raises TypeError since ip and role are required,
        so doctest.testmod() in __main__ could never pass; rewritten as a
        plain (non-executed) example.
        """
        cmd = 'ping %s -c 3 -W 2' % dst.ip
        # shell=True is acceptable only while dst.ip comes from trusted
        # configuration, not user input.
        output, error = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True).communicate()
        # NOTE(review): under Python 3 communicate() returns bytes, so the
        # substring test below would need b'0% packet loss'; this file
        # targets Python 2.
        if not error and output and '0% packet loss' in output:
            return True
        else:
            return False
class Control(Node):
    """
    An instance of the control node in the stack.
    """
    def __init__(self, ip='127.0.0.1'):
        """Create a control node.
        :param ip: management IP of the node (defaults to localhost).
        """
        super(Control, self).__init__(ip, role='control')
class Network(Node):
    """
    An instance of the network node in the stack.
    """
    def __init__(self, ip='127.0.0.1'):
        """Create a network node.
        :param ip: management IP of the node (defaults to localhost).
        """
        super(Network, self).__init__(ip, role='network')
class Compute(Node):
    """
    An instance of the compute node in the stack.
    """
    def __init__(self, ip='127.0.0.1'):
        """Create a compute node.
        :param ip: management IP of the node (defaults to localhost).
        """
        super(Compute, self).__init__(ip, role='compute')
if __name__ == '__main__':
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| {
"repo_name": "yeasy/tripled",
"path": "tripled/stack/node.py",
"copies": "1",
"size": "1453",
"license": "apache-2.0",
"hash": -782027910157479600,
"line_mean": 22.0634920635,
"line_max": 86,
"alpha_frac": 0.5581555403,
"autogenerated": false,
"ratio": 3.5876543209876544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4645809861287654,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baohua'
from tripled.common.credential import get_creds
class ServiceClient(object):
    """
    ServiceClient: base client to get service resources.

    Subclasses set ``self.client`` to a concrete OpenStack client and
    populate ``self.resources`` with {resource_name: [attribute, ...]}.
    """
    def __init__(self, username=None, tenant_name=None, password=None,
                 auth_url=None):
        """Fill credentials from the arguments, falling back to get_creds()."""
        d = get_creds()
        if d:
            self.username = username or d['username']
            self.tenant_name = tenant_name or d['tenant_name']
            self.password = password or d['password']
            self.auth_url = auth_url or d['auth_url']
        self.client = None
        self.resources = {}  # store the name: attributes of each resource

    def get_res_stat(self):
        """Get a dict of each resources.
        :param:
        :returns: a dict e.g., {'resource_name':[string1, string2, ...]}
        """
        result = {}
        for r in self.resources:
            result[r] = self.get_res_str(*(self.resources[r]), resource_name=r)
        return result

    def get_res_str(self, *args, **kwargs):
        """Get strings
        :param *args: the attributes list of the resource
        :param **kwargs: the options to do list()
        :returns: a list e.g., ['resource1_str', 'resource2_str', ...],
            or None when no attributes / no resource_name were given
        """
        if not args:
            return None
        res_name = kwargs.pop('resource_name', None)
        if not res_name:
            return None
        result = []
        # Improvement: getattr() replaces the original eval() calls --
        # identical lookup for valid identifiers, but no arbitrary-code
        # execution risk if names ever come from untrusted input.
        for e in getattr(self.client, res_name).list(**kwargs):
            result.append('\t'.join([getattr(e, r) for r in args if r]))
        return result
| {
"repo_name": "yeasy/tripled",
"path": "tripled/stack/service_client.py",
"copies": "1",
"size": "1559",
"license": "apache-2.0",
"hash": -4742778416026847000,
"line_mean": 31.4791666667,
"line_max": 79,
"alpha_frac": 0.5490699166,
"autogenerated": false,
"ratio": 3.8304668304668303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48795367470668305,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baohua'
from tripled.common.log import warn, info, output
from tripled.common.case import Case
class UnderlayConnectivity(Case):
    """
    UnderlayConnectivity : the case to detect underlay connectivity problem.
    """
    def __init__(self):
        super(UnderlayConnectivity, self).__init__()

    def test_connectivity(self, nodes_src, nodes_dst):
        """Check every pair from the given node list are connected.
        :param nodes_src: source nodes to ping from
        :param nodes_dst: destination nodes to ping
        :returns: True or False
        """
        # Fail fast on the first unreachable pair, recording the failure
        # message for later display by the base Case class.
        for src in nodes_src:
            for dst in nodes_dst:
                if not src.is_reachable(dst):
                    self.fail_msg.append('Node %s cannot reach %s'
                                         % (src.ip, dst.ip))
                    warn('node %s cannot reach %s' % (src.ip, dst.ip))
                    return False
        return True

    def run_case(self, stack):
        """Check the underlay connectivity status.
        :param stack: the stack instance
        :returns: True or False
        """
        control_nodes = stack.get_control_nodes()
        network_nodes = stack.get_network_nodes()
        compute_nodes = stack.get_computer_nodes()
        # Required reachability: control<->compute, network<->compute and
        # control<->network; `and` short-circuits on the first failure.
        self.result = \
            self.test_connectivity(control_nodes, compute_nodes) and \
            self.test_connectivity(network_nodes, compute_nodes) and \
            self.test_connectivity(control_nodes, network_nodes)
        super(UnderlayConnectivity, self).run_case(module_name='Underlay '
                                                               'Connectivity')
if __name__ == '__main__':
    # Run the connectivity check against the module-level stack.
    UnderlayConnectivity().run()
| {
"repo_name": "yeasy/tripled",
"path": "tripled/case/system/underlay_connectivity.py",
"copies": "1",
"size": "1672",
"license": "apache-2.0",
"hash": -6616190255260927000,
"line_mean": 33.8333333333,
"line_max": 78,
"alpha_frac": 0.5675837321,
"autogenerated": false,
"ratio": 4.26530612244898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 48
} |
__author__ = 'baohua'
from tripled.common.log import warn, info, output
from tripled.stack.stack import stack as the_stack
from tripled.common.util import color_str
import sys
class Case(object):
"""
A check case.
"""
def __init__(self, stack=the_stack):
self.success_msg = []
self.fail_msg = []
self.stat_msg = []
self.result = True
def run_case(self, **kwargs):
"""Run the case with given options.
:param module_name: The name of modules, will be shown in the msg
:returns:
"""
module_name = kwargs.get('module_name', None)
if self.result:
self.success_msg.append('>>>%s PASSED' % module_name or sys.modules[__name__])
else:
self.fail_msg.insert(0, '>>>%s FAILED' % module_name or sys.modules[__name__])
def show_msg(self):
"""Show the success or failed msg.
:param:
:returns:
"""
if self.result and self.success_msg:
print color_str('g', '\n'.join(self.success_msg))
elif self.result == False and self.fail_msg:
print color_str('r', '\n'.join(self.fail_msg))
if self.stat_msg:
print color_str('b', '\n'.join(self.stat_msg))
def run(self, stack=the_stack, **kwargs):
"""Run the case and show it's output msg.
:param statck: The stack instance
:returns:
"""
self.run_case(stack=the_stack, **kwargs)
self.show_msg()
| {
"repo_name": "yeasy/tripled",
"path": "tripled/common/case.py",
"copies": "1",
"size": "1506",
"license": "apache-2.0",
"hash": 5465549019019218000,
"line_mean": 27.9615384615,
"line_max": 90,
"alpha_frac": 0.5610889774,
"autogenerated": false,
"ratio": 3.585714285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9642875768257908,
"avg_score": 0.000785498971275459,
"num_lines": 52
} |
__author__ = 'baohua'
import logging
import sys
import types
from oslo_config import cfg
from tripled.common import config # do not remove this line
OUTPUT = 25  # custom level between INFO (20) and WARNING (30)
# Level-name -> numeric logging level lookup table.
LEVELS = {'debug': logging.DEBUG,
          'info': logging.INFO,
          'output': OUTPUT,
          'warning': logging.WARNING,
          'error': logging.ERROR,
          'critical': logging.CRITICAL}
#default: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LEVEL_DEFAULT = cfg.CONF.LOG.level  # default level name from project config
# Modified from python2.5/__init__.py
class StreamHandlerNoNewline(logging.StreamHandler):
    """StreamHandler that doesn't print newlines by default.
    Since StreamHandler automatically adds newlines, define a mod to more
    easily support interactive mode when we want it, or errors-only logging
    for running unit tests."""
    def emit(self, record):
        """Emit a record.
        If a formatter is specified, it is used to format the record.
        The record is then written to the stream WITHOUT a trailing
        newline (that is the whole point of this subclass). If exception
        information is present, it is formatted using
        traceback.printException and appended to the stream."""
        try:
            msg = self.format(record)
            fs = '%s'  # was '%s\n'
            if not hasattr(types, 'UnicodeType'):  # if no unicode support...
                self.stream.write(fs % msg)
            else:
                try:
                    self.stream.write(fs % msg)
                except UnicodeError:
                    # Retry with an explicit UTF-8 encoding of the message.
                    self.stream.write(fs % msg.encode('UTF-8'))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Mirror logging.StreamHandler: delegate unexpected emit errors
            # to the standard handleError hook instead of crashing the app.
            self.handleError(record)
class Singleton(type):
    """Singleton pattern from Wikipedia
    See http://en.wikipedia.org/wiki/Singleton_Pattern

    Intended to be used as a __metaclass__ param: the first construction
    of the class creates and caches the instance; every later call
    returns that same cached object."""
    def __init__(cls, name, bases, dict_):
        super(Singleton, cls).__init__(name, bases, dict_)
        cls.instance = None

    def __call__(cls, *args, **kw):
        existing = cls.instance
        if existing is not None:
            return existing
        # First construction: build and cache the instance.
        created = super(Singleton, cls).__call__(*args, **kw)
        cls.instance = created
        return created
class MyLogger(logging.Logger, object):
    # Py2-style metaclass hook: the module shares one logger instance.
    __metaclass__ = Singleton

    def __init__(self):
        cfg.CONF(project='tripled')
        logging.Logger.__init__(self, "Logger")
        # create console handler (newline-free, see StreamHandlerNoNewline)
        ch = StreamHandlerNoNewline(sys.stdout)
        # create formatter from the configured message format
        formatter = logging.Formatter(cfg.CONF.LOG.msg_format)
        # add formatter to ch
        ch.setFormatter(formatter)
        # add ch to lg
        self.addHandler(ch)
        self.set_log_level()

    def set_log_level(self, levelname=LEVEL_DEFAULT):
        """Set both the logger and its console handler to *levelname*
        (a key of LEVELS, e.g. 'debug' or 'output')."""
        self.debug("Set log level to %s\n" % levelname)
        level = LEVELS.get(levelname)
        self.setLevel(level)
        self.handlers[0].setLevel(level)

    def output(self, msg, *args, **kwargs):
        """Log 'msg % args' with severity 'OUTPUT'.
        To pass exception information, use the keyword argument exc_info
        with a true value, e.g.
        logger.warning("Houston, we have a %s", "cli output", exc_info=1)
        """
        if self.manager.disable >= OUTPUT:
            return
        if self.isEnabledFor(OUTPUT):
            self._log(OUTPUT, msg, args, kwargs)
def make_list_compatible(fn):
    """Return a new function allowing fn('a 1 b') to be called as
    newfn('a', 1, 'b'): multiple positional arguments are stringified and
    joined with single spaces before the underlying call."""
    def newfn(*args):
        """
        Generated function. Closure-ish.
        """
        if len(args) == 1:
            return fn(*args)
        joined = ' '.join(str(arg) for arg in args)
        return fn(joined)
    # Preserve the wrapped function's identity for introspection and logs.
    newfn.__name__ = fn.__name__
    newfn.__doc__ = fn.__doc__
    return newfn
lg = MyLogger()
# Wrap each logging method so it also accepts multiple positional
# arguments (joined with spaces) -- see make_list_compatible().
info, output, warn, error, debug = (
    lg.info, lg.output, lg.warn, lg.error, lg.debug) = \
    [make_list_compatible(f) for f in
     lg.info, lg.output, lg.warn, lg.error, lg.debug]
setLogLevel = lg.set_log_level
if __name__ == "__main__":
    # Smoke test: show the level table and verify the 'debug' mapping.
    print LEVELS
    print LEVELS.get('debug') == logging.DEBUG
| {
"repo_name": "yeasy/tripled",
"path": "tripled/common/log.py",
"copies": "1",
"size": "4266",
"license": "apache-2.0",
"hash": 4320883797026556000,
"line_mean": 30.3676470588,
"line_max": 78,
"alpha_frac": 0.5881387717,
"autogenerated": false,
"ratio": 3.8781818181818184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49663205898818186,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baohua'
import novaclient.v1_1.client as novaclient
from tripled.stack.service_client import ServiceClient
class NovaClient(ServiceClient):
    """
    NovaClient: client wrapper exposing Nova resources (servers,
    services, images).
    """

    def __init__(self, username=None, tenant_name=None, password=None,
                 auth_url=None):
        super(NovaClient, self).__init__(username, tenant_name, password,
                                         auth_url)
        # Credentials resolved by ServiceClient are forwarded to nova.
        nova_kwargs = {
            'username': self.username,
            'api_key': self.password,
            'project_id': self.tenant_name,
            'auth_url': self.auth_url,
        }
        self.client = novaclient.Client(**nova_kwargs)
        # Resource kinds tracked by this client; 'images' also lists the
        # fields of interest.
        self.resources = {
            'servers': [],
            'services': [],
            'images': ['id', 'name', 'status'],
        }

    def get_servers(self):
        """Return the nova servers manager."""
        return self.client.servers

    def get_services(self):
        """Return the nova services manager."""
        return self.client.services

    def get_images(self):
        """Return the nova images manager."""
        return self.client.images
if __name__ == '__main__':
    # Smoke test: dump the resource statistics of a default-credential client.
    client = NovaClient()
    print client.get_res_stat()
| {
"repo_name": "yeasy/tripled",
"path": "tripled/stack/nova.py",
"copies": "1",
"size": "1078",
"license": "apache-2.0",
"hash": 6094419769053550000,
"line_mean": 30.7058823529,
"line_max": 92,
"alpha_frac": 0.5500927644,
"autogenerated": false,
"ratio": 4.178294573643411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228387338043411,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baohua'
import os
from oslo_config import cfg
from tripled.common import config #noqa
def get_creds():
    """Get the Keystone credentials.

    Values come from the tripled [AUTH] config section, then fall back to
    the standard OS_* environment variables, and finally to neutron's
    keystone_authtoken section.

    :returns: a map of credentials or None
    """
    d = {}
    cfg.CONF(project='tripled')
    AUTH = cfg.CONF.AUTH
    # os.environ.get() is used so a missing variable returns None instead
    # of raising KeyError (the original indexed os.environ directly).
    d['username'] = AUTH.username or os.environ.get('OS_USERNAME') or None
    d['password'] = AUTH.password or os.environ.get('OS_PASSWORD') or None
    d['tenant_name'] = AUTH.tenant_name or os.environ.get('OS_TENANT_NAME') or None
    d['auth_url'] = AUTH.auth_url or os.environ.get('OS_AUTH_URL') or None
    if d['username'] and d['password'] and d['tenant_name'] and d['auth_url']:
        return d
    # Fall back to neutron's keystone_authtoken configuration.
    cfg.CONF(project='neutron')
    keystone_conf = cfg.CONF.keystone_authtoken
    keystone_auth_url = ('%s://%s:%s/v2.0/' %
                         (keystone_conf.auth_protocol,
                          keystone_conf.auth_host,
                          keystone_conf.auth_port))
    d['username'] = keystone_conf.admin_user
    # BUG FIX: password and tenant_name were previously swapped
    # (password got admin_tenant_name and vice versa).
    d['password'] = keystone_conf.admin_password
    d['tenant_name'] = keystone_conf.admin_tenant_name
    d['auth_url'] = keystone_auth_url
    if d['username'] and d['password'] and d['tenant_name'] and d['auth_url']:
        return d
    return None
| {
"repo_name": "yeasy/tripled",
"path": "tripled/common/credential.py",
"copies": "1",
"size": "1349",
"license": "apache-2.0",
"hash": 6494320867109138000,
"line_mean": 34.5,
"line_max": 82,
"alpha_frac": 0.5767234989,
"autogenerated": false,
"ratio": 3.6361185983827493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4712842097282749,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baohua'
import pkgutil
import subprocess
from tripled.common.log import warn, debug, info, error, output
def color_str(color, raw_str):
    """Format a string with color.

    :param color: a color name, can be r, g, b or y (anything else -> white)
    :param raw_str: the string to be formatted
    :returns: a colorful string (ANSI escape-wrapped)
    """
    # ANSI foreground codes; 37 (white) is the fallback for unknown names.
    fore_codes = {'r': 31, 'g': 32, 'b': 36, 'y': 33}
    fore = fore_codes.get(color, 37)
    prefix = "\x1B[%d;%dm" % (1, fore)
    return "%s%s\x1B[0m" % (prefix, raw_str)
def get_pkg_modules(pkg_name):
    """Get the modules inside a package.

    :param pkg_name: the package name to be processed, e.g. 'tripled.case'
    :returns: a list of module names, or None if the package cannot be
        imported
    """
    import importlib
    try:
        # importlib replaces the previous exec()-based import, which was
        # both unsafe and unable to rebind function locals under Python 3
        # (exec(cmd) cannot assign to a local variable there).
        pkg = importlib.import_module(pkg_name)
        return [name for _, name, _ in pkgutil.iter_modules(pkg.__path__)]
    except ImportError:
        return None
def get_available_checks():
    """Get the available checks in the system.

    :returns: a list of available check module names, or None
    """
    checks_package = 'tripled.case'
    return get_pkg_modules(checks_package)
def run_check(name):
    """Run a check inside the stack.

    Each case module found under ``tripled.case.<name>`` is executed in a
    subprocess; its stdout is forwarded to output() and stderr to error().

    :param name: the check name, e.g., system or nova
    :returns: None
    """
    checks = get_available_checks()
    # Guard against None (unknown package) as well as an unregistered name;
    # `name not in None` would raise TypeError.
    if not checks or name not in checks:
        # BUG FIX: the message was wrapped in _(), but no gettext `_` is
        # defined or imported in this module, which raised NameError.
        warn("The check %s is not registered\n" % name)
        return
    pkg_name = 'tripled.case.%s' % name
    cases = get_pkg_modules(pkg_name)
    for case in cases:
        # Pass the argument vector directly (shell=False) instead of a
        # formatted shell string, avoiding any shell interpretation.
        cmd = ['sudo', 'python', '-m', '%s.%s' % (pkg_name, case)]
        info("run cmd = %s\n" % ' '.join(cmd))
        result, err = \
            subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE).communicate()
        if err:
            error(err)
        output(result)
if __name__ == "__main__":
    # Smoke test: run the "system" checks directly.
    run_check("system")
| {
"repo_name": "yeasy/tripled",
"path": "tripled/common/util.py",
"copies": "1",
"size": "1977",
"license": "apache-2.0",
"hash": -8445964601010291000,
"line_mean": 24.3461538462,
"line_max": 78,
"alpha_frac": 0.5528578655,
"autogenerated": false,
"ratio": 3.562162162162162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4615020027662162,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baranbartu'
from django.conf import settings
from celery.app.control import Control
from utils import import_object, nested_method
class CeleryClient(object):
    """Thin wrapper around the Celery application / Control API used to
    inspect workers, queues and tasks and to run or revoke tasks."""
    # Populated in __init__; kept as class-level defaults.
    _application = None
    _control = None
    _default_queue = None

    def __init__(self):
        """Load the Celery application configured in Django settings.

        :raises ValueError: if CELERY_APPLICATION_PATH is not configured
        """
        path = getattr(settings, 'CELERY_APPLICATION_PATH', None)
        if path is None:
            raise ValueError(
                'You need to define "CELERY_APPLICATION_PATH" on settings.')
        self._application = import_object(path)
        self._control = Control(self._application)
        self._default_queue = self._application.amqp.default_queue.name
        self._routes = getattr(settings, 'CELERY_ROUTES', {})

    @property
    def application(self):
        # The underlying Celery application object.
        return self._application

    @property
    def default_queue(self):
        # Name of the application's default queue.
        return self._default_queue

    @property
    def routes(self):
        # CELERY_ROUTES mapping (task name -> routing options).
        return self._routes

    def enable_events(self):
        # Ask all workers to start emitting task events.
        self._control.enable_events()

    def disable_events(self):
        # Ask all workers to stop emitting task events.
        self._control.disable_events()

    def workers(self):
        """Return a list of worker summary dicts with keys: name, status,
        concurrency, broker and queues. Empty list when nothing responds."""
        response = self._control.inspect().stats()
        if not response:
            return []
        statuses = self.worker_statuses()
        queues = self.active_queues()
        workers = []
        for name, info in response.iteritems():
            worker = dict()
            worker['name'] = name
            worker['status'] = statuses[worker['name']]
            worker['concurrency'] = info['pool']['max-concurrency']
            worker['broker'] = {'transport': info['broker']['transport'],
                                'hostname': info['broker']['hostname'],
                                'port': info['broker']['port']}
            worker['queues'] = queues[worker['name']]
            workers.append(worker)
        return workers

    def worker_statuses(self):
        """
        get worker statuses via a broadcast ping

        :return: {worker_name: 'Active'|'Passive'}; NOTE: returns an empty
            *list* (not dict) when no worker responds
        """
        response = self._control.ping()
        if not response:
            return []
        workers = {}
        for w in response:
            for k, v in w.iteritems():
                for k_inner, v_inner in v.iteritems():
                    if k_inner == 'ok' and v_inner == 'pong':
                        workers[k] = 'Active'
                    else:
                        workers[k] = 'Passive'
                    # Only the first reply entry is inspected per worker.
                    break
        return workers

    def active_queues(self):
        """
        get queue mappings with workers

        :return: {worker_name: [queue_name, ...]}
        """
        response = self._control.inspect().active_queues()
        if not response:
            return []
        workers = {}
        for w, queues in response.iteritems():
            workers[w] = list()
            for q in queues:
                workers[w].append(q['name'])
        return workers

    def registered_tasks(self):
        """
        get registered task list

        :return: {task_name: queue_name}, resolved through CELERY_ROUTES
            with the application default queue as fallback
        """
        response = self._control.inspect().registered()
        if not response:
            return []
        all_tasks = set()
        for worker, tasks in response.iteritems():
            for task in tasks:
                all_tasks.add(task)
        registered_tasks = {}
        for task in all_tasks:
            if task in self.routes:
                queue = self.routes[task].get('queue', self.default_queue)
            else:
                queue = self.default_queue
            registered_tasks[task] = queue
        return registered_tasks

    def active_tasks(self):
        """
        get active tasks which is running currently

        :return: list of {queue, name, id, worker} dicts
        """
        response = self._control.inspect().active()
        if not response:
            return []
        tasks = []
        for worker, task_list in response.iteritems():
            for task in task_list:
                t = dict()
                t['queue'] = task['delivery_info']['routing_key']
                t['name'] = task['name']
                t['id'] = task['id']
                t['worker'] = worker
                tasks.append(t)
        return tasks

    def reserved_tasks(self):
        """
        get reserved tasks which is in queue but still waiting to be executed

        :return: list of {queue, name, id, worker} dicts
        """
        response = self._control.inspect().reserved()
        if not response:
            return []
        tasks = []
        for worker, task_list in response.iteritems():
            for task in task_list:
                t = dict()
                t['queue'] = task['delivery_info']['routing_key']
                t['name'] = task['name']
                t['id'] = task['id']
                t['worker'] = worker
                tasks.append(t)
        return tasks

    def execute(self, command, parameter):
        """Dispatch ``command`` ('run' or 'revoke') with ``parameter``.

        The inner functions below are never called directly: nested_method()
        extracts the one whose name matches ``command`` from this method's
        code object and invokes it with (control, parameter).
        """
        def run(*args):
            # args = (control, task_path); launch the task asynchronously.
            task_verbose = args[1]
            task = import_object(task_verbose)
            task.delay()
        def revoke(*args):
            # args = (control, task_id); force-kill the running task.
            ctrl = args[0]
            task_id = args[1]
            ctrl.revoke(task_id, terminate=True, signal="SIGKILL")
        control = self._control
        nested = nested_method(self, 'execute', command)
        return nested(*(control, parameter))
| {
"repo_name": "baranbartu/djcelery-admin",
"path": "sample_project/celeryadmin/client.py",
"copies": "2",
"size": "5152",
"license": "mit",
"hash": -2382648969230712000,
"line_mean": 29.3058823529,
"line_max": 77,
"alpha_frac": 0.5114518634,
"autogenerated": false,
"ratio": 4.654019873532069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6165471736932069,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baranbartu'
import datetime
from client import CeleryClient
class ContextManager(object):
    """Aggregates celery state (workers, registered/queued tasks, events)
    for the admin views."""
    _client = None
    # _dashboard and _events are mutable class attributes and therefore the
    # same objects for each instance, so one instance will effectively be
    # used on the scope always.
    _dashboard = {}
    _events = {}
    # todo find a better way to define this field
    EVENT_SIZE_THRESHOLD = 100

    def __init__(self, client=None):
        self._client = client or CeleryClient()

    @property
    def dashboard(self):
        """Return worker/task summaries, refreshed on every access."""
        self._dashboard['workers'] = self.workers()
        self._dashboard['registered_tasks'] = self.registered_tasks()
        self._dashboard['queue_tasks'] = self.queue_tasks()
        return self._dashboard

    @property
    def tasks(self):
        """Flatten the stored events into a list of task dicts."""
        _tasks = []
        for uuid, event in self._events.iteritems():
            task = {'uuid': uuid, 'name': event.get('name', ''),
                    'state': event['type'].replace('task-', '').upper(),
                    # BUG FIX: the original had event.get('args' '') --
                    # adjacent-string concatenation instead of a default --
                    # so missing args yielded None rather than ''.
                    'args': event.get('args', ''),
                    'kwargs': event.get('kwargs', ''),
                    'received': event['local_received']}
            _tasks.append(task)
        return _tasks

    def add_event(self, event):
        """Store an event keyed by its uuid, evicting one entry when the
        threshold is reached. Converts local_received to a datetime."""
        if len(self._events) >= self.EVENT_SIZE_THRESHOLD:
            # BUG FIX: dict.pop(0) looked up the *key* 0 (never present for
            # uuid-keyed events) and raised KeyError once the threshold was
            # hit. Evict one existing entry instead.
            self._events.pop(next(iter(self._events)))
        if event['uuid'] in self._events:
            # Later events (e.g. task-succeeded) lack name/args/kwargs;
            # carry them over from the previously stored event.
            exists = self._events[event['uuid']]
            event.update(name=exists.get('name', ''))
            event.update(args=exists.get('args', ''))
            event.update(kwargs=exists.get('kwargs', ''))
        event['local_received'] = datetime.datetime.fromtimestamp(
            event['local_received'])
        self._events.update({event['uuid']: event})

    def workers(self):
        """Proxy to the celery client's worker listing."""
        return self._client.workers()

    def registered_tasks(self):
        """Proxy to the celery client's registered-task mapping."""
        return self._client.registered_tasks()

    def queue_tasks(self):
        """Currently running and queued-but-waiting tasks."""
        return {'active': self._client.active_tasks(),
                'reserved': self._client.reserved_tasks()}
class TaskStatus:
    # (code, label) pairs describing a task's lifecycle stage.
    RECEIVED = (0, 'received')
    STARTED = (1, 'started')
| {
"repo_name": "baranbartu/djcelery-admin",
"path": "celeryadmin/context.py",
"copies": "2",
"size": "2096",
"license": "mit",
"hash": -3157099993042204000,
"line_mean": 31.2461538462,
"line_max": 77,
"alpha_frac": 0.5682251908,
"autogenerated": false,
"ratio": 4.1836327345309385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5751857925330939,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baranbartu'
import os
import logging
import inspect
import linecache
from memgraph.plot import make_plot
from memgraph.utils import make_csv, remove_file
logger = logging.getLogger(__name__)
def determine_memory_info(prof, precision=1):
    """Turn a memory-profiler style ``prof.code_map`` into per-line memory
    samples, then render them with make_plot() via a temporary CSV file.

    :param prof: profiler object exposing ``code_map`` -- presumably a
        {code_object: {lineno: memory}} mapping (TODO confirm against the
        profiler used)
    :param precision: number of decimals used when formatting memory values
    """
    logs = []
    for code in prof.code_map:
        lines = prof.code_map[code]
        if not lines:
            # .. measurements are empty ..
            continue
        filename = code.co_filename
        if filename.endswith((".pyc", ".pyo")):
            # Map compiled files back to their source file.
            filename = filename[:-1]
        logger.debug('Filename: ' + filename + '\n\n')
        if not os.path.exists(filename):
            logger.error('ERROR: Could not find file ' + filename + '\n')
        if any([filename.startswith(k) for k in
                ("ipython-input", "<ipython-input")]):
            logger.info(
                "NOTE: memgraph can only be used on functions defined in "
                "physical files, and not in the IPython environment.")
            continue
        # Recover the source block of the profiled function so we can walk
        # its line numbers in order.
        all_lines = linecache.getlines(filename)
        sub_lines = inspect.getblock(all_lines[code.co_firstlineno - 1:])
        linenos = range(code.co_firstlineno,
                        code.co_firstlineno + len(sub_lines))
        # Baseline: memory at the first measured line of this code object.
        mem_old = lines[min(lines.keys())]
        float_format = '{0}.{1}f'.format(precision + 4, precision)
        template_mem = '{0:' + float_format + '}'
        for line in linenos:
            if line in lines:
                mem = lines[line]
                inc = mem - mem_old
                mem_old = mem
                mem = template_mem.format(mem)
                inc = template_mem.format(inc)
                # todo will be used in the future to make more sensitive
                values = (line, mem, inc, all_lines[line - 1])
                # NOTE: 'y' is the *formatted* memory string, not a float.
                logs.append({'x': line, 'y': mem})
    # todo will be used in the future
    csv_file = make_csv(logs, ['Line', 'Memory'])
    make_plot(logs, csv_file.replace('.csv', ''))
    remove_file(csv_file)
| {
"repo_name": "baranbartu/memgraph",
"path": "memgraph/profile.py",
"copies": "1",
"size": "2024",
"license": "bsd-2-clause",
"hash": -526635885364405060,
"line_mean": 36.4814814815,
"line_max": 78,
"alpha_frac": 0.5528656126,
"autogenerated": false,
"ratio": 4.039920159680639,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5092785772280639,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baranbartu'
import threading
import time
from celery.events import EventReceiver
class EventListener(threading.Thread):
    """Daemon thread that captures celery task events and forwards them to
    a context manager."""

    def __init__(self, celery_client, context_manager, enable_events=False):
        """
        :param celery_client: CeleryClient used to reach the workers
        :param context_manager: receiver of captured task events
        :param enable_events: when True, ask workers to emit events first
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.celery_client = celery_client
        self.context_manager = context_manager
        self.enable_events = enable_events

    def start(self):
        threading.Thread.start(self)

    def run(self):
        # Block until at least one worker is reachable.
        while not self.celery_client.workers():
            time.sleep(5)
        if self.enable_events:
            self.celery_client.enable_events()
        application = self.celery_client.application
        # Capture events forever, dispatching every one to on_event().
        with application.connection() as conn:
            receiver = EventReceiver(conn,
                                     handlers={"*": self.on_event},
                                     app=application)
            receiver.capture(limit=None, timeout=None, wakeup=True)

    def on_event(self, event):
        # Only task-* events are of interest here.
        if event['type'].startswith('task-'):
            self.context_manager.add_event(event)
| {
"repo_name": "baranbartu/djcelery-admin",
"path": "sample_project/celeryadmin/events.py",
"copies": "2",
"size": "1272",
"license": "mit",
"hash": 7888891892090209000,
"line_mean": 30.8,
"line_max": 76,
"alpha_frac": 0.5652515723,
"autogenerated": false,
"ratio": 4.608695652173913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6173947224473912,
"avg_score": null,
"num_lines": null
} |
__author__ = 'baranbartu'
def import_object(object_path):
    """imports and returns given class string.

    :param object_path: Class path as string
    :type object_path: str
    :returns: Class that has given path
    :rtype: class
    :Example:

    >>> import_object('collections.OrderedDict').__name__
    'OrderedDict'
    """
    try:
        # Use the stdlib importlib: django.utils.importlib was merely a
        # copy of it and has been removed from modern Django releases.
        from importlib import import_module
        module_name, _, attr_name = object_path.rpartition(".")
        mod = import_module(module_name)
        return getattr(mod, attr_name)
    except Exception as detail:
        # Normalize every failure mode (bad module, missing attribute) to
        # ImportError, as callers expect.
        raise ImportError(detail)
def nested_method(clazz, method, nested):
    """Return the function named ``nested`` that is defined inside the body
    of ``clazz.method``.

    Returns None if nonexistent.
    """
    from types import CodeType, FunctionType
    parent = getattr(clazz, method)
    # __code__ works on Python 2.6+ and Python 3; the original used the
    # py2-only func_code alias. The docstring also now precedes the import,
    # so it is a real docstring rather than a stray string statement.
    for const in parent.__code__.co_consts:
        if isinstance(const, CodeType) and const.co_name == nested:
            # Rebuild a callable from the raw code object (no closure).
            return FunctionType(const, globals())
| {
"repo_name": "baranbartu/djcelery-admin",
"path": "sample_project/celeryadmin/utils.py",
"copies": "2",
"size": "1074",
"license": "mit",
"hash": -6719141217791618000,
"line_mean": 28.027027027,
"line_max": 69,
"alpha_frac": 0.6415270019,
"autogenerated": false,
"ratio": 4.037593984962406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5679120986862406,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bartek'
import numpy
class NumpyRow(object):
    """Wraps a 2-D numpy row vector; iterating yields the flat indices of
    its truthy (non-zero) cells."""

    def __init__(self, array):
        self.v = array

    def __iter__(self):
        for index, value in enumerate(numpy.nditer(self.v)):
            if value:
                yield index
class NumpyMatrix(object):
    """A 0/1 incidence matrix with constructors from blocks (rows) or
    columns and element-wise containment helpers."""

    def __init__(self, array):
        self._m = array
        self._row_size = numpy.size(self._m, 0)
        self._col_size = numpy.size(self._m, 1)

    @classmethod
    def _make_matrix(cls, list_of_set):
        """Build a len(list_of_set) x (max_element + 1) 0/1 matrix where
        entry (i, e) is 1 iff e is in list_of_set[i]."""
        maxx = max(max(sett) for sett in list_of_set)
        iterable_length = len(list_of_set)
        matrix = numpy.zeros(shape=(iterable_length, maxx + 1))
        for index, sett in enumerate(list_of_set):
            for el in sett:
                matrix[index, el] = 1
        return matrix

    @classmethod
    def make_matrix_from_blocks(cls, list_of_set):
        """Each set describes one row (block)."""
        matrix = cls._make_matrix(list_of_set)
        return cls(matrix)

    @classmethod
    def make_matrix_from_columns(cls, list_of_columns):
        """Each set describes one column."""
        matrix = cls._make_matrix(list_of_columns)
        return cls(numpy.transpose(matrix))

    def sum_columns(self, combination):
        """Sum an iterable of NumpyColumn objects, starting from a zero
        column of matching height.

        BUG FIX: the zero column was built with xrange(), which does not
        exist under Python 3; a plain list multiply is portable and
        equivalent.
        """
        zero = NumpyColumn.make_column_from_iter([0] * self._row_size)
        return sum(combination, zero)

    def get_column(self, column_index):
        """Return the column at column_index as a NumpyColumn."""
        return NumpyColumn(self._m[:, [column_index]])

    def _get_row(self, row_index):
        return NumpyRow(self._m[[row_index], :])

    def column_contained(self, column_index, column_containing):
        """True iff column ``column_index`` is element-wise contained in
        ``column_containing``."""
        column = self.get_column(column_index)
        return column.contained_in(column_containing)

    def get_columns(self):
        """All columns as NumpyColumn objects."""
        col_count = numpy.size(self._m, 1)
        return [self.get_column(i) for i in range(col_count)]

    def get_blocks(self):
        # Lazily yields NumpyRow views of each row.
        row_count = numpy.size(self._m, 0)
        return (self._get_row(i) for i in range(row_count))

    def transpose(self):
        """Return a new NumpyMatrix that is the transpose of this one."""
        return NumpyMatrix(numpy.transpose(self._m))
class NumpyColumn(object):
    """A 2-D numpy column vector supporting addition and element-wise
    containment tests."""

    def __init__(self, array):
        self.v = array

    @classmethod
    def make_column(cls, combination, length):
        """Build a length x 1 0/1 column with ones at the given indices."""
        vector = numpy.zeros([length, 1])
        for index in combination:
            vector[index, 0] = 1
        return cls(vector)

    @classmethod
    def make_column_from_iter(cls, it):
        """Build a column from any iterable of values."""
        flat = numpy.array(it)
        return cls(flat.reshape(flat.size, 1))

    def __add__(self, other):
        return NumpyColumn(self.v + other.v)

    def contained_in(self, column_containing):
        """True iff every 1-entry of this column is also set in
        ``column_containing``."""
        gaps = (1 - self.v) + column_containing.v
        return (gaps > 0).all()
| {
"repo_name": "szredinger/graph-constr-group-testing",
"path": "graph_constr_group_testing/block_design/matrix_operations.py",
"copies": "1",
"size": "2525",
"license": "mit",
"hash": -6843268085168112000,
"line_mean": 26.7472527473,
"line_max": 103,
"alpha_frac": 0.5912871287,
"autogenerated": false,
"ratio": 3.4779614325068873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9507742860964923,
"avg_score": 0.012301140048392795,
"num_lines": 91
} |
__author__ = 'bartek'
from py2neo import Relationship
class Security:
    """Helpers for creating and checking permission relationships stored in
    the graph database."""

    KNOWS = "KNOWS"
    SECURITY = "SECURITY"
    IS_MEMBER_OF = "IS_MEMBER_OF"

    def __init__(self):
        pass

    def __int__(self):
        pass

    @staticmethod
    def create_permission(db, entity, resource, permissions):
        """Attach a SECURITY relationship carrying ``permissions``."""
        sec = Relationship(entity, Security.SECURITY, resource,
                           permissions=permissions)
        db.create(sec)

    @staticmethod
    def add_person_to_group(db, person, group):
        """Link ``person`` into ``group`` via IS_MEMBER_OF."""
        sec = Relationship(group, Security.IS_MEMBER_OF, person)
        db.create(sec)

    @staticmethod
    def __accessable(db, user, resource, permissions):
        # Access is granted when any SECURITY relation between user and
        # resource carries the requested permission bits.
        for relation in db.match(user, Security.SECURITY, resource):
            if relation[Security.SECURITY].contains(permissions):
                return True
        return False

    @staticmethod
    def has_access(db, user, resource):
        """True when ``user`` holds read permission on ``resource``."""
        return Security.__accessable(db, user, resource, Permissions.R)

    @staticmethod
    def can_write(db, user, resource):
        """True when ``user`` holds write permission on ``resource``."""
        return Security.__accessable(db, user, resource, Permissions.W)
class Permissions:
    # Permission flags stored on SECURITY relationships.
    RW = "RW"
    R = "R"
    W = "W"
    def __init__(self):
        pass
| {
"repo_name": "mobile2015/neoPyth",
"path": "app/models/security.py",
"copies": "1",
"size": "1214",
"license": "bsd-2-clause",
"hash": 4861220892020832000,
"line_mean": 22.3461538462,
"line_max": 88,
"alpha_frac": 0.6169686985,
"autogenerated": false,
"ratio": 3.878594249201278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4995562947701278,
"avg_score": null,
"num_lines": null
} |
from .instance_manager import VRouterHostedManager
from vnc_api.vnc_api import *
from .config_db import VirtualRouterSM, VirtualMachineSM
# Manager for service instances (Docker or KVM) hosted on selected vrouter
class VRouterInstanceManager(VRouterHostedManager):
    """Manager for service instances (Docker or KVM) hosted on a selected
    vrouter."""

    def _associate_vrouter(self, si, vm):
        """Ensure ``vm`` is attached to the vrouter selected in ``si``.

        Detaches the VM from any stale vrouter first, then attaches it to
        si.vr_id. Returns the name of the vrouter hosting the VM (or None).
        """
        vrouter_name = None
        vr_obj = None
        vm_obj = VirtualMachine()
        vm_obj.uuid = vm.uuid
        vm_obj.fq_name = vm.fq_name
        if vm.virtual_router:
            vr = VirtualRouterSM.get(vm.virtual_router)
            if si.vr_id == vr.uuid:
                # Already on the requested vrouter; nothing to change.
                vrouter_name = vr.name
            else:
                # Remove the VM from its current (stale) vrouter before
                # re-homing it.
                vr_obj = VirtualRouter()
                vr_obj.uuid = vr.uuid
                vr_obj.fq_name = vr.fq_name
                vr_obj.del_virtual_machine(vm_obj)
                self._vnc_lib.virtual_router_update(vr_obj)
                self.logger.info("vm %s deleted from vrouter %s" %
                    (vm_obj.get_fq_name_str(), vr_obj.get_fq_name_str()))
                vm.virtual_router = None
        if not vm.virtual_router:
            # Attach the VM to the vrouter chosen in the service instance.
            vr = VirtualRouterSM(si.vr_id)
            vr_obj = VirtualRouter()
            vr_obj.uuid = vr.uuid
            vr_obj.fq_name = vr.fq_name
            vr_obj.add_virtual_machine(vm_obj)
            self._vnc_lib.virtual_router_update(vr_obj)
            self.logger.info("vrouter %s updated with vm %s" %
                             (':'.join(vr_obj.get_fq_name()), vm.name))
            vrouter_name = vr_obj.get_fq_name()[-1]
        return vrouter_name

    def create_service(self, st, si):
        """Create/launch all VM instances for service instance ``si`` and
        pin each one to the selected vrouter.

        :param st: service template state machine object
        :param si: service instance state machine object
        """
        if not self.validate_network_config(st, si):
            return

        # get current vm list
        vm_list = [None] * si.max_instances
        for vm_id in si.virtual_machines:
            vm = VirtualMachineSM.get(vm_id)
            vm_list[vm.index] = vm

        # create and launch vm
        si.state = 'launching'
        instances = []
        for index in range(0, si.max_instances):
            vm = self._check_create_netns_vm(index, si, st, vm_list[index])
            if not vm:
                continue

            vr_name = self._associate_vrouter(si, vm)
            instances.append({'uuid': vm.uuid, 'vr_name': vr_name})

        # uve trace
        si.state = 'active'
        self.logger.uve_svc_instance((':').join(si.fq_name),
                                     status='CREATE', vms=instances,
                                     st_name=(':').join(st.fq_name))
| {
"repo_name": "eonpatapon/contrail-controller",
"path": "src/config/svc-monitor/svc_monitor/vrouter_instance_manager.py",
"copies": "5",
"size": "2470",
"license": "apache-2.0",
"hash": -8180703730598770000,
"line_mean": 35.3235294118,
"line_max": 75,
"alpha_frac": 0.5534412955,
"autogenerated": false,
"ratio": 3.5185185185185186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001399235010900016,
"num_lines": 68
} |
from .instance_manager import VRouterHostedManager
from vnc_api.vnc_api import *
class VRouterInstanceManager(VRouterHostedManager):
    """
    Manager for service instances (Docker or KVM) hosted on selected VRouter
    """
    def create_service(self, st_obj, si_obj):
        """Create the (single) VM for the service instance and pin it to
        the vrouter selected in the instance properties.

        :param st_obj: service template object
        :param si_obj: service instance object
        """
        self.logger.log_info("Creating new VRouter instance!")
        si_props = si_obj.get_service_instance_properties()
        st_props = st_obj.get_service_template_properties()
        if st_props is None:
            self.logger.log_error("Cannot find service template associated to "
                                  "service instance %s" % si_obj.get_fq_name_str())
            return
        self.db.service_instance_insert(si_obj.get_fq_name_str(),
                                        {'max-instances': str(1),
                                         'state': 'launching'})
        # populate nic information
        nics = self._get_nic_info(si_obj, si_props, st_props)
        # this type can have only one instance
        instance_name = self._get_instance_name(si_obj, 0)
        try:
            vm_obj = self._vnc_lib.virtual_machine_read(
                fq_name=[instance_name], fields="virtual_router_back_refs")
            self.logger.log_info("VM %s already exists" % instance_name)
        except NoIdError:
            vm_obj = VirtualMachine(instance_name)
            self._vnc_lib.virtual_machine_create(vm_obj)
            self.logger.log_info("VM %s created" % instance_name)
        si_refs = vm_obj.get_service_instance_refs()
        if (si_refs is None) or (si_refs[0]['to'][0] == 'ERROR'):
            vm_obj.set_service_instance(si_obj)
            self._vnc_lib.virtual_machine_update(vm_obj)
            self.logger.log_info("VM %s updated with SI %s" %
                                 (instance_name, si_obj.get_fq_name_str()))
        # Create virtual machine interfaces with an IP on networks
        for nic in nics:
            vmi_obj = self._create_svc_vm_port(nic, instance_name,
                                               st_obj, si_obj)
            if vmi_obj.get_virtual_machine_refs() is None:
                vmi_obj.set_virtual_machine(vm_obj)
                self._vnc_lib.virtual_machine_interface_update(vmi_obj)
                self.logger.log_info("VMI %s updated with VM %s" %
                                     (vmi_obj.get_fq_name_str(), instance_name))
        vrouter_name = None
        state = 'pending'
        vrouter_back_refs = getattr(vm_obj, "virtual_router_back_refs", None)
        vr_id = si_props.get_virtual_router_id()
        if (vrouter_back_refs is not None
                and vrouter_back_refs[0]['uuid'] != vr_id):
            # The VM currently lives on a different vrouter: detach it from
            # *that* vrouter before re-homing.
            # BUG FIX: this previously read the chosen vrouter (vr_id)
            # rather than the one the VM is actually attached to, and never
            # persisted the removal with virtual_router_update().
            vr_obj = self._vnc_lib.virtual_router_read(
                id=vrouter_back_refs[0]['uuid'])
            if vr_obj:
                vr_obj.del_virtual_machine(vm_obj)
                self._vnc_lib.virtual_router_update(vr_obj)
                self.logger.log_info("VM %s removed from VRouter %s" %
                                     (instance_name, ':'.join(vr_obj.get_fq_name())))
                vrouter_back_refs = None
        # Associate instance on the selected vrouter
        if vrouter_back_refs is None:
            vr_obj = self._vnc_lib.virtual_router_read(
                id=vr_id)
            if vr_obj:
                vr_obj.add_virtual_machine(vm_obj)
                chosen_vr_fq_name = vr_obj.get_fq_name()
                vrouter_name = chosen_vr_fq_name[-1]
                self._vnc_lib.virtual_router_update(vr_obj)
                state = 'active'
                self.logger.log_info("Info: VRouter %s updated with VM %s" %
                                     (':'.join(chosen_vr_fq_name), instance_name))
        else:
            # Already on the chosen vrouter.
            vrouter_name = vrouter_back_refs[0]['to'][-1]
            state = 'active'
        vm_db_entry = self._set_vm_db_info(0, instance_name,
                                           vm_obj.uuid, state, vrouter_name)
        self.db.service_instance_insert(si_obj.get_fq_name_str(),
                                        vm_db_entry)
        # uve trace
        self.logger.uve_svc_instance(si_obj.get_fq_name_str(),
                                     status='CREATE',
                                     vms=[{'uuid': vm_obj.uuid,
                                           'vr_name': vrouter_name}],
                                     st_name=st_obj.get_fq_name_str())
| {
"repo_name": "srajag/contrail-controller",
"path": "src/config/svc-monitor/svc_monitor/vrouter_instance_manager.py",
"copies": "2",
"size": "4367",
"license": "apache-2.0",
"hash": 8680883868515995000,
"line_mean": 44.4895833333,
"line_max": 79,
"alpha_frac": 0.5321731166,
"autogenerated": false,
"ratio": 3.710280373831776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5242453490431775,
"avg_score": null,
"num_lines": null
} |
import string
import re
import sgmllib
from Bio import File
from Bio.WWW import NCBI
# Search parameters MUST be defined before the query is issued -- the
# original script called NCBI.query() first, which raised NameError.
search_command = 'Search'
search_database = 'Nucleotide'
return_format = 'FASTA'
search_term = 'Cypripedioideae'
my_browser = 'lynx'

# Run the Entrez query and obtain the raw result handle.
result_handle = NCBI.query(search_command, search_database, term=search_term,
                           doptcmdl=return_format)

import os
# Persist the HTML response so an external browser can display it.
result_file_name = os.path.join(os.getcwd(), 'results.html')
result_file = open(result_file_name, 'w')
result_file.write(result_handle.read())
result_file.close()
if my_browser == 'lynx':
    os.system('lynx -force_html ' + result_file_name)
elif my_browser == 'netscape':
    os.system('netscape file:' + result_file_name)
from Bio import SeqIO
# Interactively prompt for a subsequence on every FASTA record and report
# whether it occurs in the record's sequence.
for seq_record in SeqIO.parse("seq.fasta", "fasta"):
    # print(seq_record.seq)
    x = raw_input('--> ')
    if x in seq_record.seq:
        # 'JEST' is Polish for "it is (present)" -- runtime output, kept as-is.
        print 'JEST'
    #statement(s) = "AAACATGAAGG" in seq_record.seq:
    # if expression:
    # statement(s)
    # else:
    # statement(s)
| {
"repo_name": "dziq/biopython",
"path": "scripts/script_test.py",
"copies": "1",
"size": "1032",
"license": "mit",
"hash": 3048500326382344000,
"line_mean": 26.1578947368,
"line_max": 104,
"alpha_frac": 0.6889534884,
"autogenerated": false,
"ratio": 2.789189189189189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8810494340199355,
"avg_score": 0.03352966747796688,
"num_lines": 38
} |
__author__ = 'basca'
from cysparql import *
import time
q = '''
SELECT ?mail ?phone ?doctor
WHERE {
?professor <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#emailAddress> ?mail .
?professor <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#telephone> ?phone .
?professor <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#doctoralDegreeFrom> ?doctor .
?professor <http://www.lehigh.edu/~zhp2/2004/0401/univ-bench.owl#name> "FullProfessor1" .
} LIMIT 11
'''
q = '''
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX wgs84_pos: <http://www.w3.org/2003/01/geo/wgs84_pos#>
SELECT *
FROM <http://dbpedia.org>
WHERE {
<http://dbpedia.org/resource/Karl_Gebhardt> rdfs:label ?label .
OPTIONAL {
<http://dbpedia.org/resource/Karl_Gebhardt> wgs84_pos:lat ?lat .
<http://dbpedia.org/resource/Karl_Gebhardt> wgs84_pos:long ?long
}
FILTER (lang(?label) = 'en')
}
'''
#t1= time.time()
#qry = Query(q)
#print qry.query_string
#print "Took ", float(time.time()-t1) * 1000.0," ms"
#qry.debug()
#print qry.triple_patterns
#
#print '--------'
#for tp in qry.triple_patterns:
# print 'SUBJECT = ',tp.subject
# print 'PREDICATE = ',tp.predicate
# print 'OBJECT = ',tp.object
# print 'iter...'
# for part in tp:
# print type(part), part
#
#print '-------'
#print qry.vars
#print qry.projections
#for var in qry.vars:
# print var.name
#print '-------'
#print qry.graph_patterns
#for gp in qry.graph_patterns:
# print gp
#
#print '---------'
#label = qry.get_variable(0)
#print label.name, label.n3()
#
#print qry.variables['label'].n3()
#
#vt = qry.create_vars_table()
#user2 = vt.add_new_variable("user2")
#print user2, type(user2)
#vt.add_variable(list(qry.projections)[0])
#print list(qry.projections)[0]
#n = list(qry.projections)[0].name
#print 'name ',n
#v1 = vt['user2']
#print 'v1 = ',v1
#v2 = vt[n]
#print 'v2 = ',v2
#del vt
#print v2
#print v1
# FROM NAMED STREAM <http://www.cwi.nl/SRBench/observations>
# [NOW - 1 HOURS SLIDE 10 MINUTES]
#q_string = '''
#SELECT ?drug WHERE {
# ?drug1 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/possibleDiseaseTarget> <http://www4.wiwiss.fu-berlin.de/diseasome/resource/diseases/302> .
# ?drug2 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/possibleDiseaseTarget> <http://www4.wiwiss.fu-berlin.de/diseasome/resource/diseases/53> .
# ?drug3 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/possibleDiseaseTarget> <http://www4.wiwiss.fu-berlin.de/diseasome/resource/diseases/59> .
# ?drug4 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/possibleDiseaseTarget> <http://www4.wiwiss.fu-berlin.de/diseasome/resource/diseases/105> .
# ?I1 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/interactionDrug2> ?drug1 .
# ?I1 <http://www4.wiwiss.fu-berlin.de/drugba nk/resource/drugbank/interactionDrug1> ?drug .
# ?I2 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/interactionDrug2> ?drug2 .
# ?I2 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/interactionDrug1> ?drug .
# ?I3 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/interactionDrug2> ?drug3 .
# ?I3 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/interactionDrug1> ?drug .
# ?I4 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/interactionDrug2> ?drug4 .
# ?I4 <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/interactionDrug1> ?drug .
# ?drug <http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugbank/casRegistryNumber> ?id .
# ?keggDrug <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://bio2rdf.org/ns/kegg#Drug> .
# ?keggDrug <http://bio2rdf.org/ns/bio2rdf#xRef> ?id .
# ?keggDrug <http://purl.org/dc/elements/1.1/title> ?title .
#}
#'''
#
#q = Query(q_string)
##q.debug()
#
#for triple in q:
# print 'TRIPLE PATTERN -> ',triple
#
#
##print ''
##q.debug()
#
#for v in q.variables:
# print 'VARIABLE ---> ',v
#
#for gp in q.query_graph_pattern:
# print 'GP -> ',gp
# #gp.debug()
# print
# for t in gp.triple_patterns:
# print '\t',t
print 'Q1'
Query('''
select * where {
?person <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://example.com/Person>
}
''').debug()
print 'Q2'
Query('''
select * where {
?www.w3.org/1999/02/22-rdf-syntax-ns#type <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://example.com/Person>
}
''').debug() | {
"repo_name": "cosminbasca/cysparql",
"path": "utils/bench_query.py",
"copies": "1",
"size": "4515",
"license": "apache-2.0",
"hash": 8494159827042808000,
"line_mean": 32.4518518519,
"line_max": 162,
"alpha_frac": 0.6631229236,
"autogenerated": false,
"ratio": 2.3626373626373627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35257602862373627,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bashao'
import os
import sys
import time
import random
import pickle
import smbus
import time
from temperature import Temperature
import RPi.GPIO as GPIO
import subprocess
import traceback
from Daemon import Daemon
from Logger import Logger
class OpticBubble(Daemon):
    """Daemonized optical bubble counter for a fermentation monitor.

    Polls an I2C ADC (address 0x48; the 0x40|channel control byte looks
    like a PCF8591 -- TODO confirm) for the optical sensor reading,
    detects bubbles as spikes of the short-term average over the
    long-term average, and logs bubble counts plus a DS18B20 (1-wire)
    temperature at 1-, 5- and 15-minute intervals.

    NOTE: Python 2 only (`except Exception, exc` syntax, print statements).
    """
    #Temp vars
    # Base directory of the 1-wire bus devices exposed by w1-gpio/w1-therm.
    DEVICESDIR = "/sys/bus/w1/devices/"
    # NOTE(review): "28-00000xxxxxxx" is a placeholder serial -- must be
    # replaced with the real DS18B20 device id for temperature reads to work.
    TEMP_SENSOR_FILE = os.path.join(DEVICESDIR, "28-00000xxxxxxx", "w1_slave")
    def __init__(self):
        """Set up GPIO, the I2C bus, 1-wire kernel modules and log files."""
        Daemon.__init__(self, pidfile="OpticBubble_PID", stdout="bubble_stdout.txt", stderr="bubble_stderr.txt")
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(17, GPIO.OUT)  # indicator output, driven high while a bubble is detected
        self.bus = smbus.SMBus(1)
        # Load the 1-wire GPIO and thermometer kernel modules for the DS18B20.
        subprocess.call(["sudo", "modprobe", "w1-gpio"])
        subprocess.call(["sudo", "modprobe", "w1-therm"])
        self.logger = Logger("OpticBubble", ".", "bubble.log")
        # Interval counters: elapsed interval index and bubbles seen in the
        # current interval, one pair per logging cadence (1/5/15 minutes).
        self.oneTime = 0
        self.fiveTime = 0
        self.fithteenTime = 0.0  # counted in quarter hours (+0.25 per tick)
        self.oneMinStats = 0
        self.fiveMinStats = 0
        self.fithteenMinStats = 0
        # Wall-clock timestamps marking the start of each interval.
        self.oneT = time.time()
        self.fiveT = time.time()
        self.fithteenT = time.time()
        # TSV logs are opened in append mode and kept open for the daemon's lifetime.
        self.oneFile = open("Fermentation/OneMinLog.tsv", 'a')
        self.fiveFile = open("Fermentation/FiveMinLog.tsv", 'a')
        self.fithteenFile = open("Fermentation/FithteenMinLog.tsv", 'a')
    def readTempFile(self):
        """Read the raw w1_slave sysfs file.

        Returns the list of lines on success, or "" on any error.
        NOTE(review): the "" failure value is indexed as data[0] by
        getTemp(), which raises IndexError there; run()'s broad except
        catches it, but returning e.g. None/[] and checking would be cleaner.
        """
        try:
            sensorFile = open(OpticBubble.TEMP_SENSOR_FILE, "r")
            lines = sensorFile.readlines()
            sensorFile.close()
        except Exception, exc:
            self.logger.log(3, "Got exception in reading temperature file: %s"%traceback.format_exc())
            return ""
        return lines
    def getTemp(self):
        """Parse the DS18B20 reading.

        Returns a Temperature instance on success, False if the CRC line
        does not end in YES or no "t=" field is present.
        """
        data = self.readTempFile()
        #the output from the tempsensor looks like this
        #f6 01 4b 46 7f ff 0a 10 eb : crc=eb YES
        #f6 01 4b 46 7f ff 0a 10 eb t=31375
        #has a YES been returned?
        if data[0].strip()[-3:] == "YES":
            #can I find a temperature (t=)
            equals_pos = data[1].find("t=")
            if equals_pos != -1:
                tempData = data[1][equals_pos+2:]
                #update temperature
                temperature = Temperature(tempData)
                #self.logger.log(1, "Temperature is %f"%(temperature.C))
                return temperature
            else:
                return False
        else:
            return False
    def run(self):
        """Main daemon loop: sample the ADC, detect bubbles, log stats.

        Bubble detection: keep a long history (last 1000 samples) and a
        short one (last 5); a bubble starts when the short-term mean
        deviates from the long-term mean by more than 40, and ends once
        the deviation drops to 20 or less (hysteresis).
        """
        aout = 0
        t = 0
        hist = []
        rhist = []
        norm = 0
        inBubble = False
        # NOTE(review): this local shadows self.fiveMinStats; it is
        # incremented below but never read or logged -- looks vestigial.
        fiveMinStats = 0
        t = time.time()
        while True:
            try:
                for a in range(0,4):
                    # NOTE(review): aout grows without bound; once it
                    # exceeds 255 the SMBus write should fail and be
                    # swallowed by the broad except below -- confirm intent.
                    aout = aout + 1
                    # 0x40 | channel control byte: select analog channel,
                    # then read the conversion result back.
                    self.bus.write_byte_data(0x48,0x40 | ((a+1) & 0x03), aout)
                    t = self.bus.read_byte(0x48)
                    if(a == 3):
                        hist.append(t)
                        rhist.append(t)
                # Trim histories to their sliding-window lengths.
                hist = hist[-1000:]
                rhist = rhist[-5:]
                norm = float(sum(hist))/len(hist)
                rnorm = float(sum(rhist))/len(rhist)
                if(abs(rnorm - norm) > 40) and not inBubble:
                    inBubble = True
                    print "ding " + str(abs(rnorm - norm))
                    self.fiveMinStats += 1
                    self.fithteenMinStats += 1
                    self.oneMinStats += 1
                    fiveMinStats += 1
                    GPIO.output(17, True)
                elif abs(rnorm - norm) <= 20:
                    GPIO.output(17, False)
                    inBubble = False
                # 1-minute rollup: write "<interval>\t<bubbles>\t<tempC>";
                # -100 is the sentinel for a failed temperature read.
                if time.time()-self.oneT >= 60:
                    temp = self.getTemp()
                    if temp:
                        temp = temp.C
                    else:
                        temp = float(-100)
                    line = "%f\t%d\t%f\n"%(self.oneTime, self.oneMinStats, temp)
                    self.logger.log(1, line)
                    self.oneFile.write(line)
                    self.oneFile.flush()
                    self.oneTime += 1
                    self.oneMinStats = 0
                    self.oneT = time.time()
                # 5-minute rollup: also pickles the count for other consumers.
                if time.time()-self.fiveT >= 300:
                    temp = self.getTemp()
                    if temp:
                        temp = temp.C
                    else:
                        temp = float(-100)
                    pklFile = "Fermentation/stats2/" + str(time.time()) + ".pkl"
                    self.logger.log(1, "Dumping %d to %s"%(self.fiveMinStats, pklFile))
                    line = "%d\t%d\t%f\n"%(self.fiveTime, self.fiveMinStats, temp)
                    pickle.dump(self.fiveMinStats,open(pklFile,'w'))
                    self.logger.log(1, "Done writing pickle, about to write to 5 min file: %s"%(line))
                    self.fiveFile.write(line)
                    self.fiveFile.flush()
                    self.fiveTime += 1
                    self.fiveMinStats = 0
                    self.fiveT = time.time()
                # 15-minute rollup; fithteenTime advances by quarter hours.
                if time.time()-self.fithteenT >= 900:
                    temp = self.getTemp()
                    if temp:
                        temp = temp.C
                    else:
                        temp = float(-100)
                    line = "%f\t%d\t%f\n"%(self.fithteenTime, self.fithteenMinStats, temp)
                    self.logger.log(1, line)
                    self.fithteenFile.write(line)
                    self.fithteenFile.flush()
                    self.fithteenTime += 0.25
                    self.fithteenMinStats = 0
                    self.fithteenT = time.time()
            except Exception, e:
                # Broad catch keeps the daemon alive through transient
                # sensor/bus errors; the traceback is logged at level 3.
                print "Unexpected error: ", sys.exc_info()[0]
                self.logger.log(3, traceback.format_exc())
# Entry point: construct the bubble counter and daemonize it
# (Daemon.start() forks and eventually calls run()).
if __name__ == "__main__":
    bubble = OpticBubble()
    bubble.start()
| {
"repo_name": "bashao/FermBot",
"path": "OpticBubble.py",
"copies": "1",
"size": "5995",
"license": "mit",
"hash": -1724236513128511200,
"line_mean": 36.46875,
"line_max": 112,
"alpha_frac": 0.470058382,
"autogenerated": false,
"ratio": 3.9833887043189367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4953447086318936,
"avg_score": null,
"num_lines": null
} |
import logging
# BMP280 default address.
BMP280_I2CADDR = 0x77
# Chip-id register; the driver expects it to read back 0x58 for a BMP280.
BMP280_CHIPID = 0xD0
# BMP280 Registers
# Factory trim (calibration) coefficient registers, read once at startup.
BMP280_DIG_T1 = 0x88 # R Unsigned Calibration data (16 bits)
BMP280_DIG_T2 = 0x8A # R Signed Calibration data (16 bits)
BMP280_DIG_T3 = 0x8C # R Signed Calibration data (16 bits)
BMP280_DIG_P1 = 0x8E # R Unsigned Calibration data (16 bits)
BMP280_DIG_P2 = 0x90 # R Signed Calibration data (16 bits)
BMP280_DIG_P3 = 0x92 # R Signed Calibration data (16 bits)
BMP280_DIG_P4 = 0x94 # R Signed Calibration data (16 bits)
BMP280_DIG_P5 = 0x96 # R Signed Calibration data (16 bits)
BMP280_DIG_P6 = 0x98 # R Signed Calibration data (16 bits)
BMP280_DIG_P7 = 0x9A # R Signed Calibration data (16 bits)
BMP280_DIG_P8 = 0x9C # R Signed Calibration data (16 bits)
BMP280_DIG_P9 = 0x9E # R Signed Calibration data (16 bits)
# Measurement control register (written 0x3F by __init__).
BMP280_CONTROL = 0xF4
BMP280_RESET = 0xE0
BMP280_CONFIG = 0xF5
# Raw measurement data registers (20-bit values across 3 bytes each).
BMP280_PRESSUREDATA = 0xF7
BMP280_TEMPDATA = 0xFA
class BMP280(object):
    """Driver for the Bosch BMP280 pressure/temperature sensor over I2C.

    Implements the integer compensation pipeline from the BMP280
    datasheet on top of an Adafruit_GPIO.I2C device.

    NOTE(review): the pressure math relies on ``/`` being floor division
    on ints; under Python 3, ``/`` would produce floats and the later
    ``>>`` shifts would raise TypeError. As written this class is
    Python-2-only (or needs ``//`` to be portable).
    """
    def __init__(self, address=BMP280_I2CADDR, i2c=None, **kwargs):
        """Open the device at `address`, verify the chip id, load trim data.

        Raises Exception('Unsupported chip') if the chip-id register does
        not read 0x58.
        """
        self._logger = logging.getLogger('Adafruit_BMP.BMP280')
        # Create I2C device.
        if i2c is None:
            import Adafruit_GPIO.I2C as I2C
            i2c = I2C
        self._device = i2c.get_i2c_device(address, **kwargs)
        if self._device.readU8(BMP280_CHIPID) != 0x58:
            raise Exception('Unsupported chip')
        # Load calibration values.
        self._load_calibration()
        # 0x3F into ctrl_meas: oversampling settings + normal power mode
        # (presumably temp x1 / pressure x16 -- confirm against datasheet).
        self._device.write8(BMP280_CONTROL, 0x3F)
    def _load_calibration(self):
        """Read the factory trim coefficients (dig_T*, dig_P*) from the chip."""
        self.cal_t1 = int(self._device.readU16(BMP280_DIG_T1)) # UINT16
        self.cal_t2 = int(self._device.readS16(BMP280_DIG_T2)) # INT16
        self.cal_t3 = int(self._device.readS16(BMP280_DIG_T3)) # INT16
        self.cal_p1 = int(self._device.readU16(BMP280_DIG_P1)) # UINT16
        self.cal_p2 = int(self._device.readS16(BMP280_DIG_P2)) # INT16
        self.cal_p3 = int(self._device.readS16(BMP280_DIG_P3)) # INT16
        self.cal_p4 = int(self._device.readS16(BMP280_DIG_P4)) # INT16
        self.cal_p5 = int(self._device.readS16(BMP280_DIG_P5)) # INT16
        self.cal_p6 = int(self._device.readS16(BMP280_DIG_P6)) # INT16
        self.cal_p7 = int(self._device.readS16(BMP280_DIG_P7)) # INT16
        self.cal_p8 = int(self._device.readS16(BMP280_DIG_P8)) # INT16
        self.cal_p9 = int(self._device.readS16(BMP280_DIG_P9)) # INT16
        self._logger.debug('T1 = {0:6d}'.format(self.cal_t1))
        self._logger.debug('T2 = {0:6d}'.format(self.cal_t2))
        self._logger.debug('T3 = {0:6d}'.format(self.cal_t3))
        self._logger.debug('P1 = {0:6d}'.format(self.cal_p1))
        self._logger.debug('P2 = {0:6d}'.format(self.cal_p2))
        self._logger.debug('P3 = {0:6d}'.format(self.cal_p3))
        self._logger.debug('P4 = {0:6d}'.format(self.cal_p4))
        self._logger.debug('P5 = {0:6d}'.format(self.cal_p5))
        self._logger.debug('P6 = {0:6d}'.format(self.cal_p6))
        self._logger.debug('P7 = {0:6d}'.format(self.cal_p7))
        self._logger.debug('P8 = {0:6d}'.format(self.cal_p8))
        self._logger.debug('P9 = {0:6d}'.format(self.cal_p9))
    def _load_datasheet_calibration(self):
        # Set calibration from values in the datasheet example. Useful for debugging the
        # temp and pressure calculation accuracy.
        self.cal_t1 = 27504
        self.cal_t2 = 26435
        self.cal_t3 = -1000
        self.cal_p1 = 36477
        self.cal_p2 = -10685
        self.cal_p3 = 3024
        self.cal_p4 = 2855
        self.cal_p5 = 140
        self.cal_p6 = -7
        self.cal_p7 = 15500
        self.cal_p8 = -14500
        self.cal_p9 = 6000
    def read_raw(self, register):
        """Reads the raw (uncompensated) temperature or pressure from the sensor."""
        # Assemble the 20-bit sample from three consecutive bytes:
        # 16-bit big-endian MSB/LSB, then the top 4 bits of the XLSB byte.
        raw = self._device.readU16BE(register)
        raw <<= 8
        raw = raw | self._device.readU8(register + 2)
        raw >>= 4
        self._logger.debug('Raw value 0x{0:X} ({1})'.format(raw & 0xFFFF, raw))
        return raw
    def _compensate_temp(self, raw_temp):
        """ Compensate temperature.

        Returns the datasheet's ``t_fine`` intermediate value (integer
        formula from the BMP280 datasheet), which both the temperature
        and pressure conversions consume.
        """
        t1 = (((raw_temp >> 3) - (self.cal_t1 << 1)) *
              (self.cal_t2)) >> 11
        t2 = (((((raw_temp >> 4) - (self.cal_t1)) *
                ((raw_temp >> 4) - (self.cal_t1))) >> 12) *
              (self.cal_t3)) >> 14
        return t1 + t2
    def read_temperature(self):
        """Gets the compensated temperature in degrees celsius."""
        raw_temp = self.read_raw(BMP280_TEMPDATA)
        compensated_temp = self._compensate_temp(raw_temp)
        # Datasheet: T = (t_fine * 5 + 128) >> 8, in hundredths of a degree.
        temp = float(((compensated_temp * 5 + 128) >> 8)) / 100
        self._logger.debug('Calibrated temperature {0}'.format(temp))
        return temp
    def read_pressure(self):
        """Gets the compensated pressure in Pascals.

        Uses the datasheet's 64-bit integer pipeline; the raw value is in
        Q24.8 format, hence the final division by 256.
        NOTE(review): under Python 2, ``float(p / 256)`` floor-divides
        first, discarding the sub-Pascal fraction -- likely intended to
        be ``p / 256.0``.
        """
        raw_temp = self.read_raw(BMP280_TEMPDATA)
        compensated_temp = self._compensate_temp(raw_temp)
        raw_pressure = self.read_raw(BMP280_PRESSUREDATA)
        p1 = compensated_temp - 128000
        p2 = p1 * p1 * self.cal_p6
        p2 += (p1 * self.cal_p5) << 17
        p2 += self.cal_p4 << 35
        p1 = ((p1 * p1 * self.cal_p3) >> 8) + ((p1 * self.cal_p2) << 12)
        p1 = ((1 << 47) + p1) * (self.cal_p1) >> 33
        if 0 == p1:
            # Datasheet guard: avoid division by zero.
            return 0
        p = 1048576 - raw_pressure
        p = (((p << 31) - p2) * 3125) / p1
        p1 = (self.cal_p9 * (p >> 13) * (p >> 13)) >> 25
        p2 = (self.cal_p8 * p) >> 19
        p = ((p + p1 + p2) >> 8) + ((self.cal_p7) << 4)
        return float(p / 256)
    def read_altitude(self, sealevel_pa=101325.0):
        """Calculates the altitude in meters."""
        # Calculation taken straight from section 3.6 of the datasheet.
        pressure = float(self.read_pressure())
        altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0 / 5.255)))
        self._logger.debug('Altitude {0} m'.format(altitude))
        return altitude
    def read_sealevel_pressure(self, altitude_m=0.0):
        """Calculates the pressure at sealevel when given a known altitude in
        meters. Returns a value in Pascals."""
        pressure = float(self.read_pressure())
        p0 = pressure / pow(1.0 - altitude_m / 44330.0, 5.255)
        self._logger.debug('Sealevel pressure {0} Pa'.format(p0))
        return p0
| {
"repo_name": "josecastroleon/GroveWeatherPi",
"path": "Adafruit_Python_BMP/Adafruit_BMP/BMP280.py",
"copies": "1",
"size": "6652",
"license": "apache-2.0",
"hash": -5876016160522723000,
"line_mean": 39.3151515152,
"line_max": 89,
"alpha_frac": 0.5963619964,
"autogenerated": false,
"ratio": 2.7351973684210527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8830866158245494,
"avg_score": 0.00013864131511190337,
"num_lines": 165
} |
__author__ = 'Batchu Vishal'
from person import Person
'''
This class defines our player.
It inherits from the Person class since a Player is also a person.
We specialize the person by adding capabilities such as jump etc..
'''
class Player(Person):
    """The user-controlled character.

    Inherits from Person and adds jump/fall handling: gravity pulls the
    player down every frame unless a wall or ladder supports him.
    """

    def __init__(self, raw_image, position):
        super(Player, self).__init__(raw_image, position)
        self.isJumping = 0         # 1 while airborne (falling counts as jumping)
        self.onLadder = 0          # 1 while climbing; disables gravity handling
        self.currentJumpSpeed = 0  # vertical velocity; positive = moving up
        self.__gravity = 1  # Gravity affecting the jump velocity of the player
        self.__speed = 5  # Movement speed of the player

    # Getters and Setters
    def getSpeed(self):
        """Return the player's horizontal movement speed."""
        return self.__speed

    def setSpeed(self, speed=None):
        """Set the player's movement speed.

        Bug fix: the original method was a copy-pasted getter that only
        returned self.__speed and never stored anything. For backward
        compatibility, calling with no argument preserves the old
        behavior and simply returns the current speed unchanged.
        """
        if speed is not None:
            self.__speed = speed
        return self.__speed

    # This manages the player's jump; only the player can jump.
    def continuousUpdate(self, wallGroupList, ladderGroupList):
        """Apply gravity and resolve jump/fall collisions for one frame.

        Does nothing while the player is on a ladder.
        """
        if self.onLadder == 0:
            wallsCollided = self.checkCollision(wallGroupList)
            # If the player is not jumping, probe 2px below: with nothing
            # supporting him, start a fall (a jump with zero initial speed).
            if self.isJumping == 0:
                self.updateY(2)
                laddersCollided = self.checkCollision(ladderGroupList)
                wallsCollided = self.checkCollision(wallGroupList)
                self.updateY(-2)
                if len(wallsCollided) == 0 and len(laddersCollided) == 0:
                    self.isJumping = 1
                    self.currentJumpSpeed = 0
            if self.isJumping:
                if wallsCollided:
                    # Wall below us: land on top of it and stop the jump.
                    # 16 = wall size/2 + character size/2 + 1.
                    if wallsCollided[0].getPosition()[1] > self.getPosition()[1]:
                        self.isJumping = 0
                        self.setPosition(
                            (self.getPosition()[0],
                             wallsCollided[0].getPosition()[1] - 16))
                    # Wall above us: hit the ceiling, zero the velocity so
                    # gravity takes over and the player falls.
                    elif wallsCollided[0].getPosition()[1] < self.getPosition()[1]:
                        self.currentJumpSpeed = 0
                        self.setPosition(
                            (self.getPosition()[0],
                             wallsCollided[0].getPosition()[1] + 16))
                    self.setCenter(self.getPosition())
                # Still airborne: advance by the current velocity, then
                # apply gravity with a terminal fall speed of 8.
                if self.isJumping:
                    self.updateY(-self.currentJumpSpeed)
                    self.setCenter(self.getPosition())
                    self.currentJumpSpeed -= self.__gravity
                    if self.currentJumpSpeed < -8:
                        self.currentJumpSpeed = -8
| {
"repo_name": "erilyth/PyGame-Learning-Environment",
"path": "ple/games/donkeykong/player.py",
"copies": "1",
"size": "3213",
"license": "mit",
"hash": 3629559074728778000,
"line_mean": 46.9552238806,
"line_max": 143,
"alpha_frac": 0.5751633987,
"autogenerated": false,
"ratio": 4.092993630573249,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021464090458674367,
"num_lines": 67
} |
__author__ = 'Batchu Vishal'
from .person import Person
'''
This class defines our player.
It inherits from the Person class since a Player is also a person.
We specialize the person by adding capabilities such as jump etc..
'''
class Player(Person):
    """The user-controlled character.

    Inherits from Person and adds jump/fall handling: gravity pulls the
    player down every frame unless a wall or ladder supports him.
    """

    def __init__(self, raw_image, position, width, height):
        super(Player, self).__init__(raw_image, position, width, height)
        self.isJumping = 0         # 1 while airborne (falling counts as jumping)
        self.onLadder = 0          # 1 while climbing; disables gravity handling
        self.currentJumpSpeed = 0  # vertical velocity; positive = moving up
        self.__gravity = 0.85  # Gravity affecting the jump velocity of the player
        self.__speed = 5  # Movement speed of the player

    # Getters and Setters
    def getSpeed(self):
        """Return the player's horizontal movement speed."""
        return self.__speed

    def setSpeed(self, speed=None):
        """Set the player's movement speed.

        Bug fix: the original method was a copy-pasted getter that only
        returned self.__speed and never stored anything. For backward
        compatibility, calling with no argument preserves the old
        behavior and simply returns the current speed unchanged.
        """
        if speed is not None:
            self.__speed = speed
        return self.__speed

    # This manages the player's jump; only the player can jump.
    def continuousUpdate(self, wallGroupList, ladderGroupList):
        """Apply gravity and resolve jump/fall collisions for one frame.

        Does nothing while the player is on a ladder.
        """
        if self.onLadder == 0:
            wallsCollided = self.checkCollision(wallGroupList)
            # If the player is not jumping, probe 2px below: with nothing
            # supporting him, start a fall (a jump with zero initial speed).
            if self.isJumping == 0:
                self.updateY(2)
                laddersCollided = self.checkCollision(ladderGroupList)
                wallsCollided = self.checkCollision(wallGroupList)
                self.updateY(-2)
                if len(wallsCollided) == 0 and len(laddersCollided) == 0:
                    self.isJumping = 1
                    self.currentJumpSpeed = 0
            if self.isJumping:
                if wallsCollided:
                    # Wall below us: land on top of it and stop the jump
                    # (offset by character height + 1).
                    if wallsCollided[0].getPosition()[1] > self.getPosition()[1]:
                        self.isJumping = 0
                        self.setPosition(
                            (self.getPosition()[0],
                             wallsCollided[0].getPosition()[1] - (self.height + 1)))
                    # Wall above us: hit the ceiling, zero the velocity so
                    # gravity takes over and the player falls.
                    elif wallsCollided[0].getPosition()[1] < self.getPosition()[1]:
                        self.currentJumpSpeed = 0
                        self.setPosition(
                            (self.getPosition()[0],
                             wallsCollided[0].getPosition()[1] + (self.height + 1)))
                    self.setCenter(self.getPosition())
                # Still airborne: advance by the current velocity, then
                # apply gravity with a terminal fall speed of 8.
                if self.isJumping:
                    self.updateY(-self.currentJumpSpeed)
                    self.setCenter(self.getPosition())
                    self.currentJumpSpeed -= self.__gravity
                    if self.currentJumpSpeed < -8:
                        self.currentJumpSpeed = -8
| {
"repo_name": "ntasfi/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/player.py",
"copies": "1",
"size": "3438",
"license": "mit",
"hash": -8764624992329837000,
"line_mean": 44.84,
"line_max": 97,
"alpha_frac": 0.5497382199,
"autogenerated": false,
"ratio": 4.213235294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5262973514017646,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import math
import sys
import os
from person import Person
from onBoard import OnBoard
from coin import Coin
from player import Player
from fireball import Fireball
from donkeyKongPerson import DonkeyKongPerson
'''
This class defines our gameboard.
A gameboard contains everthing related to our game on it like our characters, walls, ladders, coins etc
The generation of the level also happens in this class.
'''
class Board:
    """The game board for the Donkey-Kong-style level.

    Owns every on-screen object (player, enemies, princess ally, walls,
    ladders, coins, fireballs), procedurally generates the level into
    ``self.map`` (grid of 15px cells: 0 empty, 1 wall, 2 ladder, 3 coin)
    and keeps the pygame sprite groups in sync with the object lists.

    NOTE(review): all grid arithmetic uses ``/`` on ints and therefore
    relies on Python 2 floor division.
    """
    def __init__(self, width, height, rewards, rng, dir):
        self.__width = width
        self.__actHeight = height
        self.__height = self.__actHeight + 10
        self.score = 0
        self.rng = rng          # numpy-style RNG (has .rand()) -- TODO confirm
        self.rewards = rewards  # dict with "positive"/"negative"/"win" keys
        self.cycles = 0 # For the characters animation
        self.direction = 0
        self._dir = dir
        self.IMAGES = {
            "background": pygame.image.load(os.path.join(dir, 'assets/background.png')).convert(),
            "still": pygame.image.load(os.path.join(dir, 'assets/still.png')).convert(),
            "kong0": pygame.image.load(os.path.join(dir, 'assets/kong0.png')).convert_alpha(),
            "princess": pygame.image.load(os.path.join(dir, 'assets/princess.png')).convert_alpha(),
            "fireballright": pygame.image.load(os.path.join(dir, 'assets/fireballright.png')).convert_alpha(),
            "coin1": pygame.image.load(os.path.join(dir, 'assets/coin1.png')).convert_alpha(),
            "wood_block": pygame.image.load(os.path.join(dir, 'assets/wood_block.png')).convert_alpha(),
            "ladder": pygame.image.load(os.path.join(dir, 'assets/ladder.png')).convert_alpha()
        }
        self.white = (255, 255, 255)
        '''
        The map is essentially an array of 30x80 in which we store what each block on our map is.
        1 represents a wall, 2 for a ladder and 3 for a coin.
        '''
        self.map = []
        # These are the arrays in which we store our instances of different classes
        self.Players = []
        self.Enemies = []
        self.Allies = []
        self.Coins = []
        self.Walls = []
        self.Ladders = []
        self.Fireballs = []
        self.Boards = []
        self.FireballEndpoints = []
        # Resets the above groups and initializes the game for us
        self.resetGroups()
        self.background = self.IMAGES["background"]
        self.background = pygame.transform.scale(self.background, (width, height))
        # Initialize the instance groups which we use to display our instances on the screen
        self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
        self.playerGroup = pygame.sprite.RenderPlain(self.Players)
        self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
        self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
        self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
        self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
        self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
        self.fireballEndpointsGroup = pygame.sprite.RenderPlain(self.FireballEndpoints)
    def resetGroups(self):
        """Reset score/lives and rebuild every object list, then regenerate the level."""
        self.score = 0
        self.lives = 3
        self.map = [] # We will create the map again when we reset the game
        self.Players = [Player(self.IMAGES["still"], (50, 440))]
        self.Enemies = [DonkeyKongPerson(self.IMAGES["kong0"], (100, 117), self.rng, self._dir)]
        self.Allies = [Person(self.IMAGES["princess"], (50, 55))]
        self.Allies[0].updateWH(self.Allies[0].image, "H", 0, 25, 25)
        self.Coins = []
        self.Walls = []
        self.Ladders = []
        self.Fireballs = []
        self.FireballEndpoints = [OnBoard(self.IMAGES["still"], (50, 440))]
        self.initializeGame() # This initializes the game and generates our map
        self.createGroups() # This creates the instance groups
    # Checks to destroy a fireball when it reaches its terminal point
    def checkFireballDestroy(self, fireball):
        """Destroy `fireball` if it touches any fireball endpoint sprite."""
        if pygame.sprite.spritecollide(fireball, self.fireballEndpointsGroup, False):
            self.DestroyFireball(fireball.index) # We use indices on fireballs to uniquely identify each fireball
    # Creates a new fireball and adds it to our fireball group
    def CreateFireball(self, location, kongIndex):
        """Spawn a fireball at `location` and animate the Kong at `kongIndex`."""
        if len(self.Fireballs) < len(self.Enemies) * 6+6:
            self.Fireballs.append(
                Fireball(self.IMAGES["fireballright"], (location[0], location[1] + 15), len(self.Fireballs),
                         2 + len(self.Enemies)/2, self.rng, self._dir))
            # Starts DonkeyKong's animation
            self.Enemies[kongIndex].setStopDuration(15)
            self.Enemies[kongIndex].setPosition(
                (self.Enemies[kongIndex].getPosition()[0], self.Enemies[kongIndex].getPosition()[1] - 12))
            self.Enemies[kongIndex].setCenter(self.Enemies[kongIndex].getPosition())
            self.createGroups() # We recreate the groups so the fireball is added
    # Destroy a fireball if it has collided with a player or reached its endpoint
    def DestroyFireball(self, index):
        """Remove the fireball with the given index and renumber the rest."""
        for fireBall in range(len(self.Fireballs)):
            if self.Fireballs[fireBall].index == index:
                self.Fireballs.remove(self.Fireballs[fireBall])
                for fireBallrem in range(
                        len(self.Fireballs)): # We need to reduce the indices of all fireballs greater than this
                    if self.Fireballs[fireBallrem].index > index:
                        self.Fireballs[fireBallrem].index -= 1
                self.createGroups() # Recreate the groups so the fireball is removed
                break
    # Randomly Generate coins in the level where there is a wall below the coin so the player can reach it
    def GenerateCoins(self):
        """Scatter coins on reachable cells; recurses until at least 21 exist."""
        for i in range(6, len(self.map)):
            for j in range(len(self.map[i])):
                if self.map[i][j] == 0 and ((i + 1 < len(self.map) and self.map[i + 1][j] == 1) or (
                        i + 2 < len(self.map) and self.map[i + 2][j] == 1)):
                    randNumber = math.floor(self.rng.rand() * 1000)
                    if randNumber % 35 == 0 and len(self.Coins) <= 25: # At max there will be 26 coins in the map
                        self.map[i][j] = 3
                    # Avoid two coins in horizontally adjacent cells.
                    if j - 1 >= 0 and self.map[i][j - 1] == 3:
                        self.map[i][j] = 0
                    if self.map[i][j] == 3:
                        # Add the coin to our coin list
                        self.Coins.append(Coin(self.IMAGES["coin1"], (j * 15 + 15 / 2, i * 15 + 15 / 2), self._dir))
        if len(self.Coins) <= 20: # If there are less than 21 coins, we call the function again
            self.GenerateCoins()
    # Given a position and checkNo ( 1 for wall, 2 for ladder, 3 for coin) the function tells us if its a valid position to place or not
    def checkMapForMatch(self, placePosition, floor, checkNo, offset):
        """Return 1 if an item of type `checkNo` sits within 4 cells of `placePosition` on `floor`, else 0."""
        if floor < 1:
            return 0
        for i in range(0, 5): # We will get things placed atleast 5-1 blocks away from each other
            if self.map[floor * 5 - offset][placePosition + i] == checkNo:
                return 1
            if self.map[floor * 5 - offset][placePosition - i] == checkNo:
                return 1
        return 0
    # Create an empty 2D map of 30x80 size
    def makeMap(self):
        """Fill self.map with zeros (height/15+1 rows by width/15 columns)."""
        for point in range(0, self.__height / 15 + 1):
            row = []
            for point2 in range(0, self.__width / 15):
                row.append(0)
            self.map.append(row)
    # Add walls to our map boundaries and also the floors
    def makeWalls(self):
        """Put wall cells on the left/right borders and on every 5th row (floors)."""
        for i in range(0, (self.__height / 15) - 4):
            self.map[i][0] = self.map[i][self.__width / 15 - 1] = 1
        for i in range(0, (self.__height / (15 * 5))):
            for j in range(0, self.__width / 15):
                self.map[i * 5][j] = 1
    # Make a small chamber on the top where the princess resides
    def makePrincessChamber(self):
        """Wall off the princess chamber at the top-left and add its ladder."""
        for j in range(0, 5):
            self.map[j][9] = 1
        for j in range(10, (self.__width / 15) - 1):
            self.map[1 * 5][j] = 0
        for j in range(0, 5):
            self.map[1 * 5 + j][7] = self.map[1 * 5 + j][8] = 2
    # Generate ladders randomly, 1 for each floor such that they are not too close to each other
    def makeLadders(self):
        """Place one 2-cell-wide ladder per floor at a random, well-spaced column."""
        for i in range(2, (self.__height / (15 * 5) - 1)):
            ladderPos = math.floor(self.rng.rand() * (self.__width / 15 - 20))
            ladderPos = int(10 + ladderPos)
            while self.checkMapForMatch(ladderPos, i - 1, 2, 0) == 1:
                ladderPos = math.floor(self.rng.rand() * (self.__width / 15 - 20))
                ladderPos = int(10 + ladderPos)
            for k in range(0, 5):
                self.map[i * 5 + k][ladderPos] = self.map[i * 5 + k][ladderPos + 1] = 2
    # Generate a few broken ladders, such that they are not too close to each other
    def makeBrokenLadders(self):
        """On every other floor, place a ladder with a randomly sized gap in the middle."""
        for i in range(2, (self.__height / (15 * 5) - 1)):
            if i % 2 == 1:
                brokenLadderPos = math.floor(self.rng.rand() * (self.__width / 15 - 20))
                brokenLadderPos = int(10 + brokenLadderPos)
                # Make sure aren't too close to other ladders or broken ladders
                while self.checkMapForMatch(brokenLadderPos, i - 1, 2, 0) == 1 or self.checkMapForMatch(brokenLadderPos,i, 2,0) == 1 or self.checkMapForMatch(brokenLadderPos, i + 1, 2, 0) == 1:
                    brokenLadderPos = math.floor(self.rng.rand() * (self.__width / 15 - 20))
                    brokenLadderPos = int(10 + brokenLadderPos)
                # Randomly make the broken edges of the ladder
                brokenRand = int(math.floor(self.rng.rand() * 100)) % 2
                brokenRand2 = int(math.floor(self.rng.rand() * 100)) % 2
                for k in range(0, 1):
                    self.map[i * 5 + k][brokenLadderPos] = self.map[i * 5 + k][brokenLadderPos + 1] = 2
                for k in range(3 + brokenRand, 5):
                    self.map[i * 5 + k][brokenLadderPos] = 2
                for k in range(3 + brokenRand2, 5):
                    self.map[i * 5 + k][brokenLadderPos + 1] = 2
    # Create the holes on each floor (extreme right and extreme left)
    def makeHoles(self):
        """Cut a 5-cell hole in each floor, alternating left and right edges."""
        for i in range(3, (self.__height / (15 * 5) - 1)):
            for k in range(1, 6): # Ladders wont interfere since they leave 10 blocks on either side
                if i % 2 == 0:
                    self.map[i * 5][k] = 0
                else:
                    self.map[i * 5][self.__width / 15 - 1 - k] = 0
    '''
    This is called once you have finished making holes, ladders, walls etc
    You use the 2D map to add instances to the groups
    '''
    def populateMap(self):
        """Instantiate OnBoard wall/ladder sprites from the generated grid."""
        for x in range(len(self.map)):
            for y in range(len(self.map[x])):
                if self.map[x][y] == 1:
                    # Add a wall at that position
                    self.Walls.append(OnBoard(self.IMAGES["wood_block"], (y * 15 + 15 / 2, x * 15 + 15 / 2)))
                elif self.map[x][y] == 2:
                    # Add a ladder at that position
                    self.Ladders.append(OnBoard(self.IMAGES["ladder"], (y * 15 + 15 / 2, x * 15 + 15 / 2)))
    # Check if the player is on a ladder or not
    def ladderCheck(self, laddersCollidedBelow, wallsCollidedBelow, wallsCollidedAbove):
        """Set the player's onLadder flag from the supplied collision lists."""
        if laddersCollidedBelow and len(wallsCollidedBelow) == 0:
            for ladder in laddersCollidedBelow:
                if ladder.getPosition()[1] >= self.Players[0].getPosition()[1]:
                    self.Players[0].onLadder = 1
                    self.Players[0].isJumping = 0
                    # Move the player down if he collides a wall above
                    if wallsCollidedAbove:
                        self.Players[0].updateY(3)
        else:
            self.Players[0].onLadder = 0
    # Update all the fireball positions and check for collisions with player
    def fireballCheck(self):
        """Advance every fireball; a hit costs a life, negative reward, and a respawn."""
        for fireball in self.fireballGroup:
            fireball.continuousUpdate(self.wallGroup, self.ladderGroup)
            if fireball.checkCollision(self.playerGroup, "V"):
                self.Fireballs.remove(fireball)
                self.Players[0].setPosition((50, 440))
                self.score += self.rewards["negative"]
                self.lives += -1
                self.createGroups()
            self.checkFireballDestroy(fireball)
    # Check for coins collided and add the appropriate score
    def coinCheck(self, coinsCollected):
        """Score each collected coin and remove it from the map, list and group."""
        for coin in coinsCollected:
            self.score += self.rewards["positive"]
            # We also remove the coin entry from our map
            self.map[(coin.getPosition()[1] - 15 / 2) / 15][(coin.getPosition()[0] - 15 / 2) / 15] = 0
            # Remove the coin entry from our list
            self.Coins.remove(coin)
            # Update the coin group since we modified the coin list
            self.createGroups()
    # Check if the player wins
    def checkVictory(self):
        """On reaching the princess (or her floor): award win reward and start the next level."""
        # If you touch the princess or reach the floor with the princess you win!
        if self.Players[0].checkCollision(self.allyGroup) or self.Players[0].getPosition()[1] < 5 * 15:
            self.score += self.rewards["win"]
            # This is just the next level so we only clear the fireballs and regenerate the coins
            self.Fireballs = []
            self.Players[0].setPosition((50, 440))
            self.Coins = []
            self.GenerateCoins()
            # Add Donkey Kongs (one extra per completed level, up to three)
            if len(self.Enemies) == 1:
                self.Enemies.append(DonkeyKongPerson(self.IMAGES["kong0"], (700, 117), self.rng, self._dir))
            elif len(self.Enemies) == 2:
                self.Enemies.append(DonkeyKongPerson(self.IMAGES["kong0"], (400, 117), self.rng, self._dir))
            # Create the groups again so the enemies are effected
            self.createGroups()
    # Redraws the entire game screen for us
    def redrawScreen(self, screen, width, height):
        """Clear `screen`, draw the background, then draw every sprite group."""
        screen.fill((0, 0, 0)) # Fill it with black
        # Draw the background first
        screen.blit(self.background, self.background.get_rect())
        # Draw all our groups on the background
        self.ladderGroup.draw(screen)
        self.playerGroup.draw(screen)
        self.coinGroup.draw(screen)
        self.wallGroup.draw(screen)
        self.fireballGroup.draw(screen)
        self.enemyGroup.draw(screen)
        self.allyGroup.draw(screen)
    # Update all the groups from their corresponding lists
    def createGroups(self):
        """Rebuild every pygame sprite group from its backing list."""
        self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
        self.playerGroup = pygame.sprite.RenderPlain(self.Players)
        self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
        self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
        self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
        self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
        self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
        self.fireballEndpointsGroup = pygame.sprite.RenderPlain(self.FireballEndpoints)
    '''
    Initialize the game by making the map, generating walls, generating princess chamber, generating ladders randomly,
    generating broken ladders randomly, generating holes, generating coins randomly, adding the ladders and walls to our lists
    and finally updating the groups.
    '''
    def initializeGame(self):
        self.makeMap()
        self.makeWalls()
        self.makePrincessChamber()
        self.makeLadders()
        self.makeBrokenLadders()
        self.makeHoles()
        self.GenerateCoins()
        self.populateMap()
        self.createGroups()
| {
"repo_name": "erilyth/PyGame-Learning-Environment",
"path": "ple/games/donkeykong/board.py",
"copies": "1",
"size": "15880",
"license": "mit",
"hash": 1802422882069176600,
"line_mean": 47.2674772036,
"line_max": 193,
"alpha_frac": 0.5920654912,
"autogenerated": false,
"ratio": 3.616488271464359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47085537626643587,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import math
import sys
import os
from .person import Person
from .onBoard import OnBoard
from .coin import Coin
from .player import Player
from .fireball import Fireball
from .monsterPerson import MonsterPerson
class Board(object):
'''
This class defines our gameboard.
A gameboard contains everthing related to our game on it like our characters, walls, ladders, coins etc
The generation of the level also happens in this class.
'''
    def __init__(self, width, height, rewards, rng, _dir):
        """Build the board: store configuration, load sprite assets and
        generate the first level.

        width/height are the playfield size in pixels (10 px of headroom
        is added to the internal height). rewards maps event names to
        score deltas; rng supplies .rand() for level generation; _dir is
        the package directory containing 'assets/'.
        """
        self.__width = width
        self.__actHeight = height
        self.__height = self.__actHeight + 10
        self.score = 0
        self.rng = rng
        self.rewards = rewards
        self.cycles = 0  # For the characters animation
        self.direction = 0
        self._dir = _dir
        # All sprite images used by the board, loaded once up front.
        self.IMAGES = {
            "still": pygame.image.load(os.path.join(_dir, 'assets/still.png')).convert_alpha(),
            "monster0": pygame.image.load(os.path.join(_dir, 'assets/monster0.png')).convert_alpha(),
            "princess": pygame.image.load(os.path.join(_dir, 'assets/princess.png')).convert_alpha(),
            "fireballright": pygame.image.load(os.path.join(_dir, 'assets/fireballright.png')).convert_alpha(),
            "coin1": pygame.image.load(os.path.join(_dir, 'assets/coin1.png')).convert_alpha(),
            "wood_block": pygame.image.load(os.path.join(_dir, 'assets/wood_block.png')).convert_alpha(),
            "ladder": pygame.image.load(os.path.join(_dir, 'assets/ladder.png')).convert_alpha()
        }
        self.white = (255, 255, 255)
        '''
        The map is essentially an array of 30x80 in which we store what each block on our map is.
        1 represents a wall, 2 for a ladder and 3 for a coin.
        '''
        self.map = []
        # These are the arrays in which we store our instances of different
        # classes
        self.Players = []
        self.Enemies = []
        self.Allies = []
        self.Coins = []
        self.Walls = []
        self.Ladders = []
        self.Fireballs = []
        self.Boards = []
        self.FireballEndpoints = []
        # Resets the above groups and initializes the game for us
        self.resetGroups()
        # Initialize the instance groups which we use to display our instances
        # on the screen
        self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
        self.playerGroup = pygame.sprite.RenderPlain(self.Players)
        self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
        self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
        self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
        self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
        self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
        self.fireballEndpointsGroup = pygame.sprite.RenderPlain(
            self.FireballEndpoints)
    def resetGroups(self):
        """Reset score and lives, rebuild every entity list (player,
        monster, princess, fireball endpoint), then regenerate the level
        and the sprite groups."""
        self.score = 0
        self.lives = 3
        self.map = []  # We will create the map again when we reset the game
        self.Players = [
            Player(
                self.IMAGES["still"],
                (self.__width / 2,
                 435),
                15,
                15)]
        self.Enemies = [
            MonsterPerson(
                self.IMAGES["monster0"],
                (100,
                 117),
                self.rng,
                self._dir)]
        self.Allies = [Person(self.IMAGES["princess"], (50, 48), 18, 25)]
        self.Allies[0].updateWH(self.Allies[0].image, "H", 0, 25, 25)
        self.Coins = []
        self.Walls = []
        self.Ladders = []
        self.Fireballs = []
        self.FireballEndpoints = [OnBoard(self.IMAGES["still"], (50, 440))]
        self.initializeGame()  # This initializes the game and generates our map
        self.createGroups()  # This creates the instance groups
# Checks to destroy a fireball when it reaches its terminal point
def checkFireballDestroy(self, fireball):
if pygame.sprite.spritecollide(
fireball, self.fireballEndpointsGroup, False):
# We use indices on fireballs to uniquely identify each fireball
self.DestroyFireball(fireball.index)
# Creates a new fireball and adds it to our fireball group
    def CreateFireball(self, location, monsterIndex):
        """Spawn a fireball just below *location* and play the throwing
        animation of the monster at *monsterIndex*.

        Capped at 5 live fireballs per monster; fireball speed scales
        with the number of monsters (2 + len(Enemies) / 2).
        """
        if len(self.Fireballs) < len(self.Enemies) * 5:
            self.Fireballs.append(
                Fireball(self.IMAGES["fireballright"], (location[0], location[1] + 15), len(self.Fireballs),
                         2 + len(self.Enemies) / 2, self.rng, self._dir))
            # Starts monster's animation
            self.Enemies[monsterIndex].setStopDuration(15)
            # Lift the monster sprite 12 px while it throws.
            self.Enemies[monsterIndex].setPosition(
                (self.Enemies[monsterIndex].getPosition()[0], self.Enemies[monsterIndex].getPosition()[1] - 12))
            self.Enemies[monsterIndex].setCenter(
                self.Enemies[monsterIndex].getPosition())
            self.createGroups()  # We recreate the groups so the fireball is added
# Destroy a fireball if it has collided with a player or reached its
# endpoint
def DestroyFireball(self, index):
for fireBall in range(len(self.Fireballs)):
if self.Fireballs[fireBall].index == index:
self.Fireballs.remove(self.Fireballs[fireBall])
for fireBallrem in range(
len(self.Fireballs)): # We need to reduce the indices of all fireballs greater than this
if self.Fireballs[fireBallrem].index > index:
self.Fireballs[fireBallrem].index -= 1
self.createGroups() # Recreate the groups so the fireball is removed
break
# Randomly Generate coins in the level where there is a wall below the
# coin so the player can reach it
    def GenerateCoins(self):
        """Scatter coins on empty cells that have a wall one or two rows
        below (so the player can stand and collect them), then recurse
        until more than 15 coins exist.

        NOTE(review): termination depends on the RNG eventually placing
        enough coins; with unlucky draws the recursion can run deep —
        verify against the rng in use.
        """
        for i in range(6, len(self.map)):
            for j in range(len(self.map[i])):
                # Empty cell supported by a wall 1-2 rows below.
                if self.map[i][j] == 0 and ((i + 1 < len(self.map) and self.map[i + 1][j] == 1) or (
                        i + 2 < len(self.map) and self.map[i + 2][j] == 1)):
                    randNumber = math.floor(self.rng.rand() * 1000)
                    if randNumber % 35 == 0 and len(
                            self.Coins) <= 25:  # At max there will be 26 coins in the map
                        self.map[i][j] = 3
                        # Undo the placement if the cell to the left already
                        # holds a coin (no adjacent coins).
                        if j - 1 >= 0 and self.map[i][j - 1] == 3:
                            self.map[i][j] = 0
                    if self.map[i][j] == 3:
                        # Add the coin to our coin list
                        self.Coins.append(
                            Coin(
                                self.IMAGES["coin1"],
                                (j * 15 + 15 / 2,
                                 i * 15 + 15 / 2),
                                self._dir))
        if len(
                self.Coins) <= 15:  # 15 or fewer coins placed: generate another batch
            self.GenerateCoins()
# Given a position and checkNo ( 1 for wall, 2 for ladder, 3 for coin) the
# function tells us if its a valid position to place or not
def checkMapForMatch(self, placePosition, floor, checkNo, offset):
if floor < 1:
return 0
for i in range(
0, 5): # We will get things placed atleast 5-1 blocks away from each other
if self.map[floor * 5 - offset][placePosition + i] == checkNo:
return 1
if self.map[floor * 5 - offset][placePosition - i] == checkNo:
return 1
return 0
# Create an empty 2D map of 30x80 size
def makeMap(self):
for point in range(0, int(self.__height / 15 + 1)):
row = []
for point2 in range(0, int(self.__width / 15)):
row.append(0)
self.map.append(row)
# Add walls to our map boundaries and also the floors
def makeWalls(self):
for i in range(0, int(self.__height / 15)):
self.map[i][0] = self.map[i][int(self.__width / 15 - 1)] = 1
for i in range(2, int(self.__height / (15 * 4))):
for j in range(0, int(self.__width / 15)):
self.map[i * 5][j] = 1
# Make a small chamber on the top where the princess resides
def makePrincessChamber(self):
for j in range(0, 4):
self.map[j][9] = 1
for j in range(0, 10):
self.map[4][j] = 1
for j in range(0, 6):
self.map[1 * 4 + j][7] = self.map[1 * 4 + j][8] = 2
# Generate ladders randomly, 1 for each floor such that they are not too
# close to each other
    def makeLadders(self):
        """Place one two-column ladder per floor at a random column in
        [7, 7 + width/15 - 20), re-rolling while checkMapForMatch finds a
        ladder within 4 cells on the floor above."""
        for i in range(2, int(self.__height / (15 * 4) - 1)):
            ladderPos = math.floor(self.rng.rand() * (self.__width / 15 - 20))
            ladderPos = int(7 + ladderPos)
            while self.checkMapForMatch(ladderPos, i - 1, 2, 0) == 1:
                ladderPos = math.floor(
                    self.rng.rand() * (self.__width / 15 - 20))
                ladderPos = int(7 + ladderPos)
            # Ladder spans the full 5-row floor height, two columns wide.
            for k in range(0, 5):
                self.map[i * 5 + k][ladderPos] = self.map[i *
                                                          5 + k][ladderPos + 1] = 2
# Create the holes on each floor (extreme right and extreme left)
def makeHoles(self):
for i in range(3, int(self.__height / (15 * 4) - 1)):
for k in range(
1, 6): # Ladders wont interfere since they leave 10 blocks on either side
if i % 2 == 0:
self.map[i * 5][k] = 0
else:
self.map[i * 5][int(self.__width / 15 - 1 - k)] = 0
'''
This is called once you have finished making holes, ladders, walls etc
You use the 2D map to add instances to the groups
'''
    def populateMap(self):
        """Convert the numeric grid into sprites: 1 -> wall, 2 -> ladder.

        Each grid cell is 15x15 px and sprites are centred on the cell,
        hence the ``* 15 + 15 / 2`` coordinate math. Coins (3) are
        instantiated separately in GenerateCoins.
        """
        for x in range(len(self.map)):
            for y in range(len(self.map[x])):
                if self.map[x][y] == 1:
                    # Add a wall at that position
                    self.Walls.append(
                        OnBoard(
                            self.IMAGES["wood_block"],
                            (y * 15 + 15 / 2,
                             x * 15 + 15 / 2)))
                elif self.map[x][y] == 2:
                    # Add a ladder at that position
                    self.Ladders.append(
                        OnBoard(
                            self.IMAGES["ladder"],
                            (y * 15 + 15 / 2,
                             x * 15 + 15 / 2)))
# Check if the player is on a ladder or not
def ladderCheck(self, laddersCollidedBelow,
wallsCollidedBelow, wallsCollidedAbove):
if laddersCollidedBelow and len(wallsCollidedBelow) == 0:
for ladder in laddersCollidedBelow:
if ladder.getPosition()[1] >= self.Players[0].getPosition()[1]:
self.Players[0].onLadder = 1
self.Players[0].isJumping = 0
# Move the player down if he collides a wall above
if wallsCollidedAbove:
self.Players[0].updateY(3)
else:
self.Players[0].onLadder = 0
# Update all the fireball positions and check for collisions with player
    def fireballCheck(self):
        """Advance every fireball and handle its collisions.

        On a vertical collision with the player: the fireball is removed,
        the player respawns at (50, 440), the negative reward is applied
        and a life is lost. Each fireball is then checked against its
        endpoint markers.

        NOTE(review): the list self.Fireballs is mutated while iterating
        the (stale) fireballGroup, and checkFireballDestroy still runs
        for a fireball removed above — confirm this is intended.
        """
        for fireball in self.fireballGroup:
            fireball.continuousUpdate(self.wallGroup, self.ladderGroup)
            if fireball.checkCollision(self.playerGroup, "V"):
                self.Fireballs.remove(fireball)
                self.Players[0].setPosition((50, 440))
                self.score += self.rewards["negative"]
                self.lives += -1
                self.createGroups()
            self.checkFireballDestroy(fireball)
# Check for coins collided and add the appropriate score
    def coinCheck(self, coinsCollected):
        """Apply the pickup reward for each collected coin and remove the
        coin from the grid, the coin list and the sprite groups."""
        for coin in coinsCollected:
            self.score += self.rewards["positive"]
            # We also remove the coin entry from our map
            # (centre pixel coordinates mapped back to grid indices).
            self.map[int((coin.getPosition()[1] - 15 / 2) /
                         15)][int((coin.getPosition()[0] - 15 / 2) / 15)] = 0
            # Remove the coin entry from our list
            self.Coins.remove(coin)
            # Update the coin group since we modified the coin list
            self.createGroups()
# Check if the player wins
    def checkVictory(self):
        """Advance to the next level when the player reaches the princess.

        Touching the princess, or rising above her floor (y < 4*15),
        awards the win reward, clears fireballs, regenerates coins,
        respawns the player and adds one more monster (up to three).
        """
        # If you touch the princess or reach the floor with the princess you
        # win!
        if self.Players[0].checkCollision(self.allyGroup) or self.Players[
                0].getPosition()[1] < 4 * 15:
            self.score += self.rewards["win"]
            # This is just the next level so we only clear the fireballs and
            # regenerate the coins
            self.Fireballs = []
            self.Players[0].setPosition((50, 440))
            self.Coins = []
            self.GenerateCoins()
            # Add monsters
            if len(self.Enemies) == 1:
                self.Enemies.append(
                    MonsterPerson(
                        self.IMAGES["monster0"], (700, 117), self.rng, self._dir))
            elif len(self.Enemies) == 2:
                self.Enemies.append(
                    MonsterPerson(
                        self.IMAGES["monster0"], (400, 117), self.rng, self._dir))
            # Create the groups again so the enemies are effected
            self.createGroups()
# Redraws the entire game screen for us
def redrawScreen(self, screen, width, height):
screen.fill((40, 20, 0)) # Fill it with black
# Draw all our groups on the background
self.ladderGroup.draw(screen)
self.playerGroup.draw(screen)
self.coinGroup.draw(screen)
self.wallGroup.draw(screen)
self.fireballGroup.draw(screen)
self.enemyGroup.draw(screen)
self.allyGroup.draw(screen)
# Update all the groups from their corresponding lists
def createGroups(self):
self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
self.playerGroup = pygame.sprite.RenderPlain(self.Players)
self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
self.fireballEndpointsGroup = pygame.sprite.RenderPlain(
self.FireballEndpoints)
'''
Initialize the game by making the map, generating walls, generating princess chamber, generating ladders randomly,
generating broken ladders randomly, generating holes, generating coins randomly, adding the ladders and walls to our lists
and finally updating the groups.
'''
    def initializeGame(self):
        """Run the full level-generation pipeline in order and build the
        sprite groups.

        NOTE(review): the class-level comment mentions broken ladders,
        but this variant generates none.
        """
        self.makeMap()
        self.makeWalls()
        self.makePrincessChamber()
        self.makeLadders()
        self.makeHoles()
        self.GenerateCoins()
        self.populateMap()
        self.createGroups()
| {
"repo_name": "ntasfi/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/board.py",
"copies": "1",
"size": "15293",
"license": "mit",
"hash": 8642456087992719000,
"line_mean": 42.0788732394,
"line_max": 126,
"alpha_frac": 0.5541751128,
"autogenerated": false,
"ratio": 3.8472955974842766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9897862789217992,
"avg_score": 0.0007215842132570204,
"num_lines": 355
} |
__author__ = 'Batchu Vishal'
import pygame
import math
import sys
import os
from person import Person
from onBoard import OnBoard
from coin import Coin
from player import Player
from fireball import Fireball
from monsterPerson import MonsterPerson
class Board(object):
'''
This class defines our gameboard.
A gameboard contains everthing related to our game on it like our characters, walls, ladders, coins etc
The generation of the level also happens in this class.
'''
def __init__(self, width, height, rewards, rng, _dir):
self.__width = width
self.__actHeight = height
self.__height = self.__actHeight + 10
self.score = 0
self.rng = rng
self.rewards = rewards
self.cycles = 0 # For the characters animation
self.direction = 0
self._dir = _dir
self.IMAGES = {
"still": pygame.image.load(os.path.join(_dir, 'assets/still.png')).convert_alpha(),
"monster0": pygame.image.load(os.path.join(_dir, 'assets/monster0.png')).convert_alpha(),
"princess": pygame.image.load(os.path.join(_dir, 'assets/princess.png')).convert_alpha(),
"fireballright": pygame.image.load(os.path.join(_dir, 'assets/fireballright.png')).convert_alpha(),
"coin1": pygame.image.load(os.path.join(_dir, 'assets/coin1.png')).convert_alpha(),
"wood_block": pygame.image.load(os.path.join(_dir, 'assets/wood_block.png')).convert_alpha(),
"ladder": pygame.image.load(os.path.join(_dir, 'assets/ladder.png')).convert_alpha()
}
self.white = (255, 255, 255)
'''
The map is essentially an array of 30x80 in which we store what each block on our map is.
1 represents a wall, 2 for a ladder and 3 for a coin.
'''
self.map = []
# These are the arrays in which we store our instances of different
# classes
self.Players = []
self.Enemies = []
self.Allies = []
self.Coins = []
self.Walls = []
self.Ladders = []
self.Fireballs = []
self.Boards = []
self.FireballEndpoints = []
# Resets the above groups and initializes the game for us
self.resetGroups()
# Initialize the instance groups which we use to display our instances
# on the screen
self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
self.playerGroup = pygame.sprite.RenderPlain(self.Players)
self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
self.fireballEndpointsGroup = pygame.sprite.RenderPlain(
self.FireballEndpoints)
def resetGroups(self):
self.score = 0
self.lives = 3
self.map = [] # We will create the map again when we reset the game
self.Players = [
Player(
self.IMAGES["still"],
(self.__width / 2,
435),
15,
15)]
self.Enemies = [
MonsterPerson(
self.IMAGES["monster0"],
(100,
117),
self.rng,
self._dir)]
self.Allies = [Person(self.IMAGES["princess"], (50, 48), 18, 25)]
self.Allies[0].updateWH(self.Allies[0].image, "H", 0, 25, 25)
self.Coins = []
self.Walls = []
self.Ladders = []
self.Fireballs = []
self.FireballEndpoints = [OnBoard(self.IMAGES["still"], (50, 440))]
self.initializeGame() # This initializes the game and generates our map
self.createGroups() # This creates the instance groups
# Checks to destroy a fireball when it reaches its terminal point
def checkFireballDestroy(self, fireball):
if pygame.sprite.spritecollide(
fireball, self.fireballEndpointsGroup, False):
# We use indices on fireballs to uniquely identify each fireball
self.DestroyFireball(fireball.index)
# Creates a new fireball and adds it to our fireball group
def CreateFireball(self, location, monsterIndex):
if len(self.Fireballs) < len(self.Enemies) * 5:
self.Fireballs.append(
Fireball(self.IMAGES["fireballright"], (location[0], location[1] + 15), len(self.Fireballs),
2 + len(self.Enemies) / 2, self.rng, self._dir))
# Starts monster's animation
self.Enemies[monsterIndex].setStopDuration(15)
self.Enemies[monsterIndex].setPosition(
(self.Enemies[monsterIndex].getPosition()[0], self.Enemies[monsterIndex].getPosition()[1] - 12))
self.Enemies[monsterIndex].setCenter(
self.Enemies[monsterIndex].getPosition())
self.createGroups() # We recreate the groups so the fireball is added
# Destroy a fireball if it has collided with a player or reached its
# endpoint
def DestroyFireball(self, index):
for fireBall in range(len(self.Fireballs)):
if self.Fireballs[fireBall].index == index:
self.Fireballs.remove(self.Fireballs[fireBall])
for fireBallrem in range(
len(self.Fireballs)): # We need to reduce the indices of all fireballs greater than this
if self.Fireballs[fireBallrem].index > index:
self.Fireballs[fireBallrem].index -= 1
self.createGroups() # Recreate the groups so the fireball is removed
break
# Randomly Generate coins in the level where there is a wall below the
# coin so the player can reach it
def GenerateCoins(self):
for i in range(6, len(self.map)):
for j in range(len(self.map[i])):
if self.map[i][j] == 0 and ((i + 1 < len(self.map) and self.map[i + 1][j] == 1) or (
i + 2 < len(self.map) and self.map[i + 2][j] == 1)):
randNumber = math.floor(self.rng.rand() * 1000)
if randNumber % 35 == 0 and len(
self.Coins) <= 25: # At max there will be 26 coins in the map
self.map[i][j] = 3
if j - 1 >= 0 and self.map[i][j - 1] == 3:
self.map[i][j] = 0
if self.map[i][j] == 3:
# Add the coin to our coin list
self.Coins.append(
Coin(
self.IMAGES["coin1"],
(j * 15 + 15 / 2,
i * 15 + 15 / 2),
self._dir))
if len(
self.Coins) <= 15: # If there are less than 21 coins, we call the function again
self.GenerateCoins()
# Given a position and checkNo ( 1 for wall, 2 for ladder, 3 for coin) the
# function tells us if its a valid position to place or not
def checkMapForMatch(self, placePosition, floor, checkNo, offset):
if floor < 1:
return 0
for i in range(
0, 5): # We will get things placed atleast 5-1 blocks away from each other
if self.map[floor * 5 - offset][placePosition + i] == checkNo:
return 1
if self.map[floor * 5 - offset][placePosition - i] == checkNo:
return 1
return 0
# Create an empty 2D map of 30x80 size
def makeMap(self):
for point in range(0, self.__height / 15 + 1):
row = []
for point2 in range(0, self.__width / 15):
row.append(0)
self.map.append(row)
# Add walls to our map boundaries and also the floors
def makeWalls(self):
for i in range(0, (self.__height / 15)):
self.map[i][0] = self.map[i][self.__width / 15 - 1] = 1
for i in range(2, (self.__height / (15 * 4))):
for j in range(0, self.__width / 15):
self.map[i * 5][j] = 1
# Make a small chamber on the top where the princess resides
def makePrincessChamber(self):
for j in range(0, 4):
self.map[j][9] = 1
for j in range(0, 10):
self.map[4][j] = 1
for j in range(0, 6):
self.map[1 * 4 + j][7] = self.map[1 * 4 + j][8] = 2
# Generate ladders randomly, 1 for each floor such that they are not too
# close to each other
    def makeLadders(self):
        """Place one two-column ladder per floor at a random column,
        re-rolling while checkMapForMatch finds a ladder within 4 cells
        on the floor above.

        NOTE(review): relies on Python 2 integer division in
        ``self.__height / (15 * 4)`` and ``self.__width / 15``; under
        Python 3 these yield floats and range() raises TypeError.
        """
        for i in range(2, (self.__height / (15 * 4) - 1)):
            ladderPos = math.floor(self.rng.rand() * (self.__width / 15 - 20))
            ladderPos = int(7 + ladderPos)
            while self.checkMapForMatch(ladderPos, i - 1, 2, 0) == 1:
                ladderPos = math.floor(
                    self.rng.rand() * (self.__width / 15 - 20))
                ladderPos = int(7 + ladderPos)
            # Ladder spans the full 5-row floor height, two columns wide.
            for k in range(0, 5):
                self.map[i * 5 + k][ladderPos] = self.map[i *
                                                          5 + k][ladderPos + 1] = 2
# Create the holes on each floor (extreme right and extreme left)
def makeHoles(self):
for i in range(3, (self.__height / (15 * 4) - 1)):
for k in range(
1, 6): # Ladders wont interfere since they leave 10 blocks on either side
if i % 2 == 0:
self.map[i * 5][k] = 0
else:
self.map[i * 5][self.__width / 15 - 1 - k] = 0
'''
This is called once you have finished making holes, ladders, walls etc
You use the 2D map to add instances to the groups
'''
def populateMap(self):
for x in range(len(self.map)):
for y in range(len(self.map[x])):
if self.map[x][y] == 1:
# Add a wall at that position
self.Walls.append(
OnBoard(
self.IMAGES["wood_block"],
(y * 15 + 15 / 2,
x * 15 + 15 / 2)))
elif self.map[x][y] == 2:
# Add a ladder at that position
self.Ladders.append(
OnBoard(
self.IMAGES["ladder"],
(y * 15 + 15 / 2,
x * 15 + 15 / 2)))
# Check if the player is on a ladder or not
def ladderCheck(self, laddersCollidedBelow,
wallsCollidedBelow, wallsCollidedAbove):
if laddersCollidedBelow and len(wallsCollidedBelow) == 0:
for ladder in laddersCollidedBelow:
if ladder.getPosition()[1] >= self.Players[0].getPosition()[1]:
self.Players[0].onLadder = 1
self.Players[0].isJumping = 0
# Move the player down if he collides a wall above
if wallsCollidedAbove:
self.Players[0].updateY(3)
else:
self.Players[0].onLadder = 0
# Update all the fireball positions and check for collisions with player
def fireballCheck(self):
for fireball in self.fireballGroup:
fireball.continuousUpdate(self.wallGroup, self.ladderGroup)
if fireball.checkCollision(self.playerGroup, "V"):
self.Fireballs.remove(fireball)
self.Players[0].setPosition((50, 440))
self.score += self.rewards["negative"]
self.lives += -1
self.createGroups()
self.checkFireballDestroy(fireball)
# Check for coins collided and add the appropriate score
    def coinCheck(self, coinsCollected):
        """Apply the pickup reward for each collected coin and remove the
        coin from the grid, the coin list and the sprite groups.

        NOTE(review): the index math ``(pos - 15 / 2) / 15`` relies on
        Python 2 integer division (coin positions are ints here); under
        Python 3 it produces float indices and raises TypeError.
        """
        for coin in coinsCollected:
            self.score += self.rewards["positive"]
            # We also remove the coin entry from our map
            self.map[(coin.getPosition()[1] - 15 / 2) /
                     15][(coin.getPosition()[0] - 15 / 2) / 15] = 0
            # Remove the coin entry from our list
            self.Coins.remove(coin)
            # Update the coin group since we modified the coin list
            self.createGroups()
# Check if the player wins
def checkVictory(self):
# If you touch the princess or reach the floor with the princess you
# win!
if self.Players[0].checkCollision(self.allyGroup) or self.Players[
0].getPosition()[1] < 4 * 15:
self.score += self.rewards["win"]
# This is just the next level so we only clear the fireballs and
# regenerate the coins
self.Fireballs = []
self.Players[0].setPosition((50, 440))
self.Coins = []
self.GenerateCoins()
# Add monsters
if len(self.Enemies) == 1:
self.Enemies.append(
MonsterPerson(
self.IMAGES["monster0"], (700, 117), self.rng, self._dir))
elif len(self.Enemies) == 2:
self.Enemies.append(
MonsterPerson(
self.IMAGES["monster0"], (400, 117), self.rng, self._dir))
# Create the groups again so the enemies are effected
self.createGroups()
# Redraws the entire game screen for us
def redrawScreen(self, screen, width, height):
screen.fill((40, 20, 0)) # Fill it with black
# Draw all our groups on the background
self.ladderGroup.draw(screen)
self.playerGroup.draw(screen)
self.coinGroup.draw(screen)
self.wallGroup.draw(screen)
self.fireballGroup.draw(screen)
self.enemyGroup.draw(screen)
self.allyGroup.draw(screen)
# Update all the groups from their corresponding lists
def createGroups(self):
self.fireballGroup = pygame.sprite.RenderPlain(self.Fireballs)
self.playerGroup = pygame.sprite.RenderPlain(self.Players)
self.enemyGroup = pygame.sprite.RenderPlain(self.Enemies)
self.wallGroup = pygame.sprite.RenderPlain(self.Walls)
self.ladderGroup = pygame.sprite.RenderPlain(self.Ladders)
self.coinGroup = pygame.sprite.RenderPlain(self.Coins)
self.allyGroup = pygame.sprite.RenderPlain(self.Allies)
self.fireballEndpointsGroup = pygame.sprite.RenderPlain(
self.FireballEndpoints)
'''
Initialize the game by making the map, generating walls, generating princess chamber, generating ladders randomly,
generating broken ladders randomly, generating holes, generating coins randomly, adding the ladders and walls to our lists
and finally updating the groups.
'''
def initializeGame(self):
self.makeMap()
self.makeWalls()
self.makePrincessChamber()
self.makeLadders()
self.makeHoles()
self.GenerateCoins()
self.populateMap()
self.createGroups()
| {
"repo_name": "EndingCredits/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/board.py",
"copies": "1",
"size": "15240",
"license": "mit",
"hash": -4423587379266991600,
"line_mean": 41.9295774648,
"line_max": 126,
"alpha_frac": 0.5539370079,
"autogenerated": false,
"ratio": 3.8562753036437245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49102123115437246,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import os
from onBoard import OnBoard
class Coin(OnBoard):
    """
    This class defines all our coins.
    Each coin will increase our score by an amount of 'value'
    We animate each coin with 5 images
    A coin inherits from the OnBoard class since we will use it as an inanimate object on our board.
    """

    def __init__(self, raw_image, position, _dir):
        OnBoard.__init__(self, raw_image, position)
        self.__coinAnimState = 0  # Animation counter; wraps every 25 ticks
        # The five 15x15 animation frames, loaded and scaled once.
        self.IMAGES = {
            "coin1": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin1.png')), (15, 15)).convert_alpha(),
            "coin2": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin2.png')), (15, 15)).convert_alpha(),
            "coin3": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin3.png')), (15, 15)).convert_alpha(),
            "coin4": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin4.png')), (15, 15)).convert_alpha(),
            "coin5": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin5.png')), (15, 15)).convert_alpha()
        }

    # Update the image of the coin
    def updateImage(self, raw_image):
        self.image = raw_image

    # Animate the coin
    def animateCoin(self):
        """Advance the counter and show the frame for the current phase.

        Floor division keeps the frame index integral on both Python 2
        and Python 3; the original ``state / 5`` comparisons relied on
        Python 2 integer division and silently stopped matching frames
        under Python 3.
        """
        self.__coinAnimState = (self.__coinAnimState + 1) % 25
        frame = self.__coinAnimState // 5  # 0..4 -> coin1..coin5
        self.updateImage(self.IMAGES["coin%d" % (frame + 1)])
| {
"repo_name": "EndingCredits/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/coin.py",
"copies": "1",
"size": "1899",
"license": "mit",
"hash": 362283883979458300,
"line_mean": 44.2142857143,
"line_max": 129,
"alpha_frac": 0.6192733017,
"autogenerated": false,
"ratio": 3.4716636197440587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4590936921444059,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import os
from .onBoard import OnBoard
class Coin(OnBoard):
    """
    This class defines all our coins.
    Each coin will increase our score by an amount of 'value'
    We animate each coin with 5 images
    A coin inherits from the OnBoard class since we will use it as an inanimate object on our board.
    """

    def __init__(self, raw_image, position, _dir):
        OnBoard.__init__(self, raw_image, position)
        self.__coinAnimState = 0  # Animation counter; wraps every 25 ticks
        # The five 15x15 animation frames, loaded and scaled once.
        self.IMAGES = {
            "coin1": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin1.png')), (15, 15)).convert_alpha(),
            "coin2": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin2.png')), (15, 15)).convert_alpha(),
            "coin3": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin3.png')), (15, 15)).convert_alpha(),
            "coin4": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin4.png')), (15, 15)).convert_alpha(),
            "coin5": pygame.transform.scale(pygame.image.load(os.path.join(_dir, 'assets/coin5.png')), (15, 15)).convert_alpha()
        }

    # Update the image of the coin
    def updateImage(self, raw_image):
        self.image = raw_image

    # Animate the coin
    def animateCoin(self):
        """Advance the counter and show the frame for the current phase.

        Fix: under Python 3 the original ``state / 5 == k`` comparisons
        were float divisions, so a frame only matched on exact multiples
        of 5 and the image was left untouched on the ticks in between.
        Floor division restores the intended integer frame index.
        """
        self.__coinAnimState = (self.__coinAnimState + 1) % 25
        frame = self.__coinAnimState // 5  # 0..4 -> coin1..coin5
        self.updateImage(self.IMAGES["coin%d" % (frame + 1)])
| {
"repo_name": "ntasfi/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/coin.py",
"copies": "1",
"size": "1900",
"license": "mit",
"hash": 7498105909886675000,
"line_mean": 44.2380952381,
"line_max": 129,
"alpha_frac": 0.6189473684,
"autogenerated": false,
"ratio": 3.4671532846715327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45861006530715326,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import os
from onBoard import OnBoard
'''
This class defines all our coins.
Each coin will increase our score by an amount of 'value'
We animate each coin with 5 images
A coin inherits from the OnBoard class since we will use it as an inanimate object on our board.
'''
class Coin(OnBoard):
    """A collectible coin animated over five 15x15 frames; inherits the
    inanimate-object behaviour from OnBoard."""

    def __init__(self, raw_image, position, dir):
        # NOTE: parameter name 'dir' shadows the builtin; kept unchanged
        # so keyword callers are not broken.
        super(Coin, self).__init__(raw_image, position)
        self.__coinAnimState = 0  # Animation counter; wraps every 25 ticks
        # The five animation frames, loaded and scaled once.
        self.IMAGES = {
            "coin1": pygame.transform.scale(pygame.image.load(os.path.join(dir, 'assets/coin1.png')), (15, 15)).convert_alpha(),
            "coin2": pygame.transform.scale(pygame.image.load(os.path.join(dir, 'assets/coin2.png')), (15, 15)).convert_alpha(),
            "coin3": pygame.transform.scale(pygame.image.load(os.path.join(dir, 'assets/coin3.png')), (15, 15)).convert_alpha(),
            "coin4": pygame.transform.scale(pygame.image.load(os.path.join(dir, 'assets/coin4.png')), (15, 15)).convert_alpha(),
            "coin5": pygame.transform.scale(pygame.image.load(os.path.join(dir, 'assets/coin5.png')), (15, 15)).convert_alpha()
        }

    # Update the image of the coin
    def updateImage(self, raw_image):
        self.image = raw_image

    # Animate the coin
    def animateCoin(self):
        """Advance the counter and show the frame for the current phase.

        Floor division keeps the frame index integral on both Python 2
        and Python 3 (the original ``/`` comparisons relied on Python 2
        integer division).
        """
        self.__coinAnimState = (self.__coinAnimState + 1) % 25
        frame = self.__coinAnimState // 5  # 0..4 -> coin1..coin5
        self.updateImage(self.IMAGES["coin%d" % (frame + 1)])
| {
"repo_name": "erilyth/PyGame-Learning-Environment",
"path": "ple/games/donkeykong/coin.py",
"copies": "1",
"size": "1873",
"license": "mit",
"hash": -248175072479729150,
"line_mean": 43.5952380952,
"line_max": 128,
"alpha_frac": 0.6289375334,
"autogenerated": false,
"ratio": 3.411657559198543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45405950925985433,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import sys
from pygame.constants import K_a, K_d, K_SPACE, K_w, K_s, QUIT, KEYDOWN
from .board import Board
#from ..base import base
#from ple.games import base
from ple.games.base.pygamewrapper import PyGameWrapper
import numpy as np
import os
class MonsterKong(PyGameWrapper):
    def __init__(self):
        """
        Parameters
        ----------
        None
        """
        self.height = 465
        self.width = 500
        # Keyboard bindings exposed to the PLE wrapper.
        actions = {
            "left": K_a,
            "right": K_d,
            "jump": K_SPACE,
            "up": K_w,
            "down": K_s
        }
        PyGameWrapper.__init__(
            self, self.width, self.height, actions=actions)
        # Score deltas: coin pickup / reaching the princess / fireball
        # hit / per-tick.
        self.rewards = {
            "positive": 5,
            "win": 50,
            "negative": -25,
            "tick": 0
        }
        self.allowed_fps = 30
        self._dir = os.path.dirname(os.path.abspath(__file__))
        # Player walk/idle sprites for both facing directions.
        self.IMAGES = {
            "right": pygame.image.load(os.path.join(self._dir, 'assets/right.png')),
            "right2": pygame.image.load(os.path.join(self._dir, 'assets/right2.png')),
            "left": pygame.image.load(os.path.join(self._dir, 'assets/left.png')),
            "left2": pygame.image.load(os.path.join(self._dir, 'assets/left2.png')),
            "still": pygame.image.load(os.path.join(self._dir, 'assets/still.png'))
        }
    def init(self):
        """(Re)start an episode: build a fresh Board and cache its
        sprite groups on this wrapper."""
        # Create a new instance of the Board class
        self.newGame = Board(
            self.width,
            self.height,
            self.rewards,
            self.rng,
            self._dir)
        # Initialize the fireball timer
        self.fireballTimer = 0
        # Assign groups from the Board instance that was created
        self.playerGroup = self.newGame.playerGroup
        self.wallGroup = self.newGame.wallGroup
        self.ladderGroup = self.newGame.ladderGroup
    def getScore(self):
        """Return the current cumulative score of the running game."""
        return self.newGame.score
    def game_over(self):
        """Return True once the player has run out of lives."""
        return self.newGame.lives <= 0
def step(self, dt):
self.newGame.score += self.rewards["tick"]
# This is where the actual game is run
# Get the appropriate groups
self.fireballGroup = self.newGame.fireballGroup
self.coinGroup = self.newGame.coinGroup
# Create fireballs as required, depending on the number of monsters in
# our game at the moment
if self.fireballTimer == 0:
self.newGame.CreateFireball(
self.newGame.Enemies[0].getPosition(), 0)
elif len(self.newGame.Enemies) >= 2 and self.fireballTimer == 23:
self.newGame.CreateFireball(
self.newGame.Enemies[1].getPosition(), 1)
elif len(self.newGame.Enemies) >= 3 and self.fireballTimer == 46:
self.newGame.CreateFireball(
self.newGame.Enemies[2].getPosition(), 2)
self.fireballTimer = (self.fireballTimer + 1) % 70
# Animate the coin
for coin in self.coinGroup:
coin.animateCoin()
# To check collisions below, we move the player downwards then check
# and move him back to his original location
self.newGame.Players[0].updateY(2)
self.laddersCollidedBelow = self.newGame.Players[
0].checkCollision(self.ladderGroup)
self.wallsCollidedBelow = self.newGame.Players[
0].checkCollision(self.wallGroup)
self.newGame.Players[0].updateY(-2)
# To check for collisions above, we move the player up then check and
# then move him back down
self.newGame.Players[0].updateY(-2)
self.wallsCollidedAbove = self.newGame.Players[
0].checkCollision(self.wallGroup)
self.newGame.Players[0].updateY(2)
# Sets the onLadder state of the player
self.newGame.ladderCheck(
self.laddersCollidedBelow,
self.wallsCollidedBelow,
self.wallsCollidedAbove)
for event in pygame.event.get():
# Exit to desktop
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
# Get the ladders collided with the player
self.laddersCollidedExact = self.newGame.Players[
0].checkCollision(self.ladderGroup)
if (event.key == self.actions["jump"] and self.newGame.Players[0].onLadder == 0) or (
event.key == self.actions["up"] and self.laddersCollidedExact):
# Set the player to move up
self.direction = 2
if self.newGame.Players[
0].isJumping == 0 and self.wallsCollidedBelow:
# We can make the player jump and set his
# currentJumpSpeed
self.newGame.Players[0].isJumping = 1
self.newGame.Players[0].currentJumpSpeed = 7
if event.key == self.actions["right"]:
if self.newGame.direction != 4:
self.newGame.direction = 4
self.newGame.cycles = -1 # Reset cycles
self.newGame.cycles = (self.newGame.cycles + 1) % 4
if self.newGame.cycles < 2:
# Display the first image for half the cycles
self.newGame.Players[0].updateWH(self.IMAGES["right"], "H",
self.newGame.Players[0].getSpeed(), 15, 15)
else:
# Display the second image for half the cycles
self.newGame.Players[0].updateWH(self.IMAGES["right2"], "H",
self.newGame.Players[0].getSpeed(), 15, 15)
wallsCollidedExact = self.newGame.Players[
0].checkCollision(self.wallGroup)
if wallsCollidedExact:
# If we have collided a wall, move the player back to
# where he was in the last state
self.newGame.Players[0].updateWH(self.IMAGES["right"], "H",
-self.newGame.Players[0].getSpeed(), 15, 15)
if event.key == self.actions["left"]:
if self.newGame.direction != 3:
self.newGame.direction = 3
self.newGame.cycles = -1 # Reset cycles
self.newGame.cycles = (self.newGame.cycles + 1) % 4
if self.newGame.cycles < 2:
# Display the first image for half the cycles
self.newGame.Players[0].updateWH(self.IMAGES["left"], "H",
-self.newGame.Players[0].getSpeed(), 15, 15)
else:
# Display the second image for half the cycles
self.newGame.Players[0].updateWH(self.IMAGES["left2"], "H",
-self.newGame.Players[0].getSpeed(), 15, 15)
wallsCollidedExact = self.newGame.Players[
0].checkCollision(self.wallGroup)
if wallsCollidedExact:
# If we have collided a wall, move the player back to
# where he was in the last state
self.newGame.Players[0].updateWH(self.IMAGES["left"], "H",
self.newGame.Players[0].getSpeed(), 15, 15)
# If we are on a ladder, then we can move up
if event.key == self.actions[
"up"] and self.newGame.Players[0].onLadder:
self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
-self.newGame.Players[0].getSpeed() / 2, 15, 15)
if len(self.newGame.Players[0].checkCollision(self.ladderGroup)) == 0 or len(
self.newGame.Players[0].checkCollision(self.wallGroup)) != 0:
self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
self.newGame.Players[0].getSpeed() / 2, 15, 15)
# If we are on a ladder, then we can move down
if event.key == self.actions[
"down"] and self.newGame.Players[0].onLadder:
self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
self.newGame.Players[0].getSpeed() / 2, 15, 15)
# Update the player's position and process his jump if he is jumping
self.newGame.Players[0].continuousUpdate(
self.wallGroup, self.ladderGroup)
'''
We use cycles to animate the character, when we change direction we also reset the cycles
We also change the direction according to the key pressed
'''
# Redraws all our instances onto the screen
self.newGame.redrawScreen(self.screen, self.width, self.height)
# Update the fireball and check for collisions with player (ie Kill the
# player)
self.newGame.fireballCheck()
# Collect a coin
coinsCollected = pygame.sprite.spritecollide(
self.newGame.Players[0], self.coinGroup, True)
self.newGame.coinCheck(coinsCollected)
# Check if you have reached the princess
self.newGame.checkVictory()
# Update all the monsters
for enemy in self.newGame.Enemies:
enemy.continuousUpdate(self.wallGroup, self.ladderGroup)
| {
"repo_name": "ntasfi/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/__init__.py",
"copies": "1",
"size": "9882",
"license": "mit",
"hash": -5214852733424396000,
"line_mean": 41.9652173913,
"line_max": 104,
"alpha_frac": 0.5297510625,
"autogenerated": false,
"ratio": 4.041717791411043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5071468853911042,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import sys
from pygame.constants import K_a, K_d, K_SPACE, K_w, K_s, QUIT, KEYDOWN
from board import Board
from .. import base
import numpy as np
import os
class MonsterKong(base.PyGameWrapper):
    """PLE game wrapper for MonsterKong, a Donkey-Kong-style platformer.

    Level layout, sprites and collision logic live in ``Board``; this class
    adapts them to the PyGameWrapper lifecycle
    (``init`` / ``getScore`` / ``game_over`` / ``step``).
    """
    def __init__(self):
        """
        Parameters
        ----------
        None
        """
        self.height = 465
        self.width = 500
        # Map PLE action names onto pygame key constants.
        actions = {
            "left": K_a,
            "right": K_d,
            "jump": K_SPACE,
            "up": K_w,
            "down": K_s
        }
        base.PyGameWrapper.__init__(
            self, self.width, self.height, actions=actions)
        # Reward values: coin pickup (positive), reaching the goal (win),
        # losing a life (negative), per-frame reward (tick).
        self.rewards = {
            "positive": 5,
            "win": 50,
            "negative": -25,
            "tick": 0
        }
        self.allowed_fps = 30
        self._dir = os.path.dirname(os.path.abspath(__file__))
        # Two frames per walking direction give a simple walk animation.
        self.IMAGES = {
            "right": pygame.image.load(os.path.join(self._dir, 'assets/right.png')),
            "right2": pygame.image.load(os.path.join(self._dir, 'assets/right2.png')),
            "left": pygame.image.load(os.path.join(self._dir, 'assets/left.png')),
            "left2": pygame.image.load(os.path.join(self._dir, 'assets/left2.png')),
            "still": pygame.image.load(os.path.join(self._dir, 'assets/still.png'))
        }
    def init(self):
        """Start a fresh episode: build a new Board and cache its sprite groups."""
        # Create a new instance of the Board class
        self.newGame = Board(
            self.width,
            self.height,
            self.rewards,
            self.rng,
            self._dir)
        # Initialize the fireball timer
        self.fireballTimer = 0
        # Assign groups from the Board instance that was created
        self.playerGroup = self.newGame.playerGroup
        self.wallGroup = self.newGame.wallGroup
        self.ladderGroup = self.newGame.ladderGroup
    def getScore(self):
        """Return the cumulative score tracked by the Board."""
        return self.newGame.score
    def game_over(self):
        """The episode ends once the player has no lives left."""
        return self.newGame.lives <= 0
    def step(self, dt):
        """Advance the game by one frame: spawn fireballs, animate coins,
        process keyboard events for the player, then update and redraw
        every sprite.

        NOTE(review): ``dt`` is unused; stepping assumes the fixed
        ``allowed_fps`` frame rate -- confirm before varying step size.
        """
        self.newGame.score += self.rewards["tick"]
        # This is where the actual game is run
        # Get the appropriate groups
        self.fireballGroup = self.newGame.fireballGroup
        self.coinGroup = self.newGame.coinGroup
        # Create fireballs as required, depending on the number of monsters in
        # our game at the moment
        if self.fireballTimer == 0:
            self.newGame.CreateFireball(
                self.newGame.Enemies[0].getPosition(), 0)
        elif len(self.newGame.Enemies) >= 2 and self.fireballTimer == 23:
            self.newGame.CreateFireball(
                self.newGame.Enemies[1].getPosition(), 1)
        elif len(self.newGame.Enemies) >= 3 and self.fireballTimer == 46:
            self.newGame.CreateFireball(
                self.newGame.Enemies[2].getPosition(), 2)
        self.fireballTimer = (self.fireballTimer + 1) % 70
        # Animate the coin
        for coin in self.coinGroup:
            coin.animateCoin()
        # To check collisions below, we move the player downwards then check
        # and move him back to his original location
        self.newGame.Players[0].updateY(2)
        self.laddersCollidedBelow = self.newGame.Players[
            0].checkCollision(self.ladderGroup)
        self.wallsCollidedBelow = self.newGame.Players[
            0].checkCollision(self.wallGroup)
        self.newGame.Players[0].updateY(-2)
        # To check for collisions above, we move the player up then check and
        # then move him back down
        self.newGame.Players[0].updateY(-2)
        self.wallsCollidedAbove = self.newGame.Players[
            0].checkCollision(self.wallGroup)
        self.newGame.Players[0].updateY(2)
        # Sets the onLadder state of the player
        self.newGame.ladderCheck(
            self.laddersCollidedBelow,
            self.wallsCollidedBelow,
            self.wallsCollidedAbove)
        for event in pygame.event.get():
            # Exit to desktop
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                # Get the ladders collided with the player
                self.laddersCollidedExact = self.newGame.Players[
                    0].checkCollision(self.ladderGroup)
                if (event.key == self.actions["jump"] and self.newGame.Players[0].onLadder == 0) or (
                        event.key == self.actions["up"] and self.laddersCollidedExact):
                    # Set the player to move up
                    # NOTE(review): this sets direction on the wrapper while
                    # the movement code below uses self.newGame.direction --
                    # confirm which attribute is authoritative.
                    self.direction = 2
                    if self.newGame.Players[
                            0].isJumping == 0 and self.wallsCollidedBelow:
                        # We can make the player jump and set his
                        # currentJumpSpeed
                        self.newGame.Players[0].isJumping = 1
                        self.newGame.Players[0].currentJumpSpeed = 7
                if event.key == self.actions["right"]:
                    if self.newGame.direction != 4:
                        self.newGame.direction = 4
                        self.newGame.cycles = -1  # Reset cycles
                    self.newGame.cycles = (self.newGame.cycles + 1) % 4
                    if self.newGame.cycles < 2:
                        # Display the first image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["right"], "H",
                                                        self.newGame.Players[0].getSpeed(), 15, 15)
                    else:
                        # Display the second image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["right2"], "H",
                                                        self.newGame.Players[0].getSpeed(), 15, 15)
                    wallsCollidedExact = self.newGame.Players[
                        0].checkCollision(self.wallGroup)
                    if wallsCollidedExact:
                        # If we have collided a wall, move the player back to
                        # where he was in the last state
                        self.newGame.Players[0].updateWH(self.IMAGES["right"], "H",
                                                        -self.newGame.Players[0].getSpeed(), 15, 15)
                if event.key == self.actions["left"]:
                    if self.newGame.direction != 3:
                        self.newGame.direction = 3
                        self.newGame.cycles = -1  # Reset cycles
                    self.newGame.cycles = (self.newGame.cycles + 1) % 4
                    if self.newGame.cycles < 2:
                        # Display the first image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["left"], "H",
                                                        -self.newGame.Players[0].getSpeed(), 15, 15)
                    else:
                        # Display the second image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["left2"], "H",
                                                        -self.newGame.Players[0].getSpeed(), 15, 15)
                    wallsCollidedExact = self.newGame.Players[
                        0].checkCollision(self.wallGroup)
                    if wallsCollidedExact:
                        # If we have collided a wall, move the player back to
                        # where he was in the last state
                        self.newGame.Players[0].updateWH(self.IMAGES["left"], "H",
                                                        self.newGame.Players[0].getSpeed(), 15, 15)
                # If we are on a ladder, then we can move up
                if event.key == self.actions[
                        "up"] and self.newGame.Players[0].onLadder:
                    self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
                                                    -self.newGame.Players[0].getSpeed() / 2, 15, 15)
                    if len(self.newGame.Players[0].checkCollision(self.ladderGroup)) == 0 or len(
                            self.newGame.Players[0].checkCollision(self.wallGroup)) != 0:
                        self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
                                                        self.newGame.Players[0].getSpeed() / 2, 15, 15)
                # If we are on a ladder, then we can move down
                if event.key == self.actions[
                        "down"] and self.newGame.Players[0].onLadder:
                    self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
                                                    self.newGame.Players[0].getSpeed() / 2, 15, 15)
        # Update the player's position and process his jump if he is jumping
        self.newGame.Players[0].continuousUpdate(
            self.wallGroup, self.ladderGroup)
        '''
        We use cycles to animate the character, when we change direction we also reset the cycles
        We also change the direction according to the key pressed
        '''
        # Redraws all our instances onto the screen
        self.newGame.redrawScreen(self.screen, self.width, self.height)
        # Update the fireball and check for collisions with player (ie Kill the
        # player)
        self.newGame.fireballCheck()
        # Collect a coin
        coinsCollected = pygame.sprite.spritecollide(
            self.newGame.Players[0], self.coinGroup, True)
        self.newGame.coinCheck(coinsCollected)
        # Check if you have reached the princess
        self.newGame.checkVictory()
        # Update all the monsters
        for enemy in self.newGame.Enemies:
            enemy.continuousUpdate(self.wallGroup, self.ladderGroup)
| {
"repo_name": "EndingCredits/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/__init__.py",
"copies": "1",
"size": "9803",
"license": "mit",
"hash": 1008047170254557200,
"line_mean": 41.9956140351,
"line_max": 104,
"alpha_frac": 0.527287565,
"autogenerated": false,
"ratio": 4.04748142031379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.507476898531379,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
import sys
from pygame.locals import K_a, K_d, K_SPACE, K_w, K_s, QUIT, KEYDOWN
from board import Board
from .. import base
import numpy as np
import os
'''
This class defines the logic of the game and how player input is taken etc
We run one instance of this class at the start of the game, and this instance manages the game for us.
'''
class DonkeyKong(base.Game):
    """Donkey-Kong-style platformer adapted to the PLE base.Game interface.

    All level/sprite/collision logic lives in ``Board``; this class wires
    it into the ``init`` / ``getScore`` / ``game_over`` / ``step`` lifecycle.
    """
    def __init__(self):
        '''
        Set the height and width for the game element
        FPS is set to 30 frames per second
        '''
        self.height = 520
        self.width = 1200
        # Map PLE action names onto pygame key constants.
        actions = {
            "left": K_a,
            "right": K_d,
            "jump": K_SPACE,
            "up": K_w,
            "down": K_s
        }
        base.Game.__init__(self, self.width, self.height, actions=actions)
        # Reward values: coin pickup (positive), reaching the goal (win),
        # losing a life (negative), per-frame reward (tick).
        self.rewards = {
            "positive": 5,
            "win": 50,
            "negative": -25,
            "tick": 0
        }
        self.allowed_fps = 30
        self._dir = os.path.dirname(os.path.abspath(__file__))
        # Two frames per walking direction give a simple walk animation.
        self.IMAGES = {
            "right": pygame.image.load(os.path.join(self._dir, 'assets/right.png')),
            "right2": pygame.image.load(os.path.join(self._dir, 'assets/right2.png')),
            "left": pygame.image.load(os.path.join(self._dir, 'assets/left.png')),
            "left2": pygame.image.load(os.path.join(self._dir, 'assets/left2.png')),
            "still": pygame.image.load(os.path.join(self._dir, 'assets/still.png'))
        }
    def init(self):
        """Start a fresh episode: build a new Board and cache its sprite groups."""
        # Create a new instance of the Board class
        self.newGame = Board(self.width, self.height, self.rewards, self.rng, self._dir)
        # Initialize the fireball timer
        self.fireballTimer = 0
        # Assign groups from the Board instance that was created
        self.playerGroup = self.newGame.playerGroup
        self.wallGroup = self.newGame.wallGroup
        self.ladderGroup = self.newGame.ladderGroup
    def getScore(self):
        """Return the cumulative score tracked by the Board."""
        return self.newGame.score
    def game_over(self):
        """The episode ends once the player has no lives left."""
        return self.newGame.lives <= 0
    def step(self, dt):
        """Advance the game by one frame: spawn fireballs, animate coins,
        process keyboard events for the player, then update and redraw
        every sprite.

        NOTE(review): ``dt`` is unused; stepping assumes the fixed
        ``allowed_fps`` frame rate -- confirm before varying step size.
        """
        self.newGame.score += self.rewards["tick"]
        # This is where the actual game is run
        # Get the appropriate groups
        self.fireballGroup = self.newGame.fireballGroup
        self.coinGroup = self.newGame.coinGroup
        # Create fireballs as required, depending on the number of Donkey Kongs in our game at the moment
        if self.fireballTimer == 0:
            self.newGame.CreateFireball(self.newGame.Enemies[0].getPosition(), 0)
        elif len(self.newGame.Enemies) >= 2 and self.fireballTimer == 23:
            self.newGame.CreateFireball(self.newGame.Enemies[1].getPosition(), 1)
        elif len(self.newGame.Enemies) >= 3 and self.fireballTimer == 46:
            self.newGame.CreateFireball(self.newGame.Enemies[2].getPosition(), 2)
        self.fireballTimer = (self.fireballTimer + 1) % 70
        # Animate the coin
        for coin in self.coinGroup:
            coin.animateCoin()
        # To check collisions below, we move the player downwards then check and move him back to his original location
        self.newGame.Players[0].updateY(2)
        self.laddersCollidedBelow = self.newGame.Players[0].checkCollision(self.ladderGroup)
        self.wallsCollidedBelow = self.newGame.Players[0].checkCollision(self.wallGroup)
        self.newGame.Players[0].updateY(-2)
        # To check for collisions above, we move the player up then check and then move him back down
        self.newGame.Players[0].updateY(-2)
        self.wallsCollidedAbove = self.newGame.Players[0].checkCollision(self.wallGroup)
        self.newGame.Players[0].updateY(2)
        # Sets the onLadder state of the player
        self.newGame.ladderCheck(self.laddersCollidedBelow, self.wallsCollidedBelow, self.wallsCollidedAbove)
        for event in pygame.event.get():
            # Exit to desktop
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                # Get the ladders collided with the player
                self.laddersCollidedExact = self.newGame.Players[0].checkCollision(self.ladderGroup)
                if (event.key == self.actions["jump"] and self.newGame.Players[0].onLadder == 0) or (
                        event.key == self.actions["up"] and self.laddersCollidedExact):
                    # Set the player to move up
                    # NOTE(review): this sets direction on the wrapper while
                    # the movement code below uses self.newGame.direction --
                    # confirm which attribute is authoritative.
                    self.direction = 2
                    if self.newGame.Players[0].isJumping == 0 and self.wallsCollidedBelow:
                        # We can make the player jump and set his currentJumpSpeed
                        self.newGame.Players[0].isJumping = 1
                        self.newGame.Players[0].currentJumpSpeed = 7
                if event.key == self.actions["right"]:
                    if self.newGame.direction != 4:
                        self.newGame.direction = 4
                        self.newGame.cycles = -1  # Reset cycles
                    self.newGame.cycles = (self.newGame.cycles + 1) % 10
                    if self.newGame.cycles < 5:
                        # Display the first image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["right"], "H",
                                                         self.newGame.Players[0].getSpeed(), 15, 15)
                    else:
                        # Display the second image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["right2"], "H",
                                                         self.newGame.Players[0].getSpeed(), 15, 15)
                    wallsCollidedExact = self.newGame.Players[0].checkCollision(self.wallGroup)
                    if wallsCollidedExact:
                        # If we have collided a wall, move the player back to where he was in the last state
                        self.newGame.Players[0].updateWH(self.IMAGES["right"], "H",
                                                         -self.newGame.Players[0].getSpeed(), 15, 15)
                if event.key == self.actions["left"]:
                    if self.newGame.direction != 3:
                        self.newGame.direction = 3
                        self.newGame.cycles = -1  # Reset cycles
                    self.newGame.cycles = (self.newGame.cycles + 1) % 10
                    if self.newGame.cycles < 5:
                        # Display the first image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["left"], "H",
                                                         -self.newGame.Players[0].getSpeed(), 15, 15)
                    else:
                        # Display the second image for half the cycles
                        self.newGame.Players[0].updateWH(self.IMAGES["left2"], "H",
                                                         -self.newGame.Players[0].getSpeed(), 15, 15)
                    wallsCollidedExact = self.newGame.Players[0].checkCollision(self.wallGroup)
                    if wallsCollidedExact:
                        # If we have collided a wall, move the player back to where he was in the last state
                        self.newGame.Players[0].updateWH(self.IMAGES["left"], "H",
                                                         self.newGame.Players[0].getSpeed(), 15, 15)
                # If we are on a ladder, then we can move up
                if event.key == self.actions["up"] and self.newGame.Players[0].onLadder:
                    self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
                                                     -self.newGame.Players[0].getSpeed() / 2, 15, 15)
                    if len(self.newGame.Players[0].checkCollision(self.ladderGroup)) == 0 or len(
                            self.newGame.Players[0].checkCollision(self.wallGroup)) != 0:
                        self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
                                                         self.newGame.Players[0].getSpeed() / 2, 15, 15)
                # If we are on a ladder, then we can move down
                if event.key == self.actions["down"] and self.newGame.Players[0].onLadder:
                    self.newGame.Players[0].updateWH(self.IMAGES["still"], "V",
                                                     self.newGame.Players[0].getSpeed() / 2, 15, 15)
        # Update the player's position and process his jump if he is jumping
        self.newGame.Players[0].continuousUpdate(self.wallGroup, self.ladderGroup)
        '''
        We use cycles to animate the character, when we change direction we also reset the cycles
        We also change the direction according to the key pressed
        '''
        # Redraws all our instances onto the screen
        self.newGame.redrawScreen(self.screen, self.width, self.height)
        # Update the fireball and check for collisions with player (ie Kill the player)
        self.newGame.fireballCheck()
        # Collect a coin
        coinsCollected = pygame.sprite.spritecollide(self.newGame.Players[0], self.coinGroup, True)
        self.newGame.coinCheck(coinsCollected)
        # Check if you have reached the princess
        self.newGame.checkVictory()
        # Update all the Donkey Kongs
        for enemy in self.newGame.Enemies:
            enemy.continuousUpdate(self.wallGroup, self.ladderGroup)
if __name__ == "__main__":
    pygame.init()
    # Instantiate the Game class and run the game standalone (outside PLE).
    createdGame = DonkeyKong()
    createdGame.screen = pygame.display.set_mode((1200, 520))
    createdGame.clock = pygame.time.Clock()
    createdGame.rng = np.random.RandomState(24)
    createdGame.init()
    while True:
        # BUG FIX: previously a brand-new Clock was constructed every frame,
        # so tick_busy_loop() measured time since construction (always ~0)
        # instead of time since the previous frame; reuse the game's clock.
        dt = createdGame.clock.tick_busy_loop(30)
        createdGame.step(dt)
        pygame.display.update()
| {
"repo_name": "erilyth/PyGame-Learning-Environment",
"path": "ple/games/donkeykong/__init__.py",
"copies": "1",
"size": "9974",
"license": "mit",
"hash": 563379690036897150,
"line_mean": 45.8262910798,
"line_max": 119,
"alpha_frac": 0.5638660517,
"autogenerated": false,
"ratio": 3.8945724326434985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4958438484343498,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
class OnBoard(pygame.sprite.Sprite):
    """Base sprite for every inanimate board element (coins, ladders, walls).

    Stores a logical center position and the 15x15 scaled image/rect pair
    that pygame's sprite-group drawing machinery expects, so subclasses
    only need to supply their raw artwork and a center point.
    """
    def __init__(self, raw_image, position):
        pygame.sprite.Sprite.__init__(self)
        self.__position = position
        # Normalize every board object to a 15x15 tile; .image and .rect
        # are required by the sprite draw() machinery.
        self.image = pygame.transform.scale(raw_image, (15, 15))
        self.rect = self.image.get_rect()
        self.rect.center = self.__position
    def setCenter(self, position):
        """Reposition only the drawing rect, leaving the logical position alone."""
        self.rect.center = position
    def getPosition(self):
        """Return the logical (x, y) center of this object."""
        return self.__position
    def setPosition(self, position):
        """Record a new logical (x, y) center for this object."""
        self.__position = position
    def updateImage(self, raw_image):  # Abstract Method
        """Swap the sprite image; each subclass picks its own target size."""
        raise NotImplementedError("Subclass must implement this")
    def modifySize(self, raw_image, height, width):
        """Replace the image with *raw_image* rescaled to (width, height)."""
        self.image = pygame.transform.scale(raw_image, (width, height))
| {
"repo_name": "ntasfi/PyGame-Learning-Environment",
"path": "ple/games/monsterkong/onBoard.py",
"copies": "2",
"size": "1433",
"license": "mit",
"hash": -5277542143081199000,
"line_mean": 35.7435897436,
"line_max": 113,
"alpha_frac": 0.6489881368,
"autogenerated": false,
"ratio": 4.252225519287834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000773485050978695,
"num_lines": 39
} |
__author__ = 'Batchu Vishal'
import pygame
'''
This class defines all inanimate objects that we need to display on our board.
Any object that is on the board and not a person, comes under this class (ex. Coins,Ladders,Walls etc)
Sets up the image and its position for all its child classes.
'''
class OnBoard(pygame.sprite.Sprite):
    """Base sprite for the board's inanimate objects (coins, ladders, walls).

    Holds a logical center position plus the 15x15 image/rect pair that
    pygame's sprite-group drawing expects.
    """
    def __init__(self, raw_image, position):
        super(OnBoard, self).__init__()
        self.__position = position
        # Every board object is normalized to a 15x15 tile.
        self.image = pygame.transform.scale(raw_image, (15, 15))
        self.rect = self.image.get_rect()
        self.rect.center = self.__position
    # --- position accessors -------------------------------------------
    def setCenter(self, position):
        """Move only the drawing rect; the logical position is untouched."""
        self.rect.center = position
    def getPosition(self):
        """Return the logical (x, y) center."""
        return self.__position
    def setPosition(self, position):
        """Store a new logical (x, y) center."""
        self.__position = position
    # --- image handling -----------------------------------------------
    def updateImage(self, raw_image):  # Abstract Method
        """Swap the sprite image; concrete subclasses choose the size."""
        raise NotImplementedError("Subclass must implement this")
    def modifySize(self, raw_image, height, width):
        """Install *raw_image* rescaled to (width, height) pixels."""
        self.image = pygame.transform.scale(raw_image, (width, height))
| {
"repo_name": "erilyth/PyGame-Learning-Environment",
"path": "ple/games/donkeykong/onBoard.py",
"copies": "1",
"size": "1403",
"license": "mit",
"hash": -645563716666976900,
"line_mean": 35.9210526316,
"line_max": 115,
"alpha_frac": 0.6585887384,
"autogenerated": false,
"ratio": 4.188059701492537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5346648439892536,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Batchu Vishal'
import pygame
'''
This class defines all living things in the game, ex.Donkey Kong, Player etc
Each of these objects can move in any direction specified.
'''
class Person(pygame.sprite.Sprite):
    """Base sprite for every living thing in the game (player, enemies).

    Tracks a logical center position and the sprite image/rect; subclasses
    must provide a movement speed (getSpeed/setSpeed) and a per-frame
    continuousUpdate implementation.
    """
    def __init__(self, raw_image, position):
        super(Person, self).__init__()
        self.__position = position
        # .image and .rect are required by pygame's sprite drawing machinery.
        self.image = pygame.transform.scale(raw_image, (15, 15)).convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = self.__position
    def getSpeed(self):  # Abstract method
        """Subclasses must expose their movement speed."""
        raise NotImplementedError("Subclass must implement this")
    def setSpeed(self):  # Abstract method
        """Subclasses must allow their movement speed to be set."""
        raise NotImplementedError("Subclass must implement this")
    def setCenter(self, position):
        """Reposition only the drawing rect, leaving the logical position alone."""
        self.rect.center = position
    def getPosition(self):
        """Return the logical (x, y) center of this person."""
        return self.__position
    def setPosition(self, position):
        """Record a new logical (x, y) center for this person."""
        self.__position = position
    def updateWH(self, raw_image, direction, value, height, width):
        """Shift the person along the "H" (x) or "V" (y) axis by *value*
        pixels and install *raw_image* rescaled via (height, width).

        NOTE(review): the scale call receives (height, width) in that order,
        which looks swapped relative to pygame's (width, height) convention;
        all in-file callers pass 15, 15 so it is currently harmless --
        confirm before passing non-square sizes.
        """
        px, py = self.__position
        if direction == "H":
            px += value
        if direction == "V":
            py += value
        self.__position = (px, py)
        self.image = pygame.transform.scale(raw_image, (height, width))
        self.rect.center = self.__position
    def updateY(self, value):
        """Shift the person vertically by *value* pixels."""
        px, py = self.__position
        self.__position = (px, py + value)
        self.rect.center = self.__position
    def checkCollision(self, colliderGroup):
        """Return the sprites from *colliderGroup* this person overlaps."""
        return pygame.sprite.spritecollide(self, colliderGroup, False)
    def continuousUpdate(self, GroupList, GroupList2):
        """Per-frame update hook (movement, collisions); abstract."""
        raise NotImplementedError("Subclass must implement this")
| {
"repo_name": "erilyth/PyGame-Learning-Environment",
"path": "ple/games/donkeykong/person.py",
"copies": "1",
"size": "2540",
"license": "mit",
"hash": 7303924271252697000,
"line_mean": 38.6875,
"line_max": 126,
"alpha_frac": 0.6700787402,
"autogenerated": false,
"ratio": 4.254606365159129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424685105359128,
"avg_score": null,
"num_lines": null
} |
import csv
import argparse
import string
class Csv2Aiken:
    """
    CSV (input) file must be formatted as follows:
    Question;Answer;Index;Correct
    What is the correct answer to this question?;Is it this one;A;
    ;Maybe this answer;B;
    ;Possibly this one;C;OK
    ...
    Aiken (output) file will look like these:
    What is the correct answer to this question?
    A. Is it this one
    B. Maybe this answer
    C. Possibly this one
    ANSWER: C
    ...
    """
    _ANSWER = 'ANSWER:'
    _INDEX_SEP = '.'
    # Maps the 1-based row position within a question to its answer letter.
    _INDEX_DICT = {'1': 'A', '2': 'B', '3': 'C', '4': 'D'}
    def __init__(self):
        pass
    def convert(self, infile, outfile):
        """Read *infile* (semicolon-separated CSV) and write *outfile* (AIKEN).

        Fixes vs. the original implementation:
        - ``string.lower(...)``, ``open(..., 'rU')`` and writing str to a
          file opened ``'wb'`` were Python-2-only (the ``string`` function
          form and the ``'U'`` mode no longer exist on Python 3); plain
          text modes and ``str.lower`` behave identically on this data.
        - both files are now closed via ``with`` even if parsing raises.
        """
        with open(infile, mode='r') as _in, open(outfile, mode='w') as _out:
            csvreader = csv.DictReader(_in, dialect='excel', delimiter=';')
            i = 0  # 1-based position of the current answer within its question
            for row in csvreader:
                i += 1
                _question = '{0}\n'.format(row['Question'])
                # Only the first row of each question carries the question text.
                if _question != '\n':
                    _out.write(_question)
                _out.write('{0}{1} {2}\n'.format(row['Index'], self._INDEX_SEP, row['Answer']))
                if row['Correct'].lower() == 'ok':
                    _solution = self._INDEX_DICT[str(i)]
                # Every question is assumed to have exactly 3 answer rows.
                if i == 3:
                    _out.write('{0} {1}\n\n'.format(self._ANSWER, _solution))
                    i = 0
if __name__ == "__main__":
    # Command-line entry point: -i <input.csv> -o <output.aiken>
    parser = argparse.ArgumentParser(description='Acquire input CSV and output AIKEN files.')
    parser.add_argument('-i', type=str, nargs=1, action='store', dest='_in', help='CSV input file to convert')
    parser.add_argument('-o', type=str, nargs=1, action='store', dest='_out', help='AIKEN converted output file')
    args = parser.parse_args()
    c2a = Csv2Aiken()
    # nargs=1 wraps each value in a one-element list, hence the [0] indexing.
    c2a.convert(infile=args._in[0], outfile=args._out[0])
| {
"repo_name": "bateman/mood-c2a",
"path": "moodc2a/converter.py",
"copies": "1",
"size": "1995",
"license": "mit",
"hash": -2995576909646456300,
"line_mean": 28.3382352941,
"line_max": 113,
"alpha_frac": 0.5243107769,
"autogenerated": false,
"ratio": 3.5625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45868107769,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bauer'
from graphics import GraphicsWindow
def drawHappyFace(canvas, x, y):
    """Draw a 30x30 yellow smiling face with its top-left corner at (x, y)."""
    # Face outline
    canvas.setColor("yellow")
    canvas.setOutline("black")
    canvas.drawOval(x, y, 30, 30)
    # Eyes
    canvas.setColor("black")
    canvas.drawOval(x + 8, y + 10, 5, 5)
    canvas.drawOval(x + 18, y + 10, 5, 5)
    # Smile: three short segments dipping down then back up
    mouth = [(x + 10, y + 22), (x + 13, y + 25), (x + 17, y + 25), (x + 20, y + 22)]
    for (x1, y1), (x2, y2) in zip(mouth, mouth[1:]):
        canvas.drawLine(x1, y1, x2, y2)
def drawSadFace(canvas, x, y):
    """Draw a 30x30 yellow frowning face with its top-left corner at (x, y)."""
    # Face outline
    canvas.setColor("yellow")
    canvas.setOutline("black")
    canvas.drawOval(x, y, 30, 30)
    # Eyes
    canvas.setColor("black")
    canvas.drawOval(x + 8, y + 10, 5, 5)
    canvas.drawOval(x + 18, y + 10, 5, 5)
    # Frown: three short segments arcing up then back down
    mouth = [(x + 10, y + 23), (x + 13, y + 20), (x + 17, y + 20), (x + 20, y + 23)]
    for (x1, y1), (x2, y2) in zip(mouth, mouth[1:]):
        canvas.drawLine(x1, y1, x2, y2)
def drawSimpleHistogram(eval,cval,mval,pval):
    # Draws a simple histogram of 4 values - sentiment values from 4 regions
    # Assumes that the values are in the range of 0-10
    #
    # Parameters:
    # - eval - value of the Eastern region
    # - cval - value of the Central region
    # - mval - value of the Mountain region
    # - pval - value of the Pacific region
    win = GraphicsWindow(400, 400)
    canvas = win.canvas()
    wid = 400
    hght = 400
    C = 0.8       # vertical padding factor for each bar row
    facew = 30    # width/height of the face icons flanking each bar
    step = 5      # gap between a face and its bar
    if ((wid-(80+2*facew)) < 100) or (hght < 150):
        canvas.drawText(wid/2-10,hght/2-10,"Oops! Window dimensions too small!")
    else:
        wuse = wid-(80+2*facew)   # horizontal space available for the bars
        huse = (hght-120)/5       # vertical space allotted per bar row
        barx = 110+step  # 80 plus width of face, which is 30, plus step
        endofbar = wid-facew-step
        canvas.drawLine(75, 0, 75, hght)
        # Draw bar for East
        canvas.drawText(2, huse, "Eastern")
        drawSadFace(canvas, 80, C*huse)
        lngth = wuse*eval/10.0
        canvas.setColor(240,0,0)
        canvas.setOutline("black")
        canvas.drawRectangle(barx, C*huse, lngth, facew)
        drawHappyFace(canvas,endofbar,C*huse)
        # Draw bar for Central
        canvas.drawText(2, 2*huse+facew, "Central")
        drawSadFace(canvas, 80, (1+C)*huse+facew)
        lngth = wuse*cval/10.0
        canvas.setColor(120,240,120)
        canvas.setOutline("black")
        canvas.drawRectangle(barx, (1+C)*huse+facew, lngth, facew)
        drawHappyFace(canvas, endofbar, (1+C)*huse+facew)
        # Draw bar for Mountain
        canvas.drawText(2, 3*huse+2*facew, "Mountain")
        drawSadFace(canvas, 80, (2+C)*huse+2*facew)
        lngth = wuse*mval/10.0
        canvas.setColor(0,0,240)
        canvas.setOutline("black")
        canvas.drawRectangle(barx, (2+C)*huse+2*facew, lngth, facew)
        drawHappyFace(canvas, endofbar, (2+C)*huse+2*facew)
        # Draw bar for Pacific
        canvas.drawText(2, 4*huse+3*facew, "Pacific")
        drawSadFace(canvas, 80, (3+C)*huse+3*facew)
        # BUG FIX: this bar previously reused mval (Mountain); it must use pval.
        lngth = wuse*pval/10.0
        canvas.setColor(120,120,120)
        canvas.setOutline("black")
        canvas.drawRectangle(barx, (3+C)*huse+3*facew, lngth, facew)
        drawHappyFace(canvas, endofbar, (3+C)*huse+3*facew)
    win.wait()
| {
"repo_name": "joanna-chen/schoolwork",
"path": "Tweets/happy_histogram.py",
"copies": "1",
"size": "3653",
"license": "mit",
"hash": -1931062101751003000,
"line_mean": 36.0520833333,
"line_max": 80,
"alpha_frac": 0.5932110594,
"autogenerated": false,
"ratio": 2.8078401229823213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39010511823823213,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bbowman@pacificbiosciences.com'
from collections import namedtuple
from base import BaseTypingReader
from utils import sorted_set, sample_from_file
HlaToolsRecord = namedtuple('HlaToolsRecord', 'name glen gtype gpctid nmis indel clen ctype cpctid type')


class HlaToolsReader(BaseTypingReader):
    """
    Reader for HlaTools typing output.

    Parses a whitespace-delimited typing table (one HlaToolsRecord per data
    row, first row is a header) and exposes the typings, sequence names,
    loci and sample barcode of the file.
    """

    def __init__(self, filename):
        self._filename = filename
        self._barcodes = [sample_from_file(filename)]
        self._records = self._parse_records()
        self._types_by_name = dict((rec.name, rec.type) for rec in self._records)
        self._loci = sorted_set([rec.gtype.split('*')[0] for rec in self._records])

    def _parse_records(self):
        """Parse every data row into an HlaToolsRecord, sorted by name."""
        with open(self._filename) as handle:
            handle.next()  # Skip header row
            parsed = [HlaToolsRecord._make(row.strip().split()) for row in handle]
        return sorted(parsed, key=lambda rec: rec.name)

    @property
    def records(self):
        """All parsed typing records, sorted by sequence name."""
        return self._records

    @property
    def names(self):
        """Sorted names of the sequences typed in this file."""
        return sorted(self._types_by_name)

    @property
    def types(self):
        """(name, typing) pairs for every record."""
        return [(rec.name, rec.type) for rec in self._records]

    @property
    def loci(self):
        """Distinct loci (gene prefix before '*') covered by this file."""
        return self._loci

    @property
    def barcodes(self):
        """Sample barcodes associated with this file."""
        return self._barcodes

    def __getitem__(self, item):
        """Return the typing for the given sequence name."""
        return self._types_by_name[item]
"repo_name": "bnbowman/pbhml",
"path": "pbhml/reader/HlaToolsReader.py",
"copies": "1",
"size": "1887",
"license": "bsd-3-clause",
"hash": -1791239878121173200,
"line_mean": 28.5,
"line_max": 105,
"alpha_frac": 0.6131425543,
"autogenerated": false,
"ratio": 4.075593952483802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002160475907469595,
"num_lines": 64
} |
__author__ = 'bbowman@pacificbiosciences.com'
import os
from job import SmrtAnalysisJob
from reader import HlaToolsReader
from SmrtHmlReport import SmrtHmlReport
class SmrtHmlReportWriter:
    """A Class for writing multiple HML Reports from SMRT Sequencing data

    Accepts either file paths or pre-constructed reader objects for both
    the typing results and the SMRT Analysis job.
    """

    def __init__(self, typing, job, output=''):
        self._typing = self._set_typing( typing )
        self._job = self._set_job( job )
        # Default the output location to the current working directory.
        self._output = os.path.abspath(output) if output else os.getcwd()

    def _set_typing(self, typing):
        """Coerce `typing` to an HlaToolsReader; accepts a path or a reader.

        :raises ValueError: if a path cannot be parsed as a typing result
        :raises TypeError: for any other argument type
        """
        if isinstance(typing, str):
            try:
                return HlaToolsReader(typing)
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; parse failures still map to ValueError.
            except Exception:
                raise ValueError("Not a recognized HLA Typing result '{0}'".format(typing))
        if isinstance(typing, HlaToolsReader):
            return typing
        raise TypeError("Not a recognized HLA Typing result '{0}'".format(typing))

    def _set_job(self, job):
        """Coerce `job` to a SmrtAnalysisJob; accepts a path or a job object.

        :raises ValueError: if a path cannot be parsed as a SMRT Analysis job
        :raises TypeError: for any other argument type
        """
        if isinstance(job, str):
            try:
                return SmrtAnalysisJob(job)
            # Narrowed from a bare `except:` -- see _set_typing.
            except Exception:
                raise ValueError("Not a recognized SMRT Analysis Job '{0}'".format(job))
        if isinstance(job, SmrtAnalysisJob):
            return job
        raise TypeError("Not a recognized SMRT Analysis Job '{0}'".format(job))

    @property
    def sequence_barcodes(self):
        """Barcodes present in the sequence job."""
        return self._job.barcodes

    @property
    def typing_barcodes(self):
        """Barcodes present in the typing results."""
        return self._typing.barcodes

    @property
    def barcodes(self):
        """Barcodes present in BOTH inputs, sorted."""
        return sorted(set(self.sequence_barcodes) & set(self.typing_barcodes))

    def write_report(self, barcode, output_file):
        """Write one HML report for `barcode` to `output_file`.

        Sequences without a typing entry are silently skipped.
        """
        assert barcode in self.barcodes
        report = SmrtHmlReport()
        records = self._job.sequence_records(barcode)
        for record in records:
            # Sequence names may carry a description; key on the first token.
            name = record.name.strip().split()[0]
            try:
                typing = self._typing[name]
            except KeyError:
                continue
            report.add_record(name, record.sequence, typing)
        tree = report.to_tree()
        tree.write(output_file)
"repo_name": "bnbowman/pbhml",
"path": "pbhml/report/SmrtHmlReportWriter.py",
"copies": "1",
"size": "2143",
"license": "bsd-3-clause",
"hash": -2257070360166263000,
"line_mean": 31.4848484848,
"line_max": 91,
"alpha_frac": 0.6005599627,
"autogenerated": false,
"ratio": 4.193737769080235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5294297731780234,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bbowman@pacificbiosciences.com'
import sys
import logging
# Shared log-line layout and timestamp format used by every handler below.
LOG_FORMAT = "%(asctime)s [%(levelname)s - %(module)s] %(message)s"
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
FORMATTER = logging.Formatter(LOG_FORMAT, TIME_FORMAT)


def add_stream_handler(logger, stream=sys.stdout, log_level=logging.INFO):
    """Attach a stream handler using the shared formatter to `logger`."""
    handler = logging.StreamHandler(stream=stream)
    handler.setLevel(log_level)
    handler.setFormatter(FORMATTER)
    logger.addHandler(handler)
def add_file_handler(logger, log_file='hla_pipeline.log', log_level=logging.INFO):
    """Attach a file handler (shared formatter) to `logger` writing to `log_file`."""
    handler = logging.FileHandler(log_file)
    handler.setLevel(log_level)
    handler.setFormatter(FORMATTER)
    logger.addHandler(handler)
def initialize_logger(logger, stream=None, log_file=None, debug=False):
    """Configure `logger` with one stream handler and one file handler.

    The level is DEBUG when `debug` is set, INFO otherwise.  Falsy
    `stream` / `log_file` fall back to the helpers' own defaults
    (stdout and 'hla_pipeline.log' respectively).  Returns the logger.
    """
    log_level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(log_level)
    if stream:
        add_stream_handler(logger, stream=stream, log_level=log_level)
    else:
        add_stream_handler(logger, log_level=log_level)
    if log_file:
        add_file_handler(logger, log_file=log_file, log_level=log_level)
    else:
        add_file_handler(logger, log_level=log_level)
    return logger
"repo_name": "bnbowman/HlaTools",
"path": "src/pbhla/log.py",
"copies": "1",
"size": "1366",
"license": "bsd-3-clause",
"hash": 7307327445073533000,
"line_mean": 33.175,
"line_max": 84,
"alpha_frac": 0.6830161054,
"autogenerated": false,
"ratio": 3.4235588972431077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9511775675974399,
"avg_score": 0.018959865333741773,
"num_lines": 40
} |
__author__ = 'bbowman@pacificbiosciences.com'
import xml.etree.ElementTree as et
from utils import sorted_set, family_from_typing, locus_from_typing
class SmrtHmlRecord:
    """One typed consensus sequence destined for an HML report.

    The gene family and locus are derived from the typing string via the
    `utils` helpers at construction time.
    """

    def __init__(self, name, sequence, typing):
        self._name = name
        self._sequence = sequence
        self._typing = typing
        # Derived fields -- exact string formats depend on
        # family_from_typing/locus_from_typing in utils; confirm there.
        self._family = family_from_typing(typing)
        self._locus = locus_from_typing(typing)

    @property
    def name(self):
        return self._name

    @property
    def sequence(self):
        return self._sequence

    @property
    def typing(self):
        return self._typing

    @property
    def family(self):
        return self._family

    @property
    def locus(self):
        return self._locus

    def __str__(self):
        return '<SmrtHmlRecord: {0}, {1}, {2}>'.format(self.name, self.family, self.locus)
class SmrtHmlReport:
    """A class for representing a SMRT Analysis HML Report

    Collects (name, sequence, typing) records and renders them as an
    HML 0.9.6 XML document, grouped by gene family and locus.
    """

    def __init__(self, center_code="123", id="123456789"):
        # NOTE: `id` shadows the builtin; kept for interface compatibility.
        self._center_code = center_code
        self._id = id
        self._hml = self._initialize_hml()
        self._sample = self._initialize_sample()
        self._records = []
        # Records are folded into the XML tree lazily, exactly once.
        self._processed = False

    @staticmethod
    def _initialize_hml():
        """Create the root <hml> element with the 0.9.6 schema boilerplate."""
        hml = et.Element("hml")
        hml.set("xmlns", "http://schemas.nmdp.org/spec/hml/0.9.6")
        hml.set("xmlns:hml", "http://schemas.nmdp.org/spec/hml/0.9.6")
        hml.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
        hml.set("xsi:schemaLocation",
                "http://schemas.nmdp.org/spec/hml/0.9.6 http://schemas.nmdp.org/spec/hml/0.9.6/hml-0.9.6.xsd")
        hml.set("reporting-center", "789")
        hml.set("project-name", "LAB")
        hml.set("version", "0.9.6")
        return hml

    def _initialize_sample(self):
        """Attach the single <sample> element for this report."""
        sample = et.SubElement(self._hml, "sample")
        sample.set("center-code", self._center_code)
        sample.set("id", self._id)
        return sample

    def add_record(self, name, sequence, typing):
        """Add one typed sequence to the report.

        Must be called before the first render: records added after
        to_tree()/str() are ignored, since processing happens only once.
        """
        record = SmrtHmlRecord(name, sequence, typing)
        self._records.append( record )

    def _ensure_processed(self):
        """Fold accumulated records into the XML tree exactly once.

        Extracted from to_tree()/__str__, which previously duplicated
        this guard inline.
        """
        if not self._processed:
            self._process_records()
            self._processed = True

    def _process_records(self):
        # Iterate over each family, adding a typing tag for each
        for family in self.gene_families:
            typing = et.SubElement(self._sample, "typing")
            typing.set("gene-family", family)
            # TODO(review): hard-coded date -- should presumably be the
            # report generation date.
            typing.set("date", "2014-09-27")
            # Iterate over each locus in that family, adding an sbt tag for each
            for locus in self.loci_for_family(family):
                sbt_elem = et.SubElement(typing, "sbt-ngs")
                sbt_elem.set("locus", locus)
                # Find and process the records for this locus
                self._process_records_for_locus(locus, sbt_elem)

    def _process_records_for_locus(self, locus, sbt_elem):
        """Emit the consensus sequences for one locus under `sbt_elem`."""
        consensus_elem = et.SubElement(sbt_elem, "consensus-sequence")
        target_elem = et.SubElement(consensus_elem, "targeted-region")
        # TODO(review): placeholder genomic coordinates -- confirm the real
        # targeted region before production use.
        target_elem.set("assembly", "GRCh38")
        target_elem.set("contig", "6")
        target_elem.set("start", "29999999")
        target_elem.set("end", "30000000")
        for record in self.records_for_locus(locus):
            sequence_elem = et.SubElement(consensus_elem, "sequence")
            sequence_elem.set("alphabet", "DNA")
            sequence_elem.text = record.sequence

    @property
    def gene_families(self):
        """Distinct gene families across all records."""
        return sorted_set([r.family for r in self._records])

    @property
    def loci(self):
        """Distinct loci across all records."""
        return sorted_set([r.locus for r in self._records])

    def loci_for_family(self, family):
        """Loci whose name starts with `family`."""
        return [l for l in self.loci if l.startswith(family)]

    def records_for_locus(self, locus):
        """All records typed at exactly `locus`."""
        return [r for r in self._records if r.locus == locus]

    def to_tree(self):
        """Return the report as an ElementTree (processes records on first call)."""
        self._ensure_processed()
        return et.ElementTree(self._hml)

    def __str__(self):
        self._ensure_processed()
        tree = et.ElementTree(self._hml)
        root = tree.getroot()
        return et.tostring(root, encoding='utf8', method='xml')
"repo_name": "bnbowman/pbhml",
"path": "pbhml/report/SmrtHmlReport.py",
"copies": "1",
"size": "4284",
"license": "bsd-3-clause",
"hash": 4035088207309334000,
"line_mean": 33.837398374,
"line_max": 110,
"alpha_frac": 0.5971055089,
"autogenerated": false,
"ratio": 3.543424317617866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46405298265178657,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bcarson'
import calendar,time
from datetime import datetime, timedelta
import os,sys
import xively
import requests
import numpy
from scipy.integrate import simps
# Input settings -- all credentials come from the environment; a missing
# variable raises KeyError at import time, which is the desired fail-fast.
XIVELY_FEED_ID = os.environ["XIVELY_FEED_ID"]
XIVELY_API_KEY = os.environ["XIVELY_API_KEY"]
xively_api = xively.XivelyAPIClient(XIVELY_API_KEY)

# Data retrieval interval, in seconds
INTERVAL = 300

# Output settings
PVOUTPUT_SYSTEM_ID = os.environ["PVOUTPUT_SYSTEM_ID"]
PVOUTPUT_API_KEY = os.environ["PVOUTPUT_API_KEY"]
PVOUTPUT_UPLOAD_ENDPOINT = "http://pvoutput.org/service/r2/addoutput.jsp"

# Offset between UTC and local time, captured once at startup and used to
# translate local day boundaries into feed (UTC) timestamps.
UTC_OFFSET_TIMEDELTA = datetime.utcnow() - datetime.now()

# Minimum watt reading treated as generation: the meter measures net flow,
# so small values are household draw rather than solar output (see the
# filtering in process_day).
THRESHOLD = 110
def calculate_area_under_curve(datapoints):
    """Integrate a datapoint series and return the area under the curve.

    The x-axis is hours since the first sample and the y-axis is the
    float-cast datapoint value, so for watt readings the result is
    watt-hours.  Integration uses Simpson's rule.

    Precondition: `datapoints` is time-ordered, non-empty, and already
    filtered by the minimum-value threshold.
    """
    # (removed an unused `num_points` local from the original)
    start_point = calendar.timegm(datapoints[0].at.timetuple())
    yValues = numpy.array([float(point.value) for point in datapoints])
    xValues = numpy.array([float(calendar.timegm(point.at.timetuple()) - start_point) / 3600.0 for point in datapoints])
    try:
        return simps(yValues, xValues, even='avg')
    except TypeError:
        # SciPy >= 1.14 removed the `even` keyword (simps was renamed to
        # simpson); fall back to the default even-interval handling.
        return simps(yValues, xValues)
def get_maximum_datapoint(dataseries):
    """Return the datapoint whose float-cast `.value` is largest.

    Rewritten from the original `reduce`-based scan: `reduce` is not a
    builtin in Python 3, so the module would crash there.  Semantics are
    preserved exactly -- on ties the *latest* datapoint wins.  Raises
    IndexError on an empty series.
    """
    best = dataseries[0]
    for point in dataseries[1:]:
        # `<=` keeps the reduce semantics: the later point wins a tie.
        if float(best.value) <= float(point.value):
            best = point
    return best
def upload_pvoutput_data( date, max_watts, max_watts_time, watt_hours_generated, consumption ):
    """Upload one day's generation summary to the PVOutput addoutput service.

    Retries up to 5 times with a 30-second pause between attempts.
    Returns True on an HTTP 200 response, False once all attempts fail.
    """
    headers = { "X-Pvoutput-Apikey" : PVOUTPUT_API_KEY,
                "X-Pvoutput-SystemId": PVOUTPUT_SYSTEM_ID }
    parameters = { "d": date.strftime("%Y%m%d"),
                   "g": str(watt_hours_generated),
                   "pp": str(max_watts),
                   "pt": max_watts_time.strftime("%H:%M"),
                   "c" : str(consumption) }

    for i in range(0,5):
        result = requests.post(PVOUTPUT_UPLOAD_ENDPOINT, data=parameters, headers=headers)
        print("PV Output response: %s" % result.text)
        if result.status_code == requests.codes.ok:
            return True
        else:
            # BUG FIX: was a bare `sleep(30)`, a NameError -- only the
            # `time` module is imported at file scope.
            time.sleep(30)

    return False
def process_day(day):
    """Summarise one local day of Xively feed data and upload it to PVOutput.

    `day` is a naive local-midnight datetime; the query window is shifted
    into feed (UTC) time via UTC_OFFSET_TIMEDELTA.  Returns the boolean
    result of upload_pvoutput_data.
    """
    start_time = day + UTC_OFFSET_TIMEDELTA
    end_time = start_time + timedelta(days=1)

    print("Retrieving feed data between %s and %s" % (str(start_time), str(end_time)))
    feed = xively_api.feeds.get(XIVELY_FEED_ID, start = start_time, end = end_time)

    # Datastream ids: "0" = temperature, "1" = generated watts, "2" = consumed
    # watts -- presumably fixed by the feed configuration; confirm on Xively.
    temperature_datastream = feed.datastreams.get("0", start = start_time, end = end_time, limit = 1000, interval_type = "discrete", interval = INTERVAL)
    watts_datastream = feed.datastreams.get("1", start = start_time, end = end_time, limit = 1000, interval_type = "discrete", interval = INTERVAL)
    consumed_datastream = feed.datastreams.get("2", start = start_time, end = end_time, limit = 1000, interval_type = "discrete", interval = INTERVAL)

    # Filter the data points, as the device is a flow meter (i.e. when not generating power, it is measuring draw).
    filtered_watts_points = [point for point in watts_datastream.datapoints if float(point.value) > THRESHOLD]

    # Find the point of maximum power generation.
    max_watts_point = get_maximum_datapoint(filtered_watts_points)
    max_watts_time = datetime.fromtimestamp(time.mktime(max_watts_point.at.timetuple())) - UTC_OFFSET_TIMEDELTA

    # Find the point of the highest temperature.
    max_temperature_point = get_maximum_datapoint(temperature_datastream.datapoints)
    max_temperature_time = datetime.fromtimestamp(time.mktime(max_temperature_point.at.timetuple())) - UTC_OFFSET_TIMEDELTA

    # Process power consumption (note: NOT threshold-filtered, unlike watts).
    max_consumption_point = get_maximum_datapoint(consumed_datastream.datapoints)
    max_consumption_time = datetime.fromtimestamp(time.mktime(max_consumption_point.at.timetuple())) - UTC_OFFSET_TIMEDELTA
    total_consumption = calculate_area_under_curve(consumed_datastream.datapoints)

    # Integrate generation into watt-hours.
    watt_hours = calculate_area_under_curve(filtered_watts_points)

    print("Watt hours for %s to %s: %.2f kWh" % (start_time - UTC_OFFSET_TIMEDELTA, end_time - UTC_OFFSET_TIMEDELTA, watt_hours / 1000))
    print("Maximum power generation was %s W at %s" % (max_watts_point.value, max_watts_time))
    print("Maximum temperature was %s degrees at %s" % (max_temperature_point.value, max_temperature_time))
    print("Total power consumption was %.2f kWh (maximum: %.2f W at %s)" % (total_consumption / 1000, float(max_consumption_point.value), max_consumption_time))

    # Midday local time is passed as the report date -- presumably to dodge
    # day-boundary/timezone edge cases; confirm against PVOutput semantics.
    return upload_pvoutput_data( start_time + timedelta(hours=12) - UTC_OFFSET_TIMEDELTA, int(max_watts_point.value), max_watts_time, int(watt_hours), int(total_consumption))
if __name__ == "__main__":
    # With no args: process yesterday.  Otherwise argv gives a half-open
    # local date range: start YYYY-MM-DD (inclusive), end YYYY-MM-DD (exclusive).
    if len(sys.argv) < 2:
        now = datetime.now() - timedelta(days=1)
        start_date = datetime(now.year, now.month, now.day)
        end_date = start_date + timedelta(days=1)
    else:
        start_date = datetime.strptime(sys.argv[1], "%Y-%m-%d")
        end_date = datetime.strptime(sys.argv[2], "%Y-%m-%d")

    number_of_days = (end_date - start_date).days
    # NOTE: xrange -- this script is Python 2 only.
    for i in xrange(0, number_of_days):
        process_day(start_date)
        start_date += timedelta(days=1)
| {
"repo_name": "hebenon/Shamash",
"path": "shamash.py",
"copies": "1",
"size": "5117",
"license": "apache-2.0",
"hash": -6272020048394445000,
"line_mean": 42.3644067797,
"line_max": 174,
"alpha_frac": 0.6898573383,
"autogenerated": false,
"ratio": 3.280128205128205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4469985543428205,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bcarson'
import logging
from threading import Timer
from signals import image_analysis, trigger_event
logger = logging.getLogger('root')


class Monitor(object):
    """Watches image-analysis results and fires trigger events.

    Subscribes to the `image_analysis` signal on construction; when a
    prediction for a source crosses its configured probability threshold,
    schedules a single delayed `trigger_event` notification.  Assumes a
    blinker-style connect/send signal API -- confirm in `signals`.
    """

    def __init__(self, triggers, notification_delay=2):
        # triggers: mapping of prediction label -> probability threshold.
        self.triggers = triggers
        # Seconds to wait before the notification fires (debounce window).
        self.notification_delay = notification_delay
        # Per-source set of labels currently above threshold; used to
        # suppress repeated notifications while a trigger stays active.
        self.active_triggers = dict()
        self.notification_timer = None

        image_analysis.connect(self.handle_image_analysis)

    def send_notification(self, prediction, probability, source, timestamp, image):
        """Emit a trigger_event carrying the offending prediction and frame."""
        logger.debug("Sending trigger: %s (%f) @ %s" % (prediction, probability, str(timestamp)))
        trigger_event.send(self, prediction=prediction, probability=probability,
                           source=source, timestamp=timestamp, image=image)

    def handle_image_analysis(self, sender, **data):
        """Signal handler: compare each prediction against its threshold.

        A label entering the active set schedules one delayed notification;
        a label dropping below threshold is logged and removed so it can
        fire again later.
        """
        # Get predictions
        predictions = data['predictions']
        source = data['source']

        # Check if this is a new source or not.
        if source not in self.active_triggers:
            self.active_triggers[source] = set()

        # Check for a result
        for (prediction, probability) in predictions:
            logger.debug("prediction %s: %f", prediction, probability)

            # The graph uses softmax in the final layer, so it's *unlikely* that this will be useful.
            # That being said, it's possible to configure multiple triggers with low thresholds.
            if prediction in self.triggers and probability >= self.triggers[prediction]:

                # Prevent alarm storms by not acting on active triggers
                if prediction not in self.active_triggers[source]:
                    logger.warning("Trigger event active: %s %f", prediction, probability)
                    self.active_triggers[source].add(prediction)

                    # Only send a notification if one isn't already triggered.
                    # NOTE(review): Timer.isAlive() was removed in Python 3.9;
                    # is_alive() is the forward-compatible spelling.
                    if not self.notification_timer or not self.notification_timer.isAlive():
                        self.notification_timer = Timer(self.notification_delay, self.send_notification,
                                                        (prediction, probability, source, data['timestamp'], data['image']))
                        self.notification_timer.start()
            else:
                # Log any clearing alarms
                if prediction in self.active_triggers[source]:
                    logger.warning("Trigger event ended: %s %f", prediction, probability)
                    self.active_triggers[source].discard(prediction)  # Remove from active triggers (if it exists)
"repo_name": "hebenon/oversight",
"path": "oversight/monitor.py",
"copies": "1",
"size": "2621",
"license": "apache-2.0",
"hash": -6142605878923368000,
"line_mean": 44.2068965517,
"line_max": 124,
"alpha_frac": 0.6230446395,
"autogenerated": false,
"ratio": 4.818014705882353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5941059345382353,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bcox, roconnor'
import urllib2
import json
import sys
from collections import defaultdict
# GroupMe REST API root; all requests below hit v3 endpoints.
baseUrl = 'https://api.groupme.com/v3/'
# Accumulates {'members': [{nickname, user_id}, ...]} for the re-invite call.
members = defaultdict(list)
def main(args):
    """Recreate a GroupMe group: snapshot members, destroy it, recreate it
    with the same name/avatar, and re-invite everyone.

    args: sys.argv -- [script, group_name, api_access_token]
    """
    try:
        global group_name, image_url
        group_name = str(args[1])
        access_token = '?token=' + str(args[2])
        # Look up the existing group's id (also captures its avatar url).
        get_groups = urllib2.Request(baseUrl+'groups'+access_token, headers={'Content-type': 'application/json'})
        get_groups_resp = urllib2.urlopen(get_groups)
        old_group_id = getOldGroupId(json.load(get_groups_resp))
        # Snapshot the current member list into the module-level `members`.
        get_members = urllib2.Request(baseUrl+'groups/'+old_group_id+access_token, headers={'Content-type': 'application/json'})
        get_members_resp = urllib2.urlopen(get_members)
        getMembers(json.load(get_members_resp))
        # data="" forces a POST; the destroy endpoint takes no body.
        destroy = urllib2.Request(baseUrl+'groups/'+old_group_id+'/destroy'+access_token, data="", headers={'Content-type': 'application/json'})
        data = {
            "name": group_name,
            "share": True,
            "image_url": image_url
        }
        create = urllib2.Request(baseUrl+'groups'+access_token, data = json.dumps(data), headers={'Content-type': 'application/json'})
        # Destroy the old group, then create the replacement and re-invite.
        destroy_resp = urllib2.urlopen(destroy)
        create_resp = urllib2.urlopen(create)
        id = getNewGroupId(json.load(create_resp))
        invite = urllib2.Request(baseUrl+'groups/'+id+'/members/add'+access_token, data=json.dumps(members), headers={'Content-type': 'application/json'})
        invite_resp = urllib2.urlopen(invite)
    except TypeError:
        # NOTE(review): a missing argv entry raises IndexError, not TypeError,
        # so this guard likely never fires for the case it was written for.
        print "Check your spelling, Julian"
def getOldGroupId(get_groups_resp):
    """Return the id of the group named `group_name`, capturing its avatar.

    Also stores the group's image_url in the module-level global for the
    later re-create call.  Returns None if no group matches.
    """
    global image_url
    for group in get_groups_resp['response']:
        if group['name'] != group_name:
            continue
        image_url = group['image_url']
        return group['id']
def getMembers(get_members_resp):
    """Append each member's nickname/user_id to the module-level `members`."""
    for member in get_members_resp['response']['members']:
        entry = defaultdict(list)
        entry['nickname'] = member['nickname']
        entry['user_id'] = member['user_id']
        members['members'].append(entry)
def getNewGroupId(create_resp):
    """Extract the id of the newly created group from the API response."""
    response_body = create_resp['response']
    return response_body['id']
if __name__ == '__main__':
    # Entry point: expects argv = [script, group_name, access_token].
    main(sys.argv)
| {
"repo_name": "TerraceBoys/GroupMeScripts",
"path": "killScript.py",
"copies": "1",
"size": "2191",
"license": "mit",
"hash": -5878063591179376000,
"line_mean": 37.4385964912,
"line_max": 154,
"alpha_frac": 0.6289365586,
"autogenerated": false,
"ratio": 3.5977011494252875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47266377080252875,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeggleston'
from unittest import skip
from rexpro.tests.base import BaseRexProTestCase, multi_graph
from rexpro import exceptions
class TestConnection(BaseRexProTestCase):
    """Connection-establishment tests against a live Rexster server."""

    def test_connection_success(self):
        """Smoke test: a default connection can be established."""
        conn = self.get_connection()

    def test_attempting_to_connect_to_an_invalid_graphname_raises_exception(self):
        """Connecting to a nonexistent graph raises RexProConnectionException."""
        with self.assertRaises(exceptions.RexProConnectionException):
            self.get_connection(graphname='nothing')

    @skip
    def test_invalid_connection_info_raises_exception(self):
        pass

    @skip
    def test_call_close_transactions_without_an_open_transaction_fails(self):
        pass

    @skip
    def test_call_open_transaction_with_a_transaction_already_open_fails(self):
        pass
class TestQueries(BaseRexProTestCase):
    """Script-execution tests; require a running Rexster server."""

    @multi_graph
    def test_data_integrity(self):
        """
        Tests that simply being passed through rexster comes unchanged
        """
        conn = self.get_connection(graphname=self.graphname)
        # Echo helper: binds the payload server-side and returns it.
        e = lambda p: conn.execute(
            script='values',
            params={'values':p}
        )

        #test string
        data = e('yea boyeeee')
        assert data== 'yea boyeeee'

        #test int
        data = e(1982)
        assert data == 1982

        #test float
        data = e(3.14)
        assert data == 3.14

        #test dict
        data = e({'blake':'eggleston'})
        assert data == {'blake':'eggleston'}

        #test none
        data = e(None)
        assert data is None

        #test list -- note the transport returns sequences as tuples
        data = e([1,2])
        assert data == (1,2)

    def test_query_isolation(self):
        """ Test that variables defined in one query are not available in subsequent queries """
        conn = self.get_connection()
        conn.execute(
            """
            def one_val = 5
            one_val
            """,
            pretty=True
        )

        # The binding must not survive into a second, separate script.
        with self.assertRaises(exceptions.RexProScriptException):
            r = conn.execute(
                """
                one_val
                """
            )

    def test_element_creation(self):
        """ Tests that vertices and edges can be created and are serialized properly """
        conn = self.get_connection()
        elements = conn.execute(
            """
            def v1 = g.addVertex([prop:6])
            def v2 = g.addVertex([prop:8])
            def e = g.addEdge(v1, v2, 'connects', [prop:10])
            return [v1, v2, e]
            """
        )
        v1, v2, e = elements
        # Properties round-trip, and the edge endpoints reference the vertex ids.
        assert v1['_properties']['prop'] == 6
        assert v2['_properties']['prop'] == 8
        assert e['_properties']['prop'] == 10
        assert e['_outV'] == v1['_id']
        assert e['_inV'] == v2['_id']
class TestTransactions(BaseRexProTestCase):
    """Transaction-isolation tests across two concurrent connections."""

    def test_transaction_isolation(self):
        """ Tests that operations between 2 transactions are isolated """
        conn1 = self.get_connection()
        conn2 = self.get_connection()

        # Skip silently when the backing graph has no transaction support.
        if not conn1.graph_features['supportsTransactions']:
            return

        # Seed three vertices in a committed transaction.
        with conn1.transaction():
            v1, v2, v3 = conn1.execute(
                """
                def v1 = g.addVertex([val:1, str:"vertex 1"])
                def v2 = g.addVertex([val:2, str:"vertex 2"])
                def v3 = g.addVertex([val:3, str:"vertex 3"])
                [v1, v2, v3]
                """
            )

        conn1.open_transaction()
        conn2.open_transaction()

        # Mutate v1 inside conn1's (still uncommitted) transaction...
        v1_1 = conn1.execute(
            """
            def v1 = g.v(eid)
            v1.setProperty("str", "v1")
            v1
            """,
            params={'eid':v1['_id']}
        )
        v1_2 = conn2.execute(
            """
            g.v(eid)
            """,
            params={'eid':v1['_id']}
        )
        # ...conn2 must still observe the original committed value.
        assert v1_2['_properties']['str'] == 'vertex 1'
| {
"repo_name": "bdeggleston/rexpro-python",
"path": "rexpro/tests/test_connection.py",
"copies": "1",
"size": "4023",
"license": "mit",
"hash": 5946972977394876000,
"line_mean": 25.642384106,
"line_max": 100,
"alpha_frac": 0.5279642058,
"autogenerated": false,
"ratio": 3.9910714285714284,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019035634371428,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeggleston'
import json
import re
import struct
from uuid import uuid1, uuid4
import msgpack
from rexpro import exceptions
from rexpro import utils
class MessageTypes(object):
    """
    Enumeration of RexPro send message types
    """
    # Wire-protocol message-type codes, written as the message-type byte of
    # each serialized message (see RexProMessage.serialize).
    ERROR = 0
    SESSION_REQUEST = 1
    SESSION_RESPONSE = 2
    SCRIPT_REQUEST = 3
    # 4 is skipped -- presumably reserved by the RexPro protocol; confirm
    # against the RexPro wire-format specification.
    SCRIPT_RESPONSE = 5
class RexProMessage(object):
    """ Base class for rexpro message types """
    MESSAGE_TYPE = None

    def get_meta(self):
        """
        Returns a dictionary of message meta
        data depending on other set values
        """
        return {}

    def get_message_list(self):
        """
        Creates and returns the list containing the data to be serialized into a message
        """
        return [
            #session
            self.session,
            #unique request id
            uuid1().bytes,
            #meta
            self.get_meta()
        ]

    def serialize(self):
        """
        Serializes this message to send to rexster

        The format as far as I can tell is this:

        1B: protocol version
        1B: serializer type
        4B: padding
        1B: message type
        4B: message length (big-endian uint32)
        nB: msgpack serialized message

        the actual message is just a list of values, all seem to start with
        session and a unique request id (uuid bytes)
        """
        # msgpack the message body
        # (renamed from `bytes`, which shadowed the builtin of the same name)
        packed = msgpack.dumps(self.get_message_list())

        #add protocol version
        message = bytearray([1])

        #add serializer type
        message += bytearray([0])

        #add padding
        message += bytearray([0, 0, 0, 0])

        #add message type
        message += bytearray([self.MESSAGE_TYPE])

        #add message length
        message += struct.pack('!I', len(packed))

        #add message
        message += packed

        return message

    @classmethod
    def deserialize(cls, data):
        """
        Constructs a message instance from the given data

        :param data: the raw data, minus the type and size info, from rexster
        :type data: str/bytearray

        :rtype: RexProMessage
        """
        #redefine in subclasses
        raise NotImplementedError

    @staticmethod
    def interpret_response(response):
        """
        interprets the response from rexster, returning the relevant response message object
        """
class ErrorResponse(RexProMessage):
    """ Error message returned by the server; `meta` carries one of the
    error-flag constants below. """

    #meta flags
    INVALID_MESSAGE_ERROR = 0
    INVALID_SESSION_ERROR = 1
    SCRIPT_FAILURE_ERROR = 2
    AUTH_FAILURE_ERROR = 3
    GRAPH_CONFIG_ERROR = 4
    CHANNEL_CONFIG_ERROR = 5
    RESULT_SERIALIZATION_ERROR = 6

    def __init__(self, meta, message, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        self.meta = meta
        self.message = message

    @classmethod
    def deserialize(cls, data):
        """Build an ErrorResponse from a msgpack-encoded message body."""
        session, request, meta, msg = msgpack.loads(data)
        return cls(message=msg, meta=meta)
class SessionRequest(RexProMessage):
    """
    Message for creating a session with rexster
    """
    MESSAGE_TYPE = MessageTypes.SESSION_REQUEST

    def __init__(self, graph_name=None, graph_obj_name=None, username='', password='', session_key=None, kill_session=False, **kwargs):
        """
        :param graph_name: the name of the rexster graph to connect to
        :type graph_name: str
        :param graph_obj_name: the name of the variable to bind the graph object to (defaults to 'g')
        :type graph_obj_name: str
        :param username: the username to use for authentication (optional)
        :type username: str
        :param password: the password to use for authentication (optional)
        :type password: str
        :param session_key: the session key to reference (used only for killing existing session)
        :type session_key: str
        :param kill_session: when True this request kills the server session named by session_key
        :type kill_session: bool
        """
        super(SessionRequest, self).__init__(**kwargs)
        self.username = username
        self.password = password
        self.session = session_key
        self.graph_name = graph_name
        self.graph_obj_name = graph_obj_name
        self.kill_session = kill_session

    def get_meta(self):
        """Build the meta dict; a kill request carries only the kill flag."""
        if self.kill_session:
            return {'killSession': True}
        meta = {}
        if self.graph_name:
            meta['graphName'] = self.graph_name
        if self.graph_obj_name:
            meta['graphObjName'] = self.graph_obj_name
        return meta

    def get_message_list(self):
        """Append the credentials to the base message list."""
        base = super(SessionRequest, self).get_message_list()
        return base + [self.username, self.password]
class SessionResponse(RexProMessage):
    """ Server reply to a session request: the session key, meta data, and
    the script languages available on the server. """

    def __init__(self, session_key, meta, languages, **kwargs):
        super(SessionResponse, self).__init__(**kwargs)
        self.session_key = session_key
        self.meta = meta
        self.languages = languages

    @classmethod
    def deserialize(cls, data):
        """Build a SessionResponse from a msgpack-encoded message body."""
        session, request, meta, languages = msgpack.loads(data)
        return cls(session_key=session, meta=meta, languages=languages)
class ScriptRequest(RexProMessage):
    """
    Message that executes a gremlin script and returns the response
    """

    class Language(object):
        """Script languages recognized by the server."""
        GROOVY = 'groovy'
        SCALA = 'scala'
        JAVA = 'java'

    MESSAGE_TYPE = MessageTypes.SCRIPT_REQUEST

    def __init__(self, script, params=None, session_key=None, graph_name=None, graph_obj_name=None, in_session=True,
                 isolate=True, in_transaction=True, language=Language.GROOVY, **kwargs):
        """
        :param script: script to execute
        :type script: str/unicode
        :param params: parameter values to bind to request
        :type params: dict (json serializable)
        :param session_key: the session key to execute the script with
        :type session_key: str
        :param graph_name: the name of the rexster graph to connect to
        :type graph_name: str
        :param graph_obj_name: the name of the variable to bind the graph object to (defaults to 'g')
        :type graph_obj_name: str
        :param in_session: indicates this message should be executed in the context of the included session
        :type in_session:bool
        :param isolate: indicates variables defined in this message should not be available to subsequent message
        :type isolate:bool
        :param in_transaction: indicates this message should be wrapped in a transaction
        :type in_transaction:bool
        :param language: the language used by the script (only groovy has been tested)
        :type language: ScriptRequest.Language
        """
        super(ScriptRequest, self).__init__(**kwargs)
        self.script = script
        self.params = params or {}
        self.session = session_key
        self.graph_name = graph_name
        self.graph_obj_name = graph_obj_name
        self.in_session = in_session
        self.isolate = isolate
        self.in_transaction = in_transaction
        self.language = language

    def get_meta(self):
        """Build the meta dict; only non-default flags are included."""
        meta = {}

        if self.graph_name:
            meta['graphName'] = self.graph_name
        if self.graph_obj_name:
            meta['graphObjName'] = self.graph_obj_name

        #defaults to False
        if self.in_session:
            meta['inSession'] = True

        #defaults to True
        if not self.isolate:
            meta['isolate'] = False

        #defaults to True
        if not self.in_transaction:
            meta['transaction'] = False

        return meta

    def _validate_params(self):
        """
        Checks that the parameters are ok
        (no invalid types, no weird key names)

        :raises RexProScriptException: for a leading digit or unsupported type
        :raises RexProException: for whitespace/dot characters in a name
        """
        for k, v in self.params.items():
            if re.findall(r'^[0-9]', k):
                raise exceptions.RexProScriptException(
                    "parameter names can't begin with a number")

            bad_chars = re.findall(r'[\s\.]', k)
            if bad_chars:
                # BUG FIX: the message previously interpolated the result of
                # the leading-digit findall (always empty on this path),
                # which raised IndexError instead of naming the bad char.
                raise exceptions.RexProException(
                    "parameter names can't contain {}".format(bad_chars[0])
                )

            if not isinstance(v, (int,
                                  long,
                                  float,
                                  basestring,
                                  dict,
                                  list,
                                  tuple)):
                raise exceptions.RexProScriptException(
                    "{} is an unsupported type".format(type(v))
                )

    def serialize_parameters(self):
        """
        returns a serialization of the supplied parameters

        Wire format per entry: 4-byte key length, utf-8 key,
        4-byte value length, json-encoded utf-8 value.
        """
        data = bytearray()
        for k, v in self.params.items():
            key = k.encode('utf-8')
            val = json.dumps(v).encode('utf-8')
            data += utils.int_to_32bit_array(len(key))
            data += key
            data += utils.int_to_32bit_array(len(val))
            data += val
        return str(data)

    def get_message_list(self):
        """Append language, script and params to the base message list."""
        return super(ScriptRequest, self).get_message_list() + [
            self.language,
            self.script.encode('utf-8'),
            self.params
        ]
class MsgPackScriptResponse(RexProMessage):
    """ Script result message: the computed results plus any bindings. """

    def __init__(self, results, bindings, **kwargs):
        super(MsgPackScriptResponse, self).__init__(**kwargs)
        self.results = results
        self.bindings = bindings

    @classmethod
    def deserialize(cls, data):
        """Build a MsgPackScriptResponse from a msgpack-encoded body."""
        session, request, meta, results, bindings = msgpack.loads(data)
        return cls(results=results, bindings=bindings)
| {
"repo_name": "bdeggleston/rexpro-python",
"path": "rexpro/messages.py",
"copies": "1",
"size": "9997",
"license": "mit",
"hash": -1249220188854101500,
"line_mean": 29.2939393939,
"line_max": 135,
"alpha_frac": 0.5792737821,
"autogenerated": false,
"ratio": 4.4293309703145765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5508604752414576,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeutsch'
## do a polynomial fit on the data, calculate the goodness of tweet for each coordinate in tweetspace.
# next, find the gradient and make recommendations.
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import pandas as pd
import MySQLdb
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
def sql_to_df(database, table):
    """Load an entire MySQL table into a pandas DataFrame.

    The connection is now closed even when the read fails (the original
    leaked it).

    NOTE(review): `table` is interpolated directly into the SQL string --
    identifiers cannot be parameterized, so only call this with trusted
    table names.
    """
    con = MySQLdb.connect(host='localhost', user='root', passwd='', db=database)
    try:
        return pd.read_sql_query("select * from %s" % table, con, index_col=None,
                                 coerce_float=True, params=None, parse_dates=None,
                                 chunksize=None)
    finally:
        con.close()
# Import data from SQL
df = sql_to_df('TweetScore', 'probabilities')
#test_num = 50000
# Regression skeleton adapted from a scikit-learn example:
# Code source: Jaques Grobler
# License: BSD 3 clause
# load targets: per-tweet retweet probability
data_Y = df["rt_prob"].values
#test_Y = df["rt_prob"][-test_num:].values
# Load features
data_X = df[["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num"]].values
#test_X = df[["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num"]][-test_num:].values
# Turn the linear features into degree-2 polynomial features
poly = PolynomialFeatures(2)
# Apply this transformation
X_new = poly.fit_transform(data_X)
#X_new_test = poly.fit_transform(test_X)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_new, data_Y)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares for training set: %.2f" % np.mean((regr.predict(X_new) - data_Y) ** 2))
#print("Residual sum of squares for test set: %.2f" % np.mean((regr.predict(X_new_test) - test_Y) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score for training set: %.2f' % regr.score(X_new, data_Y))
#print('Variance score for test set: %.2f' % regr.score(X_new_test, test_Y))
# plots
#x_axis = np.array(range(max(data_X)+2))[:, np.newaxis]
#y_pred = regr.predict(poly.fit_transform(x_axis))
# Plot outputs
#plt.scatter(data_X, data_Y, color='black')
#plt.plot(x_axis, y_pred, color='blue', linewidth=3)
#plt.xticks(())
#plt.yticks(())
#plt.show()
## use the regression result to create the goodness function
# load all possible tweets <= 140 characters
tweetspace = pd.read_pickle('legal_tweets_3')
print tweetspace
# calculate the goodness column (-cost) for every point in tweet-space
goodness = []
cols=["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num"]
#test = regr.predict(poly.fit_transform(tweetspace[cols].loc[1].values))
for ind in tweetspace.index:
    goodness.append(regr.predict(poly.fit_transform(tweetspace[cols].loc[ind].values))[0])
    # progress indicator
    if ind%1000 == 0:
        print ind
# put the results in the dataframe
tweetspace["goodness"] = goodness
#print tweetspace.tail()
def make_index(row):
    """Return the string form of the row's first six values, used as an index key."""
    return str(row.values[0:6])
# convert each row to a vector and then a string; this designation string is
# used as a lookup key by the downstream gradient step
tweetspace['desig'] = tweetspace.apply(lambda row: make_index(row), axis=1)
#ts = pd.DataFrame()
#ts= tweetspace[["desig", "goodness"]]
#ts = ts.set_index("desig")
#print len(ts2.index.values)
tweetspace.to_pickle("goodness_prob")
# ADD IN RE_INDEX_GOODNESS.PY
| {
"repo_name": "aspera1631/TweetScore",
"path": "get_goodness.py",
"copies": "1",
"size": "3295",
"license": "mit",
"hash": 5739844371845408000,
"line_mean": 29.2293577982,
"line_max": 141,
"alpha_frac": 0.7028831563,
"autogenerated": false,
"ratio": 2.9846014492753623,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9146267480269309,
"avg_score": 0.008243425061210629,
"num_lines": 109
} |
__author__ = 'bdeutsch'
import twitter_text as tt
from ttp import ttp
import re
def get_len(list):
    """Total length of all items plus one separator character per item."""
    return sum(len(item) + 1 for item in list)
def count_https(list):
    """Count the entries that start with the 'https' scheme prefix."""
    return sum(1 for item in list if item[:5] == 'https')
def emoji_txt(text):
    """
    Count escaped emoji sequences (\\Uxxxxxxxx or \\uxxxx) in *text*.

    *text* is expected to be unicode-escaped, so emoji appear as literal
    backslash-U / backslash-u sequences.
    """
    # FIX: use a raw string for the regex. The old non-raw literal
    # '(\\\U\w{8}|\\\u\w{4})' only worked because Python 2 preserved unknown
    # escapes; on Python 3 '\U' in a non-raw literal is a syntax error.
    # The resulting pattern is byte-identical to the Python 2 one.
    matches = re.findall(r'(\\U\w{8}|\\u\w{4})', text)
    # len([]) == 0, so no emoji yields 0 as before
    return len(matches)
def tweet_features(df, tweet, img_count):
    """
    Populate *df* with structural features parsed from *tweet* text
    (hashtag/mention/url counts and lengths, plus derived text lengths).
    Returns *df*.
    """
    # run the tweet through the parser
    parsed = ttp.Parser().parse(tweet)
    # Use the twitter text py package to validate length
    validated = tt.TwitterText(tweet)
    df["ht_num"] = [len(parsed.tags)]              # number of hashtags
    df["user_num"] = [len(parsed.users)]           # number of user mentions
    df["url_num"] = [len(parsed.urls)]             # number of urls
    df["https_num"] = [count_https(parsed.urls)]   # number of secure urls
    df["http_num"] = df["url_num"] - df["https_num"]  # number of other urls
    df["ht_len"] = get_len(parsed.tags)            # total length of all hashtags
    df["user_len"] = get_len(parsed.users)         # total length of all user mentions
    df["txt_len_tot"] = [validated.validation.tweet_length()]  # total length of tweet
    # basic text length: strip mentions, hashtags, and t.co-wrapped urls
    # (23 chars for https, 22 for http)
    df["txt_len_basic"] = (df["txt_len_tot"] - df["user_len"] - df["ht_len"]
                           - df["https_num"]*23 - df["http_num"]*22)
    return df
# order: [emo_num, ]
def print_lit(text):
    """Return the ascii-encoded form of *text*."""
    encoded = text.encode('ascii')
    return encoded
test = "This web app was easy to make! #sarcastic @InsightDataSci" | {
"repo_name": "aspera1631/TS_web_app",
"path": "app/templates/models.py",
"copies": "1",
"size": "1718",
"license": "mit",
"hash": 3385105104428604400,
"line_mean": 28.1355932203,
"line_max": 116,
"alpha_frac": 0.5768335274,
"autogenerated": false,
"ratio": 3.2293233082706765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43061568356706764,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeutsch'
## Calculates and saves the gradient given a "goodness" matrix that measures the quality of every tweet in tweetspace
import numpy as np
import pandas as pd
# function that converts coordinates to index
def make_index(coord):
    """String form of a coordinate vector, used as a dataframe index key."""
    return str(coord)
# Load goodness dataframe
ts = pd.read_pickle('goodness_prob2')
# Create new dataframe of tweet coordinates
coord = pd.DataFrame()
# create new dataframe to hold the gradient
cols = ["emo+", "ht+", "med+", "txt+", "url+", "usr+", "emo-", "ht-", "med-", "txt-", "url-", "usr-"]
gradient = pd.DataFrame(columns=cols, index=ts.index)
## Calculate finite differences
count = 0
# choose a row
for ind in ts.index:
count += 1
# get current tweet coordinates
coord = ts.loc[ind].values[0:6]
# Find goodness at those coordinates
goodness = ts.loc[ind].values[7]
# addition loop
gradient_row = []
# For every possible "up" transition
for feature in range(len(coord)):
# take a step up
new_coord = coord
new_coord[feature] += 1
# convert the new coordinate to an index
new_ind = make_index(new_coord)
# look up cost at these coordinates. If it's not there, return NaN.
try:
new_goodness = ts.loc[new_ind].values[7]
except:
new_goodness = np.nan
# aggregate the gradient for this row. NaN should propagate
gradient_row.append(new_goodness - goodness)
# Maybe I added a race condition? Maybe it tries to execute these in parallel?
# subtraction loop
# for every possible step down
for feature in range(len(coord)):
# take a step down
new_coord = coord
new_coord[feature] = new_coord[feature] - 2
# convert the coordinate to an index
new_ind = make_index(new_coord)
# look up cost at these coordinates
try:
new_goodness = ts.loc[new_ind].values[7]
except:
new_goodness = np.nan
# aggregate the gradient for this row. NaN should propagate
#print new_goodness - goodness
gradient_row.append(new_goodness - goodness)
# add gradient row to the dataframe
gradient.loc[ind] = gradient_row
#print gradient
if count%1000 == 0:
print count
#print ts.loc["[ 1. 2. 1. 10. 0. 6.]"][7]
# save dataframe as pickle file
gradient.to_pickle("gradient_prob")
| {
"repo_name": "aspera1631/TweetScore",
"path": "gradient.py",
"copies": "1",
"size": "2454",
"license": "mit",
"hash": -5985336824246518000,
"line_mean": 28.9268292683,
"line_max": 117,
"alpha_frac": 0.630399348,
"autogenerated": false,
"ratio": 3.6681614349775784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9771739172054392,
"avg_score": 0.005364322184637074,
"num_lines": 82
} |
__author__ = 'bdeutsch'
import json
import MySQLdb
import numpy as np
import pandas as pd
import re
import twitter_text as tt
from ttp import ttp
# Function to replace "&" with "&"
def replace_codes(text):
    """Decode the HTML entities Twitter escapes in tweet text (&amp;, &gt;, &lt;)."""
    # same replacement order as before: & first, then >, then <
    for entity, char in (('&amp;', '&'), ('&gt;', '>'), ('&lt;', '<')):
        text = text.replace(entity, char)
    return text
# Counts the number of emoji in a tweet
def emoji_txt(text):
    """
    Count escaped emoji (\\Uxxxxxxxx / \\uxxxx) inside the text field of a
    unicode-escaped tweet repr.

    Returns 0 when no text field is found or it contains no emoji.
    """
    # FIX: raw strings keep the regexes identical across Python 2 and 3.
    # The old non-raw literals relied on Python 2 preserving unknown escapes;
    # '\U' in a non-raw literal is a syntax error on Python 3.
    # locate the text field of the repr: text..., u'<word>
    m = re.search(r"text(.+?), u'\w", text)
    if not m:
        return 0
    # count escaped emoji sequences within the text field
    return len(re.findall(r'(\\U\w{8}|\\u\w{4})', m.group(1)))
# Function to count the total length of all hashtags, etc
def get_ent_len(entities):
    """
    Total character span (indices[1] - indices[0]) of all entities.

    Entities whose text is pure escaped emoji contribute 0, because emoji are
    counted separately elsewhere.
    """
    totlen = 0
    for item in entities:
        text = item.get("text", "")
        # FIX: unicode-escape the text to spot emoji, then decode back to str
        # so re.findall sees text (not bytes) on Python 3; behavior on
        # Python 2 is unchanged. The regex is now a raw string -- the old
        # non-raw '\U' literal is a syntax error on Python 3.
        escaped = text.encode('unicode-escape').decode('ascii')
        if re.findall(r'(\\U\w{8}|\\u\w{4})', escaped):
            # emoji entity: already counted, contributes no length
            len1 = 0
        else:
            indices = item.get("indices", [0, 0])
            len1 = indices[1] - indices[0]
        totlen = totlen + len1
    return totlen
def clean_tweets(filein, fileout):
    """
    Parse a file of raw tweet JSON (one object per line) into a feature
    DataFrame, pickle it to *fileout*, and return it.

    Retweets are resolved to their original tweet ('retweeted_status') and
    duplicate tweet ids are collapsed to a single row.

    NOTE(review): relies on Python 2 map() returning a list when assigning
    DataFrame columns -- confirm before running on Python 3.
    """
    # Define path to raw tweet file
    tweets_data_path = filein
    tweets_data = []
    tweets_file = open(tweets_data_path, "r")
    # one JSON object per line; lines that fail to parse are skipped
    for line in tweets_file:
        try:
            tweet = json.loads(line)
            tweets_data.append(tweet)
        except:
            continue
    # 1 if the tweet was retweeted at least once, else 0
    def create_rt(retweets):
        if retweets > 0:
            return 1
        else:
            return 0
    # Define an empty dataframe
    tweets = pd.DataFrame()
    # Select and fill dataframe columns. If it's a retweet, use original tweet. If not, use regular values.
    # tweet ID. This will be the ID used in the dataframe
    tweets['tw_id'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("id"), tweets_data)
    # Tweet text
    #tweets['text'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("text", {}).replace('\n', ''), tweets_data)
    tweets['text'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("text", {}).replace('\n', ''), tweets_data)
    # Decode HTML entities ('&amp;' etc.) in the text
    tweets['text'] = tweets['text'].apply(replace_codes)
    # get lists of all info for all hashtags, urls, etc. If none, this is an empty set.
    tweets['hashtags'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("entities", {}).get("hashtags", []), tweets_data)
    tweets['users'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("entities", {}).get("user_mentions", []), tweets_data)
    tweets['urls'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("entities", {}).get("urls", []), tweets_data)
    tweets['symbols'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("entities", {}).get("symbols", []), tweets_data)
    tweets['media'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("entities", {}).get("media", []), tweets_data)
    tweets['ext_ent'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("entities", {}).get("extended_entities", []), tweets_data)
    # Count the hashtags, etc.
    tweets['emo_num'] = map(lambda tweet: emoji_txt(str(tweet).encode("unicode-escape")), tweets_data)
    tweets['ht_num'] = tweets['hashtags'].apply(len)
    tweets['user_num'] = tweets['users'].apply(len)
    tweets['url_num'] = tweets['urls'].apply(len)
    tweets['sym_num'] = tweets['symbols'].apply(len) # stuff like Coke or Pepsi
    tweets['media_num'] = tweets['media'].apply(len) # Twitter photos
    tweets['ext_num'] = tweets['ext_ent'].apply(len) # Multi-photos or videos
    # find the total length of all hashtags, etc.
    #tweets['emo_len'] = tweets['emo_num'].apply(get_emo_len)
    tweets['ht_len'] = tweets['hashtags'].apply(get_ent_len)
    tweets['user_len'] = tweets['users'].apply(get_ent_len)
    tweets['url_len'] = tweets['urls'].apply(get_ent_len)
    tweets['sym_len'] = tweets['symbols'].apply(get_ent_len)
    tweets['media_len'] = tweets['media'].apply(get_ent_len)
    tweets['ext_len'] = tweets['ext_ent'].apply(get_ent_len)
    # Get the length of the text, and then subtract all of the entity lengths.
    tweets['txt_len_total'] = tweets['text'].apply(len) #accounts for double-counting of emoji chars
    tweets['txt_len_basic'] = tweets['txt_len_total'] - \
                              tweets[['ht_len', 'user_len', 'sym_len', 'media_len', 'ext_len', 'emo_num']].sum(axis=1)
    # get the user ID and the number of followers of that user
    tweets['user_id'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("user", {}).get("screen_name"), tweets_data)
    tweets['followers'] = map(lambda tweet: tweet.get("retweeted_status", tweet).get("user", {}).get("followers_count"), tweets_data)
    # get number of retweets and favorites.
    tweets['retweets'] = map(lambda tweet: tweet.get("retweeted_status", {}).get("retweet_count", 0), tweets_data)
    tweets['favorites'] = map(lambda tweet: tweet.get("retweeted_status", {}).get("favorite_count", 0), tweets_data)
    # assign 1 for retweeted, 0 for not.
    tweets["rt"] = tweets["retweets"].apply(create_rt)
    # Select only one version of each tweet, with the maximum retweets
    tw_unique = tweets.groupby('tw_id').first()
    #features = tw_unique['tw_id']
    features = tw_unique[["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num", "sym_num", "ext_num", "txt_len_total", "user_id", "followers", "retweets", "rt"]]
    #(optional) save in pickle format
    features.to_pickle(fileout)
    return features
def pickle_to_sql(filein, tableName, mode):
    """Load a pickled DataFrame from *filein* and write it into the TweetScore MySQL database."""
    frame = pd.read_pickle(filein)
    # may need other connection options depending on the MySQL setup
    connection = MySQLdb.connect(host='localhost', user='root', passwd='', db='TweetScore')
    frame.to_sql(con=connection, name=tableName, if_exists=mode, flavor='mysql')
    return True
def get_len(list):
    """Sum of item lengths, plus one separator character per item."""
    total = 0
    for entry in list:
        total += len(entry) + 1
    return total
def count_https(list):
    """Number of urls that use the https scheme."""
    return len([url for url in list if url[:5] == 'https'])
def tweet_features(df, tweet):
    # THIS VERSION IS DEPRECATED. USE VERSION FROM WEB APP
    """
    Populate *df* with counts/lengths of hashtags, mentions, and urls parsed
    from *tweet*, plus derived text-length features. Returns *df*.
    """
    # run a tweet through the parser
    p = ttp.Parser()
    result = p.parse(tweet)
    # Use the twitter text py package to validate length
    tweet_tt = tt.TwitterText(tweet)
    df["ht_num"] = [len(result.tags)] # number of hashtags
    df["user_num"] = [len(result.users)] # number of user mentions
    df["url_num"] = [len(result.urls)] # number of urls
    df["https_num"] = [count_https(result.urls)] # Number of secure urls
    df["http_num"] = df["url_num"] - df["https_num"] # number of other urls
    df["ht_len"] = get_len(result.tags) # total length of all hashtags
    df["user_len"] = get_len(result.users) # total length of all user mentions
    df["txt_len_tot"] = [tweet_tt.validation.tweet_length()] # total length of tweet
    # length of basic text in tweet (no urls, hashtags, user mentions;
    # urls count 23 chars for https, 22 for http)
    df["txt_len_basic"] = df["txt_len_tot"] - df["user_len"] - df["ht_len"] - df["https_num"]*23 - df["http_num"]*22
    return df
def parse_input_tweet(tweet):
    """Build a one-row feature DataFrame for a single tweet string."""
    return tweet_features(pd.DataFrame(), tweet)
def sql_to_df(database, table):
    """Read the full contents of *table* from *database* into a DataFrame."""
    con = MySQLdb.connect(host='localhost', user='root', passwd='', db=database)
    return pd.read_sql_query(
        "select * from %s" % table, con,
        index_col=None, coerce_float=True, params=None,
        parse_dates=None, chunksize=None)
#pickle_to_sql('recommendations', 'recommendations', 'replace')
path = '../twitter data/raw data/'
# disabled one-off ingestion: parse each raw capture into a pickled feature
# file, then append each file to the 'cleaned' SQL table
'''
clean_tweets(path + 'data_1.json', 'features_1')
clean_tweets(path + 'data_2.json', 'features_2')
clean_tweets(path + 'data_3.json', 'features_3')
clean_tweets(path + 'data_4.json', 'features_4')
clean_tweets(path + 'data_5.json', 'features_5')
clean_tweets(path + 'data_6.json', 'features_6')
clean_tweets(path + 'data_7.json', 'features_7')
clean_tweets(path + 'data_8.json', 'features_8')
clean_tweets(path + 'data_9.json', 'features_9')
clean_tweets(path + 'data_10.json', 'features_10')
clean_tweets(path + 'data_11.json', 'features_11')
#print df["emo_num"]
pickle_to_sql('features_1', 'cleaned', 'append')
pickle_to_sql('features_2', 'cleaned', 'append')
pickle_to_sql('features_3', 'cleaned', 'append')
pickle_to_sql('features_4', 'cleaned', 'append')
pickle_to_sql('features_5', 'cleaned', 'append')
pickle_to_sql('features_6', 'cleaned', 'append')
pickle_to_sql('features_7', 'cleaned', 'append')
pickle_to_sql('features_8', 'cleaned', 'append')
pickle_to_sql('features_9', 'cleaned', 'append')
pickle_to_sql('features_10', 'cleaned', 'append')
pickle_to_sql('features_11', 'cleaned', 'append')
'''
# bin the tweets using rebin_df
#pickle_to_sql('binned_tweets', 'binned', 'replace')
# load the full data set from SQL, select unique tweets, save as pickle file.
#df = sql_to_df("TweetScore", "cleaned")
#tw_unique = df.groupby('tw_id').first()
#tw_unique.to_pickle('features_all')
# Run "rebin_dataframe" to convert full file to binned dataframe and then sql
# load from SQL and write as pickle.
#df = sql_to_df("TweetScore", "binned2")
#df.to_pickle('binned_all')
# plot the data to make sure it makes sense.
# perform the fit with "get_goodness"
# re-index with "re_index_goodness"
# perform gradient calculation with "gradient"
# Assemble recommendation list with "recommendations"
# Fields are:
# tw_id: Unique tweet ID supplied by Twitter
# text: Full text of the original tweet
# hashtags: List of all hashtag entity data (no '#' symbol)
# users: List of user mentions (no '@' symbol)
# urls List of all url data
# symbols: List of symbol data (like Coke or Pepsi symbols)
# media: Twitter Picture entities
# ext_ent: Extended entities, including multi-pictures and videos
# emo_num: Number of emoji
# ht_num: Number of hashtags
# user_num: Number of user mentions in original tweet
# url_num: Number of URLs in tweet
# sym_num: Number of symbols
# media_num: Number of media (twitter picture) elements
# ext_num: Number of extended elements
# emo_len: Length of emoji in parsed data (just 2x emo_num)
# ht_len: Length of all hashtags
# user_len: Length of all user mentions
# url_len: Length of all URLs (22 or 23 char each)
# sym_len: Length of all symbols
# media_len: Length of all media elements
# ext_len: Length of all extended entities
# txt_len_total Length of tweet
# txt_len_basic Length of simple text in tweet
# user_id: Screen name of user for original tweet
# followers: Number of followers of user
# retweets: (max) Number of retweets for this tweet_id
# favorites: (max) Number of favorites for this tweet_id
# Fields recorded in processed DataFrame:
# tw_id: Unique tweet ID supplied by Twitter
# emo_num: Number of emoji
# ht_num: Number of hashtags
# user_num: Number of user mentions in original tweet
# url_num: Number of URLs in tweet
# sym_num: Number of symbols
# media_num: Number of media (twitter picture) elements
# ext_num: Number of extended elements
# txt_len_total Length of tweet
# txt_len_basic Length of simple text in tweet
# user_id: Screen name of user for original tweet
# followers: Number of followers of user
# retweets: (max) Number of retweets for this tweet_id
# favorites: (max) Number of favorites for this tweet_id | {
"repo_name": "aspera1631/TweetScore",
"path": "tweetscore.py",
"copies": "1",
"size": "12012",
"license": "mit",
"hash": 7878695082852505000,
"line_mean": 37.5032051282,
"line_max": 181,
"alpha_frac": 0.6323676324,
"autogenerated": false,
"ratio": 3.2667935817242317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9382229467260241,
"avg_score": 0.003386349372798177,
"num_lines": 312
} |
__author__ = 'bdeutsch'
import json
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import MySQLdb
def sql_to_df(database, table):
    """Fetch all rows of *table* from *database* as a DataFrame."""
    db_conn = MySQLdb.connect(host='localhost', user='root', passwd='', db=database)
    frame = pd.read_sql_query("select * from %s" % table, db_conn,
                              index_col=None, coerce_float=True,
                              params=None, parse_dates=None, chunksize=None)
    return frame
# Create bins and labels for a data set
def make_bins(max_lbl, step):
range1 = np.arange(0, max_lbl + step, step)
bins = np.append((range1 - step/2), 1000)
labels = []
for item in range1:
labels.append(str(int(item)))
labels.pop()
labels.append(str(range1[-1]) + "+")
labels_out = tuple(labels)
return [bins, labels_out]
# Load data
# binned feature dataframe produced by the rebin_dataframe.py step
df = pd.read_pickle('binned_all')
#df = sql_to_df("TweetScore", "twitter")
## Plot retweets vs basic text length
# Take log of retweets
#df["rt_log"] = df["retweets"].apply(lambda tweet: np.log10(tweet + 1))
# plot ave retweets vs emoji
def plot_fits(feature):
    """Scatter binned mean retweets vs *feature* and overlay polynomial fits of order 1-4."""
    sns.set_context("talk", font_scale=1)
    # order-1 fit with the binned-mean scatter points
    ax = sns.regplot(x=feature, y="retweets", data=df, x_estimator=np.mean,
                     fit_reg=True, x_ci=50, scatter=True, ci=None)
    # overlay the higher-order fits without re-plotting the points
    for order in (2, 3, 4):
        ax = sns.regplot(x=feature, y="retweets", data=df, fit_reg=True,
                         order=order, x_ci=50, scatter=False, ci=None)
    #ax = sns.regplot(x="emo_num", y="retweets", data=df, fit_reg=True)
    ax.set(xlabel=feature, ylabel='Retweets')
    plt.show()
def plot_feature(feature):
    """Scatter binned mean retweets against *feature* with no fit line."""
    axes = sns.regplot(x=feature, y="retweets", data=df, x_estimator=np.mean,
                       fit_reg=False, x_ci=50, scatter=True)
    axes.set(xlabel=feature, ylabel='Retweets')
    plt.show()
# render the binned-mean scatter for the two count features under review
plot_feature("emo_num")
plot_feature("ht_num")
'''
## Plot log(RTs) vs length of text
# Create bins and labels for follwoer counts
labels = range(0, max(df["txt_len_total"]), 5)
df["txt_tot_bins"] = pd.cut(df["txt_len_total"], range(0, max(df["txt_len_total"])+5, 5), right=False, labels=labels)
sns.set_context("talk", font_scale=1)
df.sort(columns="txt_tot_bins")
ax = sns.lmplot(x="txt_tot_bins", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='Total tweet length', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
'''
bin_info = make_bins(140,5)
df["txt_tot_bins"] = pd.cut(df["txt_len_total"], bins=bin_info[0], labels=False)
sns.set_context("talk", font_scale=1)
df.sort(columns="txt_tot_bins")
ax = sns.lmplot(x="txt_tot_bins", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='Total tweet length', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
'''
## Plot log(RTs) vs length of text
# Create bins and labels for follwoer counts
labels = range(0, max(df["txt_len_basic"]), 5)
df["txt_basic_bins"] = pd.cut(df["txt_len_basic"], range(0, max(df["txt_len_basic"])+5, 5), right=False, labels=labels)
sns.set_context("talk", font_scale=1)
df.sort(columns="txt_basic_bins")
ax = sns.lmplot(x="txt_basic_bins", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='Simple text characters', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
'''
## Plot log(RTs) vs number of followers
# Create bins and labels for follwoer counts
labels = range(0, int(np.ceil(max(df["fol_log"]))), 1)
df["fol_bins"] = pd.cut(df["fol_log"], range(0, int(np.ceil(max(df["fol_log"])))+1, 1), right=False, labels=labels)
sns.set_context("talk", font_scale=1)
df.sort(columns="fol_bins")
ax = sns.lmplot(x="fol_bins", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='log(Followers + 1)', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
'''
# log(RTs) vs basic length
sns.set_context("talk", font_scale=1)
ax = sns.pointplot(x=df_bins_txt.index, y=df_bins_txt["rt_log"], fit_reg=False)
ax.set(xlabel='Basic text length', ylabel='retweets')
##ax.set_yscale('log')
plt.show()
'''
'''
# log (RTs) vs total length
sns.set_context("talk", font_scale=1)
ax = sns.pointplot(x=df_bins.index, y=df_bins["rt_log"], fit_reg=False)
ax.set(xlabel='Basic text length', ylabel='retweets')
##ax.set_yscale('log')
plt.show()
'''
'''
# Plot log(RTs) vs number of hashtag
sns.set_context("talk", font_scale=1)
df.sort(columns="ht_num")
ax = sns.lmplot(x="ht_num", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='Number of hashtags', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
'''
# Plot log(RTs) vs number of urls
sns.set_context("talk", font_scale=1)
df.sort(columns="url_num")
ax = sns.lmplot(x="url_num", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='Number of URLs', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
'''
# Plot log(RTs) vs number of pictures
sns.set_context("talk", font_scale=1)
df.sort(columns="media_num")
ax = sns.lmplot(x="media_num", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='Number of Pictures', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
'''
# Plot log(RTs) vs number of user mentions
sns.set_context("talk", font_scale=1)
df.sort(columns="user_num")
ax = sns.lmplot(x="user_num", y="rt_log", data=df, x_estimator=np.mean, fit_reg=False)
ax.set(xlabel='Number of user mentions', ylabel='log(Rewteets + 1)')
#ax.set_yscale('log')
plt.show()
'''
#print tweets
#print df.head()
# create plot of retweets vs
#counts1 = tweets[["ht_num", "user_num"]]
#ax = sns.heatmap(counts1)
#'processed_20k_03' is 20k tweets in english. Fields are:
# tw_id: Unique tweet ID supplied by Twitter
# text: Full text of the original tweet
# hashtags: List of all hashtag entity data (no '#' symbol)
# users: List of user mentions (no '@' symbol)
# urls List of all url data
# symbols: List of symbol data (like Coke or Pepsi symbols)
# media: Twitter Picture entities
# ext_ent: Extended entities, including multi-pictures and videos
# emo_num: Number of emoji
# ht_num: Number of hashtags
# user_num: Number of user mentions in original tweet
# url_num: Number of URLs in tweet
# sym_num: Number of symbols
# media_num: Number of media (twitter picture) elements
# ext_num: Number of extended elements
# emo_len: Length of emoji in parsed data (just 2x emo_num)
# ht_len: Length of all hashtags
# user_len: Length of all user mentions
# url_len: Length of all URLs (22 or 23 char each)
# sym_len: Length of all symbols
# media_len: Length of all media elements
# ext_len: Length of all extended entities
# txt_len_total Length of tweet
# txt_len_basic Length of simple text in tweet
# user_id: Screen name of user for original tweet
# followers: Number of followers of user
# retweets: (max) Number of retweets for this tweet_id
# favorites: (max) Number of favorites for this tweet_id
| {
"repo_name": "aspera1631/TweetScore",
"path": "make_plots.py",
"copies": "1",
"size": "7126",
"license": "mit",
"hash": 4178590702642160600,
"line_mean": 31.0990990991,
"line_max": 141,
"alpha_frac": 0.6668537749,
"autogenerated": false,
"ratio": 2.688042248208223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8831591397342258,
"avg_score": 0.004660925153192945,
"num_lines": 222
} |
__author__ = 'bdeutsch'
import numpy as np
import pandas as pd
import MySQLdb
def import_data(sql_table):
    """Fetch every row of *sql_table* from the TweetScore database."""
    connection = MySQLdb.connect(host='localhost', user='root', passwd='', db="TweetScore")
    return pd.read_sql_query("select * from %s" % sql_table, connection,
                             index_col=None, coerce_float=True, params=None,
                             parse_dates=None, chunksize=None)
def make_bins(max_lbl, step):
    """
    Bin edges centered on multiples of *step* up to *max_lbl* with a
    catch-all final edge at 1000, plus matching labels ('0', ..., '<max>+').
    Returns [edges, labels].
    """
    centers = np.arange(0, max_lbl + step, step)
    edges = np.append(centers - float(step) / 2, [1000])
    labels = [str(int(c)) for c in centers]
    # the last label is open-ended
    labels[-1] = str(centers[-1]) + "+"
    return [edges, tuple(labels)]
def bin_data(df):
    """
    Discretize the raw feature columns of *df* into the coarse bins used by
    the model, returning a new DataFrame of integer bin codes plus the
    untouched 'rt' retweet indicator.
    """
    # (column, max label, step) -- one bin spec per feature, in output order
    specs = [
        ("emo_num", 6, 1),          # emoji count: 0..5, 6+
        ("ht_num", 6, 1),           # hashtags: 0..5, 6+
        ("media_num", 2, 1),        # pictures: 0, 1, 2+
        ("txt_len_basic", 140, 5),  # basic text length: 5-char bins
        ("url_num", 2, 1),          # urls: 0, 1, 2+
        ("user_num", 6, 1),         # user mentions: 0..5, 6+
    ]
    feat_bins = pd.DataFrame()
    for column, max_lbl, step in specs:
        feat_bins[column] = pd.cut(df[column], make_bins(max_lbl, step)[0], labels=False)
    #feat_bins["txt_len_total"] = pd.cut(df["txt_len_total"], make_bins(140, 5)[0], labels=False)
    #feat_bins["retweets"] = df["retweets"]
    feat_bins["rt"] = df["rt"]
    #feat_bins["rt_log"] = df["retweets"].apply(lambda tweet: np.log10(tweet + 1))
    return feat_bins
# save to SQL
#con = MySQLdb.connect(host='localhost', user='root', passwd='', db='TweetScore') # may need to add some other options to connect
def pickle_to_sql(filein, tableName, mode):
    """Read a pickled DataFrame from *filein* and write it to MySQL table *tableName*."""
    data = pd.read_pickle(filein)
    # may need some other options to connect depending on the setup
    connection = MySQLdb.connect(host='localhost', user='root', passwd='', db='TweetScore')
    data.to_sql(con=connection, name=tableName, if_exists=mode, flavor='mysql')
    return True
# import and bin
df = import_data('cleaned')
# dropna doesn't strictly need to be here, but it's a safeguard against rows
# that fell outside every bin
df = bin_data(df).dropna()
# re-index based on coordinates: stringified first six feature values
df['desig'] = df.apply(lambda row: str(row.values[0:6]), axis=1)
#print df.head()
con = MySQLdb.connect(host='localhost', user='root', passwd='', db='TweetScore')
#df[180001:].to_sql(con=con, name="binned", if_exists="append", flavor='mysql')
| {
"repo_name": "aspera1631/TweetScore",
"path": "rebin_dataframe.py",
"copies": "1",
"size": "4005",
"license": "mit",
"hash": 6950713856815547000,
"line_mean": 29.572519084,
"line_max": 141,
"alpha_frac": 0.6229712859,
"autogenerated": false,
"ratio": 2.84850640113798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.397147768703798,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeutsch'
import numpy as np
import pandas as pd
import MySQLdb
## Given the gradient, output a file with the top n recommendations
# Import gradient, replace NaN with a very negative gradient (will always avoid those transitions)
gradient = pd.read_pickle('gradient_prob').fillna(-100000000)
# Create a new dataframe with same indices as gradient, three recommendation slots per coordinate
recommendations = pd.DataFrame(columns=["msg1", "msg2", "msg3"], index=gradient.index)
# Lookup table mapping each gradient column (a +/- 1 feature step) to user-facing text
indices = ["emo+", "ht+", "med+", "txt+", "url+", "usr+", "emo-", "ht-", "med-", "txt-", "url-", "usr-"]
msg_tbl = pd.DataFrame(columns=["messages"], index=indices)
msg_tbl["messages"] = ["Add an emoji", "Add a hashtag", "Add an image", "Add a bit more text", "Add a link", "Add a user mention", "Remove an emoji", "Remove a hashtag", "Remove an image", "Remove a bit of text", "Remove a link", "Remove a user mention"]
# Sort each row of the gradient by column (axis 1). Return the first n column names that are not nan.
# Generate list of indices
ind1 = gradient.index.values
# For each coordinate in tweet-space
for i in ind1:
    # reorder columns in this row by ascending gradient
    # NOTE(review): .ix is long-deprecated in pandas; .loc is the modern spelling
    new_columns = gradient.columns[gradient.ix[i].argsort()]
    # ordered row, best step first. If we have NaN here it messes up the ordering.
    ord_row = gradient.loc[i][reversed(new_columns)]
    # Filter for only positive recommendations. Kills old NaN values
    pos_steps = ord_row[ord_row.values > 0]
    # Build a message list
    msg_list = []
    # For each of the three recommendation slots
    for j in [0,1,2]:
        # Try to find the message corresponding to the first three values.
        try:
            msg_ind = pos_steps.index.values[j]
            msg = msg_tbl.loc[msg_ind].values[0]
        except:
            # if there aren't three positive values, report empty strings.
            msg = ''
        msg_list.append(msg)
    # Build the recommendation dataframe.
    recommendations.loc[i][["msg1", "msg2", "msg3"]] = msg_list
# Write to pickle file
recommendations.to_pickle("recommendations_prob")
# Write to SQL
con = MySQLdb.connect(host='localhost', user='root', passwd='', db='TweetScore') # may need to add some other options to connect
tableName = 'recommendations_prob'
recommendations.to_sql(con=con, name=tableName, if_exists="replace", flavor='mysql')
'''
rownum = 900
new_columns = gradient.columns[gradient.ix[ind1[rownum]].argsort()]
#new_columns = gradient.columns[gradient.ix[ind1[1]]]
#print gradient.loc[ind1[rownum]]
#print gradient.loc[ind1[rownum]][reversed(new_columns)]
#print new_columns
ord_row = gradient.loc[ind1[rownum]][reversed(new_columns)]
# take only the suggestions that lead to an improvement
pos_steps = ord_row[ord_row.values > 0]
# Build the rec. dataframe
#print msg_tbl
msg_list = []
for i in [0,1,2]:
try:
msg_ind = pos_steps.index.values[i]
msg = msg_tbl.loc[msg_ind].values[0]
except:
msg = ''
msg_list.append(msg)
recommendations.loc[ind1[rownum]][["msg1", "msg2", "msg3"]] = msg_list
print recommendations.loc[ind1[rownum]]
''' | {
"repo_name": "aspera1631/TweetScore",
"path": "recommendations.py",
"copies": "1",
"size": "3169",
"license": "mit",
"hash": -3941668439482678300,
"line_mean": 32.7234042553,
"line_max": 254,
"alpha_frac": 0.6784474598,
"autogenerated": false,
"ratio": 3.356991525423729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9472760768273816,
"avg_score": 0.01253564338998256,
"num_lines": 94
} |
__author__ = 'bdeutsch'
import numpy as np
import pandas as pd
import MySQLdb
def sql_to_df(database, table):
    """Read the entire MySQL table `table` from `database` into a DataFrame."""
    connection = MySQLdb.connect(host='localhost', user='root', passwd='', db=database)
    query = "select * from %s" % table
    frame = pd.read_sql_query(query, connection, index_col=None, coerce_float=True,
                              params=None, parse_dates=None, chunksize=None)
    return frame
def pickle_to_sql(filein, tableName, mode):
    """Load a pickled DataFrame from `filein` and write it to MySQL.

    `tableName` is the destination table; `mode` is passed through as
    pandas' if_exists policy ('fail' / 'replace' / 'append').
    """
    frame = pd.read_pickle(filein)
    connection = MySQLdb.connect(host='localhost', user='root', passwd='', db='TweetScore') # may need to add some other options to connect
    frame.to_sql(con=connection, name=tableName, if_exists=mode, flavor='mysql')
    return True
# load binned data from sql
df = sql_to_df('tweetscore', 'binned')
# re-index
df = df.set_index("desig")
# group
# Group observations by tweet designation (the new index).
df_group = df.groupby(level=0)
# new df
# One row per tweet: static features plus retweet probability and a weight.
df2 = pd.DataFrame()
df2["emo_num"] = df_group["emo_num"].first()
df2["ht_num"] = df_group["ht_num"].first()
df2["media_num"] = df_group["media_num"].first()
df2["txt_len_basic"] = df_group["txt_len_basic"].first()
df2["url_num"] = df_group["url_num"].first()
df2["user_num"] = df_group["user_num"].first()
# Mean of the binary rt flag = empirical retweet probability for the group.
df2["rt_prob"] = df_group["rt"].mean()
# sqrt(sample count) used as a fitting weight downstream.
df2["weights"] = df_group["rt"].count().apply(np.sqrt)
# write to pickle file
df2.to_pickle("probabilities")
pickle_to_sql("probabilities", "probabilities", "replace") | {
"repo_name": "aspera1631/TweetScore",
"path": "prob_weights.py",
"copies": "1",
"size": "1505",
"license": "mit",
"hash": 7804725636362539000,
"line_mean": 26.8888888889,
"line_max": 141,
"alpha_frac": 0.6657807309,
"autogenerated": false,
"ratio": 2.9684418145956606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41342225454956605,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeutsch'
import numpy as np
import pandas as pd
def cartesian(arrays, out=None):
    """Return the cartesian product of several 1-D arrays as an (n, k) array.

    Each row of the result is one combination, first array varying slowest.
    `out` is the recursion buffer; callers normally omit it.

    Fixes: `xrange` (Python-2-only) replaced by `range`, and the block size
    `m` is computed with floor division -- under Python 3 the old true
    division produced a float and broke the slice indexing below.
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # Rows repeat in blocks of m for each value of the first array.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill the first block recursively, then tile it for remaining values.
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j*m:(j+1)*m, 1:] = out[0:m, 1:]
    return out
# Per-feature bin ranges spanning the space of candidate tweets
# (emoji/hashtag/media/text-length-bin/url/mention counts).
emo_vals = range(0,7)
ht_vals = range(0,7)
media_vals = range(0,3)
txt_bas_vals = range(0,29)
url_vals = range(0,3)
user_vals = range(0,7)
'''
# generate the space of all possible tweets
emo_vals = range(0,2)
ht_vals = range(0,2)
media_vals = range(0,2)
txt_bas_vals = range(0,2)
url_vals = range(0,2)
user_vals = range(0,2)
'''
# Characters consumed per unit of each feature
# (emoji, hashtag, media, text bin, url, user mention).
_CHAR_WEIGHTS = pd.DataFrame([1, 2, 23, 5, 22, 2],
                             index=["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num"])

def get_txt_len(dfrow):
    """Return the total character length implied by one row of feature counts.

    Fix: the weight table used to be rebuilt on every call, which is wasteful
    because this function is applied once per row of the tweet space; it is
    now a module-level constant.
    """
    return dfrow.dot(_CHAR_WEIGHTS)
# for each possible tweet, create a row of a dataframe
test = cartesian((emo_vals, ht_vals, media_vals, txt_bas_vals, url_vals, user_vals))
#test = [[141,0,0,0,0,0]]
# label the columns
tweetspace = pd.DataFrame(test, columns=["emo_num", "ht_num", "media_num", "txt_len_basic", "url_num", "user_num"])
# Total implied character length for each candidate tweet.
tweetspace["len_tot"] = tweetspace.apply(get_txt_len, axis = 1)
# Keep only combinations that fit within a 140-character tweet.
legal_tweets = tweetspace[tweetspace["len_tot"] <= 140]
legal_tweets.to_pickle("legal_tweets_3")
| {
"repo_name": "aspera1631/TweetScore",
"path": "length_test.py",
"copies": "1",
"size": "1607",
"license": "mit",
"hash": -1947932145841150000,
"line_mean": 24.109375,
"line_max": 129,
"alpha_frac": 0.6241443684,
"autogenerated": false,
"ratio": 2.630114566284779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3754258934684779,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeutsch'
import re
import numpy as np
import pandas as pd
# List cards drawn by me and played by opponent
def get_cards(filename):
    """Print and return the cards revealed to each player in a log file.

    Scans `filename` for zone changes: cards moved to FRIENDLY HAND (drawn
    by me) and cards moved to OPPOSING PLAY other than the hero entity
    (played by the opponent).

    Returns:
        (mycards, oppcards): the two lists of card-name captures, in log
        order.  (The original returned nothing; callers that ignore the
        return value are unaffected.)
    """
    mycards = []
    oppcards = []
    with open(filename) as f:
        for line in f:
            # Cards drawn into my hand.
            m = re.search('name=(.+)id.+to FRIENDLY HAND', line)
            if m:
                mycards.append(m.group(1))
            # Cards the opponent put into play, excluding the hero itself.
            n = re.search('name=(.+)id.+to OPPOSING PLAY(?! \(Hero)', line)
            if n:
                oppcards.append(n.group(1))
    # Single-argument print(...) behaves identically on Python 2 and 3
    # (the original used Python-2 print statements).
    for item in mycards:
        print(item)
    print('\n')
    for item in oppcards:
        print(item)
    return mycards, oppcards
# make a list of card IDs and names
def get_ids(filename='test_game'):
    """Collect every entity id in a log file and return an indexed DataFrame.

    Generalized: the log file name is now a parameter, defaulting to the
    previously hard-coded 'test_game', so existing calls are unaffected.

    Returns a DataFrame indexed by sorted entity id with an empty 'Name'
    column, filled in later by get_ids_names().
    """
    idlist = []
    with open(filename) as f:
        for line in f:
            # Entity ids appear as "id=<n> " preceded by '[' or a space.
            m = re.search('[\[ ]id=(\d+) ', line)
            if m:
                # `ent_id` (was `id`) avoids shadowing the builtin.
                ent_id = int(m.group(1))
                if ent_id not in idlist:
                    idlist.append(ent_id)
    idlist.sort()
    d = pd.DataFrame(index=idlist)
    d.index.name = "Entity ID"
    # Placeholder column for names resolved later.
    d["Name"] = np.nan
    return d
# make a list of card names only if followed by id
def get_names(filename='test_game'):
    """Print and return card names that are immediately followed by "id=".

    Generalized: the file name is a parameter (was hard-coded 'test_game')
    and the matched names are also returned as a list; existing callers
    that ignored the (previously None) return value are unaffected.
    """
    names = []
    with open(filename) as f:
        for line in f:
            # A name is only trusted when "id=" follows it on the same line.
            m = re.search('[\[ ]name=([\w ]+?) id=', line)
            if m:
                print(m.group(1))
                names.append(m.group(1))
    return names
def get_ids_names(df, filename='test_game'):
    """Fill the 'Name' column of `df` from name/id pairs found in the log.

    Fixes: `df.ix` (removed in pandas 1.0) replaced by the equivalent
    label-based `df.loc`; the unused `namedict` local was dropped; the
    file name is now a parameter defaulting to the old hard-coded value.

    Returns the updated DataFrame.
    """
    with open(filename) as f:
        for line in f:
            # Combinations of entity name and id on one line.
            m = re.search('[\[ ]name=([\w ]+?) id=(\d+)', line)
            if m:
                ent_id = int(m.group(2))
                name = m.group(1)
                df.loc[ent_id, 'Name'] = name
    return df
# Script body: collect entity ids from 'test_game', then parse the bracketed
# update groups to fill Name/CardId/Player per entity.  Python 2 code;
# NOTE(review): df.ix below was removed in pandas 1.0.
idlist = []
with open('test_game') as f:
    # For each line
    for line in f:
        # Find the entity ids
        m = re.search('[\[ ]id=(\d+) ', line)
        # if one is found
        if m:
            # Check that we haven't found it yet, convert to an integer
            # (NOTE: `id` shadows the builtin.)
            id = int(m.group(1))
            # Add it to the list
            if id not in idlist:
                idlist.append(id)
# Sort the ids
idlist.sort()
# Convert to dataframe
df = pd.DataFrame(index=idlist)
# Rename the index
df.index.name = "Entity ID"
# Create an empty column for names
df["Name"] = np.nan
df["CardId"] = np.nan
df["Player"] = np.nan
with open('test_game') as f:
    updates = []
    for line in f:
        # Find lists of the innermost nested brackets
        m = re.findall(r"\[([^\[]+?)]", line)
        # If it's not just the command designation bracket ("zone", e.g.)
        if len(m)>1:
            # for each set of bracket contents
            for item in m[1:]:
                # add to the list of updates
                updates.append(item)
for item in updates:
    # find the id
    m = re.search("id=(\d+)", item)
    if m:
        # Assign ID variable
        id = int(m.group(1))
        # find name and assign
        n = re.search("name=(.+?) \w+?=", item)
        if n:
            name = n.group(1)
            df.ix[id, "Name"] = name
        # find cardId and assign
        n = re.search("cardId=(\w.+?) ", item)
        if n:
            cardId = n.group(1)
            df.ix[id, "CardId"] = cardId
        # find player
        n = re.search("player=(\d)", item)
        if n:
            player = n.group(1)
            df.ix[id, "Player"] = player
# update the dataframe for each update
# get rid of the "zone" and "power" markers.
# collect the entries into a list
# Put card IDs into a DataFrame
#df = get_ids_names(get_ids())
pd.set_option('display.max_rows', 200)
print df
# get_cards('test_game') | {
"repo_name": "aspera1631/hs_logreader",
"path": "logreader.py",
"copies": "1",
"size": "4183",
"license": "mit",
"hash": 7330306061523231000,
"line_mean": 25.6496815287,
"line_max": 75,
"alpha_frac": 0.4994023428,
"autogenerated": false,
"ratio": 3.5782720273738238,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4577674370173824,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bdeutsch'
import re
import numpy as np
import pandas as pd
# Make a list of all card IDs and create a dataframe
def get_ids(filename):
    """Build a DataFrame indexed by every entity id found in `filename`.

    Rows for the three fixed entities (GameEntity and both players) are
    seeded explicitly; the Name/CardId/Player columns are filled later by
    import_data().

    Fix: `df.ix` (removed in pandas 1.0) replaced by the equivalent
    label-based `df.loc`; as before, missing labels 1-3 are added by
    enlargement on assignment.
    """
    idlist = []
    with open(filename) as f:
        for line in f:
            # Entity ids appear as "id=<n> " preceded by '[' or a space.
            m = re.search('[\[ ]id=(\d+) ', line)
            if m:
                # `ent_id` (was `id`) avoids shadowing the builtin.
                ent_id = int(m.group(1))
                if ent_id not in idlist:
                    idlist.append(ent_id)
    idlist.sort()
    df = pd.DataFrame(index=idlist)
    df.index.name = "Entity ID"
    df["Name"] = np.nan
    df["CardId"] = np.nan
    df["Player"] = np.nan
    df.loc[1, "Name"] = "GameEntity"
    df.loc[2, "Name"] = "Player 1"
    df.loc[2, "Player"] = 1
    df.loc[3, "Name"] = "Player 2"
    df.loc[3, "Player"] = 2
    return df
def import_data(filename, df):
    """Parse bracketed entity updates from `filename` into `df`.

    Each log line may contain several "[...]" groups; the first is the
    command tag (e.g. "[zone]") and is skipped.  Every remaining group is
    scanned for id/name/cardId/player fields, which are written into the
    row for that entity id.

    Fix: `df.ix` (removed in pandas 1.0) replaced by label-based `df.loc`;
    `ent_id` (was `id`) no longer shadows the builtin.

    Returns the updated DataFrame.
    """
    updates = []
    with open(filename) as f:
        for line in f:
            # All innermost "[...]" groups on the line.
            groups = re.findall(r"\[([^\[]+?)]", line)
            # Skip lines carrying only the command-designation bracket.
            if len(groups) > 1:
                updates.extend(groups[1:])
    for item in updates:
        m = re.search("id=(\d+)", item)
        if not m:
            continue
        ent_id = int(m.group(1))
        n = re.search("name=(.+?) \w+?=", item)
        if n:
            df.loc[ent_id, "Name"] = n.group(1)
        n = re.search("cardId=(\w.+?) ", item)
        if n:
            df.loc[ent_id, "CardId"] = n.group(1)
        # Player is stored as the captured string, as in the original.
        n = re.search("player=(\d)", item)
        if n:
            df.loc[ent_id, "Player"] = n.group(1)
    return df
# get rid of the "zone" and "power" markers.
# collect the entries into a list
# Call function
# Parse 'test_game' into an entity DataFrame and dump it (Python-2 print).
df = import_data("test_game", get_ids("test_game"))
pd.set_option('display.max_rows', 200)
print df
# get_cards('test_game') | {
"repo_name": "aspera1631/hs_logreader",
"path": "import_all.py",
"copies": "1",
"size": "2656",
"license": "mit",
"hash": -2516843833235626500,
"line_mean": 27.8804347826,
"line_max": 77,
"alpha_frac": 0.4785391566,
"autogenerated": false,
"ratio": 3.751412429378531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4729951585978531,
"avg_score": null,
"num_lines": null
} |
__author__ = 'beast'
import simpleldap
class LDAPAuth(object):
    """LDAP-backed authentication and group-membership checks."""

    def __init__(self, server, port, encryption, user_dn, supported_group):
        self.server = server
        self.user_dn = user_dn                   # DN template, e.g. "uid=%s,ou=people,..."
        self.supported_group = supported_group   # default group for membership checks
        self.port = port
        self.encryption = encryption

    def authenticate(self, username, password):
        """Return True iff the user's DN binds successfully with `password`."""
        with simpleldap.Connection(self.server, port=self.port, encryption=self.encryption) as conn:
            is_valid = conn.authenticate(self.user_dn % (username), password)
        return bool(is_valid)

    def check_user_in_group(self, username, group=None):
        """Return True iff `username` belongs to `group`.

        Falls back to the configured supported_group when no group is given.
        BUG FIX: the original condition was inverted ("if not group:
        selected_group = group"), which replaced the default with None and
        ignored any explicitly passed group.
        """
        selected_group = self.supported_group
        if group is not None:
            selected_group = group
        # Consistent with authenticate(): use the configured port/encryption.
        with simpleldap.Connection(self.server, port=self.port, encryption=self.encryption) as conn:
            try:
                result = conn.search("(&(cn=%s)(memberOf=*%s*))" % (username, selected_group))
                if len(result) > 0:
                    return True
            except Exception:  # was a bare except; keep best-effort semantics
                return False
        return False
return False | {
"repo_name": "mr-robot/granule",
"path": "granule/granular/auth.py",
"copies": "1",
"size": "1086",
"license": "mit",
"hash": -7907744019355515000,
"line_mean": 26.8717948718,
"line_max": 99,
"alpha_frac": 0.5782688766,
"autogenerated": false,
"ratio": 4.292490118577075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0053992352676563195,
"num_lines": 39
} |
__author__ = 'beast'
from flask import Flask, request, g, jsonify
from flask.ext.httpauth import HTTPBasicAuth
from granular.store import get_manager
from granular.work import subscribe
# Module-level singletons: HTTP Basic-Auth helper and the Flask application.
auth = HTTPBasicAuth()
app = Flask(__name__)
def get_granule():
    """Return the request-scoped Store, creating it on first use.

    The instance is cached on flask.g so a single connection serves
    the whole request.
    """
    if getattr(g, '_granular', None) is None:
        g._granular = get_manager(host="172.17.0.1")
    return g._granular
@app.teardown_appcontext
def close_connection(exception):
    """Tear down the request's Store, if one was ever created."""
    store = getattr(g, '_granular', None)
    if store is not None:
        store.close()
@auth.verify_password
def verify_pw(username, password):
    """HTTP Basic-Auth callback: a truthy result marks the credentials valid."""
    store = get_granule()
    return store.login(username, password)
@app.route('/api/2015-05-30/authenticate' , methods=['POST', 'GET'])
@auth.login_required
def login():
    # Reaching this handler at all means Basic-Auth succeeded; the empty
    # JSON body is just an acknowledgement.
    return jsonify({})
@app.route('/api/2015-05-30/activity/' , methods=['POST', 'GET'])
@auth.login_required
def activities():
    """POST: run the JSON body as a new activity. GET: list the caller's activities."""
    store = get_granule()
    if request.method == 'POST':
        payload = request.get_json()
        return jsonify(store.run_activity(payload))
    listing = store.get_user_activities(user_id=store.user_id)
    return jsonify(activities=listing)
@app.route('/api/2015-05-30/activity/<activity_id>', methods=['GET'])
@auth.login_required
def activity(activity_id):
    """Return one activity belonging to the authenticated user."""
    store = get_granule()
    return jsonify(store.get_activity(activity_id, store.user_id))
@app.route('/api/2015-05-30/activity/<activity_id>/result', methods=['GET', 'POST'])
@auth.login_required
def activity_result(activity_id):
    # NOTE(review): despite the /result route and POST being accepted, this
    # returns the activity itself and never reads the request body -- looks
    # like an unfinished stub; confirm intended behavior.
    activity = get_granule().get_activity(activity_id, get_granule().user_id)
    return jsonify(activity)
@app.route('/')
def home():
    # Placeholder landing page; the commented call below was used once to
    # bootstrap a test user.
    #get_granule().create_user("Test","Test")
    return ""
if __name__ == "__main__":
app.run(debug=True, port=8080) | {
"repo_name": "mr-robot/granule",
"path": "granule/application.py",
"copies": "1",
"size": "1848",
"license": "mit",
"hash": -2603262195135216000,
"line_mean": 23.9864864865,
"line_max": 92,
"alpha_frac": 0.6737012987,
"autogenerated": false,
"ratio": 3.323741007194245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4497442305894245,
"avg_score": null,
"num_lines": null
} |
__author__ = 'beast'
import base64, hashlib, random
from signals import post_save_activity
import redis
class Store(object):
    """Redis-backed persistence for users and their activities.

    Key layout:
        user:name:<username>       -> user id
        user:id:<id>               -> hash {username, token}
        user:<id>:activity         -> sorted set of the user's activity keys
        user:<id>:activity:<aid>   -> hash {input}
        activity                   -> global sorted set of all activity keys
    """

    def __init__(self, host="localhost", port=6379):
        self.r = redis.StrictRedis(host=host, port=port, db=0)
        # Set by login(); default user for the *_activity helpers below.
        self.user_id = None

    def close(self):
        # redis-py manages its own connection pool; nothing to release.
        pass

    def _resolve_user(self, user_id):
        """Explicit user_id if given, else the id cached by login()."""
        return user_id if user_id is not None else self.user_id

    def login(self, username, password):
        """Validate credentials; on success cache and return the user dict.

        BUG FIX: the password was previously ignored entirely -- any
        password logged an existing user in.  It is now checked against
        the salted SHA-256 token written by create_user().  Users stored
        without a token (legacy rows) are still accepted, as before.
        """
        current_id = self.r.get("user:name:%s" % username)
        if not current_id:
            return None
        stored_name, token = self.r.hmget("user:id:%s" % current_id, ["username", "token"])
        if token:
            # token format: "<salt>:<base64(sha256(salt + password))>"
            salt, _, digest = token.partition(":")
            candidate = base64.b64encode(hashlib.sha256(salt + password).digest())
            if candidate != digest:
                return None
        self.user_id = current_id
        return {"user_id": current_id, "username": stored_name}

    def create_user(self, username, password):
        """Create a user with a salted password token; None if the name exists."""
        if self.r.exists("user:name:%s" % username):
            return None
        user_id = str(self.r.incr("user_id"))
        salt = str(random.getrandbits(256))
        token = "%s:%s" % (salt, base64.b64encode(hashlib.sha256(salt + password).digest()))
        pipeline = self.r.pipeline()
        pipeline.set("user:name:%s" % username, user_id)
        pipeline.hmset("user:id:%s" % user_id, {"username": username, "token": token})
        pipeline.execute()
        return {"user_id": user_id, "username": username}

    def run_activity(self, inputs, user_id=None):
        """Record a new activity for the user; return its id (None if no user).

        BUG FIX: the activity key was previously formatted with the raw
        user_id argument, producing "user:None:activity:<n>" keys whenever
        the caller relied on the logged-in default.
        """
        active_user_id = self._resolve_user(user_id)
        if not active_user_id:
            return None
        activity_id = self.r.incr("activity:id")
        activity_key = "user:%s:activity:%s" % (active_user_id, activity_id)
        self.r.zadd("activity", activity_id, activity_key)
        self.r.hmset(activity_key, {"input": inputs})
        self.r.zadd("user:%s:activity" % active_user_id, activity_id, activity_key)
        post_save_activity.send(self, activity_id, inputs)
        return activity_id

    def get_activity(self, activity_id, user_id=None):
        """Fetch one activity hash (always including its id); None if no user.

        BUG FIX: key built from the resolved user id (was the raw argument);
        items() replaces the Python-2-only iteritems().
        """
        active_user_id = self._resolve_user(user_id)
        if not active_user_id:
            return None
        activity_key = "user:%s:activity:%s" % (active_user_id, activity_id)
        activity = {"id": activity_id}
        for key, value in self.r.hgetall(activity_key).items():
            activity[key] = value
        return activity

    def add_result(self, activity_id, result, user_id=None):
        """Attach a result hash to an activity; None if no user is resolved.

        BUG FIX: key built from the resolved user id (was the raw argument).
        """
        active_user_id = self._resolve_user(user_id)
        if not active_user_id:
            return None
        return self.r.hmset("user:%s:activity:%s:result" % (active_user_id, activity_id),
                            {"result": result})

    def get_all_activities(self):
        """All activity keys, in insertion (score) order."""
        return self.r.zrange("activity", 0, -1)

    def get_user_activities(self, user_id=None):
        """Activity keys for one user (defaults to the logged-in user).

        BUG FIX: a stray post_save_activity.send(...) fired on every read
        here (and with a different signature than the write path); removed.
        """
        active_user_id = self._resolve_user(user_id)
        if not active_user_id:
            return {}
        return self.r.zrange("user:%s:activity" % str(active_user_id), 0, -1)
def get_manager(host="localhost", port=6379):
return Store(host, port) | {
"repo_name": "mr-robot/granule",
"path": "granule/granular/store.py",
"copies": "1",
"size": "3362",
"license": "mit",
"hash": -8749583636607866000,
"line_mean": 23.5474452555,
"line_max": 105,
"alpha_frac": 0.5559190958,
"autogenerated": false,
"ratio": 3.591880341880342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46477994376803416,
"avg_score": null,
"num_lines": null
} |
__author__ = 'beast'
import unittest
import requests
import json
class TestRestFunctionalGranule(unittest.TestCase):
    """End-to-end REST checks; requires the API running on localhost:8080."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_basic_rest_functional(self):
        #API calls End point using Basic Auth
        payload = {'some': 'data'}
        response = requests.post("http://localhost:8080/api/2015-05-30/activity/", data=json.dumps(payload))
        #API Gets endpoint response
        # assertEqual replaces the deprecated assertEquals alias (removed in
        # Python 3.12).
        self.assertEqual(response.status_code, 200)
        #Endpoint response includes activity details
        #API Gets Activity result Endpoint
        pass
class TestWebFunctionalGranule(unittest.TestCase):
    """Placeholder for browser-level functional tests.

    The intended steps are sketched in comments only; nothing is
    implemented yet.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_basic_web_test_functional(self):
        #User goes to page, and is asked to Login
        #User Gets redirected to Home page on login
        #User Enters Item Details into Activity Form
        #User Gets result
        pass
if __name__ == '__main__':
unittest.main() | {
"repo_name": "mr-robot/granule",
"path": "tests/test_functional.py",
"copies": "1",
"size": "1050",
"license": "mit",
"hash": 4627267885394648000,
"line_mean": 18.1090909091,
"line_max": 108,
"alpha_frac": 0.6371428571,
"autogenerated": false,
"ratio": 4.285714285714286,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008698831785780228,
"num_lines": 55
} |
__author__ = 'beau'
__author__ = 'beau'
import pywt
import numpy as np
# Demo input: the fixed list is immediately overwritten by 16 random ints.
x = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
x = np.random.randint(100,size=16)
print x
# haar = pywt.Wavelet('haar')
# dwt_x = pywt.wavedec(x,haar)
# print dwt_x
import math
# Un-normalized Haar filter bank (coefficient 1/2 instead of sqrt(2)/2).
c = 1/2.0#math.sqrt(2)/2 #'real' haar
dec_lo, dec_hi, rec_lo, rec_hi = [c, c], [-c, c], [c, c], [c, -c]
filter_bank = [dec_lo, dec_hi, rec_lo, rec_hi]
# Custom (unnamed) wavelet; requires the PyWavelets package.
wl = pywt.Wavelet(name="", filter_bank=filter_bank)
dwt_x = pywt.wavedec(x,wl)
print dwt_x
# Flatten the per-level coefficient arrays into one feature vector.
dwt_x = np.concatenate(dwt_x)
#Left child: 2i
#Right child: 2i+1
class featureMask:
    """Mask over a complete binary tree of wavelet features.

    mask[i] == 1 means feature i may be selected; selecting i opens its
    children at indices 2*i and 2*i+1 (the scheme exercised by the demo
    below this class).
    """
    def __init__(self, n_features=None, mask_arr=None):
        if mask_arr is None:
            self.mask = np.array([0] * n_features)
        else:
            self.mask = mask_arr
            # BUG FIX: constructing with only mask_arr used to crash below
            # because n_features stayed None; derive it from the mask.
            if n_features is None:
                n_features = len(mask_arr)
        # The tree must be complete: n_features a positive power of two.
        assert ((n_features & (n_features - 1)) == 0) and n_features > 0
        self.selection_count = [0] * n_features

    def update(self, selected_feature_idx):
        # Only features already opened by the mask may be selected.
        assert self.mask[selected_feature_idx] == 1
        self.selection_count[selected_feature_idx] += 1
        if selected_feature_idx * 2 + 1 < len(self.mask):  # not a leaf
            self.mask[selected_feature_idx * 2] = 1        # left child
            self.mask[selected_feature_idx * 2 + 1] = 1    # right child

    def print_tree(self):
        # Not implemented yet.
        pass

    def validIdxs(self):
        """Indices currently selectable under the mask."""
        return np.where(self.mask == 1)[0]

    def pickRandomFeat(self):
        """Selects a random feature, allowed by the current mask"""
# Demo: open the root, then walk one branch of the tree (Python-2 prints).
fm = featureMask(n_features=32)
print fm.mask
fm.mask[0]=1
print fm.mask
fm.update(0)
fm.update(1)
fm.update(3)
fm.update(7)
fm.update(14)
print fm.mask
print fm.validIdxs() | {
"repo_name": "B3AU/waveTree",
"path": "sklearn/waveTree/tests/starting_code_featuremask.py",
"copies": "1",
"size": "1623",
"license": "bsd-3-clause",
"hash": -5427183008002700000,
"line_mean": 22.2,
"line_max": 82,
"alpha_frac": 0.6229205176,
"autogenerated": false,
"ratio": 2.6390243902439026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37619449078439027,
"avg_score": null,
"num_lines": null
} |
import nltk
import re
import os.path
import glob
import sqlite3 as lite
import sys
import time
import geniatagger
#nltk.download()
# Python-2-only hack: force the default string encoding to UTF-8 so the
# str/unicode juggling in the sqlite and nltk calls below does not raise
# UnicodeDecodeError on non-ASCII article text.
reload(sys)
sys.setdefaultencoding("utf-8")
#########################################Searching for Ontological concepts############################################
#Searching One_word concepts
def one_Concept(sentence, onto):
    """Return one-word concepts of ontology `onto` mentioned in `sentence`.

    Tokenizes the upper-cased sentence and looks each token up in
    <onto>_concepts_tag_up (rows with Reg_exp == 'one').  Matches shorter
    than three characters are skipped, as in the original heuristic.

    Fixes: one sqlite connection per sentence instead of one per token
    (and it is now closed); the token is bound as a query parameter
    instead of being interpolated into the SQL string.  The table name
    cannot be bound, so `onto` must be trusted.
    """
    tokens = nltk.word_tokenize(sentence.upper())
    matches = []
    con = lite.connect('Concepts.sqlite')
    try:
        cur = con.cursor()
        query = ('SELECT Concept from ' + onto +
                 '_concepts_tag_up WHERE Reg_exp == ? and Concept == ?')
        for token in tokens:
            cur.execute(query, ("one", token))
            rows = cur.fetchall()
            if rows and len(rows[0][0]) > 2:
                matches.append(rows[0][0])
    finally:
        con.close()
    return matches
#Searching Multi_word concepts
def comp_concept(sentence,onto):
    """Find multi-word ontology concepts of `onto` in `sentence`.

    The sentence is POS-tagged twice (upper-cased, then lower-cased); the
    tag sequence is matched against the stored tag patterns
    (Reg_exp != 'one') of <onto>_concepts_tag_up / _min, and the candidate
    word spans are then looked up as concepts.

    NOTE(review): the upper-case results (list of strings in `up_res`) are
    discarded when `up_res` is re-initialised for the lower-case pass,
    which returns 1-tuples from fetchall(); callers index items with k[0],
    so only the lower-case behaviour appears intended -- confirm.
    NOTE(review): the concept lookups interpolate matched text into the
    SQL string; quote characters in the text would break the query.
    """
    jupper=sentence.upper()
    tokens = nltk.word_tokenize(jupper)
    pos_tag = nltk.pos_tag(tokens)
    CC=list()
    # Collapse proper nouns to plain nouns so tag patterns match uniformly.
    for i in pos_tag:
        C=list(i)
        if C[1]=='NNP':
            CC=CC+[[C[0],'NN']]
        else:
            CC=CC+[C]
    # tt: the sentence's tag sequence as one space-joined string.
    tt=''
    for i in CC:
        tt=tt+i[1]+' '
    con = lite.connect('Concepts.sqlite')
    cur = con.cursor()
    req2="SELECT Tag from "+onto+"_concepts_tag_up WHERE Reg_exp!='one'"
    cur.execute(req2)
    sec=cur.fetchall()
    C=(set(sec))
    # Tag patterns that occur somewhere in this sentence's tag sequence.
    list_reg=list()
    for reg in C:
        if reg[0] in tt or reg[0]==tt:
            list_reg=list_reg+[reg[0]]
    # Expand each matching tag pattern into the corresponding word span.
    list_term=list()
    for i in list_reg:
        re_i=i.split()
        for j in CC:
            if re_i[0]==j[1]:
                v=CC.index(j)
                l=0
                term=''
                while CC[v][1]==re_i[l] and v<len(CC)-1 and l<len(re_i)-1:
                    term=term+CC[v][0]+' '
                    v=v+1
                    l=l+1
                list_term=list_term+[term+CC[v][0]]
    # Undo tokenization spacing around parentheses and commas.
    req_list=list()
    for i in set(list_term):
        X=i.replace('( ','(')
        CV=X.replace(' )',')')
        CV1=CV.replace(' ,',',')
        req_list=req_list+[CV1]
    up_res=list()
    for i in req_list:
        t=i
        cur.execute('SELECT Concept from '+onto+'_concepts_tag_up WHERE Concept =="%s"' %t)
        sec=cur.fetchall()
        if sec:
            up_res=up_res+[sec[0][0]]
    # Second pass: identical matching against the lower-cased tables.
    jupper=sentence.lower()
    tokens = nltk.word_tokenize(jupper)
    pos_tag = nltk.pos_tag(tokens)
    CC=list()
    for i in pos_tag:
        C=list(i)
        if C[1]=='NNP':
            CC=CC+[[C[0],'NN']]
        else:
            CC=CC+[C]
    tt=''
    for i in CC:
        tt=tt+i[1]+' '
    con = lite.connect('Concepts.sqlite')
    cur = con.cursor()
    req2="SELECT Tag from "+onto+"_concepts_tag_min WHERE Reg_exp!='one'"
    cur.execute(req2)
    sec=cur.fetchall()
    C=(set(sec))
    list_reg=list()
    for reg in C:
        if reg[0] in tt or reg[0]==tt:
            list_reg=list_reg+[reg[0]]
    list_term=list()
    for i in list_reg:
        re_i=i.split()
        for j in CC:
            if re_i[0]==j[1]:
                v=CC.index(j)
                l=0
                term=''
                while CC[v][1]==re_i[l] and v<len(CC)-1 and l<len(re_i)-1:
                    term=term+CC[v][0]+' '
                    v=v+1
                    l=l+1
                list_term=list_term+[term+CC[v][0]]
    req_list=list()
    for i in set(list_term):
        X=i.replace('( ','(')
        CV=X.replace(' )',')')
        CV1=CV.replace(' ,',',')
        req_list=req_list+[CV1]
    # NOTE(review): this reset discards the upper-case pass's results.
    up_res=list()
    for i in req_list:
        t=i
        cur.execute('SELECT Concept from '+onto+'_concepts_tag_min WHERE Concept =="%s"' %t)
        sec=cur.fetchall()
        if sec:
            up_res=up_res+sec
    return up_res
#########################################Searching for gene names############################################
def gene (sent,tagger):
    """Extract named-entity spans from `sent` using the GENIA tagger.

    tagger.parse() yields per-token tuples; field [1] is used as the token
    text and field [4] as the IOB entity tag (e.g. 'B-protein').  Tokens
    whose tag contains 'cell' are skipped.  Consecutive tagged tokens are
    grouped into names, a new group starting at each 'B-' tag.

    NOTE(review): `genre` keeps the tag of the *last* 'B-' token, so every
    returned name shares that single label; and if tup_gene is non-empty
    but no tag contains 'B-', `genre` is unbound (NameError).  Confirm
    whether per-span labels were intended.
    """
    c=tagger.parse(sent)
    tup_gene=list()
    name_list=list()
    for i in c:
        if len(i[4])>1:
            if 'cell' in i[4]:
                print 'in progress'
            else:
                tup_gene=tup_gene+[(i[1],i[4])]
    if len(tup_gene)>0:
        # Indices where a new entity begins ('B-' tags), plus a sentinel end.
        big_list=list()
        for i in range (0,len(tup_gene)):
            if 'B-' in tup_gene[i][1]:
                big_list=big_list+[i]
                genre=tup_gene[i][1]
        big_list=big_list+[len(tup_gene)+1]
        # Join the tokens of each [start, next-start) segment into one name.
        for j in range (0,len(big_list)-1):
            name=''
            c=(big_list[j+1])
            t=big_list[j]
            my_name=''
            t=tup_gene[big_list[j]:c]
            for i in t:
                name=name+' '+i[0]
            name_list=name_list+[(name,genre)]
    return name_list
#########################################Searching synonimes#########################################
def syno(Ontology, liste):
    """Map recognized terms to canonical concepts via <Ontology>_Synonym.

    `liste` may mix plain strings and 1-tuples (rows from fetchall());
    each entry is upper-cased and looked up as a synonym.  Returns the
    de-duplicated list of canonical concept names.

    Fixes: the synonym value is bound as a query parameter instead of
    being interpolated into the SQL string (quote characters used to
    break the query), the duplicated query code in the two try branches
    is merged, and the connection is closed.  The table name cannot be
    bound, so `Ontology` must be trusted.
    """
    con = lite.connect('Concepts.sqlite')
    cur = con.cursor()
    listy = list()
    for entry in set(liste):
        try:
            key = entry.upper()
        except AttributeError:
            # fetchall() rows are 1-tuples; unwrap before upper-casing.
            key = entry[0].upper()
        cur.execute('SELECT Concept from ' + Ontology + '_Synonym WHERE Syno == ?', (key,))
        for row in cur.fetchall():
            listy = listy + [row[0]]
    con.close()
    return list(set(listy))
#########################################Annotation concepts############################################
def annotation(directory, table_name, GeniaPath):
    """Annotate every *.txt article in `directory` and store hits in SQLite.

    For each article, each sentence is scanned for CL / DOID / UBERON
    concepts (one-word and multi-word) and for GENIA gene mentions; the
    canonical concepts are inserted into `table_name` of Concepts.sqlite
    and a human-readable summary is written to <directory>res.txt.

    NOTE(review): the DDL/insert SQL is built by concatenation, so
    `table_name` must be trusted.  The gene lists are reassigned inside
    the sentence loop, so only the last sentence's genes (N_list_gene)
    are recorded per article -- confirm whether accumulation was intended.
    NOTE(review): art2[1] assumes exactly one '/' in the file path.
    """
    tagger = geniatagger.GeniaTagger(GeniaPath)
    con = lite.connect('Concepts.sqlite')
    cur = con.cursor()
    # Recreate the output table from scratch.
    rzq='DROP TABLE IF EXISTS '+table_name
    print rzq
    cur.execute(rzq)
    raq='CREATE TABLE '+table_name+' (Art , Concept , Onto)'
    cur.execute(raq)
    texte=''
    for filename in glob.glob(os.path.join(directory, '*.txt')):
        tt=time.time()
        print '--------------<'+str(filename)+'>--------------'
        texte=texte+'--------------<'+str(filename)+'>--------------'+'\n'
        # Split the article into sentences on '. ' after joining lines.
        fil1=open(filename,'r')
        liste=fil1.read()
        liste2=liste.replace('\n',' ')
        liste1=liste2.split('. ')
        if '' in liste1:
            liste1.remove('')
        list_one_cl=list()
        list_comp_cl=list()
        list_one_doid=list()
        list_comp_doid=list()
        list_one_uberon=list()
        list_comp_uberon=list()
        list_gene=list()
        for sent in liste1:
            list_one_cl=list_one_cl+one_Concept(sent,'CL')
            list_comp_cl=list_comp_cl+comp_concept(sent,'CL')
            list_one_doid=list_one_doid+one_Concept(sent,'DOID')
            list_comp_doid=list_comp_doid+comp_concept(sent,'DOID')
            list_one_uberon=list_one_uberon+one_Concept(sent,'UBERON')
            list_comp_uberon=list_comp_uberon+comp_concept(sent,'UBERON')
            list_gene=gene(sent,tagger)
            # Re-run the tagger on the sentence stripped of '+'/'-' signs.
            liste=['+','-']
            newsent=sent.translate(None, ''.join(liste))
            list_N_gene=gene(newsent,tagger)
            N_list_gene=list_gene+list_N_gene
        all_cell=list_one_cl+list_comp_cl
        #print all_cell
        syno_cell=syno ('CL', all_cell)
        # Article id = file basename without '.txt'.
        art1=str(filename)
        art2=art1.split('/')
        art3=art2[1].split('.txt')
        art4=art3[0]
        for C in syno_cell:
            with con:
                cur.execute('INSERT INTO '+table_name+' (Art , Concept , Onto) VALUES (?,?,?)',(art4,str(C),'CL'))
        texte=texte+'\n'+'Cell Pop:'
        for k in list(set(list_one_cl)):
            texte=texte+'\t'+str(k)
        for k in list(set(list_comp_cl)):
            texte=texte+'\t'+k[0]
        all_dis=list_one_doid+list_comp_doid
        syno_dis=syno ('DOID', all_dis)
        for C in syno_dis:
            with con:
                cur.execute('INSERT INTO '+table_name+' (Art , Concept , Onto) VALUES (?,?,?)',(art4,str(C),'DOID'))
        texte=texte+'\n'+'Disease:'
        for k in list(set(list_one_doid)):
            texte=texte+'\t'+str(k)
        for k in list(set(list_comp_doid)):
            texte=texte+'\t'+k[0]
        all_ana=list_one_uberon+list_comp_uberon
        syno_ana=syno ('UBERON', all_ana)
        for C in syno_ana:
            with con:
                cur.execute('INSERT INTO '+table_name+' (Art , Concept , Onto) VALUES (?,?,?)',(art4,str(C),'UBERON'))
        texte=texte+'\n'+'Anatomy:'
        for k in list(set(list_one_uberon)):
            texte=texte+'\t'+str(k)
        for k in list(set(list_comp_uberon)):
            texte=texte+'\t'+k[0]
        texte=texte+'\n'+'Gene:'
        for k in list(set(N_list_gene)):
            try:
                with con:
                    cur.execute('INSERT INTO '+table_name+' (Art , Concept , Onto) VALUES (?,?,?)',(art4,str(k[0]),str(k[1])))
                texte=texte+'\t'+k[0]+'||'+k[1]
            except lite.ProgrammingError:
                # Retry with a str text factory when non-ASCII bytes break the insert.
                con = lite.connect('Concepts.sqlite')
                con.text_factory = str
                cur = con.cursor()
                with con:
                    cur.execute('INSERT INTO '+table_name+' (Art , Concept , Onto) VALUES (?,?,?)',(art4,str(k[0]),str(k[1])))
        ttt=time.time()
        texte=texte+'\n'+str(ttt-tt)
        print (ttt-tt)
    # Write the accumulated per-article summary next to the articles.
    lif=directory+'res.txt'
    fil=open(str(lif),'w')
    fil.write(texte)
    fil.close()
    print 'The annotation process is done'
| {
"repo_name": "walidbedhiafi/OntoContext1",
"path": "OntoContext/annot.py",
"copies": "1",
"size": "8039",
"license": "mit",
"hash": 2607152706825569000,
"line_mean": 27.5070921986,
"line_max": 120,
"alpha_frac": 0.5853961936,
"autogenerated": false,
"ratio": 2.466707579011967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3552103772611967,
"avg_score": null,
"num_lines": null
} |
from Tkinter import *
import sqlite3 as lite
import operator
###############################Graphical interface###############################################
class ListBoxChoice(object):
    """Modal Tk dialog presenting a single-select list.

    returnValue() blocks until the pane is closed and yields the chosen
    item, or None when nothing was chosen / Cancel was pressed.
    """
    def __init__(self, master=None, title=None, message=None, list=[]):
        # NOTE(review): `list` shadows the builtin and defaults to a mutable
        # value; it is copied immediately, so behavior is unaffected.
        self.master = master
        self.value = None
        self.list = list[:]
        # Modal child window: grab_set() routes all input here until closed.
        self.modalPane = Toplevel(self.master)
        self.modalPane.transient(self.master)
        self.modalPane.grab_set()
        self.modalPane.bind("<Return>", self._choose)
        self.modalPane.bind("<Escape>", self._cancel)
        if title:
            self.modalPane.title(title)
        if message:
            Label(self.modalPane, text=message).pack(padx=5, pady=5)
        listFrame = Frame(self.modalPane)
        listFrame.pack(side=TOP, padx=5, pady=5)
        # Horizontal and vertical scrollbars wired to the listbox below.
        xscrollbar = Scrollbar(listFrame, orient=HORIZONTAL)
        xscrollbar.pack(side=BOTTOM, fill=X)
        scrollBar = Scrollbar(listFrame)
        scrollBar.pack(side=RIGHT, fill=Y)
        self.listBox = Listbox(listFrame, selectmode=SINGLE)
        self.listBox.pack(side=LEFT, fill=Y)
        scrollBar.config(command=self.listBox.yview)
        xscrollbar.config(command=self.listBox.xview)
        self.listBox.config(yscrollcommand=scrollBar.set)
        self.listBox.config(xscrollcommand=xscrollbar.set)
        #self.list.sort()
        for item in self.list:
            self.listBox.insert(END, item)
        buttonFrame = Frame(self.modalPane)
        buttonFrame.pack(side=BOTTOM)
        chooseButton = Button(buttonFrame, text="Choose", command=self._choose)
        chooseButton.pack()
        cancelButton = Button(buttonFrame, text="Cancel", command=self._cancel)
        cancelButton.pack(side=RIGHT)
    def _choose(self, event=None):
        # Record the highlighted item (None when nothing selected) and close.
        try:
            firstIndex = self.listBox.curselection()[0]
            self.value = self.list[int(firstIndex)]
        except IndexError:
            self.value = None
        self.modalPane.destroy()
    def _cancel(self, event=None):
        # Close without recording a selection.
        self.modalPane.destroy()
    def returnValue(self):
        """Block until the dialog closes, then return the chosen item (or None)."""
        self.master.wait_window(self.modalPane)
        return self.value
class Checkbar(Frame):
    """A row of checkbuttons; state() reports each button's 0/1 value."""
    def __init__(self, parent=None, picks=(), side=LEFT, anchor=W):
        # `picks` now defaults to an immutable tuple -- the original used a
        # mutable list default (a classic Python pitfall).  It was only
        # iterated, so behavior is unchanged.
        Frame.__init__(self, parent)
        self.vars = []
        for pick in picks:
            var = IntVar()
            chk = Checkbutton(self, text=pick, variable=var)
            chk.pack(side=side, anchor=anchor, expand=YES)
            self.vars.append(var)
    def state(self):
        """Current 0/1 value of every checkbutton, in creation order."""
        return map((lambda var: var.get()), self.vars)
###############################Generate Children###############################################
def all_children(concepts, table):
    """Return the distinct descendants of `concepts` recorded in `table`.

    `table` (e.g. 'CL_ALL_PATH') stores each concept with an ancestor-path
    column 'Anc'; a row matches when '/<concepts>/' occurs in that path.

    Fixes: the LIKE pattern is bound as a parameter (the original spliced
    the concept name into the SQL string, which broke on quote characters)
    and the connection is closed.  The table name cannot be bound, so
    `table` must be trusted.
    """
    cc = '/' + concepts + '/'
    con = lite.connect('Concepts.sqlite')
    try:
        cur = con.cursor()
        cur.execute('Select Distinct Concept From ' + table + ' where Anc LIKE ?',
                    ('%' + cc + '%',))
        return cur.fetchall()
    finally:
        con.close()
###############################User parameters from the graphical interface#####################
def param():
    """Show the options dialog and return the two checkbar states.

    Blocks in Tk's mainloop until the user presses Validate (root.quit);
    the final state is then read via allstates() and returned as
    ([alphabetic-sort flag, frequency-sort flag], [gene-only flag]).
    """
    def allstates():
        # Echo and return both checkbars' current 0/1 states.
        print(list(lng.state()), list(tgl.state()))
        return (list(lng.state()), list(tgl.state()))
    root=Tk()
    lng = Checkbar(root, ['Sort by alphabetic order', 'Sort by frequency'])
    tgl = Checkbar(root, ['Only texts where gene are mentioned'])
    lng.pack(side=TOP, fill=X)
    tgl.pack(side=LEFT)
    Button(root, text='Validate', command=root.quit).pack(side=RIGHT)
    Button(root, text='My choice', command=allstates).pack(side=RIGHT)
    root.mainloop()
    return allstates()
###############################Select art with mentioned gene#####################
def first_inter(text_table):
    """Return (article ids, user options) for the concept-choosing step.

    param() returns two checkbox-state lists; the second is the 'Only
    texts where gene are mentioned' toggle.  Gene mentions are stored in
    `text_table` with Onto tags like 'B-protein', hence the LIKE '%B-%'
    filter.

    BUG FIX: the two branches were swapped -- ticking the gene-only box
    previously returned *all* articles, while leaving it unticked applied
    the gene filter.  The duplicated query code is also merged.
    `text_table` is spliced into the SQL and must be trusted.
    """
    K = param()
    if K[1] == [1]:
        # Gene-only: articles with at least one GENIA 'B-*' annotation.
        req = ("Select Distinct Art From "+text_table+" where Onto LIKE ('%B-%')")
    else:
        req = ("Select Distinct Art From " + text_table)
    con = lite.connect('Concepts.sqlite')
    cur = con.cursor()
    cur.execute(req)
    text_list = [row[0] for row in cur.fetchall()]
    return (text_list, K)
###############################The choosing process##################################
def choosing (text_table,ontology, part):
    """Let the user pick concepts of one ontology mentioned in the candidate articles.

    text_table: annotation table in Concepts.sqlite.
    ontology: ontology prefix ('CL', 'DOID', 'UBERON', ...).
    part: human-readable name shown in the selection dialog title.
    Returns (terms_dict, My_dict_art):
      terms_dict  -- chosen concept -> [concept plus its mentioned descendants]
      My_dict_art -- concept -> distinct articles mentioning it or a descendant
    """
    text_liste=first_inter(text_table)
    text_list=text_liste[0]
    L=text_liste[1]  # raw param() choices; L[0][0] selects the sort order below
    print "This step may take few minutes"
    con = lite.connect('Concepts.sqlite')
    cur = con.cursor()
    my_cell_liste=list()
    # collect every concept of this ontology mentioned in any candidate article
    for i in text_list:
        req=('SELECT DISTINCT Concept FROM '+ text_table+' WHERE Onto="'+ontology+'" and Art="'+i+'"')
        cur.execute(req)
        sec2=cur.fetchall()
        for i in sec2:  # NOTE(review): shadows the outer loop variable (harmless in Python)
            my_cell_liste=my_cell_liste+[i[0]]
    list(set(my_cell_liste))  # NOTE(review): result discarded -- dead statement, kept as-is
    my_concept_list=list()
    my_dict_concept=dict()
    # expand each mentioned concept with all of its ontology descendants
    for i in list(set(my_cell_liste)):
        C=list(set(all_children(str(i), str(ontology)+'_ALL_PATH')))
        my_concept_list=my_concept_list+[i]
        my_dict_concept[i]=[i]
        for u in C:
            my_concept_list=my_concept_list+[u[0]]
            my_dict_concept[i]=my_dict_concept[i]+[u[0]]
    KK=dict()           # concept -> number of distinct supporting articles
    My_dict_art=dict()  # concept -> distinct articles (concept or descendant mentioned)
    My_dict_art1=dict()
    for i in my_dict_concept:
        My_dict_art1[i]=list()
        for g in list(set(my_dict_concept[i])):
            req=('SELECT DISTINCT Art FROM '+ text_table+' WHERE Onto="'+ontology+'" and Concept="'+g+'"')
            cur.execute(req)
            sec2=cur.fetchall()
            if sec2:
                for j in sec2:
                    My_dict_art1[i]=My_dict_art1[i]+[j[0]]
        My_dict_art[i]=list(set(My_dict_art1[i]))
        KK[i]=len(My_dict_art[i])
    # sort alphabetically or by article frequency, per the user's earlier choice
    if L[0][0]==1:
        GG=sorted(KK.items(), key=lambda x:x[0])
    else:
        GG= sorted(KK.items(), key=lambda x:x[1], reverse=True)
    list_cell=list()
    for o in GG:
        texte=str(o[1])+'\t'+str(o[0])
        list_cell=list_cell+[texte]
    # repeatedly show the list dialog until the user returns an empty selection
    root=Tk()
    returnValue = True
    my_cell_list=list()
    while returnValue:
        returnValue = ListBoxChoice(root, str(part)+" part", "When your choice is done clic choose", list_cell).returnValue()
        my_cell_list=my_cell_list+[returnValue]
    root.mainloop()
    my_choosed_list=list()
    for i in my_cell_list:
        if i:
            # dialog entries are "count\tconcept"; keep only the concept part
            h=i.split('\t')
            my_choosed_list=my_choosed_list+[h[1]]
    terms_dict=dict()
    for i in my_choosed_list:
        terms_list=[i]
        for j in my_dict_concept:
            if i == j:
                print my_dict_concept[j]
                for k in my_dict_concept[j]:
                    print str(i)+' has mentioned child: '+str(k)
                    terms_list=terms_list+[k]
        terms_dict[i]=list(set(terms_list))
    return (terms_dict,My_dict_art)
###############################The crossover##################################
def crisscross(text_table):
con = lite.connect('Concepts.sqlite')
cur = con.cursor()
CC=choosing (text_table,'CL', 'Cell populations')
Dis=choosing (text_table,'DOID', 'Diseases')
Ana=choosing (text_table,'UBERON', 'Anatomy')
protein_dict=dict()
DNA_dict=dict()
RNA_dict=dict()
print "starting the crisscross process"
for i in CC[0]:
for j in Dis[0]:
for k in Ana[0]:
dict_name=str(i)+str(j)+str(k)
protein_dict[dict_name]=list()
DNA_dict[dict_name]=list()
RNA_dict[dict_name]=list()
for l in list(set(CC[1][i])):
for m in list(set(Dis[1][j])):
for n in list(set(Ana[1][k])):
if l==m==n:
req=('SELECT DISTINCT Concept FROM '+ text_table+' WHERE Onto="B-protein"and Art="'+l+'"')
cur.execute(req)
sec2=cur.fetchall()
if sec2:
for o in sec2:
protein_dict[dict_name]=protein_dict[dict_name]+[o[0]]
req2=('SELECT DISTINCT Concept FROM '+ text_table+' WHERE Onto="B-DNA"and Art="'+l+'"')
cur.execute(req2)
sec3=cur.fetchall()
if sec3:
for p in sec3:
DNA_dict[dict_name]=DNA_dict[dict_name]+[p[0]]
req4=('SELECT DISTINCT Concept FROM '+ text_table+' WHERE Onto="B-RNA"and Art="'+l+'"')
cur.execute(req4)
sec4=cur.fetchall()
if sec3:
for q in sec3:
RNA_dict[dict_name]=RNA_dict[dict_name]+[q[0]]
return (protein_dict,DNA_dict,RNA_dict)
| {
"repo_name": "walidbedhiafi/OntoContext1",
"path": "OntoContext/crisscross.py",
"copies": "1",
"size": "8145",
"license": "mit",
"hash": 1446736913631744500,
"line_mean": 31.7108433735,
"line_max": 119,
"alpha_frac": 0.5965623082,
"autogenerated": false,
"ratio": 2.957516339869281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8831042689115762,
"avg_score": 0.044607191790703736,
"num_lines": 249
} |
__author__ = 'befulton'
from subprocess import call, Popen, PIPE
from collections import defaultdict
import os
import time
import re
import datetime
import sys
def total_seconds(td):
    """Return the timedelta *td* expressed in seconds.

    Re-implements timedelta.total_seconds(), which is unavailable in Python 2.6.
    """
    whole_seconds = td.seconds + td.days * 24 * 3600
    return (td.microseconds + whole_seconds * 10**6) / 10**6
def open_collectl(filename, start_time=None):
bindir = os.path.dirname(sys.argv[0])
args = [bindir+os.sep+"collectl", "-P", "-p", filename, "-sZ"]
if start_time:
midnight = datetime.datetime.combine(start_time, datetime.datetime.min.time())
offsettime = total_seconds(midnight - start_time + datetime.timedelta(minutes=1))
print "offsettime: %s s" % offsettime
args += ["--offsettime", str(offsettime)]
p = Popen(args, stdout=PIPE)
return p
def wait_for_collectl(filename):
    """Poll until *filename* is a complete gzip file (collectl finished writing).

    Uses `gzip -tf` (exit status 0 == intact archive) as the completeness test
    and gives up after ~300 one-second retries.
    """
    finished = call(["gzip", "-tf", filename])
    count = 0
    while finished != 0 and count < 300:
        print "collectl has not yet finished"
        time.sleep(1)
        finished = call(["gzip", "-tf", filename])
        count += 1
def get_start_time(filename):
    """Return [date, time] strings of the first data record in a collectl capture.

    The first two readline() calls skip (and echo) the two header lines that
    collectl -P prints before the data records.
    """
    p = open_collectl(filename)
    print p.stdout.readline()
    print p.stdout.readline()
    s = p.stdout.readline().split()
    p.stdout.close()
    return s[0:2]
    #return datetime.datetime.strptime(s[0] + ' ' + s[1], "%Y%m%d %H:%M:%S")
def replay(filename, start_time):
    """Yield collectl playback lines one at a time.

    For a .gz capture, replays it through the collectl binary (skipping the
    two header lines); otherwise treats *filename* as a plain text file.
    start_time is forwarded to open_collectl for the --offsettime computation.
    """
    if filename.endswith('gz'):
        p = open_collectl(filename, start_time)
        print p.stdout.readline()
        print p.stdout.readline()
        while True:
            # poll BEFORE reading so the final buffered line is still yielded
            # after the subprocess exits
            retcode = p.poll() #returns None while subprocess is running
            line = p.stdout.readline()
            yield line
            if(retcode is not None):
                break
    else:
        with open(filename) as f:
            for line in f:
                yield line
def timedelta(filename):
    """Extract the sampling-interval value (the -iN flag) from a capture header.

    The second header line of a collectl capture records the command-line
    parameters; this returns the text after '-i' of the first such parameter.
    NOTE(review): shadows datetime.timedelta at module scope, and next() raises
    StopIteration if no -i parameter is present -- kept as-is.
    """
    print filename
    p = Popen(["zcat", filename], stdout=PIPE)
    p.stdout.readline()
    params = p.stdout.readline().split()
    i = next(param for param in params if param.startswith('-i'))
    p.stdout.close()
    return i[2:]
def prettyprocess(line):
    """Map one collectl process record to a short tool label, or None for noise.

    line: a collectl -P -sZ record; the command and its arguments start at
        whitespace-separated field 29.
    Returns a canonical tool name, None for shell/housekeeping commands, or
    "<basename>_unknown" for anything unrecognized.
    """
    cmd = line.split()[29:]
    if not cmd:
        return None
    prog = cmd[0]
    # Trivial shell/housekeeping commands are dropped entirely.
    if prog in ("-bash", 'sh', '/bin/bash', 'bash', 'ln', '/bin/pwd', 'mkdir', 'date', 'touch', '/usr/bin/env'):
        return None
    known_exes = ['fastool', 'ParaFly','Butterfly','ReadsToTranscripts', 'jellyfish',
                  'inchworm', 'FastaToDeBruijn', 'QuantifyGraph', 'GraphFromFasta', 'CreateIwormFastaBundle',
                  'bowtie-build', 'bowtie', 'Chrysalis', 'cat', 'sort', 'cp', 'wc', 'rm', 'find']
    # Match on the executable's trailing path component ('bowtie-build' is
    # listed before 'bowtie' so the longer name wins).
    for exe in known_exes:
        if prog.endswith(exe):
            return exe
    if prog == 'samtools':
        return ('samtools_' + cmd[1]) if len(cmd) > 1 else 'samtools'
    if prog == '/bin/sort':
        return 'sort'
    if prog == 'java':
        joined = " ".join(cmd)
        if 'Butterfly.jar' in joined:
            return 'Butterfly'
        if 'ExitTester.jar' in joined:
            return 'ExitTester'
        if '-version' in joined:
            return 'java_version'
        return 'java'
    if prog == 'perl':
        perl_scripts = ['scaffold_iworm_contigs', 'Trinity', 'collectl', 'print_butterfly_assemblies',
                        'partition_chrysalis_graphs_n_reads', 'fasta_filter_by_min_length', 'partitioned_trinity_aggregator']
        for script in perl_scripts:
            if script in cmd[1]:
                return script
        return 'perl'
    if prog == '/usr/bin/perl' and 'collectl' in cmd[2]:
        return 'collectl'
    return os.path.basename(prog)+'_unknown'
def build_datasets(line_generator):
    """Group collectl process lines by tool and accumulate per-tool/grand totals.

    line_generator: iterable of collectl -P -sZ record lines.
    Returns (ordered_keys, line_dict, sum_dict, grand_sum_dict, end_time):
      ordered_keys   -- tool names in first-seen order
      line_dict      -- tool -> raw lines
      sum_dict       -- (tool, date, time) -> 27 accumulated numeric columns
      grand_sum_dict -- (date, time) -> 27 accumulated numeric columns
      end_time       -- [date, time] of the last non-empty line
    Non-numeric columns are simply skipped during accumulation.
    """
    by_tool = defaultdict(list)
    tool_sums = defaultdict(lambda: [0] * 27)
    grand_sums = defaultdict(lambda: [0] * 27)
    tools_in_order = []
    final_line = ''
    for raw in line_generator:
        if raw:
            final_line = raw
        tool = prettyprocess(raw)
        if not tool:
            continue
        if tool not in tools_in_order:
            tools_in_order.append(tool)
        by_tool[tool].append(raw)
        fields = raw.split()
        row = tool_sums[tool, fields[0], fields[1]]
        overall = grand_sums[tuple(fields[0:2])]
        for col in range(27):
            try:
                value = float(fields[col + 2])
            except ValueError:
                continue
            row[col] += value
            overall[col] += value
    return tools_in_order, by_tool, tool_sums, grand_sums, final_line.split()[0:2]
def write_times(start_time, end_time, colpar):
    """Write run-timing metadata to ./global.time.

    start_time: [date, time] of the first record (only the date is used; the
        run is measured from that day's midnight).
    end_time: [date, time] of the last record.
    colpar: sampling interval string extracted from the capture header.
    """
    day_start = start_time[0] + " 00:00:00"
    fmt = "%Y%m%d %H:%M:%S"
    finished = datetime.datetime.strptime(" ".join(end_time), fmt)
    begun = datetime.datetime.strptime(day_start, fmt)
    out = open("global.time", "w")
    try:
        out.write("date %s\n" % " ".join(start_time))
        out.write("start %s\n" % day_start)
        out.write("end %s\n" % " ".join(end_time))
        out.write("runtime %s\n" % total_seconds(finished - begun))
        out.write("interval %s\n" % colpar)
    finally:
        out.close()
def write_files(keys, line_dict, sum_dict, grand_sum_dict):
    """Write per-tool N.tool.data / N.tool.sum files plus collectZ.proc.

    keys: tool names in output order (N = 1-based position in keys).
    line_dict: tool -> raw record lines.
    sum_dict: (tool, date, time) -> list of accumulated column values.
    grand_sum_dict: (date, time) -> list of accumulated column values.
    Tools with no recorded lines are skipped.
    """
    def _trim(val):
        # '%f' then strip trailing zeros/dot: 1.500000 -> '1.5', 2.000000 -> '2'
        return ('%f' % val).rstrip('0').rstrip('.')
    for index, tool in enumerate(keys, start=1):
        if not line_dict[tool]:
            continue
        with open("%s.%s.data" % (index, tool), "w") as data_file:
            data_file.writelines(line_dict[tool])
        with open("%s.%s.sum" % (index, tool), "w") as sum_file:
            for entry in sorted(k for k in sum_dict.keys() if k[0] == tool):
                fields = list(entry[1:]) + [_trim(v) for v in sum_dict[entry]]
                sum_file.write(" ".join(fields) + "\n")
    with open("collectZ.proc", "w") as proc_file:
        for entry in sorted(grand_sum_dict.keys()):
            row = list(entry) + ['{0:g}'.format(v) for v in grand_sum_dict[entry]]
            proc_file.write(" ".join(row) + "\n")
if __name__ == '__main__':
    # Driver: wait for the single .gz capture in cwd to be complete, replay it,
    # and emit the per-tool .data/.sum files plus global timing information.
    filename = next(file for file in os.listdir(".") if file.endswith(".gz"))
    wait_for_collectl(filename)
    colpar = timedelta(filename)
    start_time = get_start_time(filename)
    start = datetime.datetime.strptime(" ".join(start_time), "%Y%m%d %H:%M:%S")
    ordered_keys, line_dict, sum_dict, grand_sum_dict, end_time = build_datasets(replay(filename, start))
    write_files(ordered_keys, line_dict, sum_dict, grand_sum_dict)
    write_times(start_time, end_time, colpar)
    # renumber the .sum files into their final sorted order (M.N.tool.sum)
    sum_files = sorted((file for file in os.listdir(".") if file.endswith(".sum")), key=lambda f: int(f.split('.')[0]))
    for id, sf in enumerate(sum_files):
        os.rename(sf, "%s.%s" % (id+1, sf))
| {
"repo_name": "HPCHub/trinityrnaseq",
"path": "trinity-plugins/collectl/make_data_files.py",
"copies": "4",
"size": "6626",
"license": "bsd-3-clause",
"hash": -6099578993146510000,
"line_mean": 34.6077348066,
"line_max": 119,
"alpha_frac": 0.5430123755,
"autogenerated": false,
"ratio": 3.372010178117048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5915022553617049,
"avg_score": null,
"num_lines": null
} |
__author__ = 'befulton'
import os
import sys
import subprocess
def get_times():
    """Parse ./global.time into {field_name: [token, ...]}.

    Each line of global.time is "name value [value ...]" as written by
    make_data_files.write_times.
    """
    parsed = {}
    with open("global.time") as fh:
        for raw in fh:
            tokens = raw.split()
            parsed[tokens[0]] = tokens[1:]
    return parsed
# Module-level setup: runs at import time and requires ./global.time to exist.
times = get_times()
date = times['start'][0]
start = times['start'][1]
end = times['end'][1]
tics = int(end.split(':')[0]) + 1  # one x-axis tic per elapsed hour
prettycolors = False
# argv[1]: plot title (defaults to the current directory's name)
if len(sys.argv) > 1:
    name = sys.argv[1]
else:
    name = os.path.split(os.getcwd())[1]
# argv[2]: core count bounding the cpu plot's y-range (default 64)
if len(sys.argv) > 2:
    cpu_count = int(sys.argv[2])
else:
    cpu_count = 64
def build_plot(files, stat):
    """Build gnuplot plot-clause fragments, one per .sum file.

    files: *.sum filenames of the form "N.M.tool.sum"; ordered by leading N.
    stat: gnuplot expression plotted against column 1.
    Returns a list of "'file' using 1:(stat) title \"tool\" ls i" strings with
    line styles numbered from 1.
    """
    ordered = sorted(files, key=lambda fn: int(fn.split('.')[0]))
    clauses = []
    for idx, fname in enumerate(ordered, start=1):
        tool = fname.split('.')[2]
        clauses.append("'%s' using 1:(%s) title \"%s\" ls %s" % (fname, stat, tool, idx))
    return clauses
def write_files(name):
    """Generate gnuplot inputs (defs.gnu, common.gnu, ram/cpu/io.gnu) from the
    .sum files in the current directory, then run gnuplot on each plot file.

    name: plot title. Relies on module globals date/start/end/tics/cpu_count,
    prettycolors/color_sets and the gnu_* template strings.
    """
    files = [file for file in os.listdir(".") if file.endswith(".sum")]
    with open("defs.gnu", 'w') as f:
        f.write("#generated gnuplot file\n")
        f.write("set xrange ['%s %s':'%s %s']\n" % (date, start, date, end))
        f.write("set xtics (")
        f.write(", ".join(["\"{0}\" '{1} {0:02d}:00:00'".format(i, date) for i in range(tics)]))
        f.write(" )\n")
        f.write("ncpu=%s" % cpu_count)
        f.write("\n")
    with open("common.gnu", 'w') as f:
        f.write(common_text.format(name))
        if prettycolors:
            colorset = color_sets[len(files)-1]
            for i, c in enumerate(colorset.split()):
                # BUG FIX: the original passed ("...%s..." % i, c) -- two
                # arguments to f.write (TypeError) with an unfilled second %s.
                # Styles are numbered from 1 to match build_plot's "ls" indices.
                f.write("set style line %s lt 1 lc rgb \"%s\" lw mylw1 pt mypt1 ps myps1\n" % (i+1, c))
        else:
            for i in range(len(files)):
                f.write("set style line %s lw mylw1 pt mypt1 ps myps1\n" % (i+1))
    # one gnuplot script per metric: RAM (GiB), core utilization, I/O (MiB/s)
    with open('ram.gnu', 'w') as f:
        f.write(gnu_head % ('ram', end, "RAM usage GiB", ""))
        d = build_plot(files, "fg*$11")
        f.write(",\\\n    ".join(d))
    with open('cpu.gnu', 'w') as f:
        f.write(gnu_head % ('cpu', end, "Core Utilization", "ncpu"))
        d = build_plot(files, "$19/100")
        f.write(",\\\n    ".join(d))
    with open('io.gnu', 'w') as f:
        f.write(gnu_io_head % end)
        d = build_plot(files, "fm*($23+$24)")
        f.write(",\\\n    ".join(d))
    for gnu in ['ram.gnu', 'cpu.gnu', 'io.gnu']:
        subprocess.Popen(['gnuplot', gnu]).wait()
# gnuplot preamble shared by all three plots; {0} is filled with the plot title
common_text = """
set terminal postscript color eps "Times" 14
#set termoption enhanced
set style data points
# ram and cpu
mypt1=7
mylw1=0
#myps1=0.3
myps1=0.6
set key below
set timefmt "%Y%m%d %H:%M:%S"
set xdata time
set format x "%k"
set xrange [*:*]
unset grid
set grid x
unset title
#set title "{0}" noenhanced
set title "{0}"
fm=1./(1024.0)
fg=1./(1024.0)/(1024.0)
#set multiplot layout 3,1
#set tmargin 0
#set tmargin 0.8
#set bmargin 0.8
#set tics nomirror scale 0.66
#set xtics scale 0
#myvertoffset=0.02
"""
# linear-scale plot template: %s slots are (basename, end time, y label, y max)
gnu_head = """load 'common.gnu'
load 'defs.gnu'
set out '%s.eps'
set xlabel "Runtime [h] %s"
set ylabel "%s"
set yrange [0:%s]
plot """
# log-scale I/O plot template: the %s slot is the end time
gnu_io_head = """load 'common.gnu'
load 'defs.gnu'
set out 'io.eps'
set xlabel "Runtime [h] %s"
set ylabel "I/O MiB/s"
set yrange [0.005:2000]
set logscale y
set ytics ("10^{-3}" 0.001, "10^{-2}" 0.01, "10^{-1}" 0.1, "10^{0}" 1, "10^{1}" 10, "10^{2}" 100, "10^{3}" 1000)
plot """
# color_sets[n-1]: palette of n visually-spread RGB colors for n data series
color_sets = [
    '#f800ff',
    '#ff00e3 #f100ff',
    "#ffa900 #00ffdc #0096ff",
    "#ff005d #19ff00 #00ffc8 #c200ff",
    "#ff00c7 #9cff00 #00ff61 #0042ff #5700ff",
    "#ff00cb #ff1500 #59ff00 #00ffb5 #00b4ff #4f00ff",
    "#ff00f2 #ff000f #c0ff00 #0bff00 #00ffdd #0007ff #7800ff",
    "#ff004d #ff0100 #ffe800 #64ff00 #00ff94 #00daff #0070ff #c800ff",
    "#ff0081 #ff0031 #ffa500 #6aff00 #23ff00 #00ffb5 #0058ff #1300ff #ab00ff",
    "#ff00af #ff003b #ff4b00 #fff900 #6aff00 #00ff98 #00feff #00b6ff #3c00ff #8700ff",
    "#ff00de #ff1100 #ff7d00 #fff600 #59ff00 #1bff00 #00ff64 #00e7ff #009cff #7100ff #bc00ff",
    "#ff00b7 #ff0062 #ff5200 #ffdb00 #baff00 #54ff00 #00ff5e #00ff87 #0087ff #0083ff #7800ff #cc00ff",
    "#ff00b9 #ff0037 #ff2600 #ffa500 #bbff00 #84ff00 #00ff03 #00ff64 #00ffb5 #00cfff #0048ff #2b00ff #ba00ff",
    '#ff00d8 #ff0047 #ff0007 #ff7f00 #ffee00 #88ff00 #32ff00 #00ff67 #00ff75 #00c9ff #0070ff #0005ff #6c00ff #8f00ff',
    "#ff00db #ff0066 #ff2d00 #ff3400 #fff600 #adff00 #7eff00 #00ff03 #00ff72 #00ffc5 #00c6ff #0052ff #002fff #9500ff #bb00ff",
    "#ff00c8 #ff009b #ff0004 #ff7600 #ffbe00 #ffef00 #98ff00 #54ff00 #00ff2c #00ff6a #00ffbf #0088ff #004fff #0010ff #8900ff #b300ff",
    "#ff00ed #ff0053 #ff0005 #ff4200 #ffc200 #f9ff00 #aaff00 #52ff00 #14ff00 #00ff74 #00ffb5 #00ffe6 #00b4ff #0025ff #0003ff #6300ff #be00ff",
    "#ff00a9 #ff006c #ff004a #ff4000 #ff5a00 #ffae00 #e8ff00 #9bff00 #36ff00 #00ff3a #00ff7a #00fff4 #00bdff #007cff #0017ff #2a00ff #5d00ff #be00ff",
    "#ff00cb #ff007f #ff0054 #ff0b00 #ff7d00 #ffa700 #ffe400 #aaff00 #7cff00 #18ff00 #00ff41 #00ffb1 #00ffe1 #009cff #0071ff #001eff #1900ff #8a00ff #b400ff",
    "#ff00e1 #ff0086 #ff0037 #ff0400 #ff6f00 #ff8400 #fff100 #aeff00 #71ff00 #05ff00 #00ff22 #00ff56 #00ffd2 #00ecff #00c4ff #003eff #001cff #3300ff #8000ff #c700ff",
    "#ff00f0 #ff009c #ff004e #ff0004 #ff3300 #ff7400 #ffcc00 #f4ff00 #9fff00 #53ff00 #13ff00 #00ff3a #00ff9c #00ffb5 #00f1ff #00b4ff #0078ff #0007ff #3700ff #8a00ff #eb00ff",
    "#ff00d4 #ff00a1 #ff0064 #ff001f #ff3100 #ff9d00 #ffa200 #ffeb00 #a5ff00 #80ff00 #43ff00 #02ff00 #00ff49 #00ff88 #00ffe4 #00e6ff #008fff #004fff #001dff #4300ff #7900ff #b000ff",
    "#ff00f6 #ff00af #ff0047 #ff0012 #ff2e00 #ff6300 #ffc700 #fffc00 #bcff00 #6bff00 #63ff00 #0fff00 #00ff29 #00ff8b #00ffc5 #00fff1 #00c5ff #0072ff #0053ff #0200ff #3600ff #7700ff #bd00ff",
    "#ff00d7 #ff009e #ff0061 #ff0037 #ff3000 #ff7200 #ffaa00 #ffec00 #cfff00 #94ff00 #4eff00 #10ff00 #08ff00 #00ff59 #00ff86 #00ffc1 #00dcff #00b5ff #006cff #0041ff #1d00ff #5100ff #9800ff #d100ff",
    "#ff00e4 #ff0094 #ff007e #ff003b #ff0000 #ff4e00 #ff9100 #ffb000 #ffec00 #ccff00 #76ff00 #28ff00 #00ff17 #00ff50 #00ff5c #00ffa9 #00fff1 #00e6ff #008bff #005bff #0005ff #1b00ff #7000ff #8a00ff #c500ff",
    "#ff00ed #ff00a9 #ff0059 #ff0034 #ff0009 #ff4d00 #ff6200 #ffab00 #fffe00 #e3ff00 #8bff00 #7aff00 #41ff00 #00ff06 #00ff45 #00ff7c #00ffb5 #00fff7 #00b2ff #00a4ff #0067ff #0033ff #2e00ff #5a00ff #9d00ff #d500ff",
    "#ff00e2 #ff00af #ff0059 #ff0033 #ff1000 #ff2c00 #ff7500 #ffb100 #ffe600 #faff00 #c9ff00 #94ff00 #50ff00 #17ff00 #00ff39 #00ff74 #00ffad #00ffe6 #00fff9 #00c8ff #0089ff #004cff #0019ff #1700ff #6e00ff #9400ff #d600ff",
    "#ff00cd #ff00af #ff008a #ff003a #ff0020 #ff2f00 #ff6c00 #ff8300 #ffc200 #ffea00 #b5ff00 #82ff00 #71ff00 #12ff00 #00ff16 #00ff58 #00ff69 #00ffb6 #00ffd5 #00d0ff #00bcff #0077ff #002cff #0020ff #3c00ff #5900ff #9e00ff #be00ff",
    "#ff00d6 #ff00b4 #ff007e #ff005d #ff0013 #ff0f00 #ff4400 #ff8a00 #ffb300 #fff100 #e1ff00 #beff00 #6eff00 #31ff00 #03ff00 #00ff28 #00ff68 #00ff83 #00ffad #00ffe7 #00deff #0099ff #0053ff #0038ff #0011ff #4100ff #6d00ff #8800ff #e800ff",
    "#ff00d8 #ff00ae #ff0078 #ff0036 #ff0003 #ff0400 #ff5500 #ff8600 #ffb100 #ffd100 #cfff00 #b4ff00 #7dff00 #65ff00 #1cff00 #00ff0c #00ff4b #00ff86 #00ffb9 #00ffe5 #00f5ff #00a2ff #0086ff #0064ff #0014ff #2d00ff #4a00ff #8200ff #b100ff #de00ff",
]
if __name__ == '__main__':
write_files(name) | {
"repo_name": "HPCHub/trinityrnaseq",
"path": "trinity-plugins/collectl/plot.py",
"copies": "4",
"size": "7460",
"license": "bsd-3-clause",
"hash": -656375766330385400,
"line_mean": 40.3977272727,
"line_max": 246,
"alpha_frac": 0.6201072386,
"autogenerated": false,
"ratio": 2.311744654477843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4931851893077843,
"avg_score": null,
"num_lines": null
} |
__author__ = 'belinkov'
from itertools import izip_longest
from numpy import cumsum
import subprocess
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks: grouper('ABCDE', 2, 'x') -> AB CD Ex.

    iterable: source items; n: chunk size; fillvalue: pad for the last chunk.
    Returns an iterator of n-tuples.
    """
    # Compatibility fix: izip_longest is Python-2-only; it was renamed to
    # zip_longest in Python 3.
    try:
        from itertools import izip_longest as _zip_longest  # Python 2
    except ImportError:
        from itertools import zip_longest as _zip_longest  # Python 3
    args = [iter(iterable)] * n
    return _zip_longest(*args, fillvalue=fillvalue)
def increment_dict(dic, k):
    """Increment the counter stored under key *k*, starting from 0 if absent."""
    dic[k] = dic.get(k, 0) + 1
def ravel_list(data, lengths):
    """
    Ravel a list into a list of lists based on lengths.

    data: a sliceable sequence.
    lengths: sequence of ints giving consecutive chunk sizes.
    Returns a list of consecutive slices of data, one per entry in lengths.
    """
    # start of chunk i = cumulative length up to (but excluding) chunk i
    start_indices = cumsum(lengths) - lengths
    # xrange replaced by range for Python 3 compatibility (same semantics here)
    return [data[start_indices[i]:start_indices[i] + lengths[i]]
            for i in range(len(start_indices))]
def argmax_two(vals):
    """
    Find the indexes of the two largest values.

    Only well-defined when the maximum value is unique: ties for the maximum
    are never considered for second place. Returns (arg_best, arg_second_best);
    either is -1 when no such element exists (empty input, or no value strictly
    smaller than the maximum).
    """
    best = -float("inf")
    arg_best = -1
    # xrange (Python-2-only) replaced with enumerate for Python 3 compatibility
    for i, v in enumerate(vals):
        if v > best:
            best = v
            arg_best = i
    second_best = -float("inf")
    arg_second_best = -1
    for i, v in enumerate(vals):
        # strictly smaller than the max, so duplicates of the max are skipped
        if v < best and v > second_best:
            second_best = v
            arg_second_best = i
    return arg_best, arg_second_best
def load_word_vectors(word_vectors_filename):
    """Load word2vec-style text embeddings into {word: [float, ...]}.

    Lines with at most two whitespace-separated tokens (e.g. the
    "vocab_size dim" header) are skipped.
    """
    vectors = {}
    with open(word_vectors_filename) as fh:
        for raw in fh:
            tokens = raw.strip().split()
            if len(tokens) > 2:
                vectors[tokens[0]] = [float(t) for t in tokens[1:]]
    return vectors
def get_word_vectors_size(word_vectors):
    """Return the dimensionality of the vectors (None for an empty mapping)."""
    for vec in word_vectors.values():
        return len(vec)
    return None
def bw2utf8(bw):
    """Convert a Buckwalter-transliterated string to Arabic UTF-8 text.

    bw: Buckwalter string, passed to the external bw2utf8.pl helper script
        (which must be reachable from the current directory).
    Returns the decoded unicode output.
    """
    pipe = subprocess.Popen(['perl', 'bw2utf8.pl', bw], stdout=subprocess.PIPE)
    # communicate() both reads stdout and waits for the child, so the perl
    # process is reaped (the original's pipe.stdout.read() left a zombie).
    out, _ = pipe.communicate()
    return out.decode('utf-8')
"repo_name": "boknilev/diacritization",
"path": "utils.py",
"copies": "1",
"size": "1783",
"license": "mit",
"hash": -410875244527967600,
"line_mean": 21.8717948718,
"line_max": 97,
"alpha_frac": 0.5787997757,
"autogenerated": false,
"ratio": 3.4354527938342967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45142525695342967,
"avg_score": null,
"num_lines": null
} |
__author__ = 'belinkov'
from netCDF4 import Dataset
from utils import *
from data_utils import load_extracted_data, Word
import numpy as np
import sys
def collect_predictions(num_labels, pred_filename):
print 'collecting predictions'
pred_classes = []
with open(pred_filename) as f:
count = 0
for line in f:
count += 1
# if count % 1000 == 0:
# print 'sequence:', count
splt = line.strip().split(';')
seq_id = splt[0]
probs = [float(p) for p in splt[1:]]
for letter_probs in grouper(probs, num_labels, 0):
arg_best = np.argmax(letter_probs)
pred_classes.append(arg_best)
return pred_classes
def convert_file(word_filename, word_diac_filename, pred_csv_filename, pred_output_filename, train_nc_filename):
    """
    Convert Currennt output to predictions in Kaldi format.

    :param word_filename (str): file with words (non-diac)
    :param word_diac_filename (str): file with words (diac)
    :param pred_csv_filename (str): file in csv format with predictions
    :param pred_output_filename (str): file to write predictions in Kaldi format (bw-currennt),
        one line per sequence: "seq_id word:diacritized_word ..."
    :param train_nc_filename (str): file in Currennt format that was used to train the model
    :return: None; aborts with a message on stderr if the prediction file and the
        input sequences disagree in length or sequence ids.
    """
    sequences = load_extracted_data(word_filename, word_diac_filename)
    # the label inventory comes from the training .nc file, so predicted class
    # indices map back to the same diacritic labels used during training
    train_nc_file = Dataset(train_nc_filename)
    num_labels = len(train_nc_file.dimensions['numLabels'])
    nc_labels = [''.join(l.data) for l in train_nc_file.variables['labels']]
    class2label = dict(zip(range(len(nc_labels)), nc_labels))
    print class2label
    g = open(pred_output_filename, 'w')
    f = open(pred_csv_filename)
    pred_lines = f.readlines()
    if len(pred_lines) != len(sequences):
        sys.stderr.write('Error: incompatible predicted lines and input sequences. Quitting.\n')
        return
    for i in xrange(len(pred_lines)):
        line = pred_lines[i]
        splt = line.strip().split(';')
        seq_id_pred = splt[0]
        probs = [float(p) for p in splt[1:]]
        sequence = sequences[i]
        if seq_id_pred != sequence.seq_id:
            sys.stderr.write('Error: seq id in text file ' + sequence.seq_id + \
                             ' != seq id in predicted currennt file ' + seq_id_pred + '. Quitting.\n')
            return
        g.write(sequence.seq_id)
        # letters include WORD_BOUNDARY markers, which delimit output words
        letters = sequences[i].get_sequence_letters(include_word_boundary=True)
        letter_idx = 0
        cur_word, cur_word_diac_pred = '', ''
        # probs arrive in per-letter groups of num_labels values; grouper pads
        # the final partial group with probability 0
        for letter_probs in grouper(probs, num_labels, 0):
            letter = letters[letter_idx]
            letter_idx += 1
            if letter == Word.WORD_BOUNDARY:
                if cur_word:
                    # print cur_word + ':' + cur_word_diac_pred
                    g.write(' ' + cur_word + ':' + cur_word_diac_pred)
                cur_word, cur_word_diac_pred = '', ''
                continue
            cur_word += letter
            # pick the most probable diacritic label for this letter
            arg_best = np.argmax(letter_probs)
            pred_label = class2label[arg_best]
            # print letter, ':', pred_label
            cur_word_diac_pred += letter + pred_label
        g.write('\n')
    f.close()
    g.close()
def main():
    # CLI entry point: expects exactly five positional arguments.
    if len(sys.argv) == 6:
        convert_file(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
    else:
        print 'USAGE: python ' + sys.argv[0] + ' <word file> <word diac file> <currennt pred csv file> <pred out file> <train nc file>'
# run only when executed as a script, not on import
if __name__ == '__main__':
    main()
| {
"repo_name": "boknilev/diacritization",
"path": "write_currennt_predictions.py",
"copies": "1",
"size": "3560",
"license": "mit",
"hash": -1394506221659742200,
"line_mean": 34.6,
"line_max": 135,
"alpha_frac": 0.581741573,
"autogenerated": false,
"ratio": 3.5528942115768465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4634635784576846,
"avg_score": null,
"num_lines": null
} |
__author__ = 'belinkov'
import re
import sys
import os
import numpy as np
# Diacritic (harakat) character classes, in Buckwalter transliteration
REGEX_DIACS = re.compile(r'[iauo~FNK`]+')
REGEX_DIACS_NOSHADDA = re.compile(r'[iauoFNK`]+')
DIACS = {'i', 'a', 'u', 'o', '~', 'F', 'N', 'K', '`'}
DIACS_NOSHADDA = {'i', 'a', 'u', 'o', 'F', 'N', 'K', '`'}
PUNCS_STOP = {'!', '.', ':', ';', '?', '-'} # punctuation for heuristic sentence stop
MADA_LATIN_TAG = '@@LAT@@'  # MADA's marker for Latin-script tokens
SHADDA = '~'  # gemination mark; handled specially in Word.make_labels
class Word(object):
    """
    A class for a single word with its diacritics.

    word (str): a word without diacritics extracted from the Treebank
    word_diac (str): a word with diacritics extracted from the Treebank
    letters (list): a list of characters representing the word
    diacs (list): a diacritic label string (possibly empty, up to 2 chars)
        aligned to each entry of letters
    shadda (str): strategy for dealing with shadda (see make_labels)
    """
    WORD_BOUNDARY = '_###_'
    SHADDA_WITH_NEXT = 'with_next'
    SHADDA_IGNORE = 'ignore'
    SHADDA_ONLY = 'only'
    def __init__(self, word, word_diac, shadda=SHADDA_WITH_NEXT):
        self.diacs = []
        self.letters = []
        self.word = word
        self.word_diac = word_diac
        self.make_labels(shadda)
    def __str__(self):
        return 'word: ' + self.word + ' word_diac: ' + self.word_diac
    def make_labels(self, shadda=SHADDA_WITH_NEXT):
        """
        Align letters with diacritic labels, populating self.letters/self.diacs.

        shadda: 'with_next' will create a combined label for each seen shadda+vowel pair
                'ignore' will ignore all shadda occurrences
                'only' will create only shadda labels and ignore all other diacritics
        return: None (results stored on self)
        """
        # sanity check: the undiacritized form must match word exactly,
        # otherwise fall back to treating every character as a letter
        if self.word != REGEX_DIACS.sub('', self.word_diac):
            # TODO consider ignoring Latin words
            sys.stderr.write('Warning: word ' + self.word + ' != word_diac ' + self.word_diac + ' after removing diacritics. Will use all chars as letters\n')
            self.letters = list(self.word_diac)
            self.diacs = ['']*len(self.letters)
        else:
            if shadda == Word.SHADDA_WITH_NEXT:
                # check all letters except for the last
                for i in xrange(len(self.word_diac)-1):
                    if self.word_diac[i] in DIACS:
                        continue
                    self.letters.append(self.word_diac[i])
                    # there may be another diacritic after shadda, so add both under the 'with_next' shadda strategy
                    if self.word_diac[i+1] == SHADDA and i < len(self.word_diac)-2 and self.word_diac[i+2] in DIACS:
                        self.diacs.append(self.word_diac[i+1] + self.word_diac[i+2])
                    # normally just choose the diacritic following the letter
                    elif self.word_diac[i+1] in DIACS:
                        self.diacs.append(self.word_diac[i+1])
                    # if there's no diacritic, choose an empty string
                    else:
                        self.diacs.append('')
                if self.word_diac[-1] not in DIACS:
                    # if the last letter is not a diacritic, add it as well
                    self.letters.append(self.word_diac[-1])
                    self.diacs.append('')
            elif shadda == Word.SHADDA_IGNORE:
                for i in xrange(len(self.word_diac)-1):
                    if self.word_diac[i] in DIACS:
                        continue
                    self.letters.append(self.word_diac[i])
                    # there may be another diacritic after shadda, so add only that diacritic under the 'ignore' shadda strategy
                    if self.word_diac[i+1] == SHADDA and i < len(self.word_diac)-2 and \
                            self.word_diac[i+2] in DIACS and self.word_diac[i+2] != SHADDA:
                        self.diacs.append(self.word_diac[i+2])
                    # add non-shadda diacritic following the letter
                    elif self.word_diac[i+1] in DIACS and self.word_diac[i+1] != SHADDA:
                        self.diacs.append(self.word_diac[i+1])
                    # if there's no non-shadda diacritic, choose an empty string
                    else:
                        self.diacs.append('')
                if self.word_diac[-1] not in DIACS:
                    # if the last letter is not a diacritic, add it as well
                    self.letters.append(self.word_diac[-1])
                    self.diacs.append('')
            elif shadda == Word.SHADDA_ONLY:
                for i in xrange(len(self.word_diac)-1):
                    if self.word_diac[i] in DIACS:
                        continue
                    self.letters.append(self.word_diac[i])
                    # under the 'only' shadda strategy, add only shadda and ignore all other diacritics
                    if self.word_diac[i+1] == SHADDA:
                        self.diacs.append(self.word_diac[i+1])
                    else:
                        self.diacs.append('')
                if self.word_diac[-1] not in DIACS:
                    # if the last letter is not a diacritic, add it as well
                    self.letters.append(self.word_diac[-1])
                    self.diacs.append('')
            else:
                sys.stderr.write('Error: unknown shadda strategy \"' + shadda + '\" in make_labels()\n')
                return
        if len(self.letters) != len(self.diacs):
            sys.stderr.write('Error: incompatible lengths of letters and diacs in word: [ ' + str(self) + ' ]\n')
    @property
    def num_letters(self):
        # number of base letters (diacritics excluded)
        return len(self.letters)
class KaldiWord(Word):
    """
    A class for a word in Kaldi data.

    Like Word, but tries to undo Buckwalter normalizations before aligning
    letters with diacritics; on failure it falls back to using the
    undiacritized form of word_diac as the word.
    """
    def __init__(self, word, word_diac, shadda=Word.SHADDA_WITH_NEXT):
        self.diacs = []
        self.letters = []
        self.word = word
        self.word_diac = word_diac
        if REGEX_DIACS.sub('', self.word_diac) != self.word:
            sys.stderr.write('Warning: word ' + self.word + ' != word_diac ' + self.word_diac + \
                             ' after removing diacritics. Attempting to correct\n')
            self.unnormalize()
            if REGEX_DIACS.sub('', self.word_diac) != self.word:
                sys.stderr.write('Warning: could not correct, word ' + self.word + ' != word_diac ' + \
                                 self.word_diac + '. Using undiacritized word_diac as word.\n')
                self.word = REGEX_DIACS.sub('', self.word_diac)
        self.make_labels(shadda)
    def unnormalize(self):
        """
        Try to reverse Buckwalter normalizations on the diacritized word so it
        lines up character-by-character with self.word. Mutates self.word and
        self.word_diac in place.
        """
        # first, remove "_" (elongation character)
        self.word = self.word.replace('_', '')
        self.word_diac = self.word_diac.replace('_', '')
        # next, check for normalization mismatches
        word_ind = 0
        word_diac_ind = 0
        new_word_diac = ''
        # walk both strings in lockstep, copying diacritics from word_diac and
        # preferring word's character whenever the base letters disagree
        while word_ind < len(self.word) and word_diac_ind < len(self.word_diac):
            word_char = self.word[word_ind]
            word_diac_char = self.word_diac[word_diac_ind]
            if word_char == word_diac_char:
                new_word_diac += word_diac_char
                word_ind += 1
                word_diac_ind += 1
            elif word_diac_char in DIACS:
                new_word_diac += word_diac_char
                word_diac_ind += 1
            else:
                # this is probably a normalization
                # print 'word_char:', word_char, 'word_diac_char:', word_diac_char
                new_word_diac += word_char
                word_ind += 1
                word_diac_ind += 1
        if word_ind == len(self.word) and word_diac_ind == len(self.word_diac) - 1:
            # if we have one more char in word_diac
            word_diac_char = self.word_diac[word_diac_ind]
            if word_diac_char in DIACS:
                new_word_diac += word_diac_char
        self.word_diac = new_word_diac
        # print 'done normalizing. word:', self.word, 'word_diac:', self.word_diac
class Sequence(object):
    """
    A class for a sequence of words.

    words_str (list): a list of word strings
    words_diac_str (list): a list of diacritized word strings (parallel to words_str)
    seq_id (str): an ID for the sequence
    shadda (str): strategy for dealing with shadda (forwarded to Word)
    word_type: type(Word) or type(KaldiWord); selects which word class to build
    """
    def __init__(self, words_str, words_diac_str, seq_id, shadda=Word.SHADDA_WITH_NEXT, word_type=type(Word)):
        if len(words_str) != len(words_diac_str):
            # NOTE(review): this early return leaves the instance without
            # self.words / self.seq_id -- callers must ensure equal lengths
            sys.stderr.write('Error: incompatible words_str ' + str(words_str) + ' and words_diac_str ' + str(words_diac_str) + '\n')
            return
        self.words = []
        for i in xrange(len(words_str)):
            word_str, word_diac_str = words_str[i], words_diac_str[i]
            if word_type == type(KaldiWord):
                word = KaldiWord(word_str, word_diac_str, shadda)
            else:
                word = Word(word_str, word_diac_str, shadda)
            self.words.append(word)
        self.seq_id = seq_id
    def __len__(self):
        return len(self.words)
    def __str__(self):
        res = ''
        for word in self.words:
            res += str(word) + '\n'
        return res
    def num_letters(self, count_word_boundary=False):
        # total base letters; optionally count one boundary per word plus one
        num = 0
        for word in self.words:
            num += word.num_letters
        if count_word_boundary:
            num += len(self) + 1 # include boundaries for beginning and end of sequence
        return num
    def get_sequence_letters(self, include_word_boundary=False):
        # flat letter list; boundaries (when requested) surround the sequence
        # and separate every pair of adjacent words
        letters = []
        if include_word_boundary:
            letters.append(Word.WORD_BOUNDARY)
        for word in self.words:
            letters += word.letters
            if include_word_boundary:
                letters.append(Word.WORD_BOUNDARY)
        return letters
    def get_sequence_letter2word(self, include_word_boundary=False):
        """
        Get a list of words corresponding to the letter sequence.
        Word i in the list is the Word object containing the i-th letter in the
        sequence (boundary positions hold the WORD_BOUNDARY marker instead).
        """
        letter2word = []
        if include_word_boundary:
            letter2word.append(Word.WORD_BOUNDARY)
        for word in self.words:
            for letter in word.letters:
                letter2word.append(word)
            if include_word_boundary:
                letter2word.append(Word.WORD_BOUNDARY)
        return letter2word
    @staticmethod
    def is_sequence_stop(word_str, word_diac_str, stop_on_punc=False):
        """
        Check if there is a sequence stop at this word: an empty word always
        stops; a stop-punctuation token stops only when stop_on_punc is set.
        """
        if word_str == word_diac_str:
            if word_str == '':
                return True
            elif stop_on_punc and word_str in PUNCS_STOP:
                return True
        return False
def load_extracted_data(word_filename, word_diac_filename, stop_on_punc=False, shadda=Word.SHADDA_WITH_NEXT):
    """
    Load data extracted from the Treebank.

    word_filename (str): A text file with one word per line, a blank line between sentences.
    word_diac_filename (str): A text file with one diacritized word per line, a blank line between sentences.
                              Corresponds to word_filename line-by-line.
    stop_on_punc (bool): If True, also stop sequences on stop punctuation
    shadda (str): Strategy for dealing with shadda
    return: sequences (list): A list of Sequence objects containing sentences for the data set
    """
    print 'loading extracted data from:', word_filename, word_diac_filename
    if stop_on_punc:
        print 'stopping sequences on punctuations'
    word_lines = open(word_filename).readlines()
    word_diac_lines = open(word_diac_filename).readlines()
    if len(word_lines) != len(word_diac_lines):
        sys.stderr.write('Error: incompatible word file ' + word_filename + \
                         ' and word_diac file ' + word_diac_filename + '\n')
        return
    sequences = []
    words_str = []
    words_diac_str = []
    sequence_lengths = []
    for word_line, word_diac_line in zip(word_lines, word_diac_lines):
        word_str = word_line.strip()
        word_diac_str = word_diac_line.strip()
        # skip lines where exactly one of the two files has an empty word
        if (word_str == '' and word_diac_str != '') or (word_str != '' and word_diac_str == ''):
            sys.stderr.write('Warning: word_str ' + word_str + ' xor word_diac_str ' + word_diac_str + \
                             ' is empty, ignoring word')
            continue
        if Sequence.is_sequence_stop(word_str, word_diac_str, stop_on_punc):
            # if stopped on non-empty word, include it in the sequence
            if word_str != '':
                words_str.append(word_str)
                words_diac_str.append(word_diac_str)
            # sequence ids are "<basename>:<1-based index>"
            seq_id = os.path.basename(word_filename) + ':' + str(len(sequences) + 1)
            sequence = Sequence(words_str, words_diac_str, seq_id, shadda)
            sequences.append(sequence)
            sequence_lengths.append(sequence.num_letters())
            words_str = []
            words_diac_str = []
        else:
            words_str.append(word_str)
            words_diac_str.append(word_diac_str)
    # flush a trailing sequence not terminated by a stop marker
    if words_str:
        seq_id = os.path.basename(word_filename) + ':' + str(len(sequences) + 1)
        sequence = Sequence(words_str, words_diac_str, seq_id, shadda)
        sequences.append(sequence)
    print 'found', len(sequences), 'sequences'
    print 'average sequence length:', np.mean(sequence_lengths), 'std dev:', np.std(sequence_lengths), 'max:', max(sequence_lengths)
    return sequences
def load_kaldi_data(bw_mada_filename, shadda=Word.SHADDA_WITH_NEXT):
"""
Load data used in Kaldi experiments
bw_mada_filename: each line contains an id followed by bw:mada strings
shadda:
return:
"""
print 'loading kaldi data from:', bw_mada_filename
sequences = []
words_str = []
words_diac_str = []
sequence_lengths = []
f = open(bw_mada_filename)
for line in f:
splt = line.strip().split()
seq_id = splt[0]
for pair in splt[1:]:
word_str, word_diac_str = pair.split(':')
if (word_str == '' and word_diac_str != '') or (word_str != '' and word_diac_str == ''):
sys.stderr.write('Warning: word_str ' + word_str + ' xor word_diac_str ' + word_diac_str + \
' is empty, ignoring word')
continue
words_str.append(word_str)
words_diac_str.append(word_diac_str)
sequence = Sequence(words_str, words_diac_str, seq_id, shadda, word_type=type(KaldiWord))
sequences.append(sequence)
sequence_lengths.append(sequence.num_letters())
words_str = []
words_diac_str = []
f.close()
print 'found', len(sequences), 'sequences'
print 'average sequence length:', np.mean(sequence_lengths), 'std dev:', np.std(sequence_lengths), 'max:', max(sequence_lengths)
return sequences
def load_label_indices(label_indices_filename):
    """
    Load label indices used in training
    label_indices_filename (str): one label (diacritic) per line, in the order used in Currennt
    return: class2label, label2class (dicts): maps from index to label and from label to index
    """
    # 'with' ensures the file handle is closed (the original leaked it)
    with open(label_indices_filename) as f:
        labels = [label.strip() for label in f]
    class2label = dict(enumerate(labels))
    label2class = dict((label, index) for index, label in enumerate(labels))
    return class2label, label2class
| {
"repo_name": "boknilev/diacritization",
"path": "data_utils.py",
"copies": "1",
"size": "15410",
"license": "mit",
"hash": -7405606961672125000,
"line_mean": 39.1302083333,
"line_max": 158,
"alpha_frac": 0.5618429591,
"autogenerated": false,
"ratio": 3.55232826187183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.461417122097183,
"avg_score": null,
"num_lines": null
} |
__author__ = 'belinkov'
import sys
from data_utils import DIACS, REGEX_DIACS, MADA_LATIN_TAG
def extract_data(rdi_bw_filename, output_word_filename, output_word_diac_filename):
"""
Extract data from an RDI file
:param rdi_bw_filename: file containing raw Arabic text, preprocessed by MADA preprocessor (keeping diacritics)
:param output_word_filename: file to write words without diacritics
:param output_word_diac_filename: file to wrote words with diacritics
:return:
"""
print 'extracting data from:', rdi_bw_filename
g_word = open(output_word_filename, 'w')
g_word_diac = open(output_word_diac_filename, 'w')
with open(rdi_bw_filename) as f:
for line in f:
for token in line.strip().split():
if token.startswith(MADA_LATIN_TAG):
sys.stderr.write('Warning: found Latin word: ' + token + '. skipping word.\n')
continue
word_str = REGEX_DIACS.sub('', token)
word_diac_str = token
if word_str == '' or word_diac_str == '':
sys.stderr.write('Warning: empty word_str ' + word_str + ' or word_diac_str ' + word_diac_str + \
'. skipping word.\n')
continue
g_word.write(word_str + '\n')
g_word_diac.write(word_diac_str + '\n')
g_word.write('\n')
g_word_diac.write('\n')
g_word.close()
g_word_diac.close()
print 'written words to file:', output_word_filename
print 'written words diac to file:', output_word_diac_filename
if __name__ == '__main__':
if len(sys.argv) == 4:
extract_data(sys.argv[1], sys.argv[2], sys.argv[3])
else:
print 'USAGE: python ' + sys.argv[0] + ' <rdi bw file> <word output file> <word diac output file>'
| {
"repo_name": "boknilev/diacritization",
"path": "extract_rdi_data.py",
"copies": "1",
"size": "1874",
"license": "mit",
"hash": 9113141537973358000,
"line_mean": 37.2448979592,
"line_max": 117,
"alpha_frac": 0.5752401281,
"autogenerated": false,
"ratio": 3.413479052823315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4488719180923315,
"avg_score": null,
"num_lines": null
} |
__author__ = 'belinkov'
# write current predictions without using any .nc file
#from netCDF4 import Dataset
from utils import *
from data_utils import load_extracted_data, Word, load_label_indices
import numpy as np
import sys
def collect_predictions(num_labels, pred_filename):
print 'collecting predictions'
pred_classes = []
with open(pred_filename) as f:
count = 0
for line in f:
count += 1
# if count % 1000 == 0:
# print 'sequence:', count
splt = line.strip().split(';')
seq_id = splt[0]
probs = [float(p) for p in splt[1:]]
for letter_probs in grouper(probs, num_labels, 0):
arg_best = np.argmax(letter_probs)
pred_classes.append(arg_best)
return pred_classes
def convert_file(word_filename, word_diac_filename, pred_csv_filename, pred_output_filename, label_indices_filename):
    """
    Convert Currennt output to predictions
    word_filename (str): file with words (non-diac)
    word_diac_filename (str): file with words (diac)
    pred_csv_filename (str): file in csv format with predictions
    pred_output_filename (str): file to write predictions in Kaldi format (bw-currennt)
    label_indices_filename (str): file with labels, one label per line, in the order corresponding to indices used in Currennt
    :return: None (writes one line per sequence: '<seq_id> word:word_diac ...')
    """
    sequences = load_extracted_data(word_filename, word_diac_filename)
    class2label, _ = load_labe_indices(label_indices_filename) if False else load_label_indices(label_indices_filename)
    print class2label
    num_labels = len(class2label)
    # NOTE(review): the early 'return's below exit without closing g and f --
    # consider a try/finally or 'with' blocks
    g = open(pred_output_filename, 'w')
    f = open(pred_csv_filename)
    pred_lines = f.readlines()
    if len(pred_lines) != len(sequences):
        sys.stderr.write('Error: incompatible predicted lines and input sequences. Quitting.\n')
        return
    for i in xrange(len(pred_lines)):
        line = pred_lines[i]
        splt = line.strip().split(';')
        seq_id_pred = splt[0]
        probs = [float(p) for p in splt[1:]]
        sequence = sequences[i]
        # sanity check: predictions must line up with the input sequences
        if seq_id_pred != sequence.seq_id:
            sys.stderr.write('Error: seq id in text file ' + sequence.seq_id + \
                             ' != seq id in predicted currennt file ' + seq_id_pred + '. Quitting.\n')
            return
        g.write(sequence.seq_id)
        letters = sequences[i].get_sequence_letters(include_word_boundary=True)
        letter_idx = 0
        cur_word, cur_word_diac_pred = '', ''
        # each consecutive group of num_labels probabilities belongs to one letter
        for letter_probs in grouper(probs, num_labels, 0):
            letter = letters[letter_idx]
            letter_idx += 1
            if letter == Word.WORD_BOUNDARY:
                # flush the accumulated word as 'word:word_with_predicted_diacritics'
                if cur_word:
                    # print cur_word + ':' + cur_word_diac_pred
                    g.write(' ' + cur_word + ':' + cur_word_diac_pred)
                cur_word, cur_word_diac_pred = '', ''
                continue
            cur_word += letter
            # predicted label is the argmax over this letter's class probabilities
            arg_best = np.argmax(letter_probs)
            pred_label = class2label[arg_best]
            # print letter, ':', pred_label
            cur_word_diac_pred += letter + pred_label
        g.write('\n')
    f.close()
    g.close()
def main():
if len(sys.argv) == 6:
convert_file(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
else:
print 'USAGE: python ' + sys.argv[0] + ' <word file> <word diac file> <currennt pred csv file> <pred out file> <label indices file>'
# Run only when invoked as a script (not on import).
if __name__ == '__main__':
    main()
| {
"repo_name": "boknilev/diacritization",
"path": "write_currennt_predictions_nonc.py",
"copies": "1",
"size": "3503",
"license": "mit",
"hash": -1807814872023136800,
"line_mean": 34.3838383838,
"line_max": 140,
"alpha_frac": 0.5860690836,
"autogenerated": false,
"ratio": 3.6262939958592133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4712363079459213,
"avg_score": null,
"num_lines": null
} |
__author__ = 'BELLAICHE Adrien'
from os import listdir
from ford_fulkerson import execute_algorithm
# Normalization table: map latin-1 accented 'e' variants to plain 'e' and
# strip newlines when cleaning names read from the csv files.
corresponding = {"\xeb": "e",
                 "\xe9": "e",
                 "\xe8": "e",
                 "\n": ""}


def clean_line(value):
    """Return *value* with accented characters normalized and newlines removed.

    Replaces the original index-mutation loop with a single pass using
    dict.get, which is both clearer and avoids building an intermediate list
    by hand.
    """
    return ''.join(corresponding.get(char, char) for char in value)


def get_name(value):
    """Build a normalized 'First Last' name from a 'Last,First' csv line.

    The original called value.split(",") twice; the split is now done once.
    """
    parts = value.split(",")
    return clean_line(parts[1] + " " + parts[0])
# Builds a max-flow instance ("ex.graph") from per-person csv files: it
# appears to be a bipartite assignment encoded as source -> person ->
# acquaintance-copy -> sink, each edge with capacity 1.
file_names = listdir("csv")
name_id = {}
k = 0
# Assign integer IDs: 0 for the virtual source node, then one per csv file
# (one file per person, named '<Name>.csv').
name_id["Source"] = k
for name in file_names:
    k += 1
    name_id[clean_line(name.split(".")[0])] = k
idKnows = [[] for _ in name_id]
# Build the acquaintance lists: for each person, record the IDs of the people
# listed in their csv file (self-references and unknown names are skipped).
# Also write an 'id name' correspondence file for later decoding.
corresponding_file = open('correspondance.txt', 'w')
for name in file_names:
    self_name = clean_line(name.split(".")[0])
    selfID = name_id[self_name]
    with open("csv/" + name, 'r') as data:
        lines = data.readlines()
        for line in lines:
            if line != ',\n':
                if get_name(line) in name_id:
                    if get_name(line) != self_name:
                        idKnows[selfID].append(name_id[get_name(line)])
    corresponding_file.write(str(selfID) + " " + self_name + "\n")
corresponding_file.close()
# The source node is connected to every person.
for name in name_id:
    if name != "Source":
        idKnows[0].append(name_id[name])
# Emit the graph: first line is the node count (two layers of len(name_id)
# nodes); then capacity-1 edges source->person, person->acquaintance-copy
# (copies are offset by len(name_id) - 1), and person-copy->sink, where the
# sink is node 2*len(name_id) - 1.
with open("ex.graph", 'w') as out:
    out.write(str(2 * len(name_id)) + '\n')
    for name in name_id:
        selfID = name_id[clean_line(name.split(".")[0])]
        if name == "Source":
            for known in idKnows[name_id[name]]:
                out.write('0' + ' ' + str(known) + ' 1\n')
        else:
            for known in idKnows[name_id[name]]:
                out.write(str(selfID) + ' ' + str(known + len(name_id) - 1) + ' 1\n')
            out.write(str(selfID + len(name_id) - 1) + ' ' + str(2 * len(name_id) - 1) + ' 1\n')
execute_algorithm() | {
"repo_name": "adrien-bellaiche/Repartition_Unpreferred",
"path": "main.py",
"copies": "1",
"size": "1949",
"license": "mit",
"hash": -841187400027989400,
"line_mean": 29.46875,
"line_max": 96,
"alpha_frac": 0.5346331452,
"autogenerated": false,
"ratio": 3.0500782472613457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9078885707752391,
"avg_score": 0.0011651369417910637,
"num_lines": 64
} |
__author__ = 'Belyavtsev'
import AStarSearchModel
class TestModel(AStarSearchModel):
    """
    Minimal test implementation of the AStarSearchModel interface.

    Holds a 5x5 grid (``pole``) in which exactly one cell is 1, marking the
    current cursor position; every other cell is 0.
    """
    def __init__(self):
        """
        Build the model with the cursor placed in the top-left corner.
        @return: Created object TestModel.
        """
        self.pole = [[0 for _ in range(5)] for _ in range(5)]
        self.pole[0][0] = 1
        self.cursor = (0, 0)

    def __changeCursor__(self, position):
        """
        Move the cursor marker from its current cell to *position*.
        @return: None.
        """
        old_x, old_y = self.cursor
        self.pole[old_x][old_y] = 0
        new_x, new_y = position
        self.pole[new_x][new_y] = 1
        self.cursor = (new_x, new_y)

    def __MoveUp__(self):
        """
        Shift the cursor one row up when not already on the top row.
        @return: Is operation was performed.
        """
        x, y = self.cursor
        if x <= 0:
            return False
        self.__changeCursor__((x - 1, y))
        return True
| {
"repo_name": "djbelyak/AStarSearch",
"path": "TestModel.py",
"copies": "1",
"size": "1120",
"license": "mit",
"hash": 4188354282619309600,
"line_mean": 23.347826087,
"line_max": 57,
"alpha_frac": 0.4598214286,
"autogenerated": false,
"ratio": 3.7086092715231787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4668430700123179,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bene'
import string
# class to hold flow entrys
class FlowTable(object):
def __init__(self, switch=None, tableString=None):
self.switch = switch
self.tableString = tableString
#holds alls entrys of a given switch
self.table = []
#fill table
#split tablestring by row
__rows = string.split(tableString, '\n') # --> ['Entry 1', 'Line 2', 'Line 3']
#iterate each row and fill table with entrys
attributes = []
for rowIndex in range(1, len(__rows)):
attributes = __rows[rowIndex].replace(' actions', ',actions').split(',')
#create entrys with attributs
#attributes for each entry
cookie=None
duration=None
table=None
n_packets=None
n_bytes=None
idle_age=None
priority=None
in_port = None
dl_vlan = None
dl_src = None
dl_dst = None
dl_type = None
nw_src = None
nw_dst = None
nw_prot = None
nw_tos = None
tp_src = None
tp_dst = None
icmp_type = None
icmp_code = None
actions = None
for index in range(0, len(attributes)):
attributeToCheck = attributes[index]
#if else if cascade to match keys
if("cookie" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
cookie = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("duration" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
duration = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("table" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
table = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("n_packets" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
n_packets = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("n_bytes" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
n_bytes = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("idle_age" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
idle_age = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("priority" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
priority = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("in_port" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
in_port = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("dl_vlan" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
dl_vlan = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("dl_src" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
dl_src = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("dl_dst" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
dl_dst = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("dl_type" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
dl_type = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("nw_src" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
nw_src = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("nw_dst" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
nw_dst = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("nw_prot" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
nw_prot = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("nw_tos" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
nw_tos = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("tp_src" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
tp_src = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("icmp_type" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
icmp_type = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("icmp_code" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
icmp_code = attributeToCheck[splitIndex : len(attributeToCheck)]
elif("actions" in attributeToCheck):
splitIndex = attributeToCheck.find('=') + 1
actions = attributeToCheck[splitIndex : len(attributeToCheck)]
if(actions is not None):
entry = FlowEntry(cookie, duration, table,n_packets, n_bytes, idle_age, priority, in_port,
dl_vlan, dl_src, dl_dst, dl_type, nw_src, nw_dst, nw_prot, nw_tos, tp_src,
tp_dst, icmp_type, icmp_code, actions)
self.table.append(entry)
def hasEntryWithMacDest(self, mac):
for index in range(0, len(self.table)):
if(self.table[index].dl_dst == mac):
return True
else:
return False
def hasForwardingEntry(self, srcMac, dstMac):
#check all entrys in table
for index in range(0, len(self.table)):
entry = self.table[index]
#check if entry has MAC as dest and action is output (rather than drop)
if(entry.dl_src == srcMac and entry.dl_dst == dstMac and "output" in entry.actions):
return True
return False
def printTable(self):
#check all entrys in table
for index in range(0, len(self.table)):
entry = self.table[index]
entry.printEntry()
class FlowEntry(object):
    """A single OpenFlow flow-table entry: match fields, counters and the
    action string, as parsed by FlowTable. All fields default to None when
    absent from the dump line."""
    def __init__(self, cookie=None, duration=None, table=None, n_packets=None, n_bytes=None,
                 idle_age=None, priority=None, in_port=None, dl_vlan=None, dl_src=None, dl_dst=None,
                 dl_type=None, nw_src=None, nw_dst=None, nw_prot=None, nw_tos=None,
                 tp_src=None, tp_dst=None, icmp_type=None, icmp_code=None,
                 actions=None):
        '''
        in_port=port_no
        Matches physical port port_no. Switch ports are numbered as displayed by dpctl show.
        dl_vlan=vlan
        Matches IEEE 802.1q virtual LAN tag vlan. Specify 0xffff as vlan to match packets that
        are not tagged with a virtual LAN; otherwise, specify a number between 0 and 4095, inclusive,
        as the 12-bit VLAN ID to match.
        dl_src=mac
        Matches Ethernet source address mac, which should be specified as 6 pairs of hexadecimal digits
        delimited by colons, e.g. 00:0A:E4:25:6B:B0.
        dl_dst=mac
        Matches Ethernet destination address mac.
        dl_type=ethertype
        Matches Ethernet protocol type ethertype, which should be specified as a integer between 0 and 65535,
        inclusive, either in decimal or as a hexadecimal number prefixed by 0x, e.g. 0x0806 to match ARP packets.
        nw_src=ip[/netmask]
        Matches IPv4 source address ip, which should be specified as an IP address or host name,
        e.g. 192.168.1.1 or www.example.com. The optional netmask allows matching only on an
        IPv4 address prefix. It may be specified as a dotted quad (e.g. 192.168.1.0/255.255.255.0) or
        as a count of bits (e.g. 192.168.1.0/24).
        nw_dst=ip[/netmask]
        Matches IPv4 destination address ip.
        nw_proto=proto
        Matches IP protocol type proto, which should be specified as a decimal number between 0 and 255,
        inclusive, e.g. 6 to match TCP packets.
        nw_tos=tos/dscp
        Matches ToS/DSCP (only 6-bits, not modify reserved 2-bits for future use) field of IPv4 header tos/dscp,
        which should be specified as a decimal number between 0 and 255, inclusive.
        tp_src=port
        Matches UDP or TCP source port port, which should be specified as a decimal number between 0 and 65535,
        inclusive, e.g. 80 to match packets originating from a HTTP server.
        tp_dst=port
        Matches UDP or TCP destination port port.
        icmp_type=type
        Matches ICMP message with type, which should be specified as a decimal number between 0 and 255, inclusive.
        icmp_code=code
        Matches ICMP messages with code.
        '''
        # flow counters / metadata
        self.cookie=cookie
        self.duration=duration
        self.table=table
        self.n_packets=n_packets
        self.n_bytes=n_bytes
        self.idle_age=idle_age
        self.priority=priority
        # match fields (documented in the docstring above)
        self.in_port = in_port
        self.dl_vlan = dl_vlan
        self.dl_src = dl_src
        self.dl_dst = dl_dst
        self.dl_type = dl_type
        self.nw_src = nw_src
        self.nw_dst = nw_dst
        self.nw_prot = nw_prot
        self.nw_tos = nw_tos
        self.tp_src = tp_src
        self.tp_dst = tp_dst
        self.icmp_type = icmp_type
        self.icmp_code = icmp_code
        # action string, e.g. 'output:1' or 'drop'
        self.actions = actions

    def printEntry(self):
        # NOTE: the trailing commas are the Python 2 idiom for printing
        # without a newline, so all fields end up on one line.
        print("cookie="+str(self.cookie)),
        print("duration="+str(self.duration)),
        print("table="+str(self.table)),
        print("n_packets="+str(self.n_packets)),
        print("n_bytes="+str(self.n_bytes)),
        print("idle_age="+str(self.idle_age)),
        print("priority="+str(self.priority)),
        print("in_port="+str(self.in_port)),
        print("dl_vlan="+str(self.dl_vlan)),
        print("dl_src="+str(self.dl_src)),
        print("dl_dst="+str(self.dl_dst)),
        print("dl_type="+str(self.dl_type)),
        print("nw_src="+str(self.nw_src)),
        print("nw_dst="+str(self.nw_dst)),
        print("nw_prot="+str(self.nw_prot)),
        print("nw_tos="+str(self.nw_tos)),
        print("tp_src="+str(self.tp_src)),
        print("tp_dst="+str(self.tp_dst)),
        print("icmp_type="+str(self.icmp_type)),
        print("icmp_code="+str(self.icmp_code)),
        print("actions="+str(self.actions))
| {
"repo_name": "lsinfo3/BDD-mininet",
"path": "steps/FlowEntrys.py",
"copies": "1",
"size": "10920",
"license": "mit",
"hash": 6235447131816686000,
"line_mean": 47.3185840708,
"line_max": 119,
"alpha_frac": 0.5645604396,
"autogenerated": false,
"ratio": 4.185511690302798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5250072129902797,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bengt'
# Game-wide constants for an 8x8 reversi board (tiles indexed 0..63, row-major).
BOARD, WHITE, BLACK, MOVE = 'BOARD', 'WHITE', 'BLACK', 'MOVE'
WIDTH, HEIGHT = 8, 8
# Tile-index offsets for one step in each compass direction on the
# row-major board (e.g. NORTH is one full row earlier).
NORTH = -HEIGHT
NORTHEAST = -HEIGHT + 1
EAST = 1
SOUTHEAST = HEIGHT + 1
SOUTH = HEIGHT
SOUTHWEST = HEIGHT - 1
WEST = - 1
NORTHWEST = -HEIGHT - 1
# All eight directions, used when scanning for flippable lines.
DIRECTIONS = (NORTH, NORTHEAST, EAST, SOUTHEAST, SOUTH, SOUTHWEST, WEST, NORTHWEST)
def chunks(l, n):
    """Yield successive slices of *l* of length *n*; the final slice may be
    shorter when len(l) is not a multiple of n."""
    for start in range(0, len(l), n):
        chunk = l[start:start + n]
        yield chunk
def get_opponent(player):
    """Return the opposing colour of *player*; raise ValueError for anything
    other than WHITE or BLACK."""
    if player not in (WHITE, BLACK):
        raise ValueError
    return BLACK if player == WHITE else WHITE
class NoMovesError(Exception):
    """Raised when the current player has no legal move available."""
    pass
def outside_board(tile, direction):
    """Return True when stepping from *tile* in *direction* would leave the
    8x8 board (i.e. the move wraps a row edge or runs off the top/bottom)."""
    on_top_row = 0 <= tile <= 7
    on_bottom_row = 56 <= tile <= 63
    on_right_col = tile % WIDTH == 7
    on_left_col = tile % WIDTH == 0
    if on_top_row and direction in (NORTH, NORTHEAST, NORTHWEST):
        return True
    if on_bottom_row and direction in (SOUTH, SOUTHWEST, SOUTHEAST):
        return True
    if on_right_col and direction in (NORTHEAST, EAST, SOUTHEAST):
        return True
    return on_left_col and direction in (NORTHWEST, WEST, SOUTHWEST)
| {
"repo_name": "Zolomon/reversi-ai",
"path": "game/settings.py",
"copies": "1",
"size": "1138",
"license": "mit",
"hash": -7497814401301158000,
"line_mean": 24.2888888889,
"line_max": 83,
"alpha_frac": 0.6080843585,
"autogenerated": false,
"ratio": 2.8168316831683167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39249160416683165,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben Haley & Ryan Jones'
import config as cfg
import shape
import util
from datamapfunctions import DataMapFunctions
import numpy as np
import pandas as pd
from collections import defaultdict
import copy
from datetime import datetime
from demand_subsector_classes import DemandStock, SubDemand, ServiceEfficiency, ServiceLink
from shared_classes import AggregateStock
from demand_measures import ServiceDemandMeasure, EnergyEfficiencyMeasure, FuelSwitchingMeasure, FlexibleLoadMeasure, FlexibleLoadMeasure2
from demand_technologies import DemandTechnology, SalesShare
from rollover import Rollover
from util import DfOper
from outputs import Output
import dispatch_classes
import energyPATHWAYS.helper_multiprocess as helper_multiprocess
import pdb
import logging
import time
class Driver(object, DataMapFunctions):
    """A single demand driver (e.g. population) read from the DemandDrivers
    tables; its values are geo-remapped later by Demand.remap_driver.

    NOTE(review): the base list (object, DataMapFunctions) only yields a
    consistent MRO if DataMapFunctions is an old-style class -- confirm.
    """
    def __init__(self, id, scenario):
        self.id = id
        self.scenario = scenario
        self.sql_id_table = 'DemandDrivers'
        self.sql_data_table = 'DemandDriversData'
        # flipped to True by Demand.remap_driver once geo-mapping is done
        self.mapped = False
        # copy every column of this driver's DemandDrivers row onto the object
        for col, att in util.object_att_from_table(self.sql_id_table, id):
            setattr(self, col, att)
        # creates the index_levels dictionary
        DataMapFunctions.__init__(self, data_id_key='parent_id')
        self.read_timeseries_data()
class Demand(object):
    def __init__(self, scenario):
        """Set up empty driver/sector containers and dispatch-feeder helpers.

        scenario: scenario object used when constructing drivers and sectors.
        """
        self.drivers = {}
        self.sectors = {}
        self.outputs = Output()
        # system-wide electricity shape used when a subsector lacks its own
        self.default_electricity_shape = shape.shapes.data[cfg.electricity_energy_type_shape_id] if shape.shapes.data else None
        self.feeder_allocation_class = dispatch_classes.DispatchFeederAllocation(1)
        # rename the index level so it lines up with demand-side dataframes
        self.feeder_allocation_class.values.index = self.feeder_allocation_class.values.index.rename('sector', 'demand_sector')
        # lazily populated by create_electricity_reconciliation()
        self.electricity_reconciliation = None
        self.scenario = scenario
    def setup_and_solve(self):
        """Top-level demand-side entry point: load drivers, sectors and
        subsectors, solve demand, then aggregate outputs. Order matters."""
        logging.info('Configuring energy system')
        # Drivers must come first
        self.add_drivers()
        # Sectors requires drivers be read in
        self.add_sectors()
        self.add_subsectors()
        self.calculate_demand()
        logging.info("Aggregating demand results")
        self.aggregate_results()
        # optional extra aggregation pass for specific 'evolved' years
        if cfg.evolved_run == 'true':
            self.aggregate_results_evolved(cfg.evolved_years)
def add_sectors(self):
"""Loop through sector ids and call add sector function"""
ids = util.sql_read_table('DemandSectors',column_names='id',return_iterable=True)
for id in ids:
self.sectors[id] = Sector(id, self.drivers, self.scenario)
    def add_subsectors(self):
        """Read in and initialize subsector data for every sector."""
        logging.info('Populating subsector data')
        for sector in self.sectors.values():
            logging.info(' '+sector.name+' sector')
            sector.add_subsectors()
    def calculate_demand(self):
        """Run each sector's demand calculations (delegated to the sector)."""
        logging.info('Calculating demand')
        logging.info(' solving sectors')
        for sector in self.sectors.values():
            logging.info(' {} sector'.format(sector.name))
            sector.manage_calculations()
def add_drivers(self):
"""Loops through driver ids and call create driver function"""
logging.info('Adding drivers')
ids = util.sql_read_table('DemandDrivers',column_names='id',return_iterable=True)
for id in ids:
self.add_driver(id, self.scenario)
self.remap_drivers()
def add_driver(self, id, scenario):
"""add driver object to demand"""
if id in self.drivers:
# ToDo note that a driver by the same name was added twice
return
self.drivers[id] = Driver(id, scenario)
    def remap_drivers(self):
        """
        loop through demand drivers and remap geographically; remap_driver
        recurses through base drivers, so some may already be mapped here
        """
        logging.info(' remapping drivers')
        for driver in self.drivers.values():
            # It is possible that recursion has mapped before we get to a driver in the list. If so, continue.
            if driver.mapped:
                continue
            self.remap_driver(driver)
    def remap_driver(self, driver):
        """Geo-remap one driver to the disaggregation geography, first
        recursively remapping its base driver (if any) so its values can be
        used in the remap."""
        # base driver may be None
        base_driver_id = driver.base_driver_id
        if base_driver_id:
            base_driver = self.drivers[base_driver_id]
            # mapped is an indicator variable that records whether a driver has been mapped yet
            if not base_driver.mapped:
                # If a driver hasn't been mapped, recursion is used to map it first (this can go multiple layers)
                self.remap_driver(base_driver)
            driver.remap(drivers=base_driver.values,converted_geography=cfg.disagg_geography, filter_geo=False)
        else:
            driver.remap(converted_geography=cfg.disagg_geography,filter_geo=False)
        # Now that it has been mapped, set indicator to true
        logging.info('   {}'.format(driver.name))
        driver.mapped = True
        driver.values.data_type = 'total'
    @staticmethod
    def geomap_to_dispatch_geography(df):
        """ maps a dataframe from the primary geography to the dispatch
        geography using the relational GeographyMap database table; returns
        df unchanged when the two geographies are the same
        """
        if cfg.primary_geography==cfg.dispatch_geography:
            return df
        geography_map_key = cfg.cfgfile.get('case', 'default_geography_map_key')
        # create dataframe with map from one geography to another
        map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography, normalize_as='total', map_key=geography_map_key)
        # collapse back to the standard shape levels after the multiply
        levels = ['timeshift_type', cfg.dispatch_geography, 'dispatch_feeder', 'weather_datetime']
        return util.DfOper.mult([df, map_df]).groupby(level=levels).sum()
    def aggregate_electricity_shapes(self, year, geomap_to_dispatch_geography=True, reconciliation_step=False):
        """ Final levels that will always return from this function
        ['timeshift_type', 'gau', 'dispatch_feeder', 'weather_datetime']
        """
        # build the reconciliation factors lazily; skipped when this call IS
        # the reconciliation step (avoids infinite recursion)
        if reconciliation_step==False and self.electricity_reconciliation is None:
            self.create_electricity_reconciliation()
        # shaped load from subsectors that carry their own shapes
        inflexible = [sector.aggregate_inflexible_electricity_shape(year) for sector in self.sectors.values()]
        # subsectors without shapes get the sector/system default shape
        no_shape = [self.shape_from_subsectors_with_no_shape(year)]
        inflex_load = util.DfOper.add(no_shape+inflexible, expandable=False, collapsible=False)
        inflex_load = util.DfOper.mult((inflex_load, self.electricity_reconciliation))
        flex_load = util.DfOper.add([sector.aggregate_flexible_electricity_shape(year) for sector in self.sectors.values()], expandable=False, collapsible=False)
        if flex_load is None:
            # no flexible load anywhere: keep a single timeshift_type key (2,
            # the same key sliced as the base case elsewhere in this class)
            agg_load = pd.concat([inflex_load], keys=[2], names=['timeshift_type'])
        else:
            # replicate inflexible load across the three timeshift types so it
            # can be added to the flexible load frame
            inflex_load = pd.concat([inflex_load]*3, keys=[1,2,3], names=['timeshift_type'])
            agg_load = util.DfOper.add((flex_load, inflex_load), expandable=False, collapsible=False)
        df = self.geomap_to_dispatch_geography(agg_load) if geomap_to_dispatch_geography else agg_load
        # this line makes sure the energy is correct.. sometimes it is a bit off due to rounding
        # NOTE(review): relies on self.energy_demand, which is not assigned
        # anywhere in the code shown here -- confirm it is attached elsewhere
        df *= self.energy_demand.xs([cfg.electricity_energy_type_id, year], level=['final_energy', 'year']).sum().sum() / df.xs(2, level='timeshift_type').sum().sum()
        df = df.rename(columns={'value':year})
        return df
    def electricity_energy_slice(self, year, subsector_slice):
        """Return electricity energy for the given subsectors in *year*,
        allocated across dispatch feeders; a zero-valued frame over every
        geography/feeder combination when the slice is empty."""
        if len(subsector_slice):
            # cache this year's electricity energy grouped by
            # subsector/sector/geography; deleted again by the caller
            if not hasattr(self, 'ele_energy_helper'):
                indexer = util.level_specific_indexer(self.outputs.d_energy, levels=['year', 'final_energy'], elements=[[year], [cfg.electricity_energy_type_id]])
                self.ele_energy_helper = self.outputs.d_energy.loc[indexer].groupby(level=('subsector', 'sector', cfg.primary_geography)).sum()
            feeder_allocation = self.feeder_allocation_class.values.xs(year, level='year')
            return util.remove_df_levels(util.DfOper.mult((feeder_allocation,
                                          self.ele_energy_helper.loc[subsector_slice].groupby(level=('sector', cfg.primary_geography)).sum())), 'sector')
        else:
            dispatch_feeders = self.feeder_allocation_class.values.index.get_level_values('dispatch_feeder').unique()
            return pd.DataFrame(0, columns=['value'], index=pd.MultiIndex.from_product((cfg.geographies, dispatch_feeders),names=(cfg.primary_geography, 'dispatch_feeder')))
    def shape_from_subsectors_with_no_shape(self, year):
        """ Final levels that will always return from this function
        ['gau', 'dispatch_feeder', 'weather_datetime']
        """
        # map shape owner (sector id, or None for the system default shape)
        # -> list of subsector ids that lack their own shape
        subsectors_map = util.defaultdict(list)
        shapes_map = {}
        for sector in self.sectors.values():
            subsectors_map[sector.id if hasattr(sector, 'shape') else None] += sector.get_subsectors_with_no_shape(year)
            shapes_map[sector.id] = sector.shape.values.xs(2, level='timeshift_type') if hasattr(sector, 'shape') else None
        shapes_map[None] = self.default_electricity_shape.values.xs(2, level='timeshift_type')
        # shape each group's energy with its owner's shape and sum the groups
        df = util.DfOper.add([util.DfOper.mult((self.electricity_energy_slice(year, subsectors_map[id]), shapes_map[id])) for id in subsectors_map])
        # drop the per-year cache built inside electricity_energy_slice
        if hasattr(self, 'ele_energy_helper'):
            del self.ele_energy_helper
        return df
    def create_electricity_reconciliation(self):
        """Compute hourly factors reconciling the bottom-up electricity shape
        with the top-down default shape, then push them to the subsectors."""
        logging.info('Creating electricity shape reconciliation')
        # weather_year is the year for which we have top down load data
        weather_year = int(np.round(np.mean(shape.shapes.active_dates_index.year)))
        # the next four lines create the top down load shape in the weather_year
        levels_to_keep = [cfg.primary_geography, 'year', 'final_energy']
        temp_energy = self.group_output('energy', levels_to_keep=levels_to_keep, specific_years=weather_year)
        top_down_energy = util.remove_df_levels(util.df_slice(temp_energy, cfg.electricity_energy_type_id, 'final_energy'), levels='year')
        top_down_shape = top_down_energy * util.df_slice(self.default_electricity_shape.values, 2, 'timeshift_type')
        # this calls the functions that create the bottom up load shape for the weather_year
        bottom_up_shape = self.aggregate_electricity_shapes(weather_year, geomap_to_dispatch_geography=False, reconciliation_step=True)
        bottom_up_shape = util.df_slice(bottom_up_shape, 2, 'timeshift_type')
        bottom_up_shape = util.remove_df_levels(bottom_up_shape, 'dispatch_feeder')
        # at this point we have a top down and bottom up estimates for the load shape across all demand
        # to get the reconciliation we divide one by the other
        self.electricity_reconciliation = util.DfOper.divi((top_down_shape, bottom_up_shape))
        # the final step is to pass the reconciliation result down to subsectors. It becomes a pre-multiplier on all of the subsector shapes
        self.pass_electricity_reconciliation()
        self.pass_default_shape()
def pass_electricity_reconciliation(self):
""" This function threads the reconciliation factors into sectors and subsectors
it is necessary to do it like this because we need the reconciliation at the lowest level to apply reconciliation before
load shifting.
"""
for sector in self.sectors:
for subsector in self.sectors[sector].subsectors:
self.sectors[sector].subsectors[subsector].set_electricity_reconciliation(self.electricity_reconciliation)
def pass_default_shape(self):
for sector in self.sectors:
self.sectors[sector].set_default_shape(self.default_electricity_shape)
    def aggregate_drivers(self):
        """Geo-map every driver to the primary geography and concatenate them
        into the d_driver output frame."""
        def remove_na_levels(df):
            # drop index levels whose only value is the placeholder 'N/A'
            if df is None:
                return None
            levels_with_na_only = [name for level, name in zip(df.index.levels, df.index.names) if list(level)==[u'N/A']]
            return util.remove_df_levels(df, levels_with_na_only).sort_index()
        df_list = []
        for driver in self.drivers.values():
            df = driver.geo_map(attr='values',current_geography=cfg.disagg_geography, converted_geography=cfg.primary_geography, current_data_type='total', inplace=False)
            df['unit'] = driver.unit_base
            df.set_index('unit',inplace=True,append=True)
            # rename any extra index levels to generic other_index_1/other_index_2
            other_indexers = [x for x in df.index.names if x not in [cfg.primary_geography,'year','unit']]
            for i,v in enumerate(other_indexers):
                if i == 0:
                    util.replace_index_name(df,"other_index_1",v)
                else:
                    util.replace_index_name(df,"other_index_2",v)
            df_list.append(df)
        df=util.df_list_concatenate(df_list, keys=[x.id for x in self.drivers.values()],new_names='driver',levels_to_keep=['driver','unit']+cfg.output_demand_levels)
        df = remove_na_levels(df) # if a level only as N/A values, we should remove it from the final outputs
        self.outputs.d_driver = df
def aggregate_results_evolved(self,specific_years):
def remove_na_levels(df):
if df is None:
return None
levels_with_na_only = [name for level, name in zip(df.index.levels, df.index.names) if list(level)==[u'N/A']]
return util.remove_df_levels(df, levels_with_na_only).sort_index()
output_list = ['service_demand_evolved', 'energy_demand_evolved']
unit_flag = [False, False]
for output_name, include_unit in zip(output_list,unit_flag):
df = self.group_output(output_name, include_unit=include_unit, specific_years=specific_years)
df = remove_na_levels(df) # if a level only as N/A values, we should remove it from the final outputs
setattr(self.outputs,"d_"+ output_name, df)
def aggregate_results(self):
def remove_na_levels(df):
if df is None:
return None
levels_with_na_only = [name for level, name in zip(df.index.levels, df.index.names) if list(level)==[u'N/A']]
return util.remove_df_levels(df, levels_with_na_only).sort_index()
output_list = ['energy', 'stock', 'sales','annual_costs', 'levelized_costs', 'service_demand']
unit_flag = [False, True, False, False, True, True]
for output_name, include_unit in zip(output_list,unit_flag):
print "aggregating %s" %output_name
df = self.group_output(output_name, include_unit=include_unit)
df = remove_na_levels(df) # if a level only as N/A values, we should remove it from the final outputs
setattr(self.outputs,"d_"+ output_name, df)
if cfg.output_tco == 'true':
output_list = ['energy_tco', 'levelized_costs_tco', 'service_demand_tco']
unit_flag = [False, False, False,True]
for output_name, include_unit in zip(output_list,unit_flag):
df = self.group_output_tco(output_name, include_unit=include_unit)
df = remove_na_levels(df) # if a level only as N/A values, we should remove it from the final outputs
setattr(self,"d_"+ output_name, df)
if cfg.output_payback == 'true':
output_list = ['annual_costs','all_energy_demand']
unit_flag = [False,False]
for output_name, include_unit in zip(output_list,unit_flag):
levels_to_keep = copy.deepcopy(cfg.output_demand_levels)
levels_to_keep = list(set(levels_to_keep + ['unit'])) if include_unit else levels_to_keep
levels_to_keep += ['demand_technology','vintage']
levels_to_keep = list(set(levels_to_keep))
df = self.group_output(output_name, levels_to_keep=levels_to_keep, include_unit=include_unit)
df = remove_na_levels(df) # if a level only as N/A values, we should remove it from the final outputs
setattr(self,"d_"+ output_name+"_payback", df)
self.aggregate_drivers()
# this may be redundant with the above code
for sector in self.sectors.values():
sector.aggregate_subsector_energy_for_supply_side()
self.aggregate_sector_energy_for_supply_side()
# we are going to output the shapes for all the demand subsectors for specific years
if cfg.cfgfile.get('demand_output_detail','subsector_electricity_profiles').lower() == 'true':
self.create_electricity_reconciliation()
self.output_subsector_electricity_profiles()
def output_subsector_electricity_profiles(self):
# include_technology = True if cfg.cfgfile.get('demand_output_detail','subsector_profiles_include_technology').lower() == 'true' else False
output_years = [int(dy) for dy in cfg.cfgfile.get('demand_output_detail', 'subsector_profile_years').split(',') if len(dy)]
stack = []
for output_year in output_years:
if output_year not in cfg.supply_years:
continue
profiles_df = self.stack_subsector_electricity_profiles(output_year)
stack.append(profiles_df)
stack = pd.concat(stack)
stack.columns = [cfg.calculation_energy_unit.upper()]
self.outputs.subsector_electricity_profiles = stack
    def stack_subsector_electricity_profiles(self, year):
        """Stack one year of hourly electricity profiles for every subsector.

        Returns a frame indexed by [year, geography, dispatch_feeder, sector,
        subsector, timeshift_type, weather_datetime], rescaled so the total
        matches the demand-side electricity energy for the year.
        """
        # df_zeros = pd.DataFrame(0, columns=['value'], index=pd.MultiIndex.from_product((cfg.geographies, shape.shapes.active_dates_index), names=[cfg.primary_geography, 'weather_datetime']))
        stack = []
        index_levels = ['year', cfg.primary_geography, 'dispatch_feeder', 'sector', 'subsector', 'timeshift_type', 'weather_datetime']
        for sector in self.sectors.values():
            feeder_allocation = self.feeder_allocation_class.values.xs(year, level='year').xs(sector.id, level='sector')
            for subsector in sector.subsectors.values():
                df = subsector.aggregate_electricity_shapes(year, for_direct_use=True)
                if df is None:
                    continue
                # df = df_zeros.copy(deep=True)
                df['sector'] = sector.id
                df['subsector'] = subsector.id
                df['year'] = year
                # NOTE(review): DataFrame.sort() only exists in old pandas (<0.20);
                # sort_index() is the modern equivalent
                df = df.set_index(['sector', 'subsector', 'year'], append=True).sort()
                df = util.DfOper.mult((df, feeder_allocation))
                df = df.reorder_levels(index_levels)
                stack.append(df)
        stack = pd.concat(stack).sort()
        # rescale so the profile total equals annual electricity demand; timeshift_type 2
        # is presumably the native (unshifted) profile -- confirm against shape definitions
        stack *= self.energy_demand.xs([cfg.electricity_energy_type_id, year], level=['final_energy', 'year']).sum().sum() / stack.xs(2, level='timeshift_type').sum().sum()
        return stack
def link_to_supply(self, embodied_emissions_link, direct_emissions_link, energy_link, cost_link):
logging.info("linking supply emissions to energy demand")
setattr(self.outputs, 'demand_embodied_emissions', self.group_linked_output(embodied_emissions_link))
logging.info("calculating direct demand emissions")
setattr(self.outputs, 'demand_direct_emissions', self.group_linked_output(direct_emissions_link))
logging.info("linking supply costs to energy demand")
setattr(self.outputs, 'demand_embodied_energy_costs', self.group_linked_output(cost_link))
logging.info("linking supply energy to energy demand")
setattr(self.outputs, 'demand_embodied_energy', self.group_linked_output(energy_link))
def link_to_supply_tco(self, embodied_emissions_link, direct_emissions_link,cost_link):
logging.info("linking supply costs to energy demand for tco calculations")
setattr(self.outputs, 'demand_embodied_energy_costs_tco', self.group_linked_output_tco(cost_link))
def link_to_supply_payback(self, embodied_emissions_link, direct_emissions_link,cost_link):
logging.info("linking supply costs to energy demand for payback calculations")
setattr(self.outputs, 'demand_embodied_energy_costs_payback', self.group_linked_output_payback(cost_link))
def group_output(self, output_type, levels_to_keep=None, include_unit=False, specific_years=None):
levels_to_keep = cfg.output_demand_levels if levels_to_keep is None else levels_to_keep
levels_to_keep = list(set(levels_to_keep + ['unit'])) if include_unit else levels_to_keep
dfs = [sector.group_output(output_type, levels_to_keep, include_unit, specific_years) for sector in self.sectors.values()]
if all([df is None for df in dfs]) or not len(dfs):
return None
dfs, keys = zip(*[(df, key) for df, key in zip(dfs, self.sectors.keys()) if df is not None])
new_names = 'sector'
return util.df_list_concatenate(dfs, keys, new_names, levels_to_keep)
def group_output_tco(self, output_type, levels_to_keep=None, include_unit=False, specific_years=None):
levels_to_keep = copy.deepcopy(cfg.output_demand_levels) if levels_to_keep is None else levels_to_keep
levels_to_keep = list(set(levels_to_keep + ['unit'])) if include_unit else levels_to_keep
levels_to_keep += ['demand_technology','vintage']
levels_to_keep = list(set(levels_to_keep))
dfs = [sector.group_output_tco(output_type, levels_to_keep, include_unit, specific_years) for sector in self.sectors.values()]
if all([df is None for df in dfs]) or not len(dfs):
return None
dfs, keys = zip(*[(df, key) for df, key in zip(dfs, self.sectors.keys()) if df is not None])
new_names = 'sector'
return util.df_list_concatenate(dfs, keys, new_names, levels_to_keep)
def group_linked_output(self, supply_link, levels_to_keep=None):
demand_df = self.outputs.d_energy.copy()
if cfg.primary_geography + '_supply' in supply_link:
geo_label = cfg.primary_geography + '_supply'
# direct_emissions_link is not in supply geography
levels_to_keep = cfg.output_combined_levels if levels_to_keep is None else levels_to_keep
levels_to_keep = [x for x in levels_to_keep if x in demand_df.index.names]
demand_df = demand_df.groupby(level=levels_to_keep).sum()
demand_df = demand_df[demand_df.index.get_level_values('year') >= int(cfg.cfgfile.get('case','current_year'))]
geography_df_list = []
for geography in cfg.geographies:
if geography in supply_link.index.get_level_values(geo_label):
supply_indexer = util.level_specific_indexer(supply_link, [geo_label], [geography])
supply_df = supply_link.loc[supply_indexer, :]
geography_df = util.DfOper.mult([demand_df, supply_df])
geography_df_list.append(geography_df)
df = pd.concat(geography_df_list)
else:
geo_label = cfg.primary_geography
levels_to_keep = cfg.output_combined_levels if levels_to_keep is None else levels_to_keep
levels_to_keep = [x for x in levels_to_keep if x in demand_df.index.names]
demand_df = demand_df.groupby(level=levels_to_keep).sum()
demand_df = demand_df[demand_df.index.get_level_values('year') >= int(cfg.cfgfile.get('case','current_year'))]
geography_df_list = []
for geography in cfg.geographies:
if geography in supply_link.index.get_level_values(geo_label):
supply_indexer = util.level_specific_indexer(supply_link, [geo_label], [geography])
demand_indexer = util.level_specific_indexer(demand_df, [geo_label], [geography])
supply_df = supply_link.loc[supply_indexer, :]
geography_df = util.DfOper.mult([demand_df.loc[demand_indexer, :], supply_df])
geography_df_list.append(geography_df)
df = pd.concat(geography_df_list)
return df
def group_linked_output_tco(self, supply_link, levels_to_keep=None):
demand_df = self.d_energy_tco.copy()
supply_link = supply_link.groupby(level=[cfg.primary_geography,'year', 'final_energy', 'sector']).sum()
geo_label = cfg.primary_geography
demand_df = demand_df[demand_df.index.get_level_values('year') >= int(cfg.cfgfile.get('case','current_year'))]
geography_df_list = []
for geography in cfg.geographies:
if geography in supply_link.index.get_level_values(geo_label):
supply_indexer = util.level_specific_indexer(supply_link, [geo_label], [geography])
demand_indexer = util.level_specific_indexer(demand_df, [geo_label], [geography])
supply_df = supply_link.loc[supply_indexer, :]
geography_df = util.DfOper.mult([demand_df.loc[demand_indexer, :], supply_df])
geography_df = util.remove_df_levels(geography_df,['year','final_energy'])
geography_df_list.append(geography_df)
df = pd.concat(geography_df_list)
return df
def group_linked_output_payback(self, supply_link, levels_to_keep=None):
demand_df = self.d_all_energy_demand_payback.copy()
supply_link = supply_link.groupby(level=[cfg.primary_geography,'year', 'final_energy', 'sector']).sum()
geo_label = cfg.primary_geography
demand_df = demand_df[demand_df.index.get_level_values('year') >= int(cfg.cfgfile.get('case','current_year'))]
geography_df_list = []
for geography in cfg.geographies:
if geography in supply_link.index.get_level_values(geo_label):
supply_indexer = util.level_specific_indexer(supply_link, [geo_label], [geography])
demand_indexer = util.level_specific_indexer(demand_df, [geo_label], [geography])
supply_df = supply_link.loc[supply_indexer, :]
geography_df = util.DfOper.mult([demand_df.loc[demand_indexer, :], supply_df])
geography_df = util.remove_df_levels(geography_df,['final_energy'])
geography_df_list.append(geography_df)
df = pd.concat(geography_df_list)
return df
def aggregate_sector_energy_for_supply_side(self):
"""Aggregates for the supply side, works with function in sector"""
names = ['sector', cfg.primary_geography, 'final_energy', 'year']
sectors_aggregates = [sector.aggregate_subsector_energy_for_supply_side() for sector in self.sectors.values()]
self.energy_demand = pd.concat([s for s in sectors_aggregates if s is not None], keys=self.sectors.keys(), names=names)
class Sector(object):
    """A demand sector: owns a set of Subsectors, coordinates their calculation
    order (precursor subsectors first), and aggregates their results."""
    def __init__(self, id, drivers, scenario):
        self.drivers = drivers
        self.scenario = scenario
        self.id = id
        self.subsectors = {}
        # copy this sector's database row onto attributes (shape_id, max_lead_hours, etc.)
        for col, att in util.object_att_from_table('DemandSectors', id):
            setattr(self, col, att)
        self.outputs = Output()
        if self.shape_id is not None:
            self.shape = shape.shapes.data[self.shape_id]
        # which subsectors exist and which kinds of input data each one has
        self.subsector_ids = util.sql_read_table('DemandSubsectors', column_names='id', sector_id=self.id, is_active=True, return_iterable=True)
        self.stock_subsector_ids = util.sql_read_table('DemandStock', 'subsector_id', return_iterable=True)
        self.service_demand_subsector_ids = util.sql_read_table('DemandServiceDemands', 'subsector_id', return_iterable=True)
        self.energy_demand_subsector_ids = util.sql_read_table('DemandEnergyDemands', 'subsector_id', return_iterable=True)
        self.service_efficiency_ids = util.sql_read_table('DemandServiceEfficiency', 'subsector_id', return_iterable=True)
        feeder_allocation_class = dispatch_classes.DispatchFeederAllocation(1)
        # FIXME: This next line will fail if we don't have a feeder allocation for each demand_sector
        self.feeder_allocation = util.df_slice(feeder_allocation_class.values, id, 'demand_sector')
        self.electricity_reconciliation = None
        self.workingdir = cfg.workingdir
        self.cfgfile_name = cfg.cfgfile_name
        self.log_name = cfg.log_name
        # precursor bookkeeping: which subsectors must be solved before which
        self.service_precursors = defaultdict(dict)
        self.stock_precursors = defaultdict(dict)
        self.subsector_precursors = defaultdict(list)
        # NOTE: 'precursers' spelling kept -- the attribute name is referenced throughout
        self.subsector_precursers_reversed = defaultdict(list)
    def add_subsectors(self):
        """Instantiate all subsectors, load their energy-system data, and build the precursor maps."""
        for id in self.subsector_ids:
            self.add_subsector(id)
        # # populate_subsector_data, this is a separate step so we can use multiprocessing
        # if cfg.cfgfile.get('case','parallel_process').lower() == 'true':
        #     subsectors = helper_multiprocess.safe_pool(helper_multiprocess.subsector_populate, self.subsectors.values())
        #     self.subsectors = dict(zip(self.subsectors.keys(), subsectors))
        # else:
        # TODO: when we added shapes to technologies, we can no longer do this in parallel process because we don't have access to shapes
        # we will need to add technologies in a separate prior step so that shapes are in the namespace
        for id in self.subsector_ids:
            self.subsectors[id].add_energy_system_data()
        self.make_precursor_dict()
        self.make_precursors_reversed_dict()
    def add_subsector(self, id):
        """Create one Subsector, flagging which input-data tables it appears in."""
        stock = True if id in self.stock_subsector_ids else False
        service_demand = True if id in self.service_demand_subsector_ids else False
        energy_demand = True if id in self.energy_demand_subsector_ids else False
        service_efficiency = True if id in self.service_efficiency_ids else False
        self.subsectors[id] = Subsector(id, self.drivers, stock, service_demand, energy_demand, service_efficiency, self.scenario)
    def make_precursor_dict(self):
        """
        determines calculation order based on subsector precursors for service demand drivers or specified stocks
        example:
            clothes washing can be a service demand precursor for water heating, and so must be solved first. This puts water heating as the
            key in a dictionary with clothes washing as a value. Function also records the link in the service_precursors dictionary within the same loop.
        """
        #service links
        for subsector in self.subsectors.values():
            for service_link in subsector.service_links.values():
                self.subsector_precursors[service_link.linked_subsector_id].append(subsector.id)
        # technology links
        subsectors_with_techs = [subsector for subsector in self.subsectors.values() if hasattr(subsector, 'technologies')]
        for subsector in subsectors_with_techs:
            for demand_technology in subsector.technologies.values():
                linked_tech_id = demand_technology.linked_id
                for lookup_subsector in subsectors_with_techs:
                    if linked_tech_id in lookup_subsector.technologies.keys():
                        self.subsector_precursors[lookup_subsector.id].append(subsector.id)
    def make_precursors_reversed_dict(self):
        """Invert subsector_precursors: map each precursor to the subsectors that depend on it."""
        # revisit this -- there should be a more elegant way to reverse a dictionary
        for subsector_id, precursor_ids in self.subsector_precursors.items():
            for precursor_id in precursor_ids:
                if subsector_id not in self.subsector_precursers_reversed[precursor_id]:
                    self.subsector_precursers_reversed[precursor_id].append(subsector_id)
    def reset_subsector_for_perdubation(self, subsector_id):
        """Re-create a subsector (and, recursively, everything that depends on it) for a perturbation run.

        NOTE: 'perdubation' is a typo for 'perturbation'; the name is kept because it is part of the public interface.
        """
        self.add_subsector(subsector_id)
        logging.info('resetting subsector {}'.format(self.subsectors[subsector_id].name))
        if subsector_id in self.subsector_precursers_reversed:
            for dependent_subsector_id in self.subsector_precursers_reversed[subsector_id]:
                self.reset_subsector_for_perdubation(dependent_subsector_id)
    def add_energy_system_data_after_reset(self, subsector_id):
        """Reload energy-system data for a reset subsector and its dependents (skips already-loaded ones)."""
        if hasattr(self.subsectors[subsector_id], 'energy_system_data_has_been_added') and not self.subsectors[subsector_id].energy_system_data_has_been_added:
            self.subsectors[subsector_id].add_energy_system_data()
            self.subsectors[subsector_id].energy_system_data_has_been_added = True
        if subsector_id in self.subsector_precursers_reversed:
            for dependent_subsector_id in self.subsector_precursers_reversed[subsector_id]:
                self.add_energy_system_data_after_reset(dependent_subsector_id)
    def manage_calculations(self):
        """
        loops through subsectors making sure to calculate subsector precursors
        before calculating subsectors themselves
        """
        precursors = set(util.flatten_list(self.subsector_precursors.values()))
        self.calculate_precursors(precursors)
        # TODO: seems like this next step could be shortened, but it changes the answer when it is removed altogether
        self.update_links(precursors)
        if cfg.cfgfile.get('case','parallel_process').lower() == 'true':
            subsectors = helper_multiprocess.safe_pool(helper_multiprocess.subsector_calculate, self.subsectors.values())
            self.subsectors = dict(zip(self.subsectors.keys(), subsectors))
        else:
            for subsector in self.subsectors.values():
                if not subsector.calculated:
                    subsector.calculate()
    def calculate_precursors(self, precursors):
        """
        calculates subsector if all precursors have been calculated
        """
        for id in precursors:
            precursor = self.subsectors[id]
            if precursor.calculated:
                continue
            # if the precursor itself has precursors, those must be done first
            # (dict.has_key was Python-2-only; `in` is equivalent)
            if precursor.id in self.subsector_precursors:
                self.calculate_precursors(self.subsector_precursors[precursor.id])
            precursor.linked_service_demand_drivers = self.service_precursors[precursor.id]
            precursor.linked_stock = self.stock_precursors[precursor.id]
            precursor.calculate()
            #because other subsectors depend on "precursor", we add it's outputs to a dictionary
            for service_link in precursor.service_links.values():
                self.service_precursors[service_link.linked_subsector_id].update({precursor.id: precursor.output_service_drivers[service_link.linked_subsector_id]})
            for dependent_id in self.subsector_precursers_reversed[id]:
                dependent = self.subsectors[dependent_id]
                if not hasattr(dependent, 'technologies'):
                    continue
                for demand_technology in precursor.output_demand_technology_stocks.keys():
                    if demand_technology in dependent.technologies.keys():
                        # updates stock_precursor values dictionary with linked demand_technology stocks
                        self.stock_precursors[dependent_id].update({demand_technology: precursor.output_demand_technology_stocks[demand_technology]})
    def update_links(self, precursors):
        """Re-thread precursor service drivers and linked stocks into every subsector before calculation."""
        for subsector_id in self.subsectors:
            subsector = self.subsectors[subsector_id]
            for precursor_id in precursors:
                precursor = self.subsectors[precursor_id]
                # (dict.has_key was Python-2-only; `in` is equivalent)
                if subsector.id in precursor.output_service_drivers:
                    self.service_precursors[subsector.id].update({precursor.id: precursor.output_service_drivers[subsector.id]})
                for demand_technology in precursor.output_demand_technology_stocks.keys():
                    if hasattr(subsector,'technologies') and demand_technology in subsector.technologies.keys():
                        # updates stock_precursor values dictionary with linked demand_technology stocks
                        self.stock_precursors[subsector.id].update({demand_technology: precursor.output_demand_technology_stocks[demand_technology]})
            subsector.linked_service_demand_drivers = self.service_precursors[subsector.id]
            subsector.linked_stock = self.stock_precursors[subsector.id]
    def aggregate_subsector_energy_for_supply_side(self):
        """Aggregates for the supply side, works with function in demand"""
        levels_to_keep = [cfg.primary_geography, 'final_energy', 'year']
        return util.DfOper.add([pd.DataFrame(subsector.energy_forecast.value.groupby(level=levels_to_keep).sum()).sort_index() for subsector in self.subsectors.values() if hasattr(subsector, 'energy_forecast')])
    def group_output(self, output_type, levels_to_keep=None, include_unit=False, specific_years=None):
        """Concatenate one output type across subsectors under a new 'subsector' level; None when no data."""
        levels_to_keep = cfg.output_demand_levels if levels_to_keep is None else levels_to_keep
        levels_to_keep = list(set(levels_to_keep + ['unit'])) if include_unit else levels_to_keep
        dfs = [subsector.group_output(output_type, levels_to_keep, specific_years) for subsector in self.subsectors.values()]
        if all([df is None for df in dfs]) or not len(dfs):
            return None
        dfs, keys = zip(*[(df, key) for df, key in zip(dfs, self.subsectors.keys()) if df is not None])
        new_names = 'subsector'
        return util.df_list_concatenate(dfs, keys, new_names, levels_to_keep)
    def group_output_tco(self, output_type, levels_to_keep=None, include_unit=False, specific_years=None):
        """TCO variant of group_output; levels_to_keep is passed through unchanged."""
        dfs = [subsector.group_output_tco(output_type, levels_to_keep, specific_years) for subsector in self.subsectors.values()]
        if all([df is None for df in dfs]) or not len(dfs):
            return None
        dfs, keys = zip(*[(df, key) for df, key in zip(dfs, self.subsectors.keys()) if df is not None])
        new_names = 'subsector'
        return util.df_list_concatenate(dfs, keys, new_names, levels_to_keep)
    def get_subsectors_with_no_shape(self, year):
        """ Returns only subsectors that have electricity consumption
        """
        return [sub.id for sub in self.subsectors.values() if (sub.has_electricity_consumption(year) and not sub.has_shape() and not sub.has_flexible_load(year))]
    def aggregate_inflexible_electricity_shape(self, year):
        """ Final levels that will always return from this function
        ['gau', 'dispatch_feeder', 'weather_datetime']
        """
        subsectors_with_shape_only = [sub.id for sub in self.subsectors.values() if (sub.has_electricity_consumption(year) and sub.has_shape() and not sub.has_flexible_load(year))]
        return self.aggregate_electricity_shape(year, ids=subsectors_with_shape_only) if len(subsectors_with_shape_only) else None
    def aggregate_flexible_electricity_shape(self, year):
        """ Final levels that will always return from this function
        ['timeshift_type', 'gau', 'dispatch_feeder', 'weather_datetime']
        """
        subsectors_with_flex = [sub.id for sub in self.subsectors.values() if (sub.has_electricity_consumption(year) and sub.has_flexible_load(year))]
        if len(subsectors_with_flex):
            return self.aggregate_electricity_shape(year, ids=subsectors_with_flex).reorder_levels(['timeshift_type', cfg.primary_geography, 'dispatch_feeder', 'weather_datetime'])
        else:
            return None
    def aggregate_electricity_shape(self, year, ids=None):
        """Sum subsector electricity shapes (all subsectors, or only `ids`) and apply the feeder allocation."""
        # we make this expandable because sometimes it has dispatch feeder
        agg_shape = util.DfOper.add([self.subsectors[id].aggregate_electricity_shapes(year) for id in (self.subsectors.keys() if ids is None else ids)], expandable=True, collapsible=False)
        return util.DfOper.mult((self.feeder_allocation.xs(year, level='year'), agg_shape))
    def set_default_shape(self, default_shape, active_shape=None):
        """Choose this sector's active shape (its own, or the system default) and pass it to every subsector."""
        if self.shape_id is None:
            self.default_shape = default_shape
            active_shape = default_shape
        else:
            active_shape = self.shape
        for subsector in self.subsectors:
            self.subsectors[subsector].set_default_shape(active_shape, self.max_lead_hours, self.max_lag_hours)
class Subsector(DataMapFunctions):
    def __init__(self, id, drivers, stock, service_demand, energy_demand, service_efficiency, scenario):
        """Set up a demand subsector.

        The boolean flags (stock / service_demand / energy_demand /
        service_efficiency) record which input-data tables reference this
        subsector and determine the calculation path chosen later in
        add_energy_system_data.
        """
        self.id = id
        self.drivers = drivers
        self.workingdir = cfg.workingdir
        self.cfgfile_name = cfg.cfgfile_name
        self.log_name = cfg.log_name
        # boolean check on data availability to determine calculation steps
        self.has_stock = stock
        self.has_service_demand = service_demand
        self.has_energy_demand = energy_demand
        self.has_service_efficiency = service_efficiency
        self.scenario = scenario
        # copy the subsector's database row onto attributes (shape_id, max_lead_hours, etc.)
        for col, att in util.object_att_from_table('DemandSubsectors', id):
            setattr(self, col, att)
        self.outputs = Output()
        self.calculated = False
        self.shape = shape.shapes.data[self.shape_id] if self.shape_id is not None else None
        # representative calendar year of the weather data (rounded mean of the active dates)
        self.shapes_weather_year = int(np.round(np.mean(shape.shapes.active_dates_index.year)))
        self.electricity_reconciliation = None
        # sector-level fallbacks, filled in later by set_default_shape
        self.default_shape = None
        self.default_max_lead_hours = None
        self.default_max_lag_hours = None
        # outputs from precursor subsectors, threaded in by the parent Sector
        self.linked_service_demand_drivers = {}
        self.linked_stock = {}
        self.perturbation = None
        self.energy_system_data_has_been_added = False
    def set_electricity_reconciliation(self, electricity_reconciliation):
        """Store the system-level electricity reconciliation factors for use when aggregating shapes."""
        self.electricity_reconciliation = electricity_reconciliation
def set_default_shape(self, default_shape, default_max_lead_hours, default_max_lag_hours):
self.default_shape = default_shape if self.shape_id is None else None
self.default_max_lead_hours = default_max_lead_hours if hasattr(self, 'max_lead_hours') else None
self.default_max_lag_hours = default_max_lag_hours if hasattr(self, 'max_lag_hours') else None
def has_shape(self):
if (self.shape_id is not None) or (hasattr(self, 'technologies') and np.any([tech.shape is not None for tech in self.technologies.values()])):
return True
else:
return False
def has_flexible_load(self, year):
return True if (hasattr(self, 'flexible_load_measure') and self.flexible_load_measure.values.xs(year, level='year').sum().sum()>0) else False
def has_electricity_consumption(self, year):
if hasattr(self,'energy_forecast') and \
cfg.electricity_energy_type_id in util.get_elements_from_level(self.energy_forecast, 'final_energy') and \
self.energy_forecast.xs([cfg.electricity_energy_type_id, year], level=['final_energy', 'year']).sum().sum() > 0:
return True
else:
return False
def get_electricity_consumption(self, year, keep_technology=True):
""" Returns with primary geography level
"""
if self.has_electricity_consumption(year):
group_level = [cfg.primary_geography, 'demand_technology'] if (keep_technology and hasattr(self, 'technologies')) else cfg.primary_geography
return self.energy_forecast.xs([cfg.electricity_energy_type_id, year], level=['final_energy', 'year']).groupby(level=group_level).sum()
else:
return None
    def aggr_elect_shapes_unique_techs_with_flex_load(self, unique_tech_ids, active_shape, active_hours, year, energy_slice, annual_flexible):
        """Build the flexed electricity shape for technologies that need individual treatment.

        Each tech may have its own shape, its own max lead/lag hours (falling
        back to `active_hours`), or its own flexible-load share in
        `annual_flexible`. Returns the energy-weighted sum across those techs.
        """
        unique_tech_ids_with_energy = self._filter_techs_without_energy(unique_tech_ids, energy_slice)
        result = []
        # we have something unique about each of these technologies which means we have to calculate each separately
        for tech_id in unique_tech_ids_with_energy:
            tech_energy_slice = util.df_slice(energy_slice, tech_id, 'demand_technology', reset_index=True)
            shape_values = self.technologies[tech_id].get_shape(default_shape=active_shape)
            # a tech-specific flexible share is used only when the measure is indexed by technology
            percent_flexible = annual_flexible.xs(tech_id, level='demand_technology') if 'demand_technology' in annual_flexible.index.names else annual_flexible
            # `or` falls back to the subsector-level hours when the tech returns None (or 0)
            tech_max_lag_hours = self.technologies[tech_id].get_max_lag_hours() or active_hours['lag']
            tech_max_lead_hours = self.technologies[tech_id].get_max_lead_hours() or active_hours['lead']
            flex = self.return_shape_after_flex_load(shape_values, percent_flexible, tech_max_lag_hours, tech_max_lead_hours)
            result.append(util.DfOper.mult((flex, tech_energy_slice)))
        return util.DfOper.add(result, collapsible=False)
    def aggr_elect_shapes_techs_not_unique(self, techs_with_energy_and_shapes, active_shape, energy_slice):
        """Energy-weight the shapes of techs that have their own shape but need no
        special flexible-load treatment, then sum out the technology level."""
        tech_shapes = pd.concat([self.technologies[tech].get_shape(default_shape=active_shape) for tech in techs_with_energy_and_shapes],
                                keys=techs_with_energy_and_shapes, names=['demand_technology'])
        energy_with_shapes = util.df_slice(energy_slice, techs_with_energy_and_shapes, 'demand_technology',
                                           drop_level=False, reset_index=True)
        return util.remove_df_levels(util.DfOper.mult((energy_with_shapes, tech_shapes)), levels='demand_technology')
    def return_shape_after_flex_load(self, shape_values, percent_flexible, max_lag_hours, max_lead_hours):
        """Apply flexible-load shifting to a shape, producing profiles indexed by timeshift_type.

        Reconciliation is multiplied in first unless the shape already carries
        all three timeshift types (values 1/2/3 -- presumably advance/native/
        delay, judging from the xs(2, ...) usage elsewhere; confirm against the
        shape module).
        """
        timeshift_levels = sorted(list(util.get_elements_from_level(shape_values, 'timeshift_type')))
        # using electricity reconciliation with a profile with a timeshift type can cause big problems, so it is avoided
        shape_df = shape_values if timeshift_levels == [1, 2, 3] else util.DfOper.mult((shape_values, self.electricity_reconciliation))
        flex = shape.Shape.produce_flexible_load(shape_df, percent_flexible=percent_flexible, hr_delay=max_lag_hours, hr_advance=max_lead_hours)
        return flex
def _filter_techs_without_energy(self, candidate_techs, energy_slice):
if not 'demand_technology' in energy_slice.index.names:
return []
techs_with_energy = sorted(energy_slice[energy_slice['value'] != 0].index.get_level_values('demand_technology').unique())
return [tech_id for tech_id in candidate_techs if tech_id in techs_with_energy]
def aggregate_electricity_shapes(self, year, for_direct_use=False):
""" Final levels that will always return from this function
['gau', 'weather_datetime'] or ['timeshift_type', 'gau', 'weather_datetime']
if for_direct_use, we are outputing the shape directly to csv
"""
energy_slice = self.get_electricity_consumption(year)
# if we have no electricity consumption, we don't make a shape
if energy_slice is None or len(energy_slice[energy_slice['value'] != 0])==0:
return None
# to speed this up, we are removing anything that has zero energy
energy_slice = energy_slice[energy_slice['value'] != 0]
active_shape = self.shape if self.shape is not None else self.default_shape
active_hours = {}
active_hours['lag'] = self.max_lag_hours if self.max_lag_hours is not None else self.default_max_lag_hours
active_hours['lead'] = self.max_lead_hours if self.max_lead_hours is not None else self.default_max_lead_hours
has_flexible_load = True if hasattr(self, 'flexible_load_measure') and \
self.flexible_load_measure.values.xs(year,level='year').sum().sum() > 0 else False
flexible_load_tech_index = True if has_flexible_load and 'demand_technology' in self.flexible_load_measure.values.index.names else False
percent_flexible = self.flexible_load_measure.values.xs(year, level='year') if has_flexible_load else None
tech_load, unique_tech_load, unique_tech_ids = None, None, []
if hasattr(self, 'technologies'):
if has_flexible_load:
# sometimes a tech will have a different lead or lag than the subsector, which means we need to treat this tech separately
unique_tech_ids = set([tech for tech in self.technologies if
(self.technologies[tech].get_max_lead_hours() and self.technologies[tech].get_max_lead_hours()!=active_hours['lead']) or
(self.technologies[tech].get_max_lag_hours() and self.technologies[tech].get_max_lag_hours()!=active_hours['lag'])])
# if we have a flexible load measure, sometimes techs will be called out specifically
if 'demand_technology' in self.flexible_load_measure.values.index.names:
unique_tech_ids = unique_tech_ids | set(self.flexible_load_measure.values.index.get_level_values('demand_technology'))
unique_tech_ids = self._filter_techs_without_energy(sorted(unique_tech_ids), energy_slice)
if unique_tech_ids:
unique_tech_load = self.aggr_elect_shapes_unique_techs_with_flex_load(unique_tech_ids, active_shape, active_hours, year, energy_slice, percent_flexible)
else:
unique_tech_load = None
# other times, we just have a tech with a unique shape. Note if we've already dealt with it in unique tech ids, we can skip this
# these are techs that we need to treat specially because they will have their own shape
not_unique_tech_ids = [tech.id for tech in self.technologies.values() if tech.shape and (tech.id not in unique_tech_ids)]
not_unique_tech_ids = self._filter_techs_without_energy(not_unique_tech_ids, energy_slice)
if not_unique_tech_ids:
# at this point we haven't yet done flexible load
tech_load = self.aggr_elect_shapes_techs_not_unique(not_unique_tech_ids, active_shape, energy_slice)
accounted_for_techs = sorted(set(unique_tech_ids) | set(not_unique_tech_ids))
# remove the energy from the techs we've already accounted for
remaining_energy = util.remove_df_levels(util.remove_df_elements(energy_slice, accounted_for_techs, 'demand_technology'), 'demand_technology')
else:
# we haven't done anything yet, so the remaining energy is just the starting energy_slice
remaining_energy = util.remove_df_levels(energy_slice, 'demand_technology')
if has_flexible_load:
# this is a special case where we've actually already accounted for all the parts that are flexible, we just need to add in the other parts and return it
if flexible_load_tech_index:
remaining_shape = util.DfOper.mult((active_shape.values.xs(2, level='timeshift_type'), remaining_energy))if remaining_energy.sum().sum()>0 else None
tech_load = tech_load.xs(2, level='timeshift_type') if tech_load is not None else tech_load
remaining_shape = util.DfOper.add((remaining_shape, tech_load), collapsible=False)
# we need to add in the electricity reconciliation because we have flexible load for the other parts and it's expected by the function calling this one
if remaining_shape is not None:
remaining_shape = util.DfOper.mult((remaining_shape, self.electricity_reconciliation), collapsible=False)
remaining_shape = pd.concat([remaining_shape] * 3, keys=[1, 2, 3], names=['timeshift_type'])
return_shape = util.DfOper.add((unique_tech_load, remaining_shape), collapsible=False)
else:
# here we have flexible load, but it is not indexed by technology
remaining_shape = util.DfOper.mult((active_shape.values, remaining_energy), collapsible=False)
remaining_shape = util.DfOper.add((remaining_shape, tech_load), collapsible=False)
return_shape = self.return_shape_after_flex_load(remaining_shape, percent_flexible, active_hours['lag'], active_hours['lead'])
else:
# if we don't have flexible load, we don't introduce electricity reconcilliation because that is done at a more aggregate level
remaining_shape = util.DfOper.mult((active_shape.values.xs(2, level='timeshift_type'), remaining_energy))
return_shape = (tech_load.xs(2, level='timeshift_type') + remaining_shape) if tech_load is not None else remaining_shape
if for_direct_use:
return_shape = util.DfOper.mult((return_shape, self.electricity_reconciliation), collapsible=False)
return_shape = pd.concat([return_shape] * 3, keys=[1, 2, 3], names=['timeshift_type'])
if for_direct_use:
# doing this will make the energy for the subsector match, but it won't exactly match the system shape used in the dispatch
correction_factors = util.remove_df_levels(energy_slice, 'demand_technology') / return_shape.xs(2, level='timeshift_type').groupby(level=cfg.primary_geography).sum()
return_shape = util.DfOper.mult((return_shape, correction_factors))
# if self.id == 15:
# pdb.set_trace()
try:
print self.id, self.name, (self.get_electricity_consumption(year).groupby(level=cfg.primary_geography).sum() - return_shape.groupby(level=cfg.primary_geography).sum())
except:
pdb.set_trace()
return return_shape
    def add_energy_system_data(self):
        """
        Populates the energy system based on available data and determines
        the subsector type.

        The sub_type is inferred from which combination of stock, service
        demand, energy demand and service efficiency inputs exists in the
        database. Raises ValueError when the inputs are insufficient or
        inconsistent.
        """
        logging.info('    '+self.name)
        # stock + service demand: technologies are defined in either service or stock units
        if self.has_stock is True and self.has_service_demand is True:
            self.service_demand = SubDemand(self.id, sql_id_table='DemandServiceDemands', sql_data_table='DemandServiceDemandsData', scenario=self.scenario, drivers=self.drivers)
            self.add_stock()
            if self.stock.demand_stock_unit_type == 'equipment' or self.stock.demand_stock_unit_type == 'capacity factor':
                self.add_technologies(self.service_demand.unit, self.stock.time_unit)
            else:
                self.add_technologies(self.stock.unit, self.stock.time_unit)
            self.sub_type = 'stock and service'
        elif self.has_stock is True and self.has_energy_demand is True:
            self.energy_demand = SubDemand(self.id, sql_id_table='DemandEnergyDemands', sql_data_table='DemandEnergyDemandsData', scenario=self.scenario, drivers=self.drivers)
            self.add_stock()
            if self.stock.demand_stock_unit_type == 'equipment':
                # service demand unit is equal to the energy demand unit for equipment stocks
                # where no additional service demand information is given in the form of stock units
                self.add_technologies(self.energy_demand.unit, self.stock.time_unit)
            else:
                # service demand unit is equal to the stock unit for stock input types
                # of capacity factor and service demand
                # Ex. if a stock unit was 1000 cubic feet per minute, we know that the service demand is
                # cubic feet
                self.add_technologies(self.stock.unit, self.stock.time_unit)
            self.sub_type = 'stock and energy'
        # service demand + service efficiency: no stock rollover, efficiency applied directly
        elif self.has_service_demand is True and self.has_service_efficiency is True:
            self.service_demand = SubDemand(self.id, sql_id_table='DemandServiceDemands', sql_data_table='DemandServiceDemandsData', scenario=self.scenario, drivers=self.drivers)
            self.service_efficiency = ServiceEfficiency(self.id, self.service_demand.unit, self.scenario)
            self.sub_type = 'service and efficiency'
        elif self.has_service_demand is True and self.has_energy_demand is True:
            self.service_demand = SubDemand(self.id, sql_id_table='DemandServiceDemands', sql_data_table='DemandServiceDemandsData', scenario=self.scenario, drivers=self.drivers)
            self.energy_demand = SubDemand(self.id, sql_id_table='DemandEnergyDemands', sql_data_table='DemandEnergyDemandsData',scenario=self.scenario, drivers=self.drivers)
            self.sub_type = 'service and energy'
        # energy demand only: simplest subsector type
        elif self.has_energy_demand is True:
            self.energy_demand = SubDemand(self.id, sql_id_table='DemandEnergyDemands', sql_data_table='DemandEnergyDemandsData', scenario=self.scenario,drivers=self.drivers)
            self.sub_type = 'energy'
        # stock only: a 'link' subsector (e.g. building shell) that drives other subsectors
        elif self.has_stock is True:
            self.sub_type = 'link'
            self.add_stock()
            if self.stock.demand_stock_unit_type == 'equipment':
                # service demand unit is equal to the energy demand unit for equipment stocks
                # where no additional service demand information is given in the form of stock units
                self.add_technologies(None, None)
            else:
                raise ValueError("A subsector that has no service demand must have its stock input as equipment")
        else:
            raise ValueError("User has not input enough data in subsector %s" %self.name)
        self.add_service_links()
        self.calculate_years()
        self.add_measures()
def add_measures(self):
""" add measures to subsector based on scenario inputs """
self.add_service_demand_measures(self.scenario)
self.add_energy_efficiency_measures(self.scenario)
self.add_fuel_switching_measures(self.scenario)
self.add_stock_measures(self.scenario)
self.add_flexible_load_measures(self.scenario)
def add_stock_measures(self, scenario):
""" add specified stock and sales measures to model if the subsector
is populated with stock data """
if self.has_stock:
for tech in self.technologies:
self.technologies[tech].add_specified_stock_measures()
self.technologies[tech].add_sales_share_measures()
    def calculate(self):
        """Run the full subsector calculation pipeline.

        Order matters: measures must be calculated before linked inputs are
        attached, drivers must be projected before energy demand, and costs
        can only be computed once energy and stocks are known.
        """
        logging.info("    calculating" + " " + self.name)
        logging.debug('      '+'calculating measures')
        self.calculate_measures()
        logging.debug('      '+'adding linked inputs')
        self.add_linked_inputs(self.linked_service_demand_drivers, self.linked_stock)
        logging.debug('      '+'forecasting energy drivers')
        self.project()
        logging.debug('      '+'calculating subsector energy demand')
        self.calculate_energy()
        self.project_measure_stocks()
        # mark done so dependent subsectors know this one is available
        self.calculated = True
        logging.debug('      '+'calculating costs')
        self.calculate_costs()
        logging.debug('      '+'processing outputs')
        self.remove_extra_subsector_attributes()
def add_linked_inputs(self, linked_service_demand_drivers, linked_stock):
""" adds linked inputs to subsector """
self.linked_service_demand_drivers = linked_service_demand_drivers
self.linked_stock = linked_stock
# TODO: why do we do this override of interpolation and extrapolation methods? (from Ryan)
self.interpolation_method = 'linear_interpolation'
self.extrapolation_method = 'linear_interpolation'
def group_output(self, output_type, levels_to_keep=None, specific_years=None):
levels_to_keep = cfg.output_demand_levels if levels_to_keep is None else levels_to_keep
if output_type=='energy':
# a subsector type link would be something like building shell, which does not have an energy demand
if self.sub_type != 'link':
return_array = self.energy_forecast
else:
return None
elif output_type=='stock':
return_array = self.format_output_stock(levels_to_keep)
elif output_type=='sales':
return_array = self.format_output_sales(levels_to_keep)
elif output_type=='annual_costs':
return_array = self.format_output_costs('annual_costs', levels_to_keep)
elif output_type=='levelized_costs':
return_array = self.format_output_costs('levelized_costs', levels_to_keep)
elif output_type=='service_demand':
return_array = self.format_output_service_demand(levels_to_keep)
elif output_type == 'service_demand_evolved':
return_array = self.format_output_service_demand_evolved(levels_to_keep)
elif output_type == 'energy_demand_evolved':
return_array = self.format_output_energy_demand_evolved(levels_to_keep)
elif output_type == 'all_energy_demand':
if self.sub_type != 'link':
return_array = self.format_output_energy_demand_payback()
else:
return None
if return_array is not None:
return util.df_slice(return_array, specific_years, 'year', drop_level=False) if specific_years else return_array
else:
return None
def group_output_tco(self, output_type, levels_to_keep=None, specific_years=None):
index = pd.MultiIndex.from_product([self.vintages,self.years],names=['vintage','year'])
data = np.array(index.get_level_values('year') - index.get_level_values('vintage'))
data[data<0] = 0
data = (1-self.cost_of_capital)**data
npv = pd.DataFrame(data=data,index=index,columns=['value'])
if output_type=='levelized_costs_tco':
return_array = self.format_output_costs_tco('levelized_costs', npv, levels_to_keep)
elif output_type=='service_demand_tco':
return_array = self.format_output_service_demand_tco(levels_to_keep,npv)
elif output_type=='energy_tco':
# a subsector type link would be something like building shell, which does not have an energy demand
if self.sub_type != 'link':
return_array = self.format_output_energy_demand_tco(npv)
else:
return None
return util.df_slice(return_array, specific_years, 'year', drop_level=False) if specific_years else return_array
def format_output_energy_demand_tco(self,npv):
if hasattr(self,'energy_forecast_no_modifier'):
return util.DfOper.mult([self.energy_forecast_no_modifier,npv])
else:
return None
def format_output_energy_demand_evolved(self,levels_to_keep):
if hasattr(self,'energy_forecast_no_modifier'):
df = self.energy_forecast_no_modifier
levels_to_keep = cfg.output_demand_levels + ['demand_technology']
if 'vintage' in levels_to_keep:
levels_to_keep.remove('vintage')
levels_to_eliminate = [l for l in df.index.names if l not in levels_to_keep]
df = util.remove_df_levels(df,levels_to_eliminate).sort_index()
return df
else:
return None
def format_output_energy_demand_payback(self):
if hasattr(self,'energy_forecast_no_modifier'):
return self.energy_forecast_no_modifier
else:
return None
def format_output_service_demand(self, override_levels_to_keep):
if not hasattr(self, 'service_demand'):
return None
if hasattr(self.service_demand, 'modifier') and cfg.cfgfile.get('case', 'use_service_demand_modifiers').lower()=='true':
df = util.DfOper.mult([util.remove_df_elements(self.service_demand.modifier, 9999, 'final_energy'),self.stock.values_efficiency_normal]).groupby(level=self.service_demand.values.index.names).transform(lambda x: x/x.sum())
df = util.DfOper.mult([df,self.service_demand.values])
original_other_indexers = [x for x in self.service_demand.values.index.names if x not in [cfg.primary_geography,'year']]
for i,v in enumerate(original_other_indexers):
if i == 0:
util.replace_index_name(df,"other_index_1",v)
else:
util.replace_index_name(df,"other_index_2",v)
elif hasattr(self, 'stock'):
df = util.DfOper.mult([self.stock.values_efficiency_normal,self.service_demand.values])
original_other_indexers = [x for x in self.service_demand.values.index.names if x not in [cfg.primary_geography,'year']]
for i,v in enumerate(original_other_indexers):
if i == 0:
util.replace_index_name(df,"other_index_1",v)
else:
util.replace_index_name(df,"other_index_2",v)
else:
df = copy.deepcopy(self.service_demand.values)
original_other_indexers = [x for x in self.service_demand.values.index.names if x not in [cfg.primary_geography,'year']]
for i,v in enumerate(original_other_indexers):
if i == 0:
util.replace_index_name(df,"other_index_1",v)
else:
util.replace_index_name(df,"other_index_2",v)
levels_to_keep = cfg.output_demand_levels if override_levels_to_keep is None else override_levels_to_keep
levels_to_eliminate = [l for l in df.index.names if l not in levels_to_keep]
df = util.remove_df_levels(df,levels_to_eliminate).sort_index()
if len(df.columns)>1:df = df.stack().to_frame()
util.replace_column_name(df,'value')
util.replace_index_name(df, 'year')
df = util.add_and_set_index(df, 'unit', self.service_demand.unit.upper(), index_location=-2)
df.columns = ['value']
return df
def format_output_service_demand_tco(self, override_levels_to_keep,npv):
if not hasattr(self, 'service_demand'):
return None
if hasattr(self.service_demand,'modifier'):
df = self.stock.values.groupby(level=[x for x in self.stock.values.index.names if x not in ['demand_technology', 'vintage']]).transform(lambda x: x/x.sum())
df = util.DfOper.mult([df,self.service_demand.values])
else:
return None
levels_to_keep = cfg.output_demand_levels + ['demand_technology','vintage']
levels_to_eliminate = [l for l in df.index.names if l not in levels_to_keep]
df = util.remove_df_levels(df,levels_to_eliminate).sort_index()
df = df.stack().to_frame()
util.replace_column_name(df,'value')
util.replace_index_name(df, 'year')
df = util.add_and_set_index(df, 'unit', self.service_demand.unit.upper(), index_location=-2)
df.columns = ['value']
df = util.DfOper.mult([df,npv])
df = util.remove_df_levels(df,'year')
return df
def format_output_service_demand_evolved(self, override_levels_to_keep):
if not hasattr(self, 'service_demand'):
return None
if hasattr(self.service_demand,'modifier'):
df = self.stock.values.groupby(level=[x for x in self.stock.values.index.names if x in self.service_demand.values.index.names]).transform(lambda x: x/x.sum())
df = util.DfOper.mult([df,self.service_demand.values])
else:
return None
levels_to_keep = cfg.output_demand_levels + ['demand_technology']
if 'vintage' in levels_to_keep:
levels_to_keep.remove('vintage')
levels_to_eliminate = [l for l in df.index.names if l not in levels_to_keep]
df = util.remove_df_levels(df,levels_to_eliminate).sort_index()
df = df.stack().to_frame()
util.replace_column_name(df,'value')
util.replace_index_name(df, 'year')
df.columns = ['value']
return df
def format_output_sales(self, override_levels_to_keep):
if not hasattr(self, 'stock'):
return None
levels_to_keep = cfg.output_demand_levels if override_levels_to_keep is None else override_levels_to_keep
levels_to_eliminate = [l for l in self.stock.sales.index.names if l not in levels_to_keep]
df = self.stock.sales
other_indexers = [x for x in self.stock.rollover_group_names if x not in [cfg.primary_geography]]
for i,v in enumerate(other_indexers):
if i == 0:
util.replace_index_name(df,"other_index_1",v)
else:
util.replace_index_name(df,"other_index_2",v)
util.replace_index_name(df, 'year','vintage')
df = util.remove_df_levels(df, levels_to_eliminate).sort_index()
df = util.add_and_set_index(df, 'unit', self.stock.unit.upper(), -2)
return df
def format_output_stock(self, override_levels_to_keep=None):
if not hasattr(self, 'stock'):
return None
levels_to_keep = cfg.output_demand_levels if override_levels_to_keep is None else override_levels_to_keep
df = copy.deepcopy(self.stock.values)
other_indexers = [x for x in self.stock.rollover_group_names if x not in [cfg.primary_geography]]
for i,v in enumerate(other_indexers):
if i == 0:
util.replace_index_name(df,"other_index_1",v)
else:
util.replace_index_name(df,"other_index_2",v)
levels_to_eleminate = [l for l in df.index.names if l not in levels_to_keep]
df = util.remove_df_levels(df, levels_to_eleminate).sort_index()
# stock starts with vintage as an index and year as a column, but we need to stack it for export
df = df.stack().to_frame()
util.replace_column_name(df,'value')
util.replace_index_name(df, 'year')
index_location = -3 if ('year' in levels_to_keep and 'vintage' in levels_to_keep) else -2
df = util.add_and_set_index(df, 'unit', self.stock.unit.upper(), index_location)
df.columns = ['value']
return df
    def format_output_measure_costs(self, att, override_levels_to_keep=None):
        """
        Format costs of the measure stocks (energy efficiency, fuel switching,
        service demand) for output. Returns a dataframe keyed by cost_type with
        a currency-named value column, or None when there are no measure stocks
        or no non-zero costs.
        """
        # only measure types the subsector actually carries
        measure_types = [x for x in ['ee_stock','fs_stock','sd_stock'] if hasattr(self,x)]
        if len(measure_types):
            df_list = []
            for measure_type in measure_types:
                # deepcopy so the in-place index renames below don't touch the stored objects
                active_type = copy.deepcopy(getattr(self,measure_type))
                df = copy.deepcopy(getattr(active_type,att))['unspecified']
                if list(df.columns) != ['value']:
                    # year-as-column frames get stacked into long form first
                    df = df.stack().to_frame()
                    df.columns = ['value']
                    util.replace_index_name(df, 'year')
                    # skip measure types with no costs at all
                    if df.sum().values ==0:
                        continue
                    df = df[df.values>0]
                else:
                    df = df[df.values>0]
                if 'final_energy' in df.index.names:
                    df = util.remove_df_levels(df, 'final_energy')
                if hasattr(active_type,'other_index_1'):
                    util.replace_index_name(df,"other_index_1",active_type.other_index_1)
                if hasattr(active_type,'other_index_2'):
                    util.replace_index_name(df,"other_index_2",active_type.other_index_2)
                df_list.append(df)
            if len(df_list):
                keys = measure_types
                names = ['cost_type']
                df = util.df_list_concatenate(df_list,keys=keys,new_names=names,levels_to_keep=override_levels_to_keep)
                # column carries the currency label, e.g. "<year id> <currency name>"
                unit = cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')
                df.columns = [unit]
                return df
            else:
                return None
        else:
            return None
def format_output_costs_tco(self,att,npv,override_levels_to_keep=None):
stock_costs = self.format_output_stock_costs(att, override_levels_to_keep)
measure_costs = self.format_output_measure_costs(att, override_levels_to_keep)
cost_list = []
for cost in [stock_costs, measure_costs]:
if cost is not None:
cost_list.append(cost)
if len(cost_list) == 0:
return None
elif len(cost_list) == 1 and stock_costs is not None:
df = cost_list[0]
df['cost_category'] = 'stock'
df.set_index('cost_category', append=True, inplace=True)
return util.remove_df_levels(util.DfOper.mult([df,npv]),'year')
elif len(cost_list) == 1 and measure_costs is not None:
df = cost_list[0]
df['cost_category'] = 'measure'
df.set_index('cost_category', append=True, inplace=True)
return util.remove_df_levels(util.DfOper.mult([df,npv]),'year')
else:
keys = ['stock', 'measure']
names = ['cost_category']
df = util.df_list_concatenate(cost_list,keys=keys,new_names=names)
return util.remove_df_levels(util.DfOper.mult([df,npv]),'year')
def format_output_costs(self,att,override_levels_to_keep=None):
stock_costs = self.format_output_stock_costs(att, override_levels_to_keep)
measure_costs = self.format_output_measure_costs(att, override_levels_to_keep)
cost_list = [c for c in [stock_costs, measure_costs] if c is not None]
if len(cost_list) == 0:
return None
elif len(cost_list) == 1 and stock_costs is not None:
df = cost_list[0]
df['cost_category'] = 'stock'
df.set_index('cost_category', append=True, inplace=True)
return df
elif len(cost_list) == 1 and measure_costs is not None:
df = cost_list[0]
df['cost_category'] = 'measure'
df.set_index('cost_category', append=True, inplace=True)
return df
else:
return util.df_list_concatenate(cost_list,keys=['stock', 'measure'],new_names=['cost_category'])
    def format_output_stock_costs(self, att, override_levels_to_keep=None):
        """
        Formats cost outputs

        Unpacks the nested stock cost dictionary (cost type -> new/replacement
        -> dataframe), normalizes each frame to a single 'value' column, and
        concatenates with cost_type and new/replacement index levels. Returns
        None when the subsector has no stock.

        NOTE(review): the groupby below filters on membership in
        override_levels_to_keep, so passing None would raise a TypeError —
        callers appear to always supply a list; confirm before relying on the
        default.
        """
        if not hasattr(self, 'stock'):
            return None
        # deepcopy so in-place index renames don't mutate the stored cost frames
        cost_dict = copy.deepcopy(getattr(self.stock, att))
        keys, values = zip(*[(a, b) for a, b in util.unpack_dict(cost_dict)])
        values = list(values)
        for index, value in enumerate(values):
            if list(value.columns) != ['value']:
                # year-as-column frames get stacked into long form
                value = value.stack().to_frame()
                value.columns = ['value']
                util.replace_index_name(value, 'year')
                values[index] = value
            else:
                values[index]=value
            if hasattr(self.stock,'other_index_1') and self.stock.other_index_1 != None :
                util.replace_index_name(values[index],"other_index_1", self.stock.other_index_1)
            if hasattr(self.stock,'other_index_2') and self.stock.other_index_2 != None:
                util.replace_index_name(values[index],"other_index_2", self.stock.other_index_2)
            values[index] = values[index].groupby(level = [x for x in values[index].index.names if x in override_levels_to_keep]).sum()
            # keys[index] is a (cost type, new/replacement) tuple from unpack_dict
            values[index]['cost_type'] = keys[index][0].upper()
            values[index]['new/replacement'] = keys[index][1].upper()
        df = util.df_list_concatenate([x.set_index(['cost_type', 'new/replacement'] ,append=True) for x in values],keys=None, new_names=None)
        df.columns = [cfg.output_currency]
        return df
def calculate_measures(self):
"""calculates measures for use in subsector calculations """
for measure in self.energy_efficiency_measures.values():
measure.calculate(self.vintages, self.years, cfg.calculation_energy_unit)
for measure in self.service_demand_measures.values():
if hasattr(self,'stock') and self.stock.demand_stock_unit_type == 'service demand' and hasattr(self,'service_demand'):
#service demand will be converted to stock unit, so measure must be converted to stock unit
measure.calculate(self.vintages, self.years, self.stock.unit)
elif hasattr(self,'service_demand'):
measure.calculate(self.vintages, self.years, self.service_demand.unit)
elif hasattr(self,'energy_demand'):
measure.calculate(self.vintages, self.years, self.energy_demand.unit)
else:
raise ValueError("service demand measure has been created for a subsector which doesn't have an energy demand or service demand")
for measure in self.fuel_switching_measures.values():
measure.calculate(self.vintages, self.years, cfg.calculation_energy_unit)
    def calculate_energy(self):
        """ calculates energy demand for all subsector types"""
        logging.debug('      '+'subsector type = '+self.sub_type)
        if self.sub_type == 'service and efficiency' or self.sub_type == 'service and energy':
            # sets the service demand forecast equal to initial values
            # calculates the energy demand change from fuel switching measures
            self.fuel_switching_impact_calc()
            # calculates the energy demand change from energy efficiency measures
            self.energy_efficiency_savings_calc()
            # adds an index level of subsector name as demand_technology to the dataframe for use in aggregated outputs
            if hasattr(self.service_demand,'other_index_1'):
                util.replace_index_name(self.energy_forecast,"other_index_1",self.service_demand.other_index_1)
            if hasattr(self.service_demand,'other_index_2'):
                util.replace_index_name(self.energy_forecast,"other_index_2",self.service_demand.other_index_2)
        elif self.sub_type == 'energy':
            # calculates the energy demand change from fuel switching measures
            self.fuel_switching_impact_calc()
            # calculates the energy demand change from energy efficiency measures
            self.energy_efficiency_savings_calc()
            # adds an index level of subsector name as demand_technology to the dataframe for use in aggregated outputs
            if hasattr(self.energy_demand,'other_index_1'):
                util.replace_index_name(self.energy_forecast,"other_index_1",self.energy_demand.other_index_1)
            if hasattr(self.energy_demand,'other_index_2'):
                util.replace_index_name(self.energy_forecast,"other_index_2",self.energy_demand.other_index_2)
        elif self.sub_type == 'stock and service' or self.sub_type == 'stock and energy':
            # initiates the energy calculation for a subsector with a stock
            self.calculate_energy_stock()
            # calculates service demand and stock linkages to pass to other subsectors
            self.calculate_output_service_drivers()
            self.calculate_output_demand_technology_stocks()
        elif self.sub_type == 'link':
            # link subsectors have no energy of their own; they only pass drivers and stocks onward
            self.calculate_output_service_drivers()
            self.calculate_output_demand_technology_stocks()
    def calculate_costs(self):
        """ calculates cost outputs for all subsector types """
        # stock-based costs only exist for subsectors that model a stock
        if hasattr(self,'stock'):
            self.calculate_costs_stock()
        self.calculate_costs_measures()
def calculate_years(self):
"""determines the analysis period within the subsector based on the minimum year
of all inputs
"""
# self.calculate_driver_min_year()
driver_min_year = 9999
if self.sub_type == 'stock and energy':
stock_min_year = min(
self.stock.raw_values.index.levels[util.position_in_index(self.stock.raw_values, 'year')])
sales_share = util.sql_read_table('DemandSalesData', 'vintage', return_iterable=True, subsector_id=self.id)
if len(sales_share):
sales_share_min_year = min(sales_share)
else:
sales_share_min_year = 9999
energy_min_year = min(self.energy_demand.raw_values.index.levels[
util.position_in_index(self.energy_demand.raw_values, 'year')])
self.min_year = min(int(cfg.cfgfile.get('case', 'current_year')), driver_min_year,
stock_min_year, sales_share_min_year, energy_min_year)
elif self.sub_type == 'stock and service':
stock_min_year = min(
self.stock.raw_values.index.levels[util.position_in_index(self.stock.raw_values, 'year')])
sales_share = util.sql_read_table('DemandSalesData', 'vintage', return_iterable=True, subsector_id=self.id)
if len(sales_share):
sales_share_min_year = min(sales_share)
else:
sales_share_min_year = 9999
service_min_year = min(self.service_demand.raw_values.index.levels[
util.position_in_index(self.service_demand.raw_values, 'year')])
self.min_year = min(int(cfg.cfgfile.get('case', 'current_year')), driver_min_year,
stock_min_year, sales_share_min_year, service_min_year)
elif self.sub_type == 'service and efficiency':
service_min_year = min(self.service_demand.raw_values.index.levels[
util.position_in_index(self.service_demand.raw_values, 'year')])
service_efficiency_min_year = min(self.service_demand.raw_values.index.levels[
util.position_in_index(self.service_demand.raw_values, 'year')])
self.min_year = min(int(cfg.cfgfile.get('case', 'current_year')), driver_min_year, service_min_year,
service_efficiency_min_year)
elif self.sub_type == 'service and energy':
service_min_year = min(self.service_demand.raw_values.index.levels[
util.position_in_index(self.service_demand.raw_values, 'year')])
energy_min_year = min(self.energy_demand.raw_values.index.levels[
util.position_in_index(self.energy_demand.raw_values, 'year')])
self.min_year = min(int(cfg.cfgfile.get('case', 'current_year')), driver_min_year, service_min_year,
energy_min_year)
elif self.sub_type == 'energy':
energy_min_year = min(self.energy_demand.raw_values.index.levels[
util.position_in_index(self.energy_demand.raw_values, 'year')])
self.min_year = min(int(cfg.cfgfile.get('case', 'current_year')), driver_min_year, energy_min_year)
elif self.sub_type == 'link':
stock_min_year = min(
self.stock.raw_values.index.levels[util.position_in_index(self.stock.raw_values, 'year')])
sales_share = util.sql_read_table('DemandSalesData', 'vintage', return_iterable=True, subsector_id=self.id)
if len(sales_share):
sales_share_min_year = min(sales_share)
else:
sales_share_min_year = 9999
self.min_year = min(int(cfg.cfgfile.get('case', 'current_year')), driver_min_year,
stock_min_year, sales_share_min_year)
self.min_year = max(self.min_year, int(cfg.cfgfile.get('case', 'demand_start_year')))
self.min_year = min(self.shapes_weather_year, self.min_year)
self.min_year = int(int(cfg.cfgfile.get('case', 'year_step'))*round(float(self.min_year)/int(cfg.cfgfile.get('case', 'year_step'))))
self.years = range(self.min_year, int(cfg.cfgfile.get('case', 'end_year')) + 1,
int(cfg.cfgfile.get('case', 'year_step')))
self.vintages = self.years
def calculate_driver_min_year(self):
"""
calculates the minimum input years of all subsector drivers
"""
min_years = []
if self.has_stock:
for driver in self.stock.drivers.values():
min_years.append(min(driver.raw_values.index.levels[util.position_in_index(driver.raw_values, 'year')]))
if self.has_energy_demand:
for driver in self.energy_demand.drivers.values():
min_years.append(min(driver.raw_values.index.levels[util.position_in_index(driver.raw_values, 'year')]))
if self.has_service_demand:
for driver in self.service_demand.drivers.values():
min_years.append(min(driver.raw_values.index.levels[util.position_in_index(driver.raw_values, 'year')]))
if len(min_years):
self.driver_min_year = min(min_years)
else:
self.driver_min_year = 9999
    def calculate_technologies(self):
        """
        initiates calculation of all demand_technology attributes - costs, efficiency, etc.

        Also prunes technologies that have no data anywhere (no sales shares,
        no specified stocks, zero stock/demand inputs, not referenced by other
        technologies) before calculating the survivors.
        """
        if hasattr(self, 'technologies'):
            tech_classes = ['capital_cost_new', 'capital_cost_replacement', 'installation_cost_new',
                            'installation_cost_replacement', 'fixed_om', 'fuel_switch_cost', 'efficiency_main',
                            'efficiency_aux']
            # if all the tests are True, it gets deleted
            tests = defaultdict(list)
            for tech in self.technologies.keys():
                # a False entry anywhere vetoes deletion of that technology
                if tech in util.sql_read_table('DemandTechs', 'linked_id'):
                    tests[tech].append(False)
                if self.technologies[tech].reference_sales_shares.has_key(1):
                    tests[tech].append(self.technologies[tech].reference_sales_shares[1].raw_values.sum().sum() == 0)
                if self.perturbation is not None and self.perturbation.involves_tech_id(tech):
                    tests[tech].append(False)
                tests[tech].append(len(self.technologies[tech].sales_shares) == 0)
                tests[tech].append(len(self.technologies[tech].specified_stocks) == 0)
                # Do we have a specified stock in the inputs for this tech?
                if 'demand_technology' in self.stock.raw_values.index.names and tech in util.elements_in_index_level(self.stock.raw_values, 'demand_technology'):
                    tests[tech].append(self.stock.raw_values.groupby(level='demand_technology').sum().loc[tech].value==0)
                if hasattr(self,'energy_demand'):
                    if 'demand_technology' in self.energy_demand.raw_values.index.names and tech in util.elements_in_index_level(self.energy_demand.raw_values, 'demand_technology'):
                        tests[tech].append(self.energy_demand.raw_values.groupby(level='demand_technology').sum().loc[tech].value==0)
                if hasattr(self,'service_demand'):
                    if 'demand_technology' in self.service_demand.raw_values.index.names and tech in util.elements_in_index_level(self.service_demand.raw_values, 'demand_technology'):
                        tests[tech].append(self.service_demand.raw_values.groupby(level='demand_technology').sum().loc[tech].value==0)
                # a technology referenced by another tech's cost/efficiency class must be kept
                for tech_class in tech_classes:
                    if hasattr(getattr(self.technologies[tech], tech_class), 'reference_tech_id') and getattr(getattr(self.technologies[tech], tech_class), 'reference_tech_id') is not None:
                        tests[getattr(getattr(self.technologies[tech], tech_class), 'reference_tech_id')].append(False)
            # Python 2 .keys() returns a list, so deleting entries while looping is safe
            for tech in self.technologies.keys():
                if cfg.evolved_run == 'false':
                    if all(tests[tech]):
                        self.tech_ids.remove(tech)
                        del self.technologies[tech]
                    else:
                        self.technologies[tech].calculate([self.vintages[0] - 1] + self.vintages, self.years)
                else:
                    # evolved runs keep every technology regardless of the tests
                    self.technologies[tech].calculate([self.vintages[0] - 1] + self.vintages, self.years)
            self.remap_tech_attrs(tech_classes)
def add_energy_efficiency_measures(self, scenario):
"""
add all energy efficiency measures for this subsector to a dictionary
"""
measure_ids = scenario.get_measures('DemandEnergyEfficiencyMeasures', self.id)
self.energy_efficiency_measures = {id: EnergyEfficiencyMeasure(id, self.cost_of_capital) for id in measure_ids}
def energy_efficiency_measure_savings(self):
"""
calculates measure savings for measures input as intensity
based on the energy forecast it is being applied to i.e. 10% of x = y
"""
for measure in self.energy_efficiency_measures.values():
if measure.input_type == 'intensity':
measure.savings = DfOper.mult([measure.values, self.energy_forecast])
else:
measure.remap(map_from='values', map_to='savings', drivers=self.energy_forecast, driver_geography=cfg.primary_geography)
    def energy_efficiency_savings_calc(self):
        """
        sums and reconciles energy efficiency savings with total available energy to be saved

        Combined measure savings may exceed the forecast; in that case every
        measure's savings is scaled down proportionally so the total never
        exceeds the available energy. The reconciled savings are subtracted
        from self.energy_forecast.
        """
        # create an empty df
        self.energy_efficiency_measure_savings()
        self.initial_energy_efficiency_savings = util.empty_df(self.energy_forecast.index,
                                                               self.energy_forecast.columns.values)
        # add up each measure's savings to return total savings
        for id in self.energy_efficiency_measures:
            measure = self.energy_efficiency_measures[id]
            self.initial_energy_efficiency_savings = DfOper.add([self.initial_energy_efficiency_savings,
                                                                 measure.savings])
        # check for savings in excess of demand
        excess_savings = DfOper.subt([self.energy_forecast, self.initial_energy_efficiency_savings]) * -1
        excess_savings[excess_savings < 0] = 0
        # if any savings in excess of demand, adjust all measure savings down
        if excess_savings.sum()['value'] == 0:
            self.energy_efficiency_savings = self.initial_energy_efficiency_savings
        else:
            self.energy_efficiency_savings = DfOper.subt([self.initial_energy_efficiency_savings,
                                                          excess_savings])
            # per-cell scale factor shared by all measures (division is safe here:
            # excess can only occur where initial savings are non-zero)
            impact_adjustment = self.energy_efficiency_savings / self.initial_energy_efficiency_savings
            for measure in self.energy_efficiency_measures.values():
                measure.savings = DfOper.mult([measure.savings, impact_adjustment])
        self.energy_forecast = DfOper.subt([self.energy_forecast, self.energy_efficiency_savings])
def add_service_demand_measures(self, scenario):
"""
add all service demand measures for this subsector to a dictionary
"""
measure_ids = scenario.get_measures('DemandServiceDemandMeasures', self.id)
self.service_demand_measures = {id: ServiceDemandMeasure(id, self.cost_of_capital) for id in measure_ids}
def service_demand_measure_savings(self):
"""
calculates measure savings based on the service demand forecast it is being applied to
i.e. 10% of x = y
"""
for id in self.service_demand_measures:
measure = self.service_demand_measures[id]
if measure.input_type == 'intensity':
measure.savings = DfOper.mult([measure.values,self.service_demand.values])
else:
measure.remap(map_from='values', map_to='savings', drivers=self.service_demand.values, driver_geography=cfg.primary_geography)
    def service_demand_savings_calc(self):
        """
        sums and reconciles service demand savings with total available service demand to be saved

        Mirrors energy_efficiency_savings_calc: combined measure savings are
        capped at the available service demand and each measure is scaled
        down proportionally when they would exceed it. The reconciled savings
        are subtracted from self.service_demand.values.
        """
        # create an empty df
        self.service_demand_measure_savings()
        self.initial_service_demand_savings = util.empty_df(self.service_demand.values.index,
                                                            self.service_demand.values.columns.values)
        # add up each measure's savings to return total savings
        for id in self.service_demand_measures:
            measure = self.service_demand_measures[id]
            self.initial_service_demand_savings = DfOper.add([self.initial_service_demand_savings,
                                                              measure.savings])
        # check for savings in excess of demand
        excess_savings = DfOper.subt([self.service_demand.values, self.initial_service_demand_savings]) * -1
        excess_savings[excess_savings < 0] = 0
        # if any savings in excess of demand, adjust all measure savings down
        if excess_savings.sum()['value'] == 0:
            self.service_demand_savings = self.initial_service_demand_savings
        else:
            self.service_demand_savings = DfOper.subt([self.initial_service_demand_savings, excess_savings])
            # per-cell scale factor shared by all measures (division is safe here:
            # excess can only occur where initial savings are non-zero)
            impact_adjustment = self.service_demand_savings / self.initial_service_demand_savings
            for id in self.service_demand_measures:
                measure = self.service_demand_measures[id]
                measure.savings = DfOper.mult([measure.savings, impact_adjustment])
        self.service_demand.values = DfOper.subt([self.service_demand.values,
                                                  self.service_demand_savings])
def add_flexible_load_measures(self, scenario):
"""
load this subsector's flexible load measure, if it has one
"""
measure_ids = scenario.get_measures('DemandFlexibleLoadMeasures', self.id)
if measure_ids:
assert len(measure_ids) == 1, "Found more than one flexible load measure for subsector {}".format(self.id,)
self.flexible_load_measure = FlexibleLoadMeasure(measure_ids[0])
if 'demand_technology' in self.flexible_load_measure.values.index.names:
techs_with_specific_flexible_load = sorted(self.flexible_load_measure.values.index.get_level_values('demand_technology').unique())
if techs_with_specific_flexible_load:
assert hasattr(self,'technologies'), "subsector {} cannot have a technology specific flexible load measure if it has no technologies".format(self.name)
if self.perturbation and self.perturbation.flexible_operation:
assert len(measure_ids) == 0, 'perturbations in flexible load when a flexible load measure already exists is not supported yet'
self.flexible_load_measure = FlexibleLoadMeasure2(self.perturbation)
def add_fuel_switching_measures(self, scenario):
"""
add all fuel switching measures for this subsector to a dictionary
"""
measure_ids = scenario.get_measures('DemandFuelSwitchingMeasures', self.id)
self.fuel_switching_measures = {id: FuelSwitchingMeasure(id, self.cost_of_capital) for id in measure_ids}
def fuel_switching_measure_impacts(self):
"""
calculates measure impact in energy terms for measures defined as intensity. For measures defined
as totals, the measure is remapped to the energy forecast.
"""
for measure in self.fuel_switching_measures.values():
indexer = util.level_specific_indexer(self.energy_forecast, 'final_energy', measure.final_energy_from_id)
if measure.impact.input_type == 'intensity':
measure.impact.savings = DfOper.mult([measure.impact.values,
self.energy_forecast.loc[indexer, :]])
else:
measure.impact.remap(map_from='values', map_to='savings', drivers=self.energy_forecast.loc[indexer, :],driver_geography=cfg.primary_geography)
measure.impact.additions = DfOper.mult([measure.impact.savings, measure.energy_intensity.values])
util.replace_index_label(measure.impact.additions,
{measure.final_energy_from_id: measure.final_energy_to_id},
level_name='final_energy')
def fuel_switching_impact_calc(self):
"""
sums and reconciles fuel switching impacts with total available energy to be saved
"""
# create an empty df
self.fuel_switching_measure_impacts()
self.initial_fuel_switching_savings = util.empty_df(self.energy_forecast.index,
self.energy_forecast.columns.values)
self.fuel_switching_additions = util.empty_df(self.energy_forecast.index, self.energy_forecast.columns.values)
# add up each measure's savings to return total savings
for measure in self.fuel_switching_measures.values():
self.initial_fuel_switching_savings = DfOper.add([self.initial_fuel_switching_savings,
measure.impact.savings])
self.fuel_switching_additions = DfOper.add([self.fuel_switching_additions,
measure.impact.additions])
# check for savings in excess of demand
fs_savings = DfOper.subt([self.energy_forecast, self.initial_fuel_switching_savings])
excess_savings = DfOper.add([self.fuel_switching_additions, fs_savings]) * -1
excess_savings[excess_savings < 0] = 0
# if any savings in excess of demand, adjust all measure savings down
if excess_savings.sum()['value'] == 0:
self.fuel_switching_savings = self.initial_fuel_switching_savings
else:
self.fuel_switching_savings = DfOper.subt([self.initial_fuel_switching_savings, excess_savings])
impact_adjustment = self.fuel_switching_savings / self.initial_fuel_switching_savings
for measure in self.fuel_switching_measures.values():
measure.impact.savings = DfOper.mult([measure.impact.savings, impact_adjustment])
self.energy_forecast = DfOper.subt([self.energy_forecast, self.fuel_switching_savings])
self.energy_forecast = DfOper.add([self.energy_forecast, self.fuel_switching_additions])
def add_service_links(self):
""" loops through service demand links and adds service demand link instance to subsector"""
self.service_links = {}
link_ids = util.sql_read_table('DemandServiceLink', 'id', subsector_id=self.id)
if link_ids is not None:
for link_id in util.ensure_iterable_and_not_string(link_ids):
self.add_service_link(link_id)
def add_service_link(self, link_id):
"""add service demand link object to subsector"""
if link_id in self.service_links:
# ToDo note that a service link by the same name was added twice
return
self.service_links[link_id] = ServiceLink(link_id)
def add_stock(self):
"""add stock instance to subsector"""
self.stock = DemandStock(id=self.id, drivers=self.drivers, scenario=self.scenario)
def add_technologies(self, service_demand_unit, stock_time_unit):
"""loops through subsector technologies and adds demand_technology instances to subsector"""
self.technologies = {}
ids = util.sql_read_table("DemandTechs",column_names='id',subsector_id=self.id, return_iterable=True)
for id in ids:
try:
self.add_demand_technology(id, self.id, service_demand_unit, stock_time_unit, self.cost_of_capital, self.scenario)
except:
pdb.set_trace()
if self.perturbation is not None:
self.add_new_technology_for_perturbation()
self.tech_ids = self.technologies.keys()
self.tech_ids.sort()
def add_new_technology_for_perturbation(self):
"""
By adding a new technology specific to a perturbation, it allows us to isolate a single vintage
"""
# here we don't want to change the technology name if the sales share is zero... flexible load case
for tech_id, new_tech_id in self.perturbation.new_techs.items():
self.technologies[new_tech_id] = copy.deepcopy(self.technologies[tech_id])
self.technologies[new_tech_id].id = new_tech_id #should be safe to replace the id at this point
self.technologies[new_tech_id].reference_sales_shares = {} # empty the reference sales share
# we also need to add the technology to the config outputs
cfg.outputs_id_map['demand_technology'][new_tech_id] = "{} {}".format(str(new_tech_id)[:4], cfg.outputs_id_map['demand_technology'][tech_id])
def add_demand_technology(self, id, subsector_id, service_demand_unit, stock_time_unit, cost_of_capital, scenario, **kwargs):
"""Adds demand_technology instances to subsector"""
if id in self.technologies:
# ToDo note that a demand_technology was added twice
return
self.technologies[id] = DemandTechnology(id, subsector_id, service_demand_unit, stock_time_unit, cost_of_capital, scenario=scenario, **kwargs)
def remap_tech_attrs(self, attr_classes, attr='values'):
"""
loops through attr_classes (ex. capital_cost, energy, etc.) in order to map technologies
that reference other technologies in their inputs (i.e. demand_technology A is 150% of the capital cost demand_technology B)
"""
attr_classes = util.ensure_iterable_and_not_string(attr_classes)
for demand_technology in self.technologies.keys():
for attr_class in attr_classes:
# It is possible that recursion has converted before we get to an
# attr_class in the list. If so, continue.
# if getattr(getattr(self.technologies[demand_technology], attr_class), 'absolute'):
# continue
try:
self.remap_tech_attr(demand_technology, attr_class, attr)
except:
pdb.set_trace()
def remap_tech_attr(self, demand_technology, class_name, attr):
"""
map reference demand_technology values to their associated demand_technology classes
"""
tech_class = getattr(self.technologies[demand_technology], class_name)
if hasattr(tech_class, 'reference_tech_id') and hasattr(tech_class,'definition') and tech_class.definition == 'relative':
if getattr(tech_class, 'reference_tech_id'):
ref_tech_id = (getattr(tech_class, 'reference_tech_id'))
ref_tech_class = getattr(self.technologies[ref_tech_id], class_name)
# converted is an indicator of whether an input is an absolute
# or has already been converted to an absolute
if not ref_tech_class.definition=='absolute':
# If a technnology hasn't been mapped, recursion is used
# to map it first (this can go multiple layers)
self.remap_tech_attr(getattr(tech_class, 'reference_tech_id'), class_name, attr)
if tech_class.raw_values is not None:
tech_data = getattr(tech_class, attr)
flipped = getattr(ref_tech_class, 'flipped') if hasattr(ref_tech_class, 'flipped') else False
if flipped is True:
tech_data = 1 / tech_data
# not all our techs have reference tech_operation which indicates how to do the math
if hasattr(tech_class, 'reference_tech_operation') and tech_class.reference_tech_operation == 'add':
# ADD instead of multiply
new_data = util.DfOper.add([tech_data, getattr(ref_tech_class, attr)])
else:
new_data = util.DfOper.mult([tech_data, getattr(ref_tech_class, attr)])
if hasattr(ref_tech_class, 'values_level'):
if hasattr(tech_class, 'reference_tech_operation') and tech_class.reference_tech_operation == 'add':
tech_data = getattr(tech_class, 'values_level')
new_data_level = util.DfOper.add([tech_data, getattr(ref_tech_class, 'values_level')])
else:
new_data_level = util.DfOper.mult([tech_data, getattr(ref_tech_class, 'values_level')])
else:
new_data = copy.deepcopy(getattr(ref_tech_class, attr))
if hasattr(ref_tech_class,'values_level'):
new_data_level = copy.deepcopy(getattr(ref_tech_class, 'values_level'))
tech_attributes = vars(getattr(self.technologies[ref_tech_id], class_name))
for attribute_name in tech_attributes.keys():
if not hasattr(tech_class, attribute_name) or getattr(tech_class, attribute_name) is None:
setattr(tech_class, attribute_name,
copy.deepcopy(getattr(ref_tech_class, attribute_name)) if hasattr(ref_tech_class,
attribute_name) else None)
setattr(tech_class, attr, new_data)
if hasattr(ref_tech_class,'values_level'):
setattr(tech_class,'values_level',new_data_level)
tech_class.definition == 'absolute'
delattr(tech_class,'reference_tech_id')
else:
# Now that it has been converted, set indicator to tru
if hasattr(tech_class,'definition'):
tech_class.definition == 'absolute'
def project_measure_stocks(self):
""" projects the 'stock' of measures for use in cost calculation """
if self.sub_type == 'service and efficiency' or self.sub_type == 'service and energy':
self.project_sd_measure_stock()
self.project_ee_measure_stock()
self.project_fs_measure_stock()
elif self.sub_type == 'energy':
self.project_ee_measure_stock()
self.project_fs_measure_stock()
else:
self.project_sd_measure_stock()
    def project(self):
        """Project subsector data based on the subsector type.

        Dispatches on self.sub_type:
        - 'service and efficiency': energy = service demand * service efficiency
        - 'service and energy' / 'energy': energy demand projected and converted
          to the calculation energy unit
        - 'link': only technologies and stock are projected
        - 'stock and service' / 'stock and energy': stock and service demand are
          projected (possibly derived from each other), then reconciled through
          service demand modifiers

        NOTE(review): original docstring had a stray fourth quote (""""...).
        """
        if self.sub_type == 'service and efficiency':
            self.project_service_demand()
            self.service_efficiency.calculate(self.vintages, self.years)
            self.energy_forecast = DfOper.mult([self.service_demand.values, self.service_efficiency.values])
        elif self.sub_type == 'service and energy':
            self.project_service_demand(service_dependent=True)
            self.project_energy_demand(service_dependent=True)
            self.energy_demand.values = util.unit_convert(self.energy_demand.values,
                                                          unit_from_num=self.energy_demand.unit,
                                                          unit_to_num=cfg.calculation_energy_unit)
            self.energy_forecast = self.energy_demand.values
        elif self.sub_type == 'energy':
            self.project_energy_demand()
            self.energy_demand.values = util.unit_convert(self.energy_demand.values,
                                                          unit_from_num=self.energy_demand.unit,
                                                          unit_to_num=cfg.calculation_energy_unit)
            self.energy_forecast = self.energy_demand.values
        elif self.sub_type == 'link':
            self.calculate_technologies()
            self.project_stock()
        elif self.sub_type == 'stock and service' or self.sub_type == 'stock and energy':
            self.calculate_technologies()
            # determine what levels the service demand forecast has in order to determine
            # what levels to calculate the service demand modifier on i.e. by demand_technology
            # or by final energy
            self.determine_service_subset()
            if self.stock.demand_stock_unit_type == 'equipment':
                if self.sub_type == 'stock and energy':
                    # determine the year range of energy demand inputs
                    self.min_year = self.min_cal_year(self.energy_demand)
                    self.max_year = self.max_cal_year(self.energy_demand)
                    # project the stock and prepare a subset for use in calculating
                    # the efficiency of the stock during the years in which we have
                    # energy demand inputs
                    self.project_stock(stock_dependent=self.energy_demand.is_stock_dependent)
                    self.stock_subset_prep()
                    self.energy_demand.project(map_from='raw_values', fill_timeseries=False)
                    # divide by the efficiency of the stock to return service demand values
                    self.efficiency_removal()
                elif self.sub_type == 'stock and service':
                    # determine the year range of service demand inputs
                    self.min_year = self.min_cal_year(self.service_demand)
                    self.max_year = self.max_cal_year(self.service_demand)
            elif self.stock.demand_stock_unit_type == 'capacity factor':
                if self.sub_type == 'stock and service':
                    # determine the year range of service demand inputs
                    self.min_year = self.min_cal_year(self.service_demand)
                    self.max_year = self.max_cal_year(self.service_demand)
                    # project the service demand
                    self.service_demand.project(map_from='raw_values', fill_timeseries=False)
                    # service demand is projected, so change map from to values
                    self.service_demand.map_from = 'values'
                    # change the service demand to a per stock_time_unit service demand
                    # ex. kBtu/year to kBtu/hour average service demand
                    time_step_service = util.unit_convert(self.service_demand.values, unit_from_num=self.service_demand.unit,unit_to_num=self.stock.unit, unit_from_den='year',
                                                          unit_to_den=self.stock.time_unit)
                    # divide by capacity factor stock inputs to get a service demand stock
                    # ex. kBtu/hour/capacity factor equals kBtu/hour stock
                    self.stock.remap(map_from='raw_values', map_to='int_values', fill_timeseries=True)
                    _, x = util.difference_in_df_names(time_step_service, self.stock.int_values)
                    if x:
                        raise ValueError('service demand must have the same index levels as stock when stock is specified in capacity factor terms')
                    else:
                        self.stock.int_values = DfOper.divi([time_step_service, self.stock.int_values],expandable=(False,True),collapsible=(True,False))
                        # change map_from to int_values, which is an intermediate value, not yet final
                        self.stock.map_from = 'int_values'
                        # stock is by definition service demand dependent
                        self.service_demand.int_values = self.service_demand.values
                        self.stock.is_service_demand_dependent = 1
                        self.service_subset = None
                else:
                    # used when we don't have service demand, just energy demand
                    # determine the year range of energy demand inputs
                    self.min_year = self.min_cal_year(self.energy_demand)
                    self.max_year = self.max_cal_year(self.energy_demand)
                    self.energy_demand.project(map_from='raw_values', fill_timeseries=False)
                    self.energy_demand.map_from = 'values'
                    # change the energy demand to a per stock_time_unit energy demand
                    # ex. kBtu/year to kBtu/hour average service demand
                    time_step_energy = util.unit_convert(self.energy_demand.values, unit_from_den='year',unit_to_den=self.stock.time_unit)
                    # divide by capacity factor stock inputs to get a service demand stock
                    # ex. kBtu/hour/capacity factor equals kBtu/hour stock
                    self.stock.remap(map_from='raw_values', map_to='int_values')
                    _, x = util.difference_in_df_names(time_step_energy, self.stock.int_values)
                    if x:
                        raise ValueError(
                            'energy demand must have the same index levels as stock when stock is specified in capacity factor terms')
                    else:
                        self.stock.int_values = DfOper.divi([time_step_energy, self.stock.int_values],expandable=(False,True),collapsible=(True,False))
                    # project energy demand stock (reference run, totals)
                    self.stock.map_from = 'int_values'
                    self.stock.projected_input_type = 'total'
                    self.project_stock(map_from=self.stock.map_from,stock_dependent = self.energy_demand.is_stock_dependent,reference_run=True)
                    self.stock.projected = False
                    self.stock_subset_prep()
                    # remove stock efficiency from energy demand to return service demand
                    self.efficiency_removal()
                    # change the service demand to a per stock_time_unit service demand
                    # ex. kBtu/year to kBtu/hour average service demand
                    time_step_service = util.unit_convert(self.service_demand.int_values, unit_from_den='year', unit_to_den=self.stock.time_unit)
                    # divide by capacity factor stock inputs to get a service demand stock
                    # ex. kBtu/hour/capacity factor equals kBtu/hour stock
                    _, x = util.difference_in_df_names(time_step_service, self.stock.int_values)
                    if x:
                        raise ValueError('service demand must have the same index levels as stock when stock is specified in capacity factor terms')
                    else:
                        self.stock.remap(map_from='raw_values', map_to='int_values')
                        self.stock.int_values = util.DfOper.divi([time_step_service, self.stock.int_values],expandable=(False,True),collapsible=(True,False))
                    # stock is by definition service demand dependent
                    self.stock.is_service_demand_dependent = 1
                    self.service_demand.int_values = self.service_demand.int_values.groupby(level=self.stock.rollover_group_names+['year']).sum()
                    self.service_subset = None
            elif self.stock.demand_stock_unit_type == 'service demand':
                if self.sub_type == 'stock and service':
                    # convert service demand units to stock units
                    self.service_demand.int_values = util.unit_convert(self.service_demand.raw_values, unit_from_num=self.service_demand.unit, unit_to_num=self.stock.unit)
                    self.service_demand.remap(map_from='int_values',map_to='int_values',fill_timeseries=False)
                    self.service_demand.unit = self.stock.unit
                    self.service_demand.current_data_type = self.service_demand.input_type
                    self.service_demand.projected = False
                    self.service_demand.map_from = 'int_values'
                    self.min_year = self.min_cal_year(self.service_demand)
                    self.max_year = self.max_cal_year(self.service_demand)
                    self.project_stock(stock_dependent=self.service_demand.is_stock_dependent)
                    self.stock_subset_prep()
                if self.sub_type == 'stock and energy':
                    # used when we don't have service demand, just energy demand
                    # determine the year range of energy demand inputs
                    self.min_year = self.min_cal_year(self.energy_demand)
                    self.max_year = self.max_cal_year(self.energy_demand)
                    self.energy_demand.project(map_from='raw_values', fill_timeseries=False)
                    self.energy_demand.map_from = 'values'
                    self.project_stock(stock_dependent = self.energy_demand.is_stock_dependent)
                    self.stock_subset_prep()
                    # remove stock efficiency from energy demand to- return service demand
                    self.efficiency_removal()
            else:
                raise ValueError("incorrect demand stock unit type specification")
            # some previous steps have required some manipulation of initial raw values and the starting point
            # for projection will be a different 'map_from' variable
            self.stock.map_from = self.stock.map_from if hasattr(self.stock, 'map_from') else 'raw_values'
            # service demand has not been projected by default, so starting point, by default, is 'raw values
            self.service_demand.map_from = self.service_demand.map_from if hasattr(self.service_demand,
                                                                                   'map_from') else 'raw_values'
            # project stock and service demand in dependency order, then
            # compute the service demand modifier relating the two
            if self.stock.is_service_demand_dependent == 0 and self.service_demand.is_stock_dependent == 0:
                self.project_stock(map_from=self.stock.map_from)
                self.project_service_demand(map_from=self.service_demand.map_from)
                self.sd_modifier_full()
            elif self.stock.is_service_demand_dependent == 1 and self.service_demand.is_stock_dependent == 0:
                self.project_service_demand(map_from=self.service_demand.map_from,service_dependent=True)
                self.project_stock(map_from=self.stock.map_from, service_dependent=True)
                self.sd_modifier_full()
            elif self.stock.is_service_demand_dependent == 0 and self.service_demand.is_stock_dependent == 1:
                self.project_stock(map_from=self.stock.map_from,stock_dependent=True)
                self.project_service_demand(map_from=self.service_demand.map_from, stock_dependent=True)
                self.sd_modifier_full()
            else:
                raise ValueError(
                    "stock and service demands both specified as dependent on each other in subsector %s" % self.id)
            # collapse service demand onto the stock's rollover groups plus year
            levels = [level for level in self.stock.rollover_group_names if level in self.service_demand.values.index.names] + ['year']
            self.service_demand.values = self.service_demand.values.groupby(
                level=levels).sum()
def determine_service_subset(self):
if self.has_service_demand is not False:
attr = 'service_demand'
else:
attr = 'energy_demand'
demand = getattr(self, attr)
index_names = demand.raw_values.index.names
if 'final_energy' in index_names and 'demand_technology' in index_names:
# TODO review
self.service_subset = 'final_energy and demand_technology'
elif 'final_energy' in index_names and 'demand_technology' not in index_names:
self.service_subset = 'final_energy'
elif 'final_energy' not in index_names and 'demand_technology' in index_names:
self.service_subset = 'demand_technology'
else:
self.service_subset = None
def stock_subset_prep(self):
self.stock.tech_subset_normal = self.stack_and_reduce_years(self.stock.values_normal.groupby(level=util.ix_excl(self.stock.values_normal, 'vintage')).sum(), self.min_year, self.max_year)
self.stock.tech_subset_normal.fillna(0)
if self.service_subset == 'final_energy':
if not hasattr(self.stock, 'values_efficiency_normal'):
# subsectors with technologies having dual fuel capability do not generally calculate this. Only do so for this specific case.
self.stock.values_efficiency = pd.concat([self.stock.values_efficiency_main, self.stock.values_efficiency_aux])
self.stock.values_efficiency_normal = self.stock.values_efficiency.groupby(level=util.ix_excl(self.stock.values_efficiency, ['final_energy', 'demand_technology', 'vintage'])).transform(lambda x: x / x.sum()).fillna(0)
self.stock.energy_subset_normal = self.stack_and_reduce_years(self.stock.values_efficiency_normal.groupby(
level=util.ix_excl(self.stock.values_efficiency_normal, ['vintage', 'demand_technology'])).sum(), self.min_year, self.max_year)
def efficiency_removal(self):
if self.service_subset is None:
# base the efficiency conversion on the total stock
self.convert_energy_to_service('all')
self.service_demand.map_from = 'int_values'
elif self.service_subset == 'demand_technology':
# base the efficiency conversion on individual demand_technology efficiencies
# if service demand is in demand_technology terms.
self.convert_energy_to_service('demand_technology')
self.service_demand.map_from = 'int_values'
elif self.service_subset == 'final_energy':
# base the efficiency conversion on efficiencies of equipment with certain final energy types if service
# if service demand is in final energy terms
self.convert_energy_to_service('final_energy')
self.service_demand.map_from = 'int_values'
else:
# if service demand is input in demand_technology and energy terms, reduce over
# the final energy level because it may contradict input utility factors.
# Then use demand_technology efficiencies to convert to service demand
self.energy_demand.int_values = self.energy_demand.raw_values.groupby(level=util.ix_excl(self.energy_demand.raw_values, ['final_energy'])).sum()
self.convert_energy_to_service('demand_technology')
self.service_demand.int_values = DfOper.mult([self.service_demand.int_values,
self.stock.tech_subset])
self.service_demand.map_from = 'int_values'
def sd_modifier_full(self):
"""
calculates the service demand modifier (relation of the share of the service demand
a demand_technology satisfies vs. the percentage of stock it represents) for subsectors if input service or energy demand
has a demand_technology or final energy index. Once an initial calculation is made, demand_technology specific service demand modifiers
can be applied. sd_modifiers are multiplied by the normalized stock and then must normalize to 1 in every year (i.e. the amount of
service demand that a stock satisfies in any year must equal the service demand projection)
Tech - A : 50% of Stock
Tech - B : 50% of Stock
Tech - A: 25% of Service Demand
Tech - B: 75% of Service Demand
Tech - A: sd_modifier = .5
Tech - B: sd_modifier = 1.5
service demand = .5 * .5 + 1.5 * .5 = 100%
"""
self.stock_subset_prep()
df_for_indexing = util.empty_df(index=self.stock.values_efficiency.index, columns=self.stock.values.columns.values, fill_value=1)
sd_subset = getattr(self.service_demand, 'values')
sd_subset = util.df_slice(self.service_demand.values,np.arange(self.min_year,self.max_year+1,1),'year',drop_level=False)
if self.service_subset is None:
# if there is no service subset, initial service demand modifiers equal 1"
sd_modifier = df_for_indexing
elif self.service_subset == 'demand_technology':
# calculate share of service demand by demand_technology
sd_subset_normal = sd_subset.groupby(level=util.ix_excl(sd_subset, ['demand_technology'])).transform(lambda x: x / x.sum())
# calculate service demand modifier by dividing the share of service demand by the share of stock
sd_modifier = DfOper.divi([sd_subset_normal, self.stock.tech_subset_normal])
# expand the dataframe to put years as columns
sd_modifier = self.vintage_year_array_expand(sd_modifier, df_for_indexing, sd_subset)
sd_modifier.fillna(1, inplace=True)
sd_modifier.sort_index()
# replace any 0's with 1 since zeros indicates no stock in that subset, not a service demand modifier of 0
sd_modifier[sd_modifier == 0] = 1
elif self.service_subset == 'final_energy':
# calculate share of service demand by final energy
sd_subset = sd_subset.groupby(level=util.ix_excl(sd_subset, ['demand_technology'])).sum()
sd_subset_normal = sd_subset.groupby(level=util.ix_excl(sd_subset, ['final_energy'])).transform(
lambda x: x / x.sum())
# calculate service demand modifier by dividing the share of service demand by the share of stock
sd_modifier = DfOper.divi([sd_subset_normal, self.stock.energy_subset_normal])
# expand the dataframe to put years as columns
sd_modifier = self.vintage_year_array_expand(sd_modifier, df_for_indexing, sd_subset)
sd_modifier.fillna(1, inplace=True)
# replace any 0's with 1 since zeros indicates no stock in that subset, not a service demand modifier of 0
sd_modifier[sd_modifier == 0] = 1
else:
# group over final energy since this is overspecified with demand_technology utility factors if indices of both
# demand_technology and energy are present
sd_subset = sd_subset.groupby(level=util.ix_excl(sd_subset, 'final_energy')).sum()
# calculate share of service demand by demand_technology
sd_subset_normal = sd_subset.groupby(level=util.ix_excl(sd_subset, 'demand_technology')).transform(
lambda x: x / x.sum())
# calculate service demand modifier by dividing the share of service demand by the share of stock
sd_modifier = DfOper.divi([sd_subset_normal, self.stock.tech_subset_normal])
# expand the dataframe to put years as columns
sd_modifier = self.vintage_year_array_expand(sd_modifier, df_for_indexing, sd_subset)
sd_modifier.fillna(1, inplace=True)
# replace any 0's with 1 since zeros indicates no stock in that subset, not a service demand modifier of 0
sd_modifier[sd_modifier == 0] = 1
#
# if 'final_energy' in (sd_modifier.index.names):
# # if final energy is in the original sd_modifier definition, we need to convert it to
# # a dataframe that instead has service demand modifiers by demand_technology
# blank_modifier_main = util.empty_df(index=self.stock.tech_subset_normal.index,
# columns=self.stock.tech_subset_normal.columns.values, fill_value=1)
# blank_modifier_aux = util.empty_df(index=self.stock.tech_subset_normal.index,
# columns=self.stock.tech_subset_normal.columns.values, fill_value=0.)
# # lookup service demand modifiers for each demand_technology in a dataframe of service demand modifiers by energy type
# for tech in self.tech_ids:
# main_energy_id = self.technologies[tech].efficiency_main.final_energy_id
# aux_energy_id = getattr(self.technologies[tech].efficiency_aux, 'final_energy_id') if hasattr(
# self.technologies[tech].efficiency_aux, 'final_energy_id') else None
# main_utility_factor = self.technologies[tech].efficiency_main.utility_factor
# aux_utility_factor = 1 - main_utility_factor
# main_energy_indexer = util.level_specific_indexer(sd_modifier, 'final_energy', main_energy_id)
# aux_energy_indexer = util.level_specific_indexer(sd_modifier, 'final_energy', aux_energy_id)
# tech_indexer = util.level_specific_indexer(blank_modifier_main, 'demand_technology', tech)
# blank_modifier_main.loc[tech_indexer, :] = sd_modifier.loc[main_energy_indexer, :] * main_utility_factor
# if aux_energy_id is not None:
# blank_modifier_aux.loc[tech_indexer, :] = sd_modifier.loc[aux_energy_indexer,
# :] * aux_utility_factor
# blank_modifier = DfOper.add([blank_modifier_main, blank_modifier_aux])
# sd_modifier = blank_modifier
# sd_modifier = self.vintage_year_array_expand(sd_modifier, df_for_indexing, sd_subset)
# sd_modifier.fillna(1, inplace=True)
# sd_modifier.sort_index()
# loop through technologies and add service demand modifiers by demand_technology-specific input (i.e. demand_technology has a
# a service demand modifier class)
for tech in self.tech_ids:
try:
if self.technologies[tech].service_demand_modifier.raw_values is not None:
tech_modifier = getattr(getattr(self.technologies[tech], 'service_demand_modifier'), 'values')
tech_modifier = util.expand_multi(tech_modifier, sd_modifier.index.levels, sd_modifier.index.names,
drop_index='demand_technology').fillna(method='bfill')
indexer = util.level_specific_indexer(sd_modifier, 'demand_technology', tech)
sd_modifier.loc[indexer, :] = tech_modifier.values
except:
pdb.set_trace()
# multiply stock by service demand modifiers
stock_values = DfOper.mult([sd_modifier, self.stock.values_efficiency]).groupby(level=self.stock.rollover_group_names).sum()
# # group stock and adjusted stock values
adj_stock_values = self.stock.values_efficiency.groupby(level=self.stock.rollover_group_names).sum()
# stock_values = self.stock.values.groupby(
# level=util.ix_excl(self.stock.values, exclude=['vintage', 'demand_technology'])).sum()
# # if this adds up to more or less than 1, we have to adjust the service demand modifers
sd_mod_adjustment = DfOper.divi([stock_values, adj_stock_values])
sd_mod_adjustment.replace([np.inf, -np.inf, np.nan], 1, inplace=True)
self.sd_modifier = sd_modifier
self.sd_mod_adjustment = sd_mod_adjustment
self.service_demand.modifier = DfOper.divi([sd_modifier, sd_mod_adjustment])
def calc_tech_survival_functions(self, steps_per_year=1, rollover_threshold=.95):
self.stock.spy = steps_per_year
for demand_technology in self.technologies.values():
demand_technology.spy = steps_per_year
demand_technology.set_survival_parameters()
demand_technology.set_survival_vintaged()
demand_technology.set_decay_vintaged()
demand_technology.set_survival_initial_stock()
demand_technology.set_decay_initial_stock()
for demand_technology in self.technologies.values():
if demand_technology.survival_vintaged[1] < rollover_threshold:
logging.debug(' increasing stock rollover time steps per year to {} to account for short lifetimes of equipment'.format(str(steps_per_year*2)))
self.calc_tech_survival_functions(steps_per_year=steps_per_year*2)
def calc_measure_survival_functions(self, measures, stock, steps_per_year=1, rollover_threshold=.99):
stock.spy = steps_per_year
for measure in measures.values():
measure.spy = steps_per_year
measure.set_survival_parameters()
measure.set_survival_vintaged()
measure.set_decay_vintaged()
measure.set_survival_initial_stock()
measure.set_decay_initial_stock()
for measure in measures.values():
if measure.survival_vintaged[1] < rollover_threshold:
self.calc_measure_survival_functions(measures, stock, steps_per_year=steps_per_year*2)
def max_cal_year(self, demand):
"""finds the maximum year to calibrate service demand input in energy terms
back to service terms (i.e. without efficiency)
"""
current_year = datetime.now().year - 1
year_position = util.position_in_index(getattr(demand, 'raw_values'), 'year')
year_position = getattr(demand, 'raw_values').index.names.index('year')
max_service_year = getattr(demand, 'raw_values').index.levels[year_position].max()
# return min(current_year, max_service_year)
return max_service_year
def min_cal_year(self, demand):
"""finds the maximum year to calibrate service demand input in energy terms
back to service terms (i.e. without efficiency)
"""
current_year = datetime.now().year - 1
year_position = util.position_in_index(getattr(demand, 'raw_values'), 'year')
year_position = getattr(demand, 'raw_values').index.names.index('year')
min_service_year = getattr(demand, 'raw_values').index.levels[year_position].min()
return min(current_year, min_service_year)
@staticmethod
def stack_and_reduce_years(df, min_year, max_year):
    """Move years from columns back into a 'year' index level and trim the range.

    Converts efficiency outputs with years across the columns back to a
    'year' MultiIndex level, keeping only min_year..max_year (inclusive)
    out of the full projection.

    Args:
        df (pd.DataFrame): wide frame with one column per year.
        min_year (int): first year to keep.
        max_year (int): last year to keep.

    Returns:
        pd.DataFrame: single 'value' column with 'year' appended to the index.
    """
    df = df.copy()
    if min_year == max_year:
        years = [min_year]
    else:
        years = range(min_year, max_year + 1)
    # .loc replaces the deprecated (and since-removed) .ix indexer; columns
    # here are year labels, so label-based selection is the correct semantics
    df = df.loc[:, years]
    df = pd.DataFrame(df.stack())
    util.replace_index_name(df, 'year')
    util.replace_column(df, 'value')
    return df
def convert_energy_to_service(self, other_index):
    """Convert service demand input expressed in energy terms to service terms.

    Divides energy demand by rolled-over stock efficiencies (up to the
    minimum of the current year - 1 and the last year of input service
    demand) and stores the result as internal service demand values.

    Args:
        other_index (str): efficiency dictionary key ('demand_technology',
            'final_energy', or another aggregate key); determines which
            index levels are aggregated away before dividing.
    """
    self.rollover_efficiency_outputs(other_index)
    # deep copy so the groupby/stacking below never mutates the stored efficiency
    eff = copy.deepcopy(self.stock.efficiency[other_index]['all'])
    # keep the level named by other_index; aggregate away the others plus vintage
    if other_index == 'demand_technology':
        exclude_index = ['final_energy']
    elif other_index == 'final_energy':
        exclude_index = ['demand_technology']
    else:
        exclude_index = ['final_energy', 'demand_technology']
    exclude_index.append('vintage')
    eff = eff.groupby(
        level=util.ix_excl(self.stock.efficiency[other_index]['all'], exclude=exclude_index)).sum()
    # move years from columns back to the index, trimmed to the calibration window
    eff = self.stack_and_reduce_years(eff, self.min_year,
                                      self.max_year)
    self.energy_demand.values = util.unit_convert(self.energy_demand.values, unit_from_num=self.energy_demand.unit,
                                                  unit_to_num=cfg.calculation_energy_unit)
    # make a copy of energy demand for use as service demand
    self.service_demand = copy.deepcopy(self.energy_demand)
    self.service_demand.raw_values = self.service_demand.values
    # service = energy / efficiency; zero efficiencies produce +/-inf which is
    # replaced with 1 as a neutral placeholder
    self.service_demand.int_values = DfOper.divi([self.service_demand.raw_values, eff])
    self.service_demand.int_values.replace([np.inf, -np.inf], 1, inplace=True)
def output_efficiency_stock(self):
    """Reduce stock efficiency results for output.

    Every outer key of the stock efficiency dictionary that contains a
    fully-aggregated ('all') entry is handed off to loop_stock_efficiency
    for reduction onto the stock outputs.
    """
    efficiency = self.stock.efficiency
    for other_index in efficiency.keys():
        if 'all' in efficiency[other_index]:
            self.loop_stock_efficiency(other_index)
def loop_stock_efficiency(self, other_index):
    """Reduce the 'all' efficiency result for one efficiency key onto stock outputs.

    Aggregates away the vintage (and, depending on the key, technology and/or
    energy) levels, stacks years back into the index and stores the result as
    stock_outputs.efficiency_<other_index>.

    Note: the previous implementation shadowed its argument by re-looping over
    every key of the efficiency dictionary; since the caller already iterates
    the keys, processing only the requested key produces the same final
    outputs without the redundant recomputation.

    Args:
        other_index (str): one of 'all', 'demand_technology' or 'final_energy'.

    Raises:
        ValueError: for any other key.
    """
    if other_index == 'all':
        exclude = ['vintage', 'demand_technology', 'final_energy']
    elif other_index == 'demand_technology':
        exclude = ['vintage']
    elif other_index == 'final_energy':
        exclude = ['demand_technology', 'vintage']
    else:
        raise ValueError('unknown key in stock efficiency dictionary')
    efficiency = self.stock.efficiency[other_index]['all']
    efficiency = efficiency.groupby(level=util.ix_excl(efficiency, exclude)).sum()
    efficiency = pd.DataFrame(efficiency.stack(), columns=['value'])
    util.replace_index_name(efficiency, 'year')
    attribute = 'efficiency_' + other_index
    setattr(self.stock_outputs, attribute, efficiency)
def vintage_year_array_expand(self, df, df_for_indexing, sd_subset):
    """Expand a dataframe so that years run across the columns.

    Takes the latest available year of *df* (determined from the 'year'
    level of *sd_subset*), replicates that column for every model year, and
    multiplies by *df_for_indexing* to restore the desired index/columns.

    Args:
        df (pd.DataFrame): input with a 'year' index level.
        df_for_indexing (pd.DataFrame): frame whose index/columns the result
            should conform to.
        sd_subset (pd.DataFrame): service demand subset used only to find the
            maximum available year.

    Returns:
        pd.DataFrame: one column per model year, sorted by column label.
    """
    level_values = sd_subset.index.get_level_values(level='year')
    max_column = max(level_values)
    df = df.unstack(level='year')
    df.columns = df.columns.droplevel()
    # keep only the latest year and replicate it across all model years
    df = df.loc[:, max_column].to_frame()
    for column in self.years:
        df[column] = df[max_column]
    df = util.DfOper.mult([df,df_for_indexing])
    # old-pandas column sort (DataFrame.sort was removed in pandas 0.20)
    df = df.sort(axis=1)
    if hasattr(df.columns, 'levels'):
        # the multiplication can introduce a MultiIndex on the columns; flatten it
        df.columns = df.columns.droplevel()
    return df
def project_stock(self, map_from='raw_values', service_dependent=False,stock_dependent=False, override=False, reference_run=False):
    """Project stock forward and run the stock rollover.

    Projects total and demand-technology stock, then initiates the stock
    rollover. If the stock has already been projected it is only reprojected
    when *override* is True.

    Args:
        map_from (str): stock attribute to project from.
        service_dependent (bool): service demand acts as an additional driver.
        stock_dependent (bool): stock acts as a driver for other inputs.
        override (bool): force reprojection of an already-projected stock.
        reference_run (bool): suppress scenario-specific measures/specified stocks.
    """
    self.stock.vintages = self.vintages
    self.stock.years = self.years
    # skip work if the stock was already projected, unless override forces it
    if not self.stock.projected or override:
        self.project_total_stock(map_from, service_dependent,stock_dependent)
        self.calculate_specified_stocks()
        self.project_demand_technology_stock(map_from, service_dependent,reference_run)
        self.stock.set_rollover_groups()
        self.tech_sd_modifier_calc()
        self.calculate_service_modified_stock()
        self.stock.calc_annual_stock_changes()
        self.calc_tech_survival_functions()
        self.calculate_sales_shares(reference_run)
        self.reconcile_sales_shares()
        self.stock_rollover()
        self.stock.projected = True
def project_total_stock(self, map_from, service_dependent=False, stock_dependent = False):
    """Project the total (technology-agnostic) stock to all model years.

    Args:
        map_from (str): stock attribute to project from ('values',
            'int_values' or raw input).
        service_dependent (bool): service demand acts as an additional driver.
        stock_dependent (bool): also produce an unfiltered (all-geography)
            total for use as a driver by dependent inputs.
    """
    if map_from == 'values':
        # already-projected values: aggregate away vintages and restore a year index level
        current_geography = cfg.primary_geography
        current_data_type = 'total'
        self.stock.values = self.stock.values.groupby(level=util.ix_excl(self.stock.values, 'vintage')).sum()
        self.stock.values = self.stack_and_reduce_years(self.stock.values, min(self.years), max(self.years))
        projected = True
    elif map_from == 'int_values':
        current_geography = cfg.primary_geography
        current_data_type = self.stock.current_data_type if hasattr(self.stock, 'current_data_type') else 'total'
        projected = False
    else:
        # raw input data: geography and data type come from the stock's own specification
        current_geography = self.stock.geography
        current_data_type = self.stock.input_type
        projected = False
    self.stock.project(map_from=map_from, map_to='total', current_geography=current_geography, additional_drivers=self.additional_drivers(stock_or_service='stock',service_dependent=service_dependent), current_data_type=current_data_type, projected=projected)
    self.stock.total = util.remove_df_levels(self.stock.total, ['demand_technology', 'final_energy']+cfg.removed_demand_levels )
    self.stock.total = self.stock.total.swaplevel('year',-1)
    if stock_dependent:
        # an unfiltered total is needed when other inputs depend on this stock
        self.stock.project(map_from=map_from, map_to='total_unfiltered', current_geography=current_geography,
                           additional_drivers=self.additional_drivers(stock_or_service='stock',service_dependent=service_dependent),
                           current_data_type=current_data_type, projected=projected,filter_geo=False)
        self.stock.total_unfiltered = util.remove_df_levels(self.stock.total_unfiltered, ['demand_technology', 'final_energy'])
        self.stock.total_unfiltered = self.stock.total_unfiltered.swaplevel('year',-1)
def project_demand_technology_stock(self, map_from, service_dependent,reference_run=False):
    """Project stock at the demand-technology level.

    Projects per-technology stock from the requested attribute, overlays
    technology stocks linked from other subsectors, applies any
    technology-specified stocks (unless this is a reference run), and finally
    raises the total stock where the technology specifications exceed it.

    Args:
        map_from (str): stock attribute to project from ('raw_values',
            'values' or 'int_values').
        service_dependent (bool): service demand acts as an additional driver.
        reference_run (bool): when True, technology specified stocks are not applied.
    """
    if map_from == 'values' or map_from == 'int_values':
        current_geography = cfg.primary_geography
        current_data_type = 'total'
        projected = True
    else:
        current_geography = self.stock.geography
        current_data_type = self.stock.input_type
        projected = False
    if 'demand_technology' in getattr(self.stock, map_from).index.names:
        # technology detail is present in the input, so it can be projected directly
        self.stock.project(map_from=map_from, map_to='technology', current_geography=current_geography,
                           additional_drivers=self.additional_drivers(stock_or_service='service',service_dependent=service_dependent), interpolation_method=None,extrapolation_method=None,
                           fill_timeseries=True,fill_value=np.nan,current_data_type=current_data_type,projected=projected)
        self.stock.technology = self.stock.technology.swaplevel('year',-1)
        self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'demand_technology',self.tech_ids,fill_value=np.nan)
        self.stock.technology.sort(inplace=True)
    else:
        # no technology detail in the input: broadcast the total stock index
        # across all technologies as NaN placeholders to be filled later
        full_names = self.stock.total.index.names + ['demand_technology']
        full_levels = self.stock.total.index.levels + [self.tech_ids]
        index = pd.MultiIndex.from_product(full_levels, names=full_names)
        self.stock.technology = self.stock.total.reindex(index)
        self.stock.technology = util.remove_df_levels(self.stock.technology, cfg.removed_demand_levels)
        if map_from == 'int_values':
            # interim values are only valid historically; blank out current and future years
            self.stock.technology[self.stock.technology.index.get_level_values('year')>=int(cfg.cfgfile.get('case','current_year'))] = np.nan
    self.remap_linked_stock()
    if self.stock.has_linked_demand_technology:
        # if a demand_technology stock is linked from other subsectors, we group by
        # the levels found in the total stock of the subsector
        full_names = [x for x in self.stock.total.index.names]
        full_names.insert(-1,'demand_technology')
        linked_demand_technology = self.stock.linked_demand_technology.groupby(
            level=[x for x in self.stock.linked_demand_technology.index.names if x in full_names]).sum()
        linked_demand_technology = linked_demand_technology.reorder_levels(full_names)
        linked_demand_technology.sort(inplace=True)
        self.stock.technology = self.stock.technology.reorder_levels(full_names)
        self.stock.technology.sort(inplace=True)
        # expand the levels of the subsector's endogenous demand_technology stock so
        # that it includes all years, because the linked stock specification covers all years
        linked_demand_technology = linked_demand_technology[linked_demand_technology.index.get_level_values('year')>=min(self.years)]
        linked_demand_technology = util.DfOper.none([linked_demand_technology,self.stock.technology],fill_value=np.nan)
        # if a demand_technology isn't determined by a link to another subsector,
        # replace it with the subsector stock specification
        linked_demand_technology = linked_demand_technology.mask(np.isnan(linked_demand_technology), self.stock.technology)
        # check whether this combination exceeds the total stock
        linked_demand_technology_total_check = util.remove_df_levels(linked_demand_technology, 'demand_technology')
        total_check = util.DfOper.subt((linked_demand_technology_total_check, self.stock.total))
        total_check[total_check < 0] = 0
        # if the total check fails, normalize all subsector inputs. We normalize the linked stocks as well.
        if total_check.sum().any() > 0:
            if (hasattr(self,'service_demand') and self.service_demand.is_stock_dependent) or (hasattr(self,'energy_demand') and self.energy_demand.is_stock_dependent):
                # stock-dependent demands rescale themselves, so leave the excess in place
                pass
            else:
                demand_technology_normal = linked_demand_technology.groupby(level=util.ix_excl(linked_demand_technology, 'demand_technology')).transform(lambda x: x / x.sum())
                stock_reduction = DfOper.mult((demand_technology_normal, total_check))
                linked_demand_technology = DfOper.subt((linked_demand_technology, stock_reduction))
        self.stock.technology = linked_demand_technology
    for demand_technology in self.technologies.values():
        if len(demand_technology.specified_stocks) and reference_run==False:
            for specified_stock in demand_technology.specified_stocks.values():
                try:
                    specified_stock.remap(map_from='values', current_geography = cfg.primary_geography, drivers=self.stock.total, driver_geography=cfg.primary_geography, fill_value=np.nan, interpolation_method=None, extrapolation_method=None)
                except Exception:
                    # surface remap failures instead of dropping into a debugger
                    # (this was previously a bare except with pdb.set_trace())
                    logging.exception('failed to remap specified stock for demand technology %s' % demand_technology.id)
                    raise
                self.stock.technology.sort(inplace=True)
                indexer = util.level_specific_indexer(self.stock.technology,'demand_technology',demand_technology.id)
                df = util.remove_df_levels(self.stock.technology.loc[indexer,:],'demand_technology')
                df = df.reorder_levels([x for x in self.stock.technology.index.names if x not in ['demand_technology']])
                df.sort(inplace=True)
                specified_stock.values = specified_stock.values.reorder_levels([x for x in self.stock.technology.index.names if x not in ['demand_technology']])
                df.sort(inplace=True)
                specified_stock.values.sort(inplace=True)
                # specified stock values take precedence; fall back to the projected stock where unspecified
                specified_stock.values = specified_stock.values.fillna(df)
                self.stock.technology.loc[indexer,:] = specified_stock.values.values
    self.max_total()
self.max_total()
def max_total(self):
tech_sum = util.remove_df_levels(self.stock.technology,'demand_technology')
# self.stock.total = self.stock.total.fillna(tech_sum)
self.stock.total.sort(inplace=True)
try:
self.stock.total[self.stock.total<tech_sum] = tech_sum
except:
pdb.set_trace()
def project_ee_measure_stock(self):
    """Build an aggregate stock from energy efficiency measure savings.

    Specifies stocks based on energy efficiency measure savings, i.e. a
    measure that saves 1 EJ represents an energy efficiency stock of 1 EJ.
    Only created when at least one EE measure produced a savings dataframe.
    """
    # one dataframe per measure, each tagged with a 'measure' index level
    measure_dfs = [self.reformat_measure_df(map_df=self.energy_forecast, measure=measure, measure_class=None,
                                            measure_att='savings', id=measure.id) for measure in self.energy_efficiency_measures.values()]
    if len(measure_dfs):
        self.ee_stock = AggregateStock()
        measure_df = DfOper.add(measure_dfs)
        # specified stock keeps measures as columns; total aggregates across measures
        self.ee_stock.specified = measure_df.unstack('measure')
        self.ee_stock.total = measure_df.groupby(level=util.ix_excl(measure_df, 'measure')).sum()
        self.ee_stock.set_rollover_groups()
        self.ee_stock.calc_annual_stock_changes()
        self.measure_stock_rollover('energy_efficiency_measures', 'ee_stock')
def project_fs_measure_stock(self):
    """Build an aggregate stock from fuel switching measure impacts.

    Specifies stocks based on fuel switching measure impact, i.e. a measure
    that switches 1 EJ from one final energy to another represents a fuel
    switching stock of 1 EJ. Only created when at least one FS measure
    produced a savings dataframe.
    """
    # one dataframe per measure, each tagged with a 'measure' index level
    measure_dfs = [self.reformat_measure_df(map_df=self.energy_forecast, measure=measure, measure_class='impact',
                                            measure_att='savings', id=measure.id) for measure in
                   self.fuel_switching_measures.values()]
    if len(measure_dfs):
        self.fs_stock = AggregateStock()
        measure_df = DfOper.add(measure_dfs)
        # specified stock keeps measures as columns; total aggregates across measures
        self.fs_stock.specified = measure_df.unstack('measure')
        self.fs_stock.total = measure_df.groupby(level=util.ix_excl(measure_df, 'measure')).sum()
        self.fs_stock.set_rollover_groups()
        self.fs_stock.calc_annual_stock_changes()
        self.measure_stock_rollover('fuel_switching_measures', 'fs_stock')
def project_sd_measure_stock(self):
    """Build an aggregate stock from service demand measure impacts.

    Specifies stocks based on service demand impact, i.e. a service demand
    measure that reduces LDV travel by 1M VMT represents a service demand
    measure stock of 1M VMT. Only created when at least one SD measure
    produced a savings dataframe.
    """
    # one dataframe per measure, each tagged with a 'measure' index level
    measure_dfs = [self.reformat_measure_df(map_df=self.service_demand.values, measure=measure, measure_class=None,
                                            measure_att='savings', id=measure.id) for measure in
                   self.service_demand_measures.values()]
    if len(measure_dfs):
        self.sd_stock = AggregateStock()
        measure_df = DfOper.add(measure_dfs)
        # specified stock keeps measures as columns; total aggregates across measures
        self.sd_stock.specified = measure_df.unstack('measure')
        self.sd_stock.total = measure_df.groupby(level=util.ix_excl(measure_df, 'measure')).sum()
        self.sd_stock.set_rollover_groups()
        self.sd_stock.calc_annual_stock_changes()
        self.measure_stock_rollover('service_demand_measures', 'sd_stock')
def measure_stock_rollover(self, measures_name, stock_name):
    """Run the vintage stock rollover for a measure stock.

    Builds survival functions and Markov matrices for the given measures,
    allocates empty values/retirements/sales frames, and runs a Rollover per
    rollover group. Skipped entirely when no measure has cost data, since
    the rollover results are only consumed by cost calculations.

    Args:
        measures_name (str): attribute name of the measures dict on self.
        stock_name (str): attribute name of the AggregateStock on self.
    """
    measures = getattr(self, measures_name)
    stock = getattr(self, stock_name)
    self.calc_measure_survival_functions(measures, stock)
    self.create_measure_survival_functions(measures, stock)
    self.create_measure_rollover_markov_matrices(measures, stock)
    levels = stock.rollover_group_names
    # pandas groupby wants a scalar level for a single level, a list otherwise
    if len(levels) == 1:
        rollover_groups = util.remove_df_levels(stock.total,'year').groupby(level=levels[0]).groups
    else:
        rollover_groups = util.remove_df_levels(stock.total,'year').groupby(level=levels).groups
    # values carry an extra pre-first vintage to hold the initial stock
    full_levels = stock.rollover_group_levels + [measures.keys()] + [
        [self.vintages[0] - 1] + self.vintages]
    full_names = stock.rollover_group_names + ['measure', 'vintage']
    columns = self.years
    index = pd.MultiIndex.from_product(full_levels, names=full_names)
    stock.values = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
    full_levels = stock.rollover_group_levels + [measures.keys()] + [self.vintages]
    index = pd.MultiIndex.from_product(full_levels, names=full_names)
    stock.retirements = util.empty_df(index=index, columns=['value'])
    stock.sales = util.empty_df(index=index, columns=['value'])
    if not any([x.cost.data for x in measures.values()]):
        #no need to do stock rollover if there are no costs
        return
    for elements in rollover_groups.keys():
        specified_stock = util.df_slice(stock.specified, elements, levels)
        # nothing specified for this group: leave the empty frames in place
        if np.all(specified_stock.sum().values==0):
            continue
        annual_stock_change = util.df_slice(stock.annual_stock_changes, elements, levels)
        self.rollover = Rollover(vintaged_markov_matrix=stock.vintaged_markov_matrix,
                                 initial_markov_matrix=stock.initial_markov_matrix,
                                 num_years=len(self.years), num_vintages=len(self.vintages),
                                 num_techs=len(measures.keys()), initial_stock=None,
                                 sales_share=None, stock_changes=annual_stock_change.values,
                                 specified_stock=specified_stock.values, specified_retirements=None,
                                 exceedance_tolerance=0.1)
        # only run when there is any stock change to roll over
        if abs(annual_stock_change.sum().values)!=0:
            self.rollover.run()
        stock_total, stock_new, stock_replacement, retirements, retirements_natural, retirements_early, sales_record, sales_new, sales_replacement = self.rollover.return_formatted_outputs()
        stock.values.loc[elements], stock.retirements.loc[elements, 'value'] = stock_total, retirements
        stock.sales.loc[elements, 'value'] = sales_record
def calculate_costs_measures(self):
    """Calculate levelized and annual costs for EE, FS and SD measure stocks.

    For each measure stock present on the subsector, levelized costs come
    from levelized measure cost values applied to the surviving stock
    ('values'), and annual costs from cost values applied to sales, spread
    over vintages via the vintage/year matrix.
    """
    # the vintage/year matrix is identical for every measure type, so build it once
    year_df = util.vintage_year_matrix(self.years, self.vintages)
    if hasattr(self, 'ee_stock'):
        self.ee_stock.levelized_costs = defaultdict(dict)
        self.ee_stock.levelized_costs['unspecified'] = self.rollover_measure_output(
            measures='energy_efficiency_measures', measure_class='cost', measure_att='values_level',
            stock_class='ee_stock',
            stock_att='values')
        self.ee_stock.annual_costs = defaultdict(dict)
        self.ee_stock.annual_costs['unspecified'] = util.DfOper.mult([self.rollover_measure_output(
            measures='energy_efficiency_measures', measure_class='cost', measure_att='values',
            stock_class='ee_stock',
            stock_att='sales'), year_df])
    if hasattr(self, 'fs_stock'):
        self.fs_stock.levelized_costs = defaultdict(dict)
        self.fs_stock.levelized_costs['unspecified'] = self.rollover_measure_output(
            measures='fuel_switching_measures', measure_class='cost', measure_att='values_level',
            stock_class='fs_stock',
            stock_att='values')
        self.fs_stock.annual_costs = defaultdict(dict)
        self.fs_stock.annual_costs['unspecified'] = util.DfOper.mult([self.rollover_measure_output(
            measures='fuel_switching_measures', measure_class='cost', measure_att='values', stock_class='fs_stock',
            stock_att='sales'), year_df])
    if hasattr(self, 'sd_stock'):
        self.sd_stock.levelized_costs = defaultdict(dict)
        self.sd_stock.levelized_costs['unspecified'] = self.rollover_measure_output(
            measures='service_demand_measures', measure_class='cost', measure_att='values_level',
            stock_class='sd_stock',
            stock_att='values')
        self.sd_stock.annual_costs = defaultdict(dict)
        self.sd_stock.annual_costs['unspecified'] = util.DfOper.mult([self.rollover_measure_output(
            measures='service_demand_measures', measure_class='cost', measure_att='values', stock_class='sd_stock',
            stock_att='sales'), year_df])
def rollover_measure_output(self, measures, measure_class=None, measure_att='values', stock_class=None,
                            stock_att='values',
                            stack_label=None, other_aggregate_levels=None, non_expandable_levels=None):
    """ Produces rollover outputs for a subsector measure stock.

    Multiplies per-measure attribute dataframes by the measure stock
    dataframe, with optional stacking of the year columns and additional
    aggregation.

    Args:
        measures (str): attribute name of the measures dict on self.
        measure_class (str): attribute on each measure holding the data class
            (e.g. 'cost'). NOTE(review): the default None would make
            hasattr(measure, None) below raise TypeError — confirm callers
            always pass a class name.
        measure_att (str): attribute of the measure class to use.
        stock_class (str): attribute name of the measure stock on self.
        stock_att (str): stock dataframe to multiply against ('values' or 'sales').
        stack_label (str): if given, stack year columns into an index level of this name.
        other_aggregate_levels: extra index levels to aggregate away.
        non_expandable_levels: levels that must not be expanded during multiplication.

    Returns:
        pd.DataFrame: the measure output (zeros when no measure has data).
    """
    stock_df = getattr(getattr(self, stock_class), stock_att)
    groupby_level = util.ix_excl(stock_df, ['vintage'])
    # determines index position for demand_technology and final energy element
    c = util.empty_df(stock_df.index, stock_df.columns.values, fill_value=0.)
    # puts technologies on the appropriate basis
    measure_dfs = [self.reformat_measure_df(stock_df, measure, measure_class, measure_att, measure.id) for measure
                   in getattr(self, measures).values() if
                   hasattr(measure, measure_class) and getattr(getattr(measure, measure_class),
                                                               'raw_values') is not None]
    if len(measure_dfs):
        measure_df = pd.concat(measure_dfs)
        c = DfOper.mult([measure_df, stock_df],expandable=(True, False), collapsible=(False, True),non_expandable_levels=non_expandable_levels)
    if stack_label is not None:
        c = c.stack()
        util.replace_index_name(c, stack_label)
        util.replace_column(c, 'value')
    if other_aggregate_levels is not None:
        groupby_level = util.ix_excl(c, other_aggregate_levels)
        c = c.groupby(level=groupby_level).sum()
    # TODO fix: 9999 is the placeholder energy id assigned by tech_to_energy
    if 'final_energy' in c.index.names:
        c = util.remove_df_elements(c, 9999, 'final_energy')
    return c
def reformat_measure_df(self, map_df, measure, measure_class, measure_att, id):
    """Reformat a measure dataframe for stock-level dataframe operations.

    Pulls the requested attribute off the measure (or one of its data
    classes), tags it with a 'measure' index level, and reorders levels to
    geography-first / vintage-last.

    Args:
        map_df: unused here; kept for caller symmetry.
        measure: the measure object.
        measure_class (str or None): attribute of the measure holding the data
            class; when None, measure_att is read from the measure directly.
        measure_att (str): attribute holding the dataframe.
        id: measure id used as the 'measure' level value (shadows the
            builtin; callers pass it by keyword, so the name must stay).

    Returns:
        pd.DataFrame or None: reindexed dataframe, or None when the measure
        has no data for the requested attribute.
    """
    if measure_class is None:
        measure_df = getattr(measure, measure_att)
    else:
        measure_class = getattr(measure, measure_class)
        measure_df = getattr(measure_class, measure_att) if hasattr(measure_class, measure_att) else None
    if measure_df is None:
        return
    else:
        # NOTE(review): this adds the 'measure' column/index level to the
        # measure's own dataframe in place (not a copy) — confirm intended
        measure_df['measure'] = id
        measure_df.set_index('measure', append=True, inplace=True)
        # geography first, vintage last, everything else in between
        level_order = [cfg.primary_geography] + util.ix_excl(measure_df, [
            cfg.primary_geography, 'vintage']) + ['vintage']
        level_order = [x for x in level_order if x in measure_df.index.names]
        measure_df = measure_df.reorder_levels(level_order)
        return measure_df
def create_measure_survival_functions(self, measures, stock):
    """Assemble per-measure survival/decay curves into DataFrames on the stock.

    For each of the four curve types, builds a DataFrame whose columns are
    the measure ids and whose rows are the curve values over time, and
    attaches it to *stock* under the curve's name.
    """
    curve_names = ('survival_vintaged', 'survival_initial_stock', 'decay_vintaged', 'decay_initial_stock')
    for curve in curve_names:
        per_measure = [getattr(measure, curve) for measure in measures.values()]
        setattr(stock, curve, pd.DataFrame(np.array(per_measure).T, columns=measures.keys()))
def create_tech_survival_functions(self):
    """Collect per-technology survival/decay curves into stock-level DataFrames.

    For each of the four curve types, builds a DataFrame with one column per
    technology id (in tech_ids order) and stores it on self.stock under the
    curve's name.
    """
    curve_names = ('survival_vintaged', 'survival_initial_stock', 'decay_vintaged', 'decay_initial_stock')
    for curve in curve_names:
        per_tech = [getattr(self.technologies[tech_id], curve) for tech_id in self.tech_ids]
        setattr(self.stock, curve, pd.DataFrame(np.array(per_tech).T, columns=self.tech_ids))
def create_rollover_markov_matrices(self):
    """Build the vintaged and initial-stock Markov matrices for the technology stock rollover."""
    n_techs = len(self.tech_ids)
    n_years = len(self.years)
    stock = self.stock
    vintaged_vector = util.create_markov_vector(stock.decay_vintaged.values, stock.survival_vintaged.values)
    initial_vector = util.create_markov_vector(stock.decay_initial_stock.values, stock.survival_initial_stock.values)
    stock.vintaged_markov_matrix = util.create_markov_matrix(vintaged_vector, n_techs, n_years, stock.spy)
    stock.initial_markov_matrix = util.create_markov_matrix(initial_vector, n_techs, n_years, stock.spy)
def create_measure_rollover_markov_matrices(self, measures, stock):
    """Build the vintaged and initial-stock Markov matrices for a measure stock rollover."""
    n_measures = len(measures.keys())
    n_years = len(self.years)
    vintaged_vector = util.create_markov_vector(stock.decay_vintaged.values, stock.survival_vintaged.values)
    initial_vector = util.create_markov_vector(stock.decay_initial_stock.values, stock.survival_initial_stock.values)
    stock.vintaged_markov_matrix = util.create_markov_matrix(vintaged_vector, n_measures, n_years, stock.spy)
    stock.initial_markov_matrix = util.create_markov_matrix(initial_vector, n_measures, n_years, stock.spy)
def format_demand_technology_stock(self):
    """Format the demand-technology stock for the rollover.

    Aggregates the technology stock down to the rollover group levels (plus
    technology and year), reindexes so every technology id is present, and
    unstacks technologies into columns.
    """
    needed_names = self.stock.rollover_group_names + ['demand_technology'] + ['year']
    # only group by levels that actually exist on the dataframe
    groupby_levels = [x for x in self.stock.technology.index.names if x in needed_names]
    if len(groupby_levels) > 0:
        self.stock.technology = self.stock.technology.groupby(level=groupby_levels).sum()
    # make sure every technology id is represented, even with no stock
    self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'demand_technology',self.tech_ids)
    self.stock.technology = self.stock.technology.unstack(level='demand_technology')
def remap_linked_stock(self):
    """Remap stocks specified in linked subsectors onto this subsector.

    Concatenates linked stock dataframes, remaps them using this subsector's
    own stock as the driver, and flags on self.stock whether any linked
    demand-technology stock exists.
    """
    df_concat = self.linked_stock.values()
    if len(df_concat) > 0:
        self.stock.linked_demand_technology = pd.concat(df_concat, axis=0)
        # choose the remap driver: total stock when no endogenous technology
        # stock exists yet, otherwise the technology stock
        if np.all(np.isnan(self.stock.technology.values)):
            subsector_stock = util.remove_df_levels(self.stock.total, 'demand_technology')
        else:
            subsector_stock = util.remove_df_levels(self.stock.technology,'year')
        # zeros/NaNs in the driver are replaced with a tiny value to avoid
        # divide-by-zero during the remap
        self.stock.remap(map_from='linked_demand_technology', map_to='linked_demand_technology', drivers=subsector_stock.replace([0,np.nan],[1e-10,1e-10]),
                         current_geography=cfg.primary_geography, current_data_type='total',
                         time_index=self.years, driver_geography=cfg.primary_geography, interpolation_method=None, extrapolation_method=None)
        # zeros mean "no linked specification"; mark them NaN so they can be filled later
        self.stock.linked_demand_technology[self.stock.linked_demand_technology==0]=np.nan
        self.stock.has_linked_demand_technology = True
    else:
        self.stock.has_linked_demand_technology = False
def additional_drivers(self, stock_or_service = None,stock_dependent=False, service_dependent=False):
    """Create additional projection drivers from linked subsectors.

    Args:
        stock_or_service (str): 'service' enables the linked service demand
            driver; 'stock' (or anything else) skips it.
        stock_dependent (bool): include this subsector's unfiltered total
            stock as a driver.
        service_dependent (bool): include this subsector's unfiltered service
            demand values as a driver.

    Returns:
        list of DataFrames, a single DataFrame, or None when no driver applies.
    """
    additional_drivers = []
    if stock_or_service == 'service':
        if len(self.linked_service_demand_drivers):
            # service demand captured by linked subsectors reduces what this subsector must serve
            linked_driver = 1 - DfOper.add(self.linked_service_demand_drivers.values())
            additional_drivers.append(linked_driver)
    if stock_dependent:
        if len(additional_drivers):
            additional_drivers.append(self.stock.geo_map(attr='total_unfiltered',current_geography=cfg.primary_geography, converted_geography=cfg.disagg_geography, current_data_type='total', inplace=False))
        else:
            # NOTE(review): this branch makes additional_drivers a DataFrame
            # rather than a list, so a subsequent service_dependent append
            # would hit DataFrame.append instead of list.append — confirm
            # both flags are never True together
            additional_drivers = self.stock.geo_map(attr='total_unfiltered',current_geography=cfg.primary_geography,converted_geography=cfg.disagg_geography, current_data_type='total', inplace=False)
    if service_dependent:
        if len(additional_drivers):
            additional_drivers.append(self.service_demand.geo_map(attr='values_unfiltered',current_geography=cfg.primary_geography,converted_geography=cfg.disagg_geography, current_data_type='total', inplace=False))
        else:
            additional_drivers = self.service_demand.geo_map(attr='values_unfiltered',current_geography=cfg.primary_geography,converted_geography=cfg.disagg_geography, current_data_type='total', inplace=False)
    if len(additional_drivers) == 0:
        additional_drivers = None
    return additional_drivers
def project_service_demand(self, map_from='raw_values', stock_dependent=False, service_dependent=False):
    """Project service demand to all model years and apply SD measures.

    Args:
        map_from (str): service demand attribute to project from.
        stock_dependent (bool): use the subsector stock as an additional driver.
        service_dependent (bool): also produce an unfiltered projection for
            use by dependent subsectors.
    """
    self.service_demand.vintages = self.vintages
    self.service_demand.years = self.years
    # geography/data-type/projection flags depend on how far the input has
    # already been processed
    if map_from == 'raw_values':
        current_geography = self.service_demand.geography
        current_data_type = self.service_demand.input_type
        projected = False
    elif map_from == 'int_values':
        current_geography = cfg.primary_geography
        current_data_type = self.service_demand.current_data_type if hasattr(self.service_demand, 'current_data_type') else 'total'
        projected = False
    else:
        current_geography = cfg.primary_geography
        current_data_type = 'total'
        projected = True
    self.service_demand.project(map_from=map_from, map_to='values', current_geography=current_geography,
                                additional_drivers=self.additional_drivers(stock_or_service='service',stock_dependent=stock_dependent),current_data_type=current_data_type,projected=projected)
    if service_dependent:
        # an unfiltered projection is needed when other subsectors depend on
        # this service demand
        self.service_demand.project(map_from=map_from, map_to='values_unfiltered', current_geography=current_geography,
                                    additional_drivers=self.additional_drivers(stock_or_service='service',stock_dependent=stock_dependent),current_data_type=current_data_type,projected=projected,filter_geo=False)
    self.service_demand.values = util.remove_df_levels(self.service_demand.values,cfg.removed_demand_levels)
    self.service_demand_forecast = self.service_demand.values
    # calculates the service demand change from service demand measures
    self.service_demand_savings_calc()
def project_energy_demand(self, map_from='raw_values', stock_dependent=False, service_dependent=False):
    """Project energy demand to all model years.

    Args:
        map_from (str): energy demand attribute to project from.
        stock_dependent (bool): accepted for signature symmetry with
            project_service_demand but not used here.
        service_dependent (bool): use service demand as an additional driver.
    """
    self.energy_demand.vintages = self.vintages
    self.energy_demand.years = self.years
    # geography/data-type/projection flags depend on how far the input has
    # already been processed
    if map_from == 'raw_values':
        current_geography = self.energy_demand.geography
        current_data_type = self.energy_demand.input_type
        projected = False
    elif map_from == 'int_values':
        current_geography = cfg.primary_geography
        current_data_type = self.energy_demand.current_data_type if hasattr(self.energy_demand, 'current_data_type') else 'total'
        projected = False
    else:
        current_geography = cfg.primary_geography
        current_data_type = 'total'
        projected = True
    self.energy_demand.project(map_from=map_from, map_to='values', current_geography=current_geography, additional_drivers=self.additional_drivers(stock_or_service='service',service_dependent=service_dependent),current_data_type=current_data_type, projected=projected)
    self.energy_demand.values = util.remove_df_levels(self.energy_demand.values,cfg.removed_demand_levels)
def calculate_sales_shares(self,reference_run=False):
    """Calculate reference and case sales shares for every technology, in tech-id order."""
    for tech_id in self.tech_ids:
        technology = self.technologies[tech_id]
        technology.calculate_sales_shares('reference_sales_shares')
        technology.calculate_sales_shares('sales_shares',reference_run)
def calculate_specified_stocks(self):
    """Have every demand technology compute its specified stocks."""
    for technology in self.technologies.values():
        technology.calculate_specified_stocks()
def reconcile_sales_shares(self):
    """Reconcile each technology's sales shares against the stock rollover index.

    Both the case sales shares and the reference sales shares are reconciled
    to the rollover group levels plus a vintage level spanning the model years.
    """
    levels_needed = self.stock.rollover_group_levels + [self.years]
    names_needed = self.stock.rollover_group_names + ['vintage']
    for technology in self.technologies.values():
        for share_attr in ('sales_shares', 'reference_sales_shares'):
            technology.reconcile_sales_shares(share_attr, levels_needed, names_needed)
def calculate_total_sales_share(self, elements, levels):
    """Combine measure and reference sales shares into one normalized array.

    Measure shares take priority; reference shares are scaled into whatever
    gap the measure shares leave. When no reference shares were input, a
    fallback reference is derived from the initial stock composition.

    Args:
        elements, levels: rollover group selectors.

    Returns:
        tuple: (normalized array of shape (num_years, num_techs, num_techs),
        bool indicating whether usable reference sales shares exist).
    """
    ss_measure = self.helper_calc_sales_share(elements, levels, reference=False)
    # reference shares may only fill the gap left by measure-specified shares
    space_for_reference = 1 - np.sum(ss_measure, axis=1)
    ss_reference = self.helper_calc_sales_share(elements, levels, reference=True, space_for_reference=space_for_reference)
    if np.sum(ss_reference)==0:
        # no reference sales shares were input: start from an identity
        # (like-for-like replacement) reference
        ref_array = np.tile(np.eye(len(self.tech_ids)), (len(self.years), 1, 1))
        # if the technology stock fully specifies the total, approximate
        # steady-state sales from stock / book life instead
        if np.nansum(util.df_slice(self.stock.technology, elements, levels).values[0]) >= np.nansum(util.df_slice(self.stock.total, elements, levels).values[0]):
            initial_stock = util.df_slice(self.stock.technology, elements, levels).replace(np.nan,0).values[0]
            tech_lifetimes = np.array([x.book_life for x in self.technologies.values()])
            x = initial_stock/tech_lifetimes
            x = np.nan_to_num(x)
            x /= sum(x)
            # guard against 0/0 when the initial stock is empty
            x = np.nan_to_num(x)
            for i, tech_id in enumerate(self.tech_ids):
                for sales_share in self.technologies[tech_id].sales_shares.values():
                    if sales_share.replaced_demand_tech_id is None:
                        ref_array[:,:,i] = x
        ss_reference = SalesShare.scale_reference_array_to_gap(ref_array, space_for_reference)
        #sales shares are always 1 with only one demand_technology so the default can be used as a reference
        if len(self.tech_ids)>1 and np.sum(ss_measure)<1:
            reference_sales_shares = False
        else:
            reference_sales_shares = True
    else:
        reference_sales_shares = True
    # return SalesShare.normalize_array(ss_reference+ss_measure, retiring_must_have_replacement=True)
    # todo make retiring_must_have_replacement true after all data has been put in db
    return SalesShare.normalize_array(ss_reference + ss_measure, retiring_must_have_replacement=False), reference_sales_shares
def calculate_total_sales_share_after_initial(self, elements, levels, initial_stock):
    """Combine measure and initial-stock-derived reference sales shares.

    Like calculate_total_sales_share, but the reference shares are always
    seeded from the supplied initial stock (stock / book life, normalized to
    shares) for any technology whose sales shares are not replacement-specific.

    Args:
        elements, levels: rollover group selectors.
        initial_stock (np.ndarray): per-technology initial stock values.

    Returns:
        np.ndarray: normalized array of shape (num_years, num_techs, num_techs).
    """
    ss_measure = self.helper_calc_sales_share(elements, levels, reference=False)
    # reference shares may only fill the gap left by measure-specified shares
    space_for_reference = 1 - np.sum(ss_measure, axis=1)
    ref_array = np.tile(np.eye(len(self.tech_ids)), (len(self.years), 1, 1))
    tech_lifetimes = np.array([x.book_life for x in self.technologies.values()])
    # approximate steady-state sales as stock / book life, normalized to shares
    x = initial_stock/tech_lifetimes
    x = np.nan_to_num(x)
    x /= sum(x)
    # guard against 0/0 when the initial stock sums to zero — matches the
    # identical cleanup in calculate_total_sales_share
    x = np.nan_to_num(x)
    for i, tech_id in enumerate(self.tech_ids):
        for sales_share in self.technologies[tech_id].sales_shares.values():
            if sales_share.replaced_demand_tech_id is None:
                ref_array[:,:,i] = x
    ss_reference = SalesShare.scale_reference_array_to_gap(ref_array, space_for_reference)
    #sales shares are always 1 with only one demand_technology so the default can be used as a reference
    return SalesShare.normalize_array(ss_reference + ss_measure, retiring_must_have_replacement=False)
def helper_calc_sales_share(self, elements, levels, reference, space_for_reference=None):
    """Assemble the 3-D sales-share array for one rollover group.

    Args:
        elements, levels: rollover group selectors used to slice each sales
            share dataframe.
        reference (bool): use reference sales shares (scaled into the gap
            left by measure shares) instead of case sales shares.
        space_for_reference (np.ndarray): per-vintage share remaining for
            reference shares; required when reference is True.

    Returns:
        np.ndarray: shape (num_years, num_techs, num_techs), indexed as
        [vintage, replacing tech, retiring tech].
    """
    num_techs = len(self.tech_ids)
    tech_lookup = dict(zip(self.tech_ids, range(num_techs)))
    num_years = len(self.years)
    # ['vintage', 'replacing tech id', 'retiring tech id']
    ss_array = np.zeros(shape=(num_years, num_techs, num_techs))
    # tech_ids must be sorted
    # normalize ss in one of two ways
    if reference:
        for tech_id in self.tech_ids:
            for sales_share in self.technologies[tech_id].reference_sales_shares.values():
                repl_index = tech_lookup[tech_id]
                # reference shares apply regardless of which technology retires
                reti_index = slice(None)
                # demand_technology sales share dataframe may not have all elements of stock dataframe
                if any([element not in sales_share.values.index.levels[
                    util.position_in_index(sales_share.values, level)] for element, level in
                        zip(elements, levels)]):
                    continue
                ss_array[:, repl_index, reti_index] += util.df_slice(sales_share.values, elements, levels).values
        ss_array = SalesShare.scale_reference_array_to_gap(ss_array, space_for_reference)
    else:
        for tech_id in self.tech_ids:
            for sales_share in self.technologies[tech_id].sales_shares.values():
                repl_index = tech_lookup[sales_share.demand_technology_id]
                if sales_share.replaced_demand_tech_id is not None and not tech_lookup.has_key(sales_share.replaced_demand_tech_id):
                    #if you're replacing a demand tech that doesn't have a sales share or stock in the model, this is zero and so we continue the loop
                    continue
                reti_index = tech_lookup[sales_share.replaced_demand_tech_id] if \
                    sales_share.replaced_demand_tech_id is not None and tech_lookup.has_key(sales_share.replaced_demand_tech_id) else \
                    slice(None)
                # TODO address the discrepancy when a demand tech is specified
                try:
                    ss_array[:, repl_index, reti_index] += util.df_slice(sales_share.values, elements, levels).values
                except:
                    # NOTE(review): bare except falls back to a flattened
                    # broadcast when the slice shape mismatches — consider
                    # narrowing to ValueError
                    ss_array[:, repl_index, reti_index] += util.df_slice(sales_share.values, elements, levels).values.flatten()
        ss_array = SalesShare.cap_array_at_1(ss_array)
    return ss_array
    def tech_to_energy(self, df, tech_class):
        """
        reformats a dataframe with a demand_technology index to one with a final energy index
        based on a lookup of the final_energy_id by demand_technology class (i.e. aux_efficiency will provide the aux energy type id)

        Args:
            df (pd.DataFrame): dataframe with a 'demand_technology' index level.
            tech_class (str): name of the technology attribute holding the
                efficiency object (e.g. 'efficiency_main', 'efficiency_aux').

        Returns:
            pd.DataFrame with the 'demand_technology' level renamed to
            'final_energy'; technologies mapping to the same energy are summed.
        """
        df = df.copy()
        # map each technology id to the final_energy_id of the requested class
        rename_dict = {}
        for tech in self.technologies.keys():
            if hasattr(getattr(self.technologies[tech], tech_class), 'final_energy_id'):
                rename_dict[tech] = getattr(getattr(self.technologies[tech], tech_class), 'final_energy_id')
            else:
                # if no energy type exists for the demand_technology class, put in 9999 as placeholder
                rename_dict[tech] = 9999
        index = df.index
        tech_position = index.names.index('demand_technology')
        # relabel only the demand_technology level; all other levels pass through unchanged
        # NOTE(review): set_levels(..., inplace=True) mutates the index object in place;
        # this API is removed in modern pandas -- confirm the pinned pandas version supports it
        index.set_levels(
            [[rename_dict.get(item, item) for item in names] if i == tech_position else names for i, names in
             enumerate(index.levels)], inplace=True)
        util.replace_index_name(df, 'final_energy', 'demand_technology')
        # aggregate duplicate rows created when multiple techs share a final energy
        # (df.index.names on the right-hand side is evaluated before reassignment)
        df = df.reset_index().groupby(df.index.names).sum()
        return df
    def tech_sd_modifier_calc(self):
        """Assemble per-technology service demand modifiers into one dataframe.

        Builds ``self.tech_sd_modifier``: a dataframe indexed by rollover group,
        demand_technology, and vintage (including the pre-initial vintage), with
        one column per year, defaulting to 1.0 where a technology supplies no
        modifier data. Only runs for service-demand-dependent equipment stocks.
        """
        if self.stock.is_service_demand_dependent and self.stock.demand_stock_unit_type == 'equipment':
            # full index: rollover groups x technologies x vintages (with one pre-initial vintage)
            full_levels = self.stock.rollover_group_levels + [self.technologies.keys()] + [
                [self.vintages[0] - 1] + self.vintages]
            full_names = self.stock.rollover_group_names + ['demand_technology', 'vintage']
            columns = self.years
            index = pd.MultiIndex.from_product(full_levels, names=full_names)
            # default modifier of 1.0 leaves service demand unchanged
            sd_modifier = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'),fill_value=1.0)
            for tech in self.tech_ids:
                if self.technologies[tech].service_demand_modifier.raw_values is not None:
                    tech_modifier = getattr(getattr(self.technologies[tech], 'service_demand_modifier'), 'values')
                    # expand the tech's modifier to the full index, back-filling missing vintages
                    tech_modifier = util.expand_multi(tech_modifier, sd_modifier.index.levels, sd_modifier.index.names,
                                                      drop_index='demand_technology').fillna(method='bfill')
                    indexer = util.level_specific_indexer(sd_modifier, 'demand_technology', tech)
                    sd_modifier.loc[indexer, :] = tech_modifier.values
            self.tech_sd_modifier = sd_modifier
    def calculate_service_modified_sales(self,elements,levels,sales_share):
        """Scale the sales-share array by relative service demand modifiers.

        For each year, builds a (tech x tech) matrix of modifier ratios via an
        outer product and multiplies it into ``sales_share`` so that sales shift
        toward technologies with relatively lower service demand per unit stock.

        Args:
            elements/levels: identify the rollover group to slice from
                ``self.tech_sd_modifier``.
            sales_share (np.ndarray): (years x techs x techs) array, modified
                in place and returned.
        """
        sd_modifier = copy.deepcopy(self.tech_sd_modifier)
        # mask out non-positive modifiers before averaging
        sd_modifier = sd_modifier[sd_modifier>0]
        service_modified_sales = util.df_slice(sd_modifier,elements,levels,drop_level=False)
        # average over years, collapsing to one value per (group, tech, vintage)
        service_modified_sales =service_modified_sales.groupby(level=levels+['demand_technology','vintage']).mean().mean(axis=1).to_frame()
        service_modified_sales = service_modified_sales.swaplevel('demand_technology','vintage')
        # NOTE(review): DataFrame.sort(inplace=True) is the legacy pandas index sort
        # (sort_index in modern pandas) -- confirm pinned pandas version
        service_modified_sales.sort(inplace=True)
        service_modified_sales = service_modified_sales.fillna(1)
        service_modified_sales = service_modified_sales.unstack('demand_technology').values
        # per-vintage outer product of modifier ratios; [1:] drops the pre-initial vintage
        service_modified_sales = np.array([np.outer(i, 1./i).T for i in service_modified_sales])[1:]
        sales_share *= service_modified_sales
        return sales_share
    def calculate_service_modified_stock(self):
        """Adjust the total stock for service demand modifiers on specified technology stocks.

        When specified technology stocks have service demand modifiers, the
        total stock is adjusted by the difference between the specified stock
        and its service-adjusted equivalent, and floored so the total never
        falls below the specified technology stock. Only runs for
        service-demand-dependent equipment stocks.
        """
        if self.stock.is_service_demand_dependent and self.stock.demand_stock_unit_type == 'equipment':
            sd_modifier = copy.deepcopy(self.tech_sd_modifier)
            # mask out non-positive modifiers
            sd_modifier = sd_modifier[sd_modifier>0]
            # collapse vintages (mean), then stack years into the index
            sd_modifier = util.remove_df_levels(sd_modifier,'vintage',agg_function='mean').stack(-1).to_frame()
            util.replace_index_name(sd_modifier,'year')
            sd_modifier.columns = ['value']
            spec_tech_stock = copy.deepcopy(self.stock.technology).replace(np.nan,0)
            util.replace_index_name(spec_tech_stock,'demand_technology')
            service_adj_tech_stock = util.DfOper.mult([spec_tech_stock,sd_modifier])
            # adjustment = specified stock minus its service-adjusted equivalent
            total_stock_adjust = util.DfOper.subt([util.remove_df_levels(spec_tech_stock,'demand_technology'),util.remove_df_levels(service_adj_tech_stock,'demand_technology')]).replace(np.nan,0)
            self.stock.total = util.DfOper.add([self.stock.total, total_stock_adjust])
            # total stock can never be smaller than the specified technology stock
            self.stock.total[self.stock.total<util.remove_df_levels(spec_tech_stock,'demand_technology')] = util.remove_df_levels(spec_tech_stock,'demand_technology')
def sales_share_perturbation(self, elements, levels, sales_share):
# we don't always have a perturbation object because this is introduced only when we are making a supply curve
if self.perturbation is None:
return sales_share
num_techs = len(self.tech_ids)
tech_lookup = dict(zip(self.tech_ids, range(num_techs)))
num_years = len(self.years)
years_lookup = dict(zip(self.years, range(num_years)))
for i, row in self.perturbation.filtered_sales_share_changes(elements, levels).reset_index().iterrows():
y_i = years_lookup[int(row['year'])]
dt_i = tech_lookup[self.perturbation.new_techs[int(row['demand_technology_id'])]]
rdt_i = tech_lookup[int(row['replaced_demand_technology_id'])]
if dt_i == rdt_i:
# if the demand and replace technology are the same, we don't do anything
continue
sales_share[y_i, dt_i, :] += sales_share[y_i, rdt_i, :] * row['adoption_achieved']
sales_share[y_i, rdt_i, :] *= 1-row['adoption_achieved']
# for all future years, we return our tech back to the original replaced tech
sales_share[y_i+1:, rdt_i, dt_i] = 1
sales_share[y_i+1:, dt_i, dt_i] = 0
return sales_share
def set_up_empty_stock_rollover_output_dataframes(self):
full_levels = self.stock.rollover_group_levels + [self.technologies.keys()] + [[self.vintages[0] - 1] + self.vintages]
full_names = self.stock.rollover_group_names + ['demand_technology', 'vintage']
columns = self.years
index = pd.MultiIndex.from_product(full_levels, names=full_names)
self.stock.values = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
self.stock.values_new = copy.deepcopy(self.stock.values)
self.stock.values_replacement = copy.deepcopy(self.stock.values)
full_levels = self.stock.rollover_group_levels + [self.technologies.keys()] + [self.vintages]
index = pd.MultiIndex.from_product(full_levels, names=full_names)
self.stock.retirements = util.empty_df(index=index, columns=['value'])
self.stock.retirements_early = copy.deepcopy(self.stock.retirements)
self.stock.retirements_natural = copy.deepcopy(self.stock.retirements)
self.stock.sales = util.empty_df(index=index, columns=['value'])
self.stock.sales_new = copy.deepcopy(self.stock.sales)
self.stock.sales_replacement = copy.deepcopy(self.stock.sales)
def stock_rollover(self):
""" Stock rollover function."""
self.format_demand_technology_stock()
self.create_tech_survival_functions()
self.create_rollover_markov_matrices()
self.set_up_empty_stock_rollover_output_dataframes()
rollover_groups = self.stock.total.groupby(level=self.stock.rollover_group_names).groups
if self.stock.is_service_demand_dependent and self.stock.demand_stock_unit_type == 'equipment':
self.tech_sd_modifier_calc()
for elements in rollover_groups.keys():
elements = util.ensure_tuple(elements)
#returns sales share and a flag as to whether the sales share can be used to parameterize initial stock.
sales_share, initial_sales_share = self.calculate_total_sales_share(elements, self.stock.rollover_group_names) # group is not necessarily the same for this other dataframe
if np.any(np.isnan(sales_share)):
raise ValueError('Sales share has NaN values in subsector ' + str(self.id))
initial_stock, rerun_sales_shares = self.calculate_initial_stock(elements, sales_share, initial_sales_share)
if rerun_sales_shares:
sales_share = self.calculate_total_sales_share_after_initial(elements, self.stock.rollover_group_names, initial_stock)
# the perturbation object is used to create supply curves of demand technologies
if self.perturbation is not None:
sales_share = self.sales_share_perturbation(elements, self.stock.rollover_group_names, sales_share)
if self.stock.is_service_demand_dependent and self.stock.demand_stock_unit_type == 'equipment':
sales_share = self.calculate_service_modified_sales(elements,self.stock.rollover_group_names,sales_share)
demand_technology_stock = self.stock.return_stock_slice(elements, self.stock.rollover_group_names)
if cfg.evolved_run=='true':
sales_share[len(self.years) -len(cfg.supply_years):] = 1/float(len(self.tech_ids))
annual_stock_change = util.df_slice(self.stock.annual_stock_changes, elements, self.stock.rollover_group_names)
self.rollover = Rollover(vintaged_markov_matrix=self.stock.vintaged_markov_matrix,
initial_markov_matrix=self.stock.initial_markov_matrix,
num_years=len(self.years), num_vintages=len(self.vintages),
num_techs=len(self.tech_ids), initial_stock=initial_stock,
sales_share=sales_share, stock_changes=annual_stock_change.values,
specified_stock=demand_technology_stock.values, specified_retirements=None,
steps_per_year=self.stock.spy,lifetimes=np.array([self.technologies[tech_id].book_life for tech_id in self.tech_ids]))
try:
self.rollover.run()
except:
pdb.set_trace()
stock, stock_new, stock_replacement, retirements, retirements_natural, retirements_early, sales_record, sales_new, sales_replacement = self.rollover.return_formatted_outputs()
self.stock.values.loc[elements], self.stock.values_new.loc[elements], self.stock.values_replacement.loc[elements] = stock, stock_new, stock_replacement
self.stock.retirements.loc[elements, 'value'], self.stock.retirements_natural.loc[elements, 'value'], \
self.stock.retirements_early.loc[elements, 'value'] = retirements, retirements_natural, retirements_early
self.stock.sales.loc[elements, 'value'], self.stock.sales_new.loc[elements, 'value'], \
self.stock.sales_replacement.loc[elements, 'value'] = sales_record, sales_new, sales_replacement
self.stock_normalize(self.stock.rollover_group_names)
self.financial_stock()
if self.sub_type != 'link':
self.fuel_switch_stock_calc()
def calculate_initial_stock(self, elements, sales_share, initial_sales_share):
initial_total = util.df_slice(self.stock.total, elements, self.stock.rollover_group_names).values[0]
demand_technology_years = self.stock.technology.sum(axis=1)[self.stock.technology.sum(axis=1)>0].index.get_level_values('year')
if len(demand_technology_years):
min_demand_technology_year = min(demand_technology_years)
else:
min_demand_technology_year = None
#Best way is if we have all demand_technology stocks specified
if (np.nansum(self.stock.technology.loc[elements,:].values[0])/self.stock.total.loc[elements,:].values[0])>.99:
initial_stock = self.stock.technology.loc[elements,:].values[0]
# gross up if it is just under 100% of the stock represented
initial_stock /= np.nansum(self.stock.technology.loc[elements,:].values[0])/initial_total
rerun_sales_shares = False
#Second best way is if we have all demand_technology stocks specified in some year before the current year
elif min_demand_technology_year is not None and min_demand_technology_year<=int(cfg.cfgfile.get('case','current_year')) and np.nansum(self.stock.technology.loc[elements+(min_demand_technology_year,),:].values)==self.stock.total.loc[elements+(min_demand_technology_year,),:].values:
initial_stock = self.stock.technology.loc[elements+(min_demand_technology_year,),:].values/np.nansum(self.stock.technology.loc[elements+(min_demand_technology_year,),:].values) * initial_total
rerun_sales_shares = False
#Third best way is if we have an initial sales share
elif initial_sales_share:
initial_stock = self.stock.calc_initial_shares(initial_total=initial_total, transition_matrix=sales_share[0], num_years=len(self.years))
rerun_sales_shares = True
ss_measure = self.helper_calc_sales_share(elements, self.stock.rollover_group_names, reference=False)
if np.sum(ss_measure) == 0:
for i in range(1,len(sales_share)):
if np.any(sales_share[0]!=sales_share[i]):
rerun_sales_shares=False
#Fourth best way is if we have specified some technologies in the initial year, even if not all
elif min_demand_technology_year:
initial_stock = self.stock.technology.loc[elements+(min_demand_technology_year,),:].values/np.nansum(self.stock.technology.loc[elements+(min_demand_technology_year,),:].values) * initial_total
rerun_sales_shares = False
elif np.nansum(initial_total) == 0:
initial_stock = np.zeros(len(self.tech_ids))
rerun_sales_shares = False
else:
pdb.set_trace()
raise ValueError('user has not input stock data with technologies or sales share data so the model cannot determine the demand_technology composition of the initial stock in subsector %s' %self.id)
return initial_stock, rerun_sales_shares
def determine_need_for_aux_efficiency(self):
""" determines whether auxiliary efficiency calculations are necessary. Used to avoid unneccessary calculations elsewhere """
utility_factor = []
for demand_technology in self.technologies.values():
if demand_technology.efficiency_main.utility_factor == 1:
utility_factor.append(False)
else:
utility_factor.append(True)
if any(utility_factor):
self.stock.aux = True
else:
self.stock.aux = False
    def stock_normalize(self, levels):
        """returns normalized stocks for use in other subsector calculations

        Populates:
            stock.values_normal: stock share within each group defined by ``levels``
            stock.values_normal_tech: stock share by vintage within each group/technology
            stock.values_efficiency (non-'link' subsectors only): stock weighted by
                main/aux utility factors and indexed by final energy, plus three
                normalized variants (overall, by tech, by energy)
        """
        # normalization of complete stock
        self.stock.values_normal = self.stock.values.groupby(level=levels).transform(lambda x: x / x.sum())
        # normalization of demand_technology stocks (i.e. % by vintage/year)
        self.stock.values_normal_tech = self.stock.values.groupby(
            level=util.ix_excl(self.stock.values, ['vintage'])).transform(lambda x: x / x.sum()).fillna(0)
        self.stock.values_efficiency_main = copy.deepcopy(self.stock.values)
        # this section normalizes stocks used for efficiency calculations. There is a different process if the stock
        # has technologies that use multiple energy types. If it does, it must keep separate dataframes for main and auxiliary efficiency
        if self.sub_type != 'link':
            self.determine_need_for_aux_efficiency()
            if self.stock.aux:
                self.stock.values_efficiency_aux = copy.deepcopy(self.stock.values)
            for demand_technology in self.technologies.keys():
                demand_technology_class = self.technologies[demand_technology]
                indexer = util.level_specific_indexer(self.stock.values_efficiency_main, 'demand_technology', demand_technology)
                # weight the main-efficiency stock by the share of service met by the main energy type
                self.stock.values_efficiency_main.loc[indexer, :] = self.stock.values_efficiency_main.loc[indexer,:] * demand_technology_class.efficiency_main.utility_factor
                self.stock.values_efficiency_main.loc[indexer, 'final_energy'] = demand_technology_class.efficiency_main.final_energy_id
                if self.stock.aux:
                    # the remaining share of the stock is attributed to the auxiliary energy type
                    self.stock.values_efficiency_aux.loc[indexer, :] = self.stock.values.loc[indexer, :] * (
                        1 - demand_technology_class.efficiency_main.utility_factor)
                    if hasattr(demand_technology_class.efficiency_aux, 'final_energy_id'):
                        self.stock.values_efficiency_aux.loc[
                            indexer, 'final_energy'] = demand_technology_class.efficiency_aux.final_energy_id
                    else:
                        # 9999 is the placeholder energy id for technologies without an aux energy type
                        self.stock.values_efficiency_aux.loc[indexer, 'final_energy'] = 9999
            if self.stock.aux:
                self.stock.values_efficiency = pd.concat([self.stock.values_efficiency_aux, self.stock.values_efficiency_main])
            else:
                # if there is no auxiliary efficiency, efficiency main becomes used for total efficiency
                self.stock.values_efficiency = self.stock.values_efficiency_main
            self.stock.values_efficiency = self.set_energy_as_index(self.stock.values_efficiency)
            # aggregate duplicate index rows (e.g. aux and main rows mapping to the same energy)
            self.stock.values_efficiency = self.stock.values_efficiency.reset_index().groupby(
                self.stock.values_efficiency.index.names).sum()
            # normalize dataframe for efficiency calculations
            self.stock.values_efficiency_normal = self.stock.values_efficiency.groupby(
                level=util.ix_excl(self.stock.values_efficiency, ['final_energy', 'demand_technology', 'vintage'])).transform(
                lambda x: x / x.sum()).fillna(0)
            self.stock.values_efficiency_normal_tech = self.stock.values_efficiency.groupby(
                level=util.ix_excl(self.stock.values_efficiency, ['final_energy', 'vintage'])).transform(
                lambda x: x / x.sum()).fillna(0)
            self.stock.values_efficiency_normal_energy = self.stock.values_efficiency.groupby(
                level=util.ix_excl(self.stock.values_efficiency, ['demand_technology', 'vintage'])).transform(
                lambda x: x / x.sum()).fillna(0)
def set_energy_as_index(self, df):
""" takes a column with an energy id and makes it an index level """
df = df.set_index('final_energy', append=True)
df = df.swaplevel('vintage', 'final_energy')
util.replace_index_name(df, 'final_energy')
return df
    def financial_stock(self):
        """
        Calculates the amount of stock based on sales and demand_technology book life
        instead of physical decay

        Populates stock.values_financial_new, stock.values_financial_replacement,
        and their sum stock.values_financial.
        """
        for tech in self.technologies.values():
            # creates binary matrix across years and vintages for a demand_technology based on its book life
            tech.book_life_matrix = util.book_life_df(tech.book_life, self.vintages, self.years)
            # creates a linear decay of initial stock
            tech.initial_book_life_matrix = util.initial_book_life_df(tech.book_life, tech.mean_lifetime, self.vintages, self.years)
        # reformat the book_life_matrix dataframes to match the stock dataframe
        # creates a list of formatted tech dataframes and concatenates them
        tech_dfs = [self.reformat_tech_df(self.stock.sales, tech, tech_class=None, tech_att='book_life_matrix', id=tech.id) for tech in self.technologies.values()]
        tech_df = pd.concat(tech_dfs)
        # initial_stock_df uses the stock values dataframe for the first model year
        initial_stock_df = self.stock.values[min(self.years)]
        # formats tech dfs to match stock df
        initial_tech_dfs = [self.reformat_tech_df(initial_stock_df, tech, tech_class=None, tech_att='initial_book_life_matrix',id=tech.id) for tech in self.technologies.values()]
        initial_tech_df = pd.concat(initial_tech_dfs)
        # stock values in any year equals vintage sales multiplied by book life
        self.stock.values_financial_new = util.DfOper.mult([self.stock.sales_new, tech_df])
        self.stock.values_financial_replacement = util.DfOper.mult([self.stock.sales_replacement, tech_df])
        # initial stock values in any year equals stock.values multiplied by the initial tech_df
        initial_values_financial_new = util.DfOper.mult([self.stock.values_new, initial_tech_df],non_expandable_levels=('year'))
        initial_values_financial_replacement = util.DfOper.mult([self.stock.values_replacement, initial_tech_df],non_expandable_levels=('year'))
        # sum normal and initial stock values
        self.stock.values_financial_new = util.DfOper.add([self.stock.values_financial_new, initial_values_financial_new],non_expandable_levels=('year'))
        self.stock.values_financial_replacement = util.DfOper.add([self.stock.values_financial_replacement, initial_values_financial_replacement],non_expandable_levels=('year'))
        self.stock.values_financial = util.DfOper.add([self.stock.values_financial_new,self.stock.values_financial_replacement])
    def calculate_costs_stock(self):
        """
        produce equipment cost outputs based on stock rollover values and equipment specifications

        Populates stock.levelized_costs and stock.annual_costs, each keyed by
        cost category ('capital', 'fixed_om', 'installation', and -- for
        non-'link' subsectors -- 'fuel_switching') and by 'new'/'replacement'.
        """
        # levelized costs: levelized cost values weighted by the financial (book-life) stock
        self.stock.levelized_costs['capital']['new'] = self.rollover_output(tech_class='capital_cost_new', tech_att='values_level', stock_att='values_financial_new')
        self.stock.levelized_costs['capital']['replacement'] = self.rollover_output(tech_class='capital_cost_replacement', tech_att='values_level', stock_att='values_financial_replacement')
        self.stock.levelized_costs['fixed_om']['new'] = self.rollover_output(tech_class='fixed_om', tech_att='values', stock_att='values_new')
        self.stock.levelized_costs['fixed_om']['replacement'] = self.rollover_output(tech_class='fixed_om', tech_att='values', stock_att='values_replacement')
        self.stock.levelized_costs['installation']['new'] = self.rollover_output(tech_class='installation_cost_new', tech_att='values_level', stock_att='values_financial_new')
        self.stock.levelized_costs['installation']['replacement'] = self.rollover_output(tech_class='installation_cost_replacement', tech_att='values_level', stock_att='values_financial_replacement')
        if self.sub_type != 'link':
            self.stock.levelized_costs['fuel_switching']['new'] = self.rollover_output(tech_class='fuel_switch_cost', tech_att='values_level', stock_att='values_fuel_switch')
            # fuel switching costs are only incurred on new installations
            self.stock.levelized_costs['fuel_switching']['replacement'] = self.stock.levelized_costs['fuel_switching']['new'] *0
        year_df = util.vintage_year_matrix(self.years,self.vintages)
        # annual costs: unlevelized costs applied to the sales in each vintage year
        # NOTE(review): 'new' fixed_om is stacked to a single-column frame while
        # 'replacement' is left in year-column form -- confirm this asymmetry is intended
        self.stock.annual_costs['fixed_om']['new'] = self.rollover_output(tech_class='fixed_om', tech_att='values', stock_att='values_new').stack().to_frame()
        self.stock.annual_costs['fixed_om']['replacement'] = self.rollover_output(tech_class='fixed_om', tech_att='values', stock_att='values_replacement')
        self.stock.annual_costs['capital']['new'] = util.DfOper.mult([self.rollover_output(tech_class='capital_cost_new', tech_att='values', stock_att='sales_new'),year_df])
        self.stock.annual_costs['capital']['replacement'] = util.DfOper.mult([self.rollover_output(tech_class='capital_cost_replacement', tech_att='values', stock_att='sales_replacement'),year_df])
        self.stock.annual_costs['installation']['new'] = util.DfOper.mult([self.rollover_output(tech_class='installation_cost_new', tech_att='values', stock_att='sales_new'),year_df])
        self.stock.annual_costs['installation']['replacement'] = util.DfOper.mult([self.rollover_output(tech_class='installation_cost_replacement', tech_att='values', stock_att='sales_replacement'),year_df])
        if self.sub_type != 'link':
            self.stock.annual_costs['fuel_switching']['new'] = util.DfOper.mult([self.rollover_output(tech_class='fuel_switch_cost', tech_att='values', stock_att='sales_fuel_switch'),year_df])
            self.stock.annual_costs['fuel_switching']['replacement'] = self.stock.annual_costs['fuel_switching']['new'] * 0
def remove_extra_subsector_attributes(self):
if hasattr(self, 'stock'):
delete_list = ['values_financial_new', 'values_financial_replacement', 'values_new',
'values_replacement', 'sales_new', 'sales_replacement','sales_fuel_switch']
delete_list = []
for att in delete_list:
if hasattr(self.stock, att):
delattr(self.stock, att)
def rollover_efficiency_outputs(self, other_index=None):
"""
Calculate rollover efficiency outputs for the whole stock or stock groups by final energy and demand_technology.
Efficiency values are all stock-weighted by indexed inputs. Ex. if the other_index input equals 'all', multiplying
these efficiency values by total service demand would equal total energy.
"""
if other_index is not None:
index = util.ensure_iterable_and_not_string(other_index)
else:
index = util.ensure_iterable_and_not_string(['all', 'demand_technology', 'final_energy'])
index.append('all')
self.stock.efficiency = defaultdict(dict)
for element in index:
if element == 'demand_technology':
aggregate_level = None
values_normal = 'values_efficiency_normal_tech'
elif element == 'final_energy':
aggregate_level = None
values_normal = 'values_efficiency_normal_energy'
elif element == 'all':
aggregate_level = None
values_normal = 'values_efficiency_normal'
elif element == 'total':
aggregate_level = 'demand_technology'
values_normal = 'values_efficiency_normal'
if self.stock.aux:
self.stock.efficiency[element]['all'] = self.rollover_output(tech_class=['efficiency_main','efficiency_aux'],
stock_att=values_normal,
other_aggregate_levels=aggregate_level, efficiency=True)
else:
self.stock.efficiency[element]['all'] = self.rollover_output(tech_class='efficiency_main',stock_att=values_normal,other_aggregate_levels=aggregate_level, efficiency=True)
    def fuel_switch_stock_calc(self):
        """
        Calculates the amount of stock that has switched energy type and allocates
        it to technologies based on demand_technology share. This is used to calculate the additional
        costs of fuel switching for applicable technologies.
        Ex. an EV might have a fuel switching cost that represents the cost of a home charger that is incurred only
        when the charger is installed.

        Populates stock.sales_fuel_switch and stock.values_fuel_switch.
        """
        fuel_switch_sales = copy.deepcopy(self.stock.sales)
        fuel_switch_retirements = copy.deepcopy(self.stock.retirements)
        # tag sales and retirements with each technology's main final energy type
        for demand_technology in self.technologies.values():
            indexer = util.level_specific_indexer(fuel_switch_sales, 'demand_technology', demand_technology.id)
            fuel_switch_sales.loc[indexer, 'final_energy'] = demand_technology.efficiency_main.final_energy_id
            fuel_switch_retirements.loc[indexer, 'final_energy'] = demand_technology.efficiency_main.final_energy_id
        # promote the energy column to an index level, ordered before vintage
        fuel_switch_sales = fuel_switch_sales.set_index('final_energy', append=True)
        fuel_switch_sales = fuel_switch_sales.swaplevel('vintage', 'final_energy')
        util.replace_index_name(fuel_switch_sales, 'final_energy')
        fuel_switch_retirements = fuel_switch_retirements.set_index('final_energy', append=True)
        fuel_switch_retirements = fuel_switch_retirements.swaplevel('vintage', 'final_energy')
        util.replace_index_name(fuel_switch_retirements, 'final_energy')
        # TODO check that these work!!
        fuel_switch_sales_energy = util.remove_df_levels(fuel_switch_sales, 'demand_technology')
        fuel_switch_retirements_energy = util.remove_df_levels(fuel_switch_retirements, 'demand_technology')
        # net new sales of each energy type = sales minus retirements of that energy, floored at zero
        new_energy_sales = util.DfOper.subt([fuel_switch_sales_energy, fuel_switch_retirements_energy])
        new_energy_sales[new_energy_sales<0]=0
        # allocate net new energy sales back to technologies by their share of sales
        new_energy_sales_share_by_demand_technology = fuel_switch_sales.groupby(level=util.ix_excl(fuel_switch_sales, 'demand_technology')).transform(lambda x: x / x.sum())
        new_energy_sales_by_demand_technology = util.DfOper.mult([new_energy_sales_share_by_demand_technology, new_energy_sales])
        fuel_switch_sales_share = util.DfOper.divi([new_energy_sales_by_demand_technology, fuel_switch_sales]).replace(np.nan,0)
        fuel_switch_sales_share = util.remove_df_levels(fuel_switch_sales_share, 'final_energy')
        # NOTE(review): bare DfOper here vs util.DfOper above -- presumably the same
        # class imported at module level; confirm both names resolve identically
        self.stock.sales_fuel_switch = DfOper.mult([self.stock.sales, fuel_switch_sales_share])
        self.stock.values_fuel_switch = DfOper.mult([self.stock.values_financial, fuel_switch_sales_share])
def calculate_parasitic(self):
"""
calculates parasitic energy demand
"""
self.parasitic_energy = self.rollover_output(tech_class='parasitic_energy', tech_att='values',
stock_att='values',stock_expandable=True)
    def calculate_output_service_drivers(self):
        """ calculates service drivers for use in linked subsectors

        Populates self.output_service_drivers: a dict keyed by linked subsector
        id whose values are single-column ('value') dataframes of service
        impacts with a 'year' index level, geo-mapped to the disaggregation
        geography.
        """
        self.calculate_service_impact()
        self.output_service_drivers = {}
        for link in self.service_links.values():
            # inherit a geography map key from service demand, then stock, else None
            if hasattr(self,'service_demand') and hasattr(self.service_demand,'geography_map_key'):
                link.geography_map_key=self.service_demand.geography_map_key
            elif hasattr(self,'stock') and hasattr(self.stock,'geography_map_key'):
                link.geography_map_key=self.stock.geography_map_key
            else:
                link.geography_map_key=None
            link.input_type = 'total'
            link.geography = cfg.primary_geography
            # map the link values from the primary to the disaggregation geography
            df = link.geo_map(attr='values',current_geography=cfg.primary_geography,converted_geography=cfg.disagg_geography, current_data_type='intensity', inplace=False)
            # stack year columns into a single 'value' column
            df = pd.DataFrame(df.stack(), columns=['value'])
            util.replace_index_name(df, 'year')
            self.output_service_drivers[link.linked_subsector_id] = df
    def calculate_output_demand_technology_stocks(self):
        """ calculates demand_technology stocks for use in subsectors with linked technologies

        Populates self.output_demand_technology_stocks: a dict keyed by the
        linked technology id whose values are 'value' dataframes of stock
        (summed over vintages) scaled by each technology's stock_link_ratio.
        """
        self.output_demand_technology_stocks = {}
        # collapse vintages so the output is total stock by year
        self.stock.output_stock = self.stock.values.groupby(level=util.ix_excl(self.stock.values, 'vintage')).sum()
        # stack year columns into a single 'value' column
        stock = self.stock.output_stock.stack()
        util.replace_index_name(stock, 'year')
        stock = pd.DataFrame(stock, columns=['value'])
        for demand_technology in self.technologies.values():
            if demand_technology.linked_id is not None:
                indexer = util.level_specific_indexer(stock, 'demand_technology', demand_technology.id)
                tech_values = pd.DataFrame(stock.loc[indexer, :], columns=['value'])
                # relabel the tech with the id it maps to in the linked subsector
                util.replace_index_label(tech_values, {demand_technology.id: demand_technology.linked_id}, 'demand_technology')
                self.output_demand_technology_stocks[demand_technology.linked_id] = tech_values * demand_technology.stock_link_ratio
    def calculate_service_impact(self):
        """
        calculates a normalized service impact for linked demand_technology stocks as a function
        of demand tech service link data and subsector service link impact specifications.
        ex.
        service efficiency of clothes washing stock for the water heating subsector =
        (1- clothes washing service_demand_share of water heating) +
        (normalized stock service efficiency of clothes washers (i.e. efficiency of hot water usage) * service_demand_share)
        This vector can then be multiplied by clothes washing service demand to create an additional driver for water heating
        """
        for id in self.service_links:
            link = self.service_links[id]
            # stock-weighted service link values by technology and vintage
            link.values = self.rollover_output_dict(tech_dict='service_links',tech_dict_key=id, tech_att='values', stock_att='values_normal')
            # sum over demand_technology and vintage to get a total stock service efficiency
            link.values = util.remove_df_levels(link.values,['demand_technology', 'vintage'])
            # normalize stock service efficiency to calibration year
            # NOTE(review): as_matrix() is the legacy pandas array accessor
            # (replaced by .values) -- confirm the pinned pandas version supports it
            values = link.values.as_matrix()
            calibration_values = link.values[link.year].as_matrix()
            calibration_values = np.column_stack(calibration_values).T
            new_values = 1.0 - (values / np.array(calibration_values,float))
            # calculate weighted after service efficiency as a function of service demand share
            new_values = (link.service_demand_share * new_values)
            link.values = pd.DataFrame(new_values, link.values.index, link.values.columns.values)
            # clean up infinities/NaNs from division by zero calibration values
            link.values.replace([np.inf,-np.inf,np.nan],[0,0,0],inplace=True)
def reformat_service_demand(self):
"""
format service demand with year index once calculations are complete
"""
if hasattr(self,'service_demand') and 'year' not in self.service_demand.values.index.names:
self.service_demand.values = pd.DataFrame(self.service_demand.values.stack(), columns=['value'])
util.replace_index_name(self.service_demand.values, 'year')
def rollover_output(self, tech_class=None, tech_att='values', stock_att=None,
stack_label=None, other_aggregate_levels=None, efficiency=False, stock_expandable=False):
""" Produces rollover outputs for a subsector stock based on the tech_att class, att of the class, and the attribute of the stock
ex. to produce levelized costs for all new vehicles, it takes the capital_costs_new class, the 'values_level' attribute, and the 'values'
attribute of the stock
"""
stock_df = getattr(self.stock, stock_att)
groupby_level = util.ix_excl(stock_df, ['vintage'])
c = util.empty_df(stock_df.index, stock_df.columns.values, fill_value=0.)
tech_class = util.put_in_list(tech_class)
tech_dfs = []
for tech_class in tech_class:
tech_dfs += ([self.reformat_tech_df(stock_df, tech, tech_class, tech_att, tech.id, efficiency) for tech in
self.technologies.values() if
hasattr(getattr(tech, tech_class), tech_att) and getattr(tech, tech_class).raw_values is not None])
if len(tech_dfs):
#TODO we are doing an add here when an append might work and will be faster
tech_df = util.DfOper.add(tech_dfs)
tech_df = tech_df.reorder_levels([x for x in stock_df.index.names if x in tech_df.index.names]+[x for x in tech_df.index.names if x not in stock_df.index.names])
tech_df = tech_df.sort_index()
c = util.DfOper.mult((tech_df, stock_df), expandable=(True, stock_expandable), collapsible=(False, True))
else:
util.empty_df(stock_df.index, stock_df.columns.values, 0.)
if stack_label is not None:
c = c.stack()
util.replace_index_name(c, stack_label)
util.replace_column(c, 'value')
if other_aggregate_levels is not None:
groupby_level = util.ix_excl(c, other_aggregate_levels)
c = c.groupby(level=groupby_level).sum()
if 'final_energy' in c.index.names:
c = c[c.index.get_level_values('final_energy')!=9999]
return c
def rollover_output_dict(self, tech_dict=None, tech_dict_key=None, tech_att='values', stock_att=None,
stack_label=None, other_aggregate_levels=None, efficiency=False,fill_value=0.0):
""" Produces rollover outputs for a subsector stock based on the tech_att class, att of the class, and the attribute of the stock
ex. to produce levelized costs for all new vehicles, it takes the capital_costs_new class, the 'values_level' attribute, and the 'values'
attribute of the stock
"""
stock_df = getattr(self.stock, stock_att)
groupby_level = util.ix_excl(stock_df, ['vintage'])
# determines index position for demand_technology and final energy element
c = util.empty_df(stock_df.index, stock_df.columns.values, fill_value=fill_value)
# puts technologies on the appropriate basis
tech_dfs = []
tech_dfs += ([self.reformat_tech_df_dict(stock_df = stock_df, tech=tech, tech_dict=tech_dict,
tech_dict_key=tech_dict_key, tech_att=tech_att, id=tech.id, efficiency=efficiency) for tech in self.technologies.values()
if getattr(tech,tech_dict).has_key(tech_dict_key)])
if len(tech_dfs):
tech_df = util.DfOper.add(tech_dfs)
tech_df = tech_df.reorder_levels([x for x in stock_df.index.names if x in tech_df.index.names]+[x for x in tech_df.index.names if x not in stock_df.index.names])
tech_df = tech_df.sort_index()
# TODO figure a better way
c = DfOper.mult((tech_df, stock_df), expandable=(True, False), collapsible=(False, True))
else:
util.empty_df(stock_df.index, stock_df.columns.values, 0.)
if stack_label is not None:
c = c.stack()
util.replace_index_name(c, stack_label)
util.replace_column(c, 'value')
if other_aggregate_levels is not None:
groupby_level = util.ix_excl(c, other_aggregate_levels)
c = c.groupby(level=groupby_level).sum()
# TODO fix
if 'final_energy' in c.index.names:
c = util.remove_df_elements(c, 9999, 'final_energy')
return c
def reformat_tech_df(self, stock_df, tech, tech_class, tech_att, id, efficiency=False):
"""
reformat technoology dataframes for use in stock-level dataframe operations
"""
if tech_class is None:
tech_df = getattr(tech, tech_att)
else:
tech_df = getattr(getattr(tech, tech_class), tech_att)
if 'demand_technology' not in tech_df.index.names:
tech_df['demand_technology'] = id
tech_df.set_index('demand_technology', append=True, inplace=True)
if efficiency is True and 'final_energy' not in tech_df.index.names:
final_energy_id = getattr(getattr(tech, tech_class), 'final_energy_id')
tech_df['final_energy'] = final_energy_id
tech_df.set_index('final_energy', append=True, inplace=True)
return tech_df
def reformat_tech_df_dict(self, stock_df, tech, tech_dict, tech_dict_key, tech_att, id, efficiency=False):
"""
reformat technoology dataframes for use in stock-level dataframe operations
"""
if getattr(tech,tech_dict).has_key(tech_dict_key):
tech_df = getattr(getattr(tech,tech_dict)[tech_dict_key],tech_att)
if 'demand_technology' not in tech_df.index.names:
tech_df['demand_technology'] = id
tech_df.set_index('demand_technology', append=True, inplace=True)
if efficiency is True and 'final_energy' not in tech_df.index.names:
final_energy_id = getattr(getattr(tech,tech_dict)[tech_dict_key], 'final_energy_id')
tech_df['final_energy'] = final_energy_id
tech_df.set_index('final_energy', append=True, inplace=True)
return tech_df
def calculate_energy_stock(self):
"""
calculate subsector energy forecasts based on main and auxiliary efficiency and service demand as well
as parasitic energy
"""
self.rollover_efficiency_outputs()
self.calculate_parasitic()
self.service_demand.values = self.service_demand.values.unstack('year')
self.service_demand.values.columns = self.service_demand.values.columns.droplevel()
if len([x for x in self.stock.rollover_group_names if x not in self.service_demand.values.index.names]):
multiplier = self.stock.values.groupby(level=[x for x in self.stock.rollover_group_names if x not in self.service_demand.values.index.names]).sum()/self.stock.values.sum()
all_energy = util.DfOper.mult([self.stock.efficiency['all']['all'],self.service_demand.modifier, self.service_demand.values,multiplier])
else:
all_energy = util.DfOper.mult([self.stock.efficiency['all']['all'],self.service_demand.modifier, self.service_demand.values])
self.energy_forecast = util.DfOper.add([all_energy, self.parasitic_energy])
self.energy_forecast = pd.DataFrame(self.energy_forecast.stack())
if len([x for x in self.stock.rollover_group_names if x not in self.service_demand.values.index.names]):
multiplier = self.stock.values.groupby(level=[x for x in self.stock.rollover_group_names if x not in self.service_demand.values.index.names]).sum()/self.stock.values.sum()
all_energy = util.DfOper.mult([self.stock.efficiency['all']['all'], self.service_demand.values,multiplier])
else:
all_energy = util.DfOper.mult([self.stock.efficiency['all']['all'], self.service_demand.values])
self.energy_forecast_no_modifier = util.DfOper.add([all_energy, self.parasitic_energy])
self.energy_forecast_no_modifier = pd.DataFrame(self.energy_forecast_no_modifier.stack())
util.replace_index_name(self.energy_forecast, 'year')
util.replace_column(self.energy_forecast, 'value')
util.replace_index_name(self.energy_forecast_no_modifier, 'year')
util.replace_column(self.energy_forecast_no_modifier, 'value')
self.energy_forecast = util.remove_df_elements(self.energy_forecast, 9999, 'final_energy')
self.energy_forecast_no_modifier = util.remove_df_elements(self.energy_forecast_no_modifier, 9999, 'final_energy')
if cfg.cfgfile.get('case', 'use_service_demand_modifiers').lower()=='false':
self.energy_forecast = self.energy_forecast_no_modifier
if hasattr(self,'service_demand') and hasattr(self.service_demand,'other_index_1') :
util.replace_index_name(self.energy_forecast,"other_index_1",self.service_demand.other_index_1)
if hasattr(self,'service_demand') and hasattr(self.service_demand,'other_index_2') :
util.replace_index_name(self.energy_forecast,"other_index_2",self.service_demand.other_index_2)
if hasattr(self,'energy_demand') and hasattr(self.energy_demand,'other_index_1'):
util.replace_index_name(self.energy_forecast,"other_index_1",self.service_demand.other_index_1)
if hasattr(self,'energy_demand') and hasattr(self.energy_demand,'other_index_2'):
util.replace_index_name(self.energy_forecast,"other_index_2",self.service_demand.other_index_2)
| {
"repo_name": "energyPATHWAYS/energyPATHWAYS",
"path": "energyPATHWAYS/demand.py",
"copies": "1",
"size": "217531",
"license": "mit",
"hash": 4647208757354094000,
"line_mean": 63.3012119421,
"line_max": 289,
"alpha_frac": 0.6354956305,
"autogenerated": false,
"ratio": 3.806583137927414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49420787684274137,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben Haley & Ryan Jones'
import config as cfg
import util
import pandas as pd
import numpy as np
from datamapfunctions import DataMapFunctions, Abstract
import copy
import logging
import time
from util import DfOper
from collections import defaultdict
from supply_measures import BlendMeasure, ExportMeasure, StockMeasure, StockSalesMeasure, CO2PriceMeasure
from supply_technologies import SupplyTechnology, StorageTechnology
from supply_classes import SupplySpecifiedStock
from shared_classes import SalesShare, SpecifiedStock, Stock, StockItem
from rollover import Rollover
from solve_io import solve_IO
from dispatch_classes import Dispatch, DispatchFeederAllocation
import dispatch_classes
import inspect
import operator
import shape
from outputs import Output
from multiprocessing import Pool, cpu_count
import energyPATHWAYS.helper_multiprocess as helper_multiprocess
import pdb
import os
from datetime import datetime
import random
import dispatch_budget
import dispatch_generators
#def node_update_stock(node):
# if hasattr(node, 'stock'):
# node.update_stock(node.year,node.loop)
# return node
class Supply(object):
"""This module calculates all supply nodes in an IO loop to calculate energy,
emissions, and cost flows through the energy economy
"""
def __init__(self, scenario, demand_object=None, api_run=False):
"""Initializes supply instance"""
self.all_nodes, self.blend_nodes, self.non_storage_nodes, self.storage_nodes = [], [], [], []
self.nodes = {}
self.demand_object = demand_object # used to retrieve results from demand-side
self.scenario = scenario # used in writing dispatch outputs
self.demand_sectors = util.sql_read_table('DemandSectors','id',return_iterable=True)
self.demand_sectors.sort()
self.thermal_dispatch_node_id = util.sql_read_table('DispatchConfig', 'thermal_dispatch_node_id')
self.distribution_node_id = util.sql_read_table('DispatchConfig', 'distribution_node_id')
self.distribution_grid_node_id = util.sql_read_table('DispatchConfig', 'distribution_grid_node_id')
self.transmission_node_id = util.sql_read_table('DispatchConfig', 'transmission_node_id')
self.dispatch_zones = [self.distribution_node_id, self.transmission_node_id]
self.electricity_nodes = defaultdict(list)
self.injection_nodes = defaultdict(list)
self.ghgs = util.sql_read_table('GreenhouseGases','id', return_iterable=True)
self.dispatch_feeder_allocation = DispatchFeederAllocation(id=1)
self.dispatch_feeders = list(set(self.dispatch_feeder_allocation.values.index.get_level_values('dispatch_feeder')))
self.dispatch = Dispatch(self.dispatch_feeders, cfg.dispatch_geography, cfg.dispatch_geographies, self.scenario)
self.outputs = Output()
self.outputs.hourly_dispatch_results = None
self.outputs.hourly_marginal_cost = None
self.outputs.hourly_production_cost = None
self.active_thermal_dispatch_df_list = []
self.map_dict = dict(util.sql_read_table('SupplyNodes', ['final_energy_link', 'id']))
self.api_run = api_run
if self.map_dict.has_key(None):
del self.map_dict[None]
self.CO2PriceMeasures = scenario.get_measures('CO2PriceMeasures', self.thermal_dispatch_node_id)
self.add_co2_price_to_dispatch(self.CO2PriceMeasures)
def add_co2_price_to_dispatch(self,CO2PriceMeasures):
if len(self.CO2PriceMeasures)>1:
raise ValueError('multiple CO2 price measures are active')
elif len(self.CO2PriceMeasures) ==0:
self.CO2PriceMeasure=None
else:
self.CO2PriceMeasure = CO2PriceMeasure(self.CO2PriceMeasures[0])
self.CO2PriceMeasure.calculate()
    def calculate_technologies(self):
        """ initiates calculation of all technology attributes - costs, efficiency, etc.
        """
        for node in self.nodes.values():
            if not hasattr(node, 'technologies'):
                # nodes without technologies (e.g. blends, imports) have nothing to calculate
                continue
            for technology in node.technologies.values():
                # calculate one extra leading vintage so the pre-start stock has attributes
                technology.calculate([node.vintages[0] - 1] + node.vintages, node.years)
            # indentation is correct
                # NOTE(review): remap_tech_attrs runs once per technology iteration,
                # dispatching on the current technology's type; the comment above was
                # left by the authors asserting this placement is intentional --
                # confirm before restructuring
                if isinstance(technology, StorageTechnology):
                    node.remap_tech_attrs(cfg.storage_tech_classes)
                else:
                    node.remap_tech_attrs(cfg.tech_classes)
def aggregate_results(self):
def remove_na_levels(df):
if df is None:
return None
levels_with_na_only = [name for level, name in zip(df.index.levels, df.index.names) if list(level)==[u'N/A']]
return util.remove_df_levels(df, levels_with_na_only).sort_index()
output_list = ['stock', 'annual_costs', 'levelized_costs', 'capacity_utilization']
for output_name in output_list:
df = self.group_output(output_name)
df = remove_na_levels(df) # if a level only as N/A values, we should remove it from the final outputs
setattr(self.outputs, "s_"+output_name, df)
setattr(self.outputs,'s_energy',self.format_output_io_supply())
def format_output_io_supply(self):
energy = self.io_supply_df.stack().to_frame()
util.replace_index_name(energy,'year')
energy_unit = cfg.calculation_energy_unit
energy.columns = [energy_unit.upper()]
return energy
def group_output(self, output_type, levels_to_keep=None):
levels_to_keep = cfg.output_supply_levels if levels_to_keep is None else levels_to_keep
dfs = [node.group_output(output_type, levels_to_keep) for node in self.nodes.values()]
if all([df is None for df in dfs]) or not len(dfs):
return None
keys = [node.id for node in self.nodes.values()]
dfs, keys = zip(*[(df, key) for df, key in zip(dfs, keys) if df is not None])
new_names = 'supply_node'
return util.df_list_concatenate(dfs, keys, new_names, levels_to_keep)
    def calculate_years(self):
        """
        Determines the period of stock rollover within a node based on the minimum year
        of specified sales or stock.

        For every node: min_year starts at the configured current year and is
        pulled back to the earliest vintage/year found in reference sales, sales
        measures, or specified stock, floored at 'supply_start_year'; node.years
        and node.vintages are then built on the configured year step.
        """
        for node in self.nodes.values():
            node.min_year = int(cfg.cfgfile.get('case', 'current_year'))
            if hasattr(node,'technologies'):
                for technology in node.technologies.values():
                    for reference_sales in technology.reference_sales.values():
                        min_year = min(reference_sales.raw_values.index.levels[util.position_in_index(reference_sales.raw_values, 'vintage')])
                        if min_year < node.min_year:
                            node.min_year = min_year
                    for sales in technology.sales.values():
                        min_year = min(sales.raw_values.index.get_level_values('vintage'))
                        if min_year < node.min_year:
                            node.min_year = min_year
            if hasattr(node,'stock') and node.stock.raw_values is not None:
                min_year = min(node.stock.raw_values.index.levels[util.position_in_index(node.stock.raw_values, 'year')])
                if min_year < node.min_year:
                    node.min_year = min_year
            # never start the rollover before the configured supply start year
            node.min_year = max(node.min_year, int(cfg.cfgfile.get('case','supply_start_year')))
            node.years = range(node.min_year, int(cfg.cfgfile.get('case', 'end_year')) + int(cfg.cfgfile.get('case', 'year_step')), int(cfg.cfgfile.get('case', 'year_step')))
            node.vintages = copy.deepcopy(node.years)
        # NOTE(review): cfgfile.get normally returns a string, yet self.years is used
        # elsewhere with min()/max() as if it were a list of ints -- presumably cfg
        # pre-parses 'supply_years'; verify against config.py
        self.years = cfg.cfgfile.get('case','supply_years')
    def initial_calculate(self):
        """Calculates all nodes in years before IO loop"""
        logging.info("Calculating supply-side prior to current year")
        # per-node year/vintage ranges must exist before anything else
        self.calculate_years()
        self.add_empty_output_df()
        logging.info("Creating input-output table")
        self.create_IO()
        self.create_inverse_dict()
        # accumulators for embodied cost/emissions matrices, keyed later by year/sector
        self.cost_dict = util.recursivedict()
        self.emissions_dict = util.recursivedict()
        self.create_embodied_cost_and_energy_demand_link()
        self.create_embodied_emissions_demand_link()
        logging.info("Initiating calculation of technology attributes")
        self.calculate_technologies()
        logging.info("Running stock rollover prior to current year")
        self.calculate_nodes()
        self.calculate_initial_demand()
    def final_calculate(self):
        # post-IO-loop wrap-up: stitch per-year cost results together and compute
        # capacity utilization from the completed supply series
        self.concatenate_annual_costs()
        self.concatenate_levelized_costs()
        self.calculate_capacity_utilization()
    def calculate_nodes(self):
        """Performs an initial calculation for all import, conversion, delivery, and storage nodes"""
        if cfg.cfgfile.get('case','parallel_process').lower() == 'true':
            # fan node.calculate() out across worker processes, then rebuild the dict
            nodes = helper_multiprocess.safe_pool(helper_multiprocess.node_calculate, self.nodes.values())
            self.nodes = dict(zip(self.nodes.keys(), nodes))
        else:
            for node in self.nodes.values():
                node.calculate()
        for node in self.nodes.values():
            # 'evolved' runs re-seed blend shares: equal weight across supply nodes,
            # storage pinned to a tiny share, then renormalize
            if node.id in self.blend_nodes and node.id in cfg.evolved_blend_nodes and cfg.evolved_run=='true':
                node.values = node.values.groupby(level=[x for x in node.values.index.names if x !='supply_node']).transform(lambda x: 1/float(x.count()))
                for x in node.nodes:
                    if x in self.storage_nodes:
                        indexer = util.level_specific_indexer(node.values,'supply_node',x)
                        # NOTE(review): the 1e-7 * 4 here and the /4.0 below look
                        # coupled magic constants -- confirm intent before changing
                        node.values.loc[indexer,:] = 1e-7 * 4
                node.values = node.values.groupby(level=[x for x in node.values.index.names if x !='supply_node']).transform(lambda x: x/x.sum())/4.0
def create_IO(self):
"""Creates a dictionary with year and demand sector keys to store IO table structure"""
self.io_dict = util.recursivedict()
index = pd.MultiIndex.from_product([cfg.geographies, self.all_nodes], names=[cfg.primary_geography,
'supply_node'])
for year in self.years:
for sector in util.ensure_iterable_and_not_string(self.demand_sectors):
self.io_dict[year][sector] = util.empty_df(index = index, columns = index, fill_value=0.0)
self.io_dict = util.freeze_recursivedict(self.io_dict)
    def add_nodes(self):
        """Adds node instances for all active supply nodes"""
        logging.info('Adding supply nodes')
        supply_type_dict = dict(util.sql_read_table('SupplyTypes', column_names=['id', 'name']))
        supply_nodes = util.sql_read_table('SupplyNodes', column_names=['id', 'name', 'supply_type_id', 'is_active'], return_iterable=True)
        # sort tuples by id so nodes are added in a deterministic order
        supply_nodes.sort()
        for node_id, name, supply_type_id, is_active in supply_nodes:
            if is_active:
                self.all_nodes.append(node_id)
                logging.info(' {} node {}'.format(supply_type_dict[supply_type_id], name))
                self.add_node(node_id, supply_type_dict[supply_type_id], self.scenario)
        # this ideally should be moved to the init statements for each of the nodes
        for node in self.nodes.values():
            node.demand_sectors = self.demand_sectors
            node.ghgs = self.ghgs
            node.distribution_grid_node_id = self.distribution_grid_node_id
            # for some reason, when this next part gets moved to the init for node, the DR node ends up having a tradeable geography of none
            if node.tradable_geography is None:
                # no explicit tradable geography: fall back to the primary geography
                # and mark it as not enforced
                node.enforce_tradable_geography = False
                node.tradable_geography = cfg.primary_geography
            else:
                node.enforce_tradable_geography = True
def add_node(self, id, supply_type, scenario):
"""Add node to Supply instance
Args:
id (int): supply node id
supply_type (str): supply type i.e. 'blend'
"""
if supply_type == "Blend":
self.nodes[id] = BlendNode(id, supply_type, scenario)
self.blend_nodes.append(id)
elif supply_type == "Storage":
if len(util.sql_read_table('SupplyTechs', 'supply_node_id', supply_node_id=id, return_iterable=True)):
self.nodes[id] = StorageNode(id, supply_type, scenario)
else:
logging.debug(ValueError('insufficient data in storage node %s' %id))
elif supply_type == "Import":
self.nodes[id] = ImportNode(id, supply_type, scenario)
elif supply_type == "Primary":
self.nodes[id] = PrimaryNode(id, supply_type, scenario)
else:
if len(util.sql_read_table('SupplyEfficiency', 'id', id=id, return_iterable=True)):
self.nodes[id] = SupplyNode(id, supply_type, scenario)
elif len(util.sql_read_table('SupplyTechs', 'supply_node_id', supply_node_id=id, return_iterable=True)):
self.nodes[id] = SupplyStockNode(id, supply_type, scenario)
elif len(util.sql_read_table('SupplyStock', 'supply_node_id', supply_node_id=id, return_iterable=True)):
self.nodes[id] = SupplyNode(id, supply_type, scenario)
else:
logging.debug(ValueError('insufficient data in supply node %s' %id))
if supply_type != "Storage":
self.non_storage_nodes.append(id)
else:
self.storage_nodes.append(id)
    def add_measures(self):
        """ Adds measures to supply nodes based on scenario inputs"""
        logging.info('Adding supply measures')
        scenario = self.scenario
        for node in self.nodes.values():
            #all nodes have export measures
            node.add_export_measures(scenario)
            #once measures are loaded, export classes can be initiated
            node.add_exports()
            if node.supply_type == 'Blend':
                node.add_blend_measures(scenario)
            elif isinstance(node, SupplyStockNode) or isinstance(node, StorageNode):
                # stock-based nodes carry technology-level measures as well
                node.add_total_stock_measures(scenario)
                for technology in node.technologies.values():
                    technology.add_sales_measures(scenario)
                    technology.add_sales_share_measures(scenario)
                    technology.add_specified_stock_measures(scenario)
            elif isinstance(node, SupplyNode):
                node.add_total_stock_measures(scenario)
    def _calculate_initial_loop(self):
        """
        in the first year of the io loop, we have an initial loop step called 'initial'.
        this loop is necessary in order to calculate initial active coefficients. Because we haven't calculated
        throughput for all nodes, these coefficients are just proxies in this initial loop
        """
        loop, year = 'initial', min(self.years)
        self.calculate_demand(year, loop)
        self.pass_initial_demand_to_nodes(year)
        self.discover_thermal_nodes()
        self.calculate_stocks(year, loop)
        self.calculate_coefficients(year, loop)
        # NOTE(review): discover_bulk_id sets self.bulk_id as a side effect and
        # returns None, so bulk_node_id is always None here -- verify which
        # attribute downstream code actually reads
        self.bulk_node_id = self.discover_bulk_id()
        # NOTE(review): discover_thermal_nodes is called a second time after the
        # coefficient pass -- presumably deliberate; confirm before removing
        self.discover_thermal_nodes()
        self.update_io_df(year, loop)
        self.calculate_io(year, loop)
    def _recalculate_stocks_and_io(self, year, loop):
        """ Basic calculation control for the IO

        Refreshes coefficients, re-solves the input-output system for (year, loop)
        and then re-runs the stock calculation against the new throughput.
        """
        self.calculate_coefficients(year, loop)
        # we have just solved the dispatch, so coefficients need to be updated before updating the io
        if loop == 3 and year in self.dispatch_years:
            self.update_coefficients_from_dispatch(year)
        self.copy_io(year,loop)
        self.update_io_df(year,loop)
        self.calculate_io(year, loop)
        self.calculate_stocks(year, loop)
def _recalculate_after_reconciliation(self, year, loop, update_demand=False):
""" if reconciliation has occured, we have to recalculate coefficients and resolve the io
"""
if self.reconciled is True:
if update_demand:
self.update_demand(year,loop)
self._recalculate_stocks_and_io(year, loop)
self.reconciled = False
def copy_io(self,year,loop):
if year != min(self.years) and loop ==1:
for sector in self.demand_sectors:
self.io_dict[year][sector] = copy.deepcopy(self.io_dict[year-1][sector])
def set_dispatch_years(self):
dispatch_year_step = int(cfg.cfgfile.get('case','dispatch_step'))
dispatch_write_step = int(cfg.cfgfile.get('output_detail','dispatch_write_step'))
logging.info("Dispatch year step = {}".format(dispatch_year_step))
self.dispatch_years = sorted([min(self.years)] + range(max(self.years), min(self.years), -dispatch_year_step))
if dispatch_year_step ==0:
self.dispatch_write_years = []
else:
self.dispatch_write_years = sorted([min(self.years)] + range(max(self.years), min(self.years), -dispatch_write_step))
    def restart_loop(self):
        # re-enter calculate_loop, skipping any years already recorded in
        # self.calculated_years (used to resume an interrupted run)
        self.calculate_loop(self.years,self.calculated_years)
    def calculate_loop(self, years, calculated_years):
        """Performs all IO loop calculations

        For each uncalculated year, runs three loops in order: (1) the basic
        input-output solve, (2) supply reconciliation (trades, oversupply,
        constraints), and (3) the electricity dispatch in dispatch years; then
        records embodied costs/emissions and annual costs for the year.
        """
        self.set_dispatch_years()
        first_year = min(self.years)
        self._calculate_initial_loop()
        self.calculated_years = calculated_years
        for year in [x for x in years if x not in self.calculated_years]:
            logging.info("Starting supply side calculations for {}".format(year))
            for loop in [1, 2, 3]:
                # starting loop
                if loop == 1:
                    logging.info(" loop {}: input-output calculation".format(loop))
                    # NOTE(review): `is not` compares int identity, not equality;
                    # this works only if `years` holds the same int objects as
                    # self.years -- confirm, or this should be `!=`
                    if year is not first_year:
                        # initialize year is not necessary in the first year
                        self.initialize_year(year, loop)
                    self._recalculate_stocks_and_io(year, loop)
                    self.calculate_coefficients(year, loop)
                # reconciliation loop
                elif loop == 2:
                    logging.info(" loop {}: supply reconciliation".format(loop))
                    # sets a flag for whether any reconciliation occurs in the loop determined in the reconcile function
                    self.reconciled = False
                    # each time, if reconciliation has occured, we have to recalculate coefficients and resolve the io
                    self.reconcile_trades(year, loop)
                    self._recalculate_after_reconciliation(year, loop, update_demand=True)
                    for i in range(2):
                        self.reconcile_oversupply(year, loop)
                        self._recalculate_after_reconciliation(year, loop, update_demand=True)
                    self.reconcile_constraints(year,loop)
                    self._recalculate_after_reconciliation(year, loop, update_demand=True)
                    self.reconcile_oversupply(year, loop)
                    self._recalculate_after_reconciliation(year, loop, update_demand=True)
                # dispatch loop
                elif loop == 3 and year in self.dispatch_years:
                    logging.info(" loop {}: electricity dispatch".format(loop))
                    # loop - 1 is necessary so that it uses last year's throughput
                    self.calculate_embodied_costs(year, loop-1) # necessary here because of the dispatch
                    #necessary to calculate emissions to apply CO2 price in year 1 if applicable
                    # if year == min(self.years):
                    self.calculate_embodied_emissions(year)
                    self.prepare_dispatch_inputs(year, loop)
                    self.solve_electricity_dispatch(year)
                    self._recalculate_stocks_and_io(year, loop)
            # finalize the year's embodied results and mark it complete
            self.calculate_embodied_costs(year, loop=3)
            self.calculate_embodied_emissions(year)
            self.calculate_annual_costs(year)
            self.calculated_years.append(year)
def discover_bulk_id(self):
for node in self.nodes.values():
if hasattr(node, 'active_coefficients_total') and getattr(node, 'active_coefficients_total') is not None:
if self.thermal_dispatch_node_id in node.active_coefficients_total.index.get_level_values('supply_node'):
self.bulk_id = node.id
def discover_thermal_nodes(self):
self.thermal_nodes = []
for node in self.nodes.values():
if node.id in self.nodes[self.thermal_dispatch_node_id].values.index.get_level_values('supply_node'):
self.thermal_nodes.append(node.id)
node.thermal_dispatch_node = True
else:
node.thermal_dispatch_node = False
def calculate_supply_outputs(self):
self.map_dict = dict(util.sql_read_table('SupplyNodes', ['final_energy_link', 'id']))
if self.map_dict.has_key(None):
del self.map_dict[None]
logging.info("calculating supply-side outputs")
self.aggregate_results()
logging.info("calculating supply cost link")
self.cost_demand_link = self.map_embodied_to_demand(self.cost_dict, self.embodied_cost_link_dict)
logging.info("calculating supply emissions link")
self.emissions_demand_link = self.map_embodied_to_demand(self.emissions_dict, self.embodied_emissions_link_dict)
logging.info("calculating supply energy link")
self.energy_demand_link = self.map_embodied_to_demand(self.inverse_dict['energy'], self.embodied_energy_link_dict)
# self.remove_blend_and_import()
logging.info("calculate exported costs")
self.calculate_export_result('export_costs', self.cost_dict)
logging.info("calculate exported emissions")
self.calculate_export_result('export_emissions', self.emissions_dict)
logging.info("calculate exported energy")
self.calculate_export_result('export_energy', self.inverse_dict['energy'])
logging.info("calculate emissions rates for demand side")
self.calculate_demand_emissions_rates()
def calculate_embodied_supply_outputs(self):
supply_embodied_cost = self.convert_io_matrix_dict_to_df(self.cost_dict)
supply_embodied_cost.columns = [cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')]
self.outputs.supply_embodied_cost = supply_embodied_cost
supply_embodied_emissions = self.convert_io_matrix_dict_to_df(self.emissions_dict)
supply_embodied_emissions.columns = [cfg.cfgfile.get('case', 'mass_unit')]
self.outputs.supply_embodied_emissions = supply_embodied_emissions
def calculate_annual_costs(self, year):
for node in self.nodes.values():
if hasattr(node,'calculate_annual_costs'):
node.calculate_annual_costs(year)
def concatenate_annual_costs(self):
for node in self.nodes.values():
if hasattr(node,'concatenate_annual_costs'):
node.concatenate_annual_costs()
def concatenate_levelized_costs(self):
for node in self.nodes.values():
if hasattr(node,'concatenate_levelized_costs'):
node.concatenate_annual_costs()
def calculate_capacity_utilization(self):
for node in self.nodes.values():
if hasattr(node,'calculate_capacity_utilization'):
df = util.df_slice(self.io_supply_df,node.id,'supply_node')
node.calculate_capacity_utilization(df,self.years)
def remove_blend_and_import(self):
keep_list = [node.id for node in self.nodes.values() if node.supply_type != 'Blend' and node.supply_type != 'Import']
indexer = util.level_specific_indexer(self.energy_demand_link,'supply_node',[keep_list])
self.energy_demand_link = self.energy_demand_link.loc[indexer,:]
    def calculate_demand_emissions_rates(self):
        """Build self.demand_emissions_rates: per (geography, sector, final_energy,
        year, ghg) emissions rates, from each linked node's pass-through emissions
        plus any physical emissions the node carries."""
        map_dict = self.map_dict
        index = pd.MultiIndex.from_product([cfg.geographies, self.demand_sectors, map_dict.keys(),self.years, self.ghgs], names=[cfg.primary_geography, 'sector', 'final_energy','year','ghg'])
        self.demand_emissions_rates = util.empty_df(index, ['value'])
        # first pass: pass-through (combustion) emissions by final energy and year
        # NOTE: dict.iteritems is Python 2 only
        for final_energy, node_id in map_dict.iteritems():
            node = self.nodes[node_id]
            for year in self.years:
                df = node.pass_through_df_dict[year].groupby(level='ghg').sum()
                df = df.stack(level=[cfg.primary_geography,'demand_sector']).to_frame()
                df = df.reorder_levels([cfg.primary_geography,'demand_sector','ghg'])
                # NOTE(review): DataFrame.sort was removed in modern pandas (use
                # sort_index); this code targets an older pandas
                df.sort(inplace=True)
                emissions_rate_indexer = util.level_specific_indexer(self.demand_emissions_rates, ['final_energy', 'year'], [final_energy, year])
                self.demand_emissions_rates.loc[emissions_rate_indexer,:] = df.values
        # second pass: add physical emissions where the node defines them
        for final_energy, node_id in map_dict.iteritems():
            node = self.nodes[node_id]
            if hasattr(node,'emissions') and hasattr(node.emissions, 'values_physical'):
                # NOTE(review): if 'demand_sector' is already in the index, `df`
                # from the previous loop iteration is reused below -- presumably
                # values_physical never has that level here; verify
                if 'demand_sector' not in node.emissions.values_physical.index.names:
                    keys = self.demand_sectors
                    name = ['demand_sector']
                    df = pd.concat([node.emissions.values_physical]*len(keys), keys=keys, names=name)
                df = df.stack('year').to_frame()
                df = df.groupby(level=[cfg.primary_geography, 'demand_sector', 'year', 'ghg']).sum()
                df = df.reorder_levels([cfg.primary_geography, 'demand_sector', 'year', 'ghg'])
                idx = pd.IndexSlice
                df = df.loc[idx[:, :, self.years,:], :]
                emissions_rate_indexer = util.level_specific_indexer(self.demand_emissions_rates, ['final_energy'], [final_energy])
                self.demand_emissions_rates.loc[emissions_rate_indexer,:] += df.values
def set_dispatchability(self):
"""Determines the dispatchability of electricity generation nodes and nodes that demand electricity
Sets:
electricity_gen_nodes = list of all supply nodes that inject electricity onto the grid
electricity_load_nodes = dictionary of all supply nodes that demand electricity with keys of
"""
self.electricity_gen_nodes = util.recursivedict()
self.electricity_load_nodes = util.recursivedict()
for zone in self.dispatch_zones:
self.electricity_gen_nodes[zone]['flexible'] = list(set([x for x in self.injection_nodes[zone] if self.nodes[x].is_flexible == 1]))
self.electricity_gen_nodes[zone]['non_flexible'] = list(set([x for x in self.injection_nodes[zone] if self.nodes[x].is_flexible != 1]))
for zone in self.dispatch_zones:
self.electricity_load_nodes[zone]['flexible'] = list(set([x for x in self.all_electricity_load_nodes[zone] if self.nodes[x].is_flexible == 1]))
self.electricity_load_nodes[zone]['non_flexible'] = list(set([x for x in self.all_electricity_load_nodes[zone] if self.nodes[x].is_flexible != 1]))
self.electricity_gen_nodes = util.freeze_recursivedict(self.electricity_gen_nodes)
self.electricity_load_nodes = util.freeze_recursivedict(self.electricity_load_nodes)
    def set_flow_nodes(self,zone):
        # seed the recursive electricity walk for this dispatch zone: Delivery and
        # Blend nodes feeding the zone (excluding the zones themselves) carry
        # electricity through rather than injecting it
        flow_nodes = list(set([x for x in self.nodes[zone].active_coefficients_untraded.index.get_level_values('supply_node') if self.nodes[x].supply_type in ['Delivery','Blend'] and x not in self.dispatch_zones]))
        self.solve_flow_nodes(flow_nodes,zone)
    def solve_flow_nodes(self,flow_nodes, zone):
        """Recursively record every Delivery/Blend node that carries electricity
        toward dispatch zone `zone` into self.electricity_nodes[zone]."""
        for flow_node in flow_nodes:
            self.electricity_nodes[zone].append(flow_node)
            # efficiency_type 2 rows identify this flow node's upstream suppliers;
            # note the loop variable `flow_nodes` is rebound here on purpose and
            # recursed on immediately
            flow_nodes = list(set([x for x in util.df_slice(self.nodes[flow_node].active_coefficients_untraded,2,'efficiency_type').index.get_level_values('supply_node') if self.nodes[x].supply_type in ['Delivery','Blend'] and x not in self.dispatch_zones ]))
            if len(flow_nodes):
                self.solve_flow_nodes(flow_nodes,zone)
    def set_electricity_gen_nodes(self, dispatch_zone, node):
        """Determines all nodes that inject electricity onto the grid in a recursive loop
        args:
            dispatch_zone: key for dictionary to indicate whether the generation is at the transmission or distribution level
            node: checks for injection of electricity into this node from conversion nodes
        sets:
            self.injection_nodes = dictionary with dispatch_zone as key and a list of injection nodes as the value
            self.electricity_nodes = dictionary with dispatch_zone as key and a list of all nodes that transfer electricity (i.e. blend nodes, etc.)

        NOTE(review): neither parameter is referenced in the body -- the method
        iterates self.dispatch_zones directly; confirm the signature is kept for
        interface compatibility only.
        """
        for zone in self.dispatch_zones:
            self.electricity_nodes[zone].append(zone)
            # populate self.electricity_nodes[zone] via the recursive flow walk
            self.set_flow_nodes(zone)
            for flow_node in self.electricity_nodes[zone]:
                # Conversion nodes feeding a flow node are grid injection points
                injection_nodes = list(set([x for x in self.nodes[flow_node].active_coefficients_untraded.index.get_level_values('supply_node') if self.nodes[x].supply_type in ['Conversion']]))
                for injection_node in injection_nodes:
                    self.injection_nodes[zone].append(injection_node)
            self.injection_nodes[zone] = list(set(self.injection_nodes[zone]))
            self.electricity_nodes[zone] = list(set(self.electricity_nodes[zone]))
    def set_electricity_load_nodes(self):
        """Determines all nodes that demand electricity from the grid
        args:
        sets:
            all_electricity_load_nodes = dictionary with dispatch_zone as key and a list of all nodes that demand electricity from that zone
        """
        self.all_electricity_load_nodes = defaultdict(list)
        for zone in self.dispatch_zones:
            for node_id in self.electricity_nodes[zone]:
                for load_node in self.nodes.values():
                    if hasattr(load_node,'active_coefficients_untraded') and load_node.active_coefficients_untraded is not None:
                        # NOTE(review): `load_node not in self.electricity_nodes[zone]`
                        # compares a node object against a list of ids, so it is
                        # always True; the id-based check on the next line is what
                        # actually excludes electricity-carrying nodes -- confirm
                        # whether this was meant to be load_node.id
                        if node_id in load_node.active_coefficients_untraded.index.get_level_values('supply_node') and load_node not in self.electricity_nodes[zone] and load_node.supply_type != 'Storage':
                            if load_node.id not in util.flatten_list(self.electricity_nodes.values()):
                                self.all_electricity_load_nodes[zone].append(load_node.id)
    def append_heuristic_load_and_gen_to_dispatch_outputs(self, df, load_or_gen):
        """Append heuristic dispatch results to self.bulk_dispatch for output.

        Args:
            df: dispatch dataframe with a 'dispatch_feeder' index level
                (feeder 0 = bulk/transmission level; other ids = distribution feeders).
            load_or_gen: 'load' or 'gen'; bulk loads are grossed up for transmission
                losses, bulk generation is not.
        """
        if 0 in util.elements_in_index_level(df,'dispatch_feeder'):
            bulk_df = util.df_slice(df,0,'dispatch_feeder')
            if load_or_gen=='load':
                # loads at the bulk level are grossed up for transmission losses
                bulk_df = DfOper.mult([bulk_df, self.transmission_losses])
            bulk_df = self.outputs.clean_df(bulk_df)
            bulk_df.columns = [cfg.calculation_energy_unit.upper()]
            util.replace_index_name(bulk_df, 'DISPATCH_OUTPUT', 'SUPPLY_NODE')
            self.bulk_dispatch = pd.concat([self.bulk_dispatch, bulk_df.reorder_levels(self.bulk_dispatch.index.names)])
        if len([x for x in util.elements_in_index_level(df,'dispatch_feeder') if x in self.dispatch_feeders]):
            # NOTE(review): df_slice is passed self.dispatch_feeders as its third argument where
            # the bulk branch passes a level name — confirm df_slice handles this form as intended.
            distribution_df = util.df_slice(df, [x for x in util.elements_in_index_level(df,'dispatch_feeder') if x in self.dispatch_feeders], self.dispatch_feeders)
            distribution_df = self.outputs.clean_df(distribution_df)
            distribution_df.columns = [cfg.calculation_energy_unit.upper()]
            # distribution-level results are grossed up by both distribution and transmission losses
            distribution_df = DfOper.mult([distribution_df, self.distribution_losses, self.transmission_losses])
            util.replace_index_name(distribution_df, 'DISPATCH_OUTPUT', 'SUPPLY_NODE')
            distribution_df = util.remove_df_levels(distribution_df, 'DISPATCH_FEEDER')
            self.bulk_dispatch = pd.concat([self.bulk_dispatch, distribution_df.reorder_levels(self.bulk_dispatch.index.names)])
    def set_long_duration_opt(self, year):
        # MOVE
        """Sets input parameters on self.dispatch for long-duration dispatched nodes
        (ex. conventional hydro): annual energy budgets, hourly and per-period
        capacity bounds, geography, and feeder, keyed by a synthetic tech_id.
        """
        def split_and_apply(array, dispatch_periods, fun):
            # split the hourly array wherever the dispatch-period id changes, apply fun per block
            energy_by_block = np.array_split(array, np.where(np.diff(dispatch_periods)!=0)[0]+1)
            return [fun(block) for block in energy_by_block]
        self.dispatch.ld_technologies = []
        for node_id in [x for x in self.dispatch.long_duration_dispatch_order if x in self.nodes.keys()]:
            node = self.nodes[node_id]
            full_energy_shape, p_min_shape, p_max_shape = node.aggregate_flexible_electricity_shapes(year, util.remove_df_levels(util.df_slice(self.dispatch_feeder_allocation.values,year,'year'),year))
            # a node is either flexible generation or flexible load; otherwise it is skipped
            if node_id in self.flexible_gen.keys():
                lookup = self.flexible_gen
                load_or_gen = 'gen'
            elif node_id in self.flexible_load.keys():
                lookup = self.flexible_load
                load_or_gen = 'load'
            else:
                continue
            for geography in lookup[node_id].keys():
                for zone in lookup[node_id][geography].keys():
                    for feeder in lookup[node_id][geography][zone].keys():
                        capacity = util.remove_df_levels(lookup[node_id][geography][zone][feeder]['capacity'], 'resource_bin')
                        # nothing to dispatch without capacity
                        if capacity.sum().sum() == 0:
                            continue
                        annual_energy = lookup[node_id][geography][zone][feeder]['energy'].values.sum()
                        opt_periods = self.dispatch.period_repeated
                        dispatch_window = self.dispatch.dispatch_window_dict[self.dispatch.node_config_dict[node_id].dispatch_window_id]
                        dispatch_periods = getattr(shape.shapes.active_dates_index, dispatch_window)
                        # loads carry a negative energy sign in the optimization
                        if load_or_gen=='load':
                            annual_energy = copy.deepcopy(annual_energy) *-1
                        if p_min_shape is None:
                            # no shape: flat bounds of 0..capacity for every resolution
                            p_min = np.repeat(0.0,len(dispatch_periods))
                            p_max = np.repeat(capacity.sum().values[0],len(dispatch_periods))
                            hourly_p_min = np.repeat(0.0,len(self.dispatch.hours))
                            opt_p_min = np.repeat(0.0,len(opt_periods))
                            opt_p_max = np.repeat(capacity.sum().values[0],len(opt_periods))
                            hourly_p_max = np.repeat(capacity.sum().values[0],len(self.dispatch.hours))
                        else:
                            # shaped bounds: hourly profile, then period means at two resolutions
                            hourly_p_min = util.remove_df_levels(util.DfOper.mult([capacity, p_min_shape]), cfg.primary_geography).values
                            p_min = np.array(split_and_apply(hourly_p_min, dispatch_periods, np.mean))
                            opt_p_min = np.array(split_and_apply(hourly_p_min, opt_periods, np.mean))
                            hourly_p_max = util.remove_df_levels(util.DfOper.mult([capacity, p_max_shape]),cfg.primary_geography).values
                            p_max = np.array(split_and_apply(hourly_p_max, dispatch_periods, np.mean))
                            opt_p_max = np.array(split_and_apply(hourly_p_max, opt_periods, np.mean))
                        tech_id = str(tuple([geography,node_id, feeder]))
                        self.dispatch.ld_technologies.append(tech_id)
                        #reversed sign for load so that pmin always represents greatest load or smallest generation
                        if zone == self.transmission_node_id:
                            # bulk-level loads are grossed up for transmission losses; bulk gen is not
                            if load_or_gen=='load':
                                p_min *= self.transmission_losses.loc[geography,:].values[0]
                                p_max *= self.transmission_losses.loc[geography,:].values[0]
                                opt_p_min *= self.transmission_losses.loc[geography,:].values[0]
                                opt_p_max *= self.transmission_losses.loc[geography,:].values[0]
                                hourly_p_min *=self.transmission_losses.loc[geography,:].values[0]
                                hourly_p_max *= self.transmission_losses.loc[geography,:].values[0]
                                annual_energy*=self.transmission_losses.loc[geography,:].values[0]
                        else:
                            # distribution-level: gross up for both transmission and feeder-specific distribution losses
                            p_min *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[cfg.dispatch_geography, 'dispatch_feeder']).values[0][0]
                            p_max *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[cfg.dispatch_geography, 'dispatch_feeder']).values[0][0]
                            opt_p_min *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[cfg.dispatch_geography, 'dispatch_feeder']).values[0][0]
                            opt_p_max *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[cfg.dispatch_geography, 'dispatch_feeder']).values[0][0]
                            hourly_p_min *=self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[cfg.dispatch_geography, 'dispatch_feeder']).values[0][0]
                            hourly_p_max *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[cfg.dispatch_geography, 'dispatch_feeder']).values[0][0]
                            annual_energy *= self.transmission_losses.loc[geography,:].values[0] * util.df_slice(self.distribution_losses, [geography,feeder],[cfg.dispatch_geography, 'dispatch_feeder']).values[0][0]
                        if load_or_gen == 'gen':
                            max_capacity = opt_p_max
                            min_capacity = opt_p_min
                            max_hourly_capacity = hourly_p_max
                            min_hourly_capacity = hourly_p_min
                        else:
                            # for load, bounds are negated and swapped (see sign-convention comment above)
                            max_capacity = -opt_p_min
                            min_capacity = -opt_p_max
                            max_hourly_capacity = -hourly_p_min
                            min_hourly_capacity = -hourly_p_max
                        self.dispatch.annual_ld_energy[tech_id] = annual_energy
                        self.dispatch.ld_geography[tech_id] = geography
                        # hourly capacity dicts keyed by (tech_id, hour index)
                        self.dispatch.ld_capacity.update(dict([((tech_id, h), value) for h, value in enumerate(max_hourly_capacity)]))
                        self.dispatch.ld_min_capacity.update(dict([((tech_id, h), value) for h, value in enumerate(min_hourly_capacity)]))
                        for period in self.dispatch.periods:
                            self.dispatch.capacity[period][tech_id] = max_capacity[period]
                            self.dispatch.min_capacity[period][tech_id] = min_capacity[period]
                            self.dispatch.geography[period][tech_id] = geography
                            self.dispatch.feeder[period][tech_id] = feeder
    def solve_heuristic_load_and_gen(self, year):
        # MOVE
        """Solves dispatch shapes for heuristically dispatched nodes (ex. conventional hydro)
        and adds the resulting hourly dispatch into the bulk/distribution load and gen signals.
        """
        def split_and_apply(array, dispatch_periods, fun):
            # split the hourly array wherever the dispatch-period id changes, apply fun per block
            energy_by_block = np.array_split(array, np.where(np.diff(dispatch_periods)!=0)[0]+1)
            return [fun(block) for block in energy_by_block]
        # zero-valued copies of bulk_gen used as accumulators for dispatched quantities
        self.dispatched_bulk_load = copy.deepcopy(self.bulk_gen)*0
        self.dispatched_bulk_gen = copy.deepcopy(self.bulk_gen)*0
        self.dispatched_dist_load = copy.deepcopy(self.bulk_gen)*0
        self.dispatched_dist_gen = copy.deepcopy(self.bulk_gen)*0
        for node_id in [x for x in self.dispatch.heuristic_dispatch_order if x in self.nodes.keys() ]:
            node = self.nodes[node_id]
            full_energy_shape, p_min_shape, p_max_shape = node.aggregate_flexible_electricity_shapes(year, util.remove_df_levels(util.df_slice(self.dispatch_feeder_allocation.values,year,'year'),year))
            # a node is either flexible generation or flexible load; otherwise it is skipped
            if node_id in self.flexible_gen.keys():
                lookup = self.flexible_gen
                load_or_gen = 'gen'
            elif node_id in self.flexible_load.keys():
                lookup = self.flexible_load
                load_or_gen = 'load'
            else:
                continue
            logging.info("  solving dispatch for %s" %node.name)
            geography_list = []
            for geography in lookup[node_id].keys():
                for zone in lookup[node_id][geography].keys():
                    feeder_list = []
                    for feeder in lookup[node_id][geography][zone].keys():
                        capacity = lookup[node_id][geography][zone][feeder]['capacity']
                        energy = lookup[node_id][geography][zone][feeder]['energy']
                        dispatch_window = self.dispatch.dispatch_window_dict[self.dispatch.node_config_dict[node_id].dispatch_window_id]
                        dispatch_periods = getattr(shape.shapes.active_dates_index, dispatch_window)
                        num_years = len(dispatch_periods)/8766.
                        # loads carry a negative energy sign for the budget dispatch
                        if load_or_gen=='load':
                            energy = copy.deepcopy(energy) *-1
                        if full_energy_shape is not None and 'dispatch_feeder' in full_energy_shape.index.names:
                            energy_shape = util.df_slice(full_energy_shape, feeder, 'dispatch_feeder')
                        else:
                            energy_shape = full_energy_shape
                        if energy_shape is None:
                            # no shape: spread annual energy across dispatch periods by period length
                            energy_budgets = util.remove_df_levels(energy,[cfg.primary_geography,'resource_bin']).values * np.diff([0]+list(np.where(np.diff(dispatch_periods)!=0)[0]+1)+[len(dispatch_periods)-1])/8766.*num_years
                            energy_budgets = energy_budgets[0]
                        else:
                            # shaped: hourly energy summed within each dispatch period
                            hourly_energy = util.remove_df_levels(util.DfOper.mult([energy,energy_shape]), cfg.primary_geography).values
                            energy_budgets = split_and_apply(hourly_energy, dispatch_periods, sum)
                        if p_min_shape is None:
                            p_min = 0.0
                            p_max = capacity.sum().values[0]
                        else:
                            hourly_p_min = util.remove_df_levels(util.DfOper.mult([capacity,p_min_shape]),cfg.primary_geography).values
                            p_min = split_and_apply(hourly_p_min, dispatch_periods, np.mean)
                            hourly_p_max = util.remove_df_levels(util.DfOper.mult([capacity,p_max_shape]),cfg.primary_geography).values
                            p_max = split_and_apply(hourly_p_max, dispatch_periods, np.mean)
                        if zone == self.transmission_node_id:
                            # timeshift_type 2 is the net-load slice used as the dispatch signal
                            net_indexer = util.level_specific_indexer(self.bulk_net_load,[cfg.dispatch_geography,'timeshift_type'], [geography,2])
                            if load_or_gen=='load':
                                self.energy_budgets = energy_budgets
                                self.p_min = p_min
                                self.p_max = p_max
                                dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.bulk_net_load.loc[net_indexer,:].values.flatten(),energy_budgets, dispatch_periods, p_min, p_max)])
                                self.dispatch_result = dispatch
                                indexer = util.level_specific_indexer(self.bulk_load,[cfg.dispatch_geography], [geography])
                                self.bulk_load.loc[indexer,:] += dispatch
                                indexer = util.level_specific_indexer(self.bulk_load,cfg.dispatch_geography, geography)
                                self.dispatched_bulk_load.loc[indexer,:] += dispatch
                            else:
                                indexer = util.level_specific_indexer(self.bulk_gen,cfg.dispatch_geography, geography)
                                dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.bulk_net_load.loc[net_indexer,:].values.flatten(),np.array(energy_budgets).flatten(), dispatch_periods, p_min, p_max)])
                                self.bulk_gen.loc[indexer,:] += dispatch
                                self.dispatched_bulk_gen.loc[indexer,:] += dispatch
                        else:
                            # NOTE(review): this branch reads net_indexer, which is only assigned in the
                            # transmission branch above — it carries over from a prior zone iteration and
                            # would raise NameError if a distribution zone were processed first. Confirm.
                            if load_or_gen=='load':
                                # NOTE(review): indexer built on self.dist_load is immediately overwritten
                                # in the loop below, which updates self.distribution_load — confirm intended.
                                indexer = util.level_specific_indexer(self.dist_load,[cfg.dispatch_geography,'dispatch_feeder'], [geography,feeder])
                                dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.dist_net_load_no_feeders.loc[net_indexer,:].values.flatten(),energy_budgets, dispatch_periods, p_min, p_max)])
                                for timeshift_type in list(set(self.distribution_load.index.get_level_values('timeshift_type'))):
                                    indexer = util.level_specific_indexer(self.distribution_load,[cfg.dispatch_geography,'timeshift_type'], [geography,timeshift_type])
                                    self.distribution_load.loc[indexer,:] += dispatch
                                indexer = util.level_specific_indexer(self.distribution_load,cfg.dispatch_geography, geography)
                                self.dispatched_dist_load.loc[indexer,:] += dispatch
                            else:
                                # NOTE(review): indexer is built on self.dist_gen but applied to
                                # self.distribution_gen — confirm the two share index structure.
                                indexer = util.level_specific_indexer(self.dist_gen,[cfg.dispatch_geography,'dispatch_feeder'], [geography,feeder])
                                dispatch = np.transpose([dispatch_budget.dispatch_to_energy_budget(self.dist_net_load_no_feeders.loc[net_indexer,:].values.flatten(),energy_budgets, dispatch_periods, p_min, p_max)])
                                self.distribution_gen.loc[indexer,:] += dispatch
                                self.dispatched_dist_gen.loc[indexer,:] += dispatch
                        # collect the hourly dispatch for output, sign-flipped for generation
                        index = pd.MultiIndex.from_product([shape.shapes.active_dates_index,[feeder]],names=['weather_datetime','dispatch_feeder'])
                        dispatch=pd.DataFrame(dispatch,index=index,columns=['value'])
                        if load_or_gen=='gen':
                            dispatch *=-1
                        feeder_list.append(dispatch)
                    geography_list.append(pd.concat(feeder_list))
            self.update_net_load_signal()
        # NOTE(review): the lines below sit outside the node_id loop, so outputs are only
        # assembled for the *last* dispatched node — possibly an indentation bug; confirm.
        df = pd.concat(geography_list, keys=lookup[node_id].keys(), names=[cfg.dispatch_geography])
        df = pd.concat([df], keys=[node_id], names=['supply_node'])
        df = pd.concat([df], keys=[year], names=['year'])
        if year in self.dispatch_write_years:
            self.append_heuristic_load_and_gen_to_dispatch_outputs(df, load_or_gen)
def prepare_optimization_inputs(self,year):
# MOVE
logging.info(" preparing optimization inputs")
self.dispatch.set_timeperiods()
self.dispatch.set_losses(self.transmission_losses,self.distribution_losses)
self.set_net_load_thresholds(year)
#freeze the bulk net load as opt bulk net load just in case we want to rerun a year. If we don't do this, bulk_net_load would be updated with optimization results
self.dispatch.set_opt_loads(self.distribution_load,self.distribution_gen,self.bulk_load,self.bulk_gen,self.dispatched_bulk_load, self.bulk_net_load, self.active_thermal_dispatch_df)
self.dispatch.set_technologies(self.storage_capacity_dict, self.storage_efficiency_dict, self.active_thermal_dispatch_df)
self.set_long_duration_opt(year)
    def set_grid_capacity_factors(self, year):
        """Set distribution- and transmission-grid capacity factors (mean/peak of net flow)
        for this year and the following dispatch-step years, then update grid stocks.
        """
        max_year = max(self.years)
        distribution_grid_node = self.nodes[self.distribution_grid_node_id]
        # capacity factor = mean net load / peak net load, on the timeshift_type==2 slice
        dist_cap_factor = util.DfOper.divi([util.df_slice(self.dist_only_net_load,2,'timeshift_type').groupby(level=[cfg.dispatch_geography,'dispatch_feeder']).mean(),util.df_slice(self.dist_only_net_load,2,'timeshift_type').groupby(level=[cfg.dispatch_geography,'dispatch_feeder']).max()])
        geography_map_key = distribution_grid_node.geography_map_key if hasattr(distribution_grid_node, 'geography_map_key') and distribution_grid_node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
        if cfg.dispatch_geography != cfg.primary_geography:
            # map from dispatch geography back to primary geography as an intensity
            map_df = cfg.geo.map_df(cfg.dispatch_geography,cfg.primary_geography, normalize_as='intensity', map_key=geography_map_key, eliminate_zeros=False)
            dist_cap_factor = util.remove_df_levels(util.DfOper.mult([dist_cap_factor,map_df]),cfg.dispatch_geography)
        dist_cap_factor = util.remove_df_levels(util.DfOper.mult([dist_cap_factor, util.df_slice(self.dispatch_feeder_allocation.values,year, 'year')]),'dispatch_feeder')
        # NOTE(review): DataFrame.sort() only exists in legacy pandas (<0.20) — confirm pinned version
        dist_cap_factor = dist_cap_factor.reorder_levels([cfg.primary_geography,'demand_sector']).sort()
        distribution_grid_node.capacity_factor.values.loc[:,year] = dist_cap_factor.values
        # carry the factor forward across the dispatch step (capped at the final model year)
        for i in range(0,int(cfg.cfgfile.get('case','dispatch_step'))+1):
            distribution_grid_node.capacity_factor.values.loc[:,min(year+i,max_year)] = dist_cap_factor.values
        if hasattr(distribution_grid_node, 'stock'):
            distribution_grid_node.update_stock(year,3)
        #hardcoded 50% assumption of colocated energy for dispatched flexible gen. I.e. wind and solar. Means that transmission capacity isn't needed to support energy demands.
        #TODO change to config parameter
        bulk_flow = util.df_slice(util.DfOper.subt([util.DfOper.add([self.bulk_load,util.remove_df_levels(self.dist_only_net_load,'dispatch_feeder')]),self.dispatched_bulk_load * .5]),2,'timeshift_type')
        bulk_cap_factor = util.DfOper.divi([bulk_flow.groupby(level=cfg.dispatch_geography).mean(),bulk_flow.groupby(level=cfg.dispatch_geography).max()])
        transmission_grid_node = self.nodes[self.transmission_node_id]
        geography_map_key = transmission_grid_node.geography_map_key if hasattr(transmission_grid_node, 'geography_map_key') and transmission_grid_node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
        if cfg.dispatch_geography != cfg.primary_geography:
            map_df = cfg.geo.map_df(cfg.dispatch_geography,cfg.primary_geography, normalize_as='intensity', map_key=geography_map_key, eliminate_zeros=False)
            bulk_cap_factor = util.remove_df_levels(util.DfOper.mult([bulk_cap_factor,map_df]),cfg.dispatch_geography)
        transmission_grid_node.capacity_factor.values.loc[:,year] = bulk_cap_factor.values
        for i in range(0,int(cfg.cfgfile.get('case','dispatch_step'))+1):
            transmission_grid_node.capacity_factor.values.loc[:,min(year+i,max_year)] = bulk_cap_factor.values
        if hasattr(transmission_grid_node, 'stock'):
            transmission_grid_node.update_stock(year,3)
    def solve_storage_and_flex_load_optimization(self,year):
        # MOVE
        """Prepares, solves, and updates the net load with results from the storage and
        flexible load optimization, then writes the per-category dispatch outputs.
        """
        self.dispatch.set_year(year)
        self.prepare_optimization_inputs(year)
        logging.info("  solving dispatch for storage and dispatchable load")
        self.dispatch.solve_optimization()
        storage_charge = self.dispatch.storage_df.xs('charge', level='charge_discharge')
        storage_discharge = self.dispatch.storage_df.xs('discharge', level='charge_discharge')
        #load and gen are the same in the ld_df, just with different signs. We want to separate and use absolute values (i.e. *- when it is load)
        timeshift_types = list(set(self.distribution_load.index.get_level_values('timeshift_type')))
        if len(self.dispatch.ld_technologies):
            # negative ld values are load (sign-flipped to positive); positive values are generation
            ld_load = util.remove_df_levels(-self.dispatch.ld_df[self.dispatch.ld_df.values<0],'supply_node')
            ld_gen = util.remove_df_levels(self.dispatch.ld_df[self.dispatch.ld_df.values>0], 'supply_node')
            dist_ld_load = util.add_and_set_index(util.df_slice(ld_load, self.dispatch_feeders, 'dispatch_feeder'), 'timeshift_type', timeshift_types)
            # empty frames are normalized to None so DfOper.add can skip them
            if not len(dist_ld_load):
                dist_ld_load = None
            if not len(ld_load):
                ld_load = None
            if not len(ld_gen):
                ld_gen = None
        else:
            ld_load = None
            ld_gen = None
            dist_ld_load = None
        # more than one feeder present means distribution-level storage exists
        if len(set(storage_charge.index.get_level_values('dispatch_feeder')))>1:
            dist_storage_charge = util.add_and_set_index(util.df_slice(storage_charge, self.dispatch_feeders, 'dispatch_feeder'), 'timeshift_type', timeshift_types)
            dist_storage_discharge = util.df_slice(storage_discharge, self.dispatch_feeders, 'dispatch_feeder')
        else:
            dist_storage_charge = None
            dist_storage_discharge = None
        dist_flex_load = util.add_and_set_index(util.df_slice(self.dispatch.flex_load_df, self.dispatch_feeders, 'dispatch_feeder'), 'timeshift_type', timeshift_types)
        self.distribution_load = util.DfOper.add((self.distribution_load, dist_storage_charge, dist_flex_load, dist_ld_load))
        self.distribution_gen = util.DfOper.add((self.distribution_gen, dist_storage_discharge,util.df_slice(ld_gen, self.dispatch_feeders, 'dispatch_feeder',return_none=True) ))
        if self.dispatch.transmission_flow_df is not None:
            # exports are grossed up by line losses; imports are measured at the receiving end
            flow_with_losses = util.DfOper.divi((self.dispatch.transmission_flow_df, 1 - self.dispatch.transmission.losses.get_values(year)))
            imports = self.dispatch.transmission_flow_df.groupby(level=['geography_to', 'weather_datetime']).sum()
            exports = flow_with_losses.groupby(level=['geography_from', 'weather_datetime']).sum()
            imports.index.names = [cfg.dispatch_geography, 'weather_datetime']
            exports.index.names = [cfg.dispatch_geography, 'weather_datetime']
        else:
            imports = None
            exports = None
        # feeder 0 is the bulk system; bulk loads/exports are net of transmission losses
        self.bulk_load = util.DfOper.add((self.bulk_load, storage_charge.xs(0, level='dispatch_feeder'), util.DfOper.divi([util.df_slice(ld_load, 0, 'dispatch_feeder',return_none=True),self.transmission_losses]),util.DfOper.divi([exports,self.transmission_losses])))
        self.bulk_gen = util.DfOper.add((self.bulk_gen, storage_discharge.xs(0, level='dispatch_feeder'),util.df_slice(ld_gen, 0, 'dispatch_feeder',return_none=True),imports))
        # freeze pre-update net load so a year can be rerun without double-counting opt results
        self.opt_bulk_net_load = copy.deepcopy(self.bulk_net_load)
        self.update_net_load_signal()
        self.produce_distributed_storage_outputs(year)
        self.produce_bulk_storage_outputs(year)
        self.produce_flex_load_outputs(year)
        self.produce_ld_outputs(year)
        self.produce_transmission_outputs(year)
    def produce_transmission_outputs(self, year):
        # MOVE
        """Append transmission import/export flows to self.bulk_dispatch for output years."""
        if year in self.dispatch_write_years and self.dispatch.transmission_flow_df is not None:
            df_index_reset = self.dispatch.transmission_flow_df.reset_index()
            # NOTE(review): map(...) is assigned directly into columns — requires Python 2
            # (list result); under Python 3 this would store a map object. Confirm runtime.
            df_index_reset['geography_from'] = map(cfg.outputs_id_map[cfg.dispatch_geography].get, df_index_reset['geography_from'].values)
            df_index_reset['geography_to'] = map(cfg.outputs_id_map[cfg.dispatch_geography].get, df_index_reset['geography_to'].values)
            # exports include line losses (flow divided by 1 - losses)
            df_index_reset_with_losses = DfOper.divi((self.dispatch.transmission_flow_df, 1 - self.dispatch.transmission.losses.get_values(year))).reset_index()
            df_index_reset_with_losses['geography_from'] = map(cfg.outputs_id_map[cfg.dispatch_geography].get, df_index_reset_with_losses['geography_from'].values)
            df_index_reset_with_losses['geography_to'] = map(cfg.outputs_id_map[cfg.dispatch_geography].get, df_index_reset_with_losses['geography_to'].values)
            # imports are attributed to the receiving geography, exports to the sending one
            imports = df_index_reset.rename(columns={'geography_to':cfg.dispatch_geography})
            exports = df_index_reset_with_losses.rename(columns={'geography_from':cfg.dispatch_geography})
            exports['geography_to'] = 'TRANSMISSION EXPORT TO ' + exports['geography_to']
            imports['geography_from'] = 'TRANSMISSION IMPORT FROM ' + imports['geography_from']
            imports = imports.rename(columns={'geography_from':'DISPATCH_OUTPUT'})
            exports = exports.rename(columns={'geography_to':'DISPATCH_OUTPUT'})
            imports = imports.set_index([cfg.dispatch_geography, 'DISPATCH_OUTPUT', 'weather_datetime'])
            exports = exports.set_index([cfg.dispatch_geography, 'DISPATCH_OUTPUT', 'weather_datetime'])
            # drop any lines that don't have flows this is done to reduce the size of outputs
            imports = imports.groupby(level=[cfg.dispatch_geography, 'DISPATCH_OUTPUT']).filter(lambda x: x.sum() > 0)
            exports = exports.groupby(level=[cfg.dispatch_geography, 'DISPATCH_OUTPUT']).filter(lambda x: x.sum() > 0)
            # imports are negated so they show as supply in the dispatch output
            transmission_output = pd.concat((-imports, exports))
            transmission_output = util.add_and_set_index(transmission_output, 'year', year)
            transmission_output.columns = [cfg.calculation_energy_unit.upper()]
            transmission_output = self.outputs.clean_df(transmission_output)
            self.bulk_dispatch = pd.concat([self.bulk_dispatch, transmission_output.reorder_levels(self.bulk_dispatch.index.names)])
    def produce_distributed_storage_outputs(self, year):
        # MOVE
        """Append distribution-level storage charge/discharge to self.bulk_dispatch for output years."""
        # more than one feeder in storage_df means distribution-level storage exists
        if year in self.dispatch_write_years and len(set(self.dispatch.storage_df.index.get_level_values('dispatch_feeder')))>1 :
            dist_storage_df = util.df_slice(self.dispatch.storage_df, self.dispatch_feeders, 'dispatch_feeder')
            # gross up by distribution and transmission losses, then sum over feeders
            distribution_df = util.remove_df_levels(util.DfOper.mult([dist_storage_df, self.distribution_losses,self.transmission_losses]), 'dispatch_feeder')
            distribution_df.columns = [cfg.calculation_energy_unit.upper()]
            charge_df = util.df_slice(distribution_df,'charge','charge_discharge')
            charge_df = self.outputs.clean_df(charge_df)
            charge_df = pd.concat([charge_df],keys=['DISTRIBUTED STORAGE CHARGE'],names=['DISPATCH_OUTPUT'])
            self.bulk_dispatch = pd.concat([self.bulk_dispatch, charge_df.reorder_levels(self.bulk_dispatch.index.names)])
#            self.bulk_dispatch = util.DfOper.add([self.bulk_dispatch, charge_df])
            # discharge is sign-flipped so it shows as supply in the dispatch output
            discharge_df = util.df_slice(distribution_df,'discharge','charge_discharge')*-1
            discharge_df = self.outputs.clean_df(discharge_df)
            discharge_df = pd.concat([discharge_df],keys=['DISTRIBUTED STORAGE DISCHARGE'],names=['DISPATCH_OUTPUT'])
            self.bulk_dispatch = pd.concat([self.bulk_dispatch, discharge_df.reorder_levels(self.bulk_dispatch.index.names)])
#            self.bulk_dispatch = util.DfOper.add([self.bulk_dispatch, discharge_df])
    def produce_ld_outputs(self,year):
        # MOVE
        """Append long-duration dispatch results (distribution and bulk) to self.bulk_dispatch."""
        if year in self.dispatch_write_years and self.dispatch.ld_df is not None:
            #produce distributed long duration outputs
            #- changes the sign coming out of the dispatch, with is the reverse of what we want for outputs
            ld_df = -util.df_slice(self.dispatch.ld_df, self.dispatch_feeders, 'dispatch_feeder')
            if len(ld_df):
                ld_df = util.add_and_set_index(ld_df, 'year', year)
                ld_df.columns = [cfg.calculation_energy_unit.upper()]
                ld_df= self.outputs.clean_df(ld_df)
                # combine feeder and supply node into a single output label
                ld_df.reset_index(['SUPPLY_NODE','DISPATCH_FEEDER'],inplace=True)
                ld_df['DISPATCH_OUTPUT'] = ld_df['DISPATCH_FEEDER'] + " " + ld_df['SUPPLY_NODE']
                ld_df.set_index('DISPATCH_OUTPUT',inplace=True, append=True)
                #remove the columns we used to set the dispatch output name
                ld_df = ld_df.iloc[:,0].to_frame()
                self.bulk_dispatch = pd.concat([self.bulk_dispatch, ld_df.reorder_levels(self.bulk_dispatch.index.names)])
            #- changes the sign coming out of the dispatch, with is the reverse of what we want for outputs
            #produce bulk long duration outputs
            # feeder 0 is the bulk (transmission) system
            ld_df = -util.df_slice(self.dispatch.ld_df, 0, 'dispatch_feeder')
            if len(ld_df):
                ld_df = util.add_and_set_index(ld_df, 'year', year)
                ld_df.columns = [cfg.calculation_energy_unit.upper()]
                ld_df= self.outputs.clean_df(ld_df)
                util.replace_index_name(ld_df,'DISPATCH_OUTPUT', 'SUPPLY_NODE')
                self.bulk_dispatch = pd.concat([self.bulk_dispatch, ld_df.reorder_levels(self.bulk_dispatch.index.names)])
def produce_bulk_storage_outputs(self, year):
# MOVE
if year in self.dispatch_write_years:
bulk_df = self.dispatch.storage_df.xs(0, level='dispatch_feeder')
bulk_df = util.add_and_set_index(bulk_df, 'year', year)
bulk_df.columns = [cfg.calculation_energy_unit.upper()]
charge_df = util.DfOper.mult([util.df_slice(bulk_df,'charge','charge_discharge'),self.transmission_losses])
charge_df = self.outputs.clean_df(charge_df)
charge_df = pd.concat([charge_df],keys=['BULK STORAGE CHARGE'],names=['DISPATCH_OUTPUT'])
self.bulk_dispatch = pd.concat([self.bulk_dispatch, charge_df.reorder_levels(self.bulk_dispatch.index.names)])
# self.bulk_dispatch = util.DfOper.add([self.bulk_dispatch, charge_df])
discharge_df = util.df_slice(bulk_df,'discharge','charge_discharge')*-1
discharge_df = self.outputs.clean_df(discharge_df)
discharge_df = pd.concat([discharge_df], keys=['BULK STORAGE DISCHARGE'], names=['DISPATCH_OUTPUT'])
self.bulk_dispatch = pd.concat([self.bulk_dispatch, discharge_df.reorder_levels(self.bulk_dispatch.index.names)])
# self.bulk_dispatch = util.DfOper.add([self.bulk_dispatch, discharge_df])
    def produce_flex_load_outputs(self, year):
        # MOVE
        """Append flexible-load dispatch results, labeled per feeder, to self.bulk_dispatch."""
        if year in self.dispatch_write_years:
            flex_load_df = util.df_slice(self.dispatch.flex_load_df, self.dispatch_feeders, 'dispatch_feeder')
            flex_load_df.columns = [cfg.calculation_energy_unit.upper()]
            # gross up distribution-level flexible load for distribution and transmission losses
            flex_load_df = DfOper.mult([flex_load_df, self.distribution_losses, self.transmission_losses])
            flex_load_df= self.outputs.clean_df(flex_load_df)
            # relabel each feeder as '<FEEDER> FLEXIBLE LOAD' for the dispatch output
            label_replace_dict = dict(zip(util.elements_in_index_level(flex_load_df,'DISPATCH_FEEDER'),[x+' FLEXIBLE LOAD' for x in util.elements_in_index_level(flex_load_df,'DISPATCH_FEEDER')]))
            util.replace_index_label(flex_load_df,label_replace_dict,'DISPATCH_FEEDER')
            util.replace_index_name(flex_load_df,'DISPATCH_OUTPUT','DISPATCH_FEEDER')
            self.bulk_dispatch = pd.concat([self.bulk_dispatch, flex_load_df.reorder_levels(self.bulk_dispatch.index.names)])
#            self.bulk_dispatch = util.DfOper.add([self.bulk_dispatch, flex_load_df])
    def set_distribution_losses(self,year):
        """Compute per-feeder distribution loss factors for the year (supply-weighted
        coefficient ratio), mapped to the dispatch geography if it differs.
        """
        distribution_grid_node =self.nodes[self.distribution_grid_node_id]
        coefficients = distribution_grid_node.active_coefficients_total.sum().to_frame()
        indexer = util.level_specific_indexer(self.dispatch_feeder_allocation.values, 'year', year)
        # supply-weighted loss ratio: (coefficients * allocation * supply) / (allocation * supply)
        a = util.DfOper.mult([coefficients, self.dispatch_feeder_allocation.values.loc[indexer,:], distribution_grid_node.active_supply])
        b = util.DfOper.mult([self.dispatch_feeder_allocation.values.loc[indexer,:], distribution_grid_node.active_supply])
        # fillna(1) treats divisions with no supply as lossless
        self.distribution_losses = util.DfOper.divi([util.remove_df_levels(a,'demand_sector'),util.remove_df_levels(b,'demand_sector')]).fillna(1)
        geography_map_key = distribution_grid_node.geography_map_key if hasattr(distribution_grid_node, 'geography_map_key') and distribution_grid_node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
        if cfg.dispatch_geography != cfg.primary_geography:
            map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography,normalize_as='intensity',map_key=geography_map_key,eliminate_zeros=False)
            self.distribution_losses = util.remove_df_levels(DfOper.mult([self.distribution_losses,map_df]),cfg.primary_geography)
def set_transmission_losses(self,year):
transmission_grid_node =self.nodes[self.transmission_node_id]
coefficients = transmission_grid_node.active_coefficients_total.sum().to_frame()
coefficients.columns = [year]
geography_map_key = transmission_grid_node.geography_map_key if hasattr(transmission_grid_node, 'geography_map_key') and transmission_grid_node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
if cfg.dispatch_geography != cfg.primary_geography:
map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography,normalize_as='intensity',map_key=geography_map_key,eliminate_zeros=False)
self.transmission_losses = util.remove_df_levels(DfOper.mult([coefficients,map_df]),cfg.primary_geography)
else:
self.transmission_losses = coefficients
self.transmission_losses = util.remove_df_levels(self.transmission_losses,'demand_sector',agg_function='mean')
def set_net_load_thresholds(self, year):
# MOVE?
distribution_grid_node = self.nodes[self.distribution_grid_node_id]
dist_stock = distribution_grid_node.stock.values.groupby(level=[cfg.primary_geography,'demand_sector']).sum().loc[:,year].to_frame()
dist_stock = util.remove_df_levels(DfOper.mult([dist_stock, util.df_slice(self.dispatch_feeder_allocation.values,year,'year')]),'demand_sector')
geography_map_key = distribution_grid_node.geography_map_key if hasattr(distribution_grid_node, 'geography_map_key') and distribution_grid_node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
if cfg.dispatch_geography != cfg.primary_geography:
map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography, normalize_as='total',map_key=geography_map_key,eliminate_zeros=False)
dist_stock = util.remove_df_levels(DfOper.mult([dist_stock,map_df]),cfg.primary_geography)
transmission_grid_node = self.nodes[self.transmission_node_id]
transmission_stock = transmission_grid_node.stock.values.groupby(level=[cfg.primary_geography]).sum().loc[:,year].to_frame()
geography_map_key = transmission_grid_node.geography_map_key if hasattr(distribution_grid_node, 'geography_map_key') and transmission_grid_node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
if cfg.dispatch_geography != cfg.primary_geography:
map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography, normalize_as='total', map_key=geography_map_key,eliminate_zeros=False)
transmission_stock = util.remove_df_levels(DfOper.mult([transmission_stock,map_df]),cfg.primary_geography)
self.dispatch.set_thresholds(dist_stock,transmission_stock)
    def prepare_flexible_load(self,year):
        """Calculates the availability of flexible load for the hourly dispatch. Used for nodes like hydrogen and P2G.
        Args:
            year (int) = year of analysis
        Sets:
            flexible_load (dict) = nested dictionary keyed by supply node id, dispatch geography, zone (i.e. transmission
            grid or distribution grid), dispatch feeder, and 'energy' or 'capacity', with dataframe values
        """
        self.flexible_load= util.recursivedict()
        for zone in self.dispatch_zones:
            for node_id in self.electricity_load_nodes[zone]['flexible']:
                node = self.nodes[node_id]
                if hasattr(node.stock, 'coefficients'):
                    # stock-based node: demand is stock (energy and capacity) times the node's electricity input coefficients
                    indexer = util.level_specific_indexer(node.stock.coefficients.loc[:,year],'supply_node',[self.electricity_nodes[zone]+[zone]])
                    energy_demand = util.DfOper.mult([util.remove_df_levels(node.stock.values_energy.loc[:,year].to_frame(),['vintage','supply_technology']), util.remove_df_levels(node.stock.coefficients.loc[indexer,year].to_frame(),['vintage','supply_technology','resource_bin'])])
                    capacity = util.DfOper.mult([util.remove_df_levels(node.stock.values.loc[:,year].to_frame(),['vintage','supply_technology']), util.remove_df_levels(node.stock.coefficients.loc[indexer,year].to_frame(),['vintage','supply_technology','resource_bin'])])
                else:
                    # non-stock node: derive capacity from energy demand and the node capacity factor (annual -> hourly units)
                    indexer = util.level_specific_indexer(node.active_coefficients_untraded,'supply_node',[self.electricity_nodes[zone]+[zone]])
                    energy_demand = util.DfOper.mult([node.active_coefficients_untraded, node.active_supply])
                    capacity = util.DfOper.divi([energy_demand,node.capacity_factor.values.loc[:,year].to_frame()])/ util.unit_conversion(unit_to_num='hour', unit_from_num = 'year')[0]
                    # division can produce NaN/inf where the capacity factor is zero
                    capacity.replace([np.nan,np.inf], 0,inplace=True)
                if zone == self.distribution_node_id and 'demand_sector' not in node.stock.values.index.names:
                    #requires energy on the distribution system to be allocated to feeder for dispatch
                    energy_demand = DfOper.mult([energy_demand, node.active_supply.groupby(level=[cfg.primary_geography,'demand_sector']).transform(lambda x: x/x.sum())])
                    capacity = DfOper.mult([capacity, node.active_supply.groupby(level=[cfg.primary_geography,'demand_sector']).transform(lambda x: x/x.sum())])
                #geomap to dispatch geography
                remove_levels = []
                if cfg.dispatch_geography != cfg.primary_geography:
                    # node-specific map key takes precedence over the configured default
                    geography_map_key = node.geography_map_key if hasattr(node, 'geography_map_key') and node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
                    map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography,normalize_as='total',map_key=geography_map_key, eliminate_zeros=False)
                    energy_demand = DfOper.mult([energy_demand,map_df])
                    capacity = DfOper.mult([capacity,map_df])
                    remove_levels.append(cfg.dispatch_geography)
                if zone == self.distribution_node_id:
                    #specific for distribution node because of feeder allocation requirement
                    indexer = util.level_specific_indexer(self.dispatch_feeder_allocation.values, 'year', year)
                    energy_demand = util.remove_df_levels(util.DfOper.mult([energy_demand, self.dispatch_feeder_allocation.values.loc[indexer, ]]), 'demand_sector')
                    capacity = util.remove_df_levels(util.DfOper.mult([capacity, self.dispatch_feeder_allocation.values.loc[indexer, ]]), 'demand_sector')
                    remove_levels.append('dispatch_feeder')
                    for geography in cfg.dispatch_geographies:
                        for dispatch_feeder in self.dispatch_feeders:
                            indexer = util.level_specific_indexer(energy_demand, [cfg.dispatch_geography, 'supply_node', 'dispatch_feeder'],[geography,zone,dispatch_feeder])
                            self.flexible_load[node.id][geography][zone][dispatch_feeder]['energy']= util.remove_df_levels(energy_demand.loc[indexer,:],remove_levels)
                            indexer = util.level_specific_indexer(capacity, [cfg.dispatch_geography, 'supply_node', 'dispatch_feeder'],[geography,zone,dispatch_feeder])
                            self.flexible_load[node.id][geography][zone][dispatch_feeder]['capacity']= util.remove_df_levels(capacity.loc[indexer,:],remove_levels)
                else:
                    remove_levels.append('demand_sector')
                    for geography in cfg.dispatch_geographies:
                        #feeder is set to 0 for flexible load not on the distribution system
                        indexer = util.level_specific_indexer(energy_demand, [cfg.dispatch_geography, 'supply_node'],[geography,zone])
                        self.flexible_load[node.id][geography][zone][0]['energy']= util.remove_df_levels(energy_demand.loc[indexer,:],remove_levels)
                        indexer = util.level_specific_indexer(capacity,[cfg.dispatch_geography, 'supply_node'],[geography,zone])
                        self.flexible_load[node.id][geography][zone][0]['capacity']= util.remove_df_levels(capacity.loc[indexer,:],remove_levels)
        self.flexible_load = util.freeze_recursivedict(self.flexible_load)
    def prepare_flexible_gen(self,year):
        """Calculates the availability of flexible generation for the hourly dispatch. Used for nodes like hydroelectricity.
        Args:
            year (int) = year of analysis
        Sets:
            flexible_gen (dict) = nested dictionary keyed by supply node id, dispatch geography, zone (i.e. transmission
            grid or distribution grid), dispatch feeder, and 'energy' or 'capacity', with dataframe values
        """
        self.flexible_gen = util.recursivedict()
        for zone in self.dispatch_zones:
            # exclude generators handled by the thermal dispatch (stack model)
            non_thermal_dispatch_nodes = [x for x in self.electricity_gen_nodes[zone]['flexible'] if x not in self.nodes[self.thermal_dispatch_node_id].values.index.get_level_values('supply_node')]
            for node_id in non_thermal_dispatch_nodes:
                node = self.nodes[node_id]
                energy = node.active_supply
                capacity = node.stock.values.loc[:,year].to_frame()
                if zone == self.distribution_node_id and 'demand_sector' not in node.stock.values.index.names:
                    #requires energy on the distribution system to be allocated to feeder for dispatch
                    energy = DfOper.mult([energy, node.active_supply.groupby(level=[cfg.primary_geography,'demand_sector']).transform(lambda x: x/x.sum())])
                    capacity = DfOper.mult([capacity, node.active_supply.groupby(level=[cfg.primary_geography,'demand_sector']).transform(lambda x: x/x.sum())])
                energy = util.remove_df_levels(energy,['vintage', 'supply_technology'])
                capacity = util.remove_df_levels(capacity,['vintage', 'supply_technology'])
                #geomap to dispatch geography
                if cfg.dispatch_geography != cfg.primary_geography:
                    # node-specific map key takes precedence over the configured default
                    geography_map_key = node.geography_map_key if hasattr(node, 'geography_map_key') and node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
                    map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography,normalize_as='total',map_key=geography_map_key, eliminate_zeros=False)
                    energy = DfOper.mult([energy,map_df])
                    capacity = DfOper.mult([capacity,map_df])
                if zone == self.distribution_node_id:
                    #specific for distribution node because of feeder allocation requirement
                    indexer = util.level_specific_indexer(self.dispatch_feeder_allocation.values, 'year', year)
                    energy = util.remove_df_levels(util.DfOper.mult([energy, self.dispatch_feeder_allocation.values.loc[indexer, ]]), 'demand_sector')
                    capacity = util.remove_df_levels(util.DfOper.mult([capacity, self.dispatch_feeder_allocation.values.loc[indexer, ]]), 'demand_sector')
                    for geography in cfg.dispatch_geographies:
                        for dispatch_feeder in self.dispatch_feeders:
                            # NOTE(review): this indexer is immediately overwritten below — looks like dead code; confirm
                            indexer = util.level_specific_indexer(energy, cfg.dispatch_geography, geography)
                            remove_list = ['dispatch_feeder','supply_node']
                            if cfg.primary_geography!=cfg.dispatch_geography:
                                remove_list.append(cfg.dispatch_geography)
                            # NOTE(review): three values [geography, zone, dispatch_feeder] are passed for two levels
                            # [dispatch_geography, dispatch_feeder] — compare with prepare_flexible_load; verify intent
                            indexer = util.level_specific_indexer(energy, [cfg.dispatch_geography, 'dispatch_feeder'],[geography,zone,dispatch_feeder])
                            self.flexible_gen[node.id][geography][zone][dispatch_feeder]['energy']= util.remove_df_levels(energy.loc[indexer,:],remove_list)
                            indexer = util.level_specific_indexer(capacity, [cfg.dispatch_geography, 'dispatch_feeder'],[geography,zone,dispatch_feeder])
                            self.flexible_gen[node.id][geography][zone][dispatch_feeder]['capacity']= util.remove_df_levels(capacity.loc[indexer,:],remove_list)
                else:
                    for geography in cfg.dispatch_geographies:
                        #feeder is set to 0 for flexible load not on the distribution system
                        indexer = util.level_specific_indexer(energy, cfg.dispatch_geography, geography)
                        remove_list = ['demand_sector','supply_node']
                        if cfg.primary_geography!=cfg.dispatch_geography:
                            remove_list.append(cfg.dispatch_geography)
                        self.flexible_gen[node.id][geography][zone][0]['energy'] = util.remove_df_levels(energy.loc[indexer,:],remove_list)
                        indexer = util.level_specific_indexer(capacity,cfg.dispatch_geography, geography)
                        self.flexible_gen[node.id][geography][zone][0]['capacity'] = util.remove_df_levels(capacity.loc[indexer,:],remove_list)
        self.flexible_gen = util.freeze_recursivedict(self.flexible_gen)
def _help_prepare_non_flexible_load_or_gen(self, energy, year, node, zone):
energy['dispatch_zone'] = zone
energy['supply_node'] = node.id # replace supply node with the node id
energy = energy.set_index(['dispatch_zone', 'supply_node'], append=True)
if zone == self.distribution_node_id:
energy = util.DfOper.mult([energy, self.dispatch_feeder_allocation.values.xs(year, level='year')])
else:
energy['dispatch_feeder'] = 0
energy = energy.set_index('dispatch_feeder', append=True)
energy = util.remove_df_levels(energy, 'demand_sector')
if cfg.dispatch_geography != cfg.primary_geography:
#geomap to dispatch geography
geography_map_key = node.geography_map_key if hasattr(node, 'geography_map_key') and node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
map_df = cfg.geo.map_df(cfg.primary_geography, cfg.dispatch_geography, normalize_as='total', map_key=geography_map_key, eliminate_zeros=False)
energy = DfOper.mult([energy, map_df])
return energy
    def prepare_non_flexible_load(self, year):
        # MOVE
        """Calculates the demand from non-flexible load on the supply-side
        Args:
            year (int) = year of analysis
        Sets:
            non_flexible_load (df) = concatenated energy demand indexed by zone/node/feeder, or None when
            there are no non-flexible load nodes
        """
        self.non_flexible_load = []
        for zone in self.dispatch_zones:
            for node_id in self.electricity_load_nodes[zone]['non_flexible']:
                node = self.nodes[node_id]
                # select only the coefficients that draw from the electricity nodes of this dispatch zone
                indexer = util.level_specific_indexer(node.active_coefficients_untraded,'supply_node',[list(set(self.electricity_nodes[zone]+[zone]))])
                energy = DfOper.mult([node.active_supply, node.active_coefficients_untraded.loc[indexer,:]])
                energy = util.remove_df_levels(energy, ['supply_node', 'efficiency_type']) # supply node is electricity transmission or distribution
                energy = self._help_prepare_non_flexible_load_or_gen(energy, year, node, zone)
                self.non_flexible_load.append(energy) # important that the order of the columns be correct
        if len(self.non_flexible_load):
            self.non_flexible_load = pd.concat(self.non_flexible_load).sort()
        else:
            self.non_flexible_load = None
def prepare_non_flexible_gen(self,year):
# MOVE
"""Calculates the supply from non-flexible generation on the supply-side
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
Sets:
non_flexible_load (df)
"""
self.non_flexible_gen = []
for zone in self.dispatch_zones:
non_thermal_dispatch_nodes = [x for x in self.electricity_gen_nodes[zone]['non_flexible'] if x not in
self.nodes[self.thermal_dispatch_node_id].values.index.get_level_values('supply_node')]
for node_id in non_thermal_dispatch_nodes:
node = self.nodes[node_id]
energy = node.active_supply.copy()
energy = self._help_prepare_non_flexible_load_or_gen(energy, year, node, zone)
self.non_flexible_gen.append(energy)
if len(self.non_flexible_gen):
self.non_flexible_gen = pd.concat(self.non_flexible_gen).sort()
else:
self.non_flexible_gen = None
    def prepare_dispatch_inputs(self, year, loop):
        # MOVE
        """Calculates supply node parameters needed to run electricity dispatch
        Args:
            year (int) = year of analysis
            loop (int or str) = loop identifier

        Orchestrates the per-year dispatch setup: classifies generation/load nodes, prepares
        flexible and non-flexible inputs, thermal and storage nodes, losses, shapes, and the
        initial net load signal. Call order matters — later steps consume state set by earlier ones.
        """
        logging.info("    preparing dispatch inputs")
        self.solved_gen_list = []
        self.set_electricity_gen_nodes(self.nodes[self.distribution_node_id],self.nodes[self.distribution_node_id])
        # NOTE(review): solved_gen_list is reset a second time here — presumably to clear entries
        # accumulated during set_electricity_gen_nodes; confirm before simplifying
        self.solved_gen_list = []
        self.set_electricity_load_nodes()
        self.set_dispatchability()
        self.prepare_non_flexible_gen(year)
        self.prepare_flexible_gen(year)
        self.prepare_non_flexible_load(year)
        self.prepare_flexible_load(year)
        self.prepare_thermal_dispatch_nodes(year,loop)
        self.prepare_electricity_storage_nodes(year,loop)
        self.set_distribution_losses(year)
        self.set_transmission_losses(year)
        self.set_shapes(year)
        self.set_initial_net_load_signals(year)
    def solve_electricity_dispatch(self, year):
        # MOVE
        """solves heuristic dispatch, optimization dispatch, and thermal dispatch
        Args:
            year (int) = year of analysis

        In dispatch write years (outside API runs) also filters, accumulates, or writes the hourly
        dispatch results depending on configuration.
        """
        #solves dispatched load and gen on the supply-side for nodes like hydro and H2 electrolysis
        self.solve_heuristic_load_and_gen(year)
        #solves electricity storage and flexible demand load optimization
        self.solve_storage_and_flex_load_optimization(year)
        #updates the grid capacity factors for distribution and transmission grid (i.e. load factors)
        self.set_grid_capacity_factors(year)
        #solves dispatch (stack model) for thermal resource connected to thermal dispatch node
        self.solve_thermal_dispatch(year)
        self.solve_hourly_curtailment(year)
        if year in self.dispatch_write_years and not self.api_run:
            if cfg.filter_dispatch_less_than_x is not None:
                # drop dispatch outputs whose magnitude never exceeds the configured threshold
                self.bulk_dispatch = self.bulk_dispatch.groupby(level=['DISPATCH_OUTPUT']).filter(
                    lambda x: x.max().max()>cfg.filter_dispatch_less_than_x or x.min().min()<-cfg.filter_dispatch_less_than_x)
            if cfg.cfgfile.get('output_detail', 'keep_dispatch_outputs_in_model').lower() == 'true':
                self.outputs.hourly_dispatch_results = pd.concat([self.outputs.hourly_dispatch_results, self.bulk_dispatch])
            else:
                # we are going to save them as we go along
                result_df = self.outputs.clean_df(self.bulk_dispatch)
                keys = [self.scenario.name.upper(), cfg.timestamp]
                names = ['SCENARIO','TIMESTAMP']
                for key, name in zip(keys, names):
                    result_df = pd.concat([result_df], keys=[key], names=[name])
                Output.write(result_df, 'hourly_dispatch_results.csv', os.path.join(cfg.workingdir, 'dispatch_outputs'))
        self.calculate_thermal_totals(year)
        self.calculate_curtailment(year)
def solve_hourly_curtailment(self,year):
# MOVE
if year in self.dispatch_write_years:
curtailment = -util.remove_df_levels(self.bulk_dispatch,'DISPATCH_OUTPUT')
curtailment['DISPATCH_OUTPUT'] = 'CURTAILMENT'
curtailment = curtailment.set_index('DISPATCH_OUTPUT',append=True)
curtailment = curtailment.reorder_levels(self.bulk_dispatch.index.names)
self.bulk_dispatch = pd.concat([self.bulk_dispatch,curtailment])
def prepare_thermal_dispatch_nodes(self,year,loop):
# MOVE
"""Calculates the operating cost of all thermal dispatch resources
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
Sets:
thermal_dispatch_dict (dict) = dictionary with keys of dispatch location (i.e. geography analysis unit)
, a key from the list ['capacity', 'cost', 'maintenance_outage_rate', 'forced_outage_rate', 'must_run'] and a tuple with the thermal resource identifier. Values are either float or boolean.
"""
dataframes = []
keys = self.demand_sectors
names = ['demand_sector']
#apennds sector costs to list
for sector in self.demand_sectors:
dataframes.append(self.cost_dict[year][sector].sum())
#concatenates sector costs into single dataframe
embodied_cost_df = pd.concat(dataframes,keys=keys,names=names)
embodied_cost_df = embodied_cost_df.reorder_levels([cfg.primary_geography,'demand_sector','supply_node']).to_frame()
embodied_cost_df.sort(inplace=True)
self.dispatch_df = embodied_cost_df
self.thermal_dispatch_nodes = [x for x in set(list(self.nodes[self.thermal_dispatch_node_id].active_coefficients.index.get_level_values('supply_node')))]
dispatch_resource_list = []
for node_id in self.thermal_dispatch_nodes:
stock_values = self.nodes[node_id].stock.values.loc[:,year].to_frame()
cap_factor_values = self.nodes[node_id].stock.capacity_factor.loc[:,year].to_frame()
stock_values = stock_values[((stock_values.index.get_level_values('vintage')==year) == True) | ((stock_values[year]>0) == True)]
stock_values = stock_values[((cap_factor_values[year]>0) == True)]
resources = [str(x[0]) for x in stock_values.groupby(level = stock_values.index.names).groups.values()]
inputs_and_outputs = ['capacity','cost','maintenance_outage_rate','forced_outage_rate','capacity_weights','must_run','gen_cf','generation','stock_changes','thermal_capacity_multiplier']
node_list = [node_id]
index = pd.MultiIndex.from_product([cfg.dispatch_geographies, node_list, resources,inputs_and_outputs],names = [cfg.dispatch_geography, 'supply_node','thermal_generators','IO'])
dispatch_resource_list.append(util.empty_df(index=index,columns=[year],fill_value=0.0))
self.active_thermal_dispatch_df = pd.concat(dispatch_resource_list)
self.active_thermal_dispatch_df.sort(inplace=True)
for node_id in self.thermal_dispatch_nodes:
node = self.nodes[node_id]
if hasattr(node, 'calculate_dispatch_costs'):
node.calculate_dispatch_costs(year, embodied_cost_df,loop)
if hasattr(node,'active_dispatch_costs'):
active_dispatch_costs = node.active_dispatch_costs
#TODO Remove 1 is the Reference Case
if self.CO2PriceMeasure:
co2_price = util.df_slice(self.CO2PriceMeasure.values,year,'year')
co2_price.columns = [year]
else:
co2_price=0
if hasattr(node,'active_physical_emissions_coefficients') and hasattr(node,'active_co2_capture_rate'):
total_physical =node.active_physical_emissions_coefficients.groupby(level='supply_node').sum().stack().stack().to_frame()
emissions_rate = util.DfOper.mult([node.stock.dispatch_coefficients.loc[:,year].to_frame(), util.DfOper.divi([total_physical,node.active_coefficients_untraded]).replace([np.inf,np.nan],0)])
# emissions_rate = util.DfOper.mult([node.stock.dispatch_coefficients.loc[:,year].to_frame(), util.DfOper.divi([total_physical,node.active_emissions_coefficients.transpose().groupby(level='supply_node',axis=1).sum().stack().to_frame()]).replace([np.inf,np.nan],0)])
emissions_rate = util.remove_df_levels(emissions_rate,'supply_node')
emissions_rate = util.remove_df_levels(emissions_rate,[x for x in emissions_rate.index.names if x not in node.stock.values.index.names],agg_function='mean')
co2_cost = util.DfOper.mult([emissions_rate, 1-node.rollover_output(tech_class = 'co2_capture', stock_att='exist',year=year)]) * co2_price * util.unit_conversion(unit_from_den='ton',unit_to_den=cfg.cfgfile.get('case','mass_unit'))[0]
active_dispatch_costs = util.DfOper.add([node.active_dispatch_costs ,co2_cost])
stock_values = node.stock.values.loc[:,year].to_frame()
stock_values = stock_values[((stock_values.index.get_level_values('vintage')==year) == True) | ((stock_values[year]>0) == True)]
capacity_factor = copy.deepcopy(node.stock.capacity_factor.loc[:,year].to_frame())
if cfg.dispatch_geography != cfg.primary_geography:
geography_map_key = node.geography_map_key if hasattr(node, 'geography_map_key') and node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
int_map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography, normalize_as='intensity', map_key=geography_map_key, eliminate_zeros=False)
tot_map_df = cfg.geo.map_df(cfg.primary_geography, cfg.dispatch_geography, normalize_as='total', map_key=geography_map_key, eliminate_zeros=False).swaplevel(0,1)
active_dispatch_costs = util.remove_df_levels(util.DfOper.mult([int_map_df,active_dispatch_costs],fill_value=0.0),cfg.primary_geography).swaplevel(0,cfg.dispatch_geography)
active_dispatch_costs = active_dispatch_costs.replace([np.nan,np.inf],0)
stock_values = util.DfOper.mult([tot_map_df,stock_values],fill_value=0.0).swaplevel(0,cfg.dispatch_geography).swaplevel(1,cfg.primary_geography)
capacity_factor = util.remove_df_levels(util.DfOper.mult([int_map_df, capacity_factor,],fill_value=0.0),cfg.primary_geography).swaplevel(0,cfg.dispatch_geography)
groups = [x[0]for x in stock_values.groupby(level=stock_values.index.names).groups.values()]
for group in groups:
dispatch_geography = group[0]
if cfg.primary_geography == cfg.dispatch_geography:
geomapped_resource = group
resource = group
else:
geomapped_resource = (group[0],) +group[2:]
resource = group[1:]
try:
self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id,str(resource),'capacity'),year] = stock_values.loc[group].values[0]
self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id,str(resource), 'cost'),year] = active_dispatch_costs.loc[geomapped_resource].values[0]
self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id,str(resource),'maintenance_outage_rate'),year] = (1- capacity_factor.loc[geomapped_resource].values[0])*.9
self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id, str(resource), 'forced_outage_rate'),year]= np.nan_to_num((1- capacity_factor.loc[geomapped_resource].values[0])*.1/(1-self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id,str(resource),'maintenance_outage_rate'),year]))
self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id, str(resource), 'thermal_capacity_multiplier'),year] = node.technologies[resource[1]].thermal_capacity_multiplier
if hasattr(node,'is_flexible') and node.is_flexible == False:
self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id, str(resource), 'must_run'),year] = 1
except:
pdb.set_trace()
self.active_thermal_dispatch_df = self.active_thermal_dispatch_df[((np.array([int(x[-6:-2]) for x in self.active_thermal_dispatch_df.index.get_level_values('thermal_generators')])==year) == True) | ((self.active_thermal_dispatch_df.groupby(level=[cfg.dispatch_geography,'thermal_generators']).transform(lambda x: x.sum())[year]>0) == True)]
    def capacity_weights(self,year):
        """sets the share of new capacity by technology and location to resolve insufficient capacity in the thermal dispatch
        Args:
            year (int) = year of analysis
        Sets:
            'capacity_weights' rows of active_thermal_dispatch_df = the share of new capacity
            by thermal resource identifier in a specified dispatch geography.
        """
        weights = self.nodes[self.thermal_dispatch_node_id].values.loc[:,year].to_frame().groupby(level=[cfg.primary_geography,'supply_node']).mean()
        if cfg.dispatch_geography != cfg.primary_geography:
            map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography, normalize_as='intensity', eliminate_zeros=False)
            weights = util.DfOper.mult([map_df,weights]).swaplevel(0,cfg.dispatch_geography)
            for dispatch_geography in cfg.dispatch_geographies:
                for node_id in self.thermal_nodes:
                    node = self.nodes[node_id]
                    resources = list(set(util.df_slice(self.active_thermal_dispatch_df, [dispatch_geography, node_id], [cfg.dispatch_geography, 'supply_node']).index.get_level_values('thermal_generators')))
                    for resource in resources:
                        # eval reconstructs the resource tuple from its stringified index label;
                        # input is generated internally (see prepare_thermal_dispatch_nodes), not untrusted
                        resource = eval(resource)
                        # only current-year (new) resources get capacity weights
                        if resource[-1] == year:
                            self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id, str(resource), 'capacity_weights'),year]= (util.df_slice(weights,[dispatch_geography, resource[0], node_id],[cfg.dispatch_geography,cfg.primary_geography,'supply_node']).values * node.active_weighted_sales.loc[resource[0:-1:1],:].values)[0][0]
        else:
            for dispatch_geography in cfg.dispatch_geographies:
                for node_id in self.thermal_nodes:
                    node = self.nodes[node_id]
                    resources = list(set(util.df_slice(self.active_thermal_dispatch_df, [dispatch_geography, node_id], [cfg.dispatch_geography, 'supply_node']).index.get_level_values('thermal_generators')))
                    for resource in resources:
                        resource = eval(resource)
                        # when geographies match, the resource's own geography must equal the dispatch geography
                        if resource[-1] == year and resource[0]== dispatch_geography:
                            self.active_thermal_dispatch_df.loc[(dispatch_geography,node.id, str(resource), 'capacity_weights'),year]= (util.df_slice(weights,[dispatch_geography, node_id],[cfg.dispatch_geography,'supply_node']).values * node.active_weighted_sales.loc[resource[0:-1:1],:].values)[0][0]
    def calculate_weighted_sales(self,year):
        """sets the anticipated share of sales by technology for capacity additions added in the thermal dispatch.
        Thermal dispatch inputs determines share by supply_node, so we have to determine the share of technologies for
        that capacity addition.
        Args:
            year (int) = year of analysis
        Sets:
            active_weighted_sales (dataframe) = share of sales by technology for each thermal dispatch node
        """
        for node_id in self.thermal_nodes:
            node = self.nodes[node_id]
            vintage_start = min(node.vintages) -1
            total = []
            # collect this year's total sales-share row for every rollover group
            for elements in node.rollover_groups.keys():
                elements = util.ensure_tuple(elements)
                sales_share, initial_sales_share = node.calculate_total_sales_share(elements,
                                                                                    node.stock.rollover_group_names)
                sales_share = sales_share[year-min(node.vintages)]
                total.append(sales_share)
            total = np.concatenate(total)
            # weight by current-year retirements; fall back to uniform weights if nothing retires
            weighted_sales = node.stock.retirements[node.stock.retirements.index.get_level_values('vintage')==year]
            if weighted_sales.sum().sum() == 0:
                weighted_sales[weighted_sales.values==0] = 1
            for tech_id in node.tech_ids:
                weighted_sales[tech_id] = weighted_sales['value']
            weighted_sales = weighted_sales[node.tech_ids]
            weighted_sales*=total
            weighted_sales = weighted_sales.sum(axis=1).to_frame()
            weighted_sales.columns = ['value']
            # weight later vintages more heavily (distance from the pre-first vintage year)
            weighted_sales *= (np.column_stack(weighted_sales.index.get_level_values('vintage').values).T-vintage_start)
            weighted_sales = util.remove_df_levels(weighted_sales,'vintage')
            # normalize to shares within each primary geography
            weighted_sales = weighted_sales.groupby(level = cfg.primary_geography).transform(lambda x: x/x.sum())
            node.active_weighted_sales = weighted_sales
            # where normalization produced NaN (zero totals), default to an equal share per technology
            node.active_weighted_sales = node.active_weighted_sales.fillna(1/float(len(node.tech_ids)))
    def solve_thermal_dispatch(self, year):
        # MOVE
        """solves the thermal dispatch, updating the capacity factor for each thermal dispatch technology
        and adding capacity to each node based on determination of need

        Runs one thermal stack dispatch per dispatch geography (optionally in parallel), writes hourly
        outputs in dispatch write years, then pushes resulting capacity additions and capacity factors
        back onto each thermal node's stock.
        """
        logging.info('    solving thermal dispatch')
        self.calculate_weighted_sales(year)
        self.capacity_weights(year)
        # one parameter tuple per dispatch geography for dispatch_generators.run_thermal_dispatch
        parallel_params = list(zip(cfg.dispatch_geographies,[util.df_slice(self.active_thermal_dispatch_df,x,cfg.dispatch_geography,drop_level=False) for x in cfg.dispatch_geographies],
                               [cfg.dispatch_geography]*len(cfg.dispatch_geographies),
                               [util.df_slice(self.bulk_net_load,2,'timeshift_type')]*len(cfg.dispatch_geographies),
                               [year in self.dispatch_write_years]*len(cfg.dispatch_geographies),
                               [float(cfg.cfgfile.get('opt', 'operating_reserves'))]*len(cfg.dispatch_geographies),
                               [cfg.cfgfile.get('opt', 'schedule_maintenance').lower() == 'true']*len(cfg.dispatch_geographies)))
        if cfg.cfgfile.get('case','parallel_process').lower() == 'true':
            dispatch_results = helper_multiprocess.safe_pool(dispatch_generators.run_thermal_dispatch, parallel_params)
        else:
            dispatch_results = []
            for params in parallel_params:
                dispatch_results.append(dispatch_generators.run_thermal_dispatch(params))
        thermal_dispatch_df, detailed_results = zip(*dispatch_results) #both of these are lists by geography
        thermal_dispatch_df = pd.concat(thermal_dispatch_df).sort()
        self.active_thermal_dispatch_df = thermal_dispatch_df
        if year in self.dispatch_write_years:
            # assemble hourly generation by technology and fold it into bulk_dispatch
            for x in detailed_results:
                x['dispatch_by_category'].index = shape.shapes.active_dates_index
            thermal_shape = pd.concat([x['dispatch_by_category'] for x in detailed_results],axis=0,keys=cfg.dispatch_geographies)
            thermal_shape = thermal_shape.stack().to_frame()
            thermal_shape = pd.concat([thermal_shape], keys=[year], names=['year'])
            thermal_shape.index.names = ['year', cfg.dispatch_geography, 'weather_datetime','supply_technology']
            thermal_shape.columns = [cfg.calculation_energy_unit.upper()]
            thermal_shape_dataframe = self.outputs.clean_df(thermal_shape)
            util.replace_index_name(thermal_shape_dataframe,'DISPATCH_OUTPUT','SUPPLY_TECHNOLOGY')
            # generation is negated so it nets against load in bulk_dispatch
            self.bulk_dispatch = pd.concat([self.bulk_dispatch, -thermal_shape_dataframe.reorder_levels(self.bulk_dispatch.index.names)])
            bulk_marginal_cost = pd.concat([pd.DataFrame(outputs['market_price'], index=shape.shapes.active_dates_index) for outputs in detailed_results],axis=0,keys=cfg.dispatch_geographies)
            bulk_production_cost = pd.concat([pd.DataFrame(outputs['production_cost'], index=shape.shapes.active_dates_index) for outputs in detailed_results],axis=0,keys=cfg.dispatch_geographies)
            bulk_marginal_cost = pd.concat([bulk_marginal_cost], keys=[year], names=['year'])
            bulk_production_cost = pd.concat([bulk_production_cost], keys=[year], names=['year'])
            bulk_marginal_cost.index.names = ['year', cfg.dispatch_geography, 'weather_datetime']
            bulk_production_cost.index.names = ['year', cfg.dispatch_geography, 'weather_datetime']
            # we really don't want this ever to be anything but $/MWh
            bulk_marginal_cost *= util.unit_convert(1, unit_from_den=cfg.calculation_energy_unit,unit_to_den='megawatt_hour')
            bulk_marginal_cost.columns = ["{} / {}".format(cfg.output_currency.upper(), 'MWh')]
            bulk_production_cost.columns = [cfg.output_currency.upper()]
#            bulk_marginal_cost = self.outputs.clean_df(bulk_marginal_cost)
#            bulk_production_cost = self.outputs.clean_df(bulk_production_cost)
            if cfg.cfgfile.get('output_detail', 'keep_dispatch_outputs_in_model').lower() == 'true':
                self.outputs.hourly_marginal_cost = pd.concat([self.outputs.hourly_marginal_cost, bulk_marginal_cost])
                self.outputs.hourly_production_cost = pd.concat([self.outputs.hourly_production_cost, bulk_production_cost])
            else:
                # we are going to save them as we go along
                for obj, obj_name in zip([bulk_marginal_cost, bulk_production_cost], ['hourly_marginal_cost', 'hourly_production_cost']):
                    result_df = self.outputs.clean_df(obj)
                    keys = [self.scenario.name.upper(), cfg.timestamp]
                    names = ['SCENARIO','TIMESTAMP']
                    for key, name in zip(keys, names):
                        result_df = pd.concat([result_df], keys=[key], names=[name])
                    Output.write(result_df, obj_name + '.csv', os.path.join(cfg.workingdir, 'dispatch_outputs'))
        # first pass: add dispatch-determined stock changes to each node's dispatch capacity
        for node_id in self.thermal_dispatch_nodes:
            node = self.nodes[node_id]
            for dispatch_geography in cfg.dispatch_geographies:
                dispatch_df = util.df_slice(self.active_thermal_dispatch_df.loc[:,:],[dispatch_geography,node_id], [cfg.dispatch_geography,'supply_node'],drop_level=False)
                resources = list(set([eval(x) for x in dispatch_df.index.get_level_values('thermal_generators')]))
                for resource in resources:
                    capacity_indexer = util.level_specific_indexer(dispatch_df, ['thermal_generators','IO'], [str(resource),'capacity'])
                    dispatch_capacity_indexer = util.level_specific_indexer(dispatch_df,['thermal_generators','IO'], [str(resource),'stock_changes'])
                    node.stock.dispatch_cap.loc[resource,year] += dispatch_df.loc[dispatch_capacity_indexer,year].values
            # second pass: rebuild the year's capacity factors from the dispatched generation
            node.stock.capacity_factor.loc[:,year] = 0
            for dispatch_geography in cfg.dispatch_geographies:
                dispatch_df = util.df_slice(self.active_thermal_dispatch_df.loc[:,:],[dispatch_geography,node_id], [cfg.dispatch_geography,'supply_node'],drop_level=False)
                resources = list(set([eval(x) for x in dispatch_df.index.get_level_values('thermal_generators')]))
                for resource in resources:
                    capacity_indexer = util.level_specific_indexer(dispatch_df, ['thermal_generators','IO'], [str(resource),'capacity'])
                    stock_changes_indexer = util.level_specific_indexer(dispatch_df, ['thermal_generators','IO'], [str(resource),'stock_changes'])
                    # ratio scales the geography-level capacity factor back to this resource's share of stock
                    if node.stock.values.loc[resource,year] ==0 and node.stock.dispatch_cap.loc[resource,year] == 0:
                        ratio = 0
                    elif node.stock.values.loc[resource,year] ==0 and node.stock.dispatch_cap.loc[resource,year] != 0:
                        ratio = dispatch_df.loc[stock_changes_indexer,year]/node.stock.dispatch_cap.loc[resource,year]
                    else:
                        ratio = dispatch_df.loc[capacity_indexer,year]/node.stock.values.loc[resource,year]
                    ratio = np.nan_to_num(ratio)
                    capacity_factor_indexer = util.level_specific_indexer(dispatch_df,['thermal_generators','IO'], [str(resource),'gen_cf'])
                    capacity_factor = np.nan_to_num(dispatch_df.loc[capacity_factor_indexer,year]*ratio)
                    node.stock.capacity_factor.loc[resource,year] += capacity_factor
                    # NOTE(review): stock_changes are added to dispatch_cap here a second time after the
                    # first pass above — confirm this double accumulation is intentional
                    dispatch_capacity_indexer = util.level_specific_indexer(dispatch_df,['thermal_generators','IO'], [str(resource),'stock_changes'])
                    node.stock.dispatch_cap.loc[resource,year] += dispatch_df.loc[dispatch_capacity_indexer,year].values
    def calculate_curtailment(self,year):
        """Calculate annual curtailment by geography and accumulate it into the s_curtailment output.

        Curtailment = thermal generation + initial over-generation - positive bulk net load,
        allocated across supply using each node's share of bulk supply. The per-year results are
        collected in curtailment_list and concatenated into outputs.s_curtailment in the final
        dispatch year.
        """
        if year == int(cfg.cfgfile.get('case','current_year')):
            # first dispatch year: start a fresh accumulation list
            self.curtailment_list = []
        bulk_net_load = util.df_slice(self.bulk_net_load,2,'timeshift_type')
        # hours with negative net load represent over-generation before thermal dispatch
        initial_overgen = copy.deepcopy(bulk_net_load)
        initial_overgen[initial_overgen.values>0]=0
        initial_overgen *= -1
        initial_overgen = initial_overgen.groupby(level=cfg.dispatch_geography).sum()
        bulk_net_load[bulk_net_load.values<0]=0
        curtailment = util.DfOper.add([util.DfOper.subt([self.thermal_totals.sum().to_frame(),bulk_net_load.groupby(level=cfg.dispatch_geography).sum()]),initial_overgen])
        supply = self.nodes[self.bulk_id].active_supply
        if cfg.primary_geography!= cfg.dispatch_geography:
            map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography,normalize_as='total',eliminate_zeros=False)
            supply = util.DfOper.mult([supply,map_df])
        # allocate curtailment in proportion to each node's share of supply within a dispatch geography
        normal_supply = supply.groupby(level=[cfg.dispatch_geography]).transform(lambda x: x/x.sum())
        curtailment = util.DfOper.mult([curtailment,normal_supply])
        if cfg.primary_geography!= cfg.dispatch_geography:
            curtailment = util.remove_df_levels(curtailment,cfg.dispatch_geography)
        curtailment.columns = ['value']
        self.curtailment_list.append(curtailment)
        if year == max(self.dispatch_years):
            # final year: concatenate all years into the curtailment output
            keys = self.dispatch_years
            names = ['year']
            self.outputs.s_curtailment = pd.concat(self.curtailment_list,keys=keys, names=names)
            util.replace_index_name(self.outputs.s_curtailment,'sector','demand_sector')
            self.outputs.s_curtailment.columns = [cfg.calculation_energy_unit.upper()]
    def update_coefficients_from_dispatch(self,year):
        # MOVE
        """Push thermal dispatch results for the year back into supply-node coefficients and store them."""
        self.update_thermal_coefficients(year)
        self.store_active_thermal_df(year)
#        self.update_bulk_coefficients()
    def update_thermal_coefficients(self,year):
        """Rebuild the thermal dispatch node's coefficient matrix from dispatch results.

        When the primary and dispatch geographies differ, thermal supply is split
        into three tranches — native (serving demand in its own geography), excess
        (supply beyond residual demand), and residual (supply traded between
        geographies) — and the coefficient matrix is assembled tranche by tranche.
        Otherwise coefficients are derived directly from thermal totals. The result
        is normalized per demand sector, floored to keep the IO solvable, and used
        to rescale the bulk node's values for *year*.

        Args:
            year (int): year of analysis

        Side effects: sets active_coefficients_total on the thermal dispatch node,
        mutates the bulk node's values for *year*, and triggers recalculation of
        the bulk node's active coefficients.
        """
        # MOVE
        if cfg.primary_geography != cfg.dispatch_geography:
            # map thermal demand into dispatch geographies (columns)
            map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography,normalize_as='total',eliminate_zeros=False)
            thermal_demand = util.DfOper.mult([self.nodes[self.thermal_dispatch_node_id].active_supply,map_df])
            thermal_demand = thermal_demand.unstack(cfg.dispatch_geography)
            thermal_demand.columns = thermal_demand.columns.droplevel()
            # fraction of each primary geography's thermal supply consumed natively (capped at 1)
            demand_supply_ratio = util.DfOper.divi([thermal_demand.groupby(level=cfg.primary_geography).sum(),self.thermal_totals.groupby(level=cfg.primary_geography).sum()])
            demand_supply_ratio[demand_supply_ratio>1]=1
            demand_supply_ratio.replace(np.nan,0,inplace=True)
            # zeroed copy with the full coefficient structure, filled in below
            df = self.nodes[self.thermal_dispatch_node_id].active_coefficients_total * 0
            # native tranche: supply serving its own geography's demand
            native_thermal = util.DfOper.mult([self.thermal_totals,demand_supply_ratio])
            native_thermal_coefficients = util.DfOper.divi([native_thermal.sum(axis=1).to_frame(),
                                                           self.nodes[self.thermal_dispatch_node_id].active_supply.groupby(level=cfg.primary_geography).sum()]).fillna(0)
            # residual demand: demand not met by native supply
            residual_demand = util.DfOper.subt([thermal_demand.groupby(level=cfg.primary_geography).sum(),native_thermal.groupby(level=cfg.primary_geography).sum()])
            residual_demand[residual_demand<0] = 0
            remaining_supply = util.DfOper.subt([self.thermal_totals,native_thermal])
            # share of remaining supply needed to cover residual demand (capped at 1)
            residual_share = util.DfOper.divi([residual_demand.sum().to_frame(),remaining_supply.sum().to_frame()]).fillna(0)
            residual_share[residual_share>1]=1
            residual_share.replace(np.inf,0,inplace=True)
            residual_supply = copy.deepcopy(remaining_supply)
            excess_supply = copy.deepcopy(remaining_supply)
            # excess tranche: remaining supply beyond what residual demand requires
            excess_supply.loc[:,:] = remaining_supply.values * np.column_stack((1-residual_share).values)
            excess_thermal = excess_supply
            excess_thermal_coefficients = util.DfOper.divi([excess_thermal.sum(axis=1).to_frame(),self.nodes[self.thermal_dispatch_node_id].active_supply.groupby(level=cfg.primary_geography).sum()]).fillna(0)
            residual_supply.loc[:,:] = remaining_supply.values * np.column_stack(residual_share.values)
            # scale back residual supply shares when supply falls short of demand
            undersupply_adjustment = (residual_supply.sum()/residual_demand.sum())
            undersupply_adjustment[undersupply_adjustment>1]=1
            residual_supply_share = residual_supply/residual_supply.sum() * undersupply_adjustment
            residual_supply_share = residual_supply_share.fillna(0)
            util.replace_index_name(residual_supply_share,cfg.primary_geography + "from",cfg.primary_geography)
            residual_supply_share = residual_supply_share.stack().to_frame()
            util.replace_index_name(residual_supply_share,cfg.dispatch_geography)
            residual_demand_stack = residual_demand.stack().to_frame()
            util.replace_index_name(residual_demand_stack,cfg.dispatch_geography)
            # residual tranche: allocate residual demand to supplying geographies
            residual_thermal = util.remove_df_levels(util.DfOper.mult([residual_supply_share,residual_demand_stack]),cfg.dispatch_geography)
            residual_thermal = residual_thermal.unstack(cfg.primary_geography)
            residual_thermal.columns = residual_thermal.columns.droplevel()
            residual_thermal.loc[:,:] = residual_thermal.values/np.column_stack(self.nodes[self.thermal_dispatch_node_id].active_supply.groupby(level=cfg.primary_geography).sum().T.values).T
            residual_thermal_coefficients = residual_thermal.fillna(0)
            util.replace_index_name(residual_thermal,cfg.primary_geography,cfg.primary_geography + "from")
            # assemble the coefficient matrix: native and excess tranches sit on the
            # diagonal (same sector, same geography); the residual tranche fills
            # cross-geography cells within a sector
            for row_sector in self.demand_sectors:
                for col_sector in self.demand_sectors:
                    for row_geo in cfg.geographies:
                        for col_geo in cfg.geographies:
                            if row_sector == col_sector and row_geo == col_geo:
                                native_coeff_row_indexer = util.level_specific_indexer(native_thermal_coefficients,[cfg.primary_geography],[row_geo])
                                excess_coeff_row_indexer = util.level_specific_indexer(excess_thermal_coefficients,[cfg.primary_geography],[row_geo])
                                df_row_indexer = util.level_specific_indexer(df,[cfg.primary_geography,'demand_sector'],[row_geo,row_sector])
                                df_col_indexer = util.level_specific_indexer(df,[cfg.primary_geography,'demand_sector'],[col_geo,col_sector],axis=1)
                                df.loc[df_row_indexer,df_col_indexer] = np.column_stack(df.loc[df_row_indexer,df_col_indexer].values).T + np.column_stack(native_thermal_coefficients.loc[native_coeff_row_indexer,:].values).T
                                df.loc[df_row_indexer,df_col_indexer] = np.column_stack(df.loc[df_row_indexer,df_col_indexer].values).T + np.column_stack(excess_thermal_coefficients.loc[excess_coeff_row_indexer,:].values).T
                            elif row_sector == col_sector:
                                df_row_indexer = util.level_specific_indexer(df,[cfg.primary_geography,'demand_sector'],[row_geo,row_sector])
                                df_col_indexer = util.level_specific_indexer(df,[cfg.primary_geography,'demand_sector'],[col_geo,col_sector],axis=1)
                                residual_coeff_row_indexer = util.level_specific_indexer(residual_thermal_coefficients,[cfg.primary_geography],[row_geo])
                                residual_coeff_col_indexer = util.level_specific_indexer(residual_thermal_coefficients,[cfg.primary_geography],[col_geo],axis=1)
                                df.loc[df_row_indexer,df_col_indexer] = np.column_stack(df.loc[df_row_indexer,df_col_indexer].values).T + np.column_stack(residual_thermal_coefficients.loc[residual_coeff_row_indexer,residual_coeff_col_indexer].values).T
        else:
            # geographies coincide: derive coefficients directly and replicate across sectors
            df = util.DfOper.divi([self.thermal_totals,util.remove_df_levels(self.nodes[self.thermal_dispatch_node_id].active_supply,'demand_sector')])
            df = pd.concat([df]*len(self.demand_sectors),keys=self.demand_sectors,names=['demand_sector'])
            df = pd.concat([df]*len(self.demand_sectors),keys=self.demand_sectors,names=['demand_sector'],axis=1)
            df = df.reorder_levels([cfg.primary_geography,'demand_sector','supply_node'])
            df = df.reorder_levels([cfg.primary_geography,'demand_sector'],axis=1)
            df.sort(inplace=True)
            df.sort(inplace=True, axis=1)
        # zero out cross-sector entries: thermal supply does not flow between sectors
        for row_sector in self.demand_sectors:
            for col_sector in self.demand_sectors:
                if row_sector != col_sector:
                    row_indexer = util.level_specific_indexer(df,'demand_sector',row_sector,axis=0)
                    col_indexer = util.level_specific_indexer(df,'demand_sector',col_sector,axis=1)
                    df.loc[row_indexer,col_indexer] = 0
        # normalize columns within each sector; bulk_multiplier keeps the pre-normalization totals
        normalized = df.groupby(level=['demand_sector']).transform(lambda x: x/x.sum())
        # df[df<normalized] = normalized
        bulk_multiplier = df.sum()
        df = normalized
        df.replace([np.inf,np.nan],0,inplace=True)
        # floor the diagonal blocks to a small positive value to keep the IO matrix invertible
        for row_geo in cfg.geographies:
            for col_geo in cfg.geographies:
                if row_geo == col_geo:
                    row_indexer = util.level_specific_indexer(df,cfg.primary_geography,row_geo)
                    col_indexer = util.level_specific_indexer(df,cfg.primary_geography,col_geo, axis=1)
                    sliced = df.loc[row_indexer,col_indexer]
                    sliced = sliced.clip(lower=1E-7)
                    df.loc[row_indexer,col_indexer] = sliced
        self.nodes[self.thermal_dispatch_node_id].active_coefficients_total = df
        # rescale the bulk node's thermal value by the pre-normalization totals, capped at 1
        indexer = util.level_specific_indexer(self.nodes[self.bulk_id].values,'supply_node',self.thermal_dispatch_node_id)
        self.nodes[self.bulk_id].values.loc[indexer, year] *= bulk_multiplier.values
        thermal_df = copy.deepcopy(self.nodes[self.bulk_id].values.loc[indexer, year])
        thermal_df[thermal_df>1]=1
        self.nodes[self.bulk_id].values.loc[indexer, year] =0
        #don't normalize these if it's an evolved run. Leave curtailment. Simplifies per-unit accounting
        if cfg.evolved_run == 'false':
            pass
            #self.nodes[self.bulk_id].values.loc[:, year] = util.DfOper.mult([self.nodes[self.bulk_id].values.loc[:, year].to_frame().groupby(level=[cfg.primary_geography,'demand_sector']).transform(lambda x: x/x.sum()),1-util.remove_df_levels(thermal_df,'supply_node').to_frame()],expandable=True)
        self.nodes[self.bulk_id].values.loc[indexer, year] = thermal_df
        self.nodes[self.bulk_id].calculate_active_coefficients(year, 3)
def store_active_thermal_df(self,year):
active_thermal_dispatch_df = self.active_thermal_dispatch_df.stack().to_frame()
util.replace_index_name(active_thermal_dispatch_df,'year')
self.active_thermal_dispatch_df_list.append(active_thermal_dispatch_df)
if year == max(self.years):
self.thermal_dispatch_df = pd.concat(self.active_thermal_dispatch_df_list)
    def calculate_thermal_totals(self,year):
        """Aggregate dispatched generation by thermal node into self.thermal_totals.

        Builds a (primary geography x supply node) by (dispatch geography) frame
        and sums each dispatched thermal resource's generation into the cell for
        its home primary geography and node.

        Args:
            year (int): year of analysis
        """
        row_index = pd.MultiIndex.from_product([cfg.geographies, self.thermal_dispatch_nodes], names=[cfg.primary_geography, 'supply_node'])
        col_index = pd.MultiIndex.from_product([cfg.dispatch_geographies],names=[cfg.dispatch_geography])
        df = util.empty_df(index=row_index,columns=col_index)
        for dispatch_geography in cfg.dispatch_geographies:
            for node_id in self.thermal_dispatch_nodes:
                thermal_dispatch_df = util.df_slice(self.active_thermal_dispatch_df,[dispatch_geography,'generation',node_id],[cfg.dispatch_geography,'IO','supply_node'])
                resources = list(set(thermal_dispatch_df.index.get_level_values('thermal_generators')))
                for resource in resources:
                    # resource labels are stringified tuples whose first element is the
                    # primary geography; eval reconstructs the tuple.
                    # NOTE(review): eval on index labels is only safe if these strings are
                    # generated internally and never come from user input — confirm upstream.
                    resource = eval(resource)
                    primary_geography = resource[0]
                    df.loc[(primary_geography,node_id),(dispatch_geography)] += np.nan_to_num(thermal_dispatch_df.loc[str(resource),:].values)
        self.thermal_totals = df
# def update_bulk_coefficients(self):
# bulk_load = util.DfOper.add([self.bulk_load.groupby(level=cfg.dispatch_geography).sum(), util.DfOper.mult([util.DfOper.subt([self.distribution_load,self.distribution_gen]),self.distribution_losses]).groupby(level=cfg.dispatch_geography).sum()])
# thermal_totals = self.thermal_totals.sum().to_frame()
#
# bulk_coefficients = DfOper.divi([thermal_totals, bulk_load])
# if cfg.dispatch_geography != cfg.primary_geography:
# map_key = cfg.cfgfile.get('case','default_geography_map_key')
# map_df = cfg.geo.map_df(cfg.dispatch_geography,cfg.primary_geography,map_key, eliminate_zeros=False)
# bulk_coefficients = DfOper.mult([bulk_coefficients,map_df]).groupby(level=cfg.primary_geography).sum()
# bulk_coefficients = pd.concat([bulk_coefficients]*len(self.demand_sectors),keys=self.demand_sectors,names=['demand_sector'])
# bulk_coefficients = bulk_coefficients.reorder_levels([cfg.primary_geography,'demand_sector'])
# bulk_coefficients = self.add_column_index(bulk_coefficients)
# bulk_coefficients.sort(inplace=True,axis=1)
# bulk_coefficients.sort(inplace=True,axis=0)
# for row_geography in cfg.geographies:
# for col_geography in cfg.geographies:
# for row_sector in self.demand_sectors:
# for col_sector in self.demand_sectors:
# if row_geography != col_geography or row_sector != col_sector:
# row_indexer = util.level_specific_indexer(bulk_coefficients, [cfg.primary_geography,'demand_sector'],[row_geography,row_sector])
# col_indexer = util.level_specific_indexer(bulk_coefficients, [cfg.primary_geography,'demand_sector'],[col_geography,col_sector])
# bulk_coefficients.loc[row_indexer,col_indexer] = 0
# indexer = util.level_specific_indexer(self.nodes[self.bulk_id].active_coefficients_total,'supply_node', self.thermal_dispatch_node_id)
# self.nodes[self.bulk_id].active_coefficients_total.loc[indexer,:] = bulk_coefficients.values
#
def add_column_index(self, data):
names = ['demand_sector', cfg.primary_geography]
keys = [self.demand_sectors, cfg.geo.geographies[cfg.primary_geography]]
data = copy.deepcopy(data)
for key,name in zip(keys,names):
data = pd.concat([data]*len(key), axis=1, keys=key, names=[name])
data.columns = data.columns.droplevel(-1)
return data
# def geo_map_thermal_coefficients(self,df,old_geography, new_geography, geography_map_key):
# if old_geography != new_geography:
# keys=cfg.geo.geographies[new_geography]
# name = [new_geography]
# df = pd.concat([df] * len(keys),keys=keys,names=name,axis=1)
# df.sort(inplace=True,axis=1)
# df.sort(inplace=True,axis=0)
# map_df = cfg.geo.map_df(old_geography,new_geography,geography_map_key,eliminate_zeros=False).transpose()
# names = [x for x in df.index.names if x not in map_df.index.names]
# names.reverse()
# for name in names:
# keys=list(set(df.index.get_level_values(name)))
# map_df = pd.concat([map_df]*len(keys),keys=keys,names=[name])
# map_df.index = map_df.index.droplevel(None)
# names = [x for x in df.columns.names if x not in map_df.columns.names]
# names.reverse()
# keys = []
# for name in names:
# keys = list(set(df.columns.get_level_values(name)))
# map_df = pd.concat([map_df]*len(keys),keys=keys,names=[name],axis=1)
# map_df=map_df.reorder_levels(df.index.names,axis=0)
# map_df = map_df.reorder_levels(df.columns.names,axis=1)
# map_df.sort(inplace=True,axis=0)
# map_df.sort(inplace=True,axis=1)
# old_geographies = list(set(df.columns.get_level_values(old_geography)))
# new_geographies =list(set(map_df.columns.get_level_values(new_geography)))
# for old in old_geographies:
# for new in new_geographies:
# row_indexer = util.level_specific_indexer(df,[new_geography],[new],axis=0)
# col_indexer = util.level_specific_indexer(df,[old_geography,new_geography],[old,new],axis=1)
# shape = (df.loc[row_indexer,col_indexer].values.shape)
# diag = np.ndarray(shape)
# np.fill_diagonal(diag,1)
# df *= map_df.values
# df = df.groupby(level=util.ix_excl(df,old_geography,axis=1),axis=1).sum()
# return df
def prepare_electricity_storage_nodes(self,year,loop):
"""Calculates the efficiency and capacity (energy and power) of all electric
storage nodes
year (int) = year of analysis
loop (int or str) = loop identifier
Sets:
storage_capacity_dict (dict) = dictionary with keys of 'power' or 'duration', dispatch_geography, dispatch_zone, feeder, and technology and
values of type float
storage_efficiency_dict (dict) = dictionary with keys dispatch_geography, dispatch_zone, feeder, and technology and
values of type floa
Args:t
"""
self.storage_efficiency_dict = util.recursivedict()
self.storage_capacity_dict = util.recursivedict()
for node in [x for x in self.nodes.values() if x.supply_type == 'Storage']:
for zone in self.dispatch_zones:
node.calculate_dispatch_coefficients(year, loop)
if hasattr(node,'active_dispatch_coefficients'):
storage_node_location = list(set(node.active_dispatch_coefficients.index.get_level_values('supply_node')))
if len(storage_node_location)>1:
raise ValueError('StorageNode %s has technologies with two different supply node locations' %node.id)
if storage_node_location[0] in self.electricity_nodes[zone]+[zone]:
capacity= node.stock.values.loc[:,year].to_frame().groupby(level=[cfg.primary_geography,'supply_technology']).sum()
efficiency = copy.deepcopy(node.active_dispatch_coefficients)
if 'demand_sector' not in capacity.index.names and zone == self.distribution_node_id:
sector_capacity= []
for sector in self.demand_sectors:
capacity = copy.deepcopy(capacity) * 1/len(self.demand_sectors)
capacity['demand_sector'] = sector
sector_capacity.append(capacity)
capacity = pd.concat(sector_capacity)
capacity.set_index('demand_sector', append=True, inplace=True)
keys = self.demand_sectors
name = ['demand_sector']
efficiency = pd.concat([efficiency]*len(keys),keys=keys,names=name)
if cfg.dispatch_geography != cfg.primary_geography:
geography_map_key = node.geography_map_key if hasattr(node, 'geography_map_key') and node.geography_map_key is not None else cfg.cfgfile.get('case','default_geography_map_key')
map_df = cfg.geo.map_df(cfg.primary_geography,cfg.dispatch_geography, normalize_as='total', map_key=geography_map_key, eliminate_zeros=False)
capacity = DfOper.mult([capacity, map_df],fill_value=0.0)
efficiency = DfOper.divi([util.remove_df_levels(DfOper.mult([efficiency, capacity]), cfg.primary_geography),util.remove_df_levels(capacity,cfg.primary_geography)]).fillna(0)
capacity = util.remove_df_levels(capacity, cfg.primary_geography)
#creates an empty database to fill with duration values, which are a technology parameter
duration = copy.deepcopy(capacity)*0
duration = duration.sort()
for tech in node.technologies.values():
tech_indexer = util.level_specific_indexer(duration,'supply_technology', tech.id)
duration.loc[tech_indexer,:] = tech.discharge_duration
efficiency = util.remove_df_levels(efficiency,'supply_node')
if zone == self.distribution_node_id:
indexer = util.level_specific_indexer(self.dispatch_feeder_allocation.values, 'year', year)
capacity = util.DfOper.mult([capacity, self.dispatch_feeder_allocation.values.loc[indexer, ]])
duration = DfOper.divi([util.remove_df_levels(DfOper.mult([duration, capacity]),'demand_sector'),util.remove_df_levels(capacity,'demand_sector')]).fillna(0)
efficiency = DfOper.divi([util.remove_df_levels(DfOper.mult([efficiency, capacity]),'demand_sector'),util.remove_df_levels(capacity,'demand_sector')]).fillna(1)
capacity = util.remove_df_levels(capacity,'demand_sector')
for geography in cfg.dispatch_geographies:
for dispatch_feeder in self.dispatch_feeders:
for technology in node.technologies.keys():
indexer = util.level_specific_indexer(efficiency, [cfg.dispatch_geography, 'dispatch_feeder','supply_technology'],[geography,dispatch_feeder,technology])
self.storage_efficiency_dict[geography][zone][dispatch_feeder][technology] = efficiency.loc[indexer,:].values[0][0]
indexer = util.level_specific_indexer(capacity, [cfg.dispatch_geography, 'dispatch_feeder','supply_technology'],[geography,dispatch_feeder,technology])
self.storage_capacity_dict['power'][geography][zone][dispatch_feeder][technology] = capacity.loc[indexer,:].values[0][0]
indexer = util.level_specific_indexer(duration, [cfg.dispatch_geography, 'dispatch_feeder','supply_technology'],[geography,dispatch_feeder,technology])
self.storage_capacity_dict['duration'][geography][zone][dispatch_feeder][technology] = duration.loc[indexer,:].values[0][0]
else:
for geography in cfg.dispatch_geographies:
for technology in node.technologies.keys():
indexer = util.level_specific_indexer(capacity, [cfg.dispatch_geography, 'supply_technology'],[geography,technology])
tech_capacity = self.ensure_frame(util.remove_df_levels(capacity.loc[indexer,:], 'demand_sector'))
indexer = util.level_specific_indexer(duration, [cfg.dispatch_geography,'supply_technology'],[geography,technology])
tech_duration = self.ensure_frame(util.remove_df_levels(duration.loc[indexer,:], 'demand_sector'))
indexer = util.level_specific_indexer(efficiency, [cfg.dispatch_geography, 'supply_technology'],[geography,technology])
tech_efficiency = self.ensure_frame(util.remove_df_levels(efficiency.loc[indexer,:], 'demand_sector'))
if tech_capacity.values[0][0] == 0:
continue
else:
self.storage_capacity_dict['power'][geography][zone][0][technology] = tech_capacity.values[0][0]
self.storage_capacity_dict['duration'][geography][zone][0][technology] = tech_duration.values[0][0]
self.storage_efficiency_dict[geography][zone][0][technology] = tech_efficiency.values[0][0]
@staticmethod
def ensure_frame(variable):
if isinstance(variable,pd.DataFrame):
return variable
else:
try:
variable = variable.to_frame()
return variable
except:
raise ValueError('variable not convertible to dataframe')
def set_shapes(self,year):
for zone in self.dispatch_zones:
for node_id in self.electricity_load_nodes[zone]['non_flexible'] + self.electricity_gen_nodes[zone]['non_flexible']:
self.nodes[node_id].active_shape = self.nodes[node_id].aggregate_electricity_shapes(year, util.remove_df_levels(util.df_slice(self.dispatch_feeder_allocation.values,year,'year'),year))
    def _helper_shaped_bulk_and_dist(self, year, energy_slice):
        """Multiply annual node energy by each node's hourly shape and combine.

        energy_slice carries a 'supply_node' index level; each node's energy is
        multiplied by that node's active shape (native profile, timeshift_type 2).
        In dispatch write years the supply_node level is kept so results can be
        reported per node; otherwise the shaped profiles are summed across nodes,
        which is faster.

        Args:
            year (int): year of analysis
            energy_slice (DataFrame): annual energy with a 'supply_node' level
        Returns:
            DataFrame: hourly shaped energy
        """
        node_ids = list(set(energy_slice.index.get_level_values('supply_node')))
        if year in self.dispatch_write_years:
            # this keeps supply node as a level
            dfs = [util.DfOper.mult((energy_slice.xs(node_id, level='supply_node'), self.nodes[node_id].active_shape.xs(2, level='timeshift_type'))) for node_id in node_ids]
            df = pd.concat(dfs, keys=node_ids, names=['supply_node'])
        else:
            # this removes supply node as a level and is faster
            df = util.DfOper.add([util.DfOper.mult((energy_slice.xs(node_id, level='supply_node'), self.nodes[node_id].active_shape.xs(2, level='timeshift_type'))) for node_id in node_ids], expandable=False, collapsible=False)
        if cfg.primary_geography != cfg.dispatch_geography:
            # because energy_slice had both geographies, we have effectively done a geomap
            df = util.remove_df_levels(df, cfg.primary_geography)
        return df
    def shaped_dist(self, year, load_or_gen_df, generation):
        """Shape distribution-zone load or generation into hourly profiles.

        Slices *load_or_gen_df* to the distribution zone and applies each node's
        hourly shape. In dispatch write years, a losses-adjusted per-node copy is
        also appended to self.bulk_dispatch for output (generation is negated).
        Returns a zeroed copy of self.distribution_gen when there is nothing to
        shape, so callers always get a frame of the expected structure.

        Args:
            year (int): year of analysis
            load_or_gen_df (DataFrame or None): annual energy with 'dispatch_zone' level
            generation (bool): True when the input is generation (flips sign for output)
        Returns:
            DataFrame: hourly shaped distribution-level energy
        """
        if load_or_gen_df is not None and self.distribution_node_id in load_or_gen_df.index.get_level_values('dispatch_zone'):
            dist_slice = load_or_gen_df.xs(self.distribution_node_id, level='dispatch_zone')
            df = self._helper_shaped_bulk_and_dist(year, dist_slice)
            if year in self.dispatch_write_years:
                df_output = df.copy()
                # gross up to bulk level for reporting
                df_output = DfOper.mult([df_output, self.distribution_losses,self.transmission_losses])
                df_output = self.outputs.clean_df(df_output)
                util.replace_index_name(df_output,'DISPATCH_OUTPUT','SUPPLY_NODE')
                # combine feeder and node labels into a single output label
                df_output = df_output.reset_index(level=['DISPATCH_OUTPUT','DISPATCH_FEEDER'])
                df_output['NEW_DISPATCH_OUTPUT'] = df_output['DISPATCH_FEEDER'] + " " + df_output['DISPATCH_OUTPUT']
                df_output = df_output.set_index('NEW_DISPATCH_OUTPUT',append=True)
                df_output = df_output[year].to_frame()
                util.replace_index_name(df_output,'DISPATCH_OUTPUT','NEW_DISPATCH_OUTPUT')
                df_output.columns = [cfg.calculation_energy_unit.upper()]
                if generation:
                    # generation is reported as negative load
                    df_output*=-1
                self.bulk_dispatch = pd.concat([self.bulk_dispatch, df_output.reorder_levels(self.bulk_dispatch.index.names)])
#                self.bulk_dispatch = util.DfOper.add([self.bulk_dispatch, df_output])
                df = util.remove_df_levels(df, 'supply_node')  # only necessary when we origionally kept supply node as a level
            return df
        else:
            return self.distribution_gen * 0
    def shaped_bulk(self, year, load_or_gen_df, generation):
        """Shape transmission-zone (bulk) load or generation into hourly profiles.

        Slices *load_or_gen_df* to the transmission zone (dropping any
        dispatch_feeder level) and applies each node's hourly shape. In dispatch
        write years a per-node copy is appended to self.bulk_dispatch for output
        (generation negated; load grossed up by transmission losses). Returns a
        zeroed copy of self.bulk_gen when there is nothing to shape.

        Args:
            year (int): year of analysis
            load_or_gen_df (DataFrame or None): annual energy with 'dispatch_zone' level
            generation (bool): True when the input is generation (flips sign for output)
        Returns:
            DataFrame: hourly shaped bulk-level energy
        """
        if load_or_gen_df is not None and self.transmission_node_id in load_or_gen_df.index.get_level_values('dispatch_zone'):
            bulk_slice = util.remove_df_levels(load_or_gen_df.xs(self.transmission_node_id, level='dispatch_zone'), 'dispatch_feeder')
            node_ids = list(set(bulk_slice.index.get_level_values('supply_node')))
            # bulk node shapes must not carry a feeder level; that only applies to distribution
            assert not any(['dispatch_feeder' in self.nodes[node_id].active_shape.index.names for node_id in node_ids])
            df = self._helper_shaped_bulk_and_dist(year, bulk_slice)
            if year in self.dispatch_write_years:
                df_output = pd.concat([df],keys=[year],names=['year'])
                if generation:
                    # generation is reported as negative load
                    df_output*=-1
                else:
                    df_output = DfOper.mult([df_output,self.transmission_losses])
                df_output = self.outputs.clean_df(df_output)
                util.replace_index_name(df_output,'DISPATCH_OUTPUT','SUPPLY_NODE')
                df_output.columns = [cfg.calculation_energy_unit.upper()]
                df_output = util.reorder_b_to_match_a(df_output, self.bulk_dispatch)
                self.bulk_dispatch = pd.concat([self.bulk_dispatch, df_output.reorder_levels(self.bulk_dispatch.index.names)])
#                self.bulk_dispatch = util.DfOper.add([self.bulk_dispatch, df_output])
                df = util.remove_df_levels(df, 'supply_node')  # only necessary when we origionally kept supply node as a level
            return df
        else:
            return self.bulk_gen * 0
    def set_initial_net_load_signals(self,year):
        """Build the initial distribution- and bulk-level load/gen profiles for *year*.

        Shapes final demand and all non-flexible load and generation into hourly
        profiles at the distribution and transmission (bulk) levels, then
        refreshes the derived net-load signals. Order matters:
        update_net_load_signal() reads the four profiles set here.

        Args:
            year (int): year of analysis
        """
#        t = util.time.time()
        final_demand = self.demand_object.aggregate_electricity_shapes(year)
#        t = util.time_stamp(t)
        if year in self.dispatch_write_years:
            self.output_final_demand_for_bulk_dispatch_outputs(final_demand)
#        t = util.time_stamp(t)
        self.distribution_gen = self.shaped_dist(year, self.non_flexible_gen, generation=True)
#        t = util.time_stamp(t)
        self.distribution_load = util.DfOper.add([final_demand, self.shaped_dist(year, self.non_flexible_load, generation=False)])
#        t = util.time_stamp(t)
        self.bulk_gen = self.shaped_bulk(year, self.non_flexible_gen, generation=True)
#        t = util.time_stamp(t)
        self.bulk_load = self.shaped_bulk(year, self.non_flexible_load, generation=False)
#        t = util.time_stamp(t)
        self.update_net_load_signal()
#        t = util.time_stamp(t)
def output_final_demand_for_bulk_dispatch_outputs(self,final_demand):
df_output = util.df_slice(final_demand,2,'timeshift_type')
df_output = DfOper.mult([df_output, self.distribution_losses,self.transmission_losses])
df_output = self.outputs.clean_df(df_output)
util.replace_index_name(df_output,'DISPATCH_OUTPUT','DISPATCH_FEEDER')
df_output.columns = [cfg.calculation_energy_unit.upper()]
self.bulk_dispatch = df_output
    def update_net_load_signal(self):
        """Recompute the net-load signals from the current load/gen profiles.

        Sets:
            dist_only_net_load: distribution load net of distribution generation
            bulk_only_net_load: bulk load grossed up by transmission losses, net of bulk generation
            bulk_net_load: distribution net load grossed up by distribution and
                transmission losses (feeder level removed), plus bulk_only_net_load
            dist_net_load_no_feeders: bulk_only_net_load netted back down to the
                distribution level (losses divided out), plus the feeder-collapsed
                distribution net load
        """
        self.dist_only_net_load = DfOper.subt([self.distribution_load,self.distribution_gen])
        self.bulk_only_net_load = DfOper.subt([DfOper.mult([self.bulk_load,self.transmission_losses]),self.bulk_gen])
        self.bulk_net_load = DfOper.add([DfOper.mult([util.remove_df_levels(DfOper.mult([self.dist_only_net_load,self.distribution_losses]),'dispatch_feeder'),self.transmission_losses]),self.bulk_only_net_load])
        self.dist_net_load_no_feeders = DfOper.add([DfOper.divi([DfOper.divi([self.bulk_only_net_load,util.remove_df_levels(self.distribution_losses,'dispatch_feeder',agg_function='mean')]),self.transmission_losses]), util.remove_df_levels(self.dist_only_net_load,'dispatch_feeder')])
    def calculate_embodied_costs(self, year, loop):
        """Calculates the embodied costs for all supply nodes by multiplying each node's
        active_embodied_costs by the cost inverse. Result is stored in
        the Supply instance's 'cost_dict' attribute.
        Args:
            year (int) = year of analysis
            loop (int or str) = loop identifier
        """
        index = pd.MultiIndex.from_product([cfg.geographies, self.all_nodes], names=[cfg.primary_geography, 'supply_node'])
        for node in self.nodes.values():
            supply_indexer = util.level_specific_indexer(self.io_embodied_cost_df, 'supply_node', node.id)
            # levelized costs take precedence when a node supports them
            if hasattr(node,'calculate_levelized_costs'):
                node.calculate_levelized_costs(year,loop)
            elif hasattr(node,'calculate_costs'):
                node.calculate_costs(year,loop)
            if hasattr(node, 'active_embodied_cost'):
                self.io_embodied_cost_df.loc[supply_indexer, year] = node.active_embodied_cost.values
        for sector in self.demand_sectors:
            # multiply the per-node cost vector through the IO inverse to embody
            # upstream costs in each delivered unit
            inverse = self.inverse_dict['cost'][year][sector]
            indexer = util.level_specific_indexer(self.io_embodied_cost_df, 'demand_sector', sector)
            cost = np.column_stack(self.io_embodied_cost_df.loc[indexer,year].values).T
            self.cost_dict[year][sector] = pd.DataFrame(cost * inverse.values, index=index, columns=index)
def calculate_embodied_emissions(self, year):
"""Calculates the embodied emissions for all supply nodes by multiplying each node's
active_embodied_emissions by the emissions inverse. Result is stored in
the Supply instance's 'emissions_dict' attribute"
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
self.calculate_emissions(year)
row_index = pd.MultiIndex.from_product([cfg.geographies, self.all_nodes, self.ghgs], names=[cfg.primary_geography, 'supply_node', 'ghg'])
col_index = pd.MultiIndex.from_product([cfg.geographies, self.all_nodes], names=[cfg.primary_geography, 'supply_node'])
for node in self.nodes.values():
supply_indexer = util.level_specific_indexer(self.io_embodied_emissions_df, 'supply_node', node.id)
if hasattr(node, 'active_embodied_emissions_rate'):
try:
self.io_embodied_emissions_df.loc[supply_indexer, year] = node.active_embodied_emissions_rate.values
except:
pdb.set_trace()
for sector in self.demand_sectors:
inverse = copy.deepcopy(self.inverse_dict['energy'][year][sector])
keys = self.ghgs
name = ['ghg']
inverse = pd.concat([inverse]*len(keys), keys=keys, names=name)
inverse = inverse.reorder_levels([cfg.primary_geography,'supply_node','ghg'])
inverse.sort(axis=0,inplace=True)
indexer = util.level_specific_indexer(self.io_embodied_emissions_df, 'demand_sector', sector)
emissions = np.column_stack(self.io_embodied_emissions_df.loc[indexer,year].values).T
self.emissions_dict[year][sector] = pd.DataFrame(emissions * inverse.values,index=row_index,columns=col_index)
    def map_embodied_to_demand(self, embodied_dict, link_dict):
        """Maps embodied emissions results for supply node to their associated final energy type and then
        to final energy demand.
        Args:
            embodied_dict (dict): dictionary of supply-side embodied result DataFrames (energy, emissions, or cost)
            link_dict (dict): dictionary of dataframes with structure with [geography, final_energy] multiIndex columns
            and [geography,supply_node] rows
        Returns:
            df (DataFrame)
            Dtype: Float
            Row Index: [geography, supply_node, demand_sector, ghgs (emissions results only), year]
            Cols: ['value']
        """
        df_list = []
        for year in self.years:
            sector_df_list = []
            keys = self.demand_sectors
            name = ['sector']
            idx = pd.IndexSlice
            for sector in self.demand_sectors:
                # select only the columns for final-energy supply nodes (map_dict values)
                link_dict[year][sector].loc[:,:] = embodied_dict[year][sector].loc[:,idx[:, self.map_dict.values()]].values
                link_dict[year][sector]= link_dict[year][sector].stack([cfg.primary_geography,'final_energy']).to_frame()
                # drop all-zero entries to keep the output frame small
                link_dict[year][sector] = link_dict[year][sector][link_dict[year][sector][0]!=0]
                levels_to_keep = [x for x in link_dict[year][sector].index.names if x in cfg.output_combined_levels]
                sector_df_list.append(link_dict[year][sector].groupby(level=levels_to_keep).sum())
            year_df = pd.concat(sector_df_list, keys=keys,names=name)
            df_list.append(year_df)
        # NOTE(review): stored for inspection; holds only the last year's sector list
        self.sector_df_list = sector_df_list
        self.df_list = df_list
        keys = self.years
        name = ['year']
        df = pd.concat(df_list,keys=keys,names=name)
        df.columns = ['value']
        df = df.groupby(level=[x for x in df.index.names if x in cfg.output_combined_levels]).sum()
        return df
    def convert_io_matrix_dict_to_df(self, adict):
        """Converts an io dictionary to a dataframe
        Args:
            adict = dictionary containing sliced dataframes, keyed by year then sector
        Returns:
            df (DataFrame)
            Dtype: Float
            Row Index: [geography, supply_node_input, supply_node_output, year]
            Cols: ['value']
        """
        df_list = []
        for year in self.years:
            sector_df_list = []
            keys = self.demand_sectors
            name = ['sector']
            for sector in self.demand_sectors:
                df = adict[year][sector]
                # keep only levels requested for combined outputs
                levels_to_keep = [x for x in df.index.names if x in cfg.output_combined_levels]
                df = df.groupby(level=levels_to_keep).sum()
                # move column levels into the row index and relabel rows as input/output
                df = df.stack([cfg.primary_geography,'supply_node']).to_frame()
                df.index.names = [cfg.primary_geography+'_input','supply_node_input',cfg.primary_geography,'supply_node']
                sector_df_list.append(df)
            year_df = pd.concat(sector_df_list, keys=keys,names=name)
            df_list.append(year_df)
        # NOTE(review): stored for inspection; holds only the last year's sector list
        self.sector_df_list = sector_df_list
        self.df_list = df_list
        keys = self.years
        name = ['year']
        df = pd.concat(df_list,keys=keys,names=name)
        df.columns = ['value']
        return df
    def map_embodied(self, embodied_dict, link_dict):
        """Maps embodied results for supply node to other supply nodes.

        NOTE(review): nearly identical to map_embodied_to_demand except that this
        variant does not do the final groupby/sum over output_combined_levels —
        consider consolidating the shared logic.
        Args:
            embodied_dict (dict): dictionary of supply-side embodied result DataFrames (energy, emissions, or cost)
            link_dict (dict): dictionary of dataframes with structure with [geography, final_energy] multiIndex columns
            and [geography,supply_node] rows
        Returns:
            df (DataFrame)
            Dtype: Float
            Row Index: [geography, supply_node, demand_sector, ghgs (emissions results only), year]
            Cols: ['value']
        """
        df_list = []
        for year in self.years:
            sector_df_list = []
            keys = self.demand_sectors
            name = ['sector']
            idx = pd.IndexSlice
            for sector in self.demand_sectors:
                # select only the columns for final-energy supply nodes (map_dict values)
                link_dict[year][sector].loc[:,:] = embodied_dict[year][sector].loc[:,idx[:, self.map_dict.values()]].values
                link_dict[year][sector]= link_dict[year][sector].stack([cfg.primary_geography,'final_energy']).to_frame()
#                levels = [x for x in ['supply_node',cfg.primary_geography +'_supply', 'ghg',cfg.primary_geography,'final_energy'] if x in link_dict[year][sector].index.names]
                # drop all-zero entries to keep the output frame small
                link_dict[year][sector] = link_dict[year][sector][link_dict[year][sector][0]!=0]
                levels_to_keep = [x for x in link_dict[year][sector].index.names if x in cfg.output_combined_levels]
                sector_df_list.append(link_dict[year][sector].groupby(level=levels_to_keep).sum())
            year_df = pd.concat(sector_df_list, keys=keys,names=name)
            df_list.append(year_df)
        # NOTE(review): stored for inspection; holds only the last year's sector list
        self.sector_df_list = sector_df_list
        self.df_list = df_list
        keys = self.years
        name = ['year']
        df = pd.concat(df_list,keys=keys,names=name)
        df.columns = ['value']
#        levels = [x for x in ['supply_node',cfg.primary_geography +'_supply', 'ghg',cfg.primary_geography,'final_energy'] if x in df.index.names]
        return df
    def map_embodied_to_export(self, embodied_dict):
        """Map embodied results (energy, emissions, or cost) onto exporting supply nodes.

        Args:
            embodied_dict (dict): year -> sector -> DataFrame of supply-side embodied results
        Returns:
            df (DataFrame)
                Dtype: Float
                Row Index: [year, sector, supply_node_export, <levels kept per cfg.output_combined_levels>]
                Cols: ['value']
        """
        export_df = self.io_export_df.stack().to_frame()
        # only nodes with any nonzero exports participate
        export_df = export_df.groupby(level='supply_node').filter(lambda x: x.sum()!=0)
        util.replace_index_name(export_df, 'year')
        util.replace_column(export_df, 'value')
        supply_nodes = list(set(export_df.index.get_level_values('supply_node')))
        df_list = []
        idx = pd.IndexSlice
        for year in self.years:
            sector_df_list = []
            keys = self.demand_sectors
            name = ['sector']
            for sector in self.demand_sectors:
                # deepcopy so slicing/renaming never mutates the shared embodied results
                df = copy.deepcopy(embodied_dict[year][sector]).loc[:,idx[:,supply_nodes]]
                util.replace_column_name(df,'supply_node_export', 'supply_node')
                util.replace_index_name(df, cfg.primary_geography + "_supply", cfg.primary_geography)
                stack_levels =[cfg.primary_geography, "supply_node_export"]
                df = df.stack(stack_levels).to_frame()
                levels_to_keep = [x for x in df.index.names if x in cfg.output_combined_levels+stack_levels]
                df = df.groupby(level=list(set(levels_to_keep+['supply_node']))).sum()
                df = df.groupby(level='supply_node').filter(lambda x: x.sum()!=0)
                sector_df_list.append(df)
            year_df = pd.concat(sector_df_list, keys=keys,names=name)
            df_list.append(year_df)
        keys = self.years
        name = ['year']
        df = pd.concat(df_list,keys=keys,names=name)
        df.columns = ['value']
        return df
    def calculate_export_result(self, export_result_name, io_dict):
        """Multiply export quantities by embodied results and store them on self.

        Args:
            export_result_name (str): attribute name to set on self with the result
                (None when there are no nonzero exports or embodied values)
            io_dict (dict): year -> sector -> DataFrame of embodied results, passed to
                map_embodied_to_export
        """
        export_map_df = self.map_embodied_to_export(io_dict)
        export_df = self.io_export_df.stack().to_frame()
        # keep only nodes that actually export something
        export_df = export_df.groupby(level=['supply_node']).filter(lambda x: x.sum()!=0)
        if cfg.primary_geography+"_supply" in cfg.output_combined_levels:
            export_map_df = export_map_df.groupby(level=['supply_node',cfg.primary_geography+"_supply"]).filter(lambda x: x.sum()>0)
        else:
            export_map_df = export_map_df.groupby(level=['supply_node']).filter(lambda x: x.sum()!=0)
        if export_map_df.empty is False and export_df.empty is False:
            util.replace_index_name(export_df, 'year')
            util.replace_index_name(export_df, 'sector', 'demand_sector')
            util.replace_index_name(export_df,'supply_node_export','supply_node')
            util.replace_column(export_df, 'value')
            geo_df_list = []
            # multiply geography by geography to keep the frames aligned
            for geography in cfg.geographies:
                export_map_df_indexer = util.level_specific_indexer(export_map_df,[cfg.primary_geography],[geography])
                export_df_indexer = util.level_specific_indexer(export_df,[cfg.primary_geography],[geography])
                df = util.DfOper.mult([export_df.loc[export_df_indexer,:], export_map_df.loc[export_map_df_indexer,:]])
                geo_df_list.append(df)
            export_result = pd.concat(geo_df_list)
        else:
            export_result = None
        setattr(self, export_result_name, export_result)
## @timecall(immediate=True)
def adjust_for_not_incremental(self,io):
"""Adjusts for import nodes. Their io column must be zeroed out so that we don't double count upstream values.
Args:
io (DataFrame) = DataFrame to be adjusted
node_class (Class) = Supply node attribute class (i.e. cost) to use to determine whether upstream values are zeroed out
Returns:
io_adjusted (DataFrame) = adjusted DataFrame
"""
io_adjusted = copy.deepcopy(io)
for node in self.nodes.values():
if isinstance(node, ImportNode):
col_indexer = util.level_specific_indexer(io_adjusted,'supply_node',node.id, axis=1)
io_adjusted.loc[:,col_indexer] = 0
return io_adjusted
    def calculate_coefficients(self, year, loop):
        """Loop through all supply nodes and calculate active coefficients.

        Args:
            year (int) = year of analysis
            loop (int or str) = loop identifier
        """
        # blend nodes first: their residual shares must be current before coefficients are built
        for node_id in self.blend_nodes:
            node = self.nodes[node_id]
            node.update_residual(year)
        for node in self.nodes.values():
            if node.id!=self.thermal_dispatch_node_id:
                node.calculate_active_coefficients(year, loop)
            elif year == int(cfg.cfgfile.get('case','current_year')) and node.id == self.thermal_dispatch_node_id:
                # thermal dispatch node is updated through the dispatch. In the first year we assume equal shares
                # TODO make capacity weighted for better approximation
                node.calculate_active_coefficients(year, loop)
                # seed every positive entry with a tiny value, then normalize to equal shares
                node.active_coefficients_total[node.active_coefficients_total>0] = 1E-7
                node.active_coefficients_total = node.active_coefficients_total.groupby(level=['demand_sector',cfg.primary_geography]).transform(lambda x: x/x.sum())
                node.active_coefficients_total.replace(to_replace=np.nan,value=0,inplace=True)
    def set_pass_through_dicts(self, year):
        """Set pass-through dictionaries that control the loop when calculating
        physical emissions.

        If a node has an efficiency type that implies energy is passed through it
        (i.e. gas distribution system) then the physical emissions values of the fuel are
        also passed through. All inputs must be calculated before the loop continues
        downstream of any supply node.

        Args:
            year (int) = year of analysis
        """
        if year == min(self.years):
            # first year: build the dicts, then prune entries that can never be solved
            for node in self.nodes.values():
                node.set_pass_through_dict(self.nodes)
            for node in self.nodes.values():
                if (hasattr(node,'pass_through_dict')) or hasattr(node,'active_physical_emissions_rate'):
                    pass
                elif hasattr(node,'id'):
                    # node can neither pass through nor supply a rate: remove it from
                    # every other node's pass-through bookkeeping
                    for dict_node in self.nodes.values():
                        if hasattr(dict_node, 'pass_through_dict'):
                            if node.id in dict_node.pass_through_dict.keys():
                                del dict_node.pass_through_dict[node.id]
            for node in self.nodes.values():
                # nodes whose own dict emptied out are likewise pruned from the others
                if hasattr(node,'pass_through_dict') and node.pass_through_dict == {}:
                    for dict_node in self.nodes.values():
                        if hasattr(dict_node, 'pass_through_dict'):
                            if node.id in dict_node.pass_through_dict.keys():
                                del dict_node.pass_through_dict[node.id]
        else:
            # subsequent years: reset all solved flags to False for the new pass
            for node in self.nodes.values():
                if hasattr(node,'pass_through_dict'):
                    for key in node.pass_through_dict.keys():
                        node.pass_through_dict[key] = False
def calculate_stocks(self,year,loop):
"""Loops through all supply nodes that have stocks and updates those stocks
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
for node in self.nodes.values():
if hasattr(node, 'stock'):
node.update_stock(year,loop)
def reconcile_trades(self,year,loop):
"""Reconciles internal trades. These are instances where the geographic location of supply in a node is divorced from
the geographic location of the demand. To achieve this result, we calculate the expected location of supply and then pass
that information to blend or import node trade adjustment dataframes so that the IO demands supply with that geographic distribution
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
if len(cfg.geographies) > 1:
for node in self.nodes.values():
if node.id not in self.blend_nodes:
#Checks whether a node has information (stocks or potential) that means that demand is not a good proxy for location of supply
node.calculate_internal_trades(year, loop)
for node in self.nodes.values():
#loops through all nodes checking for excess supply from nodes that are not curtailable, flexible, or exportable
trade_sub = node.id
if node.id not in self.blend_nodes:
#Checks whether a node has information that means that demand is not a good proxy for location of supply
if hasattr(node,'active_internal_trade_df') and node.internal_trades == "stop and feed":
#enters a loop to feed that constraint forward in the supply node until it can be reconciled at a blend node or exported
self.feed_internal_trades(year, trade_sub, node.active_internal_trade_df)
def reconcile_constraints(self,year,loop):
"""Reconciles instances where IO demands exceed a node's potentia. To achieve this result, we calculate the expected location of supply and then pass
that information to blend or import node trade adjustment dataframes so that the IO demands supply with that geographic distribution
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
for node in self.nodes.values():
#blend nodes cannot be potential constrained
if node.id not in self.blend_nodes:
#checks the node for an adjustment factor due to excess throughput requested from a constraiend node
#Ex. if biomass throughput exceeeds biomass potential by 2x, the adjustment factor passed would be .5
node.calculate_potential_constraints(year)
#Checks whether a constraint was violated
if node.constraint_violation:
#enters a loop to feed that constraint forward in the supply node
self.feed_constraints(year, node.id, node.active_constraint_df)
def reconcile_oversupply(self,year,loop):
"""Reconciles instances where IO demands less than a node's expected level of supply based
on its existing stock. To achieve this result, we calculate the expected location of supply and then pass
that information to blend or import node trade adjustment dataframes so that the IO demands supply with that geographic distribution
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
for node in self.nodes.values():
#loops through all nodes checking for excess supply from nodes that are not curtailable, flexible, or exportable
oversupply_factor = node.calculate_oversupply(year,loop) if hasattr(node,'calculate_oversupply') else None
if oversupply_factor is not None:
if node.is_exportable:
#if the node is exportable, excess supply is added to the node's exports
excess_supply = DfOper.subt([DfOper.mult([node.active_supply, oversupply_factor]), node.active_supply])
node.export.active_values = DfOper.add([node.export.active_values, excess_supply])
elif node.id in self.nodes[self.thermal_dispatch_node_id].nodes:
#excess supply is not possible for thermally dispatched nodes
pass
elif node.id in self.blend_nodes:
pass
elif node.is_curtailable:
#if the node's production is curtailable, then the energy production is adjusted down in the node
node.adjust_energy(oversupply_factor,year)
else:
#otherwise, the model enters a loop to feed that constraint forward in the supply node until it can be reconciled at a blend node or exported
self.feed_oversupply(year, node.id, oversupply_factor)
def calculate_emissions(self,year):
"""Calculates physical and embodied emissions for each supply node
Args:
year (int) = year of analysis
"""
self.calculate_input_emissions_rates(year)
self.calculate_emissions_coefficients(year)
for node in self.nodes.values():
node.calculate_emissions(year)
node.calculate_embodied_emissions_rate(year)
def calculate_input_emissions_rates(self,year):
"""Calculates physical emissions coefficients for all nodes in order to calculate the actual
physical emissions attributable to each supply node and demand subsector.
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
for node in self.nodes.values():
node.update_pass_through_df_dict(year)
node.calculate_input_emissions_rates(year, self.ghgs)
def calculate_emissions_coefficients(self,year):
"""Calculates and propagates physical emissions coefficients to downstream nodes for internal emissions
calculations
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
for node in self.nodes.values():
if hasattr(node,'calculate_co2_capture_rate'):
node.calculate_co2_capture_rate(year)
for node in self.nodes.values():
if node.active_emissions_coefficients is not None:
node.active_physical_emissions_coefficients = node.active_emissions_coefficients * 0
self.set_pass_through_dicts(year)
for node in self.nodes.values():
if hasattr(node,'active_physical_emissions_rate'):
self.feed_physical_emissions(year, node.id, node.active_physical_emissions_rate)
def feed_constraints(self, year, constrained_node, constraint_adjustment):
"""Propagates constraint reconciliation adjustment factors to all dependent blend nodes
Args:
year (int) = year of analysis
constrained_node (int) = integer id key of supply node
contraint_adjustment (df) = dataframe of constraint adjustment factors.
Ex. If demanded biomass is 2 EJ and available biomass is 1 EJ, constraint adjustment factor would equal .5
"""
#loops all potential output nodes
for output_node in self.nodes.values():
if hasattr(output_node, 'active_coefficients_total') and getattr(output_node, 'active_coefficients_total') is not None:
#if the constrained sub is an input to this output node
if constrained_node in set(output_node.active_coefficients_total.index.get_level_values('supply_node')):
#if this output node is a blend node, the reconciliation happens here.
if output_node.id in self.blend_nodes:
logging.info(" constrained node %s being reconciled in blend node %s" %(self.nodes[constrained_node].name, output_node.name))
indexer = util.level_specific_indexer(output_node.values,'supply_node', constrained_node)
try:
output_node.values.loc[indexer,year] = DfOper.mult([output_node.values.loc[indexer, year].to_frame(),constraint_adjustment]).values
#flag for the blend node that it has been reconciled and needs to recalculate residual
output_node.reconciled = True
except:
pdb.set_trace()
#flag that anything in the supply loop has been reconciled, and so the loop needs to be resolved
self.reconciled = True
else:
#if the output node has the constrained sub as an input, and it is not a blend node, it becomes the constrained sub
#in the loop in order to feed the adjustment factor forward to dependent nodes until it terminates at a blend node
self.feed_constraints(year, constrained_node=output_node.id, constraint_adjustment=constraint_adjustment)
#TODO add logic if it reaches the end of the supply node and has never been reconicled at a blend node
    def feed_internal_trades(self, year, internal_trade_node, internal_trade_adjustment):
        """Propagate internal trading adjustments to downstream nodes.

        Args:
            year (int) = year of analysis
            internal_trade_node (int) = integer id key of supply node that has internal trade adjustments
            internal_trade_adjustment (df) = dataframe of internal trade adjustments to propagate forward
                Ex. If demanded biomass is 3 EJ in geography A and 1 EJ in geography B and the supply of
                biomass is 1 EJ in geography A and 1 EJ in geography B, the internal trade adjustment is
                2/3 for geography A (ratio of supply (.5) divided by ratio of demand (.75)) and 2 for
                geography B (.5/.25).
        """
        #loops all potential output nodes
        for output_node in self.nodes.values():
            if hasattr(output_node, 'nodes'):
                if internal_trade_node in output_node.nodes:
                    # reconciliation happens here when the output node is a tradable-geography blend node,
                    # an import node, or a node whose internal trades 'stop' (are not fed forward)
                    # NOTE(review): `and` binds tighter than `or`, so this parses as
                    # (blend AND tradable_geography) OR ImportNode OR 'stop' — confirm the grouping is intended
                    if output_node.id in self.blend_nodes and output_node.tradable_geography or isinstance(output_node,ImportNode) or output_node.internal_trades == 'stop':
                        indexer = util.level_specific_indexer(output_node.active_trade_adjustment_df,'supply_node', internal_trade_node)
                        output_node.active_trade_adjustment_df.loc[indexer, :] = internal_trade_adjustment.values
                        self.reconciled = True
                        output_node.reconciled = True
                    elif output_node.internal_trades == 'stop and feed':
                        # the output node feeds forward its own trades: apply the adjustment,
                        # then recurse with its own internal-trade dataframe
                        indexer = util.level_specific_indexer(output_node.active_coefficients_total,'supply_node', internal_trade_node)
                        output_node.active_trade_adjustment_df.loc[indexer, :] = internal_trade_adjustment.values
                        output_node.reconciled = True
                        self.reconciled = True
                        self.feed_internal_trades(year, internal_trade_node=output_node.id, internal_trade_adjustment=output_node.active_internal_trade_df)
                    elif output_node.internal_trades == 'feed':
                        # feed-only nodes pass the ORIGINAL node's adjustment factors downstream unchanged
                        self.feed_internal_trades(year, internal_trade_node=output_node.id, internal_trade_adjustment=internal_trade_adjustment)
    def feed_oversupply(self, year, oversupply_node, oversupply_factor):
        """Propagate oversupply adjustments to downstream nodes.

        Args:
            year (int) = year of analysis
            oversupply_node (int) = integer id key of supply node that has the capacity to produce more throughput than demanded
            oversupply_factor (df) = dataframe of oversupply adjustments to propagate forward

        Ex. If demand for wind energy is 1 EJ and the existing stock can produce 2 EJ, and the node
        is not flagged as curtailable or exportable, the oversupply is propagated downstream until
        it reaches a blend node or a node that is curtailable or exportable. Curtailable: excess
        supply is ignored and capacity goes unutilized. Exportable: excess supply becomes exports
        outside the model. Blend node: blend coefficient values are adjusted and reconciled so the
        excess supply is demanded in the next loop.
        """
        for output_node in self.nodes.values():
            if hasattr(output_node,'nodes'):
                if oversupply_node in output_node.nodes:
                    if output_node.id in self.blend_nodes:
                        # if the output node is a blend node, this is where oversupply is reconciled
                        # print oversupply_node, output_node.id, oversupply_factor
                        indexer = util.level_specific_indexer(output_node.values,'supply_node', oversupply_node)
                        output_node.values.loc[indexer, year] = DfOper.mult([output_node.values.loc[indexer, year].to_frame(),oversupply_factor]).values
                        output_node.reconciled = True
                        self.reconciled=True
                    else:
                        if output_node.is_curtailable or output_node.id in self.thermal_nodes:
                            # curtailable ends the feed-loop: excess is curtailed here; flexible nodes
                            # are reconciled later in the dispatch loop
                            pass
                        elif output_node.is_exportable:
                            # exportable: excess supply is added to this node's export demand
                            excess_supply = DfOper.subt([DfOper.mult([output_node.active_supply, oversupply_factor]), output_node.active_supply])
                            output_node.export.active_values = DfOper.add([output_node.export.active_values, excess_supply])
                        else:
                            # otherwise continue the feed-loop until the excess can be reconciled
                            self.feed_oversupply(year, oversupply_node = output_node.id, oversupply_factor=oversupply_factor)
def update_io_df(self,year,loop):
"""Updates the io dictionary with the active coefficients of all nodes
Args:
year (int) = year of analysis
"""
for geography in cfg.geographies:
#fix for zero energy demand
if util.df_slice(self.io_total_active_demand_df,geography,cfg.primary_geography).sum().sum()==0:
for col_node in self.nodes.values():
if col_node.supply_type == 'Blend':
if col_node.active_coefficients_total is None:
continue
else:
col_indexer = util.level_specific_indexer(col_node.active_coefficients_total,cfg.primary_geography,geography,axis=1)
normalized = col_node.active_coefficients_total.loc[:,col_indexer].groupby(level=['demand_sector']).transform(lambda x: x/x.sum())
normalized = normalized.replace([np.nan,np.inf],1E-7)
col_node.active_coefficients_total.loc[:,col_indexer] = normalized
for col_node in self.nodes.values():
if loop == 1 and col_node.id not in self.blend_nodes:
continue
elif col_node.active_coefficients_total is None:
continue
else:
for sector in self.demand_sectors:
levels = ['supply_node' ]
col_indexer = util.level_specific_indexer(self.io_dict[year][sector], levels=levels, elements=[col_node.id])
row_nodes = list(map(int,col_node.active_coefficients_total.index.levels[util.position_in_index(col_node.active_coefficients_total,'supply_node')]))
row_nodes = [x for x in row_nodes if x in self.all_nodes]
row_indexer = util.level_specific_indexer(self.io_dict[year][sector], levels=levels, elements=[row_nodes])
levels = ['demand_sector','supply_node']
active_row_indexer = util.level_specific_indexer(col_node.active_coefficients_total, levels=levels, elements=[sector,row_nodes])
active_col_indexer = util.level_specific_indexer(col_node.active_coefficients_total, levels=['demand_sector'], elements=[sector], axis=1)
try:
self.io_dict[year][sector].loc[row_indexer, col_indexer] = col_node.active_coefficients_total.loc[active_row_indexer,active_col_indexer].values
except:
pdb.set_trace()
if col_node.overflow_node:
self.io_dict[year][sector].loc[row_indexer, col_indexer]=0
def feed_physical_emissions(self, year, emissions_node, active_physical_emissions_rate):
"""Propagates physical emissions rates of energy products to downstream nodes in order to calculate emissions in each node
Args:
year (int) = year of analysis
emissions_node (int) = integer id key of supply node that a physical emissions rate (ex. oil) or a node that is conveying a product
with a physical emissions rate (i.e. oil pipeline)
active_physical_emissions_rate (df) = dataframe of a node's physical emissions rate
Ex. Node A is natural gas with a physical emissions rate of 53.06 kG CO2/MMBtu. Node B is the natural gas pipeline that delivers
natural gas to Node C, which is combustion gas turbines, which consumes the natural gas. The input emissions rate of 53.06 has to pass through
Node B to Node C in order to calculate the combustion emissions of the gas turbines.
"""
#loops all potential output node
for output_node in self.nodes.values():
#check whether the node has active coefficients (i.e. complete data)
if hasattr(output_node, 'active_coefficients') and getattr(output_node, 'active_coefficients') is not None:
#check whether the emissions node is in the coefficient of the output node
if emissions_node in set(output_node.active_emissions_coefficients.index.get_level_values('supply_node')):
#if it is, set the emissions coefficient values based on the physical emissions rate of the emissions node
indexer = util.level_specific_indexer(output_node.active_emissions_coefficients,['supply_node'], [emissions_node])
for efficiency_type in set(output_node.active_emissions_coefficients.loc[indexer,:].index.get_level_values('efficiency_type')):
emissions_indexer = util.level_specific_indexer(output_node.active_emissions_coefficients,['efficiency_type','supply_node'], [efficiency_type,emissions_node])
output_node.active_physical_emissions_coefficients.loc[emissions_indexer,:] += output_node.active_emissions_coefficients.loc[emissions_indexer,:].values * active_physical_emissions_rate.values
if hasattr(output_node, 'pass_through_dict') and emissions_node in output_node.pass_through_dict.keys():
#checks whether the node has a pass_through_dictionary (i.e. has an efficiency of type 2 in its coefficients)
#if so, the value of the pass_through_dict is True for the emissions node. This means that the emissions rate for
#this pass_through has been solved. They must all be solved before the emissions rate of the output node can be fed to
output_node.pass_through_dict[emissions_node] = True
# feeds passed-through emissions rates until it reaches a node where it is completely consumed
if isinstance(output_node,ImportNode) and output_node.emissions.data is True:
#if the node is an import node, and the emissions intensity is not incremental, the loop stops because the input emissions intensity
#overrides the passed through emissions intensity
pass
else:
indexer = util.level_specific_indexer(output_node.active_emissions_coefficients,['supply_node','efficiency_type'], [emissions_node,2])
additional_emissions_rate = output_node.active_emissions_coefficients.loc[indexer,:].values * active_physical_emissions_rate.values
output_node.active_pass_through_df += additional_emissions_rate
# output_node.active_pass_through_df.columns = output_node.active_pass_through_df.columns.droplevel(-1)
if all(output_node.pass_through_dict.values()):
emissions_rate = output_node.active_pass_through_df.groupby(level=['ghg'],axis=0).sum()
emissions_rate= emissions_rate.stack([cfg.primary_geography, 'demand_sector']).to_frame()
emissions_rate = emissions_rate.reorder_levels([cfg.primary_geography, 'demand_sector', 'ghg'])
emissions_rate.sort(inplace=True, axis=0)
keys = [self.demand_sectors,cfg.geographies]
names = ['demand_sector', cfg.primary_geography]
for key, name in zip(keys, names):
emissions_rate = pd.concat([emissions_rate]*len(key),axis=1,keys=key,names=[name])
emissions_rate.sort(inplace=True, axis=0)
emissions_rate.columns = emissions_rate.columns.droplevel(-1)
self.feed_physical_emissions(year,output_node.id,emissions_rate)
def initialize_year(self,year,loop):
"""Updates the dataframes of supply nodes so that the 'active' versions of dataframes reference the appropriate year
Args:
year (int) = year of analysis
loop (int or str) = loop identifier
"""
for node in self.nodes.values():
node.reconciled = False
if hasattr(node,'active_supply') and node.active_supply is not None:
node.active_supply.columns = [year]
if hasattr(node, 'active_trade_adjustment_df'):
previous_year = max(min(self.years), year-1)
node.trade_adjustment_dict[previous_year] = copy.deepcopy(node.active_trade_adjustment_df)
self.map_export_to_io(year,loop)
self.calculate_demand(year,loop)
    def calculate_initial_demand(self):
        """Calculate the demands on the supply side from final energy demand and exports.

        Export values can be updated during the reconciliation process, so this initial
        demand does not necessarily equal the demands on the energy system after the
        supply side has been solved.
        """
        #year is the first year and loop is the initial loop
        year = min(self.years)
        loop = 'initial'
        self.add_initial_demand_dfs(year)
        self.add_io_df(['io_demand_df', 'io_export_df', 'io_supply_df','io_embodied_cost_df'])
        self.add_io_embodied_emissions_df()
        self.map_demand_to_io()
        self.io_active_supply_df = copy.deepcopy(self.empty_output_df)
        self.map_export_to_io(year, loop)
        # total demand = demand-side energy + exports for the first year
        self.io_total_active_demand_df = DfOper.add([self.io_demand_df.loc[:,year].to_frame(),self.io_export_df.loc[:,year].to_frame()])
def calculate_demand(self,year,loop):
self.map_export_to_io(year, loop)
self.io_total_active_demand_df = util.DfOper.add([self.io_demand_df.loc[:,year].to_frame(),self.io_export_df.loc[:,year].to_frame()])
def update_demand(self, year, loop):
self.map_export_to_io(year, loop)
self.io_total_active_demand_df = DfOper.add([self.io_demand_df.loc[:,year].to_frame(),self.io_export_df.loc[:,year].to_frame()])
    def calculate_io(self, year, loop):
        """Solve the supply IO for *year*: per-sector supplies and energy/cost inverse matrices.

        Args:
            year (int): year of analysis
            loop (int or str): loop identifier
        """
        index = pd.MultiIndex.from_product([cfg.geographies, self.all_nodes
                                            ], names=[cfg.primary_geography,'supply_node'])
        for sector in self.demand_sectors:
            indexer = util.level_specific_indexer(self.io_total_active_demand_df,'demand_sector', sector)
            self.active_io = self.io_dict[year][sector]
            # import-node columns zeroed for cost so upstream values aren't double counted
            active_cost_io = self.adjust_for_not_incremental(self.active_io)
            self.active_demand = self.io_total_active_demand_df.loc[indexer,:]
            temp = solve_IO(self.active_io.values, self.active_demand.values)
            # zero rows that have neither coefficients nor demand
            temp[np.nonzero(self.active_io.values.sum(axis=1) + self.active_demand.values.flatten()==0)[0]] = 0
            self.io_supply_df.loc[indexer,year] = temp
            temp = solve_IO(self.active_io.values)
            temp[np.nonzero(self.active_io.values.sum(axis=1) + self.active_demand.values.flatten()==0)[0]] = 0
            self.inverse_dict['energy'][year][sector] = pd.DataFrame(temp, index=index, columns=index)
            temp = solve_IO(active_cost_io.values)
            temp[np.nonzero(active_cost_io.values.sum(axis=1) + self.active_demand.values.flatten()==0)[0]] = 0
            self.inverse_dict['cost'][year][sector] = pd.DataFrame(temp, index=index, columns=index)
            idx = pd.IndexSlice
            # NOTE(review): the next two assignments write the same slice back unchanged (a no-op);
            # possibly a leftover from a storage-node adjustment — confirm intent
            self.inverse_dict['energy'][year][sector].loc[idx[:,self.non_storage_nodes],:] = self.inverse_dict['energy'][year][sector].loc[idx[:,self.non_storage_nodes],:]
            self.inverse_dict['cost'][year][sector].loc[idx[:,self.non_storage_nodes],:] = self.inverse_dict['cost'][year][sector].loc[idx[:,self.non_storage_nodes],:]
        for node in self.nodes.values():
            # hand each node its solved supply, aggregated to geography x sector
            indexer = util.level_specific_indexer(self.io_supply_df,levels=['supply_node'], elements = [node.id])
            node.active_supply = self.io_supply_df.loc[indexer,year].groupby(level=[cfg.primary_geography, 'demand_sector']).sum().to_frame()
def add_initial_demand_dfs(self, year):
for node in self.nodes.values():
node.internal_demand = copy.deepcopy(self.empty_output_df)
node.export_demand = copy.deepcopy(self.empty_output_df)
node.internal_demand = copy.deepcopy(self.empty_output_df)
def pass_initial_demand_to_nodes(self, year):
for node in self.nodes.values():
indexer = util.level_specific_indexer(self.io_total_active_demand_df,levels=['supply_node'], elements = [node.id])
node.active_demand = self.io_total_active_demand_df.loc[indexer,:]
def add_empty_output_df(self):
"""adds an empty df to node instances"""
index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography],
self.demand_sectors], names=[cfg.primary_geography, 'demand_sector'])
self.empty_output_df = util.empty_df(index = index, columns = self.years,fill_value = 1E-25)
def add_io_df(self,attribute_names):
#TODO only need to run years with a complete demand data set. Check demand dataframe.
index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography],self.demand_sectors, self.all_nodes
], names=[cfg.primary_geography,
'demand_sector','supply_node'])
for attribute_name in util.put_in_list(attribute_names):
setattr(self, attribute_name, util.empty_df(index = index, columns = self.years))
def add_io_embodied_emissions_df(self):
#TODO only need to run years with a complete demand data set. Check demand dataframe.
index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography],self.demand_sectors, self.all_nodes,self.ghgs,
], names=[cfg.primary_geography,
'demand_sector', 'supply_node','ghg'])
setattr(self, 'io_embodied_emissions_df', util.empty_df(index = index, columns = self.years))
def create_inverse_dict(self):
index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography], self.all_nodes
], names=[cfg.primary_geography,'supply_node'])
df = util.empty_df(index = index, columns = index)
self.inverse_dict = util.recursivedict()
for key in ['energy', 'cost']:
for year in self.years:
for sector in util.ensure_iterable_and_not_string(self.demand_sectors):
self.inverse_dict[key][year][sector]= df
def create_embodied_cost_and_energy_demand_link(self):
map_dict = self.map_dict
keys = sorted(map_dict.items(), key=operator.itemgetter(1))
keys = [x[0] for x in keys]
#sorts final energy in the same order as the supply node dataframes
index = pd.MultiIndex.from_product([cfg.geographies, self.all_nodes], names=[cfg.primary_geography+"_supply",'supply_node'])
columns = pd.MultiIndex.from_product([cfg.geographies,keys], names=[cfg.primary_geography,'final_energy'])
self.embodied_cost_link_dict = util.recursivedict()
self.embodied_energy_link_dict = util.recursivedict()
for year in self.years:
for sector in self.demand_sectors:
self.embodied_cost_link_dict[year][sector] = util.empty_df(index = index, columns = columns)
self.embodied_energy_link_dict[year][sector] = util.empty_df(index = index, columns = columns)
def create_embodied_emissions_demand_link(self):
map_dict = self.map_dict
#sorts final energy in the same order as the supply node dataframes
keys = sorted(map_dict.items(), key=operator.itemgetter(1))
keys = [x[0] for x in keys]
index = pd.MultiIndex.from_product([cfg.geographies, self.all_nodes, self.ghgs], names=[cfg.primary_geography+"_supply",'supply_node','ghg'])
columns = pd.MultiIndex.from_product([cfg.geographies,keys], names=[cfg.primary_geography,'final_energy'])
self.embodied_emissions_link_dict = util.recursivedict()
for year in self.years:
for sector in self.demand_sectors:
self.embodied_emissions_link_dict[year][sector] = util.empty_df(index = index, columns = columns)
    def map_demand_to_io(self):
        """Map final-energy demand onto supply nodes in the IO demand dataframe."""
        #loops through all final energy types in demand df and adds
        map_dict = self.map_dict
        self.demand_df = self.demand_object.energy_demand.unstack(level='year')
        # round here to get rid of really small numbers
        self.demand_df = self.demand_df.round()
        self.demand_df.columns = self.demand_df.columns.droplevel()
        # NOTE(review): the unpacking below assumes the demand index levels are ordered
        # [sector, geography, final_energy] (consistent with the demand_indexer levels
        # two lines down) — confirm against demand_object.energy_demand
        for demand_sector, geography, final_energy in self.demand_df.groupby(level = self.demand_df.index.names).groups:
            supply_indexer = util.level_specific_indexer(self.io_demand_df, levels=[cfg.primary_geography, 'demand_sector','supply_node'],elements=[geography, demand_sector, map_dict[final_energy]])
            demand_indexer = util.level_specific_indexer(self.demand_df, levels = [ 'sector', cfg.primary_geography, 'final_energy'],elements=[demand_sector, geography, final_energy])
            self.io_demand_df.loc[supply_indexer, self.years] = self.demand_df.loc[demand_indexer, self.years].values
def map_export_to_io(self,year, loop):
"""maps specified export nodes for IO table total demand calculation"""
for node in self.nodes.values():
supply_indexer = util.level_specific_indexer(self.io_export_df, 'supply_node', node.id)
if loop == 'initial' or loop==1:
node.export.allocate(node.active_supply, self.demand_sectors, self.years, year, loop)
self.io_export_df.loc[supply_indexer, year] = node.export.active_values.sort().values
class Node(DataMapFunctions):
def __init__(self, id, supply_type, scenario):
self.id = id
self.supply_type = supply_type
self.scenario = scenario
for col, att in util.object_att_from_table('SupplyNodes', id):
setattr(self, col, att)
self.active_supply= None
self.reconciled = False
#all nodes have emissions subclass
self.emissions = SupplyEmissions(self.id, self.scenario)
self.shape = self.determine_shape()
self.workingdir = cfg.workingdir
self.cfgfile_name = cfg.cfgfile_name
self.log_name = cfg.log_name
def determine_shape(self):
if self.shape_id is not None:
return shape.shapes.data[self.shape_id]
def calculate_subclasses(self):
""" calculates subclasses of nodes and passes requisite data"""
if hasattr(self,'export'):
self.export.calculate(self.years, self.demand_sectors)
if hasattr(self,'coefficients'):
self.coefficients.calculate(self.years, self.demand_sectors)
if hasattr(self,'emissions'):
self.emissions.calculate(self.conversion, self.resource_unit)
if hasattr(self,'potential'):
self.potential.calculate(self.conversion, self.resource_unit)
if hasattr(self,'capacity_factor'):
self.capacity_factor.calculate(self.years, self.demand_sectors)
if hasattr(self,'cost'):
self.cost.calculate(self.conversion, self.resource_unit)
def add_total_stock_measures(self, scenario):
self.total_stocks = {}
measure_ids = scenario.get_measures('SupplyStockMeasures', self.id)
for total_stock in measure_ids:
self.total_stocks[total_stock] = SupplySpecifiedStock(id=total_stock,
sql_id_table='SupplyStockMeasures',
sql_data_table='SupplyStockMeasuresData',
scenario=scenario)
def set_cost_dataframes(self):
index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography], self.demand_sectors], names=[cfg.primary_geography, 'demand_sector'])
self.levelized_costs = util.empty_df(index=index,columns=self.years,fill_value=0.0)
self.annual_costs = util.empty_df(index=index,columns=self.years,fill_value=0.0)
def group_output(self, output_type, levels_to_keep=None):
levels_to_keep = cfg.output_supply_levels if levels_to_keep is None else levels_to_keep
if output_type=='stock':
return self.format_output_stock(levels_to_keep)
elif output_type=='annual_costs':
return self.format_output_annual_costs(levels_to_keep)
elif output_type=='levelized_costs':
return self.format_output_levelized_costs(levels_to_keep)
elif output_type == 'capacity_utilization':
return self.format_output_capacity_utilization(levels_to_keep)
def format_output_stock(self, override_levels_to_keep=None):
if not hasattr(self, 'stock'):
return None
levels_to_keep = cfg.output_supply_levels if override_levels_to_keep is None else override_levels_to_keep
levels_to_eliminate = [l for l in self.stock.values.index.names if l not in levels_to_keep]
df = util.remove_df_levels(self.stock.values, levels_to_eliminate).sort()
# stock starts with vintage as an index and year as a column, but we need to stack it for export
df = df.stack().to_frame()
util.replace_index_name(df, 'year')
stock_unit = cfg.calculation_energy_unit + "/" + cfg.cfgfile.get('case','time_step')
df.columns = [stock_unit.upper()]
return df
def format_output_capacity_utilization(self, override_levels_to_keep=None):
if not hasattr(self, 'capacity_utilization'):
return None
# levels_to_keep = cfg.output_supply_levels if override_levels_to_keep is None else override_levels_to_keep
# levels_to_eliminate = [l for l in self.capacity_utilization.index.names if l not in levels_to_keep]
# df = util.remove_df_levels(self.capacity_utilization, levels_to_eliminate).sort()
# stock starts with vintage as an index and year as a column, but we need to stack it for export
df = self.capacity_utilization
df = df.stack().to_frame()
util.replace_index_name(df, 'year')
df.columns = ["%"]
return df
def format_output_annual_costs(self,override_levels_to_keep=None):
if not hasattr(self, 'final_annual_costs'):
return None
levels_to_keep = cfg.output_supply_levels if override_levels_to_keep is None else override_levels_to_keep
levels_to_eliminate = [l for l in self.final_annual_costs.index.names if l not in levels_to_keep]
if 'vintage' in self.final_annual_costs.index.names:
df = self.final_annual_costs
util.replace_index_name(df, 'year','vintage')
else:
df = self.final_annual_costs.stack().to_frame()
util.replace_index_name(df,'year')
df = util.remove_df_levels(df, levels_to_eliminate).sort()
cost_unit = cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')
df.columns = [cost_unit.upper()]
return df
def format_output_levelized_costs(self, override_levels_to_keep=None):
if not hasattr(self, 'final_levelized_costs'):
return None
levels_to_keep = cfg.output_supply_levels if override_levels_to_keep is None else override_levels_to_keep
levels_to_eliminate = [l for l in self.final_levelized_costs.index.names if l not in levels_to_keep]
df = util.remove_df_levels(self.final_levelized_costs, levels_to_eliminate).sort()
# stock starts with vintage as an index and year as a column, but we need to stack it for export
df = df.stack().to_frame()
util.replace_index_name(df, 'year')
cost_unit = cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')
df.columns = [cost_unit.upper()]
return df
def add_conversion(self):
"""
adds a dataframe used to convert input values that are not in energy terms, to energy terms
ex. Biomass input as 'tons' must be converted to energy units using a conversion factor data table
"""
energy_unit = cfg.calculation_energy_unit
potential_unit = util.sql_read_table('SupplyPotential', 'unit', supply_node_id=self.id)
# check to see if unit is in energy terms, if so, no conversion necessary
if potential_unit is not None:
if cfg.ureg.Quantity(potential_unit).dimensionality == cfg.ureg.Quantity(energy_unit).dimensionality:
conversion = None
resource_unit = None
else:
# if the unit is not in energy terms, create a conversion class to convert to energy units
conversion = SupplyEnergyConversion(self.id, potential_unit)
resource_unit = potential_unit
else:
conversion = None
resource_unit = None
return conversion, resource_unit
    def aggregate_electricity_shapes(self, year, dispatch_feeder_allocation):
        """ returns a single shape for a year with supply_technology and resource_bin removed and dispatch_feeder added
        ['dispatch_feeder', 'timeshift_type', 'gau', 'weather_datetime']

        Technology- and resource-bin-level shapes are weighted together using
        the node's stock energy for *year* and normalized back into a shape.
        """
        # nodes without a stock: use the node shape, or a flat load shape when none exists
        if not hasattr(self,'stock'):
            if self.shape_id is None:
                # timeshift_type level is fixed at [2] here -- assumed to denote the native
                # (unshifted) profile; TODO confirm against the shape module
                index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography],[2],shape.shapes.active_dates_index], names=[cfg.primary_geography,'timeshift_type','weather_datetime'])
                energy_shape = shape.shapes.make_flat_load_shape(index)
            else:
                energy_shape = self.shape.values
        elif 'demand_sector' in self.stock.values_energy:
            # allocate sector-level stock energy to dispatch feeders, then drop the sector level
            values_energy = util.remove_df_levels(DfOper.mult([self.stock.values_energy[year],dispatch_feeder_allocation]),'demand_sector')
        else:
            values_energy = self.stock.values_energy[year]
        # no node shape and no technology shapes anywhere: flat load shape
        if self.shape_id is None and (hasattr(self, 'technologies') and np.all([tech.shape_id is None for tech in self.technologies.values()]) or not hasattr(self,'technologies')):
            index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography],[2],shape.shapes.active_dates_index], names=[cfg.primary_geography,'timeshift_type','weather_datetime'])
            energy_shape = shape.shapes.make_flat_load_shape(index)
        # we don't have technologies or none of the technologies have specific shapes
        elif not hasattr(self, 'technologies') or np.all([tech.shape_id is None for tech in self.technologies.values()]):
            if 'resource_bin' in self.shape.values.index.names and 'resource_bin' not in self.stock.values.index.names:
                raise ValueError('Shape for %s has resource bins but the stock in this supply node does not have resource bins as a level' %self.name)
            elif 'resource_bin' in self.stock.values.index.names and 'resource_bin' not in self.shape.values.index.names:
                energy_shape = self.shape.values
            elif 'resource_bin' in self.stock.values.index.names and 'resource_bin' in self.shape.values.index.names:
                # both stock and shape carry resource bins: energy-weight the shape
                # across bins, collapse the bin level, then renormalize
                energy_slice = util.remove_df_levels(values_energy, ['vintage', 'supply_technology']).to_frame()
                energy_slice.columns = ['value']
                energy_shape = util.DfOper.mult([energy_slice, self.shape.values])
                energy_shape = util.DfOper.divi([util.remove_df_levels(energy_shape,'resource_bin'), util.remove_df_levels(energy_slice, 'resource_bin')])
                # zero-energy bins produce nan after the divide; they contribute no load
                energy_shape = energy_shape.replace(np.nan,0)
            else:
                energy_shape = self.shape.values
        else:
            # technology-specific shapes exist: weight each technology's shape by its stock energy
            energy_slice = util.remove_df_levels(values_energy, 'vintage').to_frame()
            energy_slice.columns = ['value']
            techs_with_default_shape = [tech_id for tech_id, tech in self.technologies.items() if tech.shape_id is None]
            techs_with_own_shape = [tech_id for tech_id, tech in self.technologies.items() if tech.shape_id is not None]
            if techs_with_default_shape:
                # technologies without their own shape use the node-level shape
                energy_slice_default_shape = util.df_slice(energy_slice, techs_with_default_shape, 'supply_technology')
                energy_slice_default_shape = util.remove_df_levels(energy_slice_default_shape, 'supply_technology')
                default_shape_portion = util.DfOper.mult([energy_slice_default_shape, self.shape.values])
                default_shape_portion = util.remove_df_levels(default_shape_portion, ['resource_bin'])
            if techs_with_own_shape:
                energy_slice_own_shape = util.df_slice(energy_slice, techs_with_own_shape, 'supply_technology')
                tech_shapes = pd.concat([self.technologies[tech_id].shape.values for tech_id in techs_with_own_shape],keys=techs_with_own_shape,names=['supply_technology'])
                tech_shape_portion = util.DfOper.mult([energy_slice_own_shape, tech_shapes])
                tech_shape_portion = util.remove_df_levels(tech_shape_portion, ['supply_technology', 'resource_bin'])
            # sum the two portions (either may be absent), then divide by total energy
            # so the result is again a normalized shape
            df = util.DfOper.add([default_shape_portion if techs_with_default_shape else None,
                                 tech_shape_portion if techs_with_own_shape else None],
                                 expandable=False, collapsible=False)
            energy_shape = DfOper.divi([df, util.remove_df_levels(energy_slice,['vintage','supply_technology','resource_bin'])])
        try:
            if 'dispatch_constraint' in energy_shape.index.names:
                # dispatch_constraint slice 1 holds the energy component of a constrained shape
                energy_shape = util.df_slice(energy_shape,1,'dispatch_constraint')
        except:
            # deliberate debugging trap: drops into the debugger on any failure here
            pdb.set_trace()
        return energy_shape
def aggregate_flexible_electricity_shapes(self, year, dispatch_feeder_allocation):
""" returns a single shape for a year with supply_technology and resource_bin removed and dispatch_feeder added
['dispatch_feeder', 'timeshift_type', 'gau', 'weather_datetime']
"""
if 'demand_sector' in self.stock.values_energy:
stock_values_energy = util.remove_df_levels(DfOper.mult([self.stock.values_energy[year],dispatch_feeder_allocation]),'demand_sector')
stock_values = util.remove_df_levels(DfOper.mult([self.stock.values[year],dispatch_feeder_allocation]),'demand_sector')
else:
stock_values_energy = self.stock.values_energy[year]
stock_values = self.stock.values[year]
if self.shape_id is None or not hasattr(self, 'stock'):
energy_shape = None
p_max_shape = None
p_min_shape = None
elif not hasattr(self, 'technologies') or np.all([tech.shape_id is None for tech in self.technologies.values()]):
if 'dispatch_constraint' not in self.shape.values.index.names:
if 'resource_bin' in self.shape.values.index.names and 'resource_bin' not in self.stock.values.index.names:
raise ValueError('Shape for %s has resource bins but the stock in this supply node does not have resource bins as a level' %self.name)
elif 'resource_bin' in self.stock.values.index.names and 'resource_bin' not in self.shape.values.index.names:
energy_shape = self.shape.values
elif 'resource_bin' not in self.stock.values.index.names:
energy_shape = self.shape.values
elif 'resource_bin' in self.stock.values.index.names and 'resource_bin' in self.shape.values.index.names:
energy_slice = util.remove_df_levels(stock_values_energy[year], ['vintage', 'supply_technology']).to_frame()
energy_slice.columns = ['value']
energy_shape = util.DfOper.mult([energy_slice, self.shape.values])
energy_shape = util.remove_df_levels(energy_shape, 'resource_bin')
energy_shape = DfOper.div([energy_shape, util.remove_df_levels(energy_slice,'resource_bin')])
p_min_shape = None
p_max_shape = None
else:
energy_shape, p_min_shape, p_max_shape, = self.calculate_disp_constraints_shape(year, stock_values, stock_values_energy)
else:
if 'dispatch_constraint' not in self.shape.values.index.names:
energy_slice = util.remove_df_levels(self.stock.values_energy[year], 'vintage').to_frame()
energy_slice.columns = ['value']
techs_with_default_shape = [tech_id for tech_id, tech in self.technologies.items() if tech.shape_id is None or 'dispatch_constraint' in shape.shapes.data[tech.shape_id].df_index_names]
techs_with_own_shape = [tech_id for tech_id, tech in self.technologies.items() if tech.shape_id is not None and 'dispatch_constraint' not in shape.shapes.data[tech.shape_id].df_index_names]
if techs_with_default_shape:
energy_slice_default_shape = util.df_slice(energy_slice, techs_with_default_shape, 'supply_technology')
energy_slice_default_shape = util.remove_df_levels(energy_slice_default_shape, 'supply_technology')
default_shape_portion = util.DfOper.mult([energy_slice_default_shape, self.shape.values])
default_shape_portion = util.remove_df_levels(default_shape_portion, 'resource_bin')
if techs_with_own_shape:
energy_slice_own_shape = util.df_slice(energy_slice, techs_with_own_shape, 'supply_technology')
tech_shapes = pd.concat([self.technologies[tech_id].shape.values for tech_id in techs_with_own_shape])
tech_shape_portion = util.DfOper.mult([energy_slice_own_shape, tech_shapes])
tech_shape_portion = util.remove_df_levels(tech_shape_portion, 'supply_technology', 'resource_bin')
#TODO check with Ryan why this is not exapandable
energy_shape = util.DfOper.add([default_shape_portion if techs_with_default_shape else None,
tech_shape_portion if techs_with_own_shape else None],
expandable=False, collapsible=False)
energy_shape = util.DfOper.divi([energy_shape,util.remove_df_levels(self.stock.values_energy,['vintage','supply_technology','resource_bin'])])
p_min_shape = None
p_max_shape = None
else:
energy_shape, p_min_shape, p_max_shape = self.calculate_disp_constraints_shape(self,year, stock_values, stock_values_energy)
return energy_shape, p_min_shape , p_max_shape
def calculate_disp_constraints_shape(self,year, stock_values, stock_values_energy):
if 'resource_bin' in self.shape.values.index.names and 'resource_bin' not in self.stock.values.index.names:
raise ValueError('Shape for %s has resource bins but the stock in this supply node does not have resource bins as a level' %self.name)
elif 'resource_bin' in self.stock.values.index.names and 'resource_bin' not in self.shape.values.index.names:
energy_shape = util.df_slice(self.shape.values,1,'dispatch_constraint')
p_min_shape = util.df_slice(self.shape.values,2,'dispatch_constraint')
p_max_shape = util.df_slice(self.shape.values,3,'dispatch_constraint')
elif 'resource_bin' in self.stock.values.index.names and 'resource_bin' in self.shape.values.index.names:
energy_slice = util.remove_df_levels(stock_values_energy, ['vintage', 'supply_technology']).to_frame()
energy_slice.columns = ['value']
energy_shape = util.DfOper.mult([energy_slice, util.df_slice(self.shape.values,1,'dispatch_constraint')])
energy_shape = util.remove_df_levels(energy_shape, 'resource_bin')
energy_shape = DfOper.div([energy_shape, util.remove_df_levels(energy_slice,'resource_bin')])
capacity_slice = util.remove_df_levels(stock_values, ['vintage', 'supply_technology']).to_frame()
capacity_slice.columns = ['value']
p_min_shape = util.DfOper.mult([capacity_slice, util.df_slice(self.shape.values,2,'dispatch_constraint')])
p_min_shape = util.remove_df_levels(p_min_shape, 'resource_bin')
p_min_shape = DfOper.div([p_min_shape, util.remove_df_levels(capacity_slice,'resource_bin')])
p_max_shape = util.DfOper.mult([capacity_slice, util.df_slice(self.shape.values,3,'dispatch_constraint')])
p_max_shape = util.remove_df_levels(p_max_shape, 'resource_bin')
p_max_shape = DfOper.div([p_max_shape, util.remove_df_levels(capacity_slice,'resource_bin')])
else:
energy_shape = util.df_slice(self.shape.values,1,'dispatch_constraint')
p_min_shape = util.df_slice(self.shape.values,2,'dispatch_constraint')
p_max_shape = util.df_slice(self.shape.values,3,'dispatch_constraint')
return energy_shape, p_min_shape, p_max_shape
    def calculate_active_coefficients(self, year, loop):
        """Set this node's active input-output coefficient dataframes for *year*.

        Populates active_coefficients, active_coefficients_total (trade-adjusted,
        with efficiency_type collapsed) and active_emissions_coefficients
        (expanded by ghg); each is None when the node has no coefficient data.
        """
        if year == int(cfg.cfgfile.get('case','current_year'))and loop == 'initial' :
            #in the first loop, we take the active coefficients for the year
            throughput = self.active_demand
        else:
            throughput = self.active_supply
        if hasattr(self,'potential') and self.potential.data is True:
            # distribute throughput along the supply curve before applying coefficients
            self.potential.remap_to_potential_and_normalize(throughput, year, self.tradable_geography)
            if self.coefficients.data is True:
                # restrict the normalized supply curve to the geographies in this model run
                filter_geo_potential_normal = util.remove_df_elements(self.potential.active_supply_curve_normal, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
                # reset and re-set the index after element removal (presumably to rebuild
                # stale index metadata) -- TODO confirm this round-trip is still needed
                filter_geo_potential_normal = filter_geo_potential_normal.reset_index().set_index(filter_geo_potential_normal.index.names)
                self.active_coefficients = util.remove_df_levels(util.DfOper.mult([self.coefficients.values.loc[:,year].to_frame(),
                                                            filter_geo_potential_normal]),'resource_bin')
            else:
                self.active_coefficients = None
        elif self.coefficients.data is True:
            # no potential data: aggregate coefficients directly to the standard levels
            self.active_coefficients = self.coefficients.values.groupby(level=[cfg.primary_geography, 'demand_sector', 'efficiency_type','supply_node']).sum().loc[:,year].to_frame()
        else:
            self.active_coefficients = None
        if self.active_coefficients is not None:
            # totals collapse the efficiency_type detail, then get trade-adjusted
            self.active_coefficients_total_untraded = util.remove_df_levels(self.active_coefficients,'efficiency_type')
            self.active_coefficients_total = self.add_column_index(self.active_coefficients_total_untraded)
            self.active_coefficients_total = util.DfOper.mult([self.active_coefficients_total,self.active_trade_adjustment_df])
            self.active_coefficients_untraded = copy.deepcopy(self.active_coefficients)
            self.active_coefficients_untraded.sort(inplace=True,axis=0)
            # replicate the trade adjustment for every efficiency_type present for each
            # input supply node so it can be multiplied against the full coefficients
            nodes = list(set(self.active_trade_adjustment_df.index.get_level_values('supply_node')))
            df_list = []
            for node in nodes:
                trade_indexer = util.level_specific_indexer(self.active_trade_adjustment_df, 'supply_node', node)
                coefficient_indexer = util.level_specific_indexer(self.active_coefficients_untraded, 'supply_node', node)
                efficiency_types = list(set(self.active_coefficients_untraded.loc[coefficient_indexer,:].index.get_level_values('efficiency_type')))
                keys = efficiency_types
                name = ['efficiency_type']
                df = pd.concat([self.active_trade_adjustment_df.loc[trade_indexer,:]]*len(keys),keys=keys,names=name)
                df_list.append(df)
            active_trade_adjustment_df = pd.concat(df_list)
            self.active_coefficients = self.add_column_index(self.active_coefficients_untraded)
            self.active_coefficients = util.DfOper.mult([self.active_coefficients,active_trade_adjustment_df])
            # expand by ghg so per-gas emissions rates can be applied downstream
            keys = self.ghgs
            name = ['ghg']
            self.active_emissions_coefficients = pd.concat([self.active_coefficients]*len(keys), keys=keys, names=name)
            self.active_emissions_coefficients = self.active_emissions_coefficients.reorder_levels([cfg.primary_geography,'demand_sector', 'supply_node', 'efficiency_type', 'ghg'])
            self.active_emissions_coefficients.sort(inplace=True)
        else:
            self.active_coefficients_total = None
            self.active_emissions_coefficients = None
def add_column_index(self, data):
names = ['demand_sector', cfg.primary_geography]
keys = [self.demand_sectors, cfg.geo.geographies[cfg.primary_geography]]
data = copy.deepcopy(data)
for key,name in zip(keys,names):
data = pd.concat([data]*len(key), axis=1, keys=key, names=[name])
data.columns = data.columns.droplevel(-1)
return data
def add_export_measures(self, scenario):
"""
add all export measures from the scenario to a dictionary
"""
self.export_measures = []
ids = scenario.get_measures('SupplyExportMeasures', self.id)
if len(ids) > 1:
raise ValueError('model does not currently support multiple active export measures from a single supply node. Turn off an export measure in supply node %s' %self.name)
for id in ids:
self.export_measures.append(id)
self.add_export_measure(id)
def add_export_measure(self, id, **kwargs):
"""Adds measure instances to node"""
self.export_measure = id
# if id in self.export_measures:
# return
#self.export_measures.append(id)
def add_exports(self):
if len(self.export_measures):
self.export = Export(self.id,measure_id=self.export_measure)
else:
self.export = Export(self.id)
def convert_stock(self, stock_name='stock', attr='total'):
model_energy_unit = cfg.calculation_energy_unit
model_time_step = cfg.cfgfile.get('case', 'time_step')
stock = getattr(self,stock_name)
if stock.time_unit is not None:
# if a stock has a time_unit, then the unit is energy and must be converted to capacity
setattr(stock, attr, util.unit_convert(getattr(stock, attr), unit_from_num=stock.capacity_or_energy_unit, unit_from_den=stock.time_unit,
unit_to_num=model_energy_unit, unit_to_den=model_time_step))
else:
# if a stock is a capacity unit, the model must convert the unit type to an energy unit for conversion ()
try:
setattr(stock, attr, util.unit_convert(getattr(stock, attr), unit_from_num=cfg.ureg.Quantity(stock.capacity_or_energy_unit)*cfg.ureg.Quantity(model_time_step), unit_from_den=model_time_step,
unit_to_num=model_energy_unit, unit_to_den=model_time_step))
except:
pdb.set_trace()
    def calculate_potential_constraints(self, year):
        """calculates the exceedance factor of a node if the node active supply exceeds the potential in the node. This adjustment factor
        is passed to other nodes in the reconcile step

        Sets self.constraint_violation and, when the constraint is checked,
        self.active_constraint_df (factors <= 1; values below 1 mean supply
        must be scaled back during reconciliation).
        """
        if hasattr(self,'potential') and self.potential.data is True and self.enforce_potential_constraint == True:
            #geomap potential to the tradable geography. Potential is not exceeded unless it is exceeded in a tradable geography region.
            active_geomapped_potential, active_geomapped_supply = self.potential.format_potential_and_supply_for_constraint_check(self.active_supply, self.tradable_geography, year)
            # exceedance factor = potential / supply
            self.potential_exceedance = util.DfOper.divi([active_geomapped_potential,active_geomapped_supply], expandable = (False,False), collapsible = (True, True))
            #reformat dataframes for a remap
            # negative ratios and nan/inf (division by zero supply) are treated as unconstrained (factor 1)
            self.potential_exceedance[self.potential_exceedance<0] = 1
            self.potential_exceedance = self.potential_exceedance.replace([np.nan,np.inf],[1,1])
            self.potential_exceedance= pd.DataFrame(self.potential_exceedance.stack(), columns=['value'])
            util.replace_index_name(self.potential_exceedance, 'year')
            # collapse every level other than the tradable geography and demand_sector
            remove_levels = [x for x in self.potential_exceedance.index.names if x not in [self.tradable_geography, 'demand_sector']]
            if len(remove_levels):
                self.potential_exceedance = util.remove_df_levels(self.potential_exceedance, remove_levels)
            geography_map_key = self.geography_map_key if hasattr(self, 'geography_map_key') else cfg.cfgfile.get('case','default_geography_map_key')
            if self.tradable_geography != cfg.primary_geography:
                # map the exceedance factor back to the primary geography as an intensity
                map_df = cfg.geo.map_df(self.tradable_geography, cfg.primary_geography, normalize_as='intensity', map_key=geography_map_key, eliminate_zeros=False)
                self.potential_exceedance = util.remove_df_levels(util.DfOper.mult([self.potential_exceedance,map_df]), self.tradable_geography)
            # restrict to the geographies active in this model run
            self.active_constraint_df = util.remove_df_elements(self.potential_exceedance, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
            if 'demand_sector' not in self.active_constraint_df.index.names:
                # broadcast the constraint across all demand sectors when it has no sector detail
                keys = self.demand_sectors
                name = ['demand_sector']
                active_constraint_df = pd.concat([self.active_constraint_df]*len(keys), keys=keys, names=name)
                active_constraint_df= active_constraint_df.swaplevel('demand_sector',-1)
                self.active_constraint_df = active_constraint_df.sort(inplace=False)
            # factors above 1 mean potential exceeds supply: cap at 1 (no adjustment needed)
            self.active_constraint_df[self.active_constraint_df>1]=1
            # any factor below 1 means the node is constrained and must be reconciled
            if np.any(self.active_constraint_df.values<1):
                self.constraint_violation = True
            else:
                self.constraint_violation = False
        else:
            self.constraint_violation = False
    def calculate_potential_constraints_evolved(self, year, active_supply):
        """calculates the exceedance factor of a node if the node active supply exceeds the potential in the node. This adjustment factor
        is passed to other nodes in the reconcile step

        NOTE: mirrors calculate_potential_constraints, but takes the supply to
        check as an argument and does not consult enforce_potential_constraint.
        """
        if hasattr(self,'potential') and self.potential.data is True:
            #geomap potential to the tradable geography. Potential is not exceeded unless it is exceeded in a tradable geography region.
            active_geomapped_potential, active_geomapped_supply = self.potential.format_potential_and_supply_for_constraint_check(active_supply, self.tradable_geography, year)
            # exceedance factor = potential / supply
            self.potential_exceedance = util.DfOper.divi([active_geomapped_potential,active_geomapped_supply], expandable = (False,False), collapsible = (True, True))
            #reformat dataframes for a remap
            # negative ratios and nan/inf (division by zero supply) are treated as unconstrained (factor 1)
            self.potential_exceedance[self.potential_exceedance<0] = 1
            self.potential_exceedance = self.potential_exceedance.replace([np.nan,np.inf],[1,1])
            self.potential_exceedance= pd.DataFrame(self.potential_exceedance.stack(), columns=['value'])
            util.replace_index_name(self.potential_exceedance, 'year')
            # collapse every level other than the tradable geography and demand_sector
            remove_levels = [x for x in self.potential_exceedance.index.names if x not in [self.tradable_geography, 'demand_sector']]
            if len(remove_levels):
                self.potential_exceedance = util.remove_df_levels(self.potential_exceedance, remove_levels)
            geography_map_key = self.geography_map_key if hasattr(self, 'geography_map_key') else cfg.cfgfile.get('case','default_geography_map_key')
            if self.tradable_geography != cfg.primary_geography:
                # map the exceedance factor back to the primary geography as an intensity
                map_df = cfg.geo.map_df(self.tradable_geography, cfg.primary_geography, normalize_as='intensity', map_key=geography_map_key, eliminate_zeros=False)
                self.potential_exceedance = util.remove_df_levels(util.DfOper.mult([self.potential_exceedance,map_df]), self.tradable_geography)
            # restrict to the geographies active in this model run
            self.active_constraint_df = util.remove_df_elements(self.potential_exceedance, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
            if 'demand_sector' not in self.active_constraint_df.index.names:
                # broadcast the constraint across all demand sectors when it has no sector detail
                keys = self.demand_sectors
                name = ['demand_sector']
                active_constraint_df = pd.concat([self.active_constraint_df]*len(keys), keys=keys, names=name)
                active_constraint_df= active_constraint_df.swaplevel('demand_sector',-1)
                self.active_constraint_df = active_constraint_df.sort(inplace=False)
            # factors above 1 mean potential exceeds supply: cap at 1 (no adjustment needed)
            self.active_constraint_df[self.active_constraint_df>1]=1
            # any factor below 1 means the node is constrained and must be reconciled
            if np.any(self.active_constraint_df.values<1):
                self.constraint_violation = True
            else:
                self.constraint_violation = False
        else:
            self.constraint_violation = False
    def calculate_internal_trades(self, year, loop):
        """calculates internal trading adjustment factors based on the ratio of active supply to supply potential or stock
        used for nodes where the location of throughput is unrelated to the location of demand (ex. primary biomass supply)

        Sets self.internal_trades to 'stop', 'feed', or 'stop and feed', and
        when trading applies builds self.active_internal_trade_df: a square
        geography-to-geography matrix distributing supply from where it is
        produced to where it is consumed.
        """
        if self.tradable_geography!= cfg.primary_geography and len(cfg.geographies)>1 :
            if hasattr(self,'potential') and self.potential.data is True or (hasattr(self,'stock') and self.stock.data is True):
                #tradable supply is mapping of active supply to a tradable geography
                try:
                    self.geo_step1 = cfg.geo.map_df(cfg.primary_geography,self.tradable_geography, normalize_as='total', eliminate_zeros=False)
                except:
                    # log the inputs before re-raising so a bad geography mapping is diagnosable
                    logging.error('self.tradable_geography = {}, primary_geography = {}, id = {}, name = {}'.format(self.tradable_geography, cfg.primary_geography, self.id, self.name))
                    raise
                if hasattr(self,'potential') and self.potential.data is True:
                    # potential (restricted to active geographies) mapped to the tradable
                    # geography; the source geography is renamed "<geo>from"
                    df = util.remove_df_elements(self.potential.active_supply_curve, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
                    self.potential_geo = util.DfOper.mult([util.remove_df_levels(df,
                                                        [x for x in self.potential.active_supply_curve.index.names if x not in [cfg.primary_geography,'demand_sector']]),
                                                        cfg.geo.map_df(cfg.primary_geography,self.tradable_geography,normalize_as='total')])
                    util.replace_index_name(self.potential_geo,cfg.primary_geography + "from", cfg.primary_geography)
                #if a node has potential, this becomes the basis for remapping
                if hasattr(self,'stock') and hasattr(self.stock,'act_total_energy'):
                    # energy-denominated stock mapped to the tradable geography
                    total_stock = util.remove_df_levels(self.stock.act_total_energy, [x for x in self.stock.act_total_energy.index.names if x not in [cfg.primary_geography,'demand_sector']])
                    self.stock_energy_geo = util.DfOper.mult([total_stock,cfg.geo.map_df(cfg.primary_geography,self.tradable_geography, normalize_as='total', eliminate_zeros=False)])
                    util.replace_index_name(self.stock_energy_geo,cfg.primary_geography+ "from", cfg.primary_geography)
                elif hasattr(self,'stock') and hasattr(self.stock,'act_total'):
                    # capacity-denominated stock mapped to the tradable geography
                    total_stock = util.remove_df_levels(self.stock.act_total, [x for x in self.stock.act_total.index.names if x not in [cfg.primary_geography,'demand_sector']])
                    self.stock_capacity_geo = util.DfOper.mult([total_stock,cfg.geo.map_df(cfg.primary_geography, self.tradable_geography,eliminate_zeros=False)])
                    util.replace_index_name(self.stock_capacity_geo ,cfg.primary_geography + "from", cfg.primary_geography)
                if hasattr(self,'stock_energy_geo') and hasattr(self,'potential_geo'):
                    #this is a special case when we have a stock and specified potential. We want to distribute growth in the stock by potential while maintaing trades to support existing stock.
                    active_supply = util.remove_df_levels(self.active_supply, [x for x in self.active_supply.index.names if x not in self.stock.act_total_energy.index.names])
                    total_active_supply = util.remove_df_levels(util.DfOper.mult([active_supply,cfg.geo.map_df(cfg.primary_geography,self.tradable_geography, normalize_as='total', eliminate_zeros=False)]),cfg.primary_geography)
                    total_stock = util.remove_df_levels(self.stock_energy_geo,cfg.primary_geography)
                    # fraction of supply already served by existing stock (capped at 1)
                    stock_share = util.remove_df_levels(util.DfOper.divi([total_stock,total_active_supply]),cfg.primary_geography + "from")
                    stock_share[stock_share>1] = 1
                    # existing-stock portion of trades, normalized within each destination
                    stock_geo_step = self.stock_energy_geo.groupby(level=util.ix_excl(self.stock_energy_geo,cfg.primary_geography + "from")).transform(lambda x: x/x.sum()).fillna(0)
                    stock_geo_step = util.DfOper.mult([stock_geo_step, stock_share])
                    # growth portion distributed by remaining (stock-net) potential
                    potential_share = 1- stock_share
                    potential_geo_step = util.DfOper.subt([self.potential_geo,self.stock_energy_geo])
                    potential_geo_step[potential_geo_step<0]=0
                    potential_geo_step = potential_geo_step.groupby(level=util.ix_excl(self.potential_geo,cfg.primary_geography + "from")).transform(lambda x: x/x.sum()).fillna(0)
                    potential_geo_step = util.DfOper.mult([potential_geo_step, potential_share])
                    self.geo_step2 = util.DfOper.add([stock_geo_step,potential_geo_step])
                elif hasattr(self,'stock_energy_geo'):
                    # single basis: normalize shares within each destination geography
                    self.geo_step2 = self.stock_energy_geo.groupby(level=util.ix_excl(self.stock_energy_geo,cfg.primary_geography + "from")).transform(lambda x: x/x.sum()).fillna(0)
                elif hasattr(self,'stock_capacity_geo'):
                    self.geo_step2 = self.stock_capacity_geo.groupby(level=util.ix_excl(self.stock_capacity_geo,cfg.primary_geography + "from")).transform(lambda x: x/x.sum()).fillna(0)
                elif hasattr(self,'potential_geo'):
                    self.geo_step2 = self.potential_geo.groupby(level=util.ix_excl(self.potential_geo,cfg.primary_geography + "from")).transform(lambda x: x/x.sum()).fillna(0)
                # combine primary->tradable mapping with tradable->source shares, then pivot
                # so columns are the consuming geography and rows the producing geography
                self.geomapped_coefficients = util.DfOper.mult([self.geo_step1, self.geo_step2])
                self.geomapped_coefficients = self.geomapped_coefficients.unstack(cfg.primary_geography)
                util.replace_index_name(self.geomapped_coefficients,cfg.primary_geography,cfg.primary_geography + "from")
                self.geomapped_coefficients = util.remove_df_levels(self.geomapped_coefficients,self.tradable_geography)
                self.geomapped_coefficients.columns = self.geomapped_coefficients.columns.droplevel()
                # replicate across demand sectors on both axes (trades stay within a sector)
                self.active_internal_trade_df = pd.concat([self.geomapped_coefficients]*len(self.demand_sectors),axis=1,keys=self.demand_sectors,names=['demand_sector'])
                self.active_internal_trade_df= self.active_internal_trade_df.swaplevel(cfg.primary_geography,'demand_sector',axis=1)
                if 'demand_sector' not in self.active_internal_trade_df.index.names:
                    self.active_internal_trade_df = pd.concat([self.active_internal_trade_df]*len(self.demand_sectors),axis=0,keys=self.demand_sectors,names=['demand_sector'])
                    self.active_internal_trade_df = self.active_internal_trade_df.swaplevel(cfg.primary_geography,'demand_sector',axis=0)
                self.active_internal_trade_df.sort(axis=0,inplace=True)
                self.active_internal_trade_df.sort(axis=1,inplace=True)
                # zero out cross-sector cells so trades only occur within the same sector
                for sector_row in self.demand_sectors:
                    for sector_column in self.demand_sectors:
                        row_indexer = util.level_specific_indexer(self.active_internal_trade_df,'demand_sector', sector_row)
                        col_indexer = util.level_specific_indexer(self.active_internal_trade_df,'demand_sector', sector_column)
                        if sector_row == sector_column:
                            mult =1
                        else:
                            mult=0
                        self.active_internal_trade_df.loc[row_indexer, col_indexer] *= mult
                # sanity check: every column should sum to 1 (fully allocated) or 0 (no supply)
                if np.all(np.round(self.active_internal_trade_df.sum().values,2)==1.) or np.all(np.round(self.active_internal_trade_df.sum().values,2)==0):
                    pass
                else:
                    # deliberate debugging trap: malformed trade matrix drops into the debugger
                    pdb.set_trace()
                self.internal_trades = "stop and feed"
            else:
                self.internal_trades = "stop"
        elif self.tradable_geography != cfg.primary_geography:
            #if there is only one geography, there is no trading
            self.internal_trades = "stop"
        elif self.tradable_geography == cfg.primary_geography and self.enforce_tradable_geography:
            #if the tradable geography is set to equal the primary geography, then any trades upstream are stopped at this node
            self.internal_trades = "stop"
        else:
            #otherwise, pass trades upstream to downstream nodes
            self.internal_trades = "feed"
    def calculate_input_emissions_rates(self,year,ghgs):
        """Calculate physical and accounting GHG emissions rates of this node for *year*.

        When emissions data exists, sets:
          - self.active_physical_emissions_rate: expanded to a block-diagonal
            matrix across demand sectors (geography x sector rows/columns, ghg rows)
          - self.active_accounting_emissions_rate: geography x sector x ghg frame
          - self.emissions_rate: flag indicating whether rates were computed
        """
        if hasattr(self,'emissions') and self.emissions.data is True:
            if hasattr(self,'potential') and self.potential.data is True:
                # weight emissions rates by the normalized supply curve; drop any
                # geographies excluded by the current geography filter
                filter_geo_potential_normal = util.remove_df_elements(self.potential.active_supply_curve_normal, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
                # rebuild the index after element removal so the levels are clean
                filter_geo_potential_normal = filter_geo_potential_normal.reset_index().set_index(filter_geo_potential_normal.index.names)
                self.active_physical_emissions_rate = DfOper.mult([filter_geo_potential_normal,self.emissions.values_physical.loc[:,year].to_frame()])
                # only geography/sector/ghg levels are allowed in the rate frames
                levels = ['demand_sector',cfg.primary_geography,'ghg']
                disallowed_levels = [x for x in self.active_physical_emissions_rate.index.names if x not in levels]
                if len(disallowed_levels):
                    self.active_physical_emissions_rate = util.remove_df_levels(self.active_physical_emissions_rate, disallowed_levels)
                # ensure every geography/sector/ghg combination is represented
                self.active_physical_emissions_rate = util.expand_multi(self.active_physical_emissions_rate, levels_list = [cfg.geographies, self.demand_sectors, self.ghgs],levels_names=[cfg.primary_geography,'demand_sector', 'ghg'])
                # same treatment for accounting emissions rates
                self.active_accounting_emissions_rate = DfOper.mult([filter_geo_potential_normal,self.emissions.values_accounting.loc[:,year].to_frame()])
                levels = ['demand_sector',cfg.primary_geography,'ghg']
                disallowed_levels = [x for x in self.active_accounting_emissions_rate.index.names if x not in levels]
                if len(disallowed_levels):
                    self.active_accounting_emissions_rate = util.remove_df_levels(self.active_accounting_emissions_rate, disallowed_levels)
                self.active_accounting_emissions_rate = util.expand_multi(self.active_accounting_emissions_rate, levels_list = [cfg.geographies, self.demand_sectors, self.ghgs], levels_names=[cfg.primary_geography,'demand_sector','ghg'])
            else:
                # no potential data: use emissions inputs directly; only a
                # limited set of index levels is supported in this path
                allowed_indices = ['demand_sector', cfg.primary_geography, 'ghg', 'ghg_type']
                if set(self.emissions.values_physical.index.names).issubset(allowed_indices):
                    self.active_physical_emissions_rate = util.remove_df_levels(self.emissions.values_physical.loc[:,year].to_frame(), 'ghg_type')
                    self.active_physical_emissions_rate = util.expand_multi(self.active_physical_emissions_rate, levels_list = [cfg.geographies, self.demand_sectors, self.ghgs],levels_names=[cfg.primary_geography,'demand_sector', 'ghg'])
                if set(self.emissions.values_accounting.index.names).issubset(allowed_indices):
                    self.active_accounting_emissions_rate = util.remove_df_levels(self.emissions.values_accounting.loc[:,year].to_frame(), 'ghg_type')
                    self.active_accounting_emissions_rate = util.expand_multi(self.active_accounting_emissions_rate, levels_list = [cfg.geographies, self.demand_sectors, self.ghgs],levels_names=[cfg.primary_geography,'demand_sector', 'ghg'])
                else:
                    raise ValueError("too many indexes in emissions inputs of node %s" %self.id)
            # expand the physical rate to square (sector x geography) columns and
            # zero the cross-sector terms so the matrix is block-diagonal
            keys = [self.demand_sectors, cfg.geographies]
            names = ['demand_sector', cfg.primary_geography]
            active_physical_emissions_rate = copy.deepcopy(self.active_physical_emissions_rate)
            for key,name in zip(keys,names):
                active_physical_emissions_rate = pd.concat([active_physical_emissions_rate]*len(key), axis=1, keys=key, names=[name])
            for sector_a in self.demand_sectors:
                for sector_b in self.demand_sectors:
                    row_indexer = util.level_specific_indexer(active_physical_emissions_rate,'demand_sector', sector_a, axis=0)
                    col_indexer = util.level_specific_indexer(active_physical_emissions_rate,'demand_sector',sector_b, axis=1)
                    # keep same-sector entries, zero cross-sector entries
                    if sector_a == sector_b:
                        mult = 1
                    else:
                        mult = 0
                    active_physical_emissions_rate.loc[row_indexer,col_indexer] = active_physical_emissions_rate.loc[row_indexer,col_indexer].values * mult
            self.active_physical_emissions_rate = active_physical_emissions_rate
            self.active_physical_emissions_rate.columns = self.active_physical_emissions_rate.columns.droplevel(-1)
            self.emissions_rate = True
        else:
            self.emissions_rate = False
    def calculate_emissions(self,year):
        """Calculate this node's combustion, accounting, and total emissions for *year*."""
        if hasattr(self,'active_physical_emissions_coefficients'):
            # efficiency_type 1 rows are treated as combusted inputs here
            if 1 in self.active_physical_emissions_coefficients.index.get_level_values('efficiency_type'):
                indexer = util.level_specific_indexer(self.active_physical_emissions_coefficients,'efficiency_type', 1)
                combustion_emissions = copy.deepcopy(self.active_physical_emissions_coefficients.loc[indexer,:])
                # emissions = node throughput (active_supply) * physical emissions coefficients
                combustion_emissions.loc[:,:] = self.active_supply.T.values * self.active_physical_emissions_coefficients.loc[indexer,:].values
                self.active_combustion_emissions = combustion_emissions.groupby(level='ghg').sum()
                self.active_combustion_emissions = self.active_combustion_emissions.unstack(cfg.primary_geography).to_frame()
                if hasattr(self,'active_co2_capture_rate'):
                    # captured CO2 does not count toward combustion emissions
                    self.active_combustion_emissions = DfOper.mult([self.active_combustion_emissions, 1- self.active_co2_capture_rate])
                self.active_combustion_emissions = util.remove_df_levels(self.active_combustion_emissions,'resource_bin')
        if hasattr(self,'active_accounting_emissions_rate'):
            self.active_accounting_emissions = DfOper.mult([self.active_accounting_emissions_rate,self.active_supply])
        # total = accounting + combustion, using whichever components were computed
        if hasattr(self,'active_accounting_emissions') and hasattr(self,'active_combustion_emissions'):
            self.active_total_emissions = DfOper.add([self.active_accounting_emissions, self.active_combustion_emissions])
        elif hasattr(self,'active_accounting_emissions'):
            self.active_total_emissions = self.active_accounting_emissions
        elif hasattr(self,'active_combustion_emissions'):
            self.active_total_emissions = self.active_combustion_emissions
def calculate_embodied_emissions_rate(self,year):
if hasattr(self,'active_total_emissions'):
self.active_embodied_emissions_rate = DfOper.divi([self.active_total_emissions, self.active_supply])
def set_adjustments(self):
self.set_trade_adjustment_dict()
self.set_internal_trade_dict()
# self.set_constraint_adjustment_dict()
# self.set_constraint_dict()
    def set_trade_adjustment_dict(self):
        """Set identity trade-adjustment matrices for every model year.

        Builds a (geography x sector x supply_node) by (geography x sector)
        DataFrame that is 1.0 where the row and column geography/sector match
        and 0.0 elsewhere, then stores a deep copy per year in
        self.trade_adjustment_dict and keeps one as active_trade_adjustment_df.
        """
        if hasattr(self,'nodes'):
            self.trade_adjustment_dict = defaultdict(dict)
            row_index = pd.MultiIndex.from_product([cfg.geographies, self.demand_sectors, self.nodes], names=[cfg.primary_geography, 'demand_sector', 'supply_node'])
            col_index = pd.MultiIndex.from_product([cfg.geographies, self.demand_sectors], names=[cfg.primary_geography, 'demand_sector'])
            trade_adjustment_df = util.empty_df(index=row_index,columns=col_index,fill_value=0.0)
            # legacy pandas API: DataFrame.sort() here sorts by labels
            trade_adjustment_df.sort(inplace=True, axis=0)
            trade_adjustment_df.sort(inplace=True, axis=1)
            trade_adjustment_groups = trade_adjustment_df.groupby(level=trade_adjustment_df.index.names).groups
            # put 1.0 on the "diagonal": rows whose geography/sector match the column
            for elements in trade_adjustment_groups.keys():
                row_indexer = util.level_specific_indexer(trade_adjustment_df, trade_adjustment_df.index.names, elements)
                col_indexer = util.level_specific_indexer(trade_adjustment_df,[cfg.primary_geography, 'demand_sector'], elements[:-1], axis=1)
                trade_adjustment_df.loc[row_indexer, col_indexer] = 1.0
            for year in self.years:
                self.trade_adjustment_dict[year] = copy.deepcopy(trade_adjustment_df)
            self.active_trade_adjustment_df = trade_adjustment_df
    def set_internal_trade_dict(self):
        """Set identity internal-trade matrices for every model year.

        Builds a square (geography x sector) matrix with 1.0 on the diagonal
        and 0.0 elsewhere; stores a deep copy per year in
        self.internal_trade_dict and keeps one as active_internal_trade_df.
        Only applies when the node has a tradable geography.
        """
        if self.tradable_geography is not None:
            self.internal_trade_dict = defaultdict(dict)
            index = pd.MultiIndex.from_product([cfg.geographies, self.demand_sectors], names=[cfg.primary_geography, 'demand_sector'])
            internal_trade_df = util.empty_df(index=index,columns=index,fill_value=0.0)
            # legacy pandas API: DataFrame.sort() here sorts by labels
            internal_trade_df.sort(inplace=True, axis=0)
            internal_trade_df.sort(inplace=True, axis=1)
            internal_trade_groups = internal_trade_df.groupby(level=internal_trade_df.index.names).groups
            # set the diagonal (matching geography/sector) to 1.0
            for elements in internal_trade_groups.keys():
                row_indexer = util.level_specific_indexer(internal_trade_df, internal_trade_df.index.names, elements)
                col_indexer = util.level_specific_indexer(internal_trade_df,[cfg.primary_geography, 'demand_sector'], list(elements), axis=1)
                internal_trade_df.loc[row_indexer, col_indexer] = 1.0
            for year in self.years:
                self.internal_trade_dict[year] = copy.deepcopy(internal_trade_df)
            self.active_internal_trade_df = internal_trade_df
    def set_pass_through_df_dict(self):
        """Set zero-filled pass-through DataFrames keyed by model year.

        Rows are (geography x sector x ghg); columns are (geography x sector).
        """
        self.pass_through_df_dict = defaultdict(dict)
        row_index = pd.MultiIndex.from_product([cfg.geographies, self.demand_sectors, self.ghgs], names=[cfg.primary_geography, 'demand_sector', 'ghg'])
        col_index = pd.MultiIndex.from_product([cfg.geographies, self.demand_sectors], names=[cfg.primary_geography, 'demand_sector'])
        pass_through_df = util.empty_df(index=row_index,columns=col_index,fill_value=0.0)
        # legacy pandas API: DataFrame.sort() here sorts by labels
        pass_through_df.sort(inplace=True, axis=0)
        pass_through_df.sort(inplace=True, axis=1)
        for year in self.years:
            self.pass_through_df_dict[year] = copy.deepcopy(pass_through_df)
        # relies on `year` leaking from the loop above, i.e. the last model year
        self.active_pass_through_df = copy.deepcopy(self.pass_through_df_dict[year])
def update_pass_through_df_dict(self,year, loop=None):
if hasattr(self,'pass_through_df_dict'):
self.active_pass_through_df =self.pass_through_df_dict[year]
self.active_pass_through_df*=0
def set_pass_through_dict(self, node_dict):
if self.active_coefficients is not None:
self.active_coefficients = self.active_coefficients.sort()
if 2 in set(self.active_coefficients.index.get_level_values('efficiency_type')):
indexer = util.level_specific_indexer(self.active_coefficients,'efficiency_type',2)
pass_through_subsectors = self.active_coefficients.loc[indexer,:].index.get_level_values('supply_node')
pass_through_subsectors = [int(x) for x in pass_through_subsectors if node_dict.has_key(x)]
self.pass_through_dict = dict.fromkeys(pass_through_subsectors,False)
class Export(Abstract):
    """Energy exported out of a supply node, or a scenario export measure.

    Loads either the node's baseline export series (SupplyExport tables) or a
    scenario-specified measure (SupplyExportMeasures tables), converts it to
    the model energy unit, and allocates it across demand sectors during the
    supply loop.
    """
    def __init__(self,node_id, measure_id=None, **kwargs):
        # exports are specified as totals, not intensities
        self.input_type = 'total'
        if measure_id is None:
            # baseline exports keyed by supply node
            self.id = node_id
            self.sql_id_table = 'SupplyExport'
            self.sql_data_table = 'SupplyExportData'
            Abstract.__init__(self, self.id, primary_key='supply_node_id')
        else:
            # scenario export measure keyed by measure id
            self.id = measure_id
            self.sql_id_table = 'SupplyExportMeasures'
            self.sql_data_table = 'SupplyExportMeasuresData'
            Abstract.__init__(self, measure_id, primary_key='id', data_id_key='parent_id')
    def calculate(self, years, demand_sectors):
        """Remap and convert export data, or create an all-zero table when no data."""
        self.years = years
        self.demand_sectors = demand_sectors
        if self.data is True:
            self.remap(lower=None)
            self.convert()
            # ensure every geography is present, filling missing ones with 0
            self.values = util.reindex_df_level_with_new_elements(self.values, cfg.primary_geography, cfg.geographies, fill_value=0.0)
        else:
            self.set_export_df()
    def convert(self):
        """Pivot years into columns and convert to the model's calculation energy unit."""
        self.values = self.values.unstack(level='year')
        self.values.columns = self.values.columns.droplevel()
        self.values = util.unit_convert(self.values, unit_from_num=self.unit, unit_to_num=cfg.calculation_energy_unit)
    def set_export_df(self):
        """Set an empty (zero-filled) export df over geography x sector x year."""
        df_index = pd.MultiIndex.from_product([cfg.geographies, self.demand_sectors], names=[cfg.primary_geography, 'demand_sector'])
        self.values= util.empty_df(index=df_index, columns=self.years, fill_value=0)
    def allocate(self, active_supply, demand_sectors, supply_years, year, loop):
        """Performs sectoral allocation of active export values. In year 1/loop1, this happens equally across sectors. Once throughput is known, it is allocated by throughput"""
        if year == min(supply_years) and loop == 'initial':
            if 'demand_sector' not in self.values.index.names:
                active_values = []
                for sector in self.demand_sectors:
                    #if we have no active supply, we must allocate exports pro-rata across number of sectors
                    active_value = copy.deepcopy(self.values.loc[:,year].to_frame()) * 1/len(self.demand_sectors)
                    active_value['demand_sector'] = sector
                    active_values.append(active_value)
                active_values = pd.concat(active_values)
                active_values.set_index('demand_sector', append=True, inplace=True)
                self.active_values = active_values
            else:
                self.active_values = self.values.loc[:,year].to_frame()
        else:
            #remap exports to active supply, which has information about sectoral throughput
            self.active_values = self.values.loc[:,year].to_frame()
            # negative supply would distort the pro-rata shares, so floor at 0
            # NOTE(review): this mutates the caller's active_supply in place — confirm intended
            active_supply[active_supply.values<0]=0
            self.remap(map_from='active_values', map_to='active_values', drivers=active_supply, fill_timeseries=False, current_geography=cfg.primary_geography, driver_geography=cfg.primary_geography)
        self.active_values.replace(np.nan,0,inplace=True)
        self.active_values = self.active_values.reorder_levels([cfg.primary_geography, 'demand_sector'])
class BlendNode(Node):
    """A supply node that blends energy from multiple upstream supply nodes.

    Blend shares come from BlendNodeBlendMeasures in the scenario; any share
    not explicitly specified is assigned to the node's residual supply node.
    """
    def __init__(self, id, supply_type, scenario, **kwargs):
        Node.__init__(self, id, supply_type, scenario)
        self.id = id
        self.supply_type = supply_type
        for col, att in util.object_att_from_table('SupplyNodes', id, ):
            setattr(self, col, att)
        # upstream supply nodes that may feed this blend
        self.nodes = util.sql_read_table('BlendNodeInputsData', 'supply_node_id', blend_node_id=id, return_iterable=True)
        #used as a flag in the annual loop for whether we need to recalculate the coefficients
    def calculate_active_coefficients(self, year, loop):
        """Set the active (trade-adjusted) blend coefficients for *year*."""
        self.active_coefficients = self.values.loc[:,year].to_frame()
        self.active_coefficients_total_untraded = util.remove_df_levels(self.active_coefficients,'efficiency_type')
        self.active_coefficients_untraded = copy.deepcopy(self.active_coefficients)
        self.active_coefficients_untraded.sort(inplace=True,axis=0)
        # expand to geography/sector columns and apply trade adjustments
        self.active_coefficients = self.add_column_index(self.active_coefficients_untraded).T.stack(['supply_node','efficiency_type'])
        self.active_coefficients_total = self.add_column_index(self.active_coefficients_total_untraded).T.stack(['supply_node'])
        self.active_coefficients_total_emissions_rate = copy.deepcopy(self.active_coefficients_total)
        self.active_coefficients_total = DfOper.mult([self.active_coefficients_total,self.active_trade_adjustment_df])
        # blend coefficients always use efficiency_type 2 (pass-through)
        keys = [2]
        name = ['efficiency_type']
        active_trade_adjustment_df = pd.concat([self.active_trade_adjustment_df]*len(keys), keys=keys, names=name)
#        active_constraint_adjustment_df = pd.concat([self.active_constraint_adjustment_df]*len(keys), keys=keys, names=name)
        self.active_coefficients = DfOper.mult([self.active_coefficients,active_trade_adjustment_df])
        # replicate coefficients across GHGs for emissions accounting
        keys = self.ghgs
        name = ['ghg']
        self.active_emissions_coefficients = pd.concat([self.active_coefficients]*len(keys), keys=keys, names=name)
        self.active_emissions_coefficients = self.active_emissions_coefficients.reorder_levels([cfg.primary_geography, 'demand_sector', 'supply_node', 'efficiency_type', 'ghg'])
        self.active_emissions_coefficients.sort(inplace=True)
    def add_blend_measures(self, scenario):
        """
        add all blend measures in a selected scenario to a dictionary
        """
        self.blend_measures = {id: BlendMeasure(id, scenario)
                               for id in scenario.get_measures('BlendNodeBlendMeasures', self.id)}
    def calculate(self):
        """Calculate blend values from measures (or the residual default) and subclasses."""
        #all nodes can have potential conversions. Set to None if no data.
        self.conversion, self.resource_unit = self.add_conversion()
        measures = []
        for measure in self.blend_measures.values():
            measure.calculate(self.vintages, self.years)
            measures.append(measure.values)
        if len(measures):
            # sum all measure shares; remainder goes to the residual node
            self.raw_values = util.DfOper.add(measures)
            self.calculate_residual()
        else:
            self.set_residual()
        self.set_adjustments()
        self.set_pass_through_df_dict()
        self.calculate_subclasses()
    def calculate_residual(self):
        """calculates values for residual node in Blend Node dataframe
        ex. if 10% of hydrogen blend is supplied by electrolysis and the rest is unspecified,
        90% of hydrogen blend is allocated to residual node
        """
        # calculates sum of all supply_nodes
        # residual equals 1-sum of all other specified nodes
        self.values = self.raw_values.sort()
        if 'demand_sector' in self.values.index.names:
            self.values = util.reindex_df_level_with_new_elements(self.values,'demand_sector',self.demand_sectors,0)
        # if the residual node itself was specified, zero it before computing
        # the residual share so it is not double counted
        if self.residual_supply_node_id in self.values.index.get_level_values('supply_node'):
            indexer = util.level_specific_indexer(self.values, 'supply_node', self.residual_supply_node_id)
            self.values.loc[indexer,:] = 0
        residual = 1-util.remove_df_levels(self.values,['supply_node'])
        residual['supply_node'] = self.residual_supply_node_id
        residual.set_index('supply_node', append=True, inplace=True)
#        residual = residual.reorder_levels(residual_levels+['supply_node'])
        # concatenate values
#        residual = residual.reorder_levels(self.values.index.names)
        self.values = pd.concat([self.values, residual], join='outer', axis=0)
        # remove duplicates where a node is specified and is specified as residual node
        self.values = self.values.groupby(level=self.values.index.names).sum()
        # floor non-positive shares at a tiny epsilon (1e-7), not zero, so later
        # reconciliation by division stays well-defined
        self.values.loc[self.values['value'] <= 0, 'value'] = 1e-7
        self.expand_blend()
        self.values = self.values.unstack(level='year')
        self.values.columns = self.values.columns.droplevel()
    def update_residual(self, year):
        """calculates values for residual node in Blend Node dataframe
        ex. if 10% of hydrogen blend is supplied by electrolysis and the rest is unspecified,
        90% of hydrogen blend is allocated to residual node
        """
        # calculates sum of all supply_nodes
        indexer = util.level_specific_indexer(self.values, 'supply_node', self.residual_supply_node_id)
        self.values.loc[indexer,year] = 0
        residual_levels = [x for x in self.values.index.names if x != 'supply_node']
        # residual equals 1-sum of all other specified nodes
        residual = 1-self.values.loc[:,year].to_frame().groupby(level=residual_levels).sum()
        residual['supply_node'] = self.residual_supply_node_id
        residual.set_index('supply_node', append=True, inplace=True)
        residual = residual.reorder_levels(residual_levels+['supply_node'])
        # concatenate values
        residual = residual.reorder_levels(self.values.index.names)
        self.values.loc[indexer,year] = residual
        # remove duplicates where a node is specified and is specified as residual node
        self.values.loc[:,year] = self.values.loc[:,year].groupby(level=self.values.index.names).sum()
        # floor non-positive shares at a tiny epsilon (1e-7) so reconciliation
        # by division stays well-defined
        self.values[self.values <= 0] = 1e-7
    def expand_blend(self):
        """Expand blend values to all input nodes, sectors, and the standard level order."""
        #needs a fill value because if a node is not demanding any energy from another node, it still may be supplied, and reconciliation happens via division (can't multiply by 0)
        self.values = util.reindex_df_level_with_new_elements(self.values,'supply_node', self.nodes, fill_value = 1e-7)
        if 'demand_sector' not in self.values.index.names:
            self.values = util.expand_multi(self.values, self.demand_sectors, ['demand_sector'], incremental=True)
        # blend inputs are always efficiency_type 2 (pass-through)
        self.values['efficiency_type'] = 2
        self.values.set_index('efficiency_type', append=True, inplace=True)
        self.values = self.values.reorder_levels([cfg.primary_geography,'demand_sector','supply_node','efficiency_type','year'])
        self.values = self.values.sort()
    def set_residual(self):
        """Creates a df where the residual node has share 1 (epsilon elsewhere).

        Used for blend nodes with no blend measures specified.
        """
        df_index = pd.MultiIndex.from_product([cfg.geo.geographies[cfg.primary_geography], self.demand_sectors, self.nodes, self.years, [2]], names=[cfg.primary_geography, 'demand_sector','supply_node','year','efficiency_type' ])
        self.raw_values = util.empty_df(index=df_index,columns=['value'],fill_value=1e-7)
        indexer = util.level_specific_indexer(self.raw_values, 'supply_node', self.residual_supply_node_id)
        self.raw_values.loc[indexer, 'value'] = 1
        self.raw_values = self.raw_values.unstack(level='year')
        self.raw_values.columns = self.raw_values.columns.droplevel()
        self.raw_values = self.raw_values.sort()
        self.values = copy.deepcopy(self.raw_values)
class SupplyNode(Node,StockItem):
    def __init__(self, id, supply_type, scenario, **kwargs):
        """Build a supply node with coefficients, potential, capacity factor, costs, and stock."""
        Node.__init__(self, id, supply_type, scenario)
        StockItem.__init__(self)
        # node-level inputs are totals rather than intensities
        self.input_type = 'total'
        self.coefficients = SupplyCoefficients(self.id, self.scenario)
        self.potential = SupplyPotential(self.id, self.enforce_potential_constraint, self.scenario)
        self.capacity_factor = SupplyCapacityFactor(self.id, self.scenario)
        self.costs = {}
        self.create_costs()
        self.add_stock()
    def calculate(self):
        """Calculate all node inputs and run the initial stock rollover."""
        # all nodes can have potential conversions; set to None if no data
        self.conversion, self.resource_unit = self.add_conversion()
        self.set_rollover_groups()
        self.calculate_subclasses()
        for cost in self.costs.values():
            cost.calculate(self.years, self.demand_sectors)
        self.calculate_stock_measures()
        self.add_case_stock()
        self.setup_stock_rollover(self.years)
        if self.coefficients.raw_values is not None:
            # upstream supply nodes this node draws energy from
            self.nodes = set(self.coefficients.values.index.get_level_values('supply_node'))
        self.set_adjustments()
        self.set_pass_through_df_dict()
def set_rollover_groups(self):
"""sets the internal index for use in stock and cost calculations"""
# determines whether stock rollover needs to occur on demand sector or resource bin index
self.rollover_group_names = []
self.rollover_group_levels = []
if self.potential.data is True:
for name, level in zip(self.potential.raw_values.index.names, self.potential.raw_values.index.levels):
if (name == 'resource_bin' or name == 'demand_sector') and name not in self.rollover_group_names:
if name == 'demand_sector':
level = self.demand_sectors
self.rollover_group_levels.append(list(level))
self.rollover_group_names.append(name)
if self.stock.data is True:
for name, level in zip(self.stock.raw_values.index.names, self.stock.raw_values.index.levels):
if (name == 'resource_bin' or name == 'demand_sector') and name not in self.rollover_group_names:
if name == 'demand_sector':
level = self.demand_sectors
self.rollover_group_levels.append(list(level))
self.rollover_group_names.append(name)
for cost in self.costs.keys():
for name, level in zip(self.costs[cost].raw_values.index.names, self.costs[cost].raw_values.index.levels):
if (name == 'resource_bin' or name == 'demand_sector') and name not in self.rollover_group_names:
if name == 'demand_sector':
level = self.demand_sectors
self.rollover_group_levels.append(list(level))
self.rollover_group_names.append(name)
if self.id == self.distribution_grid_node_id and 'demand_sector' not in self.rollover_group_names:
#requires distribution grid node to maintain demand sector resolution in its stocks
self.rollover_group_levels.append(self.demand_sectors)
self.rollover_group_names.append('demand_sector')
self.rollover_group_names = [cfg.primary_geography] + self.rollover_group_names
self.rollover_group_levels = [cfg.geo.geographies[cfg.primary_geography]] + self.rollover_group_levels
    def add_stock(self):
        """add stock instance to node"""
        self.stock = Stock(id=self.id, drivers=None, sql_id_table='SupplyStock', sql_data_table='SupplyStockData', primary_key='supply_node_id', scenario=self.scenario)
        self.stock.input_type = 'total'
        # stock is capacity expressed as energy per model time step
        self.stock.unit = cfg.calculation_energy_unit + "/" + cfg.cfgfile.get('case','time_step')
    def add_case_stock(self):
        """Aggregate scenario total-stock measures into a case stock, if any exist."""
        self.case_stock = StockItem()
#        total_stocks = []
#        for stock in self.total_stocks:
#            total_stocks.append(stock)
        if len(self.total_stocks):
            self.case_stock.data = True
            # sum all specified total-stock measures (not expandable across levels)
            self.case_stock.total = DfOper.add([ x.values for x in self.total_stocks.values()], expandable=False)
#        self.case_stock.total[self.case_stock.total.index.get_level_values('year')<int(cfg.cfgfile.get('case','current_year'))+1] = np.nan
def calculate_stock_measures(self):
for stock in self.total_stocks.values():
stock.calculate(self.vintages,self.years)
stock.convert()
def calculate_input_stock(self):
self.stock.years = self.years
if self.stock.data is True:
self.stock.remap(map_from='raw_values', map_to='total',fill_timeseries=True,fill_value=np.nan)
self.convert_stock('stock','total')
self.stock.total = util.remove_df_levels(self.stock.total,'supply_technology')
# if hasattr(self.case_stock,'total'):
# self.case_stock.remap(map_from='raw_values', map_to='total',fill_timeseries=True, fill_value=np.nan)
if self.stock.data is True and hasattr(self.case_stock,'total'):
self.convert_stock('case_stock','total')
# self.case_stock.total.fillna(self.stock.total, inplace=True)
self.stock.total = self.stock.total[self.stock.total.index.get_level_values('year')<=int(cfg.cfgfile.get('case','current_year'))]
self.case_stock.total[self.stock.total.index.get_level_values('year')>int(cfg.cfgfile.get('case','current_year'))]
self.case_stock.total = pd.concat([self.stock.total,self.case_stock.total])
self
elif self.stock.data is False and hasattr(self.case_stock,'total'):
self.stock = self.case_stock
elif self.stock.data is False and not hasattr(self.case_stock,'total'):
index = pd.MultiIndex.from_product(self.rollover_group_levels + [self.years] ,names=self.rollover_group_names + ['year'] )
self.stock.total = util.empty_df(index=index,columns=['value'],fill_value=np.nan)
self.stock.total_rollover = copy.deepcopy(self.stock.total)
self.stock.total = self.stock.total.unstack('year')
self.stock.total.columns = self.stock.total.columns.droplevel()
if self.stock.data or hasattr(self.case_stock,'data') and self.case_stock.data == True:
self.stock.data = True
def calc_node_survival_function(self):
self.set_survival_parameters()
self.set_survival_vintaged()
self.set_decay_vintaged()
self.set_decay_initial_stock()
self.set_survival_initial_stock()
def create_node_survival_functions(self):
functions = defaultdict(list)
for fun in ['survival_vintaged', 'survival_initial_stock', 'decay_vintaged', 'decay_initial_stock']:
functions[fun].append(getattr(self, fun))
setattr(self.stock, fun, pd.DataFrame(np.array(functions[fun]).T, columns=[self.id]))
    def create_node_rollover_markov_matrices(self):
        """Build the Markov transition matrices used by the stock rollover.

        One matrix governs vintaged stock (tracked by installation year) and
        one governs the pre-existing initial stock; both are built from the
        node's decay/survival curves with a single technology dimension.
        """
        vintaged_markov = util.create_markov_vector(self.stock.decay_vintaged.values, self.stock.survival_vintaged.values)
        self.stock.vintaged_markov_matrix = util.create_markov_matrix(vintaged_markov, 1 , len(self.years))
        initial_markov = util.create_markov_vector(self.stock.decay_initial_stock.values, self.stock.survival_initial_stock.values)
        self.stock.initial_markov_matrix = util.create_markov_matrix(initial_markov, 1 , len(self.years))
    def setup_stock_rollover(self, years):
        """Prepare and initialize the stock rollover for the entire supply node.

        Builds survival functions and Markov matrices, allocates empty result
        tables, creates one Rollover instance per rollover group, and runs all
        historical years (before current_year) so the stock is correct when the
        IO loop begins.
        """
        #prep stock rollover for initial solve
        self.vintages = self.years
        self.calc_node_survival_function()
        self.create_node_survival_functions()
        self.create_node_rollover_markov_matrices()
        self.calculate_input_stock()
        self.ensure_capacity_factor()
        levels = self.rollover_group_levels
        names = self.rollover_group_names
        index = pd.MultiIndex.from_product(levels, names=names)
        columns = self.years
        # stock requirement tables (capacity and energy) filled during the IO loop
        self.stock.requirement = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
        self.stock.requirement_energy = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
        if len(names)>1:
            self.rollover_groups = self.stock.total.groupby(level=names).groups
        else:
            #TODO Ryan List Comprehension
            item_list = levels[0]
            self.rollover_groups = dict()
            for x in item_list:
                self.rollover_groups[(x,)] = (x,)
        # the vintage axis gains a pseudo-vintage one year before the first
        # model year to hold the pre-existing (initial) stock
        full_levels = self.rollover_group_levels + [[self.vintages[0] - 1] + self.vintages]
        full_names = self.rollover_group_names + ['vintage']
        index = pd.MultiIndex.from_product(full_levels, names=full_names)
        self.stock.values = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
        self.stock.values_energy = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
        self.stock.remaining = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
        self.stock.values_financial = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
        self.stock.values_financial_energy = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
        full_levels = self.rollover_group_levels + [self.vintages]
        index = pd.MultiIndex.from_product(full_levels, names=full_names)
        self.stock.retirements = util.empty_df(index=index, columns= self.years)
        self.stock.sales = util.empty_df(index=index, columns=['value'])
        self.stock.sales_energy = util.empty_df(index=index, columns=['value'])
        self.rollover_dict = {}
        self.total_stock = self.stock.total.stack(dropna=False)
        self.setup_financial_stock()
        # one Rollover instance per rollover group (geography/sector/bin combination)
        for elements in self.rollover_groups.keys():
            elements = util.ensure_tuple(elements)
            total_stock = self.stock.total_rollover.loc[elements].values
            self.rollover_dict[elements] = Rollover(vintaged_markov_matrix=self.stock.vintaged_markov_matrix,
                                                    initial_markov_matrix=self.stock.initial_markov_matrix,
                                                    num_years=len(years), num_vintages=len(years),
                                                    num_techs=1, initial_stock=total_stock[0],
                                                    sales_share=None, stock_changes=None,
                                                    specified_stock=total_stock, specified_retirements=None,stock_changes_as_min=True)
        # run all historical years up front
        for year in [x for x in self.years if x<int(cfg.cfgfile.get('case', 'current_year'))]:
            for elements in self.rollover_groups.keys():
                elements = util.ensure_tuple(elements)
                try:
                    self.rollover_dict[elements].run(1)
                except:
                    # log which group failed, then re-raise the original error
                    logging.error('error encountered in rollover for node ' + str(self.id) + ' in elements '+ str(elements) + ' year ' + str(year))
                    raise
                stock_total, stock_new, stock_replacement, retirements, retirements_natural, retirements_early, sales_record, sales_new, sales_replacement = self.rollover_dict[(elements)].return_formatted_outputs(year_offset=0)
                self.stock.values.loc[elements,year] = stock_total
                sales_indexer = elements + (year,)
                self.stock.sales.loc[sales_indexer, 'value'] = sales_record
#            self.stock.retirements.loc[sales_indexer,year] = retirements
                self.financial_stock(year)
                self.calculate_energy(year)
def ensure_capacity_factor(self):
index = pd.MultiIndex.from_product(self.rollover_group_levels, names=self.rollover_group_names)
columns = self.years
df = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'),fill_value=1.0)
if self.capacity_factor.data is True:
self.capacity_factor.values = DfOper.mult([df,self.capacity_factor.values])
else:
self.capacity_factor.values = df
    def calculate_dispatch_costs(self, year, embodied_cost_df, loop=None):
        """Calculate per-unit dispatch costs for this node from embodied input costs.

        Multiplies trade-adjusted input shares by the embodied cost of each
        input supply node, reduces to the rollover index, applies the dispatch
        coefficients, and floors the result at zero.
        """
        self.active_dispatch_costs = copy.deepcopy(self.active_trade_adjustment_df)
        # replace each supply node's rows with trade-adjusted embodied costs
        for node in self.active_trade_adjustment_df.index.get_level_values('supply_node'):
            embodied_cost_indexer = util.level_specific_indexer(embodied_cost_df, 'supply_node',node)
            trade_adjustment_indexer = util.level_specific_indexer(self.active_trade_adjustment_df, 'supply_node',node)
            self.active_dispatch_costs.loc[trade_adjustment_indexer,:] = util.DfOper.mult([self.active_trade_adjustment_df.loc[trade_adjustment_indexer,:],embodied_cost_df.loc[embodied_cost_indexer,:]]).values
        self.active_dispatch_costs = self.active_dispatch_costs.groupby(level='supply_node').sum()
        self.active_dispatch_costs = self.active_dispatch_costs.stack([cfg.primary_geography,'demand_sector'])
        # weight the costs by how much of each input the node actually uses
        self.active_dispatch_costs *= self.active_coefficients_total
        self.active_dispatch_costs = util.reduce_levels(self.active_dispatch_costs, self.rollover_group_names, agg_function='mean')
        self.active_dispatch_costs = DfOper.mult([self.active_dispatch_costs, self.active_dispatch_coefficients])
        self.active_dispatch_costs = util.remove_df_levels(self.active_dispatch_costs, 'supply_node')
        self.active_dispatch_costs = self.active_dispatch_costs.reorder_levels(self.stock.values.index.names)
        # negative dispatch costs are numerical artifacts; floor at zero
        self.active_dispatch_costs[self.active_dispatch_costs<0] = 0
    def stock_rollover(self, year, loop, stock_changes):
        """stock rollover function that is used for years after the IO has been initiated"""
        #if the stock rollover's first year is also the first year of the IO loop, we set the initial stock
        #equal to the first year's stock requirement. This ensures proper rolloff of the existing stock
        if min(self.years) == year:
            for elements in self.rollover_groups.keys():
                elements = util.ensure_tuple(elements)
                self.rollover_dict[elements].initial_stock = np.array(util.ensure_iterable_and_not_string(self.stock.requirement.loc[elements, year]))
        #run the stock rollover for the year and record values
        for elements in self.rollover_groups.keys():
            elements = util.ensure_tuple(elements)
            try:
                self.rollover_dict[elements].use_stock_changes = True
                self.rollover_dict[elements].run(1, stock_changes.loc[elements],np.array(self.stock.total_rollover.loc[elements+(year,)]))
            except:
                # log which group failed, then re-raise the original error
                logging.error('error encountered in rollover for node ' + str(self.id) + ' in elements '+ str(elements) + ' year ' + str(year))
                raise
            stock_total, stock_new, stock_replacement, retirements, retirements_natural, retirements_early, sales_record, sales_new, sales_replacement = self.rollover_dict[(elements)].return_formatted_outputs(year_offset=0)
            self.stock.values.loc[elements,year] = stock_total
            sales_indexer = elements + (year,)
            self.stock.sales.loc[sales_indexer, 'value'] = sales_record
#            self.stock.retirements[sales_indexer, year] = retirements
            self.financial_stock(year)
            self.calculate_energy(year)
    def setup_financial_stock(self):
        """Precompute book-life matrices used to convert sales vintages into financial stock."""
        # creates binary matrix across years and vintages for a technology based on its book life
        self.book_life_matrix = util.book_life_df(self.book_life, self.vintages, self.years)
        # creates a linear decay of initial stock
        self.initial_book_life_matrix = util.initial_book_life_df(self.book_life, self.mean_lifetime, self.vintages, self.years)
    def calculate_energy(self, year):
        """Convert capacity stock and current-vintage sales to energy terms for `year`.

        Energy = capacity * capacity factor, scaled from the model time step to a year.
        """
        self.stock.values_energy[year] = DfOper.mult([self.stock.values[year].to_frame(), self.capacity_factor.values[year].to_frame()])* util.unit_conversion(unit_from_den=cfg.cfgfile.get('case','time_step'), unit_to_den='year')[0]
        # only the sales of the current vintage are converted here
        indexer = util.level_specific_indexer(self.stock.sales,'vintage', year)
        self.stock.sales_energy.loc[indexer,:] = DfOper.mult([self.stock.sales.loc[indexer,:], self.capacity_factor.values[year].to_frame()])* util.unit_conversion(unit_from_den=cfg.cfgfile.get('case','time_step'), unit_to_den='year')[0]
    def financial_stock(self, year):
        """
        Calculates the amount of stock based on sales and book life
        instead of physical decay.

        Financial stock in `year` = vintage sales spread over each vintage's
        book life, plus the pre-model-start stock decayed linearly.
        """
        # stock values in any year equals vintage sales multiplied by book life
        start_year = min(self.years)
        values_financial = util.DfOper.mult([self.stock.sales, self.book_life_matrix[year].to_frame()])
        # the vintage labeled start_year-1 holds the stock that existed before the model start
        indexer = util.level_specific_indexer(self.stock.values,'vintage',start_year-1)
        starting_financial_stock = self.stock.values.loc[indexer,start_year].to_frame()
        starting_financial_stock.columns = [year]
        initial_values_financial = util.DfOper.mult([starting_financial_stock, self.initial_book_life_matrix[year].to_frame()])
        # sum normal and initial stock values
        self.stock.values_financial[year] = util.DfOper.add([values_financial, initial_values_financial[year].to_frame()],non_expandable_levels=None)
        # also express financial stock in energy terms (capacity * capacity factor, per year)
        self.stock.values_financial_energy[year] = DfOper.mult([self.stock.values_financial[year].to_frame(), self.capacity_factor.values[year].to_frame()])* util.unit_conversion(unit_from_den=cfg.cfgfile.get('case','time_step'), unit_to_den='year')[0]
def calculate_active_coefficients(self,year, loop):
"""calculates the active coefficients"""
#If a node has no potential data, then it doesn't have a supply curve. Therefore the coefficients are just the specified inputs in that year
if year == int(cfg.cfgfile.get('case', 'current_year')) and loop == 'initial':
#in the initial loop of the supply-side, we only know internal demand
throughput = self.active_demand
else:
#after that, our best representation of throughput is active supply, which is updated in every IO loop
throughput = self.active_supply
#in the first loop we take a slice of the input node efficiency
if self.potential.data is False:
#if the node has no potential data, and therefore no supply curve
if self.coefficients.data is True:
#we take the coefficients for the current year
self.active_coefficients = self.coefficients.values.loc[:,year].to_frame()
else:
self.active_coefficients = None
self.active_coefficients_total = None
elif self.coefficients.data is True:
if self.potential.raw_values is not None:
try:
self.potential.remap_to_potential_and_normalize(throughput, year, self.tradable_geography)
except:
pdb.set_trace()
filter_geo_potential_normal = self.potential.active_supply_curve_normal
filter_geo_potential_normal = filter_geo_potential_normal.reset_index().set_index(filter_geo_potential_normal.index.names)
self.active_coefficients = util.remove_df_levels(util.DfOper.mult([self.coefficients.values.loc[:,year].to_frame(),
filter_geo_potential_normal],
(True,True),(False,False)),'resource_bin')
else:
stock_normal = self.stock.values.loc[:,year].to_frame().groupby(level=util.ix_excl(self.stock.values,['resource_bin'])).transform(lambda x: x/x.sum())
self.active_coefficients = DfOper.mult([self.coefficients.values.loc[:,year].to_frame(), stock_normal])
self.active_coefficients.sort(inplace=True)
else:
self.active_coefficients = None
self.active_coefficients_total = None
self.active_emissions_coefficients = None
#we multiply the active coefficients by the trade adjustments to account for inter-geography trades
if self.active_coefficients is not None:
self.active_coefficients_total_untraded = util.remove_df_levels(self.active_coefficients,'efficiency_type')
self.active_coefficients_total = DfOper.mult([self.add_column_index(self.active_coefficients_total_untraded), self.active_trade_adjustment_df])
self.active_coefficients_untraded = copy.deepcopy(self.active_coefficients)
self.active_coefficients_untraded.sort(inplace=True,axis=0)
nodes = list(set(self.active_trade_adjustment_df.index.get_level_values('supply_node')))
df_list = []
for node in nodes:
trade_indexer = util.level_specific_indexer(self.active_trade_adjustment_df, 'supply_node', node)
# coefficient_indexer = util.level_specific_indexer(self.active_coefficients_untraded.sort(), 'supply_node', node)
efficiency_types = list(set(util.df_slice(self.active_coefficients_untraded, node, 'supply_node').index.get_level_values('efficiency_type')))
keys = efficiency_types
name = ['efficiency_type']
df = pd.concat([self.active_trade_adjustment_df.loc[trade_indexer,:]]*len(keys),keys=keys,names=name)
df_list.append(df)
active_trade_adjustment_df = pd.concat(df_list)
# active_trade_adjustment_df = self.active_trade_adjustment_df.reindex(index = self.active_coefficients_untraded.index,method='bfill')
self.active_coefficients = DfOper.mult([self.add_column_index(self.active_coefficients_untraded),active_trade_adjustment_df])
keys = self.ghgs
name = ['ghg']
self.active_emissions_coefficients = pd.concat([self.active_coefficients]*len(keys), keys=keys, names=name)
self.active_emissions_coefficients = self.active_emissions_coefficients.reorder_levels([cfg.primary_geography,'demand_sector', 'supply_node', 'efficiency_type', 'ghg'])
self.active_emissions_coefficients.sort(inplace=True)
    def update_stock(self, year, loop):
        """Updates the stock in the IO loop.

        Order matters: throughput feeds remaining stock, which feeds totals
        and requirements, which finally drive the rollover.
        """
        self.determine_throughput(year,loop)
        self.update_remaining_stock(year, loop)
        self.update_total(year)
        self.update_requirement(year)
        self.stock_rollover(year, loop, self.stock.act_stock_changes)
    def determine_throughput(self,year,loop):
        """Determines the throughput requirement of the node for `year`."""
        if year == int(cfg.cfgfile.get('case','current_year')) and loop == 'initial':
            #in the initial loop of the supply-side, we only know internal demand
            self.throughput = self.active_demand
        else:
            # after the initial loop, active supply (updated every IO iteration) is the best estimate
            self.throughput = self.active_supply
        if self.throughput is not None:
            # collapse to the rollover index and floor at zero (negative throughput is meaningless)
            self.throughput = self.throughput.groupby(level=util.ix_incl(self.throughput, self.rollover_group_names)).sum()
            self.throughput[self.throughput<0]=0
    def update_remaining_stock(self,year, loop):
        """Calculates the amount of energy throughput from remaining stock (after natural rollover from the previous year).

        In loop 1 (or the initial loop of the current year) the rollover is
        peeked one year ahead to obtain the remaining stock; in later loops the
        rollover is rewound so the year can be re-run with updated inputs.
        """
        for elements in self.rollover_groups.keys():
            elements = util.ensure_tuple(elements)
            element_indexer= util.level_specific_indexer(self.stock.remaining, self.rollover_group_names,elements)
            if year == int(cfg.cfgfile.get('case','current_year')) and loop == 'initial':
                self.stock.remaining.loc[element_indexer, year] = self.rollover_dict[elements].return_formatted_stock(year_offset=1)
            elif year == int(cfg.cfgfile.get('case','current_year')) and loop == 1:
                # the year was already run in the initial loop; rewind before peeking again
                self.rollover_dict[elements].rewind(1)
                self.stock.remaining.loc[element_indexer, year] = self.rollover_dict[elements].return_formatted_stock(year_offset=1)
            elif loop == 1:
                self.stock.remaining.loc[element_indexer, year] = self.rollover_dict[elements].return_formatted_stock(year_offset=1)
            else:
                # later loops: rewind only; remaining stock from loop 1 is kept
                self.rollover_dict[elements].rewind(1)
        # energy from remaining stock = capacity * capacity factor, per year
        self.stock.act_rem_energy = util.DfOper.mult([self.stock.remaining.loc[:,year].to_frame(), self.capacity_factor.values.loc[:,year].to_frame()]) * util.unit_conversion(unit_from_den=cfg.cfgfile.get('case', 'time_step'), unit_to_den='year')[0]
        # fallback conversion used where there is no remaining stock to derive a ratio from
        default_conversion = self.capacity_factor.values.loc[:,year].to_frame() * util.unit_conversion(unit_from_num='year',unit_to_num=cfg.cfgfile.get('case', 'time_step'))[0]
        self.stock.act_energy_capacity_ratio = util.DfOper.divi([self.stock.act_rem_energy.groupby(level=util.ix_excl(self.stock.act_rem_energy,['vintage'])).sum(),
                                                    self.stock.remaining.loc[:, year].to_frame().groupby(level=util.ix_excl(self.stock.remaining, ['vintage'])).sum()]).fillna(default_conversion)
        # a zero ratio would make capacity requirements blow up; replace with the default conversion
        self.stock.act_energy_capacity_ratio[self.stock.act_energy_capacity_ratio==0]= util.unit_conversion(unit_from_num='year',unit_to_num=cfg.cfgfile.get('case', 'time_step'))[0]
    def update_total(self, year):
        """Sets the minimum necessary total stock - based on throughput (stock requirement) and total of the specified and remaining stock."""
        # convert total specified stock to energy; where unspecified (NaN), fall back to remaining-stock energy
        self.stock.act_total_energy = util.DfOper.mult([self.stock.total.loc[:,year].to_frame(), self.stock.act_energy_capacity_ratio],fill_value=np.nan)
        self.stock.act_total_energy = self.stock.act_total_energy.fillna(util.remove_df_levels(self.stock.act_rem_energy,'vintage'))
    def update_requirement(self,year):
        """Updates annual stock requirements with the maximum of required stock and specified and remaining natural rolloff. Also
        distributes the necessary stock changes to the available residuals in the supply curve bins if the stock has resource_bin indexers.
        """
        previous_year = max(min(self.years),year-1)
        if self.potential.data is False:
            # no supply curve: requirement is the larger of throughput and existing (specified + remaining) energy
            if self.throughput is not None:
                self.stock.requirement_energy.loc[:,year] = self.throughput
            a = self.stock.requirement_energy.loc[:,year].to_frame()
            b = self.stock.act_total_energy
            a[a<b] = b
            self.stock.requirement_energy.loc[:,year] = a
            self.stock.requirement.loc[:,year] = DfOper.divi([self.stock.requirement_energy.loc[:,year].to_frame(),self.stock.act_energy_capacity_ratio]).fillna(0)
        else:
            #calculates the total amount of energy needed to distribute
            total_residual = util.DfOper.subt([self.throughput, self.stock.act_total_energy],expandable=(False,False), collapsible=(True,True))
            total_residual[total_residual<0] = 0
            #calculates the residual amount of energy available in each bin
            bins = util.DfOper.subt([self.potential.values.loc[:, year].to_frame(), self.stock.act_total_energy],expandable=(False,False), collapsible=(True,True))
            bins[bins<0] = 0
            #calculates the supply curve of remaining energy
            bin_supply_curve = bins.groupby(level=[x for x in self.rollover_group_names if x!= 'resource_bin']).cumsum()
            #expands the total energy needed to distribute to mask against the supply curve. Used as a cap on the supply curve.
            total_residual = util.expand_multi(total_residual,bins.index.levels,bins.index.names)
            bin_supply_curve[bin_supply_curve>total_residual] = total_residual
            # diff the capped cumulative curve back into per-bin increments
            bin_supply_curve = bin_supply_curve.groupby(level=util.ix_excl(bin_supply_curve,'resource_bin')).diff().fillna(bin_supply_curve)
            self.stock.requirement_energy.loc[:,year] = util.DfOper.add([self.stock.act_total_energy, bin_supply_curve])
            self.stock.requirement.loc[:,year] = util.DfOper.divi([self.stock.requirement_energy.loc[:,year].to_frame(),self.stock.act_energy_capacity_ratio])
        # stock changes drive the rollover: zero in the very first modeled year, otherwise delta vs. last year's stock
        if year == int(cfg.cfgfile.get('case','current_year')) and year==min(self.years):
            self.stock.act_stock_changes = self.stock.requirement[year]*0
        else:
            self.stock.act_stock_changes = util.DfOper.subt([self.stock.requirement[year].to_frame(), util.remove_df_levels(self.stock.values[previous_year].to_frame(),['vintage'])])[year]
def calculate_oversupply(self, year, loop):
"""calculates whether the stock would oversupply the IO requirement and returns an oversupply adjustment factor."""
if hasattr(self,'stock'):
oversupply_factor = util.DfOper.divi([self.stock.values_energy.loc[:,year].to_frame(), self.throughput], expandable=(False,False), collapsible=(True,True)).fillna(1)
oversupply_factor.replace(np.inf,1,inplace=True)
oversupply_factor[oversupply_factor<1] = 1
if (oversupply_factor.values>1).any():
return oversupply_factor
else:
return None
else:
return None
    def adjust_energy(self,oversupply_factor,year):
        """Scale down stock energy in `year` by the oversupply factor so supply matches requirement."""
#        self.capacity_factor.values.loc[:,year] = util.DfOper.mult([self.capacity_factor.values.loc[:,year].to_frame(),1/oversupply_factor])
        self.stock.values_energy.loc[:,year] = util.DfOper.mult([self.stock.values_energy.loc[:,year].to_frame(),1/oversupply_factor])
def create_costs(self):
ids = util.sql_read_table('SupplyCost',column_names='id',supply_node_id=self.id,return_iterable=True)
for id in ids:
self.add_costs(id)
def add_costs(self, id, **kwargs):
"""
add cost object to a supply stock node
"""
if id in self.costs:
# ToDo note that a node by the same name was added twice
return
else:
self.costs[id] = SupplyCost(id,self.cost_of_capital)
    def calculate_costs(self,year, loop):
        """Aggregate levelized, annual, and embodied costs for the node in `year`.

        Lazily initializes the cost result frames on first call, then sums the
        dynamic (throughput-correlated) and static revenue requirements of each
        SupplyCost and converts the total into an embodied cost per unit of
        throughput.
        """
        start_year = min(self.years)
        if not hasattr(self,'levelized_costs'):
            # first call: create zero-filled result frames on the rollover index
            index = pd.MultiIndex.from_product(self.rollover_group_levels, names=self.rollover_group_names)
            self.levelized_costs = util.empty_df(index, columns=self.years,fill_value=0.)
            self.embodied_cost = util.empty_df(index, columns=self.years,fill_value=0.)
            self.embodied_cost = util.remove_df_levels(self.embodied_cost,'resource_bin')
        if not hasattr(self,'annual_costs'):
            index = self.stock.sales.index
            self.annual_costs = util.empty_df(index, columns=['value'],fill_value=0.)
        # reset the year's accumulator before summing over costs
        self.levelized_costs[year] = 0
        for cost in self.costs.values():
            rev_req = util.DfOper.add([self.calculate_dynamic_supply_costs(cost, year, start_year), self.calculate_static_supply_costs(cost, year, start_year)])
            self.levelized_costs.loc[:,year] += rev_req.values.flatten()
            self.calculate_lump_costs(year, rev_req)
            # remembered so next year's capital-cost residual can be computed
            cost.prev_yr_rev_req = rev_req
        # embodied cost per unit of throughput; division artifacts are zeroed out
        self.embodied_cost.loc[:,year] = util.DfOper.divi([self.levelized_costs[year].to_frame(), self.throughput],expandable=(False,False)).replace([np.inf,-np.inf,np.nan,-np.nan],[0,0,0,0]).values
        self.active_embodied_cost = util.expand_multi(self.embodied_cost[year].to_frame(), levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],levels_names=[cfg.primary_geography,'demand_sector'])
    def calculate_dynamic_supply_costs(self, cost,year,start_year):
        """Return a revenue requirement for the component of costs that is correlated with throughput.

        Capital costs are scaled by a growth multiplier (initial vs. current
        financial/physical stock ratio); energy-denominated costs are further
        scaled by a capacity-factor multiplier. The result is weighted by
        cost.throughput_correlation. Returns None when the cost has no data.

        Raises:
            ValueError: for unimplemented supply_cost_type values.
        """
        if cost.data is True:
            #determine increased tariffs due to growth rate (financial/physical stock ratio)
            limited_names = self.rollover_group_names if len(self.rollover_group_names)>1 else self.rollover_group_names[0]
            first_yr_financial_stock = self.stock.values_financial.groupby(level=limited_names).sum()[start_year].to_frame()
            first_yr_stock = self.stock.values.groupby(level= limited_names).sum()[start_year].to_frame()
            financial_stock = self.stock.values_financial.groupby(level= limited_names).sum()[year].to_frame()
            stock = self.stock.values.groupby(level= limited_names).sum()[year].to_frame()
            first_yr_energy = self.stock.values_energy.groupby(level=limited_names).sum()[start_year].to_frame()
            # growth multiplier: how the physical/financial stock ratio has shifted since the start year
            ini_growth_ratio = util.DfOper.divi([first_yr_stock,first_yr_financial_stock]).replace([np.inf,-np.inf],0)
            growth_ratio = util.DfOper.divi([stock, financial_stock]).replace([np.inf,-np.inf],0)
            growth_mult = util.DfOper.divi([ini_growth_ratio,growth_ratio]).replace([np.nan,-np.inf,np.inf],1)
            # capacity-factor multiplier: stock energy relative to actual flow
            stock_energy = self.stock.values_energy.groupby(level= limited_names).sum()[year].to_frame()
            flow = self.active_supply.groupby(level= limited_names).sum()[year].to_frame()
            cap_factor_mult = util.DfOper.divi([stock_energy,flow]).replace([np.nan,-np.inf,np.inf],1)
            if cost.supply_cost_type == 'tariff':
                tariff = cost.values_level_no_vintage[year].to_frame()
                if cost.capacity is True:
                    # capacity tariff applies to capacity stock directly
                    rev_req = util.DfOper.mult([tariff,stock],expandable=(False,False))[year].to_frame()
                    if cost.is_capital_cost:
                        return util.DfOper.mult([rev_req,growth_mult]) * cost.throughput_correlation
                    else:
                        return rev_req* cost.throughput_correlation
                else:
                    # energy tariff: recompute multipliers including capacity-factor drift since start_year
                    stock_energy = self.stock.values_energy.groupby(level= limited_names).sum()[year].to_frame()
                    flow = self.active_supply.groupby(level= limited_names).sum()[year].to_frame()
                    cap_factor_mult = util.DfOper.mult([util.DfOper.divi([stock_energy,flow]).replace([np.nan,-np.inf,np.inf],1), util.DfOper.divi([self.capacity_factor.values[start_year].to_frame(),self.capacity_factor.values[year].to_frame()]).replace([np.nan,-np.inf,np.inf],1)])
                    rev_req = util.DfOper.mult([tariff,self.active_supply],expandable=(False,False))[year].to_frame()
                    if len(rev_req.index.names)==1:
                        rev_names = rev_req.index.names[0]
                    else:
                        rev_names = rev_req.index.names
                    # apportion the revenue requirement by each group's share of financial stock
                    rev_req = util.DfOper.mult([rev_req,financial_stock.groupby(level=rev_names).transform(lambda x: x/x.sum())]).replace([np.inf,-np.inf],0)
                    if cost.is_capital_cost:
                        return util.DfOper.mult([rev_req,cap_factor_mult,growth_mult]) * cost.throughput_correlation
                    else:
                        return util.DfOper.mult([rev_req,cap_factor_mult]) * cost.throughput_correlation
            elif cost.supply_cost_type == 'revenue requirement':
                # derive an implied tariff from the start-year revenue requirement and energy
                rev_req = cost.values_level_no_vintage[start_year].to_frame()
                tariff = util.DfOper.divi([rev_req,first_yr_energy]).replace([np.nan,-np.inf,np.inf],1)
                tariff.columns = [year]
                rev_req = util.DfOper.mult([tariff,self.active_supply],expandable=(False,False))[year].to_frame()
                stock_energy = self.stock.values_energy.groupby(level= limited_names).sum()[year].to_frame()
                flow = self.active_supply.groupby(level= limited_names).sum()[year].to_frame()
                cap_factor_mult = util.DfOper.divi([stock_energy,flow]).replace([np.nan,-np.inf,np.inf],1)
                return util.DfOper.mult([rev_req,cap_factor_mult,growth_mult]) * cost.throughput_correlation
            else:
                raise ValueError("investment cost types not implemented")
def calculate_static_supply_costs(self,cost,year,start_year):
"returns a revenue requirement for the component of costs that is not correlated with throughput"
first_yr_stock = self.stock.values.groupby(level= self.rollover_group_names).sum()[start_year].to_frame()
first_yr_energy = self.stock.values_energy.groupby(level=self.rollover_group_names).sum()[start_year].to_frame()
if cost.supply_cost_type == 'tariff':
tariff = cost.values_level_no_vintage[year].to_frame()
if cost.capacity is True:
rev_req = util.DfOper.mult([tariff,first_yr_stock],expandable=(False,False))[year].to_frame()
else:
rev_req = util.DfOper.mult([tariff,first_yr_energy],expandable=(False,False))[year].to_frame()
elif cost.supply_cost_type == 'revenue requirement':
rev_req = cost.values_level_no_vintage[year].to_frame()
return rev_req * (1- cost.throughput_correlation)
    def calculate_lump_costs(self,year, rev_req):
        """Allocate the year's revenue requirement to the current sales vintage as annual costs.

        Only capital costs are allocated; non-capital costs are skipped.
        """
        start_year = min(self.years)
        # zero out this vintage's slot before accumulating across costs
        indexer = util.level_specific_indexer(self.annual_costs,'vintage',year)
        self.annual_costs.loc[indexer,:] = 0
        for cost in self.costs.values():
            if cost.raw_values is not None:
                financial_stock = self.stock.values_financial.groupby(level= self.rollover_group_names).sum()[year].to_frame()
                if cost.is_capital_cost:
                    if year == start_year:
                        annual_costs = rev_req/(cost.book_life)
                    else:
                        rem_rev_req = cost.prev_yr_rev_req * (1 - 1/self.book_life)
                        new_rev_req = util.DfOper.subt([rev_req,rem_rev_req])
                        annual_costs = new_rev_req * cost.book_life
                    # NOTE(review): annual_costs computed above is immediately overwritten by the
                    # sales-ratio allocation below, making both branches dead code — confirm intent
                    sales = util.df_slice(self.stock.sales,start_year,'vintage')
                    sales.columns = [start_year]
                    ratio = util.DfOper.divi([sales,financial_stock]).replace([np.nan,-np.inf,np.inf],0)
                    annual_costs = util.DfOper.mult([ratio, rev_req])
                    indexer = util.level_specific_indexer(self.annual_costs,'vintage',year)
                    self.annual_costs.loc[indexer,:] += annual_costs.values
                else:
                    # non-capital costs are not lumped to vintages
                    pass
    def calculate_capacity_utilization(self, energy_supply, supply_years):
        """Capacity utilization = delivered energy / maximum possible stock energy per year."""
        # stock is per-hour capacity; multiply by hours-per-year to get max annual energy
        energy_stock = self.stock.values[supply_years] * util.unit_convert(1,unit_from_den='hour', unit_to_den='year')
        self.capacity_utilization = util.DfOper.divi([energy_supply,energy_stock],expandable=False).replace([np.inf,np.nan,-np.nan],[0,0,0])
class SupplyCapacityFactor(Abstract):
    """Capacity-factor data for a supply node, loaded from the database and
    reshaped to a wide (index x year) frame during calculate()."""
    def __init__(self, id, scenario, **kwargs):
        self.id = id
        self.scenario = scenario
        # capacity factors are intensity (per-unit) data
        self.input_type = 'intensity'
        self.sql_id_table = 'SupplyCapacityFactor'
        self.sql_data_table = 'SupplyCapacityFactorData'
        Abstract.__init__(self, self.id, 'supply_node_id')

    def calculate(self, years, demand_sectors):
        """Remap raw data and pivot it so years become columns."""
        self.years = years
        self.demand_sectors = demand_sectors
        if self.data is True:
            self.remap()
            pivoted = self.values.unstack('year')
            pivoted.columns = pivoted.columns.droplevel()
            self.values = pivoted
class SupplyPotential(Abstract):
    """Resource potential (supply curve) data for a supply node.

    Holds total resource potential by geography/resource bin and provides
    helpers to distribute throughput across supply-curve bins and to format
    potential vs. supply for constraint checking.
    """
    def __init__(self, id, enforce_potential_constraint, scenario, **kwargs):
        self.id = id
        self.input_type = 'total'
        self.sql_id_table = 'SupplyPotential'
        self.sql_data_table = 'SupplyPotentialData'
        self.enforce_potential_constraint = enforce_potential_constraint
        self.scenario = scenario
        Abstract.__init__(self, self.id, 'supply_node_id')

    def calculate(self, conversion, resource_unit):
        """Remap raw potential data and convert it to model energy units."""
        if self.data is True:
            self.conversion = conversion
            self.resource_unit=resource_unit
            self.remap(filter_geo=False)
            if self.enforce_potential_constraint!=True:
                # constraint not enforced: drop empty bins and make the top bin effectively unlimited
                self.values = self.values[self.values.values>0]
                max_bin = np.asarray(list(set(self.values.index.get_level_values('resource_bin')))).max()
                indexer = util.level_specific_indexer(self.values,'resource_bin',max_bin)
                self.values.loc[indexer,:] = self.values.loc[indexer,:] * 1E6
            self.convert()

    def convert(self):
        """Pivot values by year and convert them to energy/year, setting the supply curve."""
        self.values = self.values.unstack(level='year')
        self.values.columns = self.values.columns.droplevel()
        if hasattr(self,'time_unit') and self.time_unit is None:
            self.time_unit = 'year'
        if self.conversion is not None:
            # if a conversion is necessary, it means that original input values are in resource
            # (not energy) terms; values become the original values multiplied by the conversion dataframe
            self.values = DfOper.mult([self.values, self.conversion.values],fill_value=self.conversion.values.mean().mean())
            self.supply_curve = self.values
        else:
            if util.determ_energy(self.unit):
                self.values = util.unit_convert(self.values, unit_from_num=self.unit, unit_from_den=self.time_unit, unit_to_num=cfg.calculation_energy_unit, unit_to_den='year')
            else:
                raise ValueError('unit is not an energy unit and no resource conversion has been entered in node %s' %self.id)
            self.supply_curve = self.values

    def remap_to_potential(self, active_throughput, year, tradable_geography=None):
        """Distribute `active_throughput` across the potential supply-curve bins for `year`."""
        #original supply curve represents an annual timeslice
        primary_geography = cfg.primary_geography
        self.active_throughput = active_throughput
        original_supply_curve = util.remove_df_levels(self.supply_curve.loc[:,year].to_frame().sort(),[x for x in self.supply_curve.loc[:,year].index.names if x not in [primary_geography, 'resource_bin', 'demand_sector']])
        self.active_supply_curve = util.remove_df_levels(original_supply_curve,[x for x in self.supply_curve.loc[:,year].index.names if x not in [primary_geography, 'resource_bin', 'demand_sector']])
        if tradable_geography is not None and tradable_geography!=primary_geography:
            # map both the curve and the throughput to the tradable geography before filling bins
            map_df = cfg.geo.map_df(primary_geography,tradable_geography,normalize_as='total',eliminate_zeros=False,filter_geo=False)
            original_supply_curve = util.DfOper.mult([map_df,original_supply_curve])
            self.geo_map(converted_geography=tradable_geography, attr='active_supply_curve', inplace=True, current_geography=primary_geography,filter_geo=False)
            self.geo_map(converted_geography=tradable_geography, attr='active_throughput', inplace=True, current_geography=primary_geography,filter_geo=False)
        # cumulative curve caps how much throughput each bin can absorb
        self.active_supply_curve = self.active_supply_curve.groupby(level=[x for x in self.active_supply_curve.index.names if x not in 'resource_bin']).cumsum()
        reindexed_throughput = util.DfOper.none([self.active_throughput,self.active_supply_curve],expandable=(True,False),collapsible=(True,True))
        self.active_supply_curve = util.expand_multi(self.active_supply_curve, reindexed_throughput.index.levels,reindexed_throughput.index.names)
        reindexed_throughput = util.DfOper.none([self.active_throughput,self.active_supply_curve],expandable=(True,False),collapsible=(True,True))
        bin_supply_curve = copy.deepcopy(self.active_supply_curve)
        try:
            bin_supply_curve[bin_supply_curve>reindexed_throughput] = reindexed_throughput
        except:
            # fix: previously dropped into pdb.set_trace(); log the misalignment and re-raise
            logging.error('unable to cap supply curve with throughput for node ' + str(self.id) + ' in year ' + str(year))
            raise
        # diff the capped cumulative curve back into per-bin increments
        self.active_supply_curve = bin_supply_curve.groupby(level=util.ix_excl(bin_supply_curve,'resource_bin')).diff().fillna(bin_supply_curve)
        if tradable_geography is not None and tradable_geography!=primary_geography:
            # allocate results back from the tradable geography to the primary geography
            normalized = original_supply_curve.groupby(level=[tradable_geography,'resource_bin']).transform(lambda x: x/x.sum())
            self.active_supply_curve = util.remove_df_levels(util.DfOper.mult([normalized,self.active_supply_curve]),tradable_geography)
        self.active_supply_curve.columns = ['value']

    def remap_to_potential_and_normalize(self, active_throughput, year, tradable_geography=None):
        """Set active_supply_curve_normal to the proportion of the supply curve that is in each bin."""
        self.remap_to_potential(active_throughput, year, tradable_geography)
        self.active_supply_curve_normal= self.active_supply_curve.groupby(level=cfg.primary_geography).transform(lambda x:x/x.sum()).fillna(0.)

    def format_potential_and_supply_for_constraint_check(self,active_supply, tradable_geography, year):
        """Geo-map potential and supply to `tradable_geography` and align their index levels.

        Returns:
            (active_geomapped_potential, active_geomapped_supply) with only their shared index levels.
        """
        if tradable_geography == cfg.primary_geography:
            self.active_potential = self.values.loc[:,year].to_frame()
            self.active_geomapped_potential = self.active_potential
            self.active_geomapped_supply = active_supply
        else:
            self.active_potential = self.values.loc[:,year].to_frame()
            self.active_geomapped_potential = self.geo_map(converted_geography=tradable_geography, attr='active_potential', inplace=False, current_geography=cfg.primary_geography, current_data_type='total',filter_geo=False)
            self.active_geomapped_supply = active_supply
            self.active_geomapped_supply = self.geo_map(converted_geography=tradable_geography, attr='active_geomapped_supply', inplace=False, current_geography=cfg.primary_geography, current_data_type='total',filter_geo=False)
        # fix: intersect potential levels with supply levels (the original intersected supply
        # with itself — a no-op that left extra supply levels in place before comparison)
        levels = util.intersect(self.active_geomapped_potential.index.names, self.active_geomapped_supply.index.names)
        disallowed_potential_levels = [x for x in self.active_geomapped_potential.index.names if x not in levels]
        disallowed_supply_levels = [x for x in self.active_geomapped_supply.index.names if x not in levels]
        if len(disallowed_potential_levels):
            self.active_geomapped_potential = util.remove_df_levels(self.active_geomapped_potential, disallowed_potential_levels)
        if len(disallowed_supply_levels):
            self.active_geomapped_supply= util.remove_df_levels(self.active_geomapped_supply, disallowed_supply_levels)
        return self.active_geomapped_potential, self.active_geomapped_supply
class SupplyCost(Abstract):
    """A single cost record attached to a supply node.

    Loads raw cost data, converts it to model currency and units, and
    levelizes it over the cost's book life at the node's cost of capital.
    """
    def __init__(self, id, cost_of_capital, **kwargs):
        self.id = id
        self.sql_id_table = 'SupplyCost'
        self.sql_data_table = 'SupplyCostData'
        Abstract.__init__(self, self.id, primary_key='id', data_id_key='parent_id')
        # fall back to the node-level cost of capital when the cost record has none
        if self.cost_of_capital == None:
            self.cost_of_capital = cost_of_capital
    def calculate(self, years, demand_sectors):
        """Remap, convert, and levelize the cost data over the model horizon."""
        self.years = years
        # an extra leading vintage represents pre-model-start stock
        self.vintages = [years[0]-1] + years
        self.demand_sectors = demand_sectors
        if self.data is True:
            self.determ_input_type()
            self.remap(lower=None)
            self.convert()
            self.levelize_costs()
    def determ_input_type(self):
        """Determine whether the input cost is a total or an intensity value."""
        if self.supply_cost_type == 'revenue requirement':
            self.input_type = 'total'
        elif self.supply_cost_type == 'tariff' or self.cost_type == 'investment':
            self.input_type = 'intensity'
        else:
            logging.error("incompatible cost type entry in cost %s" % self.id)
            raise ValueError
    def convert(self):
        """
        convert raw_values to model currency and capacity (energy_unit/time_step)
        """
        self.values = util.currency_convert(self.values, self.currency_id, self.currency_year_id)
        model_energy_unit = cfg.calculation_energy_unit
        model_time_step = cfg.cfgfile.get('case', 'time_step')
        if self.input_type == 'intensity':
            if self.time_unit is not None:
                # if a cost has a time_unit, then the unit is energy and must be converted to capacity
                self.values = util.unit_convert(self.values,
                                                unit_from_den=self.energy_or_capacity_unit,
                                                unit_from_num=self.time_unit, unit_to_den=model_energy_unit,
                                                unit_to_num=model_time_step)
                self.capacity = False
            else:
                # if a cost is a capacity unit, the model must convert the unit type to an energy unit for conversion ()
                if util.determ_energy(self.energy_or_capacity_unit):
                    self.values = util.unit_convert(self.values, unit_from_den =self.energy_or_capacity_unit, unit_to_den=model_energy_unit)
                    self.capacity = False
                else:
                    self.values = util.unit_convert(self.values,
                                                    unit_from_den=cfg.ureg.Quantity(self.energy_or_capacity_unit)*cfg.ureg.Quantity(model_time_step),
                                                    unit_from_num=model_time_step,
                                                    unit_to_den=model_energy_unit,
                                                    unit_to_num=model_time_step)
                    self.capacity = True
        else:
            # totals (revenue requirements) need no unit conversion
            self.capacity = True
    def levelize_costs(self):
        """Levelize costs over the book life at the real (inflation-adjusted) discount rate."""
        inflation = float(cfg.cfgfile.get('case', 'inflation_rate'))
        rate = self.cost_of_capital - inflation
        if self.supply_cost_type == 'investment':
            # annuity payment that amortizes the investment over its book life
            self.values_level = - np.pmt(rate, self.book_life, 1, 0, 'end') * self.values
#            util.convert_age(self, vintages=self.vintages, years=self.years, attr_from='values_level', attr_to='values_level', reverse=False)
        elif self.supply_cost_type == 'tariff' or self.supply_cost_type == 'revenue requirement':
            self.values_level = self.values.copy()
#            util.convert_age(self, vintages=self.vintages, years=self.years, attr_from='values_level', attr_to='values_level', reverse=False)
            # convert annual payments to a present value over the book life
            self.values = np.pv(rate, self.book_life, -1, 0, 'end') * self.values
            util.replace_index_name(self.values,'vintage','year')
        self.values_level_no_vintage = self.values_level
        # replicate the levelized values across all vintages, then pivot years to columns
        keys = self.vintages
        name = ['vintage']
        self.values_level= pd.concat([self.values_level_no_vintage]*len(keys), keys=keys, names=name)
        self.values_level = self.values_level.swaplevel('vintage', -1)
        self.values_level = self.values_level.unstack('year')
        self.values_level.columns = self.values_level.columns.droplevel()
        self.values_level_no_vintage = self.values_level_no_vintage.unstack('year')
        self.values_level_no_vintage.columns = self.values_level_no_vintage.columns.droplevel()
class SupplyCoefficients(Abstract):
    """Input (efficiency) coefficients for a supply node.

    Converts raw efficiency values to model units, remaps them, and reshapes
    the result to a wide (index x year) frame expanded across demand sectors.
    """
    def __init__(self, id, scenario, **kwargs):
        self.id = id
        # coefficients are per-unit (intensity) data
        self.input_type = 'intensity'
        self.sql_id_table = 'SupplyEfficiency'
        self.sql_data_table = 'SupplyEfficiencyData'
        self.scenario = scenario
        Abstract.__init__(self, self.id, primary_key='id', data_id_key='parent_id')
    def calculate(self, years, demand_sectors):
        """Convert, remap, and reshape the coefficient data. Order matters:
        unit conversion must precede remapping."""
        self.years = years
        self.demand_sectors = demand_sectors
        if self.data is True:
            self.convert()
            self.remap(map_from='values',lower=None)
            self.values = self.values.unstack(level='year')
            self.values.columns = self.values.columns.droplevel()
            #TODO fix
            if 'demand_sector' not in self.values.index.names:
                # replicate values across all demand sectors when the data is sector-agnostic
                keys = self.demand_sectors
                names = ['demand_sector']
                self.values = pd.concat([self.values]*len(keys),keys=keys,names=names)
            self.values = self.values.reorder_levels([x for x in [cfg.primary_geography,'demand_sector', 'efficiency_type', 'supply_node','resource_bin'] if x in self.values.index.names])
            self.values = self.values.sort()
    def convert(self):
        """
        return raw_values that are converted to units consistent with output units for
        normalized efficiency values
        """
        self.values = util.unit_convert(self.raw_values, unit_from_num=self.input_unit, unit_to_num=self.output_unit)
class SupplyStockNode(Node):
    def __init__(self, id, supply_type, scenario, **kwargs):
        """Set up a technology-based supply node.

        Copies the node's SupplyNodes row onto the instance, then builds its
        potential constraint, technology dictionary, and stock container.
        """
        Node.__init__(self, id, supply_type, scenario)
        # copy every column of the node's SupplyNodes row onto the instance
        for col, att in util.object_att_from_table('SupplyNodes', id):
            setattr(self, col, att)
        self.potential = SupplyPotential(self.id, self.enforce_potential_constraint, self.scenario)
        self.technologies = {}
        self.tech_ids = []
        self.add_technologies()
        # self.add_nodes()
        self.add_stock()
def calculate_oversupply(self, year, loop):
"""calculates whether the stock would oversupply the IO requirement and returns an oversupply adjustment factor."""
if hasattr(self,'stock'):
oversupply_factor = DfOper.divi([self.stock.values_energy.loc[:,year].to_frame(), self.throughput], expandable=False, collapsible=True).fillna(1)
oversupply_factor.replace(np.inf, 1, inplace=True)
oversupply_factor[oversupply_factor<1] = 1
if (oversupply_factor.values>1.000000001).any():
self.oversupply_factor = oversupply_factor
#TODO fix
return oversupply_factor
else:
return None
else:
return None
def adjust_energy(self,oversupply_factor,year):
# self.stock.capacity_factor.loc[:,year] = util.DfOper.mult([self.stock.capacity_factor.loc[:,year].to_frame(),1/oversupply_factor])
self.stock.values_energy.loc[:,year] = util.DfOper.mult([self.stock.values_energy.loc[:,year].to_frame(),1/oversupply_factor])
    def set_rollover_groups(self):
        """sets the internal index for use in stock and cost calculations

        Scans the stock data, potential data, and every technology attribute for
        'resource_bin' / 'demand_sector' index levels; any found are tracked as
        rollover group levels. The primary geography is always prepended last.
        """
        # determines whether stock rollover needs to occur on demand sector or resource bin index
        self.stock.rollover_group_levels = []
        self.stock.rollover_group_names = []
        # 1) scan the node's own stock data
        if self.stock.data is True:
            for name, level in zip(self.stock.raw_values.index.names, self.stock.raw_values.index.levels):
                if (name == 'resource_bin' or name == 'demand_sector') and name not in self.stock.rollover_group_names:
                    if name == 'demand_sector':
                        # always track the full sector list, not just sectors present in the data
                        level = self.demand_sectors
                    self.stock.rollover_group_levels.append(list(level))
                    self.stock.rollover_group_names.append(name)
                elif name == 'resource_bin' or name == 'demand_sector':
                    # level already tracked: union in any new elements
                    original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)]
                    new_levels = list(set(original_levels+list(level)))
                    self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)] = new_levels
        # 2) scan the potential data the same way
        if self.potential.data is True:
            for name, level in zip(self.potential.raw_values.index.names, self.potential.raw_values.index.levels):
                if (name == 'resource_bin' or name == 'demand_sector') and name not in self.stock.rollover_group_names:
                    if name == 'demand_sector':
                        level = self.demand_sectors
                    self.stock.rollover_group_levels.append(list(level))
                    self.stock.rollover_group_names.append(name)
                elif name == 'resource_bin' or name == 'demand_sector':
                    original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)]
                    new_levels = list(set(original_levels+list(level)))
                    self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)] = new_levels
        # 3) scan every raw-value-bearing attribute class on each technology
        for technology in self.technologies.values():
            attributes = vars (technology)
            for att in attributes:
                obj = getattr(technology, att)
                if inspect.isclass(type(obj)) and hasattr(obj, '__dict__') and hasattr(obj, 'raw_values') and obj.raw_values is not None:
                    for name, level in zip(obj.raw_values.index.names, obj.raw_values.index.levels):
                        if (name == 'resource_bin' or name == 'demand_sector') and name not in self.stock.rollover_group_names:
                            if name == 'demand_sector':
                                level = self.demand_sectors
                            self.stock.rollover_group_levels.append(list(level))
                            self.stock.rollover_group_names.append(name)
                        elif name == 'resource_bin' or name == 'demand_sector':
                            original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)]
                            new_levels = list(set(original_levels+list(level)))
                            self.stock.rollover_group_levels[self.stock.rollover_group_names.index(name)] = new_levels
        if self.id == self.distribution_grid_node_id and 'demand_sector' not in self.stock.rollover_group_names:
            #requires distribution grid node to maintain demand sector resolution in its stocks
            self.stock.rollover_group_levels.append(self.demand_sectors)
            self.stock.rollover_group_names.append('demand_sector')
        elif self.id == self.distribution_grid_node_id:
            original_levels = self.stock.rollover_group_levels[self.stock.rollover_group_names.index('demand_sector')]
            new_levels = list(set(original_levels+self.demand_sectors))
            self.stock.rollover_group_levels[self.stock.rollover_group_names.index('demand_sector')] = new_levels
        # the primary geography always leads the rollover grouping
        self.stock.rollover_group_names = [cfg.primary_geography] + self.stock.rollover_group_names
        self.stock.rollover_group_levels = [cfg.geo.geographies[cfg.primary_geography]] + self.stock.rollover_group_levels
    def add_stock(self):
        """add stock instance to node"""
        # 'total' input_type: stock inputs are absolute quantities, not shares
        self.stock = Stock(id=self.id, drivers=None,sql_id_table='SupplyStock', sql_data_table='SupplyStockData', primary_key='supply_node_id', scenario=self.scenario)
        self.stock.input_type = 'total'
    def calculate(self):
        """Run the node's full setup sequence, ending in the initial stock rollover.

        Each step builds state that the later steps read (e.g. rollover groups
        are needed before stock measures and the rollover itself).
        """
        #all nodes can have potential conversions. Set to None if no data.
        self.add_nodes()
        self.conversion, self.resource_unit = self.add_conversion()
        self.set_rollover_groups()
        self.calculate_subclasses()
        self.calculate_stock_measures()
        self.add_case_stock()
        self.set_adjustments()
        self.set_pass_through_df_dict()
        self.setup_stock_rollover(self.years)
    def calculate_input_stock(self):
        """calculates the technology stocks in a node based on the combination of measure-stocks and reference stocks

        Produces self.stock.technology (per-technology) and self.stock.total
        stocks in wide (year-column) form. Case-specific (scenario) stocks
        override reference stocks where specified; NaNs in the case stock are
        filled from the reference stock.
        """
        # full target index: rollover groups x years x technologies
        levels = self.stock.rollover_group_levels + [self.years] + [self.tech_ids]
        names = self.stock.rollover_group_names + ['year'] + ['supply_technology']
        index = pd.MultiIndex.from_product(levels,names=names)
        # --- technology-level stock ---
        if self.stock.data is True and 'supply_technology' in self.stock.raw_values.index.names:
            #remap to technology stocks
            self.stock.years = self.years
            self.stock.remap(map_from='raw_values', map_to='technology',fill_timeseries=True, fill_value=np.nan)
            #TODO add to clean timeseries. Don't allow filling of timseries before raw values.
            # blank out values backfilled to years earlier than any raw data
            self.stock.technology[self.stock.technology.index.get_level_values('year')<min(self.stock.raw_values.index.get_level_values('year'))] = np.nan
            self.convert_stock('stock', 'technology')
            self.stock.technology = self.stock.technology.reorder_levels(names)
            self.stock.technology = self.stock.technology.reindex(index)
            #if there's case_specific stock data, we must use that to replace reference technology stocks
            if hasattr(self.case_stock,'technology'):
                # if there are levels in the case specific stock that are not in the reference stock, we must remove that level from the case stock
                mismatched_levels = [x for x in self.case_stock.technology.index.names if x not in self.stock.technology.index.names]
                if len(mismatched_levels):
                    self.case_stock.technology= util.remove_df_levels(self.case_stock.technology,mismatched_levels)
                #if there are still level mismatches, it means the reference stock has more levels, which returns an error
                if np.any(util.difference_in_df_names(self.case_stock.technology, self.stock.technology,return_bool=True)):
                    raise ValueError("technology stock indices in node %s do not match input energy system stock data" %self.id)
                else:
                    #if the previous test is passed, we use the reference stock to fill in the Nans of the case stock
                    self.case_stock.technology = self.case_stock.technology.reorder_levels(names)
                    self.case_stock.technology = self.case_stock.technology.reindex(index)
                    self.stock.technology = self.case_stock.technology.fillna(self.stock.technology)
            # pivot years into columns and make sure every technology has a column
            self.stock.technology = self.stock.technology.unstack('year')
            self.stock.technology.columns = self.stock.technology.columns.droplevel()
            self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'supply_technology',self.tech_ids)
        elif hasattr(self.case_stock,'technology'):
            # no reference technology stock: the case stock alone defines technology stocks
            # if there are levels in the case specific stock that are not in the rollover groups, we must remove that level from the case stock
            mismatched_levels = [x for x in self.case_stock.technology.index.names if x not in names]
            if len(mismatched_levels):
                self.case_stock.technology = util.remove_df_levels(self.case_stock.technology,mismatched_levels)
            #if there are still level mismatches, it means the rollover has more levels, which returns an error
            if len([x for x in self.stock.rollover_group_names if x not in self.case_stock.technology.index.names]) :
                raise ValueError("technology stock levels in node %s do not match other node input data" %self.id)
            else:
                #if the previous test is passed we reindex the case stock for unspecified technologies
                self.case_stock.technology = self.case_stock.technology.reorder_levels(names)
                # NOTE(review): structure_df is built here but never used in this branch
                structure_df = pd.DataFrame(1,index=index,columns=['value'])
                self.case_stock.technology = self.case_stock.technology.reindex(index)
                self.stock.technology = self.case_stock.technology
                self.stock.technology = self.stock.technology.unstack('year')
                self.stock.technology.columns = self.stock.technology.columns.droplevel()
                self.stock.technology = util.reindex_df_level_with_new_elements(self.stock.technology,'supply_technology',self.tech_ids)
        else:
            # no technology stock data at all: start from an all-NaN frame
            levels = self.stock.rollover_group_levels + [self.tech_ids]
            names = self.stock.rollover_group_names + ['supply_technology']
            index = pd.MultiIndex.from_product(levels,names=names)
            self.stock.technology = util.empty_df(index=index,columns=self.years,fill_value=np.NaN)
        # --- total (technology-agnostic) stock ---
        if self.stock.data is True and 'supply_technology' not in self.stock.raw_values.index.names:
            levels = self.stock.rollover_group_levels + [self.years]
            names = self.stock.rollover_group_names + ['year']
            index = pd.MultiIndex.from_product(levels,names=names)
            # structure_df broadcasts totals onto the full rollover index
            structure_df = pd.DataFrame(1,index=index,columns=['value'])
            self.stock.remap(map_from='raw_values', map_to='total', time_index = self.years,fill_timeseries=True, fill_value=np.nan)
            #TODO add to clean timeseries. Don't allow filling of timseries before raw values.
            self.stock.total[self.stock.total.index.get_level_values('year')<min(self.stock.raw_values.index.get_level_values('year'))] = np.nan
            self.stock.total = DfOper.mult([self.stock.total,structure_df],fill_value=np.nan)
            self.convert_stock('stock', 'total')
            if hasattr(self.case_stock,'total'):
                mismatched_levels = [x for x in self.case_stock.total.index.names if x not in names]
                if len(mismatched_levels):
                    self.case_stock.total = util.remove_df_levels(self.case_stock.total,mismatched_levels)
                #if there are still level mismatches, it means the reference stock has more levels, which returns an error
                if np.any(util.difference_in_df_names(self.case_stock.total, self.stock.total,return_bool=True)):
                    raise ValueError("total stock indices in node %s do not match input energy system stock data" %self.id)
                else:
                    #if the previous test is passed, we use the reference stock to fill in the Nans of the case stock
                    self.case_stock.total= self.case_stock.total.reorder_levels(names)
                    self.stock.total = self.stock.total.reorder_levels(names)
                    structure_df = pd.DataFrame(1,index=index,columns=['value'])
                    self.case_stock.total = DfOper.mult([self.case_stock.total,structure_df],fill_value=np.nan)
                    self.stock.total = DfOper.mult([self.stock.total,structure_df],fill_value=np.nan)
                    self.stock.total = self.case_stock.total.fillna(self.stock.total)
            self.stock.total = self.stock.total.unstack('year')
            self.stock.total.columns = self.stock.total.columns.droplevel()
        elif hasattr(self.case_stock,'total'):
            # no reference total stock: the case stock alone defines totals
            levels = self.stock.rollover_group_levels + [self.years]
            names = self.stock.rollover_group_names + ['year']
            index = pd.MultiIndex.from_product(levels,names=names)
            # if there are levels in the case specific stock that are not in the rollover groups, we must remove that level from the case stock
            mismatched_levels = [x for x in self.case_stock.total.index.names if x not in names]
            if len(mismatched_levels):
                self.case_stock.total = util.remove_df_levels(self.case_stock.total,mismatched_levels)
            #if there are still level mismatches, it means the rollover has more levels, which returns an error
            if len([x for x in names if x not in self.case_stock.total.index.names]) :
                raise ValueError("total stock levels in node %s do not match other node input data" %self.id)
            else:
                self.case_stock.total= self.case_stock.total.reorder_levels(names)
                self.case_stock.total = self.case_stock.total.reindex(index)
                self.stock.total = self.case_stock.total
                self.stock.total = self.stock.total.unstack('year')
                self.stock.total.columns = self.stock.total.columns.droplevel()
        else:
            # no total stock data at all: start from an all-NaN frame
            index = pd.MultiIndex.from_product(self.stock.rollover_group_levels,names=self.stock.rollover_group_names)
            self.stock.total = util.empty_df(index=index,columns=self.years,fill_value=np.NaN)
        # the node "has data" if either reference or case stocks were provided
        if self.stock.data or hasattr(self.case_stock,'data') and self.case_stock.data == True:
            self.stock.data = True
        self.max_total()
        self.format_rollover_stocks()
    def max_total(self):
        """Ensure the total stock is never less than the sum of specified technology stocks."""
        tech_sum = util.remove_df_levels(self.stock.technology,'supply_technology')
        # self.stock.total = self.stock.total.fillna(tech_sum)
        if hasattr(self.stock,'total'):
            # raise the total wherever specified technology stocks exceed it
            self.stock.total[self.stock.total<tech_sum] = tech_sum
        else:
            # self.stock.total = tech_sum
            # no total specified: create an all-NaN total with the same shape
            self.stock.total = pd.DataFrame(np.nan, tech_sum.index,tech_sum.columns)
def format_rollover_stocks(self):
#transposed technology stocks are used for entry in the stock rollover function
self.stock.technology_rollover = self.stock.technology.stack(dropna=False)
util.replace_index_name(self.stock.technology_rollover,'year')
self.stock.total_rollover = util.remove_df_levels(self.stock.technology_rollover,'supply_technology')
self.stock.technology_rollover=self.stock.technology_rollover.unstack('supply_technology')
for tech_id in self.tech_ids:
if tech_id not in self.stock.technology_rollover.columns:
self.stock.technology_rollover[tech_id]=np.nan
def add_case_stock(self):
self.case_stock = StockItem()
tech_stocks = []
for technology in self.technologies.values():
for stock in technology.specified_stocks.values():
if stock.values is not None:
stock.values['supply_technology'] = technology.id
stock.values.set_index('supply_technology', append=True, inplace=True)
tech_stocks.append(stock.values)
if len(tech_stocks):
self.case_stock.data = True
self.case_stock.technology = util.DfOper.add(tech_stocks, expandable=False)
self.case_stock.technology[self.case_stock.technology.index.get_level_values('year')<int(cfg.cfgfile.get('case','current_year'))] = np.nan
total_stocks = []
for stock in self.total_stocks.values():
if stock.values is not None:
self.case_stock.data = True
total_stocks.append(stock.values)
if len(total_stocks):
self.case_stock.total = DfOper.add(total_stocks, expandable=False)
self.case_stock.total[self.case_stock.total.index.get_level_values('year')<int(cfg.cfgfile.get('case','current_year'))] = np.nan
# elif len(tech_stocks):
# self.case_stock.total = util.remove_df_levels(self.case_stock.technology,'supply_technology')
def remap_tech_attrs(self, attr_classes, attr='values'):
"""
loops through attr_classes (ex. capital_cost, energy, etc.) in order to map technologies
that reference other technologies in their inputs (i.e. technology A is 150% of the capital cost technology B)
"""
attr_classes = util.ensure_iterable_and_not_string(attr_classes)
for technology in self.technologies.keys():
for attr_class in attr_classes:
self.remap_tech_attr(technology, attr_class, attr)
def remap_tech_attr(self, technology, class_name, attr):
"""
map reference technology values to their associated technology classes
"""
tech_class = getattr(self.technologies[technology], class_name)
if hasattr(tech_class, 'reference_tech_id'):
if getattr(tech_class, 'reference_tech_id'):
ref_tech_id = (getattr(tech_class, 'reference_tech_id'))
if not self.technologies.has_key(ref_tech_id):
raise ValueError("supply node {} has no technology {} to serve as a reference for technology {} in attribute {}".format(self.id, ref_tech_id, technology, class_name))
ref_tech_class = getattr(self.technologies[ref_tech_id], class_name)
# converted is an indicator of whether an input is an absolute
# or has already been converted to an absolute
if not getattr(ref_tech_class, 'absolute'):
# If a technnology hasn't been mapped, recursion is used
# to map it first (this can go multiple layers)
self.remap_tech_attr(getattr(tech_class, 'reference_tech_id'), class_name, attr)
if tech_class.raw_values is not None:
tech_data = getattr(tech_class, attr)
new_data = DfOper.mult([tech_data,
getattr(ref_tech_class, attr)])
if hasattr(ref_tech_class,'values_level'):
new_data_level = DfOper.mult([tech_data,
getattr(ref_tech_class, 'values_level')])
else:
new_data = copy.deepcopy(getattr(ref_tech_class, attr))
if hasattr(ref_tech_class,'values_level'):
new_data_level = copy.deepcopy(getattr(ref_tech_class, 'values_level'))
tech_attributes = vars(getattr(self.technologies[ref_tech_id], class_name))
for attribute_name in tech_attributes.keys():
if not hasattr(tech_class, attribute_name) or getattr(tech_class, attribute_name) is None:
setattr(tech_class, attribute_name,
copy.deepcopy(getattr(ref_tech_class, attribute_name)) if hasattr(ref_tech_class,
attribute_name) else None)
setattr(tech_class, attr, new_data)
if hasattr(ref_tech_class,'values_level'):
setattr(tech_class,'values_level',new_data_level)
# Now that it has been converted, set indicator to true
tech_class.absolute = True
def add_technologies(self):
ids = util.sql_read_table('SupplyTechs',column_names='id',supply_node_id=self.id,return_iterable=True)
for id in ids:
self.add_technology(id)
def add_nodes(self):
self.nodes = []
for technology in self.technologies.values():
if hasattr(technology,'efficiency') and technology.efficiency.raw_values is not None:
for value in technology.efficiency.values.index.get_level_values('supply_node'):
self.nodes.append(value)
self.nodes = list(set(self.nodes))
    def add_technology(self, id, **kwargs):
        """
        Adds technology instances to node

        Silently ignores a technology id that has already been added.
        """
        if id in self.technologies:
            # ToDo note that a technology was added twice
            return
        self.technologies[id] = SupplyTechnology(id, self.cost_of_capital, self.scenario, **kwargs)
        # tech_ids is kept sorted; rollover arrays depend on this ordering
        self.tech_ids.append(id)
        self.tech_ids.sort()
def calculate_stock_measures(self):
for technology in self.technologies.values():
for stock in technology.specified_stocks.values():
stock.calculate(self.vintages,self.years)
stock.convert()
for stock in self.total_stocks.values():
stock.calculate(self.vintages,self.years)
stock.convert()
def calculate_sales_shares(self):
for tech in self.tech_ids:
technology = self.technologies[tech]
technology.calculate_sales_shares('reference_sales_shares')
technology.calculate_sales_shares('sales_shares')
def reconcile_sales_shares(self):
needed_sales_share_levels = self.stock.rollover_group_levels + [self.years]
needed_sales_share_names = self.stock.rollover_group_names + ['vintage']
for technology in self.technologies.values():
technology.reconcile_sales_shares('sales_shares', needed_sales_share_levels, needed_sales_share_names)
technology.reconcile_sales_shares('reference_sales_shares', needed_sales_share_levels,
needed_sales_share_names)
def calculate_sales(self):
for tech in self.tech_ids:
technology = self.technologies[tech]
technology.calculate_sales('reference_sales')
technology.calculate_sales('sales')
def reconcile_sales(self):
needed_sales_share_levels = self.stock.rollover_group_levels + [self.years]
needed_sales_share_names = self.stock.rollover_group_names + ['vintage']
for technology in self.technologies.values():
technology.reconcile_sales('sales', needed_sales_share_levels, needed_sales_share_names)
technology.reconcile_sales('reference_sales', needed_sales_share_levels,
needed_sales_share_names)
    def calculate_total_sales_share(self, elements, levels):
        """Combine measure and reference sales shares into one normalized array.

        Returns (sales_share_array, reference_sales_shares_flag). The flag tells
        the caller whether the result is effectively reference-based, which
        controls how the initial stock is seeded.
        """
        ss_measure = self.helper_calc_sales_share(elements, levels, reference=False)
        # whatever share the measures don't claim is left for the reference shares
        space_for_reference = 1 - np.sum(ss_measure, axis=1)
        ss_reference = self.helper_calc_sales_share(elements, levels, reference=True, space_for_reference=space_for_reference)
        if np.sum(ss_reference)==0:
            # no reference shares input: fall back to an identity matrix (each tech replaces itself)
            ss_reference = SalesShare.scale_reference_array_to_gap( np.tile(np.eye(len(self.tech_ids)), (len(self.years), 1, 1)), space_for_reference)
            #sales shares are always 1 with only one technology so the default can be used as a reference
            if len(self.tech_ids)>1 and np.sum(ss_measure)<1:
                reference_sales_shares = False
            else:
                reference_sales_shares = True
        else:
            reference_sales_shares = True
        # return SalesShare.normalize_array(ss_reference+ss_measure, retiring_must_have_replacement=True)
        # todo make retiring_must_have_replacement true after all data has been put in db
        return SalesShare.normalize_array(ss_reference + ss_measure, retiring_must_have_replacement=False),reference_sales_shares
def calculate_total_sales_share_after_initial(self, elements, levels):
ss_measure = self.helper_calc_sales_share(elements, levels, reference=False)
space_for_reference = 1 - np.sum(ss_measure, axis=1)
ss_reference = SalesShare.scale_reference_array_to_gap( np.tile(np.eye(len(self.tech_ids)), (len(self.years), 1, 1)), space_for_reference)
# return SalesShare.normalize_array(ss_reference+ss_measure, retiring_must_have_replacement=True)
# todo make retiring_must_have_replacement true after all data has been put in db
return SalesShare.normalize_array(ss_reference + ss_measure, retiring_must_have_replacement=False)
def calculate_total_sales(self,elements,levels):
s_measure = self.helper_calc_sales(elements, levels, reference=False)
s_reference = self.helper_calc_sales(elements, levels, reference=True)
# return SalesShare.normalize_array(ss_reference+ss_measure, retiring_must_have_replacement=True)
# todo make retirin)g_must_have_replacement true after all data has been put in db
s_measure = pd.DataFrame(s_measure)
s_reference = pd.DataFrame(s_reference)
s_combined = s_measure.fillna(s_reference).values
return s_combined
def helper_calc_sales(self, elements, levels, reference, space_for_reference=None):
num_techs = len(self.tech_ids)
tech_lookup = dict(zip(self.tech_ids, range(num_techs)))
num_years = len(self.years)
# ['vintage', 'replacing tech id', 'retiring tech id']
ss_array = np.empty(shape=(num_years, num_techs))
ss_array.fill(np.nan)
# tech_ids must be sorted
# normalize ss in one of two ways
if reference:
for tech_id in self.tech_ids:
for sales in self.technologies[tech_id].reference_sales.values():
repl_index = tech_lookup[tech_id]
# technology sales share dataframe may not have all elements of stock dataframe
sales.values.index.levels
if any([element not in sales.values.index.levels[
util.position_in_index(sales.values, level)] for element, level in
zip(elements, levels)]):
continue
ss_array[:, repl_index] = util.df_slice(sales.values, elements, levels).values.T[0]
else:
for tech_id in self.tech_ids:
for sales in self.technologies[tech_id].sales.values():
repl_index = tech_lookup[sales.supply_technology_id]
# TODO address the discrepancy when a demand tech is specified
try:
ss_array[:, repl_index] = util.df_slice(sales.values, elements, levels).values.T[0]
except:
ss_array[:, repl_index] = util.df_slice(sales.values, elements, levels).values.flatten()
return ss_array
def helper_calc_sales_share(self, elements, levels, reference, space_for_reference=None):
num_techs = len(self.tech_ids)
tech_lookup = dict(zip(self.tech_ids, range(num_techs)))
num_years = len(self.years)
# ['vintage', 'replacing tech id', 'retiring tech id']
ss_array = np.zeros(shape=(num_years, num_techs, num_techs))
# tech_ids must be sorted
# normalize ss in one of two ways
if reference:
for tech_id in self.tech_ids:
for sales_share in self.technologies[tech_id].reference_sales_shares.values():
if set(sales_share.values.index.names).issubset(self.stock.sales_share_reconcile.index.names) and set(sales_share.values.index.names)!=set(self.stock.sales_share_reconcile.index.names):
sales_share.values = DfOper.none([sales_share.values,self.stock.sales_share_reconcile],non_expandable_levels=None)
repl_index = tech_lookup[tech_id]
reti_index = slice(None)
# technology sales share dataframe may not have all elements of stock dataframe
ss_array[:, repl_index, reti_index] += util.df_slice(sales_share.values, elements, levels).values
ss_array = SalesShare.scale_reference_array_to_gap(ss_array, space_for_reference)
else:
for tech_id in self.tech_ids:
for sales_share in self.technologies[tech_id].sales_shares.values():
if set(sales_share.values.index.names).issubset(self.stock.sales.index.names) and set(sales_share.values.index.names)!=set(self.stock.sales.index.names):
sales_share.values = util.DfOper.none([sales_share.values,util.remove_df_levels(self.stock.sales_exist,'supply_technology')],non_expandable_levels=None)
repl_index = tech_lookup[tech_id]
repl_index = tech_lookup[sales_share.supply_technology_id]
reti_index = tech_lookup[
sales_share.replaced_supply_technology_id] if sales_share.replaced_supply_technology_id is not None and tech_lookup.has_key(
sales_share.replaced_supply_technology_id) else slice(None)
if sales_share.replaced_supply_technology_id is not None and not tech_lookup.has_key(sales_share.replaced_supply_technology_id):
#sales share specified for a technology not in the model
continue
try:
ss_array[:, repl_index, reti_index] += util.df_slice(sales_share.values, elements,levels).values
except:
ss_array[:, repl_index, reti_index] += util.df_slice(sales_share.values, elements,
levels).values.flatten()
ss_array = SalesShare.cap_array_at_1(ss_array)
return ss_array
def calc_tech_survival_functions(self):
for technology in self.technologies.values():
technology.set_survival_parameters()
technology.set_survival_vintaged()
technology.set_decay_vintaged()
technology.set_decay_initial_stock()
technology.set_survival_initial_stock()
def create_tech_survival_functions(self):
functions = defaultdict(list)
for fun in ['survival_vintaged', 'survival_initial_stock', 'decay_vintaged', 'decay_initial_stock']:
for tech_id in self.tech_ids:
technology = self.technologies[tech_id]
functions[fun].append(getattr(technology, fun))
setattr(self.stock, fun, pd.DataFrame(np.array(functions[fun]).T, columns=self.tech_ids))
def create_rollover_markov_matrices(self):
vintaged_markov = util.create_markov_vector(self.stock.decay_vintaged.values,
self.stock.survival_vintaged.values)
self.stock.vintaged_markov_matrix = util.create_markov_matrix(vintaged_markov, len(self.tech_ids),
len(self.years))
initial_markov = util.create_markov_vector(self.stock.decay_initial_stock.values, self.stock.survival_initial_stock.values)
self.stock.initial_markov_matrix = util.create_markov_matrix(initial_markov, len(self.tech_ids),
len(self.years))
    def setup_stock_rollover(self, years):
        """ Sets up dataframes and inputs to stock rollover before loop commences

        Builds one Rollover object per rollover group, seeds its initial stock
        and sales shares, then runs the rollover for all historical years
        (years before the configured current_year) and records the results.
        """
        #prep stock rollover for initial solve
        self.calc_tech_survival_functions()
        self.calculate_sales_shares()
        self.calculate_sales()
        self.calculate_input_stock()
        self.create_tech_survival_functions()
        self.create_rollover_markov_matrices()
        self.add_stock_dataframes()
        self.setup_financial_stock()
        self.rollover_dict = {}
        # one Rollover instance per rollover group (geography x optional sector/bin)
        for elements in self.rollover_groups.keys():
            elements = util.ensure_tuple(elements)
            sales_share, initial_sales_share = self.calculate_total_sales_share(elements,
                                                                               self.stock.rollover_group_names)  # group is not necessarily the same for this other dataframe
            if np.any(np.isnan(sales_share)):
                raise ValueError('Sales share has NaN values in node ' + str(self.id))
            sales = self.calculate_total_sales(elements, self.stock.rollover_group_names)
            initial_total = util.df_slice(self.stock.total_rollover, elements, self.stock.rollover_group_names).values[0]
            initial_stock, rerun_sales_shares = self.calculate_initial_stock(elements, initial_total, sales_share,initial_sales_share)
            if rerun_sales_shares:
                # initial stock was seeded from sales shares, so rebuild shares for the rollover itself
                sales_share = self.calculate_total_sales_share_after_initial(elements,self.stock.rollover_group_names)
            technology_stock = self.stock.return_stock_slice(elements, self.stock.rollover_group_names,'technology_rollover').values
            self.rollover_dict[elements] = Rollover(vintaged_markov_matrix=self.stock.vintaged_markov_matrix,
                                                    initial_markov_matrix=self.stock.initial_markov_matrix,
                                                    num_years=len(years), num_vintages=len(years),
                                                    num_techs=len(self.tech_ids), initial_stock=initial_stock,
                                                    sales_share=sales_share, stock_changes=None, specified_sales=sales,
                                                    specified_stock=technology_stock, specified_retirements=None,stock_changes_as_min=True)
        # if self.id == 115:
        #     print self.rollover_dict[(64,)].i
        #     print self.rollover_dict[(64,)].sales_share[0]
        #     print self.rollover_dict[(64,)].initial_stock, self.rollover_dict[(64,)].stock.sum()
        # run the rollover through all historical years and record outputs
        for year in [x for x in self.years if x<int(cfg.cfgfile.get('case', 'current_year'))]:
            for elements in self.rollover_groups.keys():
                elements = util.ensure_tuple(elements)
                try:
                    self.rollover_dict[elements].run(1)
                except:
                    logging.error('error encountered in rollover for node ' + str(self.id) + ' in elements '+ str(elements) + ' year ' + str(year))
                    raise
                stock, stock_new, stock_replacement, retirements, retirements_natural, retirements_early, sales_record, sales_new, sales_replacement = self.rollover_dict[(elements)].return_formatted_outputs(year_offset=0)
                # record stock outcomes by rollover group and year
                self.stock.values.loc[elements, year], self.stock.values_new.loc[elements, year], self.stock.values_replacement.loc[
                    elements,year] = stock, stock_new, stock_replacement
                full_levels = [[x] for x in elements] + [self.tech_ids] + [[year]]
                full_names = self.stock.rollover_group_names + ['supply_technology'] + ['vintage']
                elements_indexer = util.level_specific_indexer(self.stock.retirements, full_names, full_levels)
                self.stock.retirements.loc[elements_indexer, 'value'], self.stock.retirements_natural.loc[elements_indexer, 'value'], \
                self.stock.retirements_early.loc[elements_indexer, 'value'] = retirements, retirements_natural, retirements_early
                self.stock.sales.loc[elements_indexer, 'value'], self.stock.sales_new.loc[elements_indexer, 'value'], \
                self.stock.sales_replacement.loc[elements_indexer, 'value'] = sales_record, sales_new, sales_replacement
            self.stock_normalize(year)
            self.financial_stock(year, 1)
            self.calculate_actual_stock(year, 1)
    def calculate_initial_stock(self, elements, initial_total, sales_share, initial_sales_share):
        """Determine the technology composition of the initial stock for one rollover group.

        Three strategies, in priority order:
          1. use specified technology stocks when they cover (nearly) the whole total;
          2. derive shares from the initial sales-share transition matrix;
          3. scale the earliest specified technology mix up to the total.
        Returns (initial_stock_array, rerun_sales_shares); the flag tells the
        caller to rebuild sales shares after seeding from strategy 2.
        """
        technology = self.stock.technology_rollover.sum(axis=1).to_frame()
        technology_years = technology[technology>0].index.get_level_values('year')
        if len(technology_years) and not np.all(np.isnan(self.stock.technology_rollover.values)):
            min_technology_year = min(technology_years)
        else:
            min_technology_year = None
        # strategy 1: specified technology stocks cover >99% of the total
        if (np.nansum(self.stock.technology_rollover.loc[elements,:].values[0])/self.stock.total_rollover.loc[elements,:].values[0])>.99 and np.nansum(self.stock.technology_rollover.loc[elements,:].values[0])>0:
            initial_stock = self.stock.technology_rollover.loc[elements,:].fillna(0).values[0]
            rerun_sales_shares = False
        # strategy 2: derive initial shares from the sales-share transition matrix
        elif initial_sales_share:
            initial_stock = self.stock.calc_initial_shares(initial_total=initial_total, transition_matrix=sales_share[0], num_years=len(self.years))
            rerun_sales_shares = True
            # only rerun if the sales share is constant over time
            for i in range(1, len(sales_share)):
                if np.any(sales_share[0]!=sales_share[i]):
                    rerun_sales_shares=False
        # strategy 3: scale the earliest specified technology mix to the total
        elif min_technology_year:
            initial_stock = self.stock.technology_rollover.loc[elements+(min_technology_year,),:].fillna(0).values/np.nansum(self.stock.technology_rollover.loc[elements+(min_technology_year,),:].values) * initial_total
            rerun_sales_shares = False
        else:
            raise ValueError("""user has not input stock data with technologies or sales share data so the model
                            cannot determine the technology composition of the initial stock in node %s""" % self.name)
        # TODO Ben to review
        # this is a fix for the case where we have no initial stock and we don't want to rerun the reference sales share
        # the problem came up in model.supply.nodes[97].rollover_dict[(61, 1)] (CSP)
        if any(np.isnan(initial_stock)) or np.sum(initial_stock)==0:
            rerun_sales_shares = False
        return initial_stock, rerun_sales_shares
def stock_rollover(self, year, loop, stock_changes):
    """Run the stock rollover for one year and write the results back into the stock dataframes.

    In the first model year, each rollover group is seeded with its initial stock and sales
    shares. Then every group's rollover is run with this year's stock changes, and the
    resulting stock/retirement/sales outputs are written into the corresponding dataframes.
    After the `initial` loop, non-thermal-dispatch nodes are additionally adjusted so that
    built stock energy matches required energy, and the adjusted outputs are re-recorded.
    """
    # Seed initial stock and sales shares only in the first (current) model year.
    if min(self.years) == int(cfg.cfgfile.get('case', 'current_year')) ==year:
        for elements in self.rollover_groups.keys():
            elements = util.ensure_tuple(elements)
            sales_share, initial_sales_share = self.calculate_total_sales_share(elements,self.stock.rollover_group_names) # group is not necessarily the same for this other dataframe
            if np.any(np.isnan(sales_share)):
                raise ValueError('Sales share has NaN values in node ' + str(self.id))
            # requirement may be indexed by one or several levels; .loc returns differently shaped data
            if len(self.stock.requirement.index.names)>1:
                initial_total = self.stock.requirement.loc[elements, year]
            else:
                initial_total = self.stock.requirement.loc[elements, year].values
            initial_stock, rerun_sales_shares = self.calculate_initial_stock(elements, initial_total, sales_share,initial_sales_share)
            if rerun_sales_shares:
                sales_share = self.calculate_total_sales_share_after_initial(elements,self.stock.rollover_group_names)
            self.rollover_dict[elements].initial_stock = initial_stock
            self.rollover_dict[elements].sales_share = sales_share
    # Run the rollover for every group and record its formatted outputs.
    for elements in self.rollover_groups.keys():
        elements = util.ensure_tuple(elements)
        try:
            self.rollover_dict[elements].use_stock_changes = True
            self.rollover_dict[elements].run(1, stock_changes.loc[elements],self.stock.return_stock_slice(elements + (year,), self.stock.rollover_group_names+['year'],'technology_rollover').values)
        except:
            # log which node/group/year failed before propagating
            logging.error('error encountered in rollover for node ' + str(self.id) + ' in elements '+ str(elements) + ' year ' + str(year))
            raise
        stock, stock_new, stock_replacement, retirements, retirements_natural, retirements_early, sales_record, sales_new, sales_replacement = self.rollover_dict[(elements)].return_formatted_outputs(year_offset=0)
        self.stock.values.loc[elements, year], self.stock.values_new.loc[elements, year], self.stock.values_replacement.loc[
            elements,year] = stock, stock_new, stock_replacement
        # Build an indexer over (group levels, technology, this vintage) for the vintaged frames.
        full_levels = [[x] for x in elements] + [self.tech_ids] + [[year]]
        full_names = self.stock.rollover_group_names + ['supply_technology'] + ['vintage']
        elements_indexer = util.level_specific_indexer(self.stock.retirements, full_names, full_levels)
        self.stock.retirements.loc[elements_indexer, 'value'], self.stock.retirements_natural.loc[elements_indexer, 'value'], \
        self.stock.retirements_early.loc[elements_indexer, 'value'] = retirements, retirements_natural, retirements_early
        self.stock.sales.loc[elements_indexer, 'value'], self.stock.sales_new.loc[elements_indexer, 'value'], \
        self.stock.sales_replacement.loc[elements_indexer, 'value'] = sales_record, sales_new, sales_replacement
    self.calculate_actual_stock(year,loop)
    # After the initial loop, scale this year's builds so stock energy matches requirements.
    if loop!= 'initial':
        if not self.thermal_dispatch_node:
            adjustment_factor = self.calculate_adjustment_factor(year)
            for elements in self.rollover_groups.keys():
                elements = util.ensure_tuple(elements)
                self.rollover_dict[elements].factor_adjust_current_year(adjustment_factor.loc[elements].values)
                stock, stock_new, stock_replacement, retirements, retirements_natural, retirements_early, sales_record, sales_new, sales_replacement = self.rollover_dict[(elements)].return_formatted_outputs(year_offset=0)
                self.stock.values.loc[elements, year], self.stock.values_new.loc[elements, year], self.stock.values_replacement.loc[
                    elements,year] = stock, stock_new, stock_replacement
                full_levels = [[x] for x in elements] + [self.tech_ids] + [[year]]
                full_names = self.stock.rollover_group_names + ['supply_technology'] + ['vintage']
                elements_indexer = util.level_specific_indexer(self.stock.retirements, full_names, full_levels)
                self.stock.retirements.loc[elements_indexer, 'value'], self.stock.retirements_natural.loc[elements_indexer, 'value'], \
                self.stock.retirements_early.loc[elements_indexer, 'value'] = retirements, retirements_natural, retirements_early
                self.stock.sales.loc[elements_indexer, 'value'], self.stock.sales_new.loc[elements_indexer, 'value'], \
                self.stock.sales_replacement.loc[elements_indexer, 'value'] = sales_record, sales_new, sales_replacement
            self.calculate_actual_stock(year,loop)
    self.stock_normalize(year)
    self.financial_stock(year, loop)
def add_stock_dataframes(self):
    """Allocate the empty stock dataframes used by the rollover.

    Builds three index shapes:
      - group index (rollover group levels) for requirement/requirement_energy;
      - group x technology x (initial vintage + vintages) for stock values and factors;
      - group x technology x vintages for retirements and sales;
      - group x vintages for sales-share reconciliation.
    Also precomputes the capacity-factor dataframe for every year.
    """
    # NOTE: the original built the group-level index twice and deep-copied
    # values_normal twice; the redundant statements have been removed.
    index = pd.MultiIndex.from_product(self.stock.rollover_group_levels, names=self.stock.rollover_group_names)
    self.vintages = self.years
    columns = self.years
    self.stock.requirement_energy = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
    self.stock.requirement = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
    if len(self.stock.rollover_group_names)>1:
        self.rollover_groups = self.stock.requirement.groupby(level=self.stock.rollover_group_names).groups
    else:
        self.rollover_groups = self.stock.requirement.groupby(level=0).groups
    # Vintaged stock frames include a pre-initial vintage (vintages[0] - 1) for existing stock.
    full_levels = self.stock.rollover_group_levels + [self.technologies.keys()] + [
        [self.vintages[0] - 1] + self.vintages]
    full_names = self.stock.rollover_group_names + ['supply_technology', 'vintage']
    index = pd.MultiIndex.from_product(full_levels, names=full_names)
    self.stock.values = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
    self.stock.exist = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'),fill_value=1.0)
    self.stock.values_energy = copy.deepcopy(self.stock.values)
    self.stock.values_normal = copy.deepcopy(self.stock.values)
    self.stock.values_normal_energy = copy.deepcopy(self.stock.values)
    self.stock.ones = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'), fill_value=1.0)
    self.stock.capacity_factor = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'), fill_value=1.0)
    for year in self.years:
        self.stock.capacity_factor.loc[:,year] = self.rollover_output(tech_class='capacity_factor',stock_att='ones',year=year, non_expandable_levels=None,fill_value=1.0)
    self.stock.remaining = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
    self.stock.dispatch_cap = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
    self.stock.preview = util.empty_df(index=index, columns=pd.Index(columns, dtype='object'))
    self.stock.values_new = copy.deepcopy(self.stock.values)
    self.stock.values_replacement = copy.deepcopy(self.stock.values)
    self.stock.values_financial = copy.deepcopy(self.stock.values)
    self.stock.values_financial_new = copy.deepcopy(self.stock.values)
    self.stock.values_financial_replacement = copy.deepcopy(self.stock.values)
    # Retirement and sales records carry no pre-initial vintage.
    full_levels = self.stock.rollover_group_levels + [self.technologies.keys()] + [self.vintages]
    full_names = self.stock.rollover_group_names + ['supply_technology', 'vintage']
    index = pd.MultiIndex.from_product(full_levels, names=full_names)
    self.stock.retirements = util.empty_df(index=index, columns=['value'])
    self.stock.retirements_early = copy.deepcopy(self.stock.retirements)
    self.stock.retirements_natural = copy.deepcopy(self.stock.retirements)
    self.stock.sales = util.empty_df(index=index, columns=['value'])
    self.stock.sales_new = copy.deepcopy(self.stock.sales)
    self.stock.sales_replacement = copy.deepcopy(self.stock.sales)
    self.stock.sales_exist = util.empty_df(index=index, columns=['value'],fill_value = 1.0)
    full_levels = self.stock.rollover_group_levels + [self.vintages]
    full_names = self.stock.rollover_group_names + ['vintage']
    index = pd.MultiIndex.from_product(full_levels, names=full_names)
    self.stock.sales_share_reconcile = util.empty_df(index=index, columns=['value'],fill_value=1.0)
def update_stock(self, year, loop):
    """Update this node's stock within the IO loop for the given year."""
    # Both dispatch and non-dispatch nodes start from the same throughput
    # and remaining-stock calculations.
    self.determine_throughput(year, loop)
    self.update_remaining_stock(year, loop)
    if self.thermal_dispatch_node:
        self.update_technology_dispatch(year)
        self.update_total_dispatch(year)
        self.update_requirement_dispatch(year)
    else:
        self.update_technology(year)
        self.update_total(year)
        self.update_requirement(year, loop)
        self.stock_rollover(year, loop, self.stock.act_stock_changes)
def determine_throughput(self, year, loop):
    """Set self.throughput for this year/loop and normalize supply potential against it."""
    # On the very first pass of the supply side only internal demand is known;
    # afterwards the node's active supply is the throughput.
    first_pass = (year == int(cfg.cfgfile.get('case', 'current_year')) and loop == 'initial')
    self.throughput = self.active_demand if first_pass else self.active_supply
    if self.throughput is not None:
        # Drop index levels the requirement frames don't carry and clip non-positive values.
        extraneous_levels = [name for name in self.throughput.index.names
                             if name not in self.stock.requirement_energy.index.names]
        self.throughput = util.remove_df_levels(self.throughput, extraneous_levels)
        self.throughput[self.throughput <= 0] = 0
    if self.potential.raw_values is not None:
        self.potential.remap_to_potential_and_normalize(self.throughput, year, self.tradable_geography)
def calculate_actual_stock(self, year, loop):
    """Compute the energy throughput of the built stock for `year`.

    Stock energy = capacity * capacity factor * (hours per year in model time steps);
    used later to check built stock against required throughput.
    """
    capacity = self.stock.values.loc[:, year].to_frame()
    cap_factor = self.stock.capacity_factor.loc[:, year].to_frame()
    steps_per_year = util.unit_conversion(unit_from_den=cfg.cfgfile.get('case', 'time_step'),
                                          unit_to_den='year')[0]
    self.stock.values_energy.loc[:, year] = DfOper.mult([cap_factor, capacity]) * steps_per_year
def calculate_adjustment_factor(self, year):
    """Return the factor applied to this year's sales so stock energy meets required energy.

    factor = (required energy - remaining energy) / (built energy - remaining energy),
    with divide-by-zero, infinite, and negative results defaulted to 1 (no adjustment).
    """
    remaining = self.stock.act_rem_energy.groupby(level=self.stock.rollover_group_names).sum()
    required = self.stock.requirement_energy.loc[:, year].to_frame()
    built = self.stock.values_energy.loc[:, year].to_frame().groupby(level=self.stock.rollover_group_names).sum()
    numerator = DfOper.subt([required, remaining])
    denominator = DfOper.subt([built, remaining])
    factor = DfOper.divi([numerator, denominator]).fillna(1)
    factor = factor.replace(np.inf, 1)
    factor[factor < 0] = 1
    self.active_adjustment_factor = factor
    return factor
def update_remaining_stock(self, year, loop):
    """Populate remaining (or preview) stock for `year` from each group's rollover.

    On loop 1/'initial' the rollover is queried one year ahead for remaining stock
    (rewinding first in the current year of loop 1); on later loops the rollover is
    rewound and this year's stock is previewed instead. Finishes by refreshing the
    energy/capacity ratios.
    """
    for elements in self.rollover_groups.keys():
        elements = util.ensure_tuple(elements)
        element_indexer= util.level_specific_indexer(self.stock.remaining, self.stock.rollover_group_names,elements)
        if loop == 1 or loop == 'initial':
            # in loop 1 of the current year the rollover has already advanced, so rewind first
            if year == int(cfg.cfgfile.get('case','current_year')) and loop == 1:
                self.rollover_dict[elements].rewind(1)
            self.stock.remaining.loc[element_indexer, year] = self.rollover_dict[elements].return_formatted_stock(year_offset=1)
        else:
            self.rollover_dict[elements].rewind(1)
            self.stock.preview.loc[element_indexer,year] = self.rollover_dict[elements].return_formatted_stock(year_offset=0)
    self.set_energy_capacity_ratios(year,loop)
def set_energy_capacity_ratios(self, year, loop):
    """Compute energy-per-capacity ratios for remaining stock (and, after the first loop,
    a preview ratio for this year's vintage).

    act_rem_energy is the remaining stock converted to energy; act_energy_capacity_ratio
    is its energy/capacity quotient, falling back to a default conversion derived from the
    average non-zero capacity factor where the quotient is undefined or zero.
    """
    if loop == 1 or loop == 'initial':
        self.stock.act_rem_energy = DfOper.mult([self.stock.remaining.loc[:,year].to_frame(),self.stock.capacity_factor.loc[:,year].to_frame()]) * util.unit_conversion(unit_from_den=cfg.cfgfile.get('case', 'time_step'), unit_to_den='year')[0]
        # zero capacity factors are excluded from the default-conversion average
        exist_cap_factor = self.stock.capacity_factor.loc[:,year].to_frame()
        exist_cap_factor[exist_cap_factor==0]=np.nan
        default_conversion = exist_cap_factor.groupby(level=self.stock.rollover_group_names).mean().fillna(1)*util.unit_conversion(unit_from_num='year',unit_to_num=cfg.cfgfile.get('case', 'time_step'))[0]
        default_conversion[default_conversion==0] = 1
        self.stock.act_energy_capacity_ratio = util.DfOper.divi([util.remove_df_levels(self.stock.act_rem_energy,['vintage','supply_technology']),
                                                                 util.remove_df_levels(self.stock.remaining.loc[:, year].to_frame(),['vintage','supply_technology'])]).fillna(default_conversion)
        self.stock.act_energy_capacity_ratio[self.stock.act_energy_capacity_ratio==0] = default_conversion
    else:
        # preview this year's vintage to derive a ratio for prospective builds
        preview = util.df_slice(self.stock.preview.loc[:,year].to_frame(), year, 'vintage')
        preview_energy = util.DfOper.mult([preview,util.df_slice(self.stock.capacity_factor.loc[:,year].to_frame(),year,'vintage')])*util.unit_conversion(unit_from_den=cfg.cfgfile.get('case', 'time_step'), unit_to_den='year')[0]
        preview = util.remove_df_levels(preview,['vintage','supply_technology'])
        preview_energy = util.remove_df_levels(preview_energy,['vintage','supply_technology'])
        # default conversion here uses the stock-weighted average capacity factor
        exist_cap_factor = DfOper.divi([DfOper.mult([self.stock.capacity_factor.loc[:,year].to_frame(),self.stock.values.loc[:,year].to_frame()]).groupby(level=self.stock.rollover_group_names).sum(),self.stock.values.loc[:,year].to_frame().groupby(level=self.stock.rollover_group_names).sum()])
        exist_cap_factor[exist_cap_factor==0]=np.nan
        default_conversion = exist_cap_factor.fillna(1)*util.unit_conversion(unit_from_num='year',unit_to_num=cfg.cfgfile.get('case', 'time_step'))[0]
        default_conversion[default_conversion==0] = 1
        self.stock.act_rem_energy = util.DfOper.mult([self.stock.remaining.loc[:,year].to_frame(),self.stock.capacity_factor.loc[:,year].to_frame()]) * util.unit_conversion(unit_from_den=cfg.cfgfile.get('case', 'time_step'), unit_to_den='year')[0]
        self.stock.act_energy_capacity_ratio = util.DfOper.divi([util.remove_df_levels(self.stock.act_rem_energy,['vintage','supply_technology']),
                                                                 util.remove_df_levels(self.stock.remaining.loc[:, year].to_frame(),['vintage','supply_technology'])]).fillna(default_conversion)
        self.stock.act_energy_capacity_ratio[self.stock.act_energy_capacity_ratio==0] = default_conversion
        self.stock.preview_energy_capacity_ratio = util.DfOper.divi([preview_energy,preview]).fillna(self.stock.act_energy_capacity_ratio)
def update_technology(self, year):
    """sets the minimum necessary stock by technology based on remaining stock after natural rollover and technology stock amounts"""
    # specified technology stock converted to energy (NaN where unspecified)
    self.stock.act_tech_energy = util.DfOper.mult([self.stock.technology.loc[:,year].to_frame(), self.stock.act_energy_capacity_ratio],fill_value=np.nan)
    # where unspecified, fall back to remaining-stock energy
    self.stock.act_tech_or_rem_energy = self.stock.act_tech_energy.fillna(self.stock.act_rem_energy.groupby(level=self.stock.act_tech_energy.index.names).sum())
    # convert back to capacity and collapse the technology level
    self.stock.act_tech_or_rem = util.remove_df_levels(util.DfOper.divi([self.stock.act_tech_or_rem_energy, self.stock.act_energy_capacity_ratio]),'supply_technology')
def update_technology_dispatch(self, year):
    """sets the minimum necessary stock by technology based on remaining stock after natural rollover and technology stock amounts"""
    # dispatch nodes work in capacity terms directly (no energy conversion)
    self.stock.act_tech = self.stock.technology.loc[:,year].to_frame()
    self.stock.act_rem = (self.stock.remaining.loc[:,year].to_frame().groupby(level=util.ix_incl(self.stock.act_tech,self.stock.act_tech.index.names)).sum())
    self.stock.act_tech_or_rem = self.stock.act_tech.fillna(self.stock.act_rem)
    # add capacity built by the dispatch on top of specified/remaining stock
    self.stock.act_tech_or_rem = DfOper.add([self.stock.act_tech_or_rem,util.remove_df_levels(self.stock.dispatch_cap.loc[:,year].to_frame(),'vintage')])
def update_total(self, year):
    """Set the minimum necessary total stock energy from the specified total,
    falling back to technology/remaining stock energy where unspecified."""
    specified_total = self.stock.total.loc[:, year].to_frame()
    total_energy = DfOper.mult([specified_total, self.stock.act_energy_capacity_ratio], fill_value=np.nan)
    fallback = self.stock.act_tech_or_rem_energy.groupby(level=self.stock.rollover_group_names).sum()
    self.stock.act_total_energy = total_energy.fillna(fallback)
def update_total_dispatch(self, year):
    """Set the minimum necessary total stock for a dispatch node: specified total plus
    dispatch-built capacity, falling back to technology/remaining stock where unspecified."""
    dispatch_built = util.remove_df_levels(self.stock.dispatch_cap.loc[:, year].to_frame(),
                                           ['supply_technology', 'vintage'])
    combined = util.DfOper.add([self.stock.total.loc[:, year].to_frame(), dispatch_built])
    fallback = self.stock.act_tech_or_rem.groupby(level=self.stock.rollover_group_names).sum()
    self.stock.act_total = combined.fillna(fallback)
def update_requirement(self, year, loop):
    """Update annual stock requirements with the maximum of required stock and specified and
    remaining natural rolloff, then derive this year's stock changes.

    When the node has supply-potential data, the necessary stock additions are distributed
    across the available residuals in the supply-curve resource bins. Stock changes are
    capped below by the retirable stock from the previous year.
    """
    previous_year = max(min(self.years), year-1)
    if self.potential.data is False:
        # requirement energy is the larger of throughput and specified/remaining stock energy
        if self.throughput is not None:
            self.stock.requirement_energy.loc[:,year] = self.throughput
        a = copy.deepcopy(self.stock.requirement_energy.loc[:,year].to_frame())
        b = copy.deepcopy(self.stock.act_total_energy)
        b[b<a] = a
        self.stock.requirement_energy.loc[:,year] = b
        self.stock.requirement.loc[:,year] = DfOper.divi([self.stock.requirement_energy.loc[:,year].to_frame(),self.stock.act_energy_capacity_ratio])
    else:
        # calculates the total amount of energy needed to distribute
        total_residual = util.DfOper.subt([self.throughput, self.stock.act_total_energy],expandable=(False,False), collapsible=(True,True))
        total_residual[total_residual<0] = 0
        # calculates the residual amount of energy available in each bin
        df = util.remove_df_elements(self.potential.values,[x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
        bins = util.DfOper.subt([df[year].to_frame(), self.stock.act_total_energy],expandable=(False,False), collapsible=(True,True))
        bins = bins.reset_index().set_index(bins.index.names)
        bins[bins<0] = 0
        # calculates the supply curve of remaining energy
        bin_supply_curve = bins.groupby(level=[x for x in self.stock.rollover_group_names if x!= 'resource_bin']).cumsum()
        # expands the total energy needed to distribute to mask against the supply curve; used as a cap on the supply curve
        total_residual = util.expand_multi(total_residual,bins.index.levels,bins.index.names)
        bin_supply_curve[bin_supply_curve>total_residual] = total_residual
        try:
            bin_supply_curve = bin_supply_curve.groupby(level=util.ix_excl(bin_supply_curve,'resource_bin')).diff().fillna(bin_supply_curve)
            self.stock.requirement_energy.loc[:,year] = util.DfOper.add([self.stock.act_total_energy, bin_supply_curve])
            self.stock.requirement.loc[:,year] = util.DfOper.divi([self.stock.requirement_energy.loc[:,year].to_frame(),self.stock.act_energy_capacity_ratio])
        except Exception:
            # bug fix: this previously dropped into pdb.set_trace() -- leftover interactive
            # debugging that would hang a non-interactive run; log context and propagate.
            logging.error('error distributing stock requirement to resource bins in node %s year %s' % (str(self.id), str(year)))
            raise
    # stock energy changes relative to last year's built stock (zero in the first current year)
    if year == int(cfg.cfgfile.get('case','current_year')) and year==min(self.years):
        self.stock.act_stock_energy_changes = self.stock.requirement_energy[year].to_frame()*0
    else:
        self.stock.act_stock_energy_changes = util.DfOper.subt([self.stock.requirement_energy[year].to_frame(),
                                                                util.remove_df_levels(self.stock.values_energy[previous_year].to_frame(),['vintage','supply_technology'])])
    # additions use the preview ratio (new builds); retirements use the existing-stock ratio
    if loop == 'initial' or loop ==1:
        needed_capacity_ratio = self.stock.act_energy_capacity_ratio
    else:
        needed_capacity_ratio = self.stock.preview_energy_capacity_ratio
    if_positive_stock_changes = util.DfOper.divi([self.stock.act_stock_energy_changes,needed_capacity_ratio]).fillna(0)
    if_negative_stock_changes = util.DfOper.divi([self.stock.act_stock_energy_changes,self.stock.act_energy_capacity_ratio]).fillna(0)
    # cannot retire more than last year's total stock
    max_retirable = -self.stock.values.loc[:,previous_year].to_frame().groupby(level=self.stock.rollover_group_names).sum()
    max_retirable.columns = [year]
    if_negative_stock_changes[if_negative_stock_changes<=max_retirable]=max_retirable
    if_positive_stock_changes[if_positive_stock_changes<0] = if_negative_stock_changes
    self.stock.act_stock_changes = if_positive_stock_changes
    self.stock.act_stock_changes = self.stock.act_stock_changes[year]
def update_requirement_dispatch(self, year):
    """updates annual stock requirements with the maximum of required stock and specified and remaining natural rolloff.

    For dispatch nodes the requirement is simply the active total; stock changes are the
    difference from last year's built stock (zero in the first current year).
    """
    previous_year = max(min(self.years),year-1)
    #TODO Flexible Nodes can't have SupplyPotential
    # if self.potential.data is False:
    self.stock.requirement.loc[:,year] = self.stock.act_total
    if year == int(cfg.cfgfile.get('case','current_year')) and year==min(self.years):
        self.stock.act_stock_changes = self.stock.requirement[year].to_frame()*0
    else:
        #stock changes only equal capacity added from the dispatch
        self.stock.act_stock_changes = util.DfOper.subt([self.stock.requirement.loc[:,year].to_frame(),
                                                         util.remove_df_levels(self.stock.values[previous_year].to_frame(),['vintage','supply_technology'])])
    # else:
    #     self.stock.act_stock_changes = DfOper.subt([self.stock.requirement[year].to_frame(),util.remove_df_levels(self.stock.act_rem,'supply_technology')])
    self.stock.act_stock_changes = self.stock.act_stock_changes[year]
def setup_financial_stock(self):
    """Precompute per-technology book-life matrices used by the financial stock."""
    for technology in self.technologies.values():
        # binary matrix across years and vintages based on the technology's book life
        technology.book_life_matrix = util.book_life_df(technology.book_life, self.vintages, self.years)
        # linear decay of the initial stock over the book life
        technology.initial_book_life_matrix = util.initial_book_life_df(technology.book_life, technology.mean_lifetime, self.vintages, self.years)
def financial_stock(self, year, loop):
    """
    Calculates the amount of stock based on sales and technology book life
    instead of physical decay
    """
    # reformat the book_life_matrix dataframes to match the stock dataframe
    # creates a list of formatted tech dataframes and concatenates them
    tech_dfs = [self.reformat_tech_df(self.stock.sales, tech, tech_class=None, tech_att='book_life_matrix', tech_id=tech.id, year=year) for
                tech in self.technologies.values()]
    tech_df = pd.concat(tech_dfs)
    # initial_stock_df uses the stock values dataframe and removes vintages
    initial_stock_df = self.stock.values[min(self.years)]
    # formats tech dfs to match stock df
    initial_tech_dfs = [self.reformat_tech_df(initial_stock_df, tech, tech_class=None, tech_att='initial_book_life_matrix',tech_id=tech.id, year=year) for tech in self.technologies.values()]
    initial_tech_df = pd.concat(initial_tech_dfs)
    # stock values in any year equals vintage sales multiplied by book life
    values_financial_new = DfOper.mult([self.stock.sales_new, tech_df])
    values_financial_new.columns = [year]
    values_financial_replacement = DfOper.mult([self.stock.sales_replacement, tech_df])
    values_financial_replacement.columns = [year]
    # initial stock values in any year equals stock.values multiplied by the initial tech_df
    initial_values_financial_new = DfOper.mult([self.stock.values_new.loc[:,year].to_frame(), initial_tech_df],non_expandable_levels=None)
    initial_values_financial_replacement = DfOper.mult([self.stock.values_replacement.loc[:,year].to_frame(), initial_tech_df],non_expandable_levels=None)
    # sum normal and initial stock values
    self.stock.values_financial_new.loc[:,year] = DfOper.add([values_financial_new, initial_values_financial_new], non_expandable_levels=None)
    self.stock.values_financial_replacement.loc[:,year] = DfOper.add(
        [values_financial_replacement, initial_values_financial_replacement],non_expandable_levels=None)
def calculate_levelized_costs(self, year, loop):
    "calculates total and per-unit costs in a subsector with technologies"
    # lazily allocate the cost dataframes on first use
    if not hasattr(self.stock,'capital_cost_new'):
        index = self.rollover_output(tech_class='capital_cost_new', tech_att= 'values_level', stock_att='values_normal_energy',year=year).index
        self.stock.capital_cost_new= util.empty_df(index, columns=self.years,fill_value=0)
        self.stock.capital_cost_replacement = util.empty_df(index, columns=self.years,fill_value=0)
        self.stock.capital_cost = util.empty_df(index, columns=self.years,fill_value=0)
        self.stock.installation_cost_new = util.empty_df(index, columns=self.years,fill_value=0)
        self.stock.installation_cost_replacement = util.empty_df(index, columns=self.years,fill_value=0)
        self.stock.installation_cost = util.empty_df(index, columns=self.years,fill_value=0)
        self.stock.fixed_om = util.empty_df(index, columns=self.years,fill_value=0)
        self.stock.variable_om = util.empty_df(index, columns=self.years,fill_value= 0)
        self.levelized_costs = util.empty_df(index, columns=self.years,fill_value= 0)
        index = pd.MultiIndex.from_product(self.stock.rollover_group_levels, names=self.stock.rollover_group_names)
        self.embodied_cost = util.empty_df(index, columns=self.years,fill_value=0)
        self.embodied_cost = util.remove_df_levels(self.embodied_cost,'resource_bin')
    # capital and installation costs are levelized against financial stock;
    # fixed O&M against physical stock; variable O&M against stock energy
    self.stock.capital_cost_new.loc[:,year] = self.rollover_output(tech_class='capital_cost_new',tech_att='values_level',
                                                                   stock_att='values_financial_new',year=year)
    self.stock.capital_cost_replacement.loc[:,year] = self.rollover_output(tech_class='capital_cost_replacement',tech_att='values_level',
                                                                           stock_att='values_financial_replacement',year=year)
    self.stock.fixed_om.loc[:,year] = self.rollover_output(tech_class='fixed_om',tech_att='values_level', stock_att='values',year=year)
    self.stock.variable_om.loc[:,year] = self.rollover_output(tech_class='variable_om',tech_att='values_level',
                                                              stock_att='values_energy',year=year)
    self.stock.installation_cost_new.loc[:,year] = self.rollover_output(tech_class='installation_cost_new',tech_att='values_level',
                                                                        stock_att='values_financial_new',year=year)
    self.stock.installation_cost_replacement.loc[:,year] = self.rollover_output(tech_class='installation_cost_replacement',tech_att='values_level',
                                                                                stock_att='values_financial_replacement',year=year)
    self.stock.capital_cost.loc[:,year] = DfOper.add([self.stock.capital_cost_new.loc[:,year].to_frame(), self.stock.capital_cost_replacement.loc[:,year].to_frame()])
    self.stock.installation_cost.loc[:,year] = DfOper.add([self.stock.installation_cost_new.loc[:,year].to_frame(), self.stock.installation_cost_replacement.loc[:,year].to_frame()])
    self.levelized_costs.loc[:,year] = DfOper.add([self.stock.capital_cost.loc[:,year].to_frame(), self.stock.installation_cost.loc[:,year].to_frame(), self.stock.fixed_om.loc[:,year].to_frame(), self.stock.variable_om.loc[:,year].to_frame()])
    self.calculate_per_unit_costs(year)
def calculate_per_unit_costs(self, year):
    """Derive per-unit embodied costs for `year` from total levelized costs and active supply.

    Division artifacts (inf, -inf, NaN) are zeroed out before the result is expanded
    across geographies and demand sectors into active_embodied_cost.
    """
    total_costs = util.remove_df_levels(self.levelized_costs.loc[:,year].to_frame(),['vintage', 'supply_technology'])
    # bug fix: the replace list previously used -np.nan, which is still NaN, so
    # negative infinities from the division were never cleaned; use -np.inf.
    self.embodied_cost.loc[:,year] = DfOper.divi([total_costs, self.active_supply],expandable=(False,False)).replace([np.inf, -np.inf, np.nan],[0,0,0])
    self.active_embodied_cost = util.expand_multi(self.embodied_cost[year].to_frame(), levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],levels_names=[cfg.primary_geography,'demand_sector'])
def calculate_annual_costs(self, year):
    """Record this year's vintage of annual (unlevelized) capital and installation costs,
    lazily allocating the annual cost dataframes on first use."""
    if not hasattr(self.stock,'capital_cost_new_annual'):
        index = self.stock.sales.index
        self.stock.capital_cost_new_annual= util.empty_df(index, columns=['value'],fill_value=0)
        self.stock.capital_cost_replacement_annual = util.empty_df(index, columns=['value'],fill_value=0)
        self.stock.installation_cost_new_annual = util.empty_df(index, columns=['value'],fill_value=0)
        self.stock.installation_cost_replacement_annual = util.empty_df(index, columns=['value'],fill_value=0)
    indexer = util.level_specific_indexer(self.stock.capital_cost_new_annual,'vintage', year)
    self.stock.capital_cost_new_annual.loc[indexer,:] = self.rollover_output(tech_class='capital_cost_new', tech_att= 'values', stock_att='sales_new',year=year)
    self.stock.capital_cost_replacement_annual.loc[indexer,:] = self.rollover_output(tech_class='capital_cost_replacement', tech_att= 'values', stock_att='sales_replacement',year=year)
    self.stock.installation_cost_new_annual.loc[indexer,:] = self.rollover_output(tech_class='installation_cost_new', tech_att= 'values', stock_att='sales_new',year=year)
    # bug fix: this line previously duplicated the installation_cost_new/sales_new pair,
    # so replacement installation costs were recorded as a copy of new installation costs.
    self.stock.installation_cost_replacement_annual.loc[indexer,:] = self.rollover_output(tech_class='installation_cost_replacement', tech_att= 'values', stock_att='sales_replacement',year=year)
    #in the last year of the analysis, these need to be concatenated together for output
#in the last year of the analysis, these need to be concatenated together for output
def concatenate_annual_costs(self):
    """Concatenate annual cost streams (capital, installation, fixed and variable O&M)
    into self.final_annual_costs, keyed by upper-cased cost_type labels.

    Fixed and variable O&M are levelized frames with year columns, so they are stacked
    into long form and their stacked year level is relabeled 'vintage' to align with
    the annual cost records.
    """
    fixed_om = util.remove_df_levels(self.stock.fixed_om,'vintage').stack().to_frame()
    util.replace_index_name(fixed_om,'vintage',)
    fixed_om.columns = ['value']
    # bug fix: variable_om was previously built from self.stock.fixed_om (copy-paste),
    # so variable O&M was reported as a duplicate of fixed O&M.
    variable_om = util.remove_df_levels(self.stock.variable_om,'vintage').stack().to_frame()
    util.replace_index_name(variable_om,'vintage')
    variable_om.columns = ['value']
    keys = ['capital cost - new', 'capital cost - replacement','installation cost - new','installation cost - replacement',
            'fixed om', 'variable om']
    keys = [x.upper() for x in keys]
    names = ['cost_type']
    self.final_annual_costs = pd.concat([self.stock.capital_cost_new_annual, self.stock.capital_cost_replacement_annual, self.stock.installation_cost_new_annual,
                                         self.stock.installation_cost_replacement_annual, fixed_om, variable_om],keys=keys, names=names)
def concatenate_levelized_costs(self):
    """Concatenate levelized cost components into self.final_levelized_costs,
    keyed by upper-cased cost_type labels."""
    cost_labels = ['capital cost - new', 'capital cost - replacement',
                   'installation cost - new', 'installation cost - replacement',
                   'fixed om', 'variable om']
    cost_labels = [label.upper() for label in cost_labels]
    components = [self.stock.capital_cost_new,
                  self.stock.capital_cost_replacement,
                  self.stock.installation_cost_new,
                  self.stock.installation_cost_replacement,
                  self.stock.fixed_om,
                  self.stock.variable_om]
    self.final_levelized_costs = pd.concat(components, keys=cost_labels, names=['cost_type'])
def calculate_dispatch_costs(self, year, embodied_cost_df, loop=None):
    """Compute active dispatch costs for this node from upstream embodied costs.

    Trade-adjusts the embodied cost of each upstream supply node, reduces it to the
    rollover group levels, multiplies by this node's dispatch coefficients, and adds
    this node's own variable O&M. Negative results are clipped to zero. Storage nodes
    only refresh their dispatch coefficients.
    """
    self.calculate_dispatch_coefficients(year, loop)
    if not isinstance(self, StorageNode):
        self.active_dispatch_costs = copy.deepcopy(self.active_trade_adjustment_df)
        # apply the trade adjustment to each upstream node's embodied cost
        for node in list(set(self.active_trade_adjustment_df.index.get_level_values('supply_node'))):
            embodied_cost_indexer = util.level_specific_indexer(embodied_cost_df, 'supply_node',node)
            trade_adjustment_indexer = util.level_specific_indexer(self.active_trade_adjustment_df, 'supply_node',node)
            self.active_dispatch_costs.loc[trade_adjustment_indexer,:] = util.DfOper.mult([self.active_trade_adjustment_df.loc[trade_adjustment_indexer,:],embodied_cost_df.loc[embodied_cost_indexer,:]]).values
        self.active_dispatch_costs = self.active_dispatch_costs.groupby(level='supply_node').sum()
        self.active_dispatch_costs = self.active_dispatch_costs.stack([cfg.primary_geography,'demand_sector'])
        # reduce to rollover group granularity (mean across remaining levels)
        self.active_dispatch_costs = util.reduce_levels(self.active_dispatch_costs, self.stock.rollover_group_names+['supply_node'], agg_function='mean')
        self.active_dispatch_costs = util.DfOper.mult([self.active_dispatch_costs.to_frame(), self.active_dispatch_coefficients])
        self.active_dispatch_costs = util.remove_df_levels(self.active_dispatch_costs, 'supply_node')
        self.active_dispatch_costs = self.active_dispatch_costs.reorder_levels(self.stock.values.index.names)
        # add this node's own variable O&M on top of upstream costs
        self.active_dispatch_costs = util.DfOper.add([self.active_dispatch_costs,self.rollover_output(tech_class='variable_om', tech_att= 'values_level', stock_att='ones',year=year)])
        self.active_dispatch_costs[self.active_dispatch_costs<0] = 0
def stock_normalize(self, year):
    """returns normalized stocks for use in other node calculations"""
    # normalize within each rollover group (excluding resource_bin) so values sum to 1;
    # groups that sum to zero produce NaN, which is replaced with 0
    self.stock.values_normal.loc[:,year] = self.stock.values.loc[:,year].to_frame().groupby(level=[x for x in self.stock.rollover_group_names if x!='resource_bin']).transform(lambda x: x / x.sum()).fillna(0)
    # energy-weighted normalization uses capacity * capacity factor
    self.stock.values_normal_energy.loc[:,year] = DfOper.mult([self.stock.values.loc[:,year].to_frame(), self.stock.capacity_factor.loc[:,year].to_frame()]).groupby(level=[x for x in self.stock.rollover_group_names if x!='resource_bin']).transform(lambda x: x / x.sum()).fillna(0)
    # clean any infinities produced by the division
    self.stock.values_normal.loc[:,year].replace(np.inf,0,inplace=True)
    self.stock.values_normal_energy.loc[:,year].replace(np.inf,0,inplace=True)
def calculate_co2_capture_rate(self,year):
if not hasattr(self,'co2_capture_rate'):
index = self.rollover_output(tech_class='co2_capture',stock_att='values_normal_energy',year=year).index
self.co2_capture_rate= util.empty_df(index, columns=self.years,fill_value=0)
self.co2_capture_rate.loc[:,year] = self.rollover_output(tech_class = 'co2_capture', stock_att='values_normal_energy',year=year)
self.active_co2_capture_rate = util.remove_df_levels(self.co2_capture_rate.loc[:,year].to_frame(),['supply_technology','vintage'])
    def calculate_active_coefficients(self, year,loop):
        """
        Calculate rollover efficiency outputs for a supply node

        Normalizes the stock, fills stock.coefficients for `year` from the
        technologies' efficiency inputs, then derives the active coefficient
        frames used elsewhere:
          - active_coefficients: traded, keeps efficiency_type
          - active_coefficients_untraded: pre-trade-adjustment copy
          - active_coefficients_total / _total_untraded: efficiency_type collapsed
          - active_emissions_coefficients: active_coefficients broadcast over GHGs
        """
        # normalize stock shares first so efficiency can be stock-weighted
        self.stock_normalize(year)
        if hasattr(self.stock,'coefficients'):
            self.stock.coefficients.loc[:,year] = self.rollover_output(tech_class='efficiency',
                                                               stock_att='values_normal_energy',year=year)
        else:
            # first pass: allocate a zero container across all model years, then fill this year
            index = self.rollover_output(tech_class='efficiency',stock_att='values_normal_energy',year=year).index
            self.stock.coefficients = util.empty_df(index, columns=self.years,fill_value=0.)
            self.stock.coefficients.loc[:,year] = self.rollover_output(tech_class='efficiency',stock_att='values_normal_energy',year=year)
        if 'supply_node' not in self.stock.coefficients.index.names:
            # no technology supplied an efficiency input; fall back to all-zero coefficients
            print ("no efficiency has been input for technologies in the %s supply node" %self.name)
            index = pd.MultiIndex.from_product([self.id,cfg.geographies],names = ['supply_node', cfg.primary_geography],)
            columns = [year]
            self.stock.coefficients = pd.DataFrame(0, index=index, columns = columns)
        if 'demand_sector' not in self.stock.rollover_group_names:
            # sector-agnostic stock: broadcast the coefficients across every demand sector
            keys = self.demand_sectors
            name = ['demand_sector']
            self.active_coefficients = util.remove_df_levels(pd.concat([self.stock.coefficients.loc[:,year].to_frame()]*len(keys), keys=keys,names=name).fillna(0),['supply_technology', 'vintage','resource_bin'])
            self.active_coefficients_untraded = copy.deepcopy(self.active_coefficients)
            # NOTE: .sort() is the legacy pandas API (sorts the index here)
            self.active_coefficients_untraded.sort(inplace=True,axis=0)
            self.active_coefficients_total_untraded = util.remove_df_levels(self.active_coefficients,['efficiency_type']).reorder_levels([cfg.primary_geography,'demand_sector', 'supply_node']).sort().fillna(0)
        else:
            self.active_coefficients = util.remove_df_levels(self.stock.coefficients.loc[:,year].to_frame().fillna(0),['supply_technology', 'vintage','resource_bin'])
            self.active_coefficients_untraded = copy.deepcopy(self.active_coefficients)
            self.active_coefficients_untraded.sort(inplace=True,axis=0)
            self.active_coefficients_total_untraded = util.remove_df_levels(self.active_coefficients,['efficiency_type']).reorder_levels([cfg.primary_geography,'demand_sector', 'supply_node']).sort().fillna(0)
        # apply trade adjustments to the totals
        self.active_coefficients_total = DfOper.mult([self.add_column_index(self.active_coefficients_total_untraded),self.active_trade_adjustment_df]).fillna(0)
        nodes = list(set(self.active_trade_adjustment_df.index.get_level_values('supply_node')))
        if len(nodes):
            # replicate the trade adjustment for every efficiency_type present per input node,
            # so it can be multiplied against the efficiency_type-resolved coefficients
            df_list = []
            for node in nodes:
                trade_indexer = util.level_specific_indexer(self.active_trade_adjustment_df, 'supply_node', node)
                coefficient_indexer = util.level_specific_indexer(self.active_coefficients_untraded, 'supply_node', node)
                efficiency_types = list(set(self.active_coefficients_untraded.loc[coefficient_indexer,:].index.get_level_values('efficiency_type')))
                keys = efficiency_types
                name = ['efficiency_type']
                df = pd.concat([self.active_trade_adjustment_df.loc[trade_indexer,:]]*len(keys),keys=keys,names=name)
                df_list.append(df)
            active_trade_adjustment_df = pd.concat(df_list)
            self.active_coefficients = DfOper.mult([self.add_column_index(self.active_coefficients),active_trade_adjustment_df])
        # broadcast coefficients over every GHG for emissions accounting
        keys = self.ghgs
        name = ['ghg']
        self.active_emissions_coefficients = pd.concat([self.active_coefficients]*len(keys), keys=keys, names=name)
        self.active_emissions_coefficients = self.active_emissions_coefficients.reorder_levels([cfg.primary_geography,'demand_sector', 'supply_node', 'efficiency_type', 'ghg'])
        self.active_emissions_coefficients.sort(inplace=True)
def calculate_dispatch_coefficients(self, year,loop):
"""
Calculates operating costs for all technology/vintage combinations
"""
if loop == 3:
if hasattr(self.stock,'dispatch_coefficients'):
self.stock.dispatch_coefficients.loc[:,year] = self.rollover_output(tech_class='efficiency',
stock_att='exist',year=year)
else:
index = self.rollover_output(tech_class='efficiency',stock_att='exist',year=year).index
self.stock.dispatch_coefficients = util.empty_df(index, columns=self.years,fill_value=0)
self.stock.dispatch_coefficients.loc[:,year] = self.rollover_output(tech_class='efficiency',
stock_att='exist',year=year)
self.active_dispatch_coefficients = self.stock.dispatch_coefficients.loc[:,year].to_frame()
self.active_dispatch_coefficients= util.remove_df_levels(self.active_dispatch_coefficients,['efficiency_type'])
def calculate_capacity_utilization(self,energy_supply,supply_years):
energy_stock = self.stock.values[supply_years] * util.unit_convert(1,unit_from_den='hour', unit_to_den='year')
self.capacity_utilization = util.DfOper.divi([energy_supply,energy_stock],expandable=False).replace([np.inf,np.nan,-np.nan],[0,0,0])
    def rollover_output(self, tech_class=None, tech_att='values', stock_att=None, year=None, non_expandable_levels=('year', 'vintage'),fill_value=0.0):
        """ Produces rollover outputs for a node stock based on the tech_class class, att of the class, and the attribute of the stock

        tech_class -- name (or list of names) of the technology sub-object to read, e.g. 'efficiency'
        tech_att -- attribute on that sub-object to multiply against the stock
        stock_att -- attribute on self.stock providing the stock dataframe
        year -- model year; selects either a year column or a vintage slice
        Returns the stock multiplied by the technology data; if no technology
        carries the attribute, returns a frame of fill_value shaped like the stock.
        """
        stock_df = getattr(self.stock, stock_att)
        # legacy pandas .sort() returns a sorted copy; stock_df is rebound, not mutated
        stock_df = stock_df.sort()
        tech_classes = util.put_in_list(tech_class)
        tech_dfs = []
        for tech_class in tech_classes:
            # gather only the technology dataframes that actually carry data for this attribute
            tech_dfs += ([self.reformat_tech_df(stock_df, tech, tech_class, tech_att, tech.id, year) for tech in
                          self.technologies.values() if
                          hasattr(getattr(tech, tech_class), tech_att) and getattr(getattr(tech, tech_class),tech_att) is not None])
        if len(tech_dfs):
            # align all tech frames on a common level order before concatenating
            first_tech_order = tech_dfs[0].index.names
            tech_df = pd.concat([x.reorder_levels(first_tech_order) for x in tech_dfs])
            tech_df = tech_df.reorder_levels([x for x in stock_df.index.names if x in tech_df.index.names]+ [x for x in tech_df.index.names if x not in stock_df.index.names])
            tech_df = tech_df.sort()
            if year in stock_df.columns.values:
                # stock has a year column: multiply the year's stock slice by the tech data
                result_df = util.DfOper.mult((stock_df.loc[:,year].to_frame(),tech_df), expandable=(True, True), collapsible=(True, False),non_expandable_levels=non_expandable_levels,fill_value=fill_value)
            else:
                # stock is vintage-indexed: multiply the matching vintage slices
                stock_vintage_indexer = util.level_specific_indexer(stock_df,'vintage', year)
                tech_vintage_indexer = util.level_specific_indexer(tech_df, 'vintage', year)
                result_df = DfOper.mult((stock_df.loc[stock_vintage_indexer,:], tech_df.loc[tech_vintage_indexer,:]), expandable=(True, True), collapsible=(True, False), non_expandable_levels=non_expandable_levels,fill_value=fill_value)
                #TODO shouldn't be necessary
                result_vintage_indexer = util.level_specific_indexer(result_df, 'vintage', year)
                result_df = result_df.loc[result_vintage_indexer,:]
            return result_df
        else:
            # no technology has this attribute: return fill_value shaped like the stock
            # (assignments below touch the local sorted copy, not the stock attribute)
            if year in stock_df.columns.values:
                stock_df.loc[:,year] = fill_value
                return stock_df.loc[:,year].to_frame()
            else:
                vintage_indexer = util.level_specific_indexer(stock_df,'vintage', year)
                stock_df.loc[vintage_indexer,:] = fill_value
                return stock_df.loc[vintage_indexer,:]
    def reformat_tech_df(self, stock_df, tech, tech_class, tech_att, tech_id, year):
        """
        reformat technology dataframes for use in stock-level dataframe operations

        Reads tech_att from the technology (or from its tech_class sub-object),
        ensures a 'supply_technology' index level, and slices to `year` —
        either the year column (year/vintage frames) or the vintage row.
        NOTE: adding the 'supply_technology' level mutates the technology's
        stored dataframe in place; the index-name guard keeps it from being
        re-added on subsequent calls.
        """
        if tech_class is None:
            tech_df = getattr(tech, tech_att)
        else:
            tech_df = getattr(getattr(tech, tech_class), tech_att)
        if 'supply_technology' not in tech_df.index.names:
            # in-place mutation of the tech's dataframe (guarded so it happens once)
            tech_df['supply_technology'] = tech_id
            tech_df.set_index('supply_technology', append=True, inplace=True)
        if year in tech_df.columns.values:
            #tech df has a year/vintage structure. We locate the values for year of all vintages
            tech_df = tech_df.loc[:,year].to_frame()
        else:
            #tech has a vintage/value structure. We locate the values for the year's vintage
            indexer = util.level_specific_indexer(tech_df, 'vintage', year)
            tech_df = tech_df.loc[indexer,:]
        return tech_df
class StorageNode(SupplyStockNode):
    """Supply stock node for storage technologies.

    Extends SupplyStockNode with capital costs denominated both in energy
    (scaled by each technology's discharge duration) and in capacity, and with
    dispatch coefficients normalized by technology rather than by rollover group.
    """
    def __init__(self, id, supply_type, scenario, **kwargs):
        SupplyStockNode.__init__(self, id, supply_type, scenario, **kwargs)

    def add_technology(self, id, **kwargs):
        """
        Adds technology instances to node
        """
        if id in self.technologies:
            # ToDo note that a technology was added twice
            return
        self.technologies[id] = StorageTechnology(id, self.cost_of_capital, self.scenario, **kwargs)
        self.tech_ids.append(id)
        self.tech_ids.sort()

    def calculate_levelized_costs(self,year, loop):
        """Calculates per-unit levelized costs in a subsector with technologies.

        Capital costs are split into energy- and capacity-denominated components;
        the energy component uses the financial stock scaled by discharge duration.
        Sets active_embodied_cost (cost per unit of active supply) for `year`.
        """
        if not hasattr(self.stock,'capital_cost_new_energy'):
            # first pass: allocate full-horizon zero containers for every cost component
            index = self.rollover_output(tech_class='capital_cost_new_energy', tech_att= 'values_level', stock_att='values_normal_energy',year=year).index
            self.stock.capital_cost_new_energy= util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.capital_cost_replacement_energy = util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.capital_cost_new_capacity= util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.capital_cost_replacement_capacity = util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.capital_cost = util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.installation_cost_new = util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.installation_cost_replacement = util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.installation_cost = util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.fixed_om = util.empty_df(index, columns=self.years,fill_value=0)
            self.stock.variable_om = util.empty_df(index, columns=self.years,fill_value= 0)
            self.levelized_costs = util.empty_df(index, columns=self.years,fill_value= 0)
            index = pd.MultiIndex.from_product(self.stock.rollover_group_levels, names=self.stock.rollover_group_names)
            self.embodied_cost = util.empty_df(index, columns=self.years,fill_value=0)
        # energy-denominated financial stock = capacity stock * each tech's discharge duration
        self.stock.values_financial_new_energy = copy.deepcopy(self.stock.values_financial_new.loc[:,year].to_frame())
        self.stock.values_financial_replacement_energy = copy.deepcopy(self.stock.values_financial_replacement.loc[:,year].to_frame())
        for tech in self.technologies.values():
            tech_indexer = util.level_specific_indexer(self.stock.values_financial_new,'supply_technology', tech.id)
            self.stock.values_financial_new_energy.loc[tech_indexer,:] = self.stock.values_financial_new.loc[tech_indexer,year].to_frame() * tech.discharge_duration
            self.stock.values_financial_replacement_energy.loc[tech_indexer,:] = self.stock.values_financial_replacement.loc[tech_indexer,year].to_frame() * tech.discharge_duration
        self.stock.capital_cost_new_energy.loc[:,year] = self.rollover_output(tech_class='capital_cost_new_energy',tech_att='values_level',
                                                         stock_att='values_financial_new_energy',year=year)
        self.stock.capital_cost_replacement_energy.loc[:,year] = self.rollover_output(tech_class='capital_cost_replacement_energy',tech_att='values_level',
                                                         stock_att='values_financial_replacement_energy',year=year)
        self.stock.capital_cost_new_capacity.loc[:,year] = self.rollover_output(tech_class='capital_cost_new_capacity',tech_att='values_level',
                                                         stock_att='values_financial_new',year=year)
        self.stock.capital_cost_replacement_capacity.loc[:,year] = self.rollover_output(tech_class='capital_cost_replacement_capacity',tech_att='values_level',
                                                         stock_att='values_financial_replacement',year=year)
        self.stock.fixed_om.loc[:,year]= self.rollover_output(tech_class='fixed_om',tech_att='values_level',
                                                      stock_att='values',year=year)
        # variable O&M is weighted by actual throughput (active_supply)
        self.stock.variable_om.loc[:,year] = DfOper.mult([self.rollover_output(tech_class='variable_om',tech_att='values_level', stock_att='values_normal_energy',year=year), self.active_supply],non_expandable_levels=None).groupby(level=self.stock.variable_om.index.names).sum()
        self.stock.installation_cost_new.loc[:,year] = self.rollover_output(tech_class='installation_cost_new',tech_att='values_level',
                                                       stock_att='values_financial_new',year=year)
        self.stock.installation_cost_replacement.loc[:,year] = self.rollover_output(tech_class='installation_cost_replacement',tech_att='values_level',
                                                       stock_att='values_financial_replacement',year=year)
        self.stock.capital_cost.loc[:,year] = DfOper.add([self.stock.capital_cost_new_energy.loc[:,year].to_frame(), self.stock.capital_cost_replacement_energy.loc[:,year].to_frame(),self.stock.capital_cost_new_capacity.loc[:,year].to_frame(), self.stock.capital_cost_replacement_capacity.loc[:,year].to_frame()])
        self.stock.installation_cost.loc[:,year] = DfOper.add([self.stock.installation_cost_new.loc[:,year].to_frame(), self.stock.installation_cost_replacement.loc[:,year].to_frame()])
        self.levelized_costs.loc[:,year]= DfOper.add([self.stock.capital_cost.loc[:,year].to_frame(), self.stock.installation_cost.loc[:,year].to_frame(), self.stock.fixed_om.loc[:,year].to_frame(), self.stock.variable_om.loc[:,year].to_frame()])
        total_costs = util.remove_df_levels(self.levelized_costs.loc[:,year].to_frame(),['vintage', 'supply_technology'])
#        total_stock = util.remove_df_levels(self.stock.values_energy.loc[:,year].to_frame(), ['vintage', 'supply_technology'])
        self.embodied_cost.loc[:,year] = DfOper.divi([total_costs, self.active_supply.groupby(level=self.stock.rollover_group_names).sum()])
        self.active_embodied_cost = util.expand_multi(self.embodied_cost[year].to_frame(), levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],levels_names=[cfg.primary_geography,'demand_sector'])

    def calculate_annual_costs(self,year):
        """Calculates annual (unlevelized) costs for the vintage installed in `year`."""
        if not hasattr(self.stock,'capital_cost_new_annual_energy'):
            # first pass: allocate containers indexed like sales
            index = self.stock.sales.index
            self.stock.capital_cost_new_annual_energy= util.empty_df(index, columns=['value'],fill_value=0)
            self.stock.capital_cost_new_annual_capacity = util.empty_df(index, columns=['value'],fill_value=0)
            self.stock.capital_cost_replacement_annual_energy = util.empty_df(index, columns=['value'],fill_value=0)
            self.stock.capital_cost_replacement_annual_capacity = util.empty_df(index, columns=['value'],fill_value=0)
            self.stock.installation_cost_new_annual = util.empty_df(index, columns=['value'],fill_value=0)
            self.stock.installation_cost_replacement_annual = util.empty_df(index, columns=['value'],fill_value=0)
        indexer = util.level_specific_indexer(self.stock.capital_cost_new_annual_energy,'vintage', year)
        self.stock.capital_cost_new_annual_energy.loc[indexer,:] = self.rollover_output(tech_class='capital_cost_new_energy', tech_att= 'values', stock_att='sales_new',year=year)
        self.stock.capital_cost_new_annual_capacity.loc[indexer,:] = self.rollover_output(tech_class='capital_cost_new_capacity', tech_att= 'values', stock_att='sales_new',year=year)
        self.stock.capital_cost_replacement_annual_energy.loc[indexer,:] = self.rollover_output(tech_class='capital_cost_replacement_energy', tech_att= 'values', stock_att='sales_replacement',year=year)
        self.stock.capital_cost_replacement_annual_capacity.loc[indexer,:] = self.rollover_output(tech_class='capital_cost_replacement_capacity', tech_att= 'values', stock_att='sales_replacement',year=year)
        self.stock.installation_cost_new_annual.loc[indexer,:] = self.rollover_output(tech_class='installation_cost_new', tech_att= 'values', stock_att='sales_new',year=year)
        # bug fix: this line previously reused tech_class='installation_cost_new' and
        # stock_att='sales_new' (copy-paste), so replacement installation costs
        # double-counted new installs and ignored replacement sales entirely
        self.stock.installation_cost_replacement_annual.loc[indexer,:] = self.rollover_output(tech_class='installation_cost_replacement', tech_att= 'values', stock_att='sales_replacement',year=year)

    def concatenate_annual_costs(self):
        """Concatenates all annual cost components into final_annual_costs, keyed by cost_type."""
        # fixed/variable O&M are year-columned; restack them to match the vintage-indexed frames
        fixed_om = util.remove_df_levels(self.stock.fixed_om,'vintage').stack().to_frame()
        util.replace_index_name(fixed_om,'vintage')
        fixed_om.columns =['value']
        variable_om = util.remove_df_levels(self.stock.variable_om,'vintage').stack().to_frame()
        util.replace_index_name(variable_om,'vintage')
        variable_om.columns =['value']
        keys = ['capital cost energy - new', 'capital cost capacity - new', 'capital cost energy - replacement', 'capital cost capacity - replacement', 'installation cost - new','installation cost - replacement',
                'fixed om', 'variable om']
        keys = [x.upper() for x in keys]
        names = ['cost_type']
        self.final_annual_costs = pd.concat([self.stock.capital_cost_new_annual_energy, self.stock.capital_cost_new_annual_capacity, self.stock.capital_cost_replacement_annual_energy,self.stock.capital_cost_replacement_annual_capacity,
                                             self.stock.installation_cost_new_annual,
                                             self.stock.installation_cost_replacement_annual, fixed_om, variable_om], keys=keys, names=names)

    def concatenate_levelized_costs(self):
        """Concatenates all levelized cost components into final_levelized_costs, keyed by cost_type."""
        keys = ['capital cost energy - new', 'capital cost capacity - new', 'capital cost energy - replacement', 'capital cost capacity - replacement', 'installation cost - new','installation cost - replacement',
                'fixed om', 'variable om']
        keys = [x.upper() for x in keys]
        names = ['cost_type']
        self.final_levelized_costs = pd.concat([self.stock.capital_cost_new_energy, self.stock.capital_cost_new_capacity, self.stock.capital_cost_replacement_energy,self.stock.capital_cost_replacement_capacity,
                                                self.stock.installation_cost_new,
                                                self.stock.installation_cost_replacement, self.stock.fixed_om, self.stock.variable_om], keys=keys, names=names)

    def calculate_dispatch_coefficients(self, year,loop):
        """
        Calculates efficiency coefficients of the stock for use in dispatch (loop 3 only).

        For storage the stock is normalized per technology (values_normal_tech)
        rather than per rollover group.
        """
        self.stock.values_normal_tech = self.stock.values.loc[:,year].to_frame().groupby(level=[cfg.primary_geography,'supply_technology']).transform(lambda x: x/x.sum())
        self.stock.values_normal_tech.replace(np.inf,0,inplace=True)
        if loop == 3:
            if hasattr(self.stock,'dispatch_coefficients'):
                self.stock.dispatch_coefficients.loc[:,year] = self.rollover_output(tech_class='efficiency',
                                                                     stock_att='values_normal_tech',year=year)
            else:
                # first call: allocate the full-horizon container, then fill this year
                index = self.rollover_output(tech_class='efficiency',stock_att='exist',year=year).index
                self.stock.dispatch_coefficients = util.empty_df(index, columns=self.years,fill_value=0)
                self.stock.dispatch_coefficients.loc[:,year] = self.rollover_output(tech_class='efficiency',
                                                                     stock_att='values_normal_tech',year=year)
            self.active_dispatch_coefficients = self.stock.dispatch_coefficients.loc[:,year].to_frame().groupby(level=[cfg.primary_geography,'supply_node','supply_technology']).sum()
            self.active_dispatch_coefficients= util.remove_df_levels(self.active_dispatch_coefficients,['efficiency_type'])
class ImportNode(Node):
    """Supply node whose energy balance is met by imports.

    Carries an ImportCost (per-unit cost of imported energy), an optional
    SupplyPotential (supply curve) and SupplyCoefficients (inputs the node
    requires from other supply nodes).
    """
    def __init__(self, id, supply_type, scenario, **kwargs):
        Node.__init__(self,id, supply_type, scenario)
        # sub-objects load their own data tables; their .data/.raw_values flag availability
        self.cost = ImportCost(self.id, scenario)
        self.potential = SupplyPotential(self.id, self.enforce_potential_constraint, self.scenario)
        self.coefficients = SupplyCoefficients(self.id, self.scenario)

    def calculate(self):
        #all nodes can have potential conversions. Set to None if no data.
        self.conversion, self.resource_unit = self.add_conversion()
        # self.set_rollover_groups()
        self.calculate_subclasses()
        if self.coefficients.raw_values is not None:
            # the supply nodes this node draws inputs from (used for trade and pass-through accounting)
            self.nodes = list(set(self.coefficients.values.index.get_level_values('supply_node')))
            self.set_trade_adjustment_dict()
            self.set_pass_through_df_dict()
        self.set_cost_dataframes()

    def calculate_levelized_costs(self,year,loop):
        "calculates the embodied costs of nodes with emissions"
        if hasattr(self,'cost') and self.cost.data is True:
            if hasattr(self,'potential') and self.potential.data is True and self.cost.cost_method == 'average':
                # potential-weighted average cost along the supply curve
                self.active_embodied_cost = self.calculate_avg_costs(year)
            elif hasattr(self,'potential') and self.potential.data is True and self.cost.cost_method == 'marginal':
                # cost of the most expensive utilized supply-curve bin
                self.active_embodied_cost = self.calculate_marginal_costs(year)
            else:
                # no potential data: cost inputs may only vary by geography and demand sector
                allowed_indices = ['demand_sector', cfg.primary_geography]
                if set(self.cost.values.index.names).issubset(allowed_indices):
                    self.active_embodied_cost = self.cost.values.loc[:,year].to_frame()
                    self.active_embodied_cost = util.expand_multi(self.active_embodied_cost, levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],levels_names=[cfg.primary_geography,'demand_sector'])
                else:
                    raise ValueError("too many indexes in cost inputs of node %s" %self.id)
            self.levelized_costs.loc[:,year] = DfOper.mult([self.active_embodied_cost,self.active_supply]).values

    def calculate_annual_costs(self,year):
        # for an import node annual costs equal embodied cost times supply (no capital stock)
        if hasattr(self,'active_embodied_cost'):
            self.annual_costs.loc[:,year] = DfOper.mult([self.active_embodied_cost,self.active_supply]).values
            if year == int(cfg.cfgfile.get('case', 'end_year')):
                self.final_annual_costs = self.annual_costs

    def calculate_avg_costs(self,year):
        """Potential-weighted average import cost by geography/demand sector for `year`."""
        # drop geographies filtered out of the current model run
        filter_geo_potential_normal = util.remove_df_elements(self.potential.active_supply_curve_normal, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
        filter_geo_potential_normal = filter_geo_potential_normal.reset_index().set_index(filter_geo_potential_normal.index.names)
        supply_curve = filter_geo_potential_normal
        # keep only utilized bins
        supply_curve = supply_curve[supply_curve.values>0]
        # weight each bin by its share of total potential within geography/sector
        supply_curve = util.DfOper.mult([util.DfOper.divi([self.potential.values.loc[:,year].to_frame(),
                                        util.remove_df_levels(self.potential.values.loc[:,year].to_frame(),[x for x in self.potential.values.index.names if x not in ['demand_sector',cfg.primary_geography,'resource_bin']])]),
                                        supply_curve])
        cost = util.DfOper.mult([supply_curve,self.cost.values.loc[:,year].to_frame()])
        levels = ['demand_sector',cfg.primary_geography]
        cost = cost.groupby(level = [x for x in levels if x in cost.index.names]).sum()
        return util.expand_multi(cost, levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],
                            levels_names=[cfg.primary_geography,'demand_sector']).replace([np.nan,np.inf],0)

    def calculate_marginal_costs(self,year):
        """Marginal import cost: the max cost among utilized bins for `year`,
        shared across the node's tradable geography."""
        filter_geo_potential_normal = util.remove_df_elements(self.potential.active_supply_curve_normal, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
        filter_geo_potential_normal = filter_geo_potential_normal.reset_index().set_index(filter_geo_potential_normal.index.names)
        supply_curve = filter_geo_potential_normal
        supply_curve = supply_curve[supply_curve.values>0]
        supply_curve = util.DfOper.mult([util.DfOper.divi([self.potential.values.loc[:,year].to_frame(),
                                        util.remove_df_levels(self.potential.values.loc[:,year].to_frame(),[x for x in self.potential.values.index.names if x not in ['demand_sector',cfg.primary_geography,'resource_bin']])]),
                                        supply_curve])
        # flag (not weight) utilized bins so max() picks the marginal (most expensive) one
        supply_curve[supply_curve.values>0] = 1
        cost = util.DfOper.mult([supply_curve,self.cost.values.loc[:,year].to_frame()])
        levels = ['demand_sector',cfg.primary_geography]
        tradable_levels = ['demand_sector',self.tradable_geography]
        # map to the tradable geography so the marginal cost is shared across trading regions
        map_df = cfg.geo.map_df(cfg.primary_geography,self.tradable_geography,normalize_as='total',eliminate_zeros=False,filter_geo=False)
        traded_cost = util.DfOper.mult([cost,map_df])
        traded_cost = traded_cost.groupby(level=[x for x in tradable_levels if x in traded_cost.index.names]).transform(lambda x: x.max())
        cost = traded_cost.groupby(level = [x for x in levels if x in cost.index.names]).max()
        return util.expand_multi(cost, levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],
                            levels_names=[cfg.primary_geography,'demand_sector']).replace([np.nan,np.inf],0)
class ImportCost(Abstract):
    """Per-unit cost data for an ImportNode, read from the ImportCost tables."""
    def __init__(self, id, scenario, **kwargs):
        self.id = id
        self.scenario = scenario
        self.input_type = 'intensity'
        self.sql_id_table = 'ImportCost'
        self.sql_data_table = 'ImportCostData'
        Abstract.__init__(self, self.id, 'import_node_id')

    def calculate(self, years, demand_sectors):
        """Remap and unit-convert cost inputs when data exists."""
        self.years = years
        self.demand_sectors = demand_sectors
        if self.data is not True:
            return
        self.remap()
        self.convert()

    def convert(self):
        """
        return cost values converted to model energy and currency unit
        """
        converted = util.currency_convert(self.values, self.currency_id, self.currency_year_id)
        converted = util.unit_convert(converted, unit_from_den=self.denominator_unit,
                                      unit_to_den=cfg.calculation_energy_unit)
        # pivot years into columns and flatten the resulting column index
        converted = converted.unstack(level='year')
        converted.columns = converted.columns.droplevel()
        self.values = converted
class PrimaryNode(Node):
    """Supply node for a primary energy resource (e.g. a fuel or feedstock).

    Carries a SupplyPotential (supply curve), a PrimaryCost and
    SupplyCoefficients; supports optional resource-unit conversion.
    """
    def __init__(self, id, supply_type, scenario, **kwargs):
        Node.__init__(self,id, supply_type, scenario)
        # sub-objects load their own data tables; their .data flags availability
        self.potential = SupplyPotential(self.id, self.enforce_potential_constraint, self.scenario)
        self.cost = PrimaryCost(self.id, scenario)
        self.coefficients = SupplyCoefficients(self.id, self.scenario)

    def calculate(self):
        #all nodes can have potential conversions. Set to None if no data.
        self.conversion, self.resource_unit = self.add_conversion()
        if self.conversion is not None:
            self.conversion.calculate(self.resource_unit)
        # potential, cost and emissions all need the conversion to express values in energy terms
        self.potential.calculate(self.conversion, self.resource_unit)
        self.cost.calculate(self.conversion, self.resource_unit)
        self.emissions.calculate(self.conversion, self.resource_unit)
        self.coefficients.calculate(self.years, self.demand_sectors)
        if self.coefficients.data is True:
            # the supply nodes this node draws inputs from (used for trade and pass-through accounting)
            self.nodes = list(set(self.coefficients.values.index.get_level_values('supply_node')))
            self.set_adjustments()
            self.set_pass_through_df_dict()
        self.set_cost_dataframes()
        self.export.calculate(self.years, self.demand_sectors)

    def calculate_levelized_costs(self,year,loop):
        "calculates the embodied costs of nodes with emissions"
        if hasattr(self,'cost') and self.cost.data is True:
            if hasattr(self,'potential') and self.potential.data is True and self.cost.cost_method == 'average':
                # potential-weighted average cost along the supply curve
                self.active_embodied_cost = self.calculate_avg_costs(year)
            elif hasattr(self,'potential') and self.potential.data is True and self.cost.cost_method == 'marginal':
                # cost of the most expensive utilized supply-curve bin
                self.active_embodied_cost = self.calculate_marginal_costs(year)
            else:
                # no potential data: cost inputs may only vary by geography and demand sector
                allowed_indices = ['demand_sector', cfg.primary_geography]
                if set(self.cost.values.index.names).issubset(allowed_indices):
                    self.active_embodied_cost = self.cost.values.loc[:,year].to_frame()
                    self.active_embodied_cost = util.expand_multi(self.active_embodied_cost, levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],levels_names=[cfg.primary_geography,'demand_sector'])
                else:
                    raise ValueError("too many indexes in cost inputs of node %s" %self.id)
            self.levelized_costs.loc[:,year] = DfOper.mult([self.active_embodied_cost,self.active_supply]).values

    def calculate_annual_costs(self,year):
        # for a primary node annual costs equal embodied cost times supply (no capital stock)
        if hasattr(self,'active_embodied_cost'):
            self.annual_costs.loc[:,year] = DfOper.mult([self.active_embodied_cost,self.active_supply]).values
            if year == int(cfg.cfgfile.get('case', 'end_year')):
                self.final_annual_costs = self.annual_costs

    def calculate_avg_costs(self,year):
        """Potential-weighted average resource cost by geography/demand sector for `year`."""
        # drop geographies filtered out of the current model run
        filter_geo_potential_normal = util.remove_df_elements(self.potential.active_supply_curve_normal, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
        filter_geo_potential_normal = filter_geo_potential_normal.reset_index().set_index(filter_geo_potential_normal.index.names)
        supply_curve = filter_geo_potential_normal
        # NOTE(review): this filter keeps zero-valued bins (>= 0) while ImportNode's
        # version of this method filters with > 0 — confirm which is intended
        supply_curve = supply_curve[supply_curve.values>=0]
        # weight each bin by its share of total potential within geography/sector
        supply_curve = util.DfOper.mult([util.DfOper.divi([self.potential.values.loc[:,year].to_frame(),
                                        util.remove_df_levels(self.potential.values.loc[:,year].to_frame(),[x for x in self.potential.values.index.names if x not in ['demand_sector',cfg.primary_geography,'resource_bin']])]),
                                        supply_curve])
        cost = util.DfOper.mult([supply_curve,self.cost.values.loc[:,year].to_frame()])
        levels = ['demand_sector',cfg.primary_geography]
        cost = cost.groupby(level = [x for x in levels if x in cost.index.names]).sum()
        return util.expand_multi(cost, levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],
                            levels_names=[cfg.primary_geography,'demand_sector']).replace([np.nan,np.inf],0)

    def calculate_marginal_costs(self,year):
        """Marginal resource cost: the max cost among utilized bins for `year`,
        shared across the node's tradable geography."""
        filter_geo_potential_normal = util.remove_df_elements(self.potential.active_supply_curve_normal, [x for x in cfg.geo.geographies_unfiltered[cfg.primary_geography] if x not in cfg.geographies],cfg.primary_geography)
        filter_geo_potential_normal = filter_geo_potential_normal.reset_index().set_index(filter_geo_potential_normal.index.names)
        supply_curve = filter_geo_potential_normal
        supply_curve = supply_curve[supply_curve.values>0]
        supply_curve = util.DfOper.mult([util.DfOper.divi([self.potential.values.loc[:,year].to_frame(),
                                        util.remove_df_levels(self.potential.values.loc[:,year].to_frame(),[x for x in self.potential.values.index.names if x not in ['demand_sector',cfg.primary_geography,'resource_bin']])]),
                                        supply_curve])
        # flag (not weight) utilized bins so max() picks the marginal (most expensive) one
        supply_curve[supply_curve.values>0] = 1
        cost = util.DfOper.mult([supply_curve,self.cost.values.loc[:,year].to_frame()])
        levels = ['demand_sector',cfg.primary_geography]
        tradable_levels = ['demand_sector',self.tradable_geography]
        # map to the tradable geography so the marginal cost is shared across trading regions
        map_df = cfg.geo.map_df(cfg.primary_geography,self.tradable_geography,normalize_as='total',eliminate_zeros=False,filter_geo=False)
        traded_cost = util.DfOper.mult([cost,map_df])
        traded_cost = traded_cost.groupby(level=[x for x in tradable_levels if x in traded_cost.index.names]).transform(lambda x: x.max())
        cost = traded_cost.groupby(level = [x for x in levels if x in cost.index.names]).max()
        return util.expand_multi(cost, levels_list = [cfg.geo.geographies[cfg.primary_geography], self.demand_sectors],
                            levels_names=[cfg.primary_geography,'demand_sector']).replace([np.nan,np.inf],0)
class PrimaryCost(Abstract):
    """Cost data for a PrimaryNode, read from the PrimaryCost tables.

    Costs may be input per unit of energy or per unit of resource; convert()
    brings them into model currency and model energy units either way.
    """
    def __init__(self, id, scenario, node_geography=None, **kwargs):
        self.id = id
        self.scenario = scenario
        self.input_type = 'intensity'
        self.sql_id_table = 'PrimaryCost'
        self.sql_data_table = 'PrimaryCostData'
        Abstract.__init__(self, self.id, 'primary_node_id')

    def calculate(self, conversion, resource_unit):
        """Remap and unit-convert cost inputs when data exists.

        conversion -- resource-to-energy conversion object (or None)
        resource_unit -- the node's physical resource unit (e.g. tons)
        """
        if self.data is not True:
            return
        self.conversion = conversion
        self.resource_unit = resource_unit
        self.remap()
        self.convert()

    def convert(self):
        """Convert cost inputs to model currency and model energy units."""
        # pivot years into columns and flatten the resulting column index
        pivoted = self.values.unstack(level='year')
        pivoted.columns = pivoted.columns.droplevel()
        self.values = pivoted
        if self.conversion is None:
            # no resource conversion available: straight currency + energy-unit conversion
            self.values = util.currency_convert(self.values, self.currency_id, self.currency_year_id)
            self.values = util.unit_convert(self.values, unit_from_den=self.denominator_unit,
                                            unit_to_den=cfg.calculation_energy_unit)
            return
        # a conversion dataframe exists: inputs may be denominated in energy or resource terms
        self.energy = util.determ_energy(self.denominator_unit)
        # currency conversion is identical on both paths, so do it once up front
        self.values = util.currency_convert(self.values, self.currency_id, self.currency_year_id)
        if self.energy:
            # energy-denominated input (e.g. $/GJ): convert the denominator to the
            # model energy unit; resource values would follow by multiplying with
            # the conversion dataframe
            self.values = util.unit_convert(self.values, unit_from_den=self.denominator_unit,
                                            unit_to_den=cfg.calculation_energy_unit)
        else:
            # resource-denominated input (e.g. $/kg of biomass): convert the denominator
            # to the node resource unit (e.g. $/ton), then divide by the conversion
            # dataframe to express the cost per unit of energy
            self.values = util.unit_convert(self.values, unit_from_den=self.denominator_unit,
                                            unit_to_den=self.resource_unit)
            self.values = DfOper.divi([self.values, self.conversion.values])
class SupplyEmissions(Abstract):
    """Emissions intensities for a supply node (mass of GHG per unit of energy or resource)."""
    def __init__(self, id, scenario, **kwargs):
        # identifies the supply node and the database tables the intensity data is read from
        self.id = id
        self.input_type = 'intensity'
        self.sql_id_table = 'SupplyEmissions'
        self.sql_data_table = 'SupplyEmissionsData'
        self.scenario = scenario
        Abstract.__init__(self, self.id,primary_key='supply_node_id')
    def calculate(self, conversion, resource_unit):
        """Remap raw intensity data, convert its units, and split into physical/accounting values.

        conversion: dataframe of energy-to-resource conversion factors (or None)
        resource_unit: the node's native resource unit (e.g. tons of biomass)
        """
        if self.data is True:
            self.conversion = conversion
            self.resource_unit=resource_unit
            self.remap(lower=None)
            self.convert()
            self.calculate_physical_and_accounting()
    def convert(self):
        """Convert emissions values to model mass units per model energy (or resource) unit."""
        # pivot years into columns so unit conversion operates on a year-wide frame
        self.values = self.values.unstack(level='year')
        self.values.columns = self.values.columns.droplevel()
        if self.conversion is not None:
            # checks whether a conversion dataframe has been created and the values can
            # be represented in energy and resource terms
            self.energy = util.determ_energy(self.denominator_unit)
            if self.energy:
                # if the input values are in energy terms, the input mass unit and energy units are
                # converted to model mass units and energy units. These values are multiplied by the
                # conversion dataframe to produce resource values
                self.values = util.unit_convert(self.values, unit_from_num=self.mass_unit,
                                                unit_from_den=self.denominator_unit,
                                                unit_to_num=cfg.cfgfile.get('case', "mass_unit"),
                                                unit_to_den=cfg.calculation_energy_unit)
                # self.resource_values = DfOper.mult([self.values, self.conversion.values])
            else:
                # if the input values are in resource terms, values are converted from input mass and resource units
                # to model units and node resource units. These resource values are divided by the conversion
                # dataframe to produce values.
                self.values = util.unit_convert(self.values, unit_from_num=self.mass_unit,
                                                unit_from_den=self.denominator_unit,
                                                unit_to_num=cfg.cfgfile.get('case', "mass_unit"),
                                                unit_to_den=self.resource_unit)
                self.values = DfOper.divi([self.values, self.conversion.values])
        else:
            # if there is no conversion, then values are converted to model mass and energy units
            self.values = util.unit_convert(self.values, unit_from_num=self.mass_unit,
                                            unit_from_den=self.denominator_unit,
                                            unit_to_num=cfg.cfgfile.get('case', "mass_unit"),
                                            unit_to_den=cfg.calculation_energy_unit)
        # make sure every GHG in the database appears in the index (missing ones get 0 intensity)
        self.ghgs = util.sql_read_table('GreenhouseGases','id', return_iterable=True)
        self.values = util.reindex_df_level_with_new_elements(self.values,'ghg',self.ghgs,fill_value=0.).sort()
    def calculate_physical_and_accounting(self):
        """converts emissions intensities for use in physical and accounting emissions calculations"""
        # ghg_type 1 appears to be physical emissions, ghg_type 2 accounting — TODO confirm against GhgTypes table
        physical_emissions_indexer = util.level_specific_indexer(self.values, 'ghg_type', 1)
        self.values_physical = self.values.loc[physical_emissions_indexer,:]
        accounting_emissions_indexer = util.level_specific_indexer(self.values, 'ghg_type', 2)
        if 2 in self.values.index.get_level_values('ghg_type'):
            self.values_accounting = self.values.loc[accounting_emissions_indexer,:]
        else:
            # no accounting data: use a zeroed copy so downstream arithmetic still works
            self.values_accounting = self.values_physical * 0
class SupplyEnergyConversion(Abstract):
    """Conversion factors between energy units and a supply node's native resource unit."""
    def __init__(self, id, resource_unit, **kwargs):
        """
        creates a dataframe of conversion values from energy (i.e. exajoule) to
        resource terms (i.e. tons of biomass)
        """
        self.id = id
        self.input_type = 'intensity'
        self.sql_id_table = 'SupplyPotentialConversion'
        self.sql_data_table = 'SupplyPotentialConversionData'
        Abstract.__init__(self, self.id, 'supply_node_id')
    def calculate(self, resource_unit):
        """Remap the raw data and convert it to model energy units per node resource unit."""
        if self.data is True:
            self.resource_unit = resource_unit
            self.remap()
            self.values = util.unit_convert(self.values, unit_from_num=self.energy_unit_numerator,
                                            unit_from_den=self.resource_unit_denominator,
                                            unit_to_num=cfg.calculation_energy_unit,
                                            unit_to_den=self.resource_unit)
            # pivot years into columns for downstream matrix operations
            self.values = self.values.unstack(level='year')
            self.values.columns = self.values.columns.droplevel()
| {
"repo_name": "energyPATHWAYS/energyPATHWAYS",
"path": "energyPATHWAYS/supply.py",
"copies": "1",
"size": "427457",
"license": "mit",
"hash": 7401486428126340000,
"line_mean": 70.8656691325,
"line_max": 542,
"alpha_frac": 0.6277333159,
"autogenerated": false,
"ratio": 3.771790346774905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9823321416508862,
"avg_score": 0.015240449233208523,
"num_lines": 5948
} |
__author__ = 'Ben Haley & Ryan Jones'
import os
from demand import Demand
import util
from outputs import Output
import shutil
import config as cfg
from supply import Supply
import pandas as pd
import logging
import shape
import pdb
from scenario_loader import Scenario
import copy
import numpy as np
class PathwaysModel(object):
"""
Highest level classification of the definition of an energy system.
"""
    def __init__(self, scenario_id, api_run=False):
        """Load the scenario and build the demand side; the supply model is built later in run().

        Args:
            scenario_id (int): id of the scenario to load
            api_run (bool): when True, results go to the database instead of csv files
        """
        self.scenario_id = scenario_id
        self.scenario = Scenario(self.scenario_id)
        self.api_run = api_run
        self.outputs = Output()
        self.demand = Demand(self.scenario)
        # supply is deferred because it needs a solved demand object
        self.supply = None
        self.demand_solved, self.supply_solved = False, False
def run(self, scenario_id, solve_demand, solve_supply, load_demand, load_supply, export_results, save_models, append_results):
try:
if solve_demand and not (load_demand or load_supply):
self.calculate_demand(save_models)
if not append_results:
self.remove_old_results()
# it is nice if when loading a demand side object to rerun supply, it doesn't re-output these results every time
if self.demand_solved and export_results and not self.api_run and not (load_demand and solve_supply):
self.export_result_to_csv('demand_outputs')
if solve_supply and not load_supply:
if load_demand:
# if we are loading the demand, we are changing the supply measures and want to reload our scenarios
self.scenario = Scenario(self.scenario_id)
self.supply = Supply(self.scenario, demand_object=self.demand)
self.calculate_supply(save_models)
if load_demand and solve_supply:
# we do this now because we delayed before
self.export_result_to_csv('demand_outputs')
if self.supply_solved and export_results and load_supply or solve_supply:
self.supply.calculate_supply_outputs()
self.pass_supply_results_back_to_demand()
self.calculate_combined_results()
self.outputs.electricity_reconciliation = self.demand.electricity_reconciliation # we want to write these to outputs
if self.api_run:
self.export_results_to_db()
else:
self.export_result_to_csv('supply_outputs')
self.export_result_to_csv('combined_outputs')
self.export_io()
except:
# pickle the model in the event that it crashes
if save_models:
Output.pickle(self, file_name=str(scenario_id) + cfg.model_error_append_name, path=cfg.workingdir)
raise
    def calculate_demand(self, save_models):
        """Solve the demand side, compute optional payback outputs, and optionally pickle the model."""
        self.demand.setup_and_solve()
        self.demand_solved = True
        if cfg.output_payback == 'true':
            if self.demand.d_all_energy_demand_payback is not None:
                self.calculate_d_payback()
                self.calculate_d_payback_energy()
        if save_models:
            # a demand-only pickle lets later runs re-solve supply without re-solving demand
            Output.pickle(self, file_name=str(self.scenario_id) + cfg.demand_model_append_name, path=cfg.workingdir)
    def calculate_supply(self, save_models):
        """Configure and solve the supply side; requires a solved demand model.

        Raises:
            ValueError: if the demand side has not been solved yet.
        """
        if not self.demand_solved:
            raise ValueError('demand must be solved first before supply')
        logging.info('Configuring energy system supply')
        self.supply.add_nodes()
        self.supply.add_measures()
        self.supply.initial_calculate()
        self.supply.calculated_years = []
        self.supply.calculate_loop(self.supply.years, self.supply.calculated_years)
        self.supply.final_calculate()
        self.supply_solved = True
        if save_models:
            Output.pickle(self, file_name=str(self.scenario_id) + cfg.full_model_append_name, path=cfg.workingdir)
            # we don't need the demand side object any more, so we can remove it to save drive space
            if os.path.isfile(os.path.join(cfg.workingdir, str(self.scenario_id) + cfg.demand_model_append_name)):
                os.remove(os.path.join(cfg.workingdir, str(self.scenario_id) + cfg.demand_model_append_name))
def pass_supply_results_back_to_demand(self):
logging.info("Calculating link to supply")
self.demand.link_to_supply(self.supply.emissions_demand_link, self.supply.demand_emissions_rates, self.supply.energy_demand_link, self.supply.cost_demand_link)
if cfg.output_tco == 'true':
if hasattr(self,'d_energy_tco'):
self.demand.link_to_supply_tco(self.supply.emissions_demand_link, self.supply.demand_emissions_rates, self.supply.cost_demand_link)
else:
print "demand side has not been run with tco outputs set to 'true'"
if cfg.output_payback == 'true':
if hasattr(self,'demand.d_all_energy_demand_payback'):
self.demand.link_to_supply_payback(self.supply.emissions_demand_link, self.supply.demand_emissions_rates, self.supply.cost_demand_link)
else:
print "demand side has not been run with tco outputs set to 'true'"
    def calculate_combined_results(self):
        """Combine demand- and supply-side emissions, cost and energy results, plus optional tco/payback."""
        logging.info("Calculating combined emissions results")
        self.calculate_combined_emissions_results()
        logging.info("Calculating combined cost results")
        self.calculate_combined_cost_results()
        logging.info("Calculating combined energy results")
        self.calculate_combined_energy_results()
        if cfg.output_tco == 'true':
            if self.demand.d_energy_tco is not None:
                self.calculate_tco()
        if cfg.output_payback == 'true':
            if self.demand.d_all_energy_demand_payback is not None:
                self.calculate_payback()
def remove_old_results(self):
folder_names = ['combined_outputs', 'demand_outputs', 'supply_outputs', 'dispatch_outputs']
for folder_name in folder_names:
folder = os.path.join(cfg.workingdir, folder_name)
if os.path.isdir(folder):
shutil.rmtree(folder)
    def export_result_to_csv(self, result_name):
        """Write every DataFrame attribute of the chosen outputs object to csv.

        Args:
            result_name (str): one of 'combined_outputs', 'demand_outputs', 'supply_outputs';
                also used as the destination folder name.
        Raises:
            ValueError: if result_name is not one of the recognized names.
        """
        if result_name=='combined_outputs':
            res_obj = self.outputs
        elif result_name=='demand_outputs':
            res_obj = self.demand.outputs
        elif result_name=='supply_outputs':
            res_obj = self.supply.outputs
        else:
            raise ValueError('result_name not recognized')
        # every pandas DataFrame attribute on the outputs object becomes one csv file
        for attribute in dir(res_obj):
            if not isinstance(getattr(res_obj, attribute), pd.DataFrame):
                continue
            result_df = getattr(res_obj, 'return_cleaned_output')(attribute)
            # prepend SCENARIO and TIMESTAMP index levels so runs can be distinguished
            keys = [self.scenario.name.upper(), cfg.timestamp]
            names = ['SCENARIO','TIMESTAMP']
            for key, name in zip(keys, names):
                result_df = pd.concat([result_df], keys=[key], names=[name])
            if attribute in ('hourly_dispatch_results', 'electricity_reconciliation', 'hourly_marginal_cost', 'hourly_production_cost'):
                # Special case for hourly dispatch results where we want to write them outside of supply_outputs
                Output.write(result_df, attribute + '.csv', os.path.join(cfg.workingdir, 'dispatch_outputs'))
            else:
                Output.write(result_df, attribute+'.csv', os.path.join(cfg.workingdir, result_name))
    def export_results_to_db(self):
        """Write the headline result tables for an API run to the database.

        The integer passed to util.write_output_to_db identifies the output table/chart
        the rows belong to — presumably keyed to an OutputTypes-style table; verify in util.
        """
        scenario_run_id = util.active_scenario_run_id(self.scenario_id)
        # Levelized costs
        costs = self.outputs.c_costs.groupby(level=['SUPPLY/DEMAND', 'YEAR']).sum()
        util.write_output_to_db(scenario_run_id, 1, costs)
        #Energy
        energy = self.outputs.c_energy.xs('FINAL', level='ENERGY ACCOUNTING')\
            .groupby(level=['SECTOR', 'FINAL_ENERGY', 'YEAR']).sum()
        # Energy demand by sector
        util.write_output_to_db(scenario_run_id, 2, energy.groupby(level=['SECTOR', 'YEAR']).sum())
        # Residential Energy by Fuel Type
        util.write_output_to_db(scenario_run_id, 6, energy.xs('RESIDENTIAL', level='SECTOR'))
        # Commercial Energy by Fuel Type
        util.write_output_to_db(scenario_run_id, 8, energy.xs('COMMERCIAL', level='SECTOR'))
        # Transportation Energy by Fuel Type
        util.write_output_to_db(scenario_run_id, 10, energy.xs('TRANSPORTATION', level='SECTOR'))
        # Productive Energy by Fuel Type
        util.write_output_to_db(scenario_run_id, 12, energy.xs('PRODUCTIVE', level='SECTOR'))
        #Emissions
        emissions = self.outputs.c_emissions.xs('DOMESTIC', level='EXPORT/DOMESTIC')\
            .groupby(level=['SECTOR', 'FINAL_ENERGY', 'YEAR']).sum()
        emissions = util.DfOper.mult((emissions, 1-(emissions.abs()<1E-10).groupby(level='FINAL_ENERGY').all())) # get rid of noise
        # Annual emissions by sector
        util.write_output_to_db(scenario_run_id, 3, emissions.groupby(level=['SECTOR', 'YEAR']).sum())
        # Residential Emissions by Fuel Type
        util.write_output_to_db(scenario_run_id, 7, emissions.xs('RESIDENTIAL', level='SECTOR'))
        # Commercial Emissions by Fuel Type
        util.write_output_to_db(scenario_run_id, 9, emissions.xs('COMMERCIAL', level='SECTOR'))
        # Transportation Emissions by Fuel Type
        util.write_output_to_db(scenario_run_id, 11, emissions.xs('TRANSPORTATION', level='SECTOR'))
        # Productive Emissions by Fuel Type
        util.write_output_to_db(scenario_run_id, 13, emissions.xs('PRODUCTIVE', level='SECTOR'))
        # Domestic emissions per capita
        annual_emissions = self.outputs.c_emissions.xs('DOMESTIC', level='EXPORT/DOMESTIC').groupby(level=['YEAR']).sum()
        # drivers[2] appears to be the population driver — confirm against the Drivers table
        population_driver = self.demand.drivers[2].values.groupby(level='year').sum().loc[annual_emissions.index]
        population_driver.index.name = 'YEAR'
        factor = 1E6
        df = util.DfOper.divi((annual_emissions, population_driver)) * factor
        df.columns = ['TONNE PER CAPITA']
        util.write_output_to_db(scenario_run_id, 4, df)
        # Electricity supply
        electricity_node_names = [self.supply.nodes[nodeid].name.upper() for nodeid in util.flatten_list(self.supply.injection_nodes.values())]
        df = self.outputs.c_energy.xs('ELECTRICITY', level='FINAL_ENERGY')\
            .xs('EMBODIED', level='ENERGY ACCOUNTING')\
            .groupby(level=['SUPPLY_NODE', 'YEAR']).sum()
        util.write_output_to_db(scenario_run_id, 5, df.loc[electricity_node_names])
    def calculate_combined_cost_results(self):
        """Assemble export, embodied-supply and direct-demand costs into self.outputs.c_costs."""
        #calculate and format export costs
        cost_unit = cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')
        if self.supply.export_costs is not None:
            setattr(self.outputs,'export_costs',self.supply.export_costs)
            self.export_costs_df = self.outputs.return_cleaned_output('export_costs')
            del self.outputs.export_costs
            util.replace_index_name(self.export_costs_df, 'FINAL_ENERGY','SUPPLY_NODE_EXPORT')
            keys = ["EXPORT","SUPPLY"]
            names = ['EXPORT/DOMESTIC', "SUPPLY/DEMAND"]
            for key,name in zip(keys,names):
                self.export_costs_df = pd.concat([self.export_costs_df],keys=[key],names=[name])
            self.export_costs_df.columns = [cost_unit.upper()]
        else:
            self.export_costs_df = None
        #calculate and format emobodied supply costs
        self.embodied_energy_costs_df = self.demand.outputs.return_cleaned_output('demand_embodied_energy_costs')
        self.embodied_energy_costs_df.columns = [cost_unit.upper()]
        keys = ["DOMESTIC","SUPPLY"]
        names = ['EXPORT/DOMESTIC', "SUPPLY/DEMAND"]
        for key,name in zip(keys,names):
           self.embodied_energy_costs_df = pd.concat([self.embodied_energy_costs_df],keys=[key],names=[name])
        #calculte and format direct demand costs
        self.demand_costs_df = self.demand.outputs.return_cleaned_output('d_levelized_costs')
        if self.demand_costs_df is not None:
            # only keep index levels configured for combined outputs
            levels_to_keep = [x.upper() for x in cfg.output_combined_levels]
            levels_to_keep = [x for x in levels_to_keep if x in self.demand_costs_df.index.names]
            self.demand_costs_df = self.demand_costs_df.groupby(level=levels_to_keep).sum()
            keys = ["DOMESTIC","DEMAND"]
            names = ['EXPORT/DOMESTIC', "SUPPLY/DEMAND"]
            for key,name in zip(keys,names):
                self.demand_costs_df = pd.concat([self.demand_costs_df],keys=[key],names=[name])
        keys = ['EXPORTED', 'SUPPLY-SIDE', 'DEMAND-SIDE']
        names = ['COST TYPE']
        self.outputs.c_costs = util.df_list_concatenate([self.export_costs_df, self.embodied_energy_costs_df, self.demand_costs_df],keys=keys,new_names=names)
        # clip negatives to zero and drop zero rows to keep the output compact
        self.outputs.c_costs[self.outputs.c_costs<0]=0
        self.outputs.c_costs= self.outputs.c_costs[self.outputs.c_costs[cost_unit.upper()]!=0]
    def calculate_tco(self):
        """Compute total cost of ownership per unit of service demand, by vintage, into self.outputs.c_tco."""
        # self.embodied_emissions_df = self.demand.outputs.return_cleaned_output('demand_embodied_emissions_tco')
        # del self.demand.outputs.demand_embodied_emissions
        #calculte and format direct demand emissions
        # self.direct_emissions_df = self.demand.outputs.return_cleaned_output('demand_direct_emissions')
        ## del self.demand.outputs.demand_direct_emissions
        # emissions = util.DfOper.add([self.embodied_emissions_df,self.direct_emissions_df])
        # #calculate and format export costs
        cost_unit = cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')
        # only include vintages from the supply-model years onward
        initial_vintage = min(cfg.supply_years)
        supply_side_df = self.demand.outputs.demand_embodied_energy_costs_tco
        supply_side_df = supply_side_df[supply_side_df.index.get_level_values('vintage')>=initial_vintage]
        demand_side_df = self.demand.d_levelized_costs_tco
        demand_side_df.columns = ['value']
        demand_side_df = demand_side_df[demand_side_df.index.get_level_values('vintage')>=initial_vintage]
        service_demand_df = self.demand.d_service_demand_tco
        service_demand_df = service_demand_df[service_demand_df.index.get_level_values('vintage')>=initial_vintage]
        keys = ['SUPPLY-SIDE', 'DEMAND-SIDE']
        names = ['COST TYPE']
        # cost per unit of service demand, split by where the cost is incurred
        self.outputs.c_tco = pd.concat([util.DfOper.divi([supply_side_df,util.remove_df_levels(service_demand_df,'unit')]),
                                        util.DfOper.divi([demand_side_df,util.remove_df_levels(service_demand_df,'unit')])],
                                        keys=keys,names=names)
        # division by zero service demand yields inf/nan; treat those as zero cost
        self.outputs.c_tco = self.outputs.c_tco.replace([np.inf,np.nan],0)
        self.outputs.c_tco[self.outputs.c_tco<0]=0
        # label each subsector's rows with its service demand unit
        for sector in self.demand.sectors.values():
          for subsector in sector.subsectors.values():
                if hasattr(subsector,'service_demand') and hasattr(subsector,'stock'):
                    indexer = util.level_specific_indexer(self.outputs.c_tco,'subsector',subsector.id)
                    self.outputs.c_tco.loc[indexer,'unit'] = subsector.service_demand.unit.upper()
        self.outputs.c_tco = self.outputs.c_tco.set_index('unit',append=True)
        self.outputs.c_tco.columns = [cost_unit.upper()]
        self.outputs.c_tco= self.outputs.c_tco[self.outputs.c_tco[cost_unit.upper()]!=0]
        self.outputs.c_tco = self.outputs.return_cleaned_output('c_tco')
    def calculate_payback(self):
        """Compute cumulative cost per unit sold over each vintage's lifetime into self.outputs.c_payback."""
        # self.embodied_emissions_df = self.demand.outputs.return_cleaned_output('demand_embodied_emissions_tco')
        # del self.demand.outputs.demand_embodied_emissions
        #calculte and format direct demand emissions
        # self.direct_emissions_df = self.demand.outputs.return_cleaned_output('demand_direct_emissions')
        ## del self.demand.outputs.demand_direct_emissions
        # emissions = util.DfOper.add([self.embodied_emissions_df,self.direct_emissions_df])
        # #calculate and format export costs
        cost_unit = cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')
        # only include vintages/years from the supply-model years onward
        initial_vintage = min(cfg.supply_years)
        supply_side_df = self.demand.outputs.demand_embodied_energy_costs_payback
        supply_side_df = supply_side_df[supply_side_df.index.get_level_values('vintage')>=initial_vintage]
        supply_side_df = supply_side_df[supply_side_df.index.get_level_values('year')>=initial_vintage]
        supply_side_df = supply_side_df.sort_index()
        demand_side_df = self.demand.d_annual_costs_payback
        demand_side_df.columns = ['value']
        demand_side_df = demand_side_df[demand_side_df.index.get_level_values('vintage')>=initial_vintage]
        demand_side_df = demand_side_df[demand_side_df.index.get_level_values('year')>=initial_vintage]
        demand_side_df = demand_side_df.reindex(supply_side_df.index).sort_index()
        # deepcopy so index manipulation below does not mutate the demand outputs
        sales_df = copy.deepcopy(self.demand.outputs.d_sales)
        util.replace_index_name(sales_df,'vintage','year')
        sales_df = sales_df[sales_df.index.get_level_values('vintage')>=initial_vintage]
        sales_df = util.add_and_set_index(sales_df,'year',cfg.supply_years)
        sales_df.index = sales_df.index.reorder_levels(supply_side_df.index.names)
        sales_df = sales_df.reindex(supply_side_df.index).sort_index()
        keys = ['SUPPLY-SIDE', 'DEMAND-SIDE']
        names = ['COST TYPE']
        # cost per unit sold, split by where the cost is incurred
        self.outputs.c_payback = pd.concat([util.DfOper.divi([supply_side_df, sales_df]), util.DfOper.divi([demand_side_df, sales_df])],keys=keys,names=names)
        self.outputs.c_payback = self.outputs.c_payback[np.isfinite(self.outputs.c_payback.values)]
        self.outputs.c_payback = self.outputs.c_payback.replace([np.inf,np.nan],0)
        # label each subsector's rows with its stock unit
        for sector in self.demand.sectors.values():
          for subsector in sector.subsectors.values():
                if hasattr(subsector,'stock') and subsector.sub_type!='link':
                    indexer = util.level_specific_indexer(self.outputs.c_payback,'subsector',subsector.id)
                    self.outputs.c_payback.loc[indexer,'unit'] = subsector.stock.unit.upper()
        self.outputs.c_payback = self.outputs.c_payback.set_index('unit', append=True)
        self.outputs.c_payback.columns = [cost_unit.upper()]
        # replace calendar year with lifetime year (1-indexed age of the vintage) and accumulate
        self.outputs.c_payback['lifetime_year'] = self.outputs.c_payback.index.get_level_values('year')-self.outputs.c_payback.index.get_level_values('vintage')+1
        self.outputs.c_payback = self.outputs.c_payback.set_index('lifetime_year',append=True)
        self.outputs.c_payback = util.remove_df_levels(self.outputs.c_payback,'year')
        self.outputs.c_payback = self.outputs.c_payback.groupby(level = [x for x in self.outputs.c_payback.index.names if x !='lifetime_year']).transform(lambda x: x.cumsum())
        self.outputs.c_payback = self.outputs.c_payback[self.outputs.c_payback[cost_unit.upper()]!=0]
        self.outputs.c_payback = self.outputs.return_cleaned_output('c_payback')
    def calculate_d_payback(self):
        """Demand-side-only version of calculate_payback: cumulative demand cost per unit sold."""
        cost_unit = cfg.cfgfile.get('case','currency_year_id') + " " + cfg.cfgfile.get('case','currency_name')
        initial_vintage = min(cfg.supply_years)
        demand_side_df = self.demand.d_annual_costs_payback
        demand_side_df.columns = ['value']
        demand_side_df = demand_side_df[demand_side_df.index.get_level_values('vintage')>=initial_vintage]
        demand_side_df = demand_side_df[demand_side_df.index.get_level_values('year')>=initial_vintage]
        # deepcopy so index manipulation below does not mutate the demand outputs
        sales_df = copy.deepcopy(self.demand.outputs.d_sales)
        util.replace_index_name(sales_df,'vintage','year')
        sales_df = sales_df[sales_df.index.get_level_values('vintage')>=initial_vintage]
        sales_df = util.add_and_set_index(sales_df,'year',cfg.supply_years)
        sales_df.index = sales_df.index.reorder_levels(demand_side_df.index.names)
        sales_df = sales_df.reindex(demand_side_df.index).sort_index()
        self.demand.outputs.d_payback = util.DfOper.divi([demand_side_df, sales_df])
        self.demand.outputs.d_payback = self.demand.outputs.d_payback[np.isfinite(self.demand.outputs.d_payback.values)]
        self.demand.outputs.d_payback = self.demand.outputs.d_payback.replace([np.inf,np.nan],0)
        # label each subsector's rows with its stock unit
        for sector in self.demand.sectors.values():
          for subsector in sector.subsectors.values():
                if hasattr(subsector,'stock') and subsector.sub_type!='link':
                    indexer = util.level_specific_indexer(self.demand.outputs.d_payback,'subsector',subsector.id)
                    self.demand.outputs.d_payback.loc[indexer,'unit'] = subsector.stock.unit.upper()
        self.demand.outputs.d_payback = self.demand.outputs.d_payback.set_index('unit', append=True)
        self.demand.outputs.d_payback.columns = [cost_unit.upper()]
        # replace calendar year with lifetime year (1-indexed age of the vintage) and accumulate
        self.demand.outputs.d_payback['lifetime_year'] = self.demand.outputs.d_payback.index.get_level_values('year')-self.demand.outputs.d_payback.index.get_level_values('vintage')+1
        self.demand.outputs.d_payback = self.demand.outputs.d_payback.set_index('lifetime_year',append=True)
        self.demand.outputs.d_payback = util.remove_df_levels(self.demand.outputs.d_payback,'year')
        self.demand.outputs.d_payback = self.demand.outputs.d_payback.groupby(level = [x for x in self.demand.outputs.d_payback.index.names if x !='lifetime_year']).transform(lambda x: x.cumsum())
        self.demand.outputs.d_payback = self.demand.outputs.d_payback[self.demand.outputs.d_payback[cost_unit.upper()]!=0]
        self.demand.outputs.d_payback = self.demand.outputs.return_cleaned_output('d_payback')
    def calculate_d_payback_energy(self):
        """Like calculate_d_payback but in energy terms: cumulative energy demand per unit sold."""
        initial_vintage = min(cfg.supply_years)
        demand_side_df = self.demand.d_all_energy_demand_payback
        demand_side_df.columns = ['value']
        demand_side_df = demand_side_df[demand_side_df.index.get_level_values('vintage')>=initial_vintage]
        demand_side_df = demand_side_df[demand_side_df.index.get_level_values('year')>=initial_vintage]
        # deepcopy so index manipulation below does not mutate the demand outputs
        sales_df = copy.deepcopy(self.demand.outputs.d_sales)
        util.replace_index_name(sales_df,'vintage','year')
        sales_df = sales_df[sales_df.index.get_level_values('vintage')>=initial_vintage]
        sales_df = util.add_and_set_index(sales_df,'year',cfg.supply_years)
#        sales_df.index = sales_df.index.reorder_levels(demand_side_df.index.names)
#        sales_df = sales_df.reindex(demand_side_df.index).sort_index()
        self.demand.outputs.d_payback_energy = util.DfOper.divi([demand_side_df, sales_df])
        self.demand.outputs.d_payback_energy = self.demand.outputs.d_payback_energy[np.isfinite(self.demand.outputs.d_payback_energy.values)]
        self.demand.outputs.d_payback_energy = self.demand.outputs.d_payback_energy.replace([np.inf,np.nan],0)
        # label each subsector's rows with its stock unit
        for sector in self.demand.sectors.values():
          for subsector in sector.subsectors.values():
                if hasattr(subsector,'stock') and subsector.sub_type!='link':
                    indexer = util.level_specific_indexer(self.demand.outputs.d_payback_energy,'subsector',subsector.id)
                    self.demand.outputs.d_payback_energy.loc[indexer,'unit'] = subsector.stock.unit.upper()
        self.demand.outputs.d_payback_energy = self.demand.outputs.d_payback_energy.set_index('unit', append=True)
        self.demand.outputs.d_payback_energy.columns = [cfg.calculation_energy_unit.upper()]
        # replace calendar year with lifetime year (1-indexed age of the vintage) and accumulate
        self.demand.outputs.d_payback_energy['lifetime_year'] = self.demand.outputs.d_payback_energy.index.get_level_values('year')-self.demand.outputs.d_payback_energy.index.get_level_values('vintage')+1
        self.demand.outputs.d_payback_energy = self.demand.outputs.d_payback_energy.set_index('lifetime_year',append=True)
        self.demand.outputs.d_payback_energy = util.remove_df_levels(self.demand.outputs.d_payback_energy,'year')
        self.demand.outputs.d_payback_energy = self.demand.outputs.d_payback_energy.groupby(level = [x for x in self.demand.outputs.d_payback_energy.index.names if x !='lifetime_year']).transform(lambda x: x.cumsum())
        self.demand.outputs.d_payback_energy = self.demand.outputs.d_payback_energy[self.demand.outputs.d_payback_energy[cfg.calculation_energy_unit.upper()]!=0]
        self.demand.outputs.d_payback_energy = self.demand.outputs.return_cleaned_output('d_payback_energy')
    def calculate_combined_emissions_results(self):
        """Assemble export, embodied-supply and direct-demand emissions into self.outputs.c_emissions."""
        #calculate and format export emissions
        if self.supply.export_emissions is not None:
            setattr(self.outputs,'export_emissions',self.supply.export_emissions)
            if 'supply_geography' not in cfg.output_combined_levels:
                util.remove_df_levels(self.outputs.export_emissions, cfg.primary_geography +'_supply')
            self.export_emissions_df = self.outputs.return_cleaned_output('export_emissions')
            del self.outputs.export_emissions
            util.replace_index_name(self.export_emissions_df, 'FINAL_ENERGY','SUPPLY_NODE_EXPORT')
            keys = ["EXPORT","SUPPLY"]
            names = ['EXPORT/DOMESTIC', "SUPPLY/DEMAND"]
            for key,name in zip(keys,names):
                self.export_emissions_df = pd.concat([self.export_emissions_df],keys=[key],names=[name])
        else:
            self.export_emissions_df = None
        #calculate and format emobodied supply emissions
        self.embodied_emissions_df = self.demand.outputs.return_cleaned_output('demand_embodied_emissions')
        # del self.demand.outputs.demand_embodied_emissions
        keys = ["DOMESTIC","SUPPLY"]
        names = ['EXPORT/DOMESTIC', "SUPPLY/DEMAND"]
        for key,name in zip(keys,names):
            self.embodied_emissions_df = pd.concat([self.embodied_emissions_df],keys=[key],names=[name])
        #calculte and format direct demand emissions
        self.direct_emissions_df = self.demand.outputs.return_cleaned_output('demand_direct_emissions')
        # del self.demand.outputs.demand_direct_emissions
        keys = ["DOMESTIC","DEMAND"]
        names = ['EXPORT/DOMESTIC', "SUPPLY/DEMAND"]
        for key, name in zip(keys, names):
            self.direct_emissions_df = pd.concat([self.direct_emissions_df], keys=[key], names=[name])
        # direct emissions are emitted where they are consumed, so mirror the geography level
        if cfg.primary_geography+'_supply' in cfg.output_combined_levels:
            keys = self.direct_emissions_df.index.get_level_values(cfg.primary_geography.upper()).values
            names = cfg.primary_geography.upper() +'_SUPPLY'
            self.direct_emissions_df[names] = keys
            self.direct_emissions_df.set_index(names,append=True,inplace=True)
        keys = ['EXPORTED', 'SUPPLY-SIDE', 'DEMAND-SIDE']
        names = ['EMISSIONS TYPE']
        self.outputs.c_emissions = util.df_list_concatenate([self.export_emissions_df, self.embodied_emissions_df, self.direct_emissions_df],keys=keys,new_names = names)
        # rename geography levels to reflect where emissions are emitted vs. consumed
        util.replace_index_name(self.outputs.c_emissions, cfg.primary_geography.upper() +'-EMITTED', cfg.primary_geography.upper() +'_SUPPLY')
        util.replace_index_name(self.outputs.c_emissions, cfg.primary_geography.upper() +'-CONSUMED', cfg.primary_geography.upper())
        self.outputs.c_emissions= self.outputs.c_emissions[self.outputs.c_emissions['VALUE']!=0]
        emissions_unit = cfg.cfgfile.get('case','mass_unit')
        self.outputs.c_emissions.columns = [emissions_unit.upper()]
    def calculate_combined_energy_results(self):
        """Assemble embodied, final and export energy flows into self.outputs.c_energy."""
        energy_unit = cfg.calculation_energy_unit
        # NOTE(review): gated on export_costs rather than export_energy — confirm this is intentional
        if self.supply.export_costs is not None:
            setattr(self.outputs,'export_energy',self.supply.export_energy)
            self.export_energy = self.outputs.return_cleaned_output('export_energy')
            del self.outputs.export_energy
            util.replace_index_name(self.export_energy, 'FINAL_ENERGY','SUPPLY_NODE_EXPORT')
            keys = ["EXPORT","EMBODIED"]
            names = ['EXPORT/DOMESTIC', 'ENERGY ACCOUNTING']
            for key,name in zip(keys,names):
                self.export_energy = pd.concat([self.export_energy],keys=[key],names=[name])
        else:
            self.export_energy = None
        self.embodied_energy = self.demand.outputs.return_cleaned_output('demand_embodied_energy')
        self.embodied_energy = self.embodied_energy[self.embodied_energy ['VALUE']!=0]
        keys = ['DOMESTIC','EMBODIED']
        names = ['EXPORT/DOMESTIC', 'ENERGY ACCOUNTING']
        for key,name in zip(keys,names):
           self.embodied_energy = pd.concat([self.embodied_energy],keys=[key],names=[name])
        self.final_energy = self.demand.outputs.return_cleaned_output('d_energy')
        self.final_energy = self.final_energy[self.final_energy.index.get_level_values('YEAR')>=int(cfg.cfgfile.get('case','current_year'))]
        keys = ['DOMESTIC','FINAL']
        names = ['EXPORT/DOMESTIC', 'ENERGY ACCOUNTING']
        for key,name in zip(keys,names):
           self.final_energy = pd.concat([self.final_energy],keys=[key],names=[name])
    #   self.outputs.c_energy = pd.concat([self.embodied_energy, self.final_energy],keys=['DROP'],names=['DROP'])
        # pad final/export frames with any index levels only present on the embodied frame
        for name in [x for x in self.embodied_energy.index.names if x not in self.final_energy.index.names]:
            self.final_energy[name] = "N/A"
            self.final_energy.set_index(name,append=True,inplace=True)
        if self.export_energy is not None:
            for name in [x for x in self.embodied_energy.index.names if x not in self.export_energy.index.names]:
                self.export_energy[name] = "N/A"
                self.export_energy.set_index(name,append=True,inplace=True)
            self.export_energy = self.export_energy.groupby(level=self.embodied_energy.index.names).sum()
            self.export_energy = self.export_energy.reorder_levels(self.embodied_energy.index.names)
        self.final_energy = self.final_energy.groupby(level=self.embodied_energy.index.names).sum()
        self.final_energy = self.final_energy.reorder_levels(self.embodied_energy.index.names)
        self.outputs.c_energy = pd.concat([self.embodied_energy,self.final_energy,self.export_energy])
        self.outputs.c_energy= self.outputs.c_energy[self.outputs.c_energy['VALUE']!=0]
        self.outputs.c_energy.columns = [energy_unit.upper()]
    def export_io(self):
        """Write the supply input-output tables (one per sampled year) to s_io.csv."""
        # sample years at the configured step, always including the first supply year
        io_table_write_step = int(cfg.cfgfile.get('output_detail','io_table_write_step'))
        io_table_years = sorted([min(cfg.supply_years)] + range(max(cfg.supply_years), min(cfg.supply_years), -io_table_write_step))
        df_list = []
        for year in io_table_years:
            sector_df_list = []
            keys = self.supply.demand_sectors
            name = ['sector']
            for sector in self.supply.demand_sectors:
                sector_df_list.append(self.supply.io_dict[year][sector])
            year_df = pd.concat(sector_df_list, keys=keys,names=name)
            # replicate across columns so rows and columns share the sector level
            year_df = pd.concat([year_df]*len(keys),keys=keys,names=name,axis=1)
            df_list.append(year_df)
        keys = io_table_years
        name = ['year']
        df = pd.concat(df_list,keys=keys,names=name)
        # zero out cross-sector cells: each sector's io table only applies within that sector
        for row_sector in self.supply.demand_sectors:
            for col_sector in self.supply.demand_sectors:
                if row_sector != col_sector:
                    df.loc[util.level_specific_indexer(df,'sector',row_sector),util.level_specific_indexer(df,'sector',col_sector,axis=1)] = 0
        self.supply.outputs.io = df
        result_df = self.supply.outputs.return_cleaned_output('io')
        keys = [self.scenario.name.upper(), cfg.timestamp]
        names = ['SCENARIO','TIMESTAMP']
        for key, name in zip(keys,names):
            result_df = pd.concat([result_df], keys=[key],names=[name])
        Output.write(result_df, 's_io.csv', os.path.join(cfg.workingdir, 'supply_outputs'))
#        self.export_stacked_io()
def export_stacked_io(self):
df = copy.deepcopy(self.supply.outputs.io)
df.index.names = [x + '_input'if x!= 'year' else x for x in df.index.names ]
df = df.stack(level=df.columns.names).to_frame()
df.columns = ['value']
self.supply.outputs.stacked_io = df
result_df = self.supply.outputs.return_cleaned_output('stacked_io')
keys = [self.scenario.name.upper(), cfg.timestamp]
names = ['SCENARIO','TIMESTAMP']
for key, name in zip(keys,names):
result_df = pd.concat([result_df], keys=[key],names=[name])
Output.write(result_df, 's_stacked_io.csv', os.path.join(cfg.workingdir, 'supply_outputs'))
| {
"repo_name": "energyPATHWAYS/energyPATHWAYS",
"path": "energyPATHWAYS/pathways_model.py",
"copies": "1",
"size": "32907",
"license": "mit",
"hash": -8965640394133475000,
"line_mean": 63.1461988304,
"line_max": 217,
"alpha_frac": 0.6495274562,
"autogenerated": false,
"ratio": 3.3712734350988627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4520800891298863,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben Haley & Ryan Jones'
import pandas as pd
import numpy as np
from scipy import optimize, interpolate, stats
import util
import logging
import pylab
import pdb
pd.options.mode.chained_assignment = None
class TimeSeries:
    """Static interpolation/extrapolation helpers for time-indexed dataframes.

    All methods are stateless; the main entry point is :meth:`clean`, which
    reindexes a dataframe onto a new integer time index and fills missing
    values with the chosen interpolation/extrapolation methods.
    """

    @staticmethod
    def decay_towards_linear_regression_fill(x, y, newindex, decay_speed=0.2):
        """Linear-regression fill whose extrapolated tails decay from the
        outermost observed values back towards the regression line.

        Args:
            x, y (1d arrays): known data points
            newindex (ndarray): index to fill
            decay_speed (float): exponential decay rate towards the regression
        """
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        linear_regression = newindex * slope + intercept
        # upper tail: offset by the residual at max(x), decaying with x-distance
        max_x, y_at_max_x = max(x), y[np.argmax(x)]
        regressed_y_at_max_x = max_x * slope + intercept
        extrapolated_index = np.nonzero(newindex >= max_x)[0]
        decay = (y_at_max_x - regressed_y_at_max_x) * np.exp(-decay_speed * (newindex[extrapolated_index] - max_x))
        linear_regression[extrapolated_index] += decay
        # lower tail: same idea, mirrored
        # NOTE(review): here the decay runs over array *positions*, not the
        # x-distance used for the upper tail — preserved as-is, but confirm
        # the asymmetry is intentional.
        min_x, y_at_min_x = min(x), y[np.argmin(x)]
        regressed_y_at_min_x = min_x * slope + intercept
        extrapolated_index = np.nonzero(newindex <= min_x)[0]
        decay = (y_at_min_x - regressed_y_at_min_x) * np.exp(-decay_speed * extrapolated_index)
        linear_regression[extrapolated_index] += decay[-1::-1]
        return linear_regression

    @staticmethod
    def linear_regression_fill(x, y, newindex):
        """Map newindex through the least-squares line fitted to (x, y)."""
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        return newindex * slope + intercept

    @staticmethod
    def generalized_logistic(x, A, K, M, B):
        """General logistic curve
        Args:
            x (ndarray): domain values
            A (float): lower bound
            K (float): upper bound
            B (float): controls speed of transition
            M (float): domain location of transition
        returns:
            y (ndarray)
        """
        return A + ((K - A) / (1 + np.exp(-B * (x - M))))

    @staticmethod
    def _approx_M(A, K, B, x, y):
        # Invert the logistic for M given one known point (x, y).
        return (np.log(((K - A) / (y - A)) - 1) + B * x) / B

    @staticmethod
    def _logistic_end_point_error(B, A, K, x, y, slope, t=.005):
        """Squared error at the two endpoints; root-finding target for B."""
        M = TimeSeries._approx_M(A, K, B, x[1], y[1])
        y_est = TimeSeries.generalized_logistic(np.array([x[0], x[-1]]), A, K, M, B)
        # target the endpoints nudged inward by t in the direction of the slope
        y_tar = np.array([y[0], y[-1]]) + np.array([t, -t]) * slope / abs(slope)
        return sum((y_tar - y_est) ** 2)

    @staticmethod
    def logistic_default_param(x, y):
        """Heuristic starting parameters (A, K, M, B) for a logistic fit."""
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        A = min(y) if slope > 0 else max(y)
        K = max(y) if slope > 0 else min(y)
        if len(x) == 3:
            if sum(y) == 0:
                # if we have all zeros, the logistic should just return all zeros
                A, K, M, B = 0, 0, 0, 0
            else:
                assert y[0] != y[1] != y[2], "x = {}, y = {}".format(x, y)
                B0 = (10 / float(max(x) - min(x)))
                B = optimize.root(TimeSeries._logistic_end_point_error, x0=B0, args=(A, K, x, y, slope))['x'][0]
                M = TimeSeries._approx_M(A, K, B, x[1], y[1])
        else:
            B = 10 / float(max(x) - min(x))
            M = min(x) + (max(x) - min(x)) / 2.
        return A, K, M, B

    @staticmethod
    def default_logistic(x, y, newindex=None):
        """Evaluate the heuristic-parameter logistic over newindex (or x)."""
        A, K, M, B = TimeSeries.logistic_default_param(x, y)
        return TimeSeries.generalized_logistic(x if newindex is None else newindex, A, K, M, B)

    @staticmethod
    def fit_generalized_logistic(x, y, newindex, **kwargs):
        """Function to use leastsq_curve_fit to fit a general logistic curve to data x, y

        Falls back to the heuristic-parameter logistic when there are fewer
        than 4 points or when the optimizer fails to converge.
        """
        if len(x) < 4:
            return TimeSeries.default_logistic(x, y, newindex)
        A, K, M, B = TimeSeries.logistic_default_param(x, y)
        popt = TimeSeries.leastsq_curve_fit(x, y, f=TimeSeries.generalized_logistic, p0=(A, K, M, B))
        if popt is None:
            # no fit found; log and fall back rather than raising
            logging.debug("leastsq_curve_fit failed to find a solution - data does not support logistic fit")
            logging.debug("Model using default logistic curve")
            return TimeSeries.default_logistic(x, y, newindex)
        return TimeSeries.generalized_logistic(newindex, *popt)

    @staticmethod
    def leastsq_curve_fit(x, y, f, p0):
        """
        Args:
            x (1d array): domain values for fitting
            y (1d array): range values
            f (function): function that maps x to y; must have x as first param
            p0 (tuple): default parameter values for function f

        returns:
            popt (tuple): best fit parameters for function f, or None on failure
        """
        try:
            popt, pcov = optimize.curve_fit(f, x, y, p0)
            return popt
        except RuntimeError:
            # curve_fit raises RuntimeError when it cannot converge
            return None

    @staticmethod
    def spline_fill(x, y, newindex, k=3, s=0):
        """
        s gives the smoothness, k is the degree of the spline
        k=1 "linear interpolation"; k=2 "quadratic spline"; k=3 "cubic spline"

        This function will work for both interpolation and extrapolation.
        However, using cubic or quadratic spline for extrapolation can give
        values far from the original range.
        """
        # First line creates the relationship
        tck = interpolate.splrep(x, y, k=k, s=s)
        # Final line passes the new index to fill
        return interpolate.splev(newindex, tck, der=0)

    @staticmethod
    def fill_with_nearest(x, y, newindex):
        """Interpolates and extrapolates using the nearest available known datapoint"""
        if len(y) == 1:  # if we only have 1 good data point
            return np.array([y[0]] * len(newindex))
        interp = interpolate.interp1d(x, y, kind='nearest', bounds_error=False)
        fill = interp(newindex)
        # interp1d leaves out-of-bounds values as NaN; clamp to the end points
        fill[newindex < min(x)] = y[np.argmin(x)]
        fill[newindex > max(x)] = y[np.argmax(x)]
        return fill

    @staticmethod
    def fill_with_exponential(x, y, newindex, growth_rate=None):
        """Fill gaps with exponential growth.

        If growth_rate is None, a rate is inferred from the first and last
        known points; otherwise the given annual rate (e.g. 0.05 for 5%) is
        used for the end gaps. Interior gaps always use the rate implied by
        their own bounding points.
        """
        fill_dict = dict(zip(x, y))
        # list comprehension (not bare map) so this also works on Python 3
        fill = np.array([fill_dict.get(p, np.nan) for p in newindex])

        if growth_rate is None:
            firstx, lastx = min(x), max(x)
            firsty, lasty = fill_dict[firstx], fill_dict[lastx]
            growth_rate = (lasty / firsty) ** (1. / (lastx - firstx))
        else:
            # the growth rates in the DB come in as 0.05, for example, and we need to add 1 before we use it
            growth_rate += 1

        # split the NaN positions into contiguous runs and fill each run
        gapindex = np.nonzero(~np.isfinite(fill))[0]
        gapgroups = np.array_split(gapindex, np.where(np.diff(gapindex) != 1)[0] + 1)
        for group in gapgroups:
            if group[0] == 0:
                # leading gap: grow backwards from the first known point
                firstx = newindex[group[-1] + 1]
                firsty = fill_dict[firstx]
                fill[group] = firsty * (growth_rate) ** (newindex[group] - firstx)
            elif group[-1] == len(fill) - 1:
                # trailing gap: grow forwards from the last known point
                lastx = newindex[group[0] - 1]
                lasty = fill_dict[lastx]
                fill[group] = lasty * (growth_rate) ** (newindex[group] - lastx)
            else:
                # interior gap: use the rate implied by the bounding points
                firstx, lastx = newindex[group[0] - 1], newindex[group[-1] + 1]
                firsty, lasty = fill_dict[firstx], fill_dict[lastx]
                rate = (lasty / firsty) ** (1. / (lastx - firstx))
                fill[group] = lasty * (rate) ** (newindex[group] - lastx)
        return fill

    @staticmethod
    def fill_with_average(x, y, newindex):
        """Fill every position of newindex with the mean of the known y values."""
        fill = np.ones_like(newindex) * np.mean(y)
        return fill

    @staticmethod
    def _run_cleaning_method(x, y, newindex, method, **kwargs):
        """Dispatch to the fill implementation named by `method`."""
        if method == 'linear_interpolation':
            return TimeSeries.spline_fill(x, y, newindex, k=1, s=0)
        elif method == 'linear_regression':
            return TimeSeries.linear_regression_fill(x, y, newindex)
        elif method == 'logistic':
            return TimeSeries.fit_generalized_logistic(x, y, newindex, **kwargs)
        elif method == 'cubic':
            return TimeSeries.spline_fill(x, y, newindex, k=3, s=0)
        elif method == 'quadratic':
            return TimeSeries.spline_fill(x, y, newindex, k=2, s=0)
        elif method == 'nearest':
            return TimeSeries.fill_with_nearest(x, y, newindex)
        elif method == 'exponential':
            return TimeSeries.fill_with_exponential(x, y, newindex, kwargs.get('exp_growth_rate'))
        elif method == 'average' or method == 'mean':
            return TimeSeries.fill_with_average(x, y, newindex)
        elif method == 'decay_towards_linear_regression':
            return TimeSeries.decay_towards_linear_regression_fill(x, y, newindex)
        else:
            raise ValueError("{} is not a known cleaning method type".format(method))

    @staticmethod
    def _clean_method_checks(x, interpolation_method, extrapolation_method, **kwargs):
        """Downgrade the requested methods when the data cannot support them."""
        if len(x) == 1:
            # with a single point, everything degrades to 'nearest'
            if interpolation_method == 'exponential' and kwargs.get('exp_growth_rate') is None:
                interpolation_method = 'nearest'
            elif interpolation_method == 'logistic':
                interpolation_method = 'nearest'
                logging.debug('More than one x, y pair is needed for logistic regression')
            elif interpolation_method is not None and interpolation_method != 'none':
                interpolation_method = 'nearest'

            if extrapolation_method == 'exponential' and kwargs.get('exp_growth_rate', None) is None:
                extrapolation_method = 'nearest'
            elif extrapolation_method == 'logistic':
                extrapolation_method = 'nearest'
                logging.debug('More than one x, y pair is needed for logistic regression')
            elif extrapolation_method is not None and extrapolation_method != 'none':
                extrapolation_method = 'nearest'

        # spline degree k requires at least k+1 points
        if interpolation_method == 'quadratic':
            if len(x) < 3:
                interpolation_method = 'linear_interpolation'
        if extrapolation_method == 'quadratic':
            if len(x) < 3:
                extrapolation_method = 'linear_interpolation'
        if interpolation_method == 'cubic':
            if len(x) < 4:
                interpolation_method = 'linear_interpolation'
        if extrapolation_method == 'cubic':
            if len(x) < 4:
                extrapolation_method = 'linear_interpolation'

        if interpolation_method == 'decay_towards_linear_regression':
            raise ValueError('decay_towards_linear_regression is only supported for extrapolation, not interpolation')

        return interpolation_method, extrapolation_method, kwargs

    @staticmethod
    def clean(data, newindex=None, interpolation_method=None, extrapolation_method=None, time_index_name=None, **kwargs):
        """
        Return cleaned timeseries data reindexed to time_index, interpolated for missing data points,
        and extrapolated using selected method.

        Each column in the dataframe is cleaned and the returned data maintains column names

        Cleaning methods:
            linear_interpolation - linear interpolation between points (default)
            linear_regression - linear regression for a set of x, y values and fill
            logistic - fit logistic regression and fill
            nearest - fill missing value with nearest y value
            quadratic - quadratic spline fill (no smoothing)
            cubic - cubic spline fill (no smoothing)
            exponential - annual growth rate for extrapolating data
            average - takes an average of all given values to fill in missing values

        Args:
            data (dataframe): dataframe with missing values
            interpolation_method (string): method to use between max(y) and min(y), defaults to linear_interpolation
            extrapolation_method (string): method to use beyond the range of max(y) and min(y), defaults to linear_interpolation
            newindex (array): new dataframe index to fill, defaults to range(min(x), max(x)+1)

        Returns:
            returndata (dataframe): reindexed and with values filled

        Raises:
            IndexError: on empty input
            ValueError: if data is not a dataframe or has no finite values
        """
        if not len(data):
            raise IndexError('Empty data passed to TimeSeries.clean')
        if not isinstance(data, pd.core.frame.DataFrame):
            raise ValueError('cleaning requires a pandas dataframe as an input')
        if np.all(data.isnull()):
            raise ValueError('cleaning requires at least one finite data point')

        if data.index.nlevels > 1:
            return TimeSeries._clean_multindex(data[:], time_index_name, interpolation_method, extrapolation_method, newindex, **kwargs)
        else:
            return TimeSeries._singleindex_clean(data[:], newindex, interpolation_method, extrapolation_method, **kwargs)

    @staticmethod
    def _clean_multindex(data, time_index_name, interpolation_method=None, extrapolation_method=None, newindex=None, **kwargs):
        """Clean each non-time index group of a multi-index dataframe."""
        if time_index_name not in data.index.names:
            raise ValueError('Time_index_name must match one of the index level names if cleaning a multi-index dataframe')

        if newindex is None:
            exist_index = data.index.get_level_values(time_index_name)
            newindex = np.array(sorted(set(exist_index)), dtype=int)
        elif not isinstance(newindex, np.ndarray):
            # We use newindex to calculate extrap_index using a method that takes an array
            newindex = np.array(newindex, dtype=int)

        # this is done so that we can make use of data that falls outside of the newindex
        wholeindex = np.array(sorted(list(set(newindex) | set(data.index.get_level_values(time_index_name)))), dtype=int)

        # Add new levels to data for missing time indices
        data = util.reindex_df_level_with_new_elements(data, time_index_name, wholeindex)

        group_levels = tuple([n for n in data.index.names if n != time_index_name])
        data = data.groupby(level=group_levels).apply(TimeSeries._clean_multindex_helper,
                                                      time_index_name=time_index_name,
                                                      newindex=wholeindex,
                                                      interpolation_method=interpolation_method,
                                                      extrapolation_method=extrapolation_method,
                                                      **kwargs)

        # trim back down to the requested index
        data = util.reindex_df_level_with_new_elements(data, time_index_name, newindex)
        return data

    @staticmethod
    def _clean_multindex_helper(data, time_index_name, newindex, interpolation_method=None, extrapolation_method=None, **kwargs):
        """Clean one group of a multi-index dataframe, column by column."""
        x = np.array(data.index.get_level_values(time_index_name), dtype=int)
        for colname in data.columns:
            y = np.array(data[colname])
            if not np.any(np.isfinite(y)):
                # nothing to anchor a fill on; leave the column untouched
                continue
            data[colname] = TimeSeries.cleanxy(x, y, newindex, interpolation_method, extrapolation_method, **kwargs)
        return data

    @staticmethod
    def _singleindex_clean(data, newindex=None, interpolation_method=None, extrapolation_method=None, **kwargs):
        """Clean a single-index dataframe, column by column."""
        # TODO: duplicate values should raise an error when doing data validation
        # Drop duplicate index labels, keeping the first non-null value.
        # Bug fix: the groupby result was previously discarded, so duplicate
        # labels survived and the reindex below could fail.
        data = data.groupby(data.index).first()
        data = data.sort_index()
        if newindex is None:
            newindex = np.arange(min(data.index), max(data.index) + 1, dtype=int)
        elif not isinstance(newindex, np.ndarray):
            # We use newindex to calculate extrap_index using a method that takes an array
            newindex = np.array(newindex, dtype=int)
        # this is done so that we can make use of data that falls outside of the newindex
        wholeindex = np.array(sorted(list(set(newindex) | set(data.index))), dtype=int)
        data = data.reindex(wholeindex)
        x = np.array(data.index)
        for colname in data.columns:
            y = np.array(data[colname])
            data[colname] = TimeSeries.cleanxy(x, y, wholeindex, interpolation_method, extrapolation_method, **kwargs)
        data = data.reindex(newindex)
        return data

    @staticmethod
    def cleanxy(x, y, newindex, interpolation_method=None, extrapolation_method=None, replace_training_data=True, **kwargs):
        """Fill one (x, y) series over newindex; y may contain NaN gaps."""
        # if you have no interpolation method, start with the current y (with nans)
        if interpolation_method is None or interpolation_method == 'none':
            yhat = y.copy()

        goody = np.nonzero(np.isfinite(y))[0]  # Used to isolate only good data (not NaN)
        x = np.array(x)[goody]
        y = np.array(y)[goody]

        interpolation_method, extrapolation_method, kwargs = TimeSeries._clean_method_checks(x, interpolation_method, extrapolation_method, **kwargs)

        ##################
        # interpolation
        ##################
        # basic process is to use interpolation method on ALL points and then replace points with
        # extrapolation method if it is specified. Objective is to cut down on if statements and make
        # interpolation and extrapolation consistent in the case of curve fitting
        if interpolation_method is not None and interpolation_method != 'none':
            yhat = TimeSeries._run_cleaning_method(x, y, newindex, interpolation_method, **kwargs)

        ##################
        # extrapolation
        ##################
        # if given an extrapolation method and there are points to extrapolate to
        extrap_index = np.nonzero(np.any([newindex < min(x), newindex > max(x)], axis=0))[0]
        if extrapolation_method is None or extrapolation_method == 'none':
            yhat[extrap_index] = np.nan
        elif len(extrap_index) and extrapolation_method != interpolation_method:
            yhat[extrap_index] = TimeSeries._run_cleaning_method(x, y, newindex, extrapolation_method, **kwargs)[extrap_index]

        # fill back in the "training" data, meaning good data kept and not replaced
        if not replace_training_data:
            yhat[goody] = y

        return yhat
# newindex = np.arange(2000, 2051)
# x = np.array([2015, 2045, 2070])
# y = np.array([.2, .8, 1])
# start = pd.DataFrame(y, index=x)
# interpolation_method = 'logistic'
# extrapolation_method = 'logistic'
# filled = TimeSeries.clean(start, newindex, interpolation_method, extrapolation_method)
#
# pylab.plot(newindex, filled.values.flatten(), '.')
# pylab.plot(x, y, '*')
# newindex = np.arange(2000, 2051)
# x = np.array([2015, 2020, 2025, 2030, 2040])
# y = np.array([.2, -0.03, .1, 6, 4])
# start = pd.DataFrame(y, index=x)
# interpolation_method = 'linear_regression'
# extrapolation_method = 'decay_towards_linear_regression'
# filled = TimeSeries.clean(start, newindex, interpolation_method, extrapolation_method)
#
# pylab.plot(newindex, filled.values.flatten(), '.')
# pylab.plot(x, y, '*')
# decay_towards_linear_regression
# newindex = np.arange(2000, 2051)
# x = np.array([2015, 2020, 2025, 2030, 2040, 2050])
# y = np.array([0.01, 0.03, .1, 1, .8, .4])
# # x = np.array([2020])
# # y = np.array([0.716194956])
# start = pd.DataFrame(y, index=x)
# interpolation_method = 'exponential'
# extrapolation_method = 'exponential'
# filled = TimeSeries.clean(start, newindex, interpolation_method, extrapolation_method)
#
# pylab.plot(newindex, filled.values.flatten(), '.')
# pylab.plot(x, y, '*')
#newindex = np.arange(2000, 2051)
##x = np.array([2015, 2020, 2025, 2030, 2040, 2050])
##y = np.array([0.01, 0.03, .1, 1, .8, .4])
#x = np.array([2000, 2010, 2015])
#y = np.array([0.6, 0.716194956, .725])
#start = pd.DataFrame(y, index=x)
#interpolation_method = 'linear_interpolation'
#extrapolation_method = 'linear_interpolation'
#filled = TimeSeries.clean(start, newindex, interpolation_method, extrapolation_method)
#
#pylab.plot(newindex, filled.values.flatten(), '-.')
#pylab.plot(x, y, '*')
#
#'linear_interpolation'
#'linear_regression'
#'logistic'
#'nearest'
#'quadratic'
#'cubic'
#'exponential'
#B = .5
#M2 = approx_M(A, K, B, x[1], y[1])
#B2 = approx_B(A, K, M2, x[0], .01)
#M2 = approx_M(A, K, B2, x[1], y[1])
#y = A + ((K - A) / (1 + e^(-B * (x - M))))
#newindex = np.arange(2015, 2051)
#
#y_hat = TimeSeries.fit_generalized_logistic(x, y, newindex)
#
#x = np.array([2016, 2023, 2030, 2040, 2050])
#y = np.array([0, .16, .48, .7, .72])
#x = np.array([2015, 2030, 2033, 2050])
#y = np.array([0, .48, .6, .72])
# groups.apply(TimeSeries.clean, args=(time_index_name, interpolation_method, extrapolation_method, newindex, lbound, ubound))
# @staticmethod
# def generate_data_to_clean(data, time_index_name):
#
# if data.index.nlevels>1:
# if time_index_name is None or time_index_name not in data.index.names:
# raise ValueError('time_index_name must match one of the index level names')
# group_levels = [n for n in data.index.names if n!=time_index_name]
# groups = data.groupby(level=group_levels)
# for key, value in groups:
# print data.loc[key]
# print value
# break
# else:
# if time_index_name is not None and time_index_name not in data.index.names:
# raise ValueError('time_index_name must match one of the index level names')
# else:
# time_index_name = data.index.names[0]
# frames = []
# for group in data.groupby(level=group_levels).groups.keys():
# if isinstance(group, basestring):
# group = (group,)
#
# data_slice = data.xs(group, level=group_levels)
# clean_data = TimeSeries.clean(data_slice, time_index_name, interpolation_method, extrapolation_method, newindex, lbound, ubound).reset_index()
#
# for level, ele in zip(group_levels, group):
# clean_data[level] = ele
#
# frames.append(clean_data)
#from collections import defaultdict
#
#x = np.array([2010, 2018, 2025, 2040, 2050])
#y = np.array([.8, .7, .4, .35, .34])
#
#example_data = pd.DataFrame(y, index=x)
#
#data = defaultdict(dict)
#data['a']['a'] = pd.DataFrame(y, index=x)
#data['a']['b'] = pd.DataFrame(y, index=x)
#data['b'] = pd.DataFrame(y, index=x)
#
#newdata = TimeSeries.clean_dict(data, newindex=newindex)
| {
"repo_name": "energyPATHWAYS/energyPATHWAYS",
"path": "energyPATHWAYS/time_series.py",
"copies": "1",
"size": "22911",
"license": "mit",
"hash": -2472377781585807400,
"line_mean": 41.2712177122,
"line_max": 149,
"alpha_frac": 0.6091397145,
"autogenerated": false,
"ratio": 3.616003787878788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4725143502378788,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Ben Hughes <bwghughes@gmail.com>'
__version__ = '0.1'
from collections import deque
from decimal import Decimal
# Individuals-chart control-limit constant. Constructed from a string:
# Decimal(2.66) would capture the binary-float artifacts of the literal
# (2.66000000000000000888...) instead of the exact value 2.66.
STD_DEV = Decimal('2.66')
class InvalidChartDataError(Exception):
    """Raised by ControlChart when its input data is missing, empty, or non-numeric."""
    pass
class ControlChart(object):
    """XmR-style control chart over a sequence of numeric samples.

    Control limits are mean +/- 2.66 * mean(moving range), where the 2.66
    constant is held in the module-level STD_DEV.
    """

    def __init__(self, data=None):
        """Validate and store the sample data.

        Args:
            data: non-empty sequence of ints and/or floats.

        Raises:
            InvalidChartDataError: if data is missing, empty, or contains
                non-numeric values.
        """
        try:
            assert data, 'Data cannot be None'
            assert len(data) > 0, 'Data cannot be empty'
            # Bug fix: was any(isinstance(x, int)), which rejected all-float
            # data and accepted mixed junk containing a single int.
            assert all(isinstance(x, (int, float)) for x in data), \
                'Data can only be ints or floats'
            self.data = data
            self._rod_mean = None  # lazily computed moving-range mean
        except AssertionError as e:
            raise InvalidChartDataError(e)

    def _get_range_of_difference(self):
        """Return the moving range: |x[i] - x[i-1]| for consecutive samples."""
        return [abs(b - a) for a, b in zip(self.data, self.data[1:])]

    @property
    def mean(self):
        """Arithmetic mean of the samples, computed in Decimal arithmetic.

        Dividing as Decimals avoids both the Python 2 integer floor-division
        and the binary-float artifacts of Decimal(sum/len) in the original.
        """
        return Decimal(sum(self.data)) / Decimal(len(self.data))

    @property
    def range_of_difference_mean(self):
        """Mean of the moving range, cached after the first access.

        NOTE(review): like the original, this raises ZeroDivisionError for
        single-sample data — a chart needs at least two samples.
        """
        if self._rod_mean is None:
            rod = self._get_range_of_difference()
            self._rod_mean = Decimal(sum(rod)) / Decimal(len(rod))
        return self._rod_mean

    @property
    def upper_control_limit(self):
        """Mean plus 2.66 moving-range means."""
        return self.mean + self.range_of_difference_mean * STD_DEV

    @property
    def lower_control_limit(self):
        """Mean minus 2.66 moving-range means."""
        return self.mean - self.range_of_difference_mean * STD_DEV
| {
"repo_name": "bwghughes/controlchart",
"path": "controlchart/__init__.py",
"copies": "1",
"size": "1593",
"license": "isc",
"hash": -3559165458367054300,
"line_mean": 29.6346153846,
"line_max": 93,
"alpha_frac": 0.6120527307,
"autogenerated": false,
"ratio": 3.6122448979591835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47242976286591837,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Beni'
test_data = [
    ["2014-06-01", "APPL", 100.11],
    ["2014-06-02", "APPL", 110.61],
    ["2014-06-03", "APPL", 120.22],
    ["2014-06-04", "APPL", 100.54],
    ["2014-06-01", "MSFT", 20.46],
    ["2014-06-02", "MSFT", 21.25],
    ["2014-06-03", "MSFT", 32.53],
    ["2014-06-04", "MSFT", 40.71, "APPL"],
]

# Partition the rows by ticker symbol (column 1): APPL rows on one side,
# everything else on the other.
appl = [row for row in test_data if row[1] == "APPL"]
msft = [row for row in test_data if row[1] != "APPL"]

print(appl)
print(msft)
# test_data = [
# ["2014-06-01", "APPL", 100.11],
# ["2014-06-01", "APPL", 110.61],
# ["2014-06-01", "APPL", 120.22],
# ["2014-06-01", "APPL", 100.54],
# ["2014-06-01", "MSFT", 20.46],
# ["2014-06-01", "MSFT", 21.25],
# ["2014-06-01", "MSFT", 32.53],
# ["2014-06-01", "MSFT", 40.71],
# ["2014-04-01", "BLAH", 19.99],
# ["2014-09-01", "BLAH", 29.99],
# ]
#
# appl = []
#
# msft = []
#
# for data in test_data:
#
# temp = []
#
# if data[1] == "APPL":
# temp.append(data[0])
# temp.append(data[2])
# appl.append(temp)
# elif data[1] == "MSFT":
# temp.append(data[0])
# temp.append(data[2])
# msft.append(temp)
#
# print(appl)
# print(msft)
| {
"repo_name": "benmuresan/django_work",
"path": "tango_with_django_project/rango/stocks.py",
"copies": "1",
"size": "1249",
"license": "mit",
"hash": 2094336894536686000,
"line_mean": 19.8166666667,
"line_max": 42,
"alpha_frac": 0.4667734187,
"autogenerated": false,
"ratio": 2.2343470483005365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32011204670005367,
"avg_score": null,
"num_lines": null
} |
__author__ = 'benjamin.c.yan'
class Bear(object):
    """A dict-like attribute bag.

    Public attributes and item access are interchangeable; names beginning
    with an underscore are treated as private and are ignored on writes and
    read back as None.
    """

    def __init__(self, other=None):
        """Optionally seed the bag from a dict or another Bear."""
        if isinstance(other, (dict, Bear)):
            for name in other:
                self[name] = other[name]

    def __iter__(self):
        """Iterate over the stored attribute names."""
        return iter(self.__dict__)

    def __getitem__(self, key):
        # Underscore-prefixed keys are private: reads yield None.
        if key.startswith('_'):
            return None
        return self.__dict__.get(key)

    def __setitem__(self, key, value):
        # Underscore-prefixed keys are private: writes are dropped.
        if key.startswith('_'):
            return
        self.__dict__[key] = value

    def __getattr__(self, name):
        # Only called for attributes not already in __dict__; delegate to
        # item access so missing public names read as None.
        if name.startswith('_'):
            return None
        return self[name]

    def __str__(self):
        return '%s (%s)' % (type(self).__name__, repr(self))

    def __repr__(self):
        pairs = ['%s=%r' % (name, self[name]) for name in sorted(self.__dict__)]
        return '{%s}' % ', '.join(pairs)
if __name__ == '__main__':
    pass
# Ad-hoc demo (Python 2 print statements) exercising Bear's attribute/item
# access. NOTE(review): it runs at import time, not under the empty
# __main__ guard above.
bear = Bear()
bear.name = 'benjamin'
print bear
for key1 in bear:
    print key1, bear[key1], bear.name
bear['sex'] = 21
print 'sex', bear.sex, repr(bear), str(bear)
other = dict(name='benjamin', sex='male', high=175)
print Bear(other)
# NOTE(review): json appears unused here; simplekit is a third-party
# dependency used only by the demo line that follows.
import json
import simplekit.objson
print simplekit.objson.dumps2(object()) | {
"repo_name": "by46/simplekit",
"path": "simples/bear.py",
"copies": "1",
"size": "1244",
"license": "mit",
"hash": 8483419521361102000,
"line_mean": 23.9,
"line_max": 68,
"alpha_frac": 0.5209003215,
"autogenerated": false,
"ratio": 3.465181058495822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9483755798600473,
"avg_score": 0.0004651162790697674,
"num_lines": 50
} |
__author__ = 'benjamindeleener'
from liblo import *
import socket
class MuseIOUDP():
    """Raw UDP listener for Muse data (alternative to the OSC-based server).

    NOTE(review): this class only prints incoming datagrams; the signal,
    viewer and game attributes are stored but never used here.
    """
    def __init__(self, port, signal=None, viewer=None):
        self.signal = signal  # signal buffers, unused in this class
        self.viewer = viewer  # viewers, unused in this class
        self.game = None      # presumably set externally, if at all
        self.port = port
        self.udp_ip = '127.0.0.1'  # listen on localhost only
    def initializePort(self):
        """Bind a UDP socket and print incoming datagrams forever (blocking loop)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind((self.udp_ip, self.port))
        print 'Port started to listen'
        while True:
            data, addr = sock.recvfrom(1024)  # buffer size is 1024 bytes
            print data
class MuseServer(ServerThread):
    # OSC server thread; listens on the port passed to the constructor.
    # liblo's make_method decorators route incoming messages by OSC path.
    def __init__(self, port, signal, viewer):
        # signal/viewer are keyed by stream name ('eeg', 'alpha_rel', ...)
        self.signal = signal
        self.viewer = viewer
        # NOTE(review): game must be assigned externally before concentration
        # messages arrive, else change_velocity below fails on None.
        self.game = None
        ServerThread.__init__(self, port)
    # receive accelerometer data (handler currently does nothing with it)
    @make_method('/muse/acc', 'fff')
    def acc_callback(self, path, args):
        acc_x, acc_y, acc_z = args
        # print "%s %f %f %f" % (path, acc_x, acc_y, acc_z)
    # receive EEG data: one sample for each of the four channels
    # (left ear, left forehead, right forehead, right ear)
    @make_method('/muse/eeg', 'ffff')
    def eeg_callback(self, path, args):
        if 'eeg' in self.signal:
            #print self.port, args
            self.signal['eeg'].add_time()
            self.signal['eeg'].add_l_ear(args[0])
            self.signal['eeg'].add_l_forehead(args[1])
            self.signal['eeg'].add_r_forehead(args[2])
            self.signal['eeg'].add_r_ear(args[3])
            self.viewer['eeg'].refresh()
    # receive relative alpha band power (same 4-channel layout as EEG)
    @make_method('/muse/elements/alpha_relative', 'ffff')
    def alpha_callback(self, path, args):
        if 'alpha_rel' in self.signal:
            self.signal['alpha_rel'].add_time()
            self.signal['alpha_rel'].add_l_ear(args[0])
            self.signal['alpha_rel'].add_l_forehead(args[1])
            self.signal['alpha_rel'].add_r_forehead(args[2])
            self.signal['alpha_rel'].add_r_ear(args[3])
            self.viewer['alpha_rel'].refresh()
    # receive the experimental concentration metric and forward it to the game
    @make_method('/muse/elements/experimental/concentration', 'f')
    def concentration_callback(self, path, args):
        if 'concentration' in self.signal:
            self.signal['concentration'].add_time()
            self.signal['concentration'].add_concentration(args[0])
            self.viewer['concentration-mellow'].refresh()
            self.game.change_velocity(self.signal['concentration'].concentration)
    # receive mellow data - viewer is the same as concentration
    @make_method('/muse/elements/experimental/mellow', 'f')
    def mellow_callback(self, path, args):
        if 'mellow' in self.signal:
            self.signal['mellow'].add_time()
            self.signal['mellow'].add_mellow(args[0])
            self.viewer['concentration-mellow'].refresh()
    # handle unexpected messages (intentionally ignored)
    @make_method(None, None)
    def fallback(self, path, args, types, src):
        test = args
        # print "Unknown message \n\t Source: '%s' \n\t Address: '%s' \n\t Types: '%s ' \n\t Payload: '%s'" %
        # (src.url, path, types, args)
if __name__ == "__main__":
    # Demo entry point: print raw Muse UDP datagrams arriving on port 5000.
    io_udp = MuseIOUDP(5000)
    io_udp.initializePort()  # blocks forever
| {
"repo_name": "gaamy/pyMuse",
"path": "pymuse/ios.py",
"copies": "1",
"size": "3179",
"license": "mit",
"hash": 755585466855846100,
"line_mean": 34.7191011236,
"line_max": 109,
"alpha_frac": 0.5894935514,
"autogenerated": false,
"ratio": 3.462962962962963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45524565143629625,
"avg_score": null,
"num_lines": null
} |
__author__ = 'benjamindeleener'
from liblo import *
class MuseServer(ServerThread):
    # OSC server thread; listens for messages on port 5001.
    # liblo's make_method decorators route incoming messages by OSC path.
    def __init__(self, signal, viewer):
        # signal/viewer are keyed by stream name ('eeg', 'alpha_rel', ...)
        self.signal = signal
        self.viewer = viewer
        ServerThread.__init__(self, 5001)
    # receive accelerometer data (handler currently does nothing with it)
    @make_method('/muse/acc', 'fff')
    def acc_callback(self, path, args):
        acc_x, acc_y, acc_z = args
        # print "%s %f %f %f" % (path, acc_x, acc_y, acc_z)
    # receive EEG data: one sample for each of the four channels
    # (left ear, left forehead, right forehead, right ear)
    @make_method('/muse/eeg', 'ffff')
    def eeg_callback(self, path, args):
        if 'eeg' in self.signal:
            self.signal['eeg'].add_time()
            self.signal['eeg'].add_l_ear(args[0])
            self.signal['eeg'].add_l_forehead(args[1])
            self.signal['eeg'].add_r_forehead(args[2])
            self.signal['eeg'].add_r_ear(args[3])
            self.viewer['eeg'].refresh()
    # receive relative alpha band power (same 4-channel layout as EEG)
    @make_method('/muse/elements/alpha_relative', 'ffff')
    def alpha_callback(self, path, args):
        if 'alpha_rel' in self.signal:
            self.signal['alpha_rel'].add_time()
            self.signal['alpha_rel'].add_l_ear(args[0])
            self.signal['alpha_rel'].add_l_forehead(args[1])
            self.signal['alpha_rel'].add_r_forehead(args[2])
            self.signal['alpha_rel'].add_r_ear(args[3])
            self.viewer['alpha_rel'].refresh()
    # receive the experimental concentration metric
    @make_method('/muse/elements/experimental/concentration', 'f')
    def concentration_callback(self, path, args):
        if 'concentration' in self.signal:
            self.signal['concentration'].add_time()
            self.signal['concentration'].add_concentration(args[0])
            self.viewer['concentration-mellow'].refresh()
    # receive mellow data - viewer is the same as concentration
    @make_method('/muse/elements/experimental/mellow', 'f')
    def mellow_callback(self, path, args):
        if 'mellow' in self.signal:
            self.signal['mellow'].add_time()
            self.signal['mellow'].add_mellow(args[0])
            self.viewer['concentration-mellow'].refresh()
    # handle unexpected messages (intentionally ignored)
    @make_method(None, None)
    def fallback(self, path, args, types, src):
        test = args
        # print "Unknown message \n\t Source: '%s' \n\t Address: '%s' \n\t Types: '%s ' \n\t Payload: '%s'" %
        # (src.url, path, types, args)
| {
"repo_name": "twuilliam/pyMuse",
"path": "pymuse/ios.py",
"copies": "1",
"size": "2416",
"license": "mit",
"hash": 1269422444870522600,
"line_mean": 37.9677419355,
"line_max": 109,
"alpha_frac": 0.5910596026,
"autogenerated": false,
"ratio": 3.388499298737728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4479558901337728,
"avg_score": null,
"num_lines": null
} |
__author__ = 'benjamindeleener'
from numpy import fft, linspace
from datetime import datetime
class MuseSignal(object):
def __init__(self, length, acquisition_freq):
self.length = length
self.acquisition_freq = acquisition_freq
self.time = list(linspace(-float(self.length) / self.acquisition_freq + 1.0 / self.acquisition_freq, 0.0, self.length))
self.init_time = datetime.now()
def add_time(self):
diff = datetime.now() - self.init_time
self.time.append(float(diff.total_seconds() * 1000))
del self.time[0]
class MuseEEG(MuseSignal):
def __init__(self, length=200, acquisition_freq=220.0, do_fft=False):
super(MuseEEG, self).__init__(length, acquisition_freq)
self.do_fft = do_fft
self.l_ear, self.l_forehead, self.r_forehead, self.r_ear = [0.0] * self.length, [0.0] * self.length, [
0.0] * self.length, [0.0] * self.length
self.l_ear_fft, self.l_forehead_fft, self.r_forehead_fft, self.r_ear_fft = [0.0] * self.length, [0.0] * self.length, [
0.0] * self.length, [0.0] * self.length
def add_l_ear(self, s):
self.l_ear.append(s)
del self.l_ear[0]
if self.do_fft:
self.l_ear_fft = fft.fft(self.l_ear)
def add_l_forehead(self, s):
self.l_forehead.append(s)
del self.l_forehead[0]
if self.do_fft:
self.l_forehead_fft = fft.fft(self.l_forehead)
def add_r_forehead(self, s):
self.r_forehead.append(s)
del self.r_forehead[0]
if self.do_fft:
self.r_forehead_fft = fft.fft(self.r_forehead)
def add_r_ear(self, s):
self.r_ear.append(s)
del self.r_ear[0]
if self.do_fft:
self.r_ear_fft = fft.fft(self.r_ear)
class MuseConcentration(MuseSignal):
def __init__(self, length=200, acquisition_freq=10.0):
super(MuseConcentration, self).__init__(length, acquisition_freq)
self.concentration = [0.0] * self.length
def add_concentration(self, s):
self.concentration.append(s)
del self.concentration[0]
class MuseMellow(MuseSignal):
    """Rolling buffer for the Muse 'mellow' metric."""

    def __init__(self, length=200, acquisition_freq=10.0):
        super(MuseMellow, self).__init__(length, acquisition_freq)
        self.mellow = [0.0] * self.length

    def add_mellow(self, s):
        """Push a new mellow sample, evicting the oldest value."""
        self.mellow.pop(0)
        self.mellow.append(s)
| {
"repo_name": "twuilliam/pyMuse",
"path": "pymuse/signals.py",
"copies": "2",
"size": "2407",
"license": "mit",
"hash": 2156098996344791600,
"line_mean": 33.884057971,
"line_max": 127,
"alpha_frac": 0.6044869132,
"autogenerated": false,
"ratio": 2.960639606396064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4565126519596064,
"avg_score": null,
"num_lines": null
} |
__author__ = 'benjamindeleener'
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from datetime import datetime, timedelta
from numpy import linspace
def timeTicks(x, pos):
    """Matplotlib tick formatter: render a millisecond offset as H:MM:SS.

    `pos` is required by the FuncFormatter protocol but unused.
    """
    return str(timedelta(milliseconds=x))
class MuseViewer(object):
    """Common state for the signal viewers: refresh throttling and y-limits."""

    def __init__(self, acquisition_freq, signal_boundaries=None):
        # Minimum delay (seconds) between two consecutive redraws.
        self.refresh_freq = 0.15
        self.acquisition_freq = acquisition_freq
        self.init_time = datetime.now()
        self.last_refresh = datetime.now()
        if signal_boundaries is None:
            # Default y-axis window.
            self.low, self.high = 0, 1
        else:
            self.low = signal_boundaries[0]
            self.high = signal_boundaries[1]
class MuseViewerSignal(MuseViewer):
    """Live four-panel plot of a MuseEEG signal (ears and forehead channels).

    Shows either the raw rolling time series or, when the signal was built
    with do_fft=True, the first half of each channel's FFT.
    """

    def __init__(self, signal, acquisition_freq, signal_boundaries=None):
        super(MuseViewerSignal, self).__init__(acquisition_freq, signal_boundaries)
        self.signal = signal
        # Bugfix: self.x_data was read below (and in refresh) but never
        # assigned, so FFT mode crashed with AttributeError. One sample
        # index per buffered value, matching the other viewer classes.
        self.x_data = range(0, self.signal.length, 1)
        self.figure, (self.ax1, self.ax2, self.ax3, self.ax4) = plt.subplots(4, 1, sharex=True, figsize=(15, 10))
        self.ax1.set_title('Left ear')
        self.ax2.set_title('Left forehead')
        self.ax3.set_title('Right forehead')
        self.ax4.set_title('Right ear')
        if self.signal.do_fft:
            # Only the first half of the spectrum is plotted. `//` keeps the
            # index integral on both Python 2 and Python 3.
            half = len(self.x_data) // 2
            self.ax1_plot, = self.ax1.plot(self.x_data[0:half], self.signal.l_ear_fft[0:half])
            self.ax2_plot, = self.ax2.plot(self.x_data[0:half], self.signal.l_forehead_fft[0:half])
            self.ax3_plot, = self.ax3.plot(self.x_data[0:half], self.signal.r_forehead_fft[0:half])
            self.ax4_plot, = self.ax4.plot(self.x_data[0:half], self.signal.r_ear_fft[0:half])
            for axis in (self.ax1, self.ax2, self.ax3, self.ax4):
                axis.set_ylim([0, 10000])
        else:
            self.ax1_plot, = self.ax1.plot(self.signal.time, self.signal.l_ear)
            self.ax2_plot, = self.ax2.plot(self.signal.time, self.signal.l_forehead)
            self.ax3_plot, = self.ax3.plot(self.signal.time, self.signal.r_forehead)
            self.ax4_plot, = self.ax4.plot(self.signal.time, self.signal.r_ear)
            for axis in (self.ax1, self.ax2, self.ax3, self.ax4):
                axis.set_ylim([self.low, self.high])
        formatter = mticker.FuncFormatter(timeTicks)
        for axis in (self.ax1, self.ax2, self.ax3, self.ax4):
            axis.xaxis.set_major_formatter(formatter)
        plt.ion()

    def show(self):
        """Open the window without blocking, then draw the first frame."""
        plt.show(block=False)
        self.refresh()

    def refresh(self):
        """Redraw all four channels, throttled to self.refresh_freq seconds."""
        time_now = datetime.now()
        if (time_now - self.last_refresh).total_seconds() <= self.refresh_freq:
            return
        self.last_refresh = time_now
        if self.signal.do_fft:
            half = len(self.x_data) // 2
            self.ax1_plot.set_ydata(self.signal.l_ear_fft[0:half])
            self.ax2_plot.set_ydata(self.signal.l_forehead_fft[0:half])
            self.ax3_plot.set_ydata(self.signal.r_forehead_fft[0:half])
            self.ax4_plot.set_ydata(self.signal.r_ear_fft[0:half])
            # Bugfix: the x axis must stay the fixed half-length index range;
            # pushing the full-length time vector onto the half-length FFT
            # plots (as the old code did) mismatches x/y sizes on draw.
        else:
            self.ax1_plot.set_ydata(self.signal.l_ear)
            self.ax2_plot.set_ydata(self.signal.l_forehead)
            self.ax3_plot.set_ydata(self.signal.r_forehead)
            self.ax4_plot.set_ydata(self.signal.r_ear)
            # Rebuild the x axis so it tracks the rolling time window.
            times = list(linspace(self.signal.time[0], self.signal.time[-1], self.signal.length))
            for plot in (self.ax1_plot, self.ax2_plot, self.ax3_plot, self.ax4_plot):
                plot.set_xdata(times)
            plt.xlim(self.signal.time[0], self.signal.time[-1])
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
class MuseViewerConcentrationMellow(object):
    """Two-panel live viewer for the concentration and mellow metrics."""

    def __init__(self, signal_concentration, signal_mellow, signal_boundaries=None):
        # Redraw at most once every 50 ms.
        self.refresh_freq = 0.05
        self.init_time = 0.0
        self.last_refresh = datetime.now()
        self.signal_concentration = signal_concentration
        self.signal_mellow = signal_mellow
        if signal_boundaries is None:
            self.low, self.high = 0, 1
        else:
            self.low = signal_boundaries[0]
            self.high = signal_boundaries[1]
        # One x value per buffered sample.
        self.x_data_concentration = range(0, self.signal_concentration.length, 1)
        self.x_data_mellow = range(0, self.signal_mellow.length, 1)
        self.figure, (self.ax1, self.ax2) = plt.subplots(2, 1, sharex=True, figsize=(15, 10))
        self.ax1.set_title('Concentration')
        self.ax2.set_title('Mellow')
        self.ax1_plot, = self.ax1.plot(self.x_data_concentration, self.signal_concentration.concentration)
        self.ax2_plot, = self.ax2.plot(self.x_data_mellow, self.signal_mellow.mellow)
        self.ax1.set_ylim([self.low, self.high])
        self.ax2.set_ylim([self.low, self.high])
        tick_formatter = mticker.FuncFormatter(timeTicks)
        self.ax1.xaxis.set_major_formatter(tick_formatter)
        self.ax2.xaxis.set_major_formatter(tick_formatter)
        plt.ion()

    def show(self):
        """Open the window without blocking, then draw once."""
        plt.show(block=False)
        self.refresh()

    def refresh(self):
        """Redraw both panels, throttled to self.refresh_freq seconds."""
        now = datetime.now()
        if (now - self.last_refresh).total_seconds() <= self.refresh_freq:
            return
        self.last_refresh = now
        self.ax1_plot.set_ydata(self.signal_concentration.concentration)
        self.ax2_plot.set_ydata(self.signal_mellow.mellow)
        # Slide the shared x axis along the concentration signal's window.
        window = list(linspace(self.signal_concentration.time[0],
                               self.signal_concentration.time[-1],
                               self.signal_concentration.length))
        self.ax1_plot.set_xdata(window)
        self.ax2_plot.set_xdata(window)
        plt.xlim(self.signal_concentration.time[0], self.signal_concentration.time[-1])
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
| {
"repo_name": "twuilliam/pyMuse",
"path": "pymuse/viz.py",
"copies": "1",
"size": "6204",
"license": "mit",
"hash": 8146313915515503000,
"line_mean": 39.5490196078,
"line_max": 135,
"alpha_frac": 0.6223404255,
"autogenerated": false,
"ratio": 3.1349166245578575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42572570500578577,
"avg_score": null,
"num_lines": null
} |
__author__ = 'benjamindeleener'
import sys
import time
from pymuse.ios import MuseServer
from pymuse.viz import MuseViewerSignal, MuseViewerConcentrationMellow
from pymuse.signals import MuseEEG, MuseConcentration, MuseMellow
from liblo import ServerError
def main():
    """Wire up the Muse OSC server, the metric signals/viewers and pong.

    Blocks in a sleep loop until interrupted with Ctrl-C.
    """
    # initialization of variables
    signals, viewers = dict(), dict()
    # Concentration and Mellow
    # 400 samples at 10 Hz -> a 40-second rolling window for each metric.
    signal_concentration = MuseConcentration(length=400, acquisition_freq=10.0)
    signal_mellow = MuseMellow(length=400, acquisition_freq=10.0)
    viewer_concentration_mellow = MuseViewerConcentrationMellow(signal_concentration, signal_mellow, signal_boundaries=[-0.05, 1.05])
    signals['concentration'] = signal_concentration
    signals['mellow'] = signal_mellow
    viewers['concentration-mellow'] = viewer_concentration_mellow
    # Initializing the server
    try:
        server = MuseServer(port=5001, signal=signals, viewer=viewers)
    except ServerError, err:
        print str(err)
        sys.exit(1)
    # NOTE(review): imported mid-function rather than at module level —
    # presumably to defer the game's setup until the server exists; confirm.
    import apps.pong.pong as pong
    pong_game = pong.PongApp()
    # Hand the server a reference to the game so incoming data can drive it.
    server.game = pong_game
    # Starting the server
    try:
        server.start()
        pong_game.run()
        # Keep the main thread alive while the server does its work.
        while 1:
            time.sleep(0.01)
    except KeyboardInterrupt:
        print "\nEnd of program: Caught KeyboardInterrupt"
        sys.exit(0)
if __name__ == "__main__":
    main()
| {
"repo_name": "gaamy/pyMuse",
"path": "eeg_pong.py",
"copies": "1",
"size": "1366",
"license": "mit",
"hash": -8382631122858573000,
"line_mean": 28.6956521739,
"line_max": 133,
"alpha_frac": 0.6932650073,
"autogenerated": false,
"ratio": 3.594736842105263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4788001849405263,
"avg_score": null,
"num_lines": null
} |
# @AUTHOR: Benjamin Meyers
# @DESCRIPTION: Try to write code to convert text into hAck3r, using regular
# expressions and substitution, where e → 3, i → 1, o → 0,
# l → |, s → 5, . → 5w33t!, ate → 8. Normalize the text to
# lowercase before converting it. Add more substitutions of your
# own. Now try to map s to two different values: $ for
# word-initial s, and 5 for word-internal s.
import re, sys
def file_to_hacker(input_file):
    """Open a text file and print each line converted to hack3r speak.

    Each line is lowercased, stripped of surrounding whitespace and
    punctuation (except '.'), then run through the substitution table:
    word-initial s -> $, ate -> 8, for -> 4, too/to -> 2, e -> 3, i -> 1,
    o -> 0, l -> |, remaining s -> 5, . -> 5w33t!.
    """
    with open(input_file, 'r') as f:
        for line in f:
            # Bugfix: strip the trailing newline first — the old
            # strip('.,:;!?') never fired on file lines because '\n' was
            # the outermost character. '.' is also no longer stripped, so
            # the documented '. -> 5w33t!' rule below can actually apply.
            temp_line = line.lower().strip().strip(',:;!?')
            # Word-initial s ($): start of line or preceded by whitespace.
            temp_line = re.sub(r'(^|\s)s', r'\1$', temp_line)
            temp_line = re.sub(r'ate', '8', temp_line)
            temp_line = re.sub(r'for', '4', temp_line)
            temp_line = re.sub(r'too', '2', temp_line)
            temp_line = re.sub(r'to', '2', temp_line)
            temp_line = re.sub(r'e', '3', temp_line)
            temp_line = re.sub(r'i', '1', temp_line)
            temp_line = re.sub(r'o', '0', temp_line)
            temp_line = re.sub(r'l', '|', temp_line)
            temp_line = re.sub(r's', '5', temp_line)
            temp_line = re.sub(r'\.', '5w33t!', temp_line)
            print(temp_line)
def text_to_hacker(input_text):
    """Print each line of the given string converted to hack3r speak.

    Mirrors file_to_hacker: lowercase, strip surrounding whitespace and
    punctuation (except '.'), then substitute: word-initial s -> $,
    ate -> 8, for -> 4, too/to -> 2, e -> 3, i -> 1, o -> 0, l -> |,
    remaining s -> 5, . -> 5w33t!.
    """
    for line in input_text.split('\n'):
        # Bugfix: '.' is no longer stripped from the line ends, so the
        # documented '. -> 5w33t!' substitution below can actually apply.
        temp_line = line.lower().strip().strip(',:;!?')
        # Word-initial s ($): start of line or preceded by whitespace.
        temp_line = re.sub(r'(^|\s)s', r'\1$', temp_line)
        temp_line = re.sub(r'ate', '8', temp_line)
        temp_line = re.sub(r'for', '4', temp_line)
        temp_line = re.sub(r'too', '2', temp_line)
        temp_line = re.sub(r'to', '2', temp_line)
        temp_line = re.sub(r'e', '3', temp_line)
        temp_line = re.sub(r'i', '1', temp_line)
        temp_line = re.sub(r'o', '0', temp_line)
        temp_line = re.sub(r'l', '|', temp_line)
        temp_line = re.sub(r's', '5', temp_line)
        temp_line = re.sub(r'\.', '5w33t!', temp_line)
        print(temp_line)
def main():
    """Dispatch on the CLI flag: -f converts a file, -t converts raw text."""
    handlers = {'-f': file_to_hacker, '-t': text_to_hacker}
    try:
        handler = handlers[sys.argv[1]]
    except KeyError:
        sys.exit("Invalid command.")
    handler(sys.argv[2])
if __name__ == "__main__":
    main()
| {
"repo_name": "meyersbs/misc_nlp_scripts",
"path": "english_to_hack3r.py",
"copies": "1",
"size": "2343",
"license": "mit",
"hash": 975622660034387600,
"line_mean": 39.8596491228,
"line_max": 78,
"alpha_frac": 0.5135251181,
"autogenerated": false,
"ratio": 2.893167701863354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8850020467786616,
"avg_score": 0.011334470435347629,
"num_lines": 57
} |
__author__ = 'benjamin'
from PIL import Image, ImageDraw
import colorsys
class Sample:
    """A georeferenced measurement with a breakpoint-based colour class.

    Class-level attributes hold global lat/lon/value extremes plus the
    colour ramp and breakpoints shared by every instance.
    """

    # Global extremes, initialised to sentinels and reset via reset_values().
    min_lat = min_lon = 10000
    max_lat = max_lon = -10000
    min_val = 10
    max_val = -10

    # RGBA colour ramp.
    color1 = (46, 239, 67, 255)  #green
    color2 = (147, 239, 67, 255)
    color3 = (199, 239, 67, 255)
    color4 = (224, 239, 67, 255)  #yellow
    color5 = (224, 213, 67, 255)
    color6 = (224, 162, 67, 255)
    color7 = (224, 60, 67, 255)  #red

    # Breakpoints between colour classes, lowest first.
    scale = (0.53, 0.54, 0.55, 0.56, 0.57, 0.62)
    classes = 7
    use_scale = False

    def __init__(self, lat, lon, val):
        """Store one sample.

        Args:
            lat (float): latitude where the sample was taken.
            lon (float): longitude where the sample was taken.
            val (float): value of the taken sample.
        """
        self.lat = lat
        self.lon = lon
        self.val = val

    def get_color_from_val(self):
        """Classify self.val against the breakpoints; cache and return the colour.

        Values at or below scale[0] map to color7 (red); values above every
        breakpoint map to color1 (green).
        """
        ramp = (Sample.color7, Sample.color6, Sample.color5,
                Sample.color4, Sample.color3, Sample.color2)
        for position, candidate in enumerate(ramp):
            if self.val <= Sample.scale[position]:
                self.color = candidate
                break
        else:
            self.color = Sample.color1
        return self.color

    @staticmethod
    def reset_values():
        """Reset the global lat/lon/value extremes to their sentinels."""
        Sample.min_lat = Sample.min_lon = 10000
        Sample.max_lat = Sample.max_lon = -10000
        Sample.min_val = 10
        Sample.max_val = -10

    @staticmethod
    def useScale(scale):
        """Switch classification to the fixed breakpoint tuple `scale`."""
        Sample.scale = scale
        Sample.use_scale = True

    @staticmethod
    def useNaturalBreaks(classes):
        """Switch classification to natural breaks with `classes` classes."""
        Sample.classes = classes
        Sample.use_scale = False
| {
"repo_name": "silva96/geojson-ndvi",
"path": "Sample.py",
"copies": "1",
"size": "2112",
"license": "mit",
"hash": 6420472639804537000,
"line_mean": 29.1714285714,
"line_max": 82,
"alpha_frac": 0.5596590909,
"autogenerated": false,
"ratio": 3.4966887417218544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4556347832621854,
"avg_score": null,
"num_lines": null
} |
__author__ = 'benjamin'
class Quad:
    """One quadrilateral face of a mesh.

    From the global quad/vertex arrays this precomputes: the centroid, a
    planarity flag, a unit normal (least-squares fit when the corners are
    not coplanar), the corners projected onto the fit plane, two local
    bases used for (u, v) parameterization, and the ids of all
    edge-adjacent quads.

    NOTE(review): written for Python 2 (`point_on_quad` uses a print
    statement); the per-method `import numpy` calls are redundant but
    harmless.
    """
    # _quadlist and _vertexlist have to be of type np.array!
    def __init__(self, _id, _quadlist, _vertexlist):
        """Build all derived quantities for quad number `_id`.

        Plain lists are converted to numpy arrays; any other type raises.
        """
        import numpy as np
        if type(_quadlist) is list:
            _quadlist = np.array(_quadlist)
        if type(_vertexlist) is list:
            _vertexlist = np.array(_vertexlist)
        if not (type(_quadlist) is np.ndarray and type(_vertexlist) is np.ndarray):
            raise Exception("WRONG TYPE! exiting...")
        self.quad_id = _id
        self.vertex_ids = _quadlist[_id]
        self.centroid = self.compute_centroid(_vertexlist)
        self.is_plane = self.compute_plane(_vertexlist)
        self.normal = self.compute_normal(_vertexlist)
        self.vertices_plane = self.compute_plane_corner_points(_vertexlist)
        self.ortho_basis_AB, \
        self.basis_BAD, \
        self.ortho_basis_CB, \
        self.basis_BCD = \
            self.compute_basis(_vertexlist)# [edge_AB;edge_orthogonal;normal]
        self.neighbors = self.find_neighbors(_quadlist)
        #self.basis, self.basis_inv = self.get_basis()
    def compute_centroid(self, _vertexlist):
        """Return the mean position of the four corner vertices."""
        import numpy as np
        return np.mean(_vertexlist[self.vertex_ids],0)
    def compute_plane(self, _vertexlist):
        """Return True when the four corners are coplanar.

        Test: |det([AB, AC, AD])| below 1e-14.
        """
        import numpy as np
        A=_vertexlist[self.vertex_ids[0]]
        B=_vertexlist[self.vertex_ids[1]]
        C=_vertexlist[self.vertex_ids[2]]
        D=_vertexlist[self.vertex_ids[3]]
        AB=B-A
        AC=C-A
        AD=D-A
        Q=np.array([AB,AC,AD])
        return abs(np.linalg.det(Q))<10**-14
    def compute_normal(self, _vertexlist):
        """Return a unit normal vector for the quad.

        Planar quad: cross product of two edges. Non-planar quad: the
        singular vector of the centred corner matrix belonging to the
        smallest singular value, i.e. the least-squares fit plane normal.
        """
        import numpy as np
        if self.is_plane:
            vertex1 = _vertexlist[self.vertex_ids[1]]
            vertex2 = _vertexlist[self.vertex_ids[2]]
            vertex3 = _vertexlist[self.vertex_ids[3]]
            edge12 = vertex2-vertex1
            edge13 = vertex3-vertex1
            normal = np.cross(edge12,edge13)
            normal /= np.linalg.norm(normal)
        else:
            #find least squares fit plane
            lsq_matrix = _vertexlist[self.vertex_ids] - self.centroid
            u, s, v = np.linalg.svd(lsq_matrix)
            idx = np.where(np.min(abs(s)) == abs(s))[0][0]
            normal = v[idx, :]
            normal /= np.linalg.norm(normal)
        return normal
    # TODO there is a problem with the coordinate system of the quad:
    # One system is right handed, one left. In the end the parameters are therefore flipped. For now we fixed this in a
    # quite pragmatic way, but it should be improved in a refactoring session!
    def compute_basis(self, _vertexlist):
        """Return four 3x3 basis matrices (as column-vector matrices).

        ortho_basis_AB / ortho_basis_CD are orthonormal
        [normal, edge, edge x normal] frames built from edges AB and CD;
        basis_BAD / basis_BCD are non-orthogonal [normal, edge, edge]
        frames anchored at corners A and C respectively.
        """
        import numpy as np
        vertexA = self.vertices_plane[0,:]
        vertexB = self.vertices_plane[1,:]
        vertexC = self.vertices_plane[2,:]
        vertexD = self.vertices_plane[3,:]
        edgeAB = vertexB - vertexA
        edgeAD = vertexD - vertexA
        edgeCB = vertexB - vertexC
        edgeCD = vertexD - vertexC
        basis_BAD = np.array([self.normal, edgeAB, edgeAD])
        basis_BCD = np.array([self.normal, edgeCD, edgeCB])
        edgeAB_normalized = edgeAB / np.linalg.norm(edgeAB)
        edgeCD_normalized = edgeCD / np.linalg.norm(edgeCD)
        ortho_basis_AB = np.array([self.normal,
                                   edgeAB_normalized,
                                   np.cross(edgeAB_normalized, self.normal)])
        ortho_basis_CD = np.array([self.normal,
                                   edgeCD_normalized,
                                   np.cross(edgeCD_normalized, self.normal)])
        return ortho_basis_AB.transpose(), basis_BAD.transpose(), ortho_basis_CD.transpose(), basis_BCD.transpose()
    def projection_onto_plane(self, _point):
        """Project `_point` onto the quad's (fit) plane.

        Returns (projected_point, signed distance along the normal).
        """
        import numpy as np
        distance = np.dot(self.centroid-_point, self.normal)
        projected_point = _point+distance*self.normal
        return projected_point, distance
    def point_on_quad(self, u, v):
        """Map parameters (u, v) in [0, 1]^2 to a 3D point on the quad.

        The unit square is split along the diagonal BD: u+v <= 1 uses the
        frame anchored at A, otherwise the frame anchored at C (with the
        parameters mirrored). Terminates the process on invalid input.
        """
        import numpy as np
        if u+v <= 1 and u >= 0 and v >= 0:
            vertexA = self.vertices_plane[0,:]
            point = vertexA + np.dot(self.basis_BAD[:,1:3],[u,v])
        elif u+v > 1 >= u and v <= 1:
            vertexC = self.vertices_plane[2,:]
            u = -u+1
            v = -v+1
            point = vertexC + np.dot(self.basis_BCD[:,1:3],[u,v])
        else:
            print "INVALID INPUT!"
            quit()
        return point
    def projection_onto_quad(self, _point):
        """Project `_point` onto the quad surface.

        Returns (projected_point, distance, u, v): u and v are the raw
        (possibly out-of-range) parameters of the projection, while the
        returned point uses values clamped to [0, 1]; the distance is
        always non-negative.
        """
        from scipy.linalg import solve_triangular
        import numpy as np
        # first assume that _point is below diagonal BD
        vertexA = self.vertices_plane[0,:]
        vector_vertexA_point = _point - vertexA
        # we want to transform _point to the BASIS=[normal,AB,AC] and use QR decomposition of BASIS = Q*R
        # BASIS * coords = _point -> R * coords = Q' * _point
        R_BAD = np.dot(self.ortho_basis_AB.transpose(),self.basis_BAD)
        b = np.dot(self.ortho_basis_AB.transpose(),vector_vertexA_point)
        x = solve_triangular(R_BAD,b)
        distance = x[0]
        projected_point = _point - distance * self.normal
        u = x[1]
        v = x[2]
        # if not, _point is above diagonal BD
        if u+v > 1:
            vertexC = self.vertices_plane[2,:]
            vector_vertexC_point = _point - vertexC
            R_BCD = np.dot(self.ortho_basis_CB.transpose(),self.basis_BCD)
            b = np.dot(self.ortho_basis_CB.transpose(),vector_vertexC_point)
            x = solve_triangular(R_BCD,b)
            distance = x[0]
            projected_point = _point - distance * self.normal
            u = 1-x[1]
            v = 1-x[2]
        distance = abs(distance)
        u_crop = u
        v_crop = v
        # Clamp out-of-range parameters to the quad boundary and recompute
        # the projected point / distance from the clamped location.
        if not (0<=u<=1 and 0<=v<=1):
            if u < 0:
                u_crop = 0
            elif u > 1:
                u_crop = 1
            if v < 0:
                v_crop = 0
            elif v > 1:
                v_crop = 1
            projected_point = self.point_on_quad(u_crop,v_crop)
            distance = np.linalg.norm(_point-projected_point)
        return projected_point, distance, u, v
    def measure_centroid_distance_squared(self, _point):
        """Return the squared Euclidean distance from `_point` to the centroid."""
        import numpy as np
        r = self.centroid-_point
        return np.dot(r,r)
    def compute_plane_corner_points(self, _vertexlist):
        """Return the four corners, projected onto the fit plane if non-planar."""
        import numpy as np
        if self.is_plane:
            return _vertexlist[self.vertex_ids]
        else:
            #return corner points projected onto fit plane!
            vertices = _vertexlist[self.vertex_ids]
            projected_vertices = np.zeros([4,3])
            i = 0
            for vertex in vertices:
                projected_vertex, distance = self.projection_onto_plane(vertex)
                projected_vertices[i,:] = projected_vertex
                i += 1
            return projected_vertices
    def find_neighbors(self,_quadlist):
        """Return the ids (int array) of quads sharing an edge with this quad."""
        import numpy as np
        neighbors = np.array([])
        edges = [self.vertex_ids[[0,1]],
                 self.vertex_ids[[1,2]],
                 self.vertex_ids[[2,3]],
                 self.vertex_ids[[3,0]]]
        for e in edges:
            # A neighbor is any other quad containing both endpoints of e.
            has_vertex1 = np.where(_quadlist == e[0])[0]
            has_vertex2 = np.where(_quadlist == e[1])[0]
            same_edge = np.intersect1d(has_vertex1, has_vertex2)
            neighbor = same_edge[same_edge != self.quad_id]
            neighbors = np.append(neighbors, neighbor)
        return neighbors.astype(int)
"repo_name": "BGCECSE2015/CADO",
"path": "PYTHON/NURBSReconstruction/DualContouring/quad.py",
"copies": "1",
"size": "7540",
"license": "bsd-3-clause",
"hash": -3897038094175691300,
"line_mean": 32.9684684685,
"line_max": 119,
"alpha_frac": 0.5547745358,
"autogenerated": false,
"ratio": 3.670886075949367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4725660611749367,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Benjamin S. Murphy'
__version__ = '1.4.0'
__doc__ = """
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
bscott.murphy@gmail.com
Summary
-------
Kriging toolkit for Python.
ok: Contains class OrdinaryKriging, which is a convenience class for easy
access to 2D ordinary kriging.
uk: Contains class UniversalKriging, which provides more control over
2D kriging by utilizing drift terms. Supported drift terms currently
include point-logarithmic, regional linear, and external z-scalar.
Generic functions of the spatial coordinates may also be supplied to
provide drift terms, or the point-by-point values of a drift term
may be supplied.
ok3d: Contains class OrdinaryKriging3D, which provides support for
3D ordinary kriging.
uk3d: Contains class UniversalKriging3D, which provide support for
3D universal kriging. A regional linear drift is the only drift term
currently supported, but generic drift functions or point-by-point
values of a drift term may also be supplied.
kriging_tools: Contains a set of functions to work with *.asc files.
variogram_models: Contains the definitions for the implemented variogram
models. Note that the utilized formulas are as presented in Kitanidis,
so the exact definition of the range (specifically, the associated
scaling of that value) may differ slightly from other sources.
core: Contains the backbone functions of the package that are called by both
the various kriging classes. The functions were consolidated here
in order to reduce redundancy in the code.
test: Contains the test script.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
Copyright (c) 2015-2018, PyKrige Developers
"""
from . import kriging_tools as kt # noqa
from .ok import OrdinaryKriging # noqa
from .uk import UniversalKriging # noqa
from .ok3d import OrdinaryKriging3D # noqa
from .uk3d import UniversalKriging3D # noqa
__all__ = ['ok', 'uk', 'ok3d', 'uk3d', 'kriging_tools']
| {
"repo_name": "rth/PyKrige",
"path": "pykrige/__init__.py",
"copies": "1",
"size": "2155",
"license": "bsd-3-clause",
"hash": 3398942906124494000,
"line_mean": 39.4423076923,
"line_max": 76,
"alpha_frac": 0.7354988399,
"autogenerated": false,
"ratio": 3.475806451612903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4711305291512903,
"avg_score": null,
"num_lines": null
} |
# Ad-hoc Python 2 analysis script: reconcile the number of HITs recorded by
# Mechanical Turk (WorkerResultData.csv) against the epoch views logged on
# our side (EpochViews.csv) and write a per-worker MissingData.csv report.
__author__ = 'ben'
from pprint import pprint
import os
import json
import pandas as pd
from os import walk
# NOTE(review): duplicate `import os` and several unused imports (pprint,
# json, walk, csv) — left untouched in this documentation-only pass.
import os
import csv
data = {}
phase = 'practice'
# Hand-picked practice epoch ids (easy / hard sets).
easyPrac = [10,12,17,30,34]
hardPrac = [25,26,35,37,42]
mypath = '../build/img/' + phase + '/900'
prac = True
# Batch layout metadata for the practice phase.
data['batchMeta'] = {
    'numBatches':2,
    'imgPerSet':10,
    'batchPerSet':2,
    'imgPerBatch':5,
    'subjects':0
}
# mypath = '../build/img/training1/test'
# data['batchMeta'] = {
#     'numBatches':1,
#     'imgPerSet':5,
#     'batchPerSet':1,
#     'imgPerBatch':5,
# }
# Local mirror of the downloaded MTurk data for one phase/trial.
path = '/media/ben/Data1/Users/Ben/Google Drive/MODA/DownloadUserData/'
phaseSet = 'phase1trial6'
# NOTE(review): `columns` is a set literal, so the column order of data_out
# is arbitrary; rows are appended later with an explicitly ordered index.
data_out = pd.DataFrame(columns={'workerId', 'myHits', 'mturkHits', 'missing','qual','new','old','new_prac'})
#workerData = pd.read_csv("DownloadedUserData/" + phaseSet + "/WorkerData.csv", sep=',')
#workerIdsTurk = workerData.loc[:,['Worker ID','Number of HITs approved or rejected - Lifetime', 'CURRENT-MODASleepScoring_PracticeCompleted']].copy()
#workerIdsTurk.rename(columns={'Worker ID': 'workerId', 'Number of HITs approved or rejected - Lifetime': 'Hits','CURRENT-MODASleepScoring_PracticeCompleted':'pracQual'}, inplace=True)
workerData = pd.read_csv(path + phaseSet + "/WorkerResultData.csv", sep=',')
workerIdsTurk = workerData.loc[:,['workerId','numHits']].copy()
workerIdsTurk.rename(columns={'numHits': 'Hits'}, inplace=True)
myData = pd.read_csv(path + phaseSet + "/EpochViews.csv", sep=',')
for workerId in workerIdsTurk["workerId"]:
    print "Worker {0}".format(workerId)
    # print "My Hits: {0}".format(int(myWorkerData[1]['allSets']))
    # All MTurk-side HIT counts for this worker (may be an empty Series).
    mturkHits = workerIdsTurk[workerIdsTurk["workerId"]==workerId]['Hits']
    #mTurkQual = workerIdsTurk[workerIdsTurk["workerId"]==workerId]['pracQual']
    mTurkQual = pd.DataFrame(['missing'])
    if mturkHits.empty:
        mturkHit = 0
    else:
        mturkHit = mturkHits.values[0]
    myDataHits = myData[myData["annotatorID"] == workerId]
    # 10 epoch views correspond to one HIT on our side (Python 2 integer
    # division).
    myHits = len(myDataHits.index)/10
    missing = mturkHit-myHits
    if missing:
        print "MISSING: {0}".format(missing)
    # NOTE(review): `in` on a pandas Series tests the *index*, not the
    # values, so this condition is presumably always False and every worker
    # takes the append branch — confirm before relying on the update path.
    if workerId in data_out['workerId']:
        workerLoc = data_out['workerId'] == workerId
        data_out.loc[workerLoc, 'myHits'] += myHits
        data_out.loc[workerLoc, 'mturkHits'] += mturkHits
        data_out.loc[workerLoc, 'missing'] += missing
    else:
        # NOTE(review): the last three flags are labelled 'old', 'new',
        # 'new_prac' but check UserData_, UserData_practice_ and
        # UserData_phase1_ files respectively — verify the label mapping.
        ser = pd.Series([workerId,
                         myHits,
                         mturkHit,
                         missing,
                         mTurkQual.values,
                         os.path.isfile(path+ phaseSet + '/UserData_' + workerId),
                         os.path.isfile(path + phaseSet + '/UserData_practice_'+workerId),
                         os.path.isfile(path + phaseSet + '/UserData_phase1_'+workerId)],
                        index=['workerId', 'myHits', 'mturkHits', 'missing','qual','old','new','new_prac'])
        data_out = data_out.append(ser, ignore_index=True)
data_out.to_csv(path + phaseSet + "/MissingData.csv")
| {
"repo_name": "bdyetton/MODA",
"path": "Tools/errorInvestigation.py",
"copies": "1",
"size": "3026",
"license": "mit",
"hash": -2108043188779607000,
"line_mean": 36.3580246914,
"line_max": 184,
"alpha_frac": 0.6245869134,
"autogenerated": false,
"ratio": 3.0596562184024267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41842431318024265,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.